clippy: fix doc_markdown warnings

This commit is contained in:
Laurentiu Nicola
2017-05-16 19:13:00 +03:00
committed by Paul Masurel
parent e5c7c0b8b9
commit ac02c76b1e
13 changed files with 23 additions and 23 deletions

View File

@@ -5,13 +5,13 @@ use std::cmp::{Ordering, Ord};
#[cfg(test)]
use std::sync::atomic;
/// Tantivy SegmentId.
/// Tantivy `SegmentId`.
///
/// Tantivy's segments are identified
/// by a UUID, which is used to prefix the filenames
/// of all of the files associated with the segment.
///
/// In unit test, for reproducability, the SegmentId are
/// In unit tests, for reproducibility, the `SegmentId`s are
/// simply generated in an autoincrement fashion.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SegmentId(Uuid);

View File

@@ -9,7 +9,7 @@ struct DeleteMeta {
opstamp: u64,
}
/// SegmentMeta contains simple meta information about a segment.
/// `SegmentMeta` contains simple meta information about a segment.
///
/// For instance the number of docs it contains,
/// how many are deleted, etc.

View File

@@ -6,7 +6,7 @@ use directory::ReadOnlySource;
use DocId;
use common::HasLen;
/// Write a delete BitSet
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {

View File

@@ -1,7 +1,7 @@
use std::result;
use schema::FieldEntry;
/// FastFieldNotAvailableError is returned when the
/// `FastFieldNotAvailableError` is returned when the
/// user requested for a fast field reader, and the field was not
/// defined in the schema as a fast field.
#[derive(Debug)]

View File

@@ -37,7 +37,7 @@ pub trait FastFieldReader: Sized {
fn is_enabled(field_type: &FieldType) -> bool;
}
/// FastFieldReader for unsigned 64-bits integers.
/// `FastFieldReader` for unsigned 64-bits integers.
pub struct U64FastFieldReader {
_data: ReadOnlySource,
bit_unpacker: BitUnpacker,
@@ -133,7 +133,7 @@ impl From<Vec<u64>> for U64FastFieldReader {
}
}
/// FastFieldReader for signed 64-bits integers.
/// `FastFieldReader` for signed 64-bits integers.
pub struct I64FastFieldReader {
underlying: U64FastFieldReader,
}
@@ -189,7 +189,7 @@ impl FastFieldReader for I64FastFieldReader {
/// The FastFieldsReader` is the datastructure containing
/// The `FastFieldsReader` is the datastructure containing
/// all of the fast fields' data.
///
/// It contains a mapping that associates these fields to

View File

@@ -96,7 +96,7 @@ impl !Sync for IndexWriter {}
/// `IndexWriter` on the system is accessing the index directory,
/// it is safe to manually delete the lockfile.
///
/// num_threads specifies the number of indexing workers that
/// `num_threads` specifies the number of indexing workers that
/// should work at the same time.
/// # Errors
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.

View File

@@ -9,7 +9,7 @@ const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
const DEFAULT_MIN_MERGE_SIZE: usize = 8;
/// LogMergePolicy tries tries to merge segments that have a similar number of
/// `LogMergePolicy` tries to merge segments that have a similar number of
/// documents.
#[derive(Debug, Clone)]
pub struct LogMergePolicy {

View File

@@ -9,7 +9,7 @@ use std::fmt::Debug;
pub struct MergeCandidate(pub Vec<SegmentId>);
/// The Merge policy defines which segments should be merged.
/// The `MergePolicy` defines which segments should be merged.
///
/// Every time the list of segments changes, the segment updater
/// asks the merge policy if some segments should be merged.
@@ -52,7 +52,7 @@ pub mod tests {
use core::SegmentMeta;
/// Merge policy useful for test purposes.
/// `MergePolicy` useful for test purposes.
///
/// Every time there is more than one segment,
/// it will suggest to merge them.

View File

@@ -22,5 +22,5 @@ pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_policy::{NoMergePolicy, MergeCandidate, MergePolicy};
pub use self::segment_manager::SegmentManager;
/// Alias for the default merge policy, which is the LogMergePolicy.
/// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy;

View File

@@ -24,16 +24,16 @@ impl SegmentState {
/// A segment entry describes the state of
/// a given segment, at a given instant.
///
/// In addition to segment meta,
/// In addition to segment `meta`,
/// it contains a few transient states
/// - state expresses whether the segment is already in the
/// - `state` expresses whether the segment is already in the
/// middle of a merge
/// - delete_bitset is a bitset describing
/// - `delete_bitset` is a bitset describing
/// documents that were deleted during the commit
/// itself.
/// - Delete cursor, is the position in the delete queue.
/// - `delete_cursor` is the position in the delete queue.
/// Deletes happening before the cursor are reflected either
/// in the .del file or in the delete_bitset.
/// in the .del file or in the `delete_bitset`.
#[derive(Clone)]
pub struct SegmentEntry {
meta: SegmentMeta,

View File

@@ -1,7 +1,7 @@
use query::Occur;
/// An OccurFilter represents a filter over a bitset of
/// An `OccurFilter` represents a filter over a bitset of
/// at most 64 elements.
///
/// It wraps some simple bitmask to compute the filter

View File

@@ -36,7 +36,7 @@ impl<'a> Scorer for Box<Scorer + 'a> {
}
}
/// EmptyScorer is a dummy Scorer in which no document matches.
/// `EmptyScorer` is a dummy `Scorer` in which no document matches.
///
/// It is useful for tests and handling edge cases.
pub struct EmptyScorer;

View File

@@ -13,9 +13,9 @@ use std::any::Any;
/// The score associated is defined as
/// `idf` * sqrt(`term_freq` / `field norm`)
/// in which :
/// * idf - inverse document frequency.
/// * term_freq - number of occurrences of the term in the field
/// * field norm - number of tokens in the field.
/// * `idf` - inverse document frequency.
/// * `term_freq` - number of occurrences of the term in the field
/// * `field norm` - number of tokens in the field.
#[derive(Debug)]
pub struct TermQuery {
term: Term,