From ac02c76b1e23019ec2a464bc846f59be26ab8990 Mon Sep 17 00:00:00 2001 From: Laurentiu Nicola Date: Tue, 16 May 2017 19:13:00 +0300 Subject: [PATCH] clippy: fix doc_markdown warnings --- src/core/segment_id.rs | 4 ++-- src/core/segment_meta.rs | 2 +- src/fastfield/delete.rs | 2 +- src/fastfield/error.rs | 2 +- src/fastfield/reader.rs | 6 +++--- src/indexer/index_writer.rs | 2 +- src/indexer/log_merge_policy.rs | 2 +- src/indexer/merge_policy.rs | 4 ++-- src/indexer/mod.rs | 2 +- src/indexer/segment_entry.rs | 10 +++++----- src/query/occur_filter.rs | 2 +- src/query/scorer.rs | 2 +- src/query/term_query/term_query.rs | 6 +++--- 13 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/core/segment_id.rs b/src/core/segment_id.rs index 52978c152..b9612b46a 100644 --- a/src/core/segment_id.rs +++ b/src/core/segment_id.rs @@ -5,13 +5,13 @@ use std::cmp::{Ordering, Ord}; #[cfg(test)] use std::sync::atomic; -/// Tantivy SegmentId. +/// Tantivy `SegmentId`. /// /// Tantivy's segment are identified /// by a UUID which is used to prefix the filenames /// of all of the file associated with the segment. /// -/// In unit test, for reproducability, the SegmentId are +/// In unit test, for reproducibility, the `SegmentId` are /// simply generated in an autoincrement fashion. #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct SegmentId(Uuid); diff --git a/src/core/segment_meta.rs b/src/core/segment_meta.rs index 5c9194e6e..623b22442 100644 --- a/src/core/segment_meta.rs +++ b/src/core/segment_meta.rs @@ -9,7 +9,7 @@ struct DeleteMeta { opstamp: u64, } -/// SegmentMeta contains simple meta information about a segment. +/// `SegmentMeta` contains simple meta information about a segment. /// /// For instance the number of docs it contains, /// how many are deleted, etc. 
diff --git a/src/fastfield/delete.rs b/src/fastfield/delete.rs index 08ad41e8e..8923437c8 100644 --- a/src/fastfield/delete.rs +++ b/src/fastfield/delete.rs @@ -6,7 +6,7 @@ use directory::ReadOnlySource; use DocId; use common::HasLen; -/// Write a delete BitSet +/// Write a delete `BitSet` /// /// where `delete_bitset` is the set of deleted `DocId`. pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> { diff --git a/src/fastfield/error.rs b/src/fastfield/error.rs index 88902833b..e28474702 100644 --- a/src/fastfield/error.rs +++ b/src/fastfield/error.rs @@ -1,7 +1,7 @@ use std::result; use schema::FieldEntry; -/// FastFieldNotAvailableError is returned when the +/// `FastFieldNotAvailableError` is returned when the /// user requested for a fast field reader, and the field was not /// defined in the schema as a fast field. #[derive(Debug)] diff --git a/src/fastfield/reader.rs b/src/fastfield/reader.rs index 74b69cb9e..ad281e377 100644 --- a/src/fastfield/reader.rs +++ b/src/fastfield/reader.rs @@ -37,7 +37,7 @@ pub trait FastFieldReader: Sized { fn is_enabled(field_type: &FieldType) -> bool; } -/// FastFieldReader for unsigned 64-bits integers. +/// `FastFieldReader` for unsigned 64-bit integers. pub struct U64FastFieldReader { _data: ReadOnlySource, bit_unpacker: BitUnpacker, @@ -133,7 +133,7 @@ impl From> for U64FastFieldReader { } } -/// FastFieldReader for signed 64-bits integers. +/// `FastFieldReader` for signed 64-bit integers. pub struct I64FastFieldReader { underlying: U64FastFieldReader, } @@ -189,7 +189,7 @@ impl FastFieldReader for I64FastFieldReader { -/// The FastFieldsReader` is the datastructure containing +/// The `FastFieldsReader` is the data structure containing /// all of the fast fields' data. 
/// /// It contains a mapping that associated these fields to diff --git a/src/indexer/index_writer.rs b/src/indexer/index_writer.rs index 1c8afa8f7..5c0fa7ddf 100644 --- a/src/indexer/index_writer.rs +++ b/src/indexer/index_writer.rs @@ -96,7 +96,7 @@ impl !Sync for IndexWriter {} /// `IndexWriter` on the system is accessing the index directory, /// it is safe to manually delete the lockfile. /// -/// num_threads specifies the number of indexing workers that +/// `num_threads` specifies the number of indexing workers that /// should work at the same time. /// # Errors /// If the lockfile already exists, returns `Error::FileAlreadyExists`. diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs index 0fea3d3ee..47f496998 100644 --- a/src/indexer/log_merge_policy.rs +++ b/src/indexer/log_merge_policy.rs @@ -9,7 +9,7 @@ const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000; const DEFAULT_MIN_MERGE_SIZE: usize = 8; -/// LogMergePolicy tries tries to merge segments that have a similar number of +/// `LogMergePolicy` tries to merge segments that have a similar number of /// documents. #[derive(Debug, Clone)] pub struct LogMergePolicy { diff --git a/src/indexer/merge_policy.rs b/src/indexer/merge_policy.rs index 69a958b39..9f8b59748 100644 --- a/src/indexer/merge_policy.rs +++ b/src/indexer/merge_policy.rs @@ -9,7 +9,7 @@ use std::fmt::Debug; pub struct MergeCandidate(pub Vec); -/// The Merge policy defines which segments should be merged. +/// The `MergePolicy` defines which segments should be merged. /// /// Every time a the list of segments changes, the segment updater /// asks the merge policy if some segments should be merged. @@ -52,7 +52,7 @@ pub mod tests { use core::SegmentMeta; - /// Merge policy useful for test purposes. + /// `MergePolicy` useful for test purposes. /// /// Everytime there is more than one segment, /// it will suggest to merge them. 
diff --git a/src/indexer/mod.rs b/src/indexer/mod.rs index 4dcd9fe12..f76477567 100644 --- a/src/indexer/mod.rs +++ b/src/indexer/mod.rs @@ -22,5 +22,5 @@ pub use self::log_merge_policy::LogMergePolicy; pub use self::merge_policy::{NoMergePolicy, MergeCandidate, MergePolicy}; pub use self::segment_manager::SegmentManager; -/// Alias for the default merge policy, which is the LogMergePolicy. +/// Alias for the default merge policy, which is the `LogMergePolicy`. pub type DefaultMergePolicy = LogMergePolicy; diff --git a/src/indexer/segment_entry.rs b/src/indexer/segment_entry.rs index 728cf2e55..082f9e1c1 100644 --- a/src/indexer/segment_entry.rs +++ b/src/indexer/segment_entry.rs @@ -24,16 +24,16 @@ impl SegmentState { /// A segment entry describes the state of /// a given segment, at a given instant. /// -/// In addition to segment meta, +/// In addition to segment `meta`, /// it contains a few transient states -/// - state expresses whether the segment is already in the +/// - `state` expresses whether the segment is already in the /// middle of a merge -/// - delete_bitset is a bitset describing +/// - `delete_bitset` is a bitset describing /// documents that were deleted during the commit /// itself. -/// - Delete cursor, is the position in the delete queue. +/// - `delete_cursor` is the position in the delete queue. /// Deletes happening before the cursor are reflected either -/// in the .del file or in the delete_bitset. +/// in the .del file or in the `delete_bitset`. #[derive(Clone)] pub struct SegmentEntry { meta: SegmentMeta, diff --git a/src/query/occur_filter.rs b/src/query/occur_filter.rs index 42246770d..1b21dea6e 100644 --- a/src/query/occur_filter.rs +++ b/src/query/occur_filter.rs @@ -1,7 +1,7 @@ use query::Occur; -/// An OccurFilter represents a filter over a bitset of +/// An `OccurFilter` represents a filter over a bitset of // at most 64 elements. 
/// /// It wraps some simple bitmask to compute the filter diff --git a/src/query/scorer.rs b/src/query/scorer.rs index 4ac05cd83..20b6e7e72 100644 --- a/src/query/scorer.rs +++ b/src/query/scorer.rs @@ -36,7 +36,7 @@ impl<'a> Scorer for Box { } } -/// EmptyScorer is a dummy Scorer in which no document matches. +/// `EmptyScorer` is a dummy `Scorer` in which no document matches. /// /// It is useful for tests and handling edge cases. pub struct EmptyScorer; diff --git a/src/query/term_query/term_query.rs b/src/query/term_query/term_query.rs index 330138edc..5c468c442 100644 --- a/src/query/term_query/term_query.rs +++ b/src/query/term_query/term_query.rs @@ -13,9 +13,9 @@ use std::any::Any; /// The score associated is defined as /// `idf` * sqrt(`term_freq` / `field norm`) /// in which : -/// * idf - inverse document frequency. -/// * term_freq - number of occurrences of the term in the field -/// * field norm - number of tokens in the field. +/// * `idf` - inverse document frequency. +/// * `term_freq` - number of occurrences of the term in the field +/// * `field norm` - number of tokens in the field. #[derive(Debug)] pub struct TermQuery { term: Term,