From 2069e3e52b87768ffe6cdce4fe30b8078250e34a Mon Sep 17 00:00:00 2001
From: Paul Masurel
Date: Tue, 1 Feb 2022 10:24:05 +0900
Subject: [PATCH] Fixing clippy comments

---
 fastfield_codecs/src/main.rs          |  3 +--
 ownedbytes/src/lib.rs                 |  3 +--
 src/collector/top_collector.rs        |  2 +-
 src/core/index.rs                     |  9 +++++++++
 src/core/index_meta.rs                |  1 +
 src/core/segment.rs                   |  1 +
 src/directory/file_slice.rs           |  4 ++++
 src/directory/mmap_directory.rs       |  2 +-
 src/fieldnorm/mod.rs                  |  2 +-
 src/indexer/demuxer.rs                |  4 ++--
 src/indexer/segment_updater.rs        |  9 +++------
 src/lib.rs                            |  1 -
 src/query/boolean_query/block_wand.rs |  1 +
 src/query/more_like_this/query.rs     |  8 ++++++++
 src/query/term_query/mod.rs           |  2 +-
 src/reader/mod.rs                     | 15 +++++++++------
 src/reader/warming.rs                 |  2 +-
 src/schema/bytes_options.rs           |  4 ++++
 src/schema/facet_options.rs           |  1 +
 src/schema/field.rs                   |  3 +--
 src/schema/int_options.rs             |  4 ++++
 src/schema/text_options.rs            |  5 +++++
 src/tokenizer/tokenizer.rs            |  1 +
 23 files changed, 61 insertions(+), 26 deletions(-)

diff --git a/fastfield_codecs/src/main.rs b/fastfield_codecs/src/main.rs
index bfce54ca4..18fef5c60 100644
--- a/fastfield_codecs/src/main.rs
+++ b/fastfield_codecs/src/main.rs
@@ -39,7 +39,6 @@ fn main() {
         } else {
             (est.to_string(), comp.to_string())
         };
-        #[allow(clippy::all)]
         let style = if comp == best_compression_ratio_codec.1 {
             "Fb"
         } else {
@@ -47,7 +46,7 @@ fn main() {
         };
 
         table.add_row(Row::new(vec![
-            Cell::new(&name.to_string()).style_spec("bFg"),
+            Cell::new(name).style_spec("bFg"),
             Cell::new(&ratio_cell).style_spec(style),
             Cell::new(&est_cell).style_spec(""),
         ]));
diff --git a/ownedbytes/src/lib.rs b/ownedbytes/src/lib.rs
index 6bb0ee311..0f8f82583 100644
--- a/ownedbytes/src/lib.rs
+++ b/ownedbytes/src/lib.rs
@@ -1,5 +1,3 @@
-#![allow(clippy::return_self_not_must_use)]
-
 use std::convert::TryInto;
 use std::ops::{Deref, Range};
 use std::sync::Arc;
@@ -85,6 +83,7 @@ impl OwnedBytes {
     /// Splits the right part of the `OwnedBytes` at the given offset.
     ///
     /// `self` is truncated to `split_len`, left with the remaining bytes.
+    #[allow(clippy::return_self_not_must_use)]
     pub fn split_off(&mut self, split_len: usize) -> OwnedBytes {
         let right_box_stable_deref = self.box_stable_deref.clone();
         let right_piece = OwnedBytes {
diff --git a/src/collector/top_collector.rs b/src/collector/top_collector.rs
index 34dbc8d33..e37272a9f 100644
--- a/src/collector/top_collector.rs
+++ b/src/collector/top_collector.rs
@@ -250,7 +250,7 @@ mod tests {
         // when harvesting we have to guarantee stable sorting in case of a tie
         // on the score
         let doc_ids_collection = [4, 5, 6];
-        let score = 3.14;
+        let score = 3.3f32;
 
         let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
         for id in &doc_ids_collection {
diff --git a/src/core/index.rs b/src/core/index.rs
index b75536be1..a5cc0feb3 100644
--- a/src/core/index.rs
+++ b/src/core/index.rs
@@ -88,16 +88,21 @@ impl IndexBuilder {
             index_settings: IndexSettings::default(),
         }
     }
+
     /// Set the settings
+    #[must_use]
     pub fn settings(mut self, settings: IndexSettings) -> Self {
         self.index_settings = settings;
         self
     }
+
     /// Set the schema
+    #[must_use]
     pub fn schema(mut self, schema: Schema) -> Self {
         self.schema = Some(schema);
         self
     }
+
     /// Creates a new index using the `RAMDirectory`.
     ///
     /// The index will be allocated in anonymous memory.
@@ -108,6 +113,7 @@ impl IndexBuilder {
             .create(ram_directory)
             .expect("Creating a RAMDirectory should never fail"))
     }
+
     /// Creates a new index in a given filepath.
     /// The index will use the `MMapDirectory`.
     ///
@@ -120,6 +126,7 @@ impl IndexBuilder {
         }
         self.create(mmap_directory)
     }
+
     /// Creates a new index in a temp directory.
     ///
     /// The index will use the `MMapDirectory` in a newly created directory.
@@ -133,12 +140,14 @@ impl IndexBuilder {
         let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::create_from_tempdir()?);
         self.create(mmap_directory)
     }
+
     fn get_expect_schema(&self) -> crate::Result<Schema> {
         self.schema
             .as_ref()
             .cloned()
             .ok_or(TantivyError::IndexBuilderMissingArgument("schema"))
     }
+
     /// Opens or creates a new index in the provided directory
     pub fn open_or_create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
         let dir = dir.into();
diff --git a/src/core/index_meta.rs b/src/core/index_meta.rs
index 812222893..d62e9902e 100644
--- a/src/core/index_meta.rs
+++ b/src/core/index_meta.rs
@@ -192,6 +192,7 @@ impl SegmentMeta {
     }
 
     #[doc(hidden)]
+    #[must_use]
    pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
         assert!(
             num_deleted_docs <= self.max_doc(),
diff --git a/src/core/segment.rs b/src/core/segment.rs
index c82295d12..5f96279ff 100644
--- a/src/core/segment.rs
+++ b/src/core/segment.rs
@@ -54,6 +54,7 @@ impl Segment {
     }
 
     #[doc(hidden)]
+    #[must_use]
     pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
         Segment {
             index: self.index,
diff --git a/src/directory/file_slice.rs b/src/directory/file_slice.rs
index 9c18f1eb5..076caeeb1 100644
--- a/src/directory/file_slice.rs
+++ b/src/directory/file_slice.rs
@@ -77,6 +77,7 @@ impl FileSlice {
     /// # Panics
     ///
     /// Panics if `byte_range.end` exceeds the filesize.
+    #[must_use]
     pub fn slice(&self, byte_range: Range<usize>) -> FileSlice {
         assert!(byte_range.end <= self.len());
         FileSlice {
@@ -136,6 +137,7 @@ impl FileSlice {
     /// boundary.
     ///
     /// Equivalent to `.slice(from_offset, self.len())`
+    #[must_use]
     pub fn slice_from(&self, from_offset: usize) -> FileSlice {
         self.slice(from_offset..self.len())
     }
@@ -143,6 +145,7 @@ impl FileSlice {
     /// Returns a slice from the end.
     ///
     /// Equivalent to `.slice(self.len() - from_offset, self.len())`
+    #[must_use]
     pub fn slice_from_end(&self, from_offset: usize) -> FileSlice {
         self.slice(self.len() - from_offset..self.len())
     }
@@ -151,6 +154,7 @@ impl FileSlice {
     /// boundary.
     ///
     /// Equivalent to `.slice(0, to_offset)`
+    #[must_use]
     pub fn slice_to(&self, to_offset: usize) -> FileSlice {
         self.slice(0..to_offset)
     }
diff --git a/src/directory/mmap_directory.rs b/src/directory/mmap_directory.rs
index 381ea1333..e56036dcb 100644
--- a/src/directory/mmap_directory.rs
+++ b/src/directory/mmap_directory.rs
@@ -514,7 +514,7 @@ mod tests {
         {
             for path in &paths {
                 let mut w = mmap_directory.open_write(path).unwrap();
-                w.write(content).unwrap();
+                w.write_all(content).unwrap();
                 w.flush().unwrap();
             }
         }
diff --git a/src/fieldnorm/mod.rs b/src/fieldnorm/mod.rs
index 9eb64fb71..e270149fc 100644
--- a/src/fieldnorm/mod.rs
+++ b/src/fieldnorm/mod.rs
@@ -79,7 +79,7 @@ mod tests {
            fieldnorm_writers.record(3u32, *TXT_FIELD, 3);
            fieldnorm_writers.serialize(serializer, None)?;
        }
-       let file = directory.open_read(&path)?;
+       let file = directory.open_read(path)?;
        {
            let fields_composite = CompositeFile::open(&file)?;
            assert!(fields_composite.open_read(*FIELD).is_none());
diff --git a/src/indexer/demuxer.rs b/src/indexer/demuxer.rs
index 54409eade..7934af9bd 100644
--- a/src/indexer/demuxer.rs
+++ b/src/indexer/demuxer.rs
@@ -272,7 +272,7 @@ mod tests {
         let text_field = index.schema().get_field("text").unwrap();
 
         let do_search = |term: &str| {
-            let query = QueryParser::for_index(&index, vec![text_field])
+            let query = QueryParser::for_index(index, vec![text_field])
                 .parse_query(term)
                 .unwrap();
             let top_docs: Vec<(f32, DocAddress)> =
@@ -301,7 +301,7 @@ mod tests {
         let text_field = index.schema().get_field("text").unwrap();
 
         let do_search = |term: &str| {
-            let query = QueryParser::for_index(&index, vec![text_field])
+            let query = QueryParser::for_index(index, vec![text_field])
                 .parse_query(term)
                 .unwrap();
             let top_docs: Vec<(f32, DocAddress)> =
diff --git a/src/indexer/segment_updater.rs b/src/indexer/segment_updater.rs
index 5dad00b6d..c1255eec6 100644
--- a/src/indexer/segment_updater.rs
+++ b/src/indexer/segment_updater.rs
@@ -1081,7 +1081,7 @@ mod tests {
         let target_schema = segments[0].schema();
         let merged_index = Index::create(
             RamDirectory::default(),
-            target_schema.clone(),
+            target_schema,
             target_settings.clone(),
         )?;
         let merger: IndexMerger = IndexMerger::open_with_custom_alive_set(
@@ -1098,11 +1098,8 @@ mod tests {
     {
         let filter_segments = vec![None];
         let target_schema = segments[0].schema();
-        let merged_index = Index::create(
-            RamDirectory::default(),
-            target_schema.clone(),
-            target_settings.clone(),
-        )?;
+        let merged_index =
+            Index::create(RamDirectory::default(), target_schema, target_settings)?;
         let merger: IndexMerger = IndexMerger::open_with_custom_alive_set(
             merged_index.schema(),
             merged_index.settings().clone(),
diff --git a/src/lib.rs b/src/lib.rs
index 676149801..5cc600833 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,7 +11,6 @@
 #![doc(test(attr(allow(unused_variables), deny(warnings))))]
 #![warn(missing_docs)]
 #![allow(clippy::len_without_is_empty)]
-#![allow(clippy::return_self_not_must_use)]
 
 //! # `tantivy`
 //!
diff --git a/src/query/boolean_query/block_wand.rs b/src/query/boolean_query/block_wand.rs
index 0361751eb..ed97e8cfc 100644
--- a/src/query/boolean_query/block_wand.rs
+++ b/src/query/boolean_query/block_wand.rs
@@ -417,6 +417,7 @@ mod tests {
             .boxed()
     }
 
+    #[allow(clippy::type_complexity)]
     fn gen_term_scorers(num_scorers: usize) -> BoxedStrategy<(Vec>, Vec)> {
         (1u32..100u32)
             .prop_flat_map(move |max_doc: u32| {
diff --git a/src/query/more_like_this/query.rs b/src/query/more_like_this/query.rs
index d8114ff88..6d12c3272 100644
--- a/src/query/more_like_this/query.rs
+++ b/src/query/more_like_this/query.rs
@@ -67,6 +67,7 @@ impl MoreLikeThisQueryBuilder {
     ///
     /// The resulting query will ignore words which do not occur
     /// in at least this many docs.
+    #[must_use]
     pub fn with_min_doc_frequency(mut self, value: u64) -> Self {
         self.mlt.min_doc_frequency = Some(value);
         self
@@ -76,6 +77,7 @@ impl MoreLikeThisQueryBuilder {
     ///
     /// The resulting query will ignore words which occur
     /// in more than this many docs.
+    #[must_use]
     pub fn with_max_doc_frequency(mut self, value: u64) -> Self {
         self.mlt.max_doc_frequency = Some(value);
         self
@@ -85,6 +87,7 @@ impl MoreLikeThisQueryBuilder {
     ///
     /// The resulting query will ignore words less
     /// frequent that this number.
+    #[must_use]
     pub fn with_min_term_frequency(mut self, value: usize) -> Self {
         self.mlt.min_term_frequency = Some(value);
         self
@@ -93,6 +96,7 @@ impl MoreLikeThisQueryBuilder {
     /// Sets the maximum query terms.
     ///
     /// The resulting query will not return a query with more clause than this.
+    #[must_use]
     pub fn with_max_query_terms(mut self, value: usize) -> Self {
         self.mlt.max_query_terms = Some(value);
         self
@@ -101,6 +105,7 @@ impl MoreLikeThisQueryBuilder {
     /// Sets the minimum word length.
     ///
     /// The resulting query will ignore words shorter than this length.
+    #[must_use]
     pub fn with_min_word_length(mut self, value: usize) -> Self {
         self.mlt.min_word_length = Some(value);
         self
@@ -109,6 +114,7 @@ impl MoreLikeThisQueryBuilder {
     /// Sets the maximum word length.
     ///
     /// The resulting query will ignore words longer than this length.
+    #[must_use]
     pub fn with_max_word_length(mut self, value: usize) -> Self {
         self.mlt.max_word_length = Some(value);
         self
@@ -117,6 +123,7 @@ impl MoreLikeThisQueryBuilder {
     /// Sets the boost factor
     ///
     /// The boost factor used by the resulting query for boosting terms.
+    #[must_use]
     pub fn with_boost_factor(mut self, value: f32) -> Self {
         self.mlt.boost_factor = Some(value);
         self
@@ -125,6 +132,7 @@ impl MoreLikeThisQueryBuilder {
     /// Sets the set of stop words
     ///
     /// The resulting query will ignore these set of words.
+    #[must_use]
     pub fn with_stop_words(mut self, value: Vec<String>) -> Self {
         self.mlt.stop_words = value;
         self
diff --git a/src/query/term_query/mod.rs b/src/query/term_query/mod.rs
index d519f8b30..f997cdc51 100644
--- a/src/query/term_query/mod.rs
+++ b/src/query/term_query/mod.rs
@@ -195,7 +195,7 @@ mod tests {
         let searcher = index.reader()?.searcher();
         {
             let explanation = term_query.explain(&searcher, DocAddress::new(0u32, 1u32))?;
-            assert_nearly_equals!(explanation.value(), 0.6931472);
+            assert_nearly_equals!(explanation.value(), std::f32::consts::LN_2);
         }
         {
             let explanation_err = term_query.explain(&searcher, DocAddress::new(0u32, 0u32));
diff --git a/src/reader/mod.rs b/src/reader/mod.rs
index 53f82c88b..07713fac2 100644
--- a/src/reader/mod.rs
+++ b/src/reader/mod.rs
@@ -50,6 +50,7 @@ pub struct IndexReaderBuilder {
 }
 
 impl IndexReaderBuilder {
+    #[must_use]
     pub(crate) fn new(index: Index) -> IndexReaderBuilder {
         IndexReaderBuilder {
             num_searchers: num_cpus::get(),
@@ -65,7 +66,6 @@ impl IndexReaderBuilder {
     /// Building the reader is a non-trivial operation that requires
     /// to open different segment readers. It may take hundreds of milliseconds
     /// of time and it may return an error.
-    #[allow(clippy::needless_late_init)]
     pub fn try_into(self) -> crate::Result<IndexReader> {
         let searcher_generation_inventory = Inventory::default();
         let warming_state = WarmingState::new(
@@ -83,11 +83,10 @@ impl IndexReaderBuilder {
         };
         inner_reader.reload()?;
         let inner_reader_arc = Arc::new(inner_reader);
-        let watch_handle_opt: Option<WatchHandle>;
-        match self.reload_policy {
+        let watch_handle_opt: Option<WatchHandle> = match self.reload_policy {
             ReloadPolicy::Manual => {
                 // No need to set anything...
-                watch_handle_opt = None;
+                None
             }
             ReloadPolicy::OnCommit => {
                 let inner_reader_arc_clone = inner_reader_arc.clone();
@@ -103,9 +102,9 @@ impl IndexReaderBuilder {
                     .index
                     .directory()
                     .watch(WatchCallback::new(callback))?;
-                watch_handle_opt = Some(watch_handle);
+                Some(watch_handle)
             }
-        }
+        };
         Ok(IndexReader {
             inner: inner_reader_arc,
             _watch_handle_opt: watch_handle_opt,
@@ -115,6 +114,7 @@ impl IndexReaderBuilder {
     /// Sets the reload_policy.
     ///
     /// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
+    #[must_use]
     pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
         self.reload_policy = reload_policy;
         self
@@ -123,12 +123,14 @@ impl IndexReaderBuilder {
     /// Sets the number of [Searcher] to pool.
     ///
     /// See [Self::searcher()].
+    #[must_use]
     pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
         self.num_searchers = num_searchers;
         self
     }
 
     /// Set the [Warmer]s that are invoked when reloading searchable segments.
+    #[must_use]
     pub fn warmers(mut self, warmers: Vec<Weak<dyn Warmer>>) -> IndexReaderBuilder {
         self.warmers = warmers;
         self
@@ -138,6 +140,7 @@ impl IndexReaderBuilder {
     /// Sets the number of warming threads.
     ///
     /// This allows parallelizing warming work when there are multiple [Warmer] registered with the
     /// [IndexReader].
+    #[must_use]
     pub fn num_warming_threads(mut self, num_warming_threads: usize) -> IndexReaderBuilder {
         self.num_warming_threads = num_warming_threads;
         self
diff --git a/src/reader/warming.rs b/src/reader/warming.rs
index ad2681622..4469006d0 100644
--- a/src/reader/warming.rs
+++ b/src/reader/warming.rs
@@ -251,7 +251,7 @@ mod tests {
         let schema = schema_builder.build();
 
         let directory = RamDirectory::create();
-        let index = Index::create(directory.clone(), schema, IndexSettings::default())?;
+        let index = Index::create(directory, schema, IndexSettings::default())?;
 
         let num_writer_threads = 4;
         let mut writer = index
diff --git a/src/schema/bytes_options.rs b/src/schema/bytes_options.rs
index 2adbe659a..ce07c535e 100644
--- a/src/schema/bytes_options.rs
+++ b/src/schema/bytes_options.rs
@@ -63,6 +63,7 @@ impl BytesOptions {
     ///
     /// Setting an integer as indexed will generate
     /// a posting list for each value taken by the integer.
+    #[must_use]
     pub fn set_indexed(mut self) -> BytesOptions {
         self.indexed = true;
         self
@@ -72,6 +73,7 @@ impl BytesOptions {
     ///
     /// Setting an integer as normed will generate
     /// the fieldnorm data for it.
+    #[must_use]
     pub fn set_fieldnorms(mut self) -> BytesOptions {
         self.fieldnorms = true;
         self
@@ -83,6 +85,7 @@ impl BytesOptions {
     /// Access time are similar to a random lookup in an array.
     /// If more than one value is associated to a fast field, only the last one is
     /// kept.
+    #[must_use]
     pub fn set_fast(mut self) -> BytesOptions {
         self.fast = true;
         self
@@ -92,6 +95,7 @@ impl BytesOptions {
     ///
     /// Only the fields that are set as *stored* are
     /// persisted into the Tantivy's store.
+    #[must_use]
     pub fn set_stored(mut self) -> BytesOptions {
         self.stored = true;
         self
diff --git a/src/schema/facet_options.rs b/src/schema/facet_options.rs
index 078153b2f..eace12f2f 100644
--- a/src/schema/facet_options.rs
+++ b/src/schema/facet_options.rs
@@ -22,6 +22,7 @@ impl FacetOptions {
     ///
     /// Only the fields that are set as *stored* are
     /// persisted into the Tantivy's store.
+    #[must_use]
     pub fn set_stored(mut self) -> FacetOptions {
         self.stored = true;
         self
diff --git a/src/schema/field.rs b/src/schema/field.rs
index 1b42a6b0e..65bc4ab71 100644
--- a/src/schema/field.rs
+++ b/src/schema/field.rs
@@ -17,8 +17,7 @@ impl Field {
     }
 
     /// Returns a u32 identifying uniquely a field within a schema.
-    #[allow(clippy::trivially_copy_pass_by_ref)]
-    pub const fn field_id(&self) -> u32 {
+    pub const fn field_id(self) -> u32 {
         self.0
     }
 }
diff --git a/src/schema/int_options.rs b/src/schema/int_options.rs
index d13cb5d4d..70c3265fb 100644
--- a/src/schema/int_options.rs
+++ b/src/schema/int_options.rs
@@ -79,6 +79,7 @@ impl IntOptions {
     ///
     /// Only the fields that are set as *stored* are
     /// persisted into the Tantivy's store.
+    #[must_use]
     pub fn set_stored(mut self) -> IntOptions {
         self.stored = true;
         self
@@ -90,6 +91,7 @@ impl IntOptions {
     /// a posting list for each value taken by the integer.
     ///
     /// This is required for the field to be searchable.
+    #[must_use]
     pub fn set_indexed(mut self) -> IntOptions {
         self.indexed = true;
         self
@@ -99,6 +101,7 @@ impl IntOptions {
     ///
     /// Setting an integer as fieldnorm will generate
     /// the fieldnorm data for it.
+    #[must_use]
     pub fn set_fieldnorm(mut self) -> IntOptions {
         self.fieldnorms = true;
         self
@@ -110,6 +113,7 @@ impl IntOptions {
     /// Access time are similar to a random lookup in an array.
     /// If more than one value is associated to a fast field, only the last one is
     /// kept.
+    #[must_use]
     pub fn set_fast(mut self, cardinality: Cardinality) -> IntOptions {
         self.fast = Some(cardinality);
         self
diff --git a/src/schema/text_options.rs b/src/schema/text_options.rs
index 4c1810870..83b2dd674 100644
--- a/src/schema/text_options.rs
+++ b/src/schema/text_options.rs
@@ -25,12 +25,14 @@ impl TextOptions {
     }
 
     /// Sets the field as stored
+    #[must_use]
     pub fn set_stored(mut self) -> TextOptions {
         self.stored = true;
         self
     }
 
     /// Sets the field as indexed, with the specific indexing options.
+    #[must_use]
     pub fn set_indexing_options(mut self, indexing: TextFieldIndexing) -> TextOptions {
         self.indexing = Some(indexing);
         self
@@ -63,6 +65,7 @@ impl Default for TextFieldIndexing {
 
 impl TextFieldIndexing {
     /// Sets the tokenizer to be used for a given field.
+    #[must_use]
     pub fn set_tokenizer(mut self, tokenizer_name: &str) -> TextFieldIndexing {
         self.tokenizer = Cow::Owned(tokenizer_name.to_string());
         self
@@ -74,6 +77,7 @@ impl TextFieldIndexing {
     }
 
     /// Sets fieldnorms
+    #[must_use]
     pub fn set_fieldnorms(mut self, fieldnorms: bool) -> TextFieldIndexing {
         self.fieldnorms = fieldnorms;
         self
@@ -87,6 +91,7 @@ impl TextFieldIndexing {
     /// Sets which information should be indexed with the tokens.
     ///
     /// See [IndexRecordOption](./enum.IndexRecordOption.html) for more detail.
+    #[must_use]
     pub fn set_index_option(mut self, index_option: IndexRecordOption) -> TextFieldIndexing {
         self.record = index_option;
         self
diff --git a/src/tokenizer/tokenizer.rs b/src/tokenizer/tokenizer.rs
index 82056a07c..2de1ce730 100644
--- a/src/tokenizer/tokenizer.rs
+++ b/src/tokenizer/tokenizer.rs
@@ -78,6 +78,7 @@ impl TextAnalyzer {
     ///     .filter(LowerCaser)
     ///     .filter(Stemmer::default());
     /// ```
+    #[must_use]
     pub fn filter<F: Into<BoxTokenFilter>>(mut self, token_filter: F) -> Self {
         self.token_filters.push(token_filter.into());
         self
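
The bulk of this patch replaces the crate-wide `#![allow(clippy::return_self_not_must_use)]` escape hatch with explicit `#[must_use]` attributes on builder-style setters that consume `self` and return the updated value. As a rough illustration of why that is worth the churn, here is a minimal, self-contained sketch; the `OptionsLike` type below is hypothetical and only stands in for tantivy's various `*Options` and builder types, it is not part of the patch:

/// Hypothetical stand-in for a tantivy-style options builder, used only to
/// illustrate the effect of `#[must_use]`.
#[derive(Debug, Default, Clone, Copy)]
struct OptionsLike {
    stored: bool,
}

impl OptionsLike {
    /// Builder-style setter: consumes `self` and returns the updated value.
    /// `#[must_use]` makes the compiler warn when the returned value is
    /// dropped, which is almost always a bug for this kind of API.
    #[must_use]
    fn set_stored(mut self) -> OptionsLike {
        self.stored = true;
        self
    }
}

fn main() {
    // Intended usage: the returned builder is kept and carries the flag.
    let opts = OptionsLike::default().set_stored();
    assert!(opts.stored);

    // The mistake the lint guards against: the result is silently discarded.
    // With `#[must_use]` this call triggers an "unused return value" warning.
    let unchanged = OptionsLike::default();
    unchanged.set_stored();
    assert!(!unchanged.stored); // the original value is unmodified (Copy semantics)
}

The same reasoning applies to the `IndexReaderBuilder`, `MoreLikeThisQueryBuilder`, and schema option types touched above: the attribute costs nothing at runtime and turns a silently dropped configuration into a compile-time warning, which is why the per-method annotations replace the blanket allow.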