diff --git a/examples/basic_search.rs b/examples/basic_search.rs index 5195bd29c..3a0a71e2f 100644 --- a/examples/basic_search.rs +++ b/examples/basic_search.rs @@ -106,37 +106,37 @@ fn main() -> tantivy::Result<()> { // For convenience, tantivy also comes with a macro to // reduce the boilerplate above. index_writer.add_document(doc!( - title => "Of Mice and Men", - body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ - bank and runs deep and green. The water is warm too, for it has slipped twinkling \ - over the yellow sands in the sunlight before reaching the narrow pool. On one \ - side of the river the golden foothill slopes curve up to the strong and rocky \ - Gabilan Mountains, but on the valley side the water is lined with trees—willows \ - fresh and green with every spring, carrying in their lower leaf junctures the \ - debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ - limbs and branches that arch over the pool" + title => "Of Mice and Men", + body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ + bank and runs deep and green. The water is warm too, for it has slipped twinkling \ + over the yellow sands in the sunlight before reaching the narrow pool. On one \ + side of the river the golden foothill slopes curve up to the strong and rocky \ + Gabilan Mountains, but on the valley side the water is lined with trees—willows \ + fresh and green with every spring, carrying in their lower leaf junctures the \ + debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ + limbs and branches that arch over the pool" )); index_writer.add_document(doc!( - title => "Of Mice and Men", - body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ - bank and runs deep and green. The water is warm too, for it has slipped twinkling \ - over the yellow sands in the sunlight before reaching the narrow pool. On one \ - side of the river the golden foothill slopes curve up to the strong and rocky \ - Gabilan Mountains, but on the valley side the water is lined with trees—willows \ - fresh and green with every spring, carrying in their lower leaf junctures the \ - debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ - limbs and branches that arch over the pool" + title => "Of Mice and Men", + body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ + bank and runs deep and green. The water is warm too, for it has slipped twinkling \ + over the yellow sands in the sunlight before reaching the narrow pool. On one \ + side of the river the golden foothill slopes curve up to the strong and rocky \ + Gabilan Mountains, but on the valley side the water is lined with trees—willows \ + fresh and green with every spring, carrying in their lower leaf junctures the \ + debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ + limbs and branches that arch over the pool" )); // Multivalued field just need to be repeated. index_writer.add_document(doc!( - title => "Frankenstein", - title => "The Modern Prometheus", - body => "You will rejoice to hear that no disaster has accompanied the commencement of an \ - enterprise which you have regarded with such evil forebodings. I arrived here \ - yesterday, and my first task is to assure my dear sister of my welfare and \ - increasing confidence in the success of my undertaking." 
+ title => "Frankenstein", + title => "The Modern Prometheus", + body => "You will rejoice to hear that no disaster has accompanied the commencement of an \ + enterprise which you have regarded with such evil forebodings. I arrived here \ + yesterday, and my first task is to assure my dear sister of my welfare and \ + increasing confidence in the success of my undertaking." )); // This is an example, so we will only index 3 documents diff --git a/examples/custom_tokenizer.rs b/examples/custom_tokenizer.rs index 8c4ce8e06..72b69184d 100644 --- a/examples/custom_tokenizer.rs +++ b/examples/custom_tokenizer.rs @@ -68,12 +68,12 @@ fn main() -> tantivy::Result<()> { // heap for the indexer can increase its throughput. let mut index_writer = index.writer(50_000_000)?; index_writer.add_document(doc!( - title => "The Old Man and the Sea", - body => "He was an old man who fished alone in a skiff in the Gulf Stream and \ - he had gone eighty-four days now without taking a fish." + title => "The Old Man and the Sea", + body => "He was an old man who fished alone in a skiff in the Gulf Stream and \ + he had gone eighty-four days now without taking a fish." )); index_writer.add_document(doc!( - title => "Of Mice and Men", + title => "Of Mice and Men", body => r#"A few miles south of Soledad, the Salinas River drops in close to the hillside bank and runs deep and green. The water is warm too, for it has slipped twinkling over the yellow sands in the sunlight before reaching the narrow pool. On one @@ -84,7 +84,7 @@ fn main() -> tantivy::Result<()> { limbs and branches that arch over the pool"# )); index_writer.add_document(doc!( - title => "Frankenstein", + title => "Frankenstein", body => r#"You will rejoice to hear that no disaster has accompanied the commencement of an enterprise which you have regarded with such evil forebodings. I arrived here yesterday, and my first task is to assure my dear sister of my welfare and diff --git a/examples/snippet.rs b/examples/snippet.rs index 559176ce1..35ba07557 100644 --- a/examples/snippet.rs +++ b/examples/snippet.rs @@ -35,15 +35,15 @@ fn main() -> tantivy::Result<()> { // we'll only need one doc for this example. index_writer.add_document(doc!( - title => "Of Mice and Men", - body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ - bank and runs deep and green. The water is warm too, for it has slipped twinkling \ - over the yellow sands in the sunlight before reaching the narrow pool. On one \ - side of the river the golden foothill slopes curve up to the strong and rocky \ - Gabilan Mountains, but on the valley side the water is lined with trees—willows \ - fresh and green with every spring, carrying in their lower leaf junctures the \ - debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ - limbs and branches that arch over the pool" + title => "Of Mice and Men", + body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ + bank and runs deep and green. The water is warm too, for it has slipped twinkling \ + over the yellow sands in the sunlight before reaching the narrow pool. 
On one \ + side of the river the golden foothill slopes curve up to the strong and rocky \ + Gabilan Mountains, but on the valley side the water is lined with trees—willows \ + fresh and green with every spring, carrying in their lower leaf junctures the \ + debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ + limbs and branches that arch over the pool" )); // ... index_writer.commit()?; @@ -56,7 +56,7 @@ fn main() -> tantivy::Result<()> { let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?; - let snippet_generator = SnippetGenerator::new(&searcher, &*query, body)?; + let snippet_generator = SnippetGenerator::create(&searcher, &*query, body)?; for (score, doc_address) in top_docs { let doc = searcher.doc(doc_address)?; diff --git a/examples/stop_words.rs b/examples/stop_words.rs index 7eba72bdd..cdfe054e8 100644 --- a/examples/stop_words.rs +++ b/examples/stop_words.rs @@ -72,26 +72,26 @@ fn main() -> tantivy::Result<()> { title => "The Old Man and the Sea", body => "He was an old man who fished alone in a skiff in the Gulf Stream and \ he had gone eighty-four days now without taking a fish." - )); + )); index_writer.add_document(doc!( - title => "Of Mice and Men", - body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ - bank and runs deep and green. The water is warm too, for it has slipped twinkling \ - over the yellow sands in the sunlight before reaching the narrow pool. On one \ - side of the river the golden foothill slopes curve up to the strong and rocky \ - Gabilan Mountains, but on the valley side the water is lined with trees—willows \ - fresh and green with every spring, carrying in their lower leaf junctures the \ - debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ - limbs and branches that arch over the pool" - )); + title => "Of Mice and Men", + body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ + bank and runs deep and green. The water is warm too, for it has slipped twinkling \ + over the yellow sands in the sunlight before reaching the narrow pool. On one \ + side of the river the golden foothill slopes curve up to the strong and rocky \ + Gabilan Mountains, but on the valley side the water is lined with trees—willows \ + fresh and green with every spring, carrying in their lower leaf junctures the \ + debris of the winter’s flooding; and sycamores with mottled, white, recumbent \ + limbs and branches that arch over the pool" + )); index_writer.add_document(doc!( - title => "Frankenstein", - body => "You will rejoice to hear that no disaster has accompanied the commencement of an \ - enterprise which you have regarded with such evil forebodings. I arrived here \ - yesterday, and my first task is to assure my dear sister of my welfare and \ - increasing confidence in the success of my undertaking." + title => "Frankenstein", + body => "You will rejoice to hear that no disaster has accompanied the commencement of an \ + enterprise which you have regarded with such evil forebodings. I arrived here \ + yesterday, and my first task is to assure my dear sister of my welfare and \ + increasing confidence in the success of my undertaking." 
     ));

     index_writer.commit()?;
diff --git a/src/collector/facet_collector.rs b/src/collector/facet_collector.rs
index 2937a7cfe..df2b7684c 100644
--- a/src/collector/facet_collector.rs
+++ b/src/collector/facet_collector.rs
@@ -474,7 +474,8 @@ mod tests {
                 n /= 4;
                 let leaf = n % 5;
                 Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf))
-            }).collect();
+            })
+            .collect();
         for i in 0..num_facets * 10 {
             let mut doc = Document::new();
             doc.add_facet(facet_field, facets[i % num_facets].clone());
@@ -500,18 +501,16 @@ mod tests {
                 ("/top1/mid2", 50),
                 ("/top1/mid3", 50),
             ]
-                .iter()
-                .map(|&(facet_str, count)| (String::from(facet_str), count))
-                .collect::<Vec<_>>()
+            .iter()
+            .map(|&(facet_str, count)| (String::from(facet_str), count))
+            .collect::<Vec<_>>()
             );
         }
     }

     #[test]
-    #[should_panic(
-        expected = "Tried to add a facet which is a descendant of \
-                    an already added facet."
-    )]
+    #[should_panic(expected = "Tried to add a facet which is a descendant of \
+                               an already added facet.")]
     fn test_misused_facet_collector() {
         let mut facet_collector = FacetCollector::for_field(Field(0));
         facet_collector.add_facet(Facet::from("/country"));
@@ -563,13 +562,15 @@ mod tests {
                 let facet = Facet::from(&format!("/facet/{}", c));
                 let doc = doc!(facet_field => facet);
                 iter::repeat(doc).take(count)
-            }).map(|mut doc| {
+            })
+            .map(|mut doc| {
                 doc.add_facet(
                     facet_field,
                     &format!("/facet/{}", thread_rng().sample(&uniform)),
                 );
                 doc
-            }).collect();
+            })
+            .collect();
         docs[..].shuffle(&mut thread_rng());
         let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
diff --git a/src/collector/multi_collector.rs b/src/collector/multi_collector.rs
index 1fb119f1e..2595e7f24 100644
--- a/src/collector/multi_collector.rs
+++ b/src/collector/multi_collector.rs
@@ -43,7 +43,8 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
                     let err_msg = format!("Failed to cast child collector fruit. {:?}", e);
                     TantivyError::InvalidArgument(err_msg)
                 })
-            }).collect::<Result<_>>()?;
+            })
+            .collect::<Result<_>>()?;
         let merged_fruit = self.0.merge_fruits(typed_fruit)?;
         Ok(Box::new(merged_fruit))
     }
@@ -147,6 +148,8 @@ impl FruitHandle {
 /// Ok(())
 /// }
 /// ```
+#[allow(clippy::type_complexity)]
+#[derive(Default)]
 pub struct MultiCollector<'a> {
     collector_wrappers:
         Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>> + 'a>>,
 }

 impl<'a> MultiCollector<'a> {
     /// Create a new `MultiCollector`
-    pub fn new() -> MultiCollector<'a> {
-        MultiCollector {
-            collector_wrappers: Vec::new(),
-        }
+    pub fn new() -> Self {
+        Default::default()
     }

     /// Add a new collector to our `MultiCollector`.
@@ -213,7 +214,8 @@ impl<'a> Collector for MultiCollector<'a> {
             .zip(segment_fruits_list)
             .map(|(child_collector, segment_fruits)| {
                 Ok(Some(child_collector.merge_fruits(segment_fruits)?))
-            }).collect::<Result<_>>()?;
+            })
+            .collect::<Result<_>>()?;
         Ok(MultiFruit { sub_fruits })
     }
 }
diff --git a/src/collector/top_collector.rs b/src/collector/top_collector.rs
index d4bb7239d..b17a7d6c0 100644
--- a/src/collector/top_collector.rs
+++ b/src/collector/top_collector.rs
@@ -84,11 +84,9 @@ where
         for (feature, doc) in child_fruit {
             if top_collector.len() < self.limit {
                 top_collector.push(ComparableDoc { feature, doc });
-            } else {
-                if let Some(mut head) = top_collector.peek_mut() {
-                    if head.feature < feature {
-                        *head = ComparableDoc { feature, doc };
-                    }
+            } else if let Some(mut head) = top_collector.peek_mut() {
+                if head.feature < feature {
+                    *head = ComparableDoc { feature, doc };
                 }
             }
         }
@@ -142,7 +140,8 @@ impl TopSegmentCollector {
                     comparable_doc.feature,
                     DocAddress(segment_id, comparable_doc.doc),
                 )
-            }).collect()
+            })
+            .collect()
     }

     /// Return true iff at least K documents have gone through
diff --git a/src/core/executor.rs b/src/core/executor.rs
index 9663bba3a..ef346c56b 100644
--- a/src/core/executor.rs
+++ b/src/core/executor.rs
@@ -94,7 +94,8 @@ mod tests {
                 panic!("panic should propagate");
             },
             vec![0].into_iter(),
-        ).unwrap();
+        )
+        .unwrap();
     }

     #[test]
@@ -106,7 +107,8 @@ mod tests {
                 panic!("panic should propagate");
             },
             vec![0].into_iter(),
-        ).unwrap();
+        )
+        .unwrap();
     }

     #[test]
diff --git a/src/core/index.rs b/src/core/index.rs
index 2618b70e5..d634ea6bf 100644
--- a/src/core/index.rs
+++ b/src/core/index.rs
@@ -135,7 +135,7 @@ impl Index {
     /// Creates a new index given an implementation of the trait `Directory`
     pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
-        let directory = ManagedDirectory::new(dir)?;
+        let directory = ManagedDirectory::wrap(dir)?;
         Index::from_directory(directory, schema)
     }

@@ -199,7 +199,7 @@ impl Index {
     /// Open the index using the provided directory
     pub fn open<D: Directory>(directory: D) -> Result<Index> {
-        let directory = ManagedDirectory::new(directory)?;
+        let directory = ManagedDirectory::wrap(directory)?;
         let metas = load_metas(&directory)?;
         Index::create_from_metas(directory, &metas)
     }
diff --git a/src/core/inverted_index_reader.rs b/src/core/inverted_index_reader.rs
index ba9d77c70..51c5b02ae 100644
--- a/src/core/inverted_index_reader.rs
+++ b/src/core/inverted_index_reader.rs
@@ -32,10 +32,7 @@ pub struct InvertedIndexReader {
 }

 impl InvertedIndexReader {
-    #[cfg_attr(
-        feature = "cargo-clippy",
-        allow(clippy::needless_pass_by_value)
-    )] // for symmetry
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
     pub(crate) fn new(
         termdict: TermDictionary,
         postings_source: ReadOnlySource,
diff --git a/src/core/searcher.rs b/src/core/searcher.rs
index 025f26072..9e74fddd0 100644
--- a/src/core/searcher.rs
+++ b/src/core/searcher.rs
@@ -104,7 +104,8 @@ impl Searcher {
             .iter()
             .map(|segment_reader| {
                 u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
-            }).sum::<u64>()
+            })
+            .sum::<u64>()
     }

     /// Return the list of segment readers
diff --git a/src/directory/managed_directory.rs b/src/directory/managed_directory.rs
index 34259c184..4364069fd 100644
--- a/src/directory/managed_directory.rs
+++ b/src/directory/managed_directory.rs
@@ -59,7 +59,7 @@ fn save_managed_paths(

 impl ManagedDirectory {
     /// Wraps a directory as managed directory.
-    pub fn new<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
+    pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
         match directory.atomic_read(&MANAGED_FILEPATH) {
             Ok(data) => {
                 let managed_files_json = String::from_utf8_lossy(&data);
@@ -260,7 +260,7 @@ mod tests {
         let tempdir_path = PathBuf::from(tempdir.path());
         {
             let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
-            let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap();
+            let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
             {
                 let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap();
                 write_file.flush().unwrap();
@@ -286,7 +286,7 @@ mod tests {
         }
         {
             let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
-            let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap();
+            let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
             {
                 assert!(managed_directory.exists(*TEST_PATH1));
                 assert!(!managed_directory.exists(*TEST_PATH2));
@@ -310,7 +310,7 @@ mod tests {
         let living_files = HashSet::new();

         let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
-        let mut managed_directory = ManagedDirectory::new(mmap_directory).unwrap();
+        let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
         managed_directory
             .atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
             .unwrap();
diff --git a/src/directory/ram_directory.rs b/src/directory/ram_directory.rs
index ad79319e7..2f1733e0f 100644
--- a/src/directory/ram_directory.rs
+++ b/src/directory/ram_directory.rs
@@ -100,7 +100,8 @@ impl InnerDirectory {
             );
             let io_err = make_io_err(msg);
             OpenReadError::IOError(IOError::with_path(path.to_owned(), io_err))
-        }).and_then(|readable_map| {
+        })
+        .and_then(|readable_map| {
             readable_map
                 .get(path)
                 .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
@@ -120,7 +121,8 @@ impl InnerDirectory {
             );
             let io_err = make_io_err(msg);
             DeleteError::IOError(IOError::with_path(path.to_owned(), io_err))
-        }).and_then(|mut writable_map| match writable_map.remove(path) {
+        })
+        .and_then(|mut writable_map| match writable_map.remove(path) {
             Some(_) => Ok(()),
             None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
         })
diff --git a/src/indexer/delete_queue.rs b/src/indexer/delete_queue.rs
index 842b7a2f3..be36bef7a 100644
--- a/src/indexer/delete_queue.rs
+++ b/src/indexer/delete_queue.rs
@@ -191,10 +191,7 @@ impl DeleteCursor {
         }
     }

-    #[cfg_attr(
-        feature = "cargo-clippy",
-        allow(clippy::wrong_self_convention)
-    )]
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
     fn is_behind_opstamp(&mut self, target_opstamp: u64) -> bool {
         self.get()
             .map(|operation| operation.opstamp < target_opstamp)
diff --git a/src/indexer/index_writer.rs b/src/indexer/index_writer.rs
index 663507470..e453dc1d5 100644
--- a/src/indexer/index_writer.rs
+++ b/src/indexer/index_writer.rs
@@ -61,7 +61,8 @@ fn initial_table_size(per_thread_memory_budget: usize) -> usize {
             "Per thread memory is too small: {}",
             per_thread_memory_budget
         )
-    }).min(19) // we cap it at 512K
+    })
+    .min(19) // we cap it at 512K
 }

 /// `IndexWriter` is the user entry-point to add document to an index.
@@ -139,7 +140,7 @@ pub fn open_index_writer(
     let stamper = Stamper::new(current_opstamp);

     let segment_updater =
-        SegmentUpdater::new(index.clone(), stamper.clone(), &delete_queue.cursor())?;
+        SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;

     let mut index_writer = IndexWriter {
         _directory_lock: Some(directory_lock),
@@ -390,7 +391,8 @@ impl IndexWriter {
             .name(format!(
                 "thrd-tantivy-index{}-gen{}",
                 self.worker_id, generation
-            )).spawn(move || {
+            ))
+            .spawn(move || {
                 loop {
                     let mut document_iterator =
                         document_receiver_clone.clone().into_iter().peekable();
@@ -465,10 +467,8 @@ impl IndexWriter {
     ///
     /// Returns the former segment_ready channel.
     fn recreate_document_channel(&mut self) -> DocumentReceiver {
-        let (mut document_sender, mut document_receiver): (
-            DocumentSender,
-            DocumentReceiver,
-        ) = channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
+        let (mut document_sender, mut document_receiver): (DocumentSender, DocumentReceiver) =
+            channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
         swap(&mut self.document_sender, &mut document_sender);
         swap(&mut self.document_receiver, &mut document_receiver);
         document_receiver
diff --git a/src/indexer/merger.rs b/src/indexer/merger.rs
index 2b8cfe1b2..5c7b18d10 100644
--- a/src/indexer/merger.rs
+++ b/src/indexer/merger.rs
@@ -40,13 +40,15 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
             total_tokens += reader.inverted_index(field).total_num_tokens();
         }
     }
-    total_tokens + count
-        .iter()
-        .cloned()
-        .enumerate()
-        .map(|(fieldnorm_ord, count)| {
-            count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
-        }).sum::<u64>()
+    total_tokens
+        + count
+            .iter()
+            .cloned()
+            .enumerate()
+            .map(|(fieldnorm_ord, count)| {
+                count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
+            })
+            .sum::<u64>()
 }

 pub struct IndexMerger {
@@ -523,7 +525,8 @@ impl IndexMerger {
                     }
                 }
                 None
-            }).collect();
+            })
+            .collect();

         // At this point, `segment_postings` contains the posting list
         // of all of the segments containing the given term.
@@ -664,7 +667,8 @@ mod tests { TextFieldIndexing::default() .set_tokenizer("default") .set_index_option(IndexRecordOption::WithFreqs), - ).set_stored(); + ) + .set_stored(); let text_field = schema_builder.add_text_field("text", text_fieldtype); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_field = schema_builder.add_u64_field("score", score_fieldtype); @@ -803,7 +807,8 @@ mod tests { .search( &query, &BytesFastFieldTestCollector::for_field(bytes_score_field), - ).expect("failed to search") + ) + .expect("failed to search") }; assert_eq!( get_fast_vals(vec![Term::from_field_text(text_field, "a")]), @@ -823,7 +828,8 @@ mod tests { let text_fieldtype = schema::TextOptions::default() .set_indexing_options( TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs), - ).set_stored(); + ) + .set_stored(); let text_field = schema_builder.add_text_field("text", text_fieldtype); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_field = schema_builder.add_u64_field("score", score_fieldtype); @@ -851,21 +857,21 @@ mod tests { { // a first commit index_writer.add_document(doc!( - text_field => "a b d", - score_field => 1u64, - bytes_score_field => vec![0u8, 0, 0, 1], - )); + text_field => "a b d", + score_field => 1u64, + bytes_score_field => vec![0u8, 0, 0, 1], + )); index_writer.add_document(doc!( - text_field => "b c", - score_field => 2u64, - bytes_score_field => vec![0u8, 0, 0, 2], - )); + text_field => "b c", + score_field => 2u64, + bytes_score_field => vec![0u8, 0, 0, 2], + )); index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.add_document(doc!( - text_field => "c d", - score_field => 3u64, - bytes_score_field => vec![0u8, 0, 0, 3], - )); + text_field => "c d", + score_field => 3u64, + bytes_score_field => vec![0u8, 0, 0, 3], + )); index_writer.commit().expect("committed"); index.load_searchers().unwrap(); let ref searcher = *index.searcher(); @@ -892,27 +898,27 @@ mod tests { { // a second commit index_writer.add_document(doc!( - text_field => "a d e", - score_field => 4_000u64, - bytes_score_field => vec![0u8, 0, 0, 4], - )); + text_field => "a d e", + score_field => 4_000u64, + bytes_score_field => vec![0u8, 0, 0, 4], + )); index_writer.add_document(doc!( - text_field => "e f", - score_field => 5_000u64, - bytes_score_field => vec![0u8, 0, 0, 5], - )); + text_field => "e f", + score_field => 5_000u64, + bytes_score_field => vec![0u8, 0, 0, 5], + )); index_writer.delete_term(Term::from_field_text(text_field, "a")); index_writer.delete_term(Term::from_field_text(text_field, "f")); index_writer.add_document(doc!( - text_field => "f g", - score_field => 6_000u64, - bytes_score_field => vec![0u8, 0, 23, 112], - )); + text_field => "f g", + score_field => 6_000u64, + bytes_score_field => vec![0u8, 0, 23, 112], + )); index_writer.add_document(doc!( - text_field => "g h", - score_field => 7_000u64, - bytes_score_field => vec![0u8, 0, 27, 88], - )); + text_field => "g h", + score_field => 7_000u64, + bytes_score_field => vec![0u8, 0, 27, 88], + )); index_writer.commit().expect("committed"); index.load_searchers().unwrap(); let searcher = index.searcher(); diff --git a/src/indexer/segment_updater.rs b/src/indexer/segment_updater.rs index 80651a585..30f292ebf 100644 --- a/src/indexer/segment_updater.rs +++ b/src/indexer/segment_updater.rs @@ -138,7 +138,7 @@ struct InnerSegmentUpdater { } impl SegmentUpdater { - pub fn new( + pub fn create( index: Index, 
stamper: Stamper, delete_cursor: &DeleteCursor, @@ -195,7 +195,8 @@ impl SegmentUpdater { segment_updater.0.segment_manager.add_segment(segment_entry); segment_updater.consider_merge_options(); true - }).forget(); + }) + .forget(); true } else { false @@ -249,14 +250,16 @@ impl SegmentUpdater { opstamp, commit_message, directory.box_clone().borrow_mut(), - ).expect("Could not save metas."); + ) + .expect("Could not save metas."); } } pub fn garbage_collect_files(&self) -> Result<()> { self.run_async(move |segment_updater| { segment_updater.garbage_collect_files_exec(); - }).wait() + }) + .wait() } fn garbage_collect_files_exec(&self) { @@ -278,7 +281,8 @@ impl SegmentUpdater { segment_updater.garbage_collect_files_exec(); segment_updater.consider_merge_options(); } - }).wait() + }) + .wait() } pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result> { @@ -286,7 +290,8 @@ impl SegmentUpdater { let segment_ids_vec = segment_ids.to_vec(); self.run_async(move |segment_updater| { segment_updater.start_merge_impl(&segment_ids_vec[..]) - }).wait()? + }) + .wait()? } // `segment_ids` is required to be non-empty. @@ -352,7 +357,8 @@ impl SegmentUpdater { .unwrap() .remove(&merging_thread_id); Ok(()) - }).expect("Failed to spawn a thread."); + }) + .expect("Failed to spawn a thread."); self.0 .merging_threads .write() @@ -443,7 +449,8 @@ impl SegmentUpdater { let previous_metas = segment_updater.0.index.load_metas().unwrap(); segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload); segment_updater.garbage_collect_files_exec(); - }).wait() + }) + .wait() } /// Wait for current merging threads. diff --git a/src/indexer/segment_writer.rs b/src/indexer/segment_writer.rs index ce4b1eb68..e4f478450 100644 --- a/src/indexer/segment_writer.rs +++ b/src/indexer/segment_writer.rs @@ -62,7 +62,8 @@ impl SegmentWriter { segment.index().tokenizers().get(tokenizer_name) }), _ => None, - }).collect(); + }) + .collect(); Ok(SegmentWriter { max_doc: 0, multifield_postings, @@ -117,7 +118,8 @@ impl SegmentWriter { _ => { panic!("Expected hierarchical facet"); } - }).collect(); + }) + .collect(); let mut term = Term::for_field(field); // we set the Term for facet_bytes in facets { let mut unordered_term_id_opt = None; @@ -145,7 +147,8 @@ impl SegmentWriter { .flat_map(|field_value| match *field_value.value() { Value::Str(ref text) => Some(text.as_str()), _ => None, - }).collect(); + }) + .collect(); if texts.is_empty() { 0 } else { diff --git a/src/lib.rs b/src/lib.rs index e3b769fad..49fb977e4 100755 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,5 @@ #![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")] #![cfg_attr(all(feature = "unstable", test), feature(test))] -#![cfg_attr(feature = "cargo-clippy", feature(tool_lints))] #![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))] #![doc(test(attr(allow(unused_variables), deny(warnings))))] #![warn(missing_docs)] @@ -184,10 +183,7 @@ mod macros; pub use error::TantivyError; -#[deprecated( - since = "0.7.0", - note = "please use `tantivy::TantivyError` instead" -)] +#[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")] pub use error::TantivyError as Error; extern crate census; @@ -217,7 +213,7 @@ pub mod store; pub mod termdict; mod snippet; -pub use self::snippet::{SnippetGenerator, Snippet}; +pub use self::snippet::{Snippet, SnippetGenerator}; mod docset; pub use self::docset::{DocSet, SkipResult}; @@ -514,11 +510,9 @@ mod tests { let searcher = index.searcher(); let reader 
= searcher.segment_reader(0);
         let inverted_index = reader.inverted_index(text_field);
-        assert!(
-            inverted_index
-                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
-                .is_none()
-        );
+        assert!(inverted_index
+            .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
+            .is_none());
         {
             let mut postings = inverted_index
                 .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
@@ -553,11 +547,9 @@ mod tests {

         let reader = searcher.segment_reader(0);
         let inverted_index = reader.inverted_index(term_abcd.field());
-        assert!(
-            inverted_index
-                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
-                .is_none()
-        );
+        assert!(inverted_index
+            .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
+            .is_none());
         {
             let mut postings = inverted_index
                 .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
@@ -591,11 +583,9 @@ mod tests {
         let searcher = index.searcher();
         let reader = searcher.segment_reader(0);
         let inverted_index = reader.inverted_index(term_abcd.field());
-        assert!(
-            inverted_index
-                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
-                .is_none()
-        );
+        assert!(inverted_index
+            .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
+            .is_none());
         {
             let mut postings = inverted_index
                 .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
@@ -743,11 +733,9 @@ mod tests {
         let reader = searcher.segment_reader(0);
         let inverted_index = reader.inverted_index(text_field);
         let term_abcd = Term::from_field_text(text_field, "abcd");
-        assert!(
-            inverted_index
-                .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
-                .is_none()
-        );
+        assert!(inverted_index
+            .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
+            .is_none());
         let term_af = Term::from_field_text(text_field, "af");
         let mut postings = inverted_index
             .read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
diff --git a/src/macros.rs b/src/macros.rs
index baa9dff4a..c86d293a8 100644
--- a/src/macros.rs
+++ b/src/macros.rs
@@ -77,10 +77,10 @@ mod test {
         let likes = schema_builder.add_u64_field("num_u64", FAST);
         let _schema = schema_builder.build();
         let _doc = doc!(
-                title => "Life Aquatic",
-                author => "Wes Anderson",
-                likes => 4u64
-        );
+            title => "Life Aquatic",
+            author => "Wes Anderson",
+            likes => 4u64
+        );
     }

     #[test]
@@ -91,9 +91,9 @@ mod test {
         let likes = schema_builder.add_u64_field("num_u64", FAST);
         let _schema = schema_builder.build();
         let _doc = doc!(
-                title => "Life Aquatic",
-                author => "Wes Anderson",
-                likes => 4u64,
-        );
+            title => "Life Aquatic",
+            author => "Wes Anderson",
+            likes => 4u64,
+        );
     }
 }
diff --git a/src/postings/mod.rs b/src/postings/mod.rs
index d84870517..199ce6efd 100644
--- a/src/postings/mod.rs
+++ b/src/postings/mod.rs
@@ -221,12 +221,10 @@ pub mod tests {
         }
         {
             let term_a = Term::from_field_text(text_field, "abcdef");
-            assert!(
-                segment_reader
-                    .inverted_index(term_a.field())
-                    .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
-                    .is_none()
-            );
+            assert!(segment_reader
+                .inverted_index(term_a.field())
+                .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
+                .is_none());
         }
         {
             let term_a = Term::from_field_text(text_field, "a");
diff --git a/src/postings/postings_writer.rs b/src/postings/postings_writer.rs
index dd0f691ae..ad089be52 100644
--- a/src/postings/postings_writer.rs
+++ b/src/postings/postings_writer.rs
@@ -29,7 +29,8 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<PostingsWriter> {
             IndexRecordOption::WithFreqsAndPositions => {
                 SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed()
             }
-            }).unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()),
+            })
+            .unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()),
         FieldType::U64(_) | FieldType::I64(_) | FieldType::HierarchicalFacet => {
             SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
         }
@@ -107,10 +108,8 @@ impl MultiFieldPostingsWriter {
             .map(|(key, _, _)| Term::wrap(key).field())
             .enumerate();

-        let mut unordered_term_mappings: HashMap<
-            Field,
-            HashMap<UnorderedTermId, TermOrdinal>,
-        > = HashMap::new();
+        let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
+            HashMap::new();

         let mut prev_field = Field(u32::max_value());
         for (offset, field) in term_offsets_it {
@@ -138,7 +137,8 @@ impl MultiFieldPostingsWriter {
                     .enumerate()
                     .map(|(term_ord, unord_term_id)| {
                         (unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
-                    }).collect();
+                    })
+                    .collect();
                 unordered_term_mappings.insert(field, mapping);
             }
             FieldType::U64(_) | FieldType::I64(_) => {}
diff --git a/src/postings/segment_postings.rs b/src/postings/segment_postings.rs
index cd7b96e02..9373799ef 100644
--- a/src/postings/segment_postings.rs
+++ b/src/postings/segment_postings.rs
@@ -533,7 +533,8 @@ impl BlockSegmentPostings {
             } else {
                 BlockSegmentPostingsSkipResult::Terminated
             }
-            }).unwrap_or(BlockSegmentPostingsSkipResult::Terminated);
+            })
+            .unwrap_or(BlockSegmentPostingsSkipResult::Terminated);
         }
         BlockSegmentPostingsSkipResult::Terminated
     }
diff --git a/src/postings/serializer.rs b/src/postings/serializer.rs
index f578f2caf..7c61a18e9 100644
--- a/src/postings/serializer.rs
+++ b/src/postings/serializer.rs
@@ -55,7 +55,7 @@ pub struct InvertedIndexSerializer {

 impl InvertedIndexSerializer {
     /// Open a new `PostingsSerializer` for the given segment
-    fn new(
+    fn create(
         terms_write: CompositeWrite<WritePtr>,
         postings_write: CompositeWrite<WritePtr>,
         positions_write: CompositeWrite<WritePtr>,
@@ -74,7 +74,7 @@ impl InvertedIndexSerializer {
     /// Open a new `PostingsSerializer` for the given segment
     pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> {
         use SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
-        InvertedIndexSerializer::new(
+        InvertedIndexSerializer::create(
             CompositeWrite::wrap(segment.open_write(TERMS)?),
             CompositeWrite::wrap(segment.open_write(POSTINGS)?),
             CompositeWrite::wrap(segment.open_write(POSITIONS)?),
@@ -99,7 +99,7 @@ impl InvertedIndexSerializer {
         let positions_write = self.positions_write.for_field(field);
         let positionsidx_write = self.positionsidx_write.for_field(field);
         let field_type: FieldType = (*field_entry.field_type()).clone();
-        FieldSerializer::new(
+        FieldSerializer::create(
             &field_type,
             term_dictionary_write,
             postings_write,
@@ -130,7 +130,7 @@ pub struct FieldSerializer<'a> {
 }

 impl<'a> FieldSerializer<'a> {
-    fn new(
+    fn create(
         field_type: &FieldType,
         term_dictionary_write: &'a mut CountingWriter<WritePtr>,
         postings_write: &'a mut CountingWriter<WritePtr>,
@@ -152,7 +152,7 @@ impl<'a> FieldSerializer<'a> {
             _ => (false, false),
         };
         let term_dictionary_builder =
-            TermDictionaryBuilder::new(term_dictionary_write, &field_type)?;
+            TermDictionaryBuilder::create(term_dictionary_write, &field_type)?;
         let postings_serializer =
             PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
         let positions_serializer_opt = if position_enabled {
diff --git a/src/query/bm25.rs b/src/query/bm25.rs
index eb2546725..4a3a25590 100644
--- a/src/query/bm25.rs
+++ b/src/query/bm25.rs
@@ -63,7 +63,8 @@ impl BM25Weight {
             .map(|term| {
                 let term_doc_freq = searcher.doc_freq(term);
                 idf(term_doc_freq, total_num_docs)
-            }).sum::<Score>();
+            })
+            .sum::<Score>();

         BM25Weight::new(idf, average_fieldnorm)
     }
diff --git a/src/query/boolean_query/boolean_query.rs b/src/query/boolean_query/boolean_query.rs
index b530c6b0a..353c89806 100644
--- a/src/query/boolean_query/boolean_query.rs
+++ b/src/query/boolean_query/boolean_query.rs
@@ -47,7 +47,8 @@ impl Query for BooleanQuery {
             .iter()
             .map(|&(ref occur, ref subquery)| {
                 Ok((*occur, subquery.weight(searcher, scoring_enabled)?))
-            }).collect::<Result<_>>()?;
+            })
+            .collect::<Result<_>>()?;
         Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled)))
     }

@@ -68,7 +69,8 @@ impl BooleanQuery {
                 let term_query: Box<Query> =
                     Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs));
                 (Occur::Should, term_query)
-            }).collect();
+            })
+            .collect();
         BooleanQuery::from(occur_term_queries)
     }
diff --git a/src/query/phrase_query/phrase_scorer.rs b/src/query/phrase_query/phrase_scorer.rs
index 9b896a46a..85f075d3a 100644
--- a/src/query/phrase_query/phrase_scorer.rs
+++ b/src/query/phrase_query/phrase_scorer.rs
@@ -134,7 +134,8 @@ impl PhraseScorer {
             .into_iter()
             .map(|(offset, postings)| {
                 PostingsWithOffset::new(postings, (max_offset - offset) as u32)
-            }).collect::<Vec<_>>();
+            })
+            .collect::<Vec<_>>();
         PhraseScorer {
             intersection_docset: Intersection::new(postings_with_offsets),
             num_docsets,
diff --git a/src/query/query_parser/query_parser.rs b/src/query/query_parser/query_parser.rs
index 97bf3a2e8..2dcdacfda 100644
--- a/src/query/query_parser/query_parser.rs
+++ b/src/query/query_parser/query_parser.rs
@@ -68,7 +68,8 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
             .into_iter()
             .flat_map(|(occur, child)| {
                 trim_ast(child).map(|trimmed_child| (occur, trimmed_child))
-            }).collect::<Vec<_>>();
+            })
+            .collect::<Vec<_>>();
         if trimmed_children.is_empty() {
             None
         } else {
@@ -422,7 +423,8 @@ impl QueryParser {
                     lower: self.resolve_bound(field, &lower)?,
                     upper: self.resolve_bound(field, &upper)?,
                 })))
-            }).collect::<Result<Vec<_>, QueryParserError>>()?;
+            })
+            .collect::<Result<Vec<_>, QueryParserError>>()?;
         let result_ast = if clauses.len() == 1 {
             clauses.pop().unwrap()
         } else {
@@ -598,25 +600,19 @@ mod test {
         assert!(query_parser.parse_query("signed:2324").is_ok());
         assert!(query_parser.parse_query("signed:\"22\"").is_ok());
         assert!(query_parser.parse_query("signed:\"-2234\"").is_ok());
-        assert!(
-            query_parser
-                .parse_query("signed:\"-9999999999999\"")
-                .is_ok()
-        );
+        assert!(query_parser
+            .parse_query("signed:\"-9999999999999\"")
+            .is_ok());
         assert!(query_parser.parse_query("signed:\"a\"").is_err());
         assert!(query_parser.parse_query("signed:\"2a\"").is_err());
-        assert!(
-            query_parser
-                .parse_query("signed:\"18446744073709551615\"")
-                .is_err()
-        );
+        assert!(query_parser
+            .parse_query("signed:\"18446744073709551615\"")
+            .is_err());
         assert!(query_parser.parse_query("unsigned:\"2\"").is_ok());
         assert!(query_parser.parse_query("unsigned:\"-2\"").is_err());
-        assert!(
-            query_parser
-                .parse_query("unsigned:\"18446744073709551615\"")
-                .is_ok()
-        );
+        assert!(query_parser
+            .parse_query("unsigned:\"18446744073709551615\"")
+            .is_ok());
         test_parse_query_to_logical_ast_helper(
             "unsigned:2324",
             "Term([0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 9, 20])",
diff --git a/src/query/term_query/mod.rs b/src/query/term_query/mod.rs
index 04bd97ecd..11f6ea934 100644
--- a/src/query/term_query/mod.rs
+++ b/src/query/term_query/mod.rs
@@ -1,4 +1,3 @@
-
 mod term_query;
 mod term_scorer;
 mod term_weight;
diff --git a/src/query/union.rs b/src/query/union.rs
index 5bbe902a0..b4a7441a3 100644
--- a/src/query/union.rs
+++ b/src/query/union.rs
@@ -55,7 +55,8 @@ where
                     None
                 }
             },
-        ).collect();
+        )
+        .collect();
         Union {
             docsets: non_empty_docsets,
             bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]),
@@ -214,10 +215,7 @@ where
         // The target is outside of the buffered horizon.
         // advance all docsets to a doc >= to the target.
-        #[cfg_attr(
-            feature = "cargo-clippy",
-            allow(clippy::clippy::collapsible_if)
-        )]
+        #[cfg_attr(feature = "cargo-clippy", allow(clippy::collapsible_if))]
         unordered_drain_filter(&mut self.docsets, |docset| {
             if docset.doc() < target {
                 if docset.skip_next(target) == SkipResult::End {
diff --git a/src/schema/schema.rs b/src/schema/schema.rs
index 14fdb1763..3255c8839 100644
--- a/src/schema/schema.rs
+++ b/src/schema/schema.rs
@@ -232,12 +232,14 @@ impl Schema {
         let field_entry = self.get_field_entry(field);
         let field_type = field_entry.field_type();
         match *json_value {
-            JsonValue::Array(ref json_items) => for json_item in json_items {
-                let value = field_type
-                    .value_from_json(json_item)
-                    .map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
-                doc.add(FieldValue::new(field, value));
-            },
+            JsonValue::Array(ref json_items) => {
+                for json_item in json_items {
+                    let value = field_type.value_from_json(json_item).map_err(|e| {
+                        DocParsingError::ValueError(field_name.clone(), e)
+                    })?;
+                    doc.add(FieldValue::new(field, value));
+                }
+            }
             _ => {
                 let value = field_type
                     .value_from_json(json_value)
@@ -446,7 +448,8 @@ mod tests {
                 "count": 4,
                 "popularity": 10
             }"#,
-        ).unwrap();
+        )
+        .unwrap();
         assert_eq!(doc.get_first(title_field).unwrap().text(), Some("my title"));
         assert_eq!(
             doc.get_first(author_field).unwrap().text(),
diff --git a/src/snippet/mod.rs b/src/snippet/mod.rs
index d42db4214..77a289baf 100644
--- a/src/snippet/mod.rs
+++ b/src/snippet/mod.rs
@@ -192,7 +192,8 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
                     item.start - fragment.start_offset,
                     item.stop - fragment.start_offset,
                 )
-            }).collect();
+            })
+            .collect();
         Snippet {
             fragments: fragment_text.to_string(),
             highlighted,
@@ -242,7 +243,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
 /// let query = query_parser.parse_query("haleurs flamands").unwrap();
 /// # index.load_searchers()?;
 /// # let searcher = index.searcher();
-/// let mut snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field)?;
+/// let mut snippet_generator = SnippetGenerator::create(&searcher, &*query, text_field)?;
 /// snippet_generator.set_max_num_chars(100);
 /// let snippet = snippet_generator.snippet_from_doc(&doc);
 /// let snippet_html: String = snippet.to_html();
@@ -259,7 +260,7 @@ pub struct SnippetGenerator {

 impl SnippetGenerator {
     /// Creates a new snippet generator
-    pub fn new(searcher: &Searcher, query: &Query, field: Field) -> Result<SnippetGenerator> {
+    pub fn create(searcher: &Searcher, query: &Query, field: Field) -> Result<SnippetGenerator> {
         let mut terms = BTreeSet::new();
         query.query_terms(&mut terms);
         let terms_text: BTreeMap<String, f32> = terms
@@ -273,7 +274,8 @@ impl SnippetGenerator {
             } else {
                 None
             }
-        }).collect();
+        })
+        .collect();
         let tokenizer = searcher.index().tokenizer_for_field(field)?;
         Ok(SnippetGenerator {
             terms_text,
@@ -532,12 +534,14 @@ Survey in 2016, 2017, and 2018."#;
         let query_parser = QueryParser::for_index(&index, vec![text_field]);
         {
             let query = query_parser.parse_query("e").unwrap();
-            let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
+            let snippet_generator =
+                SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
             assert!(snippet_generator.terms_text().is_empty());
         }
         {
             let query = query_parser.parse_query("a").unwrap();
-            let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
+            let snippet_generator =
+                SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
             assert_eq!(
                 &btreemap!("a".to_string() => 0.25f32),
                 snippet_generator.terms_text()
@@ -545,7 +549,8 @@ Survey in 2016, 2017, and 2018."#;
         }
         {
             let query = query_parser.parse_query("a b").unwrap();
-            let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
+            let snippet_generator =
+                SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
             assert_eq!(
                 &btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
                 snippet_generator.terms_text()
@@ -553,7 +558,8 @@ Survey in 2016, 2017, and 2018."#;
         }
         {
             let query = query_parser.parse_query("a b c").unwrap();
-            let snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
+            let snippet_generator =
+                SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
             assert_eq!(
                 &btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
                 snippet_generator.terms_text()
             );
         }
@@ -585,7 +591,8 @@ Survey in 2016, 2017, and 2018."#;
         let searcher = index.searcher();
         let query_parser = QueryParser::for_index(&index, vec![text_field]);
         let query = query_parser.parse_query("rust design").unwrap();
-        let mut snippet_generator = SnippetGenerator::new(&searcher, &*query, text_field).unwrap();
+        let mut snippet_generator =
+            SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
         {
             let snippet = snippet_generator.snippet(TEST_TEXT);
             assert_eq!(snippet.to_html(), "imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?],\nbut its designers intend it to provide better memory safety");
diff --git a/src/space_usage/mod.rs b/src/space_usage/mod.rs
index 35f175c6c..9ca77637f 100644
--- a/src/space_usage/mod.rs
+++ b/src/space_usage/mod.rs
@@ -80,6 +80,7 @@ pub struct SegmentSpaceUsage {
 }

 impl SegmentSpaceUsage {
+    #[allow(clippy::too_many_arguments)]
     pub(crate) fn new(
         num_docs: u32,
         termdict: PerFieldSpaceUsage,
diff --git a/src/store/reader.rs b/src/store/reader.rs
index e94705bb3..31ae1746c 100644
--- a/src/store/reader.rs
+++ b/src/store/reader.rs
@@ -95,10 +95,7 @@ impl StoreReader {
     }
 }

-#[cfg_attr(
-    feature = "cargo-clippy",
-    allow(clippy::needless_pass_by_value)
-)]
+#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
 fn split_source(data: ReadOnlySource) -> (ReadOnlySource, ReadOnlySource, DocId) {
     let data_len = data.len();
     let footer_offset = data_len - size_of::<u64>() - size_of::<u32>();
diff --git a/src/termdict/merger.rs b/src/termdict/merger.rs
index 1d3844067..407a49e90 100644
--- a/src/termdict/merger.rs
+++ b/src/termdict/merger.rs
@@ -53,7 +53,8 @@ impl<'a> TermMerger<'a> {
                 .map(|(ord, streamer)| HeapItem {
                     streamer,
                     segment_ord: ord,
-                }).collect(),
+                })
+                .collect(),
         }
     }

@@ -122,10 +123,7 @@ impl<'a> TermMerger<'a> {
     }

     /// Iterates through terms
-    #[cfg_attr(
-        feature = "cargo-clippy",
-        allow(clippy::should_implement_trait)
-    )]
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))]
     pub fn next(&mut self) -> Option<Term<'a>> {
         if self.advance() {
             Some(Term::wrap(self.current_streamers[0].streamer.key()))
diff --git a/src/termdict/mod.rs b/src/termdict/mod.rs
index 9bc196ea8..211605496 100644
--- a/src/termdict/mod.rs
+++ b/src/termdict/mod.rs
@@ -66,7 +66,7 @@ mod tests {
         let write = directory.open_write(&path).unwrap();
         let field_type = FieldType::Str(TEXT);
         let mut term_dictionary_builder =
-            TermDictionaryBuilder::new(write, &field_type).unwrap();
+            TermDictionaryBuilder::create(write, &field_type).unwrap();
         for term in COUNTRIES.iter() {
             term_dictionary_builder
                 .insert(term.as_bytes(), &make_term_info(0u64))
                 .unwrap();
@@ -92,7 +92,7 @@ mod tests {
         let write = directory.open_write(&path).unwrap();
         let field_type = FieldType::Str(TEXT);
         let mut term_dictionary_builder =
-            TermDictionaryBuilder::new(write, &field_type).unwrap();
+            TermDictionaryBuilder::create(write, &field_type).unwrap();
         term_dictionary_builder
             .insert("abc".as_bytes(), &make_term_info(34u64))
             .unwrap();
@@ -180,7 +180,7 @@ mod tests {
         let field_type = FieldType::Str(TEXT);
         let buffer: Vec<u8> = {
             let mut term_dictionary_builder =
-                TermDictionaryBuilder::new(vec![], &field_type).unwrap();
+                TermDictionaryBuilder::create(vec![], &field_type).unwrap();
             for &(ref id, ref i) in &ids {
                 term_dictionary_builder
                     .insert(id.as_bytes(), &make_term_info(*i as u64))
                     .unwrap();
@@ -210,7 +210,7 @@ mod tests {
         let field_type = FieldType::Str(TEXT);
         let buffer: Vec<u8> = {
             let mut term_dictionary_builder =
-                TermDictionaryBuilder::new(vec![], &field_type).unwrap();
+                TermDictionaryBuilder::create(vec![], &field_type).unwrap();
             // term requires more than 16bits
             term_dictionary_builder
                 .insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))
                 .unwrap();
@@ -245,7 +245,7 @@ mod tests {
         let field_type = FieldType::Str(TEXT);
         let buffer: Vec<u8> = {
             let mut term_dictionary_builder =
-                TermDictionaryBuilder::new(vec![], &field_type).unwrap();
+                TermDictionaryBuilder::create(vec![], &field_type).unwrap();
             for &(ref id, ref i) in &ids {
                 term_dictionary_builder
                     .insert(id.as_bytes(), &make_term_info(*i as u64))
                     .unwrap();
@@ -314,7 +314,7 @@ mod tests {
         let field_type = FieldType::Str(TEXT);
         let buffer: Vec<u8> = {
             let mut term_dictionary_builder =
-                TermDictionaryBuilder::new(vec![], &field_type).unwrap();
+                TermDictionaryBuilder::create(vec![], &field_type).unwrap();
             term_dictionary_builder
                 .insert(&[], &make_term_info(1 as u64))
                 .unwrap();
@@ -338,7 +338,7 @@ mod tests {
         let field_type = FieldType::Str(TEXT);
         let buffer: Vec<u8> = {
             let mut term_dictionary_builder =
-                TermDictionaryBuilder::new(vec![], &field_type).unwrap();
+                TermDictionaryBuilder::create(vec![], &field_type).unwrap();
             for i in 0u8..10u8 {
                 let number_arr = [i; 1];
                 term_dictionary_builder
@@ -408,7 +408,7 @@ mod tests {
         let write = directory.open_write(&path).unwrap();
         let field_type = FieldType::Str(TEXT);
         let mut term_dictionary_builder =
-            TermDictionaryBuilder::new(write, &field_type).unwrap();
+            TermDictionaryBuilder::create(write, &field_type).unwrap();
         for term in COUNTRIES.iter() {
             term_dictionary_builder
                 .insert(term.as_bytes(), &make_term_info(0u64))
diff --git a/src/termdict/streamer.rs b/src/termdict/streamer.rs
index 98277f2ef..f1dc74532 100644
--- a/src/termdict/streamer.rs
+++ b/src/termdict/streamer.rs
@@ -132,10 +132,7 @@ where
     }

     /// Return the next `(key, value)` pair.
-    #[cfg_attr(
-        feature = "cargo-clippy",
-        allow(clippy::should_implement_trait)
-    )]
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))]
     pub fn next(&mut self) -> Option<(&[u8], &TermInfo)> {
         if self.advance() {
             Some((self.key(), self.value()))
diff --git a/src/termdict/termdict.rs b/src/termdict/termdict.rs
index 0f8a28231..b63bb54d0 100644
--- a/src/termdict/termdict.rs
+++ b/src/termdict/termdict.rs
@@ -29,7 +29,7 @@ where
     W: Write,
 {
     /// Creates a new `TermDictionaryBuilder`
-    pub fn new(w: W, _field_type: &FieldType) -> io::Result<TermDictionaryBuilder<W>> {
+    pub fn create(w: W, _field_type: &FieldType) -> io::Result<TermDictionaryBuilder<W>> {
         let fst_builder = fst::MapBuilder::new(w).map_err(convert_fst_error)?;
         Ok(TermDictionaryBuilder {
             fst_builder,
@@ -132,7 +132,7 @@ impl TermDictionary {
     /// Creates an empty term dictionary which contains no terms.
     pub fn empty(field_type: &FieldType) -> Self {
         let term_dictionary_data: Vec<u8> =
-            TermDictionaryBuilder::new(Vec::<u8>::new(), &field_type)
+            TermDictionaryBuilder::create(Vec::<u8>::new(), &field_type)
                 .expect("Creating a TermDictionaryBuilder in a Vec should never fail")
                 .finish()
                 .expect("Writing in a Vec should never fail");