diff --git a/CHANGELOG.md b/CHANGELOG.md index 1cd1bd9f2..233713102 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Tantivy 0.10.0 Minor --------- +- Switched to Rust 2018 (@uvd) - Small simplification of the code. Calling .freq() or .doc() when .advance() has never been called on segment postings should panic from now on. diff --git a/Cargo.toml b/Cargo.toml index f75249785..caf2f9a0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ homepage = "https://github.com/tantivy-search/tantivy" repository = "https://github.com/tantivy-search/tantivy" readme = "README.md" keywords = ["search", "information", "retrieval"] +edition = "2018" [dependencies] base64 = "0.10.0" diff --git a/examples/basic_search.rs b/examples/basic_search.rs index 78c2c2d3b..416f86f35 100644 --- a/examples/basic_search.rs +++ b/examples/basic_search.rs @@ -10,8 +10,6 @@ // - search for the best document matching "sea whale" // - retrieve the best document's original content. -extern crate tempdir; - // --- // Importing tantivy... #[macro_use] diff --git a/examples/custom_collector.rs b/examples/custom_collector.rs index 0497542d0..e63eb9f92 100644 --- a/examples/custom_collector.rs +++ b/examples/custom_collector.rs @@ -7,8 +7,6 @@ // Of course, you can have a look at tantivy's built-in collectors // such as the `CountCollector` for more examples. -extern crate tempdir; - // --- // Importing tantivy... #[macro_use] diff --git a/examples/faceted_search.rs b/examples/faceted_search.rs index 0a99c7131..98e0a2753 100644 --- a/examples/faceted_search.rs +++ b/examples/faceted_search.rs @@ -10,8 +10,6 @@ // - search for the best document matching "sea whale" // - retrieve the best document's original content. -extern crate tempdir; - // --- // Importing tantivy... #[macro_use] diff --git a/examples/multiple_producer.rs b/examples/multiple_producer.rs index 8a51d40d4..912c4310e 100644 --- a/examples/multiple_producer.rs +++ b/examples/multiple_producer.rs @@ -23,8 +23,6 @@ // index a single document?), but aims at demonstrating the mechanism that makes indexing // from several threads possible. -extern crate tempdir; - // --- // Importing tantivy... #[macro_use] diff --git a/examples/snippet.rs b/examples/snippet.rs index b79ede83b..0d87834ea 100644 --- a/examples/snippet.rs +++ b/examples/snippet.rs @@ -4,7 +4,6 @@ // your hit result. // Snippets are excerpts of a target document, returned in HTML format. // The keywords searched by the user are highlighted with a `<b>` tag. -extern crate tempdir; // --- // Importing tantivy... diff --git a/examples/stop_words.rs b/examples/stop_words.rs index a6b338060..9246e236a 100644 --- a/examples/stop_words.rs +++ b/examples/stop_words.rs @@ -9,8 +9,6 @@ // - add a few stop words // - index a few documents in our index -extern crate tempdir; - // --- // Importing tantivy... 
#[macro_use] diff --git a/examples/working_with_json.rs b/examples/working_with_json.rs index 75710cf2b..411ca4449 100644 --- a/examples/working_with_json.rs +++ b/examples/working_with_json.rs @@ -1,4 +1,4 @@ -extern crate tantivy; +use tantivy; use tantivy::schema::*; // # Document from json diff --git a/src/collector/count_collector.rs b/src/collector/count_collector.rs index 85ceaa3ab..c5db47225 100644 --- a/src/collector/count_collector.rs +++ b/src/collector/count_collector.rs @@ -1,10 +1,10 @@ use super::Collector; -use collector::SegmentCollector; -use DocId; -use Result; -use Score; -use SegmentLocalId; -use SegmentReader; +use crate::collector::SegmentCollector; +use crate::DocId; +use crate::Result; +use crate::Score; +use crate::SegmentLocalId; +use crate::SegmentReader; /// `CountCollector` collector only counts how many /// documents match the query. @@ -94,8 +94,8 @@ impl SegmentCollector for SegmentCountCollector { #[cfg(test)] mod tests { use super::{Count, SegmentCountCollector}; - use collector::Collector; - use collector::SegmentCollector; + use crate::collector::Collector; + use crate::collector::SegmentCollector; #[test] fn test_count_collect_does_not_requires_scoring() { diff --git a/src/collector/facet_collector.rs b/src/collector/facet_collector.rs index f86cc9483..46fd24052 100644 --- a/src/collector/facet_collector.rs +++ b/src/collector/facet_collector.rs @@ -1,9 +1,15 @@ -use collector::Collector; -use collector::SegmentCollector; -use docset::SkipResult; -use fastfield::FacetReader; -use schema::Facet; -use schema::Field; +use crate::collector::Collector; +use crate::collector::SegmentCollector; +use crate::docset::SkipResult; +use crate::fastfield::FacetReader; +use crate::schema::Facet; +use crate::schema::Field; +use crate::DocId; +use crate::Result; +use crate::Score; +use crate::SegmentLocalId; +use crate::SegmentReader; +use crate::TantivyError; use std::cmp::Ordering; use std::collections::btree_map; use std::collections::BTreeMap; @@ -12,12 +18,6 @@ use std::collections::BinaryHeap; use std::collections::Bound; use std::iter::Peekable; use std::{u64, usize}; -use DocId; -use Result; -use Score; -use SegmentLocalId; -use SegmentReader; -use TantivyError; struct Hit<'a> { count: u64, } impl<'a> Eq for Hit<'a> {} impl<'a> PartialEq<Hit<'a>> for Hit<'a> { - fn eq(&self, other: &Hit) -> bool { + fn eq(&self, other: &Hit<'_>) -> bool { self.count == other.count } } impl<'a> PartialOrd<Hit<'a>> for Hit<'a> { - fn partial_cmp(&self, other: &Hit) -> Option<Ordering> { + fn partial_cmp(&self, other: &Hit<'_>) -> Option<Ordering> { Some(self.cmp(other)) } } @@ -398,7 +398,7 @@ impl<'a> Iterator for FacetChildIterator<'a> { } impl FacetCounts { - pub fn get<T>(&self, facet_from: T) -> FacetChildIterator + pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_> where Facet: From<T>, { @@ -412,7 +412,8 @@ impl FacetCounts { let facet_after = Facet::from_encoded_string(facet_after_bytes); Bound::Excluded(facet_after) }; - let underlying: btree_map::Range<_, _> = self.facet_counts.range((left_bound, right_bound)); + let underlying: btree_map::Range<'_, _, _> = + self.facet_counts.range((left_bound, right_bound)); FacetChildIterator { underlying } } @@ -453,12 +454,12 @@ impl FacetCounts { #[cfg(test)] mod tests { use super::{FacetCollector, FacetCounts}; - use core::Index; - use query::AllQuery; + use crate::core::Index; + use crate::query::AllQuery; + use crate::schema::{Document, Facet, Field, Schema}; use rand::distributions::Uniform; use rand::prelude::SliceRandom; 
use rand::{thread_rng, Rng}; - use schema::{Document, Facet, Field, Schema}; use std::iter; #[test] diff --git a/src/collector/mod.rs b/src/collector/mod.rs index 515799fdf..e6ceb6a05 100644 --- a/src/collector/mod.rs +++ b/src/collector/mod.rs @@ -85,12 +85,12 @@ See the `custom_collector` example. */ +use crate::DocId; +use crate::Result; +use crate::Score; +use crate::SegmentLocalId; +use crate::SegmentReader; use downcast_rs; -use DocId; -use Result; -use Score; -use SegmentLocalId; -use SegmentReader; mod count_collector; pub use self::count_collector::Count; diff --git a/src/collector/multi_collector.rs b/src/collector/multi_collector.rs index 43d5dde9e..5358a6c43 100644 --- a/src/collector/multi_collector.rs +++ b/src/collector/multi_collector.rs @@ -1,30 +1,30 @@ use super::Collector; use super::SegmentCollector; -use collector::Fruit; +use crate::collector::Fruit; +use crate::DocId; +use crate::Result; +use crate::Score; +use crate::SegmentLocalId; +use crate::SegmentReader; +use crate::TantivyError; use std::marker::PhantomData; use std::ops::Deref; -use DocId; -use Result; -use Score; -use SegmentLocalId; -use SegmentReader; -use TantivyError; pub struct MultiFruit { - sub_fruits: Vec<Option<Box<Fruit>>>, + sub_fruits: Vec<Option<Box<dyn Fruit>>>, } pub struct CollectorWrapper<TCollector: Collector>(TCollector); impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> { - type Fruit = Box<Fruit>; - type Child = Box<BoxableSegmentCollector>; + type Fruit = Box<dyn Fruit>; + type Child = Box<dyn BoxableSegmentCollector>; fn for_segment( &self, segment_local_id: u32, reader: &SegmentReader, - ) -> Result<Box<BoxableSegmentCollector>> { + ) -> Result<Box<dyn BoxableSegmentCollector>> { let child = self.0.for_segment(segment_local_id, reader)?; Ok(Box::new(SegmentCollectorWrapper(child))) } @@ -33,7 +33,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> { self.0.requires_scoring() } - fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<Fruit>> { + fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<dyn Fruit>> { let typed_fruit: Vec<TCollector::Fruit> = children .into_iter() .map(|untyped_fruit| { @@ -50,21 +50,21 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> { } } -impl SegmentCollector for Box<BoxableSegmentCollector> { - type Fruit = Box<Fruit>; +impl SegmentCollector for Box<dyn BoxableSegmentCollector> { + type Fruit = Box<dyn Fruit>; fn collect(&mut self, doc: u32, score: f32) { self.as_mut().collect(doc, score); } - fn harvest(self) -> Box<Fruit> { + fn harvest(self) -> Box<dyn Fruit> { BoxableSegmentCollector::harvest_from_box(self) } } pub trait BoxableSegmentCollector { fn collect(&mut self, doc: u32, score: f32); - fn harvest_from_box(self: Box<Self>) -> Box<Fruit>; + fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>; } pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector); @@ -76,7 +76,7 @@ impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector self.0.collect(doc, score); } - fn harvest_from_box(self: Box<Self>) -> Box<Fruit> { + fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit> { Box::new(self.0.harvest()) } } @@ -157,8 +157,9 @@ impl<TFruit: Fruit> FruitHandle<TFruit> { #[allow(clippy::type_complexity)] #[derive(Default)] pub struct MultiCollector<'a> { - collector_wrappers: Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>> + 'a>>, + collector_wrappers: Vec< Box<dyn Collector<Child = Box<dyn BoxableSegmentCollector>, Fruit = Box<dyn Fruit>> + 'a>, >, } impl<'a> MultiCollector<'a> { @@ -207,7 +208,7 @@ impl<'a> Collector for MultiCollector<'a> { } fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> { - let mut segment_fruits_list: Vec<Vec<Box<Fruit>>> = (0..self.collector_wrappers.len()) + let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len()) .map(|_| Vec::with_capacity(segments_multifruits.len())) .collect::<Vec<_>>(); for segment_multifruit in segments_multifruits { @@ -230,7 +231,7 @@ impl<'a> Collector for MultiCollector<'a> { } pub struct MultiCollectorChild { - children: Vec<Box<BoxableSegmentCollector>>, + children: Vec<Box<dyn BoxableSegmentCollector>>, } impl SegmentCollector for 
MultiCollectorChild { mod tests { use super::*; - use collector::{Count, TopDocs}; - use query::TermQuery; - use schema::IndexRecordOption; - use schema::{Schema, TEXT}; - use Index; - use Term; + use crate::collector::{Count, TopDocs}; + use crate::query::TermQuery; + use crate::schema::IndexRecordOption; + use crate::schema::{Schema, TEXT}; + use crate::Index; + use crate::Term; #[test] fn test_multi_collector() { diff --git a/src/collector/tests.rs b/src/collector/tests.rs index 424ceb3fd..f348b4c15 100644 --- a/src/collector/tests.rs +++ b/src/collector/tests.rs @@ -1,12 +1,12 @@ use super::*; -use core::SegmentReader; -use fastfield::BytesFastFieldReader; -use fastfield::FastFieldReader; -use schema::Field; -use DocAddress; -use DocId; -use Score; -use SegmentLocalId; +use crate::core::SegmentReader; +use crate::fastfield::BytesFastFieldReader; +use crate::fastfield::FastFieldReader; +use crate::schema::Field; +use crate::DocAddress; +use crate::DocId; +use crate::Score; +use crate::SegmentLocalId; /// Stores all of the doc ids. /// This collector is only used for tests. diff --git a/src/collector/top_collector.rs b/src/collector/top_collector.rs index 880df6950..ea2b7ca02 100644 --- a/src/collector/top_collector.rs +++ b/src/collector/top_collector.rs @@ -1,11 +1,11 @@ +use crate::DocAddress; +use crate::DocId; +use crate::Result; +use crate::SegmentLocalId; +use crate::SegmentReader; use serde::export::PhantomData; use std::cmp::Ordering; use std::collections::BinaryHeap; -use DocAddress; -use DocId; -use Result; -use SegmentLocalId; -use SegmentReader; /// Contains a feature (field, score, etc.) of a document along with the document address. /// @@ -178,8 +178,8 @@ impl TopSegmentCollector { #[cfg(test)] mod tests { use super::{TopCollector, TopSegmentCollector}; - use DocAddress; - use Score; + use crate::DocAddress; + use crate::Score; #[test] fn test_top_collector_not_at_capacity() { diff --git a/src/collector/top_field_collector.rs b/src/collector/top_field_collector.rs index b7cf7c990..d679d0122 100644 --- a/src/collector/top_field_collector.rs +++ b/src/collector/top_field_collector.rs @@ -1,16 +1,16 @@ use super::Collector; -use collector::top_collector::TopCollector; -use collector::top_collector::TopSegmentCollector; -use collector::SegmentCollector; -use fastfield::FastFieldReader; -use fastfield::FastValue; -use schema::Field; +use crate::collector::top_collector::TopCollector; +use crate::collector::top_collector::TopSegmentCollector; +use crate::collector::SegmentCollector; +use crate::fastfield::FastFieldReader; +use crate::fastfield::FastValue; +use crate::schema::Field; +use crate::DocAddress; +use crate::Result; +use crate::SegmentLocalId; +use crate::SegmentReader; +use crate::TantivyError; use std::marker::PhantomData; -use DocAddress; -use Result; -use SegmentLocalId; -use SegmentReader; -use TantivyError; /// The Top Field Collector keeps track of the K documents /// sorted by a fast field in the index @@ -159,17 +159,17 @@ impl SegmentCollector #[cfg(test)] mod tests { use super::TopDocsByField; - use collector::Collector; - use collector::TopDocs; - use query::Query; - use query::QueryParser; - use schema::Field; - use schema::IntOptions; - use schema::{Schema, FAST, TEXT}; - use DocAddress; - use Index; - use IndexWriter; - use TantivyError; + use crate::collector::Collector; + use crate::collector::TopDocs; + use crate::query::Query; + use crate::query::QueryParser; + use crate::schema::Field; + use crate::schema::IntOptions; + use crate::schema::{Schema, 
FAST, TEXT}; + use crate::DocAddress; + use crate::Index; + use crate::IndexWriter; + use crate::TantivyError; const TITLE: &str = "title"; const SIZE: &str = "size"; @@ -258,7 +258,7 @@ mod tests { query_field: Field, schema: Schema, mut doc_adder: impl FnMut(&mut IndexWriter) -> (), - ) -> (Index, Box) { + ) -> (Index, Box) { let index = Index::create_in_ram(schema); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); diff --git a/src/collector/top_score_collector.rs b/src/collector/top_score_collector.rs index 43b7424d6..bc247d400 100644 --- a/src/collector/top_score_collector.rs +++ b/src/collector/top_score_collector.rs @@ -1,16 +1,16 @@ use super::Collector; -use collector::top_collector::TopCollector; -use collector::top_collector::TopSegmentCollector; -use collector::SegmentCollector; -use collector::TopDocsByField; -use fastfield::FastValue; -use schema::Field; -use DocAddress; -use DocId; -use Result; -use Score; -use SegmentLocalId; -use SegmentReader; +use crate::collector::top_collector::TopCollector; +use crate::collector::top_collector::TopSegmentCollector; +use crate::collector::SegmentCollector; +use crate::collector::TopDocsByField; +use crate::fastfield::FastValue; +use crate::schema::Field; +use crate::DocAddress; +use crate::DocId; +use crate::Result; +use crate::Score; +use crate::SegmentLocalId; +use crate::SegmentReader; /// The Top Score Collector keeps track of the K documents /// sorted by their score. @@ -128,12 +128,12 @@ impl SegmentCollector for TopScoreSegmentCollector { #[cfg(test)] mod tests { use super::TopDocs; - use query::QueryParser; - use schema::Schema; - use schema::TEXT; - use DocAddress; - use Index; - use Score; + use crate::query::QueryParser; + use crate::schema::Schema; + use crate::schema::TEXT; + use crate::DocAddress; + use crate::Index; + use crate::Score; fn make_index() -> Index { let mut schema_builder = Schema::builder(); diff --git a/src/common/bitset.rs b/src/common/bitset.rs index a125f4cbc..527aa8d4a 100644 --- a/src/common/bitset.rs +++ b/src/common/bitset.rs @@ -5,7 +5,7 @@ use std::u64; pub(crate) struct TinySet(u64); impl fmt::Debug for TinySet { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.into_iter().collect::>().fmt(f) } } @@ -204,12 +204,12 @@ mod tests { use super::BitSet; use super::TinySet; - use docset::DocSet; - use query::BitSetDocSet; + use crate::docset::DocSet; + use crate::query::BitSetDocSet; + use crate::tests; + use crate::tests::generate_nonunique_unsorted; use std::collections::BTreeSet; use std::collections::HashSet; - use tests; - use tests::generate_nonunique_unsorted; #[test] fn test_tiny_set() { diff --git a/src/common/composite_file.rs b/src/common/composite_file.rs index 21538c096..f2c2d2208 100644 --- a/src/common/composite_file.rs +++ b/src/common/composite_file.rs @@ -1,11 +1,11 @@ -use common::BinarySerializable; -use common::CountingWriter; -use common::VInt; -use directory::ReadOnlySource; -use directory::WritePtr; -use schema::Field; -use space_usage::FieldUsage; -use space_usage::PerFieldSpaceUsage; +use crate::common::BinarySerializable; +use crate::common::CountingWriter; +use crate::common::VInt; +use crate::directory::ReadOnlySource; +use crate::directory::WritePtr; +use crate::schema::Field; +use crate::space_usage::FieldUsage; +use crate::space_usage::PerFieldSpaceUsage; use std::collections::HashMap; use std::io::Write; use std::io::{self, Read}; @@ -185,10 +185,10 @@ impl 
CompositeFile { mod test { use super::{CompositeFile, CompositeWrite}; - use common::BinarySerializable; - use common::VInt; - use directory::{Directory, RAMDirectory}; - use schema::Field; + use crate::common::BinarySerializable; + use crate::common::VInt; + use crate::directory::{Directory, RAMDirectory}; + use crate::schema::Field; use std::io::Write; use std::path::Path; diff --git a/src/common/serialize.rs b/src/common/serialize.rs index 0df4f75ae..4156115c7 100644 --- a/src/common/serialize.rs +++ b/src/common/serialize.rs @@ -1,6 +1,6 @@ +use crate::common::Endianness; +use crate::common::VInt; use byteorder::{ReadBytesExt, WriteBytesExt}; -use common::Endianness; -use common::VInt; use std::fmt; use std::io; use std::io::Read; @@ -136,7 +136,7 @@ impl BinarySerializable for String { pub mod test { use super::*; - use common::VInt; + use crate::common::VInt; pub fn fixed_size_test() { let mut buffer = Vec::new(); diff --git a/src/common/vint.rs b/src/common/vint.rs index 65dfd72ba..b7f52d612 100644 --- a/src/common/vint.rs +++ b/src/common/vint.rs @@ -30,16 +30,16 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) { let val = u64::from(val); const STOP_BIT: u64 = 128u64; match val { - 0...STOP_1 => (val | STOP_BIT, 1), - START_2...STOP_2 => ( + 0..=STOP_1 => (val | STOP_BIT, 1), + START_2..=STOP_2 => ( (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)), 2, ), - START_3...STOP_3 => ( + START_3..=STOP_3 => ( (val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) | (STOP_BIT << (8 * 2)), 3, ), - START_4...STOP_4 => ( + START_4..=STOP_4 => ( (val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) @@ -171,8 +171,8 @@ mod tests { use super::serialize_vint_u32; use super::VInt; + use crate::common::BinarySerializable; use byteorder::{ByteOrder, LittleEndian}; - use common::BinarySerializable; fn aux_test_vint(val: u64) { let mut v = [14u8; 10]; diff --git a/src/core/executor.rs b/src/core/executor.rs index 281e96b91..c90d0b8a6 100644 --- a/src/core/executor.rs +++ b/src/core/executor.rs @@ -1,6 +1,6 @@ +use crate::Result; use crossbeam::channel; use scoped_pool::{Pool, ThreadConfig}; -use Result; /// Search executor, whether search requests are single-threaded or multithreaded. 
/// diff --git a/src/core/index.rs b/src/core/index.rs index 578002a16..b31142b53 100644 --- a/src/core/index.rs +++ b/src/core/index.rs @@ -1,38 +1,38 @@ use super::segment::create_segment; use super::segment::Segment; -use core::Executor; -use core::IndexMeta; -use core::SegmentId; -use core::SegmentMeta; -use core::META_FILEPATH; -use directory::ManagedDirectory; +use crate::core::Executor; +use crate::core::IndexMeta; +use crate::core::SegmentId; +use crate::core::SegmentMeta; +use crate::core::META_FILEPATH; +use crate::directory::ManagedDirectory; #[cfg(feature = "mmap")] -use directory::MmapDirectory; -use directory::INDEX_WRITER_LOCK; -use directory::{Directory, RAMDirectory}; -use error::DataCorruption; -use error::TantivyError; -use indexer::index_writer::open_index_writer; -use indexer::index_writer::HEAP_SIZE_MIN; -use indexer::segment_updater::save_new_metas; +use crate::directory::MmapDirectory; +use crate::directory::INDEX_WRITER_LOCK; +use crate::directory::{Directory, RAMDirectory}; +use crate::error::DataCorruption; +use crate::error::TantivyError; +use crate::indexer::index_writer::open_index_writer; +use crate::indexer::index_writer::HEAP_SIZE_MIN; +use crate::indexer::segment_updater::save_new_metas; +use crate::reader::IndexReader; +use crate::reader::IndexReaderBuilder; +use crate::schema::Field; +use crate::schema::FieldType; +use crate::schema::Schema; +use crate::tokenizer::BoxedTokenizer; +use crate::tokenizer::TokenizerManager; +use crate::IndexWriter; +use crate::Result; use num_cpus; -use reader::IndexReader; -use reader::IndexReaderBuilder; -use schema::Field; -use schema::FieldType; -use schema::Schema; use serde_json; use std::borrow::BorrowMut; use std::fmt; #[cfg(feature = "mmap")] use std::path::Path; use std::sync::Arc; -use tokenizer::BoxedTokenizer; -use tokenizer::TokenizerManager; -use IndexWriter; -use Result; -fn load_metas(directory: &Directory) -> Result { +fn load_metas(directory: &dyn Directory) -> Result { let meta_data = directory.atomic_read(&META_FILEPATH)?; let meta_string = String::from_utf8_lossy(&meta_data); serde_json::from_str(&meta_string) @@ -169,11 +169,11 @@ impl Index { } /// Helper to access the tokenizer associated to a specific field. 
- pub fn tokenizer_for_field(&self, field: Field) -> Result> { + pub fn tokenizer_for_field(&self, field: Field) -> Result> { let field_entry = self.schema.get_field_entry(field); let field_type = field_entry.field_type(); let tokenizer_manager: &TokenizerManager = self.tokenizers(); - let tokenizer_name_opt: Option> = match field_type { + let tokenizer_name_opt: Option> = match field_type { FieldType::Str(text_options) => text_options .get_indexing_options() .map(|text_indexing_options| text_indexing_options.tokenizer().to_string()) @@ -346,22 +346,22 @@ impl Index { } impl fmt::Debug for Index { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Index({:?})", self.directory) } } #[cfg(test)] mod tests { - use directory::RAMDirectory; - use schema::Field; - use schema::{Schema, INDEXED, TEXT}; + use crate::directory::RAMDirectory; + use crate::schema::Field; + use crate::schema::{Schema, INDEXED, TEXT}; + use crate::Index; + use crate::IndexReader; + use crate::IndexWriter; + use crate::ReloadPolicy; use std::thread; use std::time::Duration; - use Index; - use IndexReader; - use IndexWriter; - use ReloadPolicy; #[test] fn test_indexer_for_field() { diff --git a/src/core/index_meta.rs b/src/core/index_meta.rs index eb50df6f8..18ff95e04 100644 --- a/src/core/index_meta.rs +++ b/src/core/index_meta.rs @@ -1,8 +1,8 @@ -use core::SegmentMeta; -use schema::Schema; +use crate::core::SegmentMeta; +use crate::schema::Schema; +use crate::Opstamp; use serde_json; use std::fmt; -use Opstamp; /// Meta information about the `Index`. /// @@ -46,7 +46,7 @@ impl IndexMeta { } impl fmt::Debug for IndexMeta { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", @@ -60,7 +60,7 @@ impl fmt::Debug for IndexMeta { mod tests { use super::IndexMeta; - use schema::{Schema, TEXT}; + use crate::schema::{Schema, TEXT}; use serde_json; #[test] diff --git a/src/core/inverted_index_reader.rs b/src/core/inverted_index_reader.rs index 51c5b02ae..02290947f 100644 --- a/src/core/inverted_index_reader.rs +++ b/src/core/inverted_index_reader.rs @@ -1,13 +1,13 @@ -use common::BinarySerializable; -use directory::ReadOnlySource; +use crate::common::BinarySerializable; +use crate::directory::ReadOnlySource; +use crate::positions::PositionReader; +use crate::postings::TermInfo; +use crate::postings::{BlockSegmentPostings, SegmentPostings}; +use crate::schema::FieldType; +use crate::schema::IndexRecordOption; +use crate::schema::Term; +use crate::termdict::TermDictionary; use owned_read::OwnedRead; -use positions::PositionReader; -use postings::TermInfo; -use postings::{BlockSegmentPostings, SegmentPostings}; -use schema::FieldType; -use schema::IndexRecordOption; -use schema::Term; -use termdict::TermDictionary; /// The inverted index reader is in charge of accessing /// the inverted index associated to a specific field. 
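The recurring change in these hunks is the trait-object syntax: Rust 2018 lints against bare trait objects such as `&Directory` or `Box<BoxedTokenizer>`, so each one gains an explicit `dyn`. A minimal sketch of the rule, using a hypothetical `Animal` trait rather than any tantivy type:

trait Animal {
    fn name(&self) -> &str;
}

struct Dog;

impl Animal for Dog {
    fn name(&self) -> &str {
        "dog"
    }
}

// Rust 2015 accepted `Box<Animal>` and `&Animal`; the 2018 edition warns on
// both (`bare_trait_objects` lint), so the trait object is spelled out with
// `dyn`, just like `load_metas(directory: &dyn Directory)` above.
fn boxed_animal() -> Box<dyn Animal> {
    Box::new(Dog)
}

fn print_name(animal: &dyn Animal) {
    println!("{}", animal.name());
}

fn main() {
    print_name(boxed_animal().as_ref());
}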
diff --git a/src/core/searcher.rs b/src/core/searcher.rs index 3c0c74514..8949c9e63 100644 --- a/src/core/searcher.rs +++ b/src/core/searcher.rs @@ -1,26 +1,26 @@ -use collector::Collector; -use collector::SegmentCollector; -use core::Executor; -use core::InvertedIndexReader; -use core::SegmentReader; -use query::Query; -use query::Scorer; -use query::Weight; -use schema::Document; -use schema::Schema; -use schema::{Field, Term}; -use space_usage::SearcherSpaceUsage; +use crate::collector::Collector; +use crate::collector::SegmentCollector; +use crate::core::Executor; +use crate::core::InvertedIndexReader; +use crate::core::SegmentReader; +use crate::query::Query; +use crate::query::Scorer; +use crate::query::Weight; +use crate::schema::Document; +use crate::schema::Schema; +use crate::schema::{Field, Term}; +use crate::space_usage::SearcherSpaceUsage; +use crate::store::StoreReader; +use crate::termdict::TermMerger; +use crate::DocAddress; +use crate::Index; +use crate::Result; use std::fmt; use std::sync::Arc; -use store::StoreReader; -use termdict::TermMerger; -use DocAddress; -use Index; -use Result; fn collect_segment<C: Collector>( collector: &C, - weight: &Weight, + weight: &dyn Weight, segment_ord: u32, segment_reader: &SegmentReader, ) -> Result<C::Fruit> { @@ -132,7 +132,7 @@ impl Searcher { /// /// Finally, the Collector merges each of the child collectors into itself for result usability /// by the caller. - pub fn search<C: Collector>(&self, query: &Query, collector: &C) -> Result<C::Fruit> { + pub fn search<C: Collector>(&self, query: &dyn Query, collector: &C) -> Result<C::Fruit> { let executor = self.index.search_executor(); self.search_with_executor(query, collector, executor) } @@ -151,7 +151,7 @@ impl Searcher { /// hurt it. It will however, decrease the average response time. pub fn search_with_executor<C: Collector>( &self, - query: &Query, + query: &dyn Query, collector: &C, executor: &Executor, ) -> Result<C::Fruit> { @@ -203,7 +203,7 @@ impl FieldSearcher { /// Returns a Stream over all of the sorted unique terms /// for the given field. - pub fn terms(&self) -> TermMerger { + pub fn terms(&self) -> TermMerger<'_> { let term_streamers: Vec<_> = self .inv_index_readers .iter() @@ -214,7 +214,7 @@ impl FieldSearcher { } impl fmt::Debug for Searcher { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let segment_ids = self .segment_readers .iter() diff --git a/src/core/segment.rs b/src/core/segment.rs index b87208fcb..a5177cc66 100644 --- a/src/core/segment.rs +++ b/src/core/segment.rs @@ -1,17 +1,17 @@ use super::SegmentComponent; -use core::Index; -use core::SegmentId; -use core::SegmentMeta; -use directory::error::{OpenReadError, OpenWriteError}; -use directory::Directory; -use directory::{ReadOnlySource, WritePtr}; -use indexer::segment_serializer::SegmentSerializer; -use schema::Schema; +use crate::core::Index; +use crate::core::SegmentId; +use crate::core::SegmentMeta; +use crate::directory::error::{OpenReadError, OpenWriteError}; +use crate::directory::Directory; +use crate::directory::{ReadOnlySource, WritePtr}; +use crate::indexer::segment_serializer::SegmentSerializer; +use crate::schema::Schema; +use crate::Opstamp; +use crate::Result; use std::fmt; use std::path::PathBuf; use std::result; -use Opstamp; -use Result; /// A segment is a piece of the index. 
#[derive(Clone)] @@ -21,7 +21,7 @@ pub struct Segment { } impl fmt::Debug for Segment { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Segment({:?})", self.id().uuid_string()) } } diff --git a/src/core/segment_id.rs b/src/core/segment_id.rs index 5727dbe10..e86d08535 100644 --- a/src/core/segment_id.rs +++ b/src/core/segment_id.rs @@ -62,7 +62,7 @@ impl SegmentId { } impl fmt::Debug for SegmentId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Seg({:?})", self.short_uuid_string()) } } diff --git a/src/core/segment_meta.rs b/src/core/segment_meta.rs index 2f4b4177e..1834c5514 100644 --- a/src/core/segment_meta.rs +++ b/src/core/segment_meta.rs @@ -1,11 +1,11 @@ use super::SegmentComponent; +use crate::core::SegmentId; +use crate::Opstamp; use census::{Inventory, TrackedObject}; -use core::SegmentId; use serde; use std::collections::HashSet; use std::fmt; use std::path::PathBuf; -use Opstamp; lazy_static! { static ref INVENTORY: Inventory = { Inventory::new() }; @@ -27,7 +27,7 @@ pub struct SegmentMeta { } impl fmt::Debug for SegmentMeta { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.tracked.fmt(f) } } diff --git a/src/core/segment_reader.rs b/src/core/segment_reader.rs index 42c1ded64..7c89c691f 100644 --- a/src/core/segment_reader.rs +++ b/src/core/segment_reader.rs @@ -1,26 +1,26 @@ -use common::CompositeFile; -use common::HasLen; -use core::InvertedIndexReader; -use core::Segment; -use core::SegmentComponent; -use core::SegmentId; -use directory::ReadOnlySource; -use fastfield::DeleteBitSet; -use fastfield::FacetReader; -use fastfield::FastFieldReaders; -use fieldnorm::FieldNormReader; -use schema::Field; -use schema::FieldType; -use schema::Schema; -use space_usage::SegmentSpaceUsage; +use crate::common::CompositeFile; +use crate::common::HasLen; +use crate::core::InvertedIndexReader; +use crate::core::Segment; +use crate::core::SegmentComponent; +use crate::core::SegmentId; +use crate::directory::ReadOnlySource; +use crate::fastfield::DeleteBitSet; +use crate::fastfield::FacetReader; +use crate::fastfield::FastFieldReaders; +use crate::fieldnorm::FieldNormReader; +use crate::schema::Field; +use crate::schema::FieldType; +use crate::schema::Schema; +use crate::space_usage::SegmentSpaceUsage; +use crate::store::StoreReader; +use crate::termdict::TermDictionary; +use crate::DocId; +use crate::Result; use std::collections::HashMap; use std::fmt; use std::sync::Arc; use std::sync::RwLock; -use store::StoreReader; -use termdict::TermDictionary; -use DocId; -use Result; /// Entry point to access all of the datastructures of the `Segment` /// @@ -243,10 +243,9 @@ impl SegmentReader { let postings_source = postings_source_opt.unwrap(); - let termdict_source = self - .termdict_composite - .open_read(field) - .expect("Failed to open field term dictionary in composite file. Is the field indexed?"); + let termdict_source = self.termdict_composite.open_read(field).expect( + "Failed to open field term dictionary in composite file. 
Is the field indexed?", + ); let positions_source = self .positions_composite @@ -296,7 +295,7 @@ impl SegmentReader { } /// Returns an iterator that will iterate over the alive document ids - pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator { + pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> { SegmentReaderAliveDocsIterator::new(&self) } @@ -320,7 +319,7 @@ impl SegmentReader { } impl fmt::Debug for SegmentReader { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "SegmentReader({:?})", self.segment_id) } } @@ -373,9 +372,9 @@ impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> { #[cfg(test)] mod test { - use core::Index; - use schema::{Schema, Term, STORED, TEXT}; - use DocId; + use crate::core::Index; + use crate::schema::{Schema, Term, STORED, TEXT}; + use crate::DocId; #[test] fn test_alive_docs_iterator() { diff --git a/src/directory/directory.rs b/src/directory/directory.rs index cc3208f07..8648ba1b4 100644 --- a/src/directory/directory.rs +++ b/src/directory/directory.rs @@ -1,9 +1,9 @@ -use directory::directory_lock::Lock; -use directory::error::LockError; -use directory::error::{DeleteError, OpenReadError, OpenWriteError}; -use directory::WatchCallback; -use directory::WatchHandle; -use directory::{ReadOnlySource, WritePtr}; +use crate::directory::directory_lock::Lock; +use crate::directory::error::LockError; +use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; +use crate::directory::WatchCallback; +use crate::directory::WatchHandle; +use crate::directory::{ReadOnlySource, WritePtr}; use std::fmt; use std::io; use std::io::Write; @@ -48,10 +48,10 @@ impl RetryPolicy { /// /// It is transparently associated to a lock file, that gets deleted /// on `Drop.` The lock is released automatically on `Drop`. 
-pub struct DirectoryLock(Box); +pub struct DirectoryLock(Box); struct DirectoryLockGuard { - directory: Box, + directory: Box, path: PathBuf, } @@ -76,7 +76,7 @@ enum TryAcquireLockError { fn try_acquire_lock( filepath: &Path, - directory: &mut Directory, + directory: &mut dyn Directory, ) -> Result { let mut write = directory.open_write(filepath).map_err(|e| match e { OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists, @@ -210,14 +210,14 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static { /// DirectoryClone pub trait DirectoryClone { /// Clones the directory and boxes the clone - fn box_clone(&self) -> Box; + fn box_clone(&self) -> Box; } impl DirectoryClone for T where T: 'static + Directory + Clone, { - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box { Box::new(self.clone()) } } diff --git a/src/directory/error.rs b/src/directory/error.rs index a57ae1371..4cc509443 100644 --- a/src/directory/error.rs +++ b/src/directory/error.rs @@ -33,7 +33,7 @@ impl Into for IOError { } impl fmt::Display for IOError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.path { Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err), None => write!(f, "io error occurred: '{}'", self.err), @@ -46,7 +46,7 @@ impl StdError for IOError { "io error occurred" } - fn cause(&self) -> Option<&StdError> { + fn cause(&self) -> Option<&dyn StdError> { Some(&self.err) } } @@ -84,7 +84,7 @@ impl From for OpenDirectoryError { } impl fmt::Display for OpenDirectoryError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { OpenDirectoryError::DoesNotExist(ref path) => { write!(f, "the underlying directory '{:?}' does not exist", path) @@ -106,7 +106,7 @@ impl StdError for OpenDirectoryError { "error occurred while opening a directory" } - fn cause(&self) -> Option<&StdError> { + fn cause(&self) -> Option<&dyn StdError> { None } } @@ -129,7 +129,7 @@ impl From for OpenWriteError { } impl fmt::Display for OpenWriteError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { OpenWriteError::FileAlreadyExists(ref path) => { write!(f, "the file '{:?}' already exists", path) @@ -148,7 +148,7 @@ impl StdError for OpenWriteError { "error occurred while opening a file for writing" } - fn cause(&self) -> Option<&StdError> { + fn cause(&self) -> Option<&dyn StdError> { match *self { OpenWriteError::FileAlreadyExists(_) => None, OpenWriteError::IOError(ref err) => Some(err), @@ -173,7 +173,7 @@ impl From for OpenReadError { } impl fmt::Display for OpenReadError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { OpenReadError::FileDoesNotExist(ref path) => { write!(f, "the file '{:?}' does not exist", path) @@ -192,7 +192,7 @@ impl StdError for OpenReadError { "error occurred while opening a file for reading" } - fn cause(&self) -> Option<&StdError> { + fn cause(&self) -> Option<&dyn StdError> { match *self { OpenReadError::FileDoesNotExist(_) => None, OpenReadError::IOError(ref err) => Some(err), @@ -217,7 +217,7 @@ impl From for DeleteError { } impl fmt::Display for DeleteError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { 
DeleteError::FileDoesNotExist(ref path) => { write!(f, "the file '{:?}' does not exist", path) @@ -234,7 +234,7 @@ impl StdError for DeleteError { "error occurred while deleting a file" } - fn cause(&self) -> Option<&StdError> { + fn cause(&self) -> Option<&dyn StdError> { match *self { DeleteError::FileDoesNotExist(_) => None, DeleteError::IOError(ref err) => Some(err), diff --git a/src/directory/managed_directory.rs b/src/directory/managed_directory.rs index 8faef439d..e70a0d342 100644 --- a/src/directory/managed_directory.rs +++ b/src/directory/managed_directory.rs @@ -1,11 +1,13 @@ -use core::MANAGED_FILEPATH; -use directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; -use directory::DirectoryLock; -use directory::Lock; -use directory::META_LOCK; -use directory::{ReadOnlySource, WritePtr}; -use directory::{WatchCallback, WatchHandle}; -use error::DataCorruption; +use crate::core::MANAGED_FILEPATH; +use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; +use crate::directory::DirectoryLock; +use crate::directory::Lock; +use crate::directory::META_LOCK; +use crate::directory::{ReadOnlySource, WritePtr}; +use crate::directory::{WatchCallback, WatchHandle}; +use crate::error::DataCorruption; +use crate::Directory; +use crate::Result; use serde_json; use std::collections::HashSet; use std::io; @@ -14,8 +16,6 @@ use std::path::{Path, PathBuf}; use std::result; use std::sync::RwLockWriteGuard; use std::sync::{Arc, RwLock}; -use Directory; -use Result; /// Returns true iff the file is "managed". /// Non-managed files are not subject to garbage collection. @@ -39,7 +39,7 @@ fn is_managed(path: &Path) -> bool { /// useful anymore. #[derive(Debug)] pub struct ManagedDirectory { - directory: Box<Directory>, + directory: Box<dyn Directory>, meta_informations: Arc<RwLock<MetaInformation>>, } @@ -51,8 +51,8 @@ struct MetaInformation { /// Saves the file containing the list of existing files /// that were created by tantivy. 
fn save_managed_paths( - directory: &mut Directory, - wlock: &RwLockWriteGuard, + directory: &mut dyn Directory, + wlock: &RwLockWriteGuard<'_, MetaInformation>, ) -> io::Result<()> { let mut w = serde_json::to_vec(&wlock.managed_paths)?; writeln!(&mut w)?; @@ -272,7 +272,7 @@ mod tests { static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2"); } - use directory::MmapDirectory; + use crate::directory::MmapDirectory; use std::io::Write; #[test] diff --git a/src/directory/mmap_directory.rs b/src/directory/mmap_directory.rs index b3888c62a..d73b9c160 100644 --- a/src/directory/mmap_directory.rs +++ b/src/directory/mmap_directory.rs @@ -1,23 +1,25 @@ -extern crate fs2; -extern crate notify; +use fs2; +use notify; use self::fs2::FileExt; use self::notify::RawEvent; use self::notify::RecursiveMode; use self::notify::Watcher; +use crate::core::META_FILEPATH; +use crate::directory::error::LockError; +use crate::directory::error::{ + DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError, +}; +use crate::directory::read_only_source::BoxedData; +use crate::directory::Directory; +use crate::directory::DirectoryLock; +use crate::directory::Lock; +use crate::directory::ReadOnlySource; +use crate::directory::WatchCallback; +use crate::directory::WatchCallbackList; +use crate::directory::WatchHandle; +use crate::directory::WritePtr; use atomicwrites; -use core::META_FILEPATH; -use directory::error::LockError; -use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; -use directory::read_only_source::BoxedData; -use directory::Directory; -use directory::DirectoryLock; -use directory::Lock; -use directory::ReadOnlySource; -use directory::WatchCallback; -use directory::WatchCallbackList; -use directory::WatchHandle; -use directory::WritePtr; use memmap::Mmap; use std::collections::HashMap; use std::convert::From; @@ -254,7 +256,7 @@ impl MmapDirectoryInner { } impl fmt::Debug for MmapDirectory { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "MmapDirectory({:?})", self.inner.root_path) } } @@ -525,13 +527,13 @@ mod tests { // The following tests are specific to the MmapDirectory use super::*; - use schema::{Schema, SchemaBuilder, TEXT}; + use crate::schema::{Schema, SchemaBuilder, TEXT}; + use crate::Index; + use crate::ReloadPolicy; use std::fs; use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread; use std::time::Duration; - use Index; - use ReloadPolicy; #[test] fn test_open_non_existant_path() { diff --git a/src/directory/mod.rs b/src/directory/mod.rs index 8d880b0f9..dd94ccf9e 100644 --- a/src/directory/mod.rs +++ b/src/directory/mod.rs @@ -39,7 +39,7 @@ impl SeekableWrite for T {} /// /// `WritePtr` are required to implement both Write /// and Seek. 
-pub type WritePtr = BufWriter<Box<SeekableWrite>>; +pub type WritePtr = BufWriter<Box<dyn SeekableWrite>>; #[cfg(test)] mod tests; diff --git a/src/directory/ram_directory.rs b/src/directory/ram_directory.rs index 38fc35cc4..cef5462da 100644 --- a/src/directory/ram_directory.rs +++ b/src/directory/ram_directory.rs @@ -1,8 +1,8 @@ -use core::META_FILEPATH; -use directory::error::{DeleteError, OpenReadError, OpenWriteError}; -use directory::WatchCallbackList; -use directory::WritePtr; -use directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle}; +use crate::core::META_FILEPATH; +use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; +use crate::directory::WatchCallbackList; +use crate::directory::WritePtr; +use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle}; use std::collections::HashMap; use std::fmt; use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write}; @@ -110,7 +110,7 @@ impl InnerDirectory { } impl fmt::Debug for RAMDirectory { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "RAMDirectory") } } diff --git a/src/directory/read_only_source.rs b/src/directory/read_only_source.rs index a09e32593..1a49fc0aa 100644 --- a/src/directory/read_only_source.rs +++ b/src/directory/read_only_source.rs @@ -1,9 +1,9 @@ -use common::HasLen; +use crate::common::HasLen; use stable_deref_trait::{CloneStableDeref, StableDeref}; use std::ops::Deref; use std::sync::Arc; -pub type BoxedData = Box<Deref<Target = [u8]> + Send + Sync + 'static>; +pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>; /// Read object that represents files in tantivy. /// diff --git a/src/directory/tests.rs b/src/directory/tests.rs index 13cb34ede..d935fa3fb 100644 --- a/src/directory/tests.rs +++ b/src/directory/tests.rs @@ -34,7 +34,7 @@ fn ram_directory_panics_if_flush_forgotten() { assert!(write_file.write_all(&[4]).is_ok()); } -fn test_simple(directory: &mut Directory) { +fn test_simple(directory: &mut dyn Directory) { { let mut write_file = directory.open_write(*TEST_PATH).unwrap(); assert!(directory.exists(*TEST_PATH)); @@ -52,7 +52,7 @@ fn test_simple(directory: &mut Directory) { assert!(!directory.exists(*TEST_PATH)); } -fn test_seek(directory: &mut Directory) { +fn test_seek(directory: &mut dyn Directory) { { { let mut write_file = directory.open_write(*TEST_PATH).unwrap(); @@ -69,7 +69,7 @@ fn test_seek(directory: &mut Directory) { assert!(directory.delete(*TEST_PATH).is_ok()); } -fn test_rewrite_forbidden(directory: &mut Directory) { +fn test_rewrite_forbidden(directory: &mut dyn Directory) { { directory.open_write(*TEST_PATH).unwrap(); assert!(directory.exists(*TEST_PATH)); @@ -80,7 +80,7 @@ fn test_rewrite_forbidden(directory: &mut Directory) { assert!(directory.delete(*TEST_PATH).is_ok()); } -fn test_write_create_the_file(directory: &mut Directory) { +fn test_write_create_the_file(directory: &mut dyn Directory) { { assert!(directory.open_read(*TEST_PATH).is_err()); let _w = directory.open_write(*TEST_PATH).unwrap(); @@ -90,7 +90,7 @@ fn test_write_create_the_file(directory: &mut Directory) { } } -fn test_directory_delete(directory: &mut Directory) { +fn test_directory_delete(directory: &mut dyn Directory) { assert!(directory.open_read(*TEST_PATH).is_err()); let mut write_file = directory.open_write(*TEST_PATH).unwrap(); write_file.write_all(&[1, 2, 3, 4]).unwrap(); @@ -118,7 +118,7 @@ fn test_directory_delete(directory: &mut Directory) { assert!(directory.delete(*TEST_PATH).is_err()); } -fn test_directory(directory: &mut Directory) { +fn test_directory(directory: &mut dyn Directory) { test_simple(directory); test_seek(directory); test_rewrite_forbidden(directory); @@ -129,7 +129,7 @@ fn test_directory(directory: &mut dyn Directory) { test_watch(directory); } -fn test_watch(directory: &mut Directory) { +fn test_watch(directory: &mut dyn Directory) { let counter: Arc<AtomicUsize> = Default::default(); let counter_clone = counter.clone(); let watch_callback = Box::new(move || { @@ -163,7 +163,7 @@ fn test_watch(directory: &mut Directory) { assert_eq!(10, counter.load(Ordering::SeqCst)); } -fn test_lock_non_blocking(directory: &mut Directory) { +fn test_lock_non_blocking(directory: &mut dyn Directory) { { let lock_a_res = directory.acquire_lock(&Lock { filepath: PathBuf::from("a.lock"), @@ -188,7 +188,7 @@ fn test_lock_non_blocking(directory: &mut Directory) { assert!(lock_a_res.is_ok()); } -fn test_lock_blocking(directory: &mut Directory) { +fn test_lock_blocking(directory: &mut dyn Directory) { let lock_a_res = directory.acquire_lock(&Lock { filepath: PathBuf::from("a.lock"), is_blocking: true, diff --git a/src/directory/watch_event_router.rs b/src/directory/watch_event_router.rs index 820c73a11..6aa898c5c 100644 --- a/src/directory/watch_event_router.rs +++ b/src/directory/watch_event_router.rs @@ -3,7 +3,7 @@ use std::sync::RwLock; use std::sync::Weak; /// Type alias for callbacks registered when watching files of a `Directory`. -pub type WatchCallback = Box<Fn() -> () + Sync + Send>; +pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>; /// Helper struct to implement the watch method in `Directory` implementations. /// @@ -67,7 +67,7 @@ impl WatchCallbackList { #[cfg(test)] mod tests { - use directory::WatchCallbackList; + use crate::directory::WatchCallbackList; use std::mem; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; diff --git a/src/docset.rs b/src/docset.rs index 5d6b6f677..f72e10225 100644 --- a/src/docset.rs +++ b/src/docset.rs @@ -1,9 +1,9 @@ -use common::BitSet; -use fastfield::DeleteBitSet; +use crate::common::BitSet; +use crate::fastfield::DeleteBitSet; +use crate::DocId; use std::borrow::Borrow; use std::borrow::BorrowMut; use std::cmp::Ordering; -use DocId; /// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`. 
#[derive(PartialEq, Eq, Debug)] diff --git a/src/error.rs b/src/error.rs index 5da069105..142250ca1 100644 --- a/src/error.rs +++ b/src/error.rs @@ -2,11 +2,11 @@ use std::io; -use directory::error::LockError; -use directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; -use fastfield::FastFieldNotAvailableError; -use query; -use schema; +use crate::directory::error::LockError; +use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; +use crate::fastfield::FastFieldNotAvailableError; +use crate::query; +use crate::schema; use serde_json; use std::fmt; use std::path::PathBuf; @@ -34,7 +34,7 @@ impl DataCorruption { } impl fmt::Debug for DataCorruption { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { write!(f, "Data corruption: ")?; if let Some(ref filepath) = &self.filepath { write!(f, "(in file `{:?}`)", filepath)?; diff --git a/src/fastfield/bytes/mod.rs b/src/fastfield/bytes/mod.rs index 0106d3a8d..9998e7cb1 100644 --- a/src/fastfield/bytes/mod.rs +++ b/src/fastfield/bytes/mod.rs @@ -6,8 +6,8 @@ pub use self::writer::BytesFastFieldWriter; #[cfg(test)] mod tests { - use schema::Schema; - use Index; + use crate::schema::Schema; + use crate::Index; #[test] fn test_bytes() { diff --git a/src/fastfield/bytes/reader.rs b/src/fastfield/bytes/reader.rs index 11b652066..bf120152c 100644 --- a/src/fastfield/bytes/reader.rs +++ b/src/fastfield/bytes/reader.rs @@ -1,8 +1,8 @@ use owning_ref::OwningRef; -use directory::ReadOnlySource; -use fastfield::FastFieldReader; -use DocId; +use crate::directory::ReadOnlySource; +use crate::fastfield::FastFieldReader; +use crate::DocId; /// Reader for byte array fast fields /// diff --git a/src/fastfield/bytes/writer.rs b/src/fastfield/bytes/writer.rs index 472e8d682..8b64cbb47 100644 --- a/src/fastfield/bytes/writer.rs +++ b/src/fastfield/bytes/writer.rs @@ -1,8 +1,8 @@ use std::io; -use fastfield::serializer::FastFieldSerializer; -use schema::{Document, Field, Value}; -use DocId; +use crate::fastfield::serializer::FastFieldSerializer; +use crate::schema::{Document, Field, Value}; +use crate::DocId; /// Writer for byte array (as in, any number of bytes per document) fast fields /// diff --git a/src/fastfield/delete.rs b/src/fastfield/delete.rs index d77dcc0f1..19a30e999 100644 --- a/src/fastfield/delete.rs +++ b/src/fastfield/delete.rs @@ -1,11 +1,11 @@ +use crate::common::HasLen; +use crate::directory::ReadOnlySource; +use crate::directory::WritePtr; +use crate::space_usage::ByteCount; +use crate::DocId; use bit_set::BitSet; -use common::HasLen; -use directory::ReadOnlySource; -use directory::WritePtr; -use space_usage::ByteCount; use std::io; use std::io::Write; -use DocId; /// Write a delete `BitSet` /// @@ -82,8 +82,8 @@ impl HasLen for DeleteBitSet { #[cfg(test)] mod tests { use super::*; + use crate::directory::*; use bit_set::BitSet; - use directory::*; use std::path::PathBuf; fn test_delete_bitset_helper(bitset: &BitSet) { diff --git a/src/fastfield/error.rs b/src/fastfield/error.rs index df6c2febe..96160cb75 100644 --- a/src/fastfield/error.rs +++ b/src/fastfield/error.rs @@ -1,4 +1,4 @@ -use schema::FieldEntry; +use crate::schema::FieldEntry; use std::result; /// `FastFieldNotAvailableError` is returned when the diff --git a/src/fastfield/facet_reader.rs b/src/fastfield/facet_reader.rs index 4c61e5dc3..4bf8f3671 100644 --- a/src/fastfield/facet_reader.rs +++ b/src/fastfield/facet_reader.rs @@ -1,9 +1,9 
@@ use super::MultiValueIntFastFieldReader; -use schema::Facet; +use crate::schema::Facet; +use crate::termdict::TermDictionary; +use crate::termdict::TermOrdinal; +use crate::DocId; use std::str; -use termdict::TermDictionary; -use termdict::TermOrdinal; -use DocId; /// The facet reader makes it possible to access the list of /// facets associated to a given document in a specific diff --git a/src/fastfield/mod.rs b/src/fastfield/mod.rs index 6e5c3b76a..79f125233 100644 --- a/src/fastfield/mod.rs +++ b/src/fastfield/mod.rs @@ -33,10 +33,10 @@ pub use self::reader::FastFieldReader; pub use self::readers::FastFieldReaders; pub use self::serializer::FastFieldSerializer; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; -use common; -use schema::Cardinality; -use schema::FieldType; -use schema::Value; +use crate::common; +use crate::schema::Cardinality; +use crate::schema::FieldType; +use crate::schema::Value; mod bytes; mod delete; @@ -126,16 +126,16 @@ fn value_to_u64(value: &Value) -> u64 { mod tests { use super::*; - use common::CompositeFile; - use directory::{Directory, RAMDirectory, WritePtr}; - use fastfield::FastFieldReader; + use crate::common::CompositeFile; + use crate::directory::{Directory, RAMDirectory, WritePtr}; + use crate::fastfield::FastFieldReader; + use crate::schema::Document; + use crate::schema::Field; + use crate::schema::Schema; + use crate::schema::FAST; use rand::prelude::SliceRandom; use rand::rngs::StdRng; use rand::SeedableRng; - use schema::Document; - use schema::Field; - use schema::Schema; - use schema::FAST; use std::collections::HashMap; use std::path::Path; diff --git a/src/fastfield/multivalued/mod.rs b/src/fastfield/multivalued/mod.rs index ad23fb710..9a57d15bb 100644 --- a/src/fastfield/multivalued/mod.rs +++ b/src/fastfield/multivalued/mod.rs @@ -7,16 +7,16 @@ pub use self::writer::MultiValueIntFastFieldWriter; #[cfg(test)] mod tests { - extern crate time; + use time; use self::time::Duration; - use collector::TopDocs; - use query::QueryParser; - use schema::Cardinality; - use schema::Facet; - use schema::IntOptions; - use schema::Schema; - use Index; + use crate::collector::TopDocs; + use crate::query::QueryParser; + use crate::schema::Cardinality; + use crate::schema::Facet; + use crate::schema::IntOptions; + use crate::schema::Schema; + use crate::Index; #[test] fn test_multivalued_u64() { diff --git a/src/fastfield/multivalued/reader.rs b/src/fastfield/multivalued/reader.rs index ee3c6154b..95b62eafd 100644 --- a/src/fastfield/multivalued/reader.rs +++ b/src/fastfield/multivalued/reader.rs @@ -1,5 +1,5 @@ -use fastfield::{FastFieldReader, FastValue}; -use DocId; +use crate::fastfield::{FastFieldReader, FastValue}; +use crate::DocId; /// Reader for a multivalued `u64` fast field. 
/// @@ -64,8 +64,8 @@ impl MultiValueIntFastFieldReader { #[cfg(test)] mod tests { - use core::Index; - use schema::{Facet, Schema}; + use crate::core::Index; + use crate::schema::{Facet, Schema}; #[test] fn test_multifastfield_reader() { diff --git a/src/fastfield/multivalued/writer.rs b/src/fastfield/multivalued/writer.rs index a4186ffd7..9899fb19d 100644 --- a/src/fastfield/multivalued/writer.rs +++ b/src/fastfield/multivalued/writer.rs @@ -1,13 +1,13 @@ -use fastfield::serializer::FastSingleFieldSerializer; -use fastfield::value_to_u64; -use fastfield::FastFieldSerializer; +use crate::fastfield::serializer::FastSingleFieldSerializer; +use crate::fastfield::value_to_u64; +use crate::fastfield::FastFieldSerializer; +use crate::postings::UnorderedTermId; +use crate::schema::{Document, Field}; +use crate::termdict::TermOrdinal; +use crate::DocId; use itertools::Itertools; -use postings::UnorderedTermId; -use schema::{Document, Field}; use std::collections::HashMap; use std::io; -use termdict::TermOrdinal; -use DocId; /// Writer for multi-valued (as in, more than one value per document) /// int fast field. @@ -116,7 +116,7 @@ impl MultiValueIntFastFieldWriter { } { // writing the values themselves. - let mut value_serializer: FastSingleFieldSerializer<_>; + let mut value_serializer: FastSingleFieldSerializer<'_, _>; match mapping_opt { Some(mapping) => { value_serializer = serializer.new_u64_fast_field_with_idx( diff --git a/src/fastfield/reader.rs b/src/fastfield/reader.rs index bba8c244a..867163b99 100644 --- a/src/fastfield/reader.rs +++ b/src/fastfield/reader.rs @@ -1,18 +1,18 @@ use super::FastValue; -use common::bitpacker::BitUnpacker; -use common::compute_num_bits; -use common::BinarySerializable; -use common::CompositeFile; -use directory::ReadOnlySource; -use directory::{Directory, RAMDirectory, WritePtr}; -use fastfield::{FastFieldSerializer, FastFieldsWriter}; +use crate::common::bitpacker::BitUnpacker; +use crate::common::compute_num_bits; +use crate::common::BinarySerializable; +use crate::common::CompositeFile; +use crate::directory::ReadOnlySource; +use crate::directory::{Directory, RAMDirectory, WritePtr}; +use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; +use crate::schema::Schema; +use crate::schema::FAST; +use crate::DocId; use owning_ref::OwningRef; -use schema::Schema; -use schema::FAST; use std::collections::HashMap; use std::marker::PhantomData; use std::path::Path; -use DocId; /// Trait for accessing a fastfield. /// diff --git a/src/fastfield/readers.rs b/src/fastfield/readers.rs index 47b4391d5..4019cf37c 100644 --- a/src/fastfield/readers.rs +++ b/src/fastfield/readers.rs @@ -1,11 +1,11 @@ -use common::CompositeFile; -use fastfield::BytesFastFieldReader; -use fastfield::MultiValueIntFastFieldReader; -use fastfield::{FastFieldNotAvailableError, FastFieldReader}; -use schema::{Cardinality, Field, FieldType, Schema}; -use space_usage::PerFieldSpaceUsage; +use crate::common::CompositeFile; +use crate::fastfield::BytesFastFieldReader; +use crate::fastfield::MultiValueIntFastFieldReader; +use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader}; +use crate::schema::{Cardinality, Field, FieldType, Schema}; +use crate::space_usage::PerFieldSpaceUsage; +use crate::Result; use std::collections::HashMap; -use Result; /// Provides access to all of the FastFieldReader. 
/// diff --git a/src/fastfield/serializer.rs b/src/fastfield/serializer.rs index ef781c2a0..60f3c1b97 100644 --- a/src/fastfield/serializer.rs +++ b/src/fastfield/serializer.rs @@ -1,10 +1,10 @@ -use common::bitpacker::BitPacker; -use common::compute_num_bits; -use common::BinarySerializable; -use common::CompositeWrite; -use common::CountingWriter; -use directory::WritePtr; -use schema::Field; +use crate::common::bitpacker::BitPacker; +use crate::common::compute_num_bits; +use crate::common::BinarySerializable; +use crate::common::CompositeWrite; +use crate::common::CountingWriter; +use crate::directory::WritePtr; +use crate::schema::Field; use std::io::{self, Write}; /// `FastFieldSerializer` is in charge of serializing @@ -45,7 +45,7 @@ impl FastFieldSerializer { field: Field, min_value: u64, max_value: u64, - ) -> io::Result>> { + ) -> io::Result>> { self.new_u64_fast_field_with_idx(field, min_value, max_value, 0) } @@ -56,7 +56,7 @@ impl FastFieldSerializer { min_value: u64, max_value: u64, idx: usize, - ) -> io::Result>> { + ) -> io::Result>> { let field_write = self.composite_write.for_field_with_idx(field, idx); FastSingleFieldSerializer::open(field_write, min_value, max_value) } @@ -66,7 +66,7 @@ impl FastFieldSerializer { &mut self, field: Field, idx: usize, - ) -> io::Result>> { + ) -> io::Result>> { let field_write = self.composite_write.for_field_with_idx(field, idx); FastBytesFieldSerializer::open(field_write) } @@ -79,7 +79,7 @@ impl FastFieldSerializer { } } -pub struct FastSingleFieldSerializer<'a, W: Write + 'a> { +pub struct FastSingleFieldSerializer<'a, W: Write> { bit_packer: BitPacker, write: &'a mut W, min_value: u64, @@ -127,7 +127,7 @@ impl<'a, W: Write> FastSingleFieldSerializer<'a, W> { } } -pub struct FastBytesFieldSerializer<'a, W: Write + 'a> { +pub struct FastBytesFieldSerializer<'a, W: Write> { write: &'a mut W, } diff --git a/src/fastfield/writer.rs b/src/fastfield/writer.rs index 1c3d11e7b..a9d91208f 100644 --- a/src/fastfield/writer.rs +++ b/src/fastfield/writer.rs @@ -1,13 +1,13 @@ use super::multivalued::MultiValueIntFastFieldWriter; -use common; -use common::BinarySerializable; -use common::VInt; -use fastfield::{BytesFastFieldWriter, FastFieldSerializer}; -use postings::UnorderedTermId; -use schema::{Cardinality, Document, Field, FieldType, Schema}; +use crate::common; +use crate::common::BinarySerializable; +use crate::common::VInt; +use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer}; +use crate::postings::UnorderedTermId; +use crate::schema::{Cardinality, Document, Field, FieldType, Schema}; +use crate::termdict::TermOrdinal; use std::collections::HashMap; use std::io; -use termdict::TermOrdinal; /// The fastfieldswriter regroup all of the fast field writers. pub struct FastFieldsWriter { diff --git a/src/fieldnorm/reader.rs b/src/fieldnorm/reader.rs index e16f3defb..8a57739fa 100644 --- a/src/fieldnorm/reader.rs +++ b/src/fieldnorm/reader.rs @@ -1,6 +1,6 @@ use super::{fieldnorm_to_id, id_to_fieldnorm}; -use directory::ReadOnlySource; -use DocId; +use crate::directory::ReadOnlySource; +use crate::DocId; /// Reads the fieldnorm associated to a document. 
/// The fieldnorm represents the length associated to diff --git a/src/fieldnorm/serializer.rs b/src/fieldnorm/serializer.rs index af6bcd3dc..057626fcc 100644 --- a/src/fieldnorm/serializer.rs +++ b/src/fieldnorm/serializer.rs @@ -1,6 +1,6 @@ -use common::CompositeWrite; -use directory::WritePtr; -use schema::Field; +use crate::common::CompositeWrite; +use crate::directory::WritePtr; +use crate::schema::Field; use std::io; use std::io::Write; diff --git a/src/fieldnorm/writer.rs b/src/fieldnorm/writer.rs index 5302f2856..9df7f94fc 100644 --- a/src/fieldnorm/writer.rs +++ b/src/fieldnorm/writer.rs @@ -1,9 +1,9 @@ -use DocId; +use crate::DocId; use super::fieldnorm_to_id; use super::FieldNormsSerializer; -use schema::Field; -use schema::Schema; +use crate::schema::Field; +use crate::schema::Schema; use std::io; /// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte diff --git a/src/functional_test.rs b/src/functional_test.rs index 1c2a6501c..ecd30beae 100644 --- a/src/functional_test.rs +++ b/src/functional_test.rs @@ -1,10 +1,10 @@ use rand::thread_rng; use std::collections::HashSet; +use crate::schema::*; +use crate::Index; +use crate::Searcher; use rand::Rng; -use schema::*; -use Index; -use Searcher; fn check_index_content(searcher: &Searcher, vals: &HashSet) { assert!(searcher.segment_readers().len() < 20); diff --git a/src/indexer/delete_queue.rs b/src/indexer/delete_queue.rs index 595691f98..73effff65 100644 --- a/src/indexer/delete_queue.rs +++ b/src/indexer/delete_queue.rs @@ -1,8 +1,8 @@ use super::operation::DeleteOperation; +use crate::Opstamp; use std::mem; use std::ops::DerefMut; use std::sync::{Arc, RwLock}; -use Opstamp; // The DeleteQueue is similar in conceptually to a multiple // consumer single producer broadcast channel. @@ -250,7 +250,7 @@ impl DeleteCursor { mod tests { use super::{DeleteOperation, DeleteQueue}; - use schema::{Field, Term}; + use crate::schema::{Field, Term}; #[test] fn test_deletequeue() { diff --git a/src/indexer/doc_opstamp_mapping.rs b/src/indexer/doc_opstamp_mapping.rs index d616800ab..71ec3a8f4 100644 --- a/src/indexer/doc_opstamp_mapping.rs +++ b/src/indexer/doc_opstamp_mapping.rs @@ -1,6 +1,6 @@ +use crate::DocId; +use crate::Opstamp; use std::sync::Arc; -use DocId; -use Opstamp; // Doc to opstamp is used to identify which // document should be deleted. 
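The `delete_queue` changes above are import-only, but the comment they sit next to (a structure conceptually similar to a multiple-consumer, single-producer broadcast channel) deserves a concrete picture: one producer appends operations to a shared log, and every consumer advances its own cursor over that log. A toy sketch of that shape, not tantivy's implementation (which also garbage-collects blocks that every cursor has passed):

```rust
use std::sync::{Arc, RwLock};

// One producer appends to a shared log; each consumer owns an
// independent cursor, so every consumer observes every operation.
struct Broadcast<T> {
    log: Arc<RwLock<Vec<T>>>,
}

struct Cursor<T> {
    log: Arc<RwLock<Vec<T>>>,
    pos: usize,
}

impl<T: Clone> Broadcast<T> {
    fn new() -> Broadcast<T> {
        Broadcast {
            log: Arc::new(RwLock::new(Vec::new())),
        }
    }
    fn push(&self, item: T) {
        self.log.write().unwrap().push(item);
    }
    fn cursor(&self) -> Cursor<T> {
        Cursor {
            log: Arc::clone(&self.log),
            pos: 0,
        }
    }
}

impl<T: Clone> Iterator for Cursor<T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        let item = self.log.read().unwrap().get(self.pos).cloned();
        if item.is_some() {
            self.pos += 1;
        }
        item
    }
}

fn main() {
    let queue = Broadcast::new();
    let mut first = queue.cursor();
    let mut second = queue.cursor();
    queue.push("delete term=abc");
    // Both consumers see the same operation.
    assert_eq!(first.next(), Some("delete term=abc"));
    assert_eq!(second.next(), Some("delete term=abc"));
    assert_eq!(first.next(), None);
}
```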
diff --git a/src/indexer/index_writer.rs b/src/indexer/index_writer.rs index bd3d829bf..bb16641b7 100644 --- a/src/indexer/index_writer.rs +++ b/src/indexer/index_writer.rs @@ -1,37 +1,37 @@ use super::operation::{AddOperation, UserOperation}; use super::segment_updater::SegmentUpdater; use super::PreparedCommit; +use crate::core::Index; +use crate::core::Segment; +use crate::core::SegmentComponent; +use crate::core::SegmentId; +use crate::core::SegmentMeta; +use crate::core::SegmentReader; +use crate::directory::DirectoryLock; +use crate::docset::DocSet; +use crate::error::TantivyError; +use crate::fastfield::write_delete_bitset; +use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue}; +use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping; +use crate::indexer::operation::DeleteOperation; +use crate::indexer::stamper::Stamper; +use crate::indexer::MergePolicy; +use crate::indexer::SegmentEntry; +use crate::indexer::SegmentWriter; +use crate::postings::compute_table_size; +use crate::schema::Document; +use crate::schema::IndexRecordOption; +use crate::schema::Term; +use crate::Opstamp; +use crate::Result; use bit_set::BitSet; -use core::Index; -use core::Segment; -use core::SegmentComponent; -use core::SegmentId; -use core::SegmentMeta; -use core::SegmentReader; use crossbeam::channel; -use directory::DirectoryLock; -use docset::DocSet; -use error::TantivyError; -use fastfield::write_delete_bitset; use futures::{Canceled, Future}; -use indexer::delete_queue::{DeleteCursor, DeleteQueue}; -use indexer::doc_opstamp_mapping::DocToOpstampMapping; -use indexer::operation::DeleteOperation; -use indexer::stamper::Stamper; -use indexer::MergePolicy; -use indexer::SegmentEntry; -use indexer::SegmentWriter; -use postings::compute_table_size; -use schema::Document; -use schema::IndexRecordOption; -use schema::Term; use std::mem; use std::ops::Range; use std::sync::Arc; use std::thread; use std::thread::JoinHandle; -use Opstamp; -use Result; // Size of the margin for the heap. A segment is closed when the remaining memory // in the heap goes below MARGIN_IN_BYTES. @@ -268,7 +268,7 @@ fn index_documents( memory_budget: usize, segment: &Segment, generation: usize, - document_iterator: &mut Iterator>, + document_iterator: &mut dyn Iterator>, segment_updater: &mut SegmentUpdater, mut delete_cursor: DeleteCursor, ) -> Result { @@ -440,12 +440,12 @@ impl IndexWriter { } /// Accessor to the merge policy. - pub fn get_merge_policy(&self) -> Arc> { + pub fn get_merge_policy(&self) -> Arc> { self.segment_updater.get_merge_policy() } /// Set the merge policy. - pub fn set_merge_policy(&self, merge_policy: Box) { + pub fn set_merge_policy(&self, merge_policy: Box) { self.segment_updater.set_merge_policy(merge_policy); } @@ -603,7 +603,7 @@ impl IndexWriter { /// It is also possible to add a payload to the `commit` /// using this API. /// See [`PreparedCommit::set_payload()`](PreparedCommit.html) - pub fn prepare_commit(&mut self) -> Result { + pub fn prepare_commit(&mut self) -> Result> { // Here, because we join all of the worker threads, // all of the segment update for this commit have been // sent. 
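Besides the path rewrites, this `index_writer` hunk shows the second recurring 2018 change: bare trait-object types (`Box<MergePolicy>`, `&mut Iterator<...>`) now take an explicit `dyn`. A small self-contained illustration; the trait body here is invented, only the name mirrors the diff:

```rust
trait MergePolicy {
    fn name(&self) -> &'static str;
}

struct NoMerge;

impl MergePolicy for NoMerge {
    fn name(&self) -> &'static str {
        "no-merge"
    }
}

// Rust 2015 accepted a bare trait name as a type; Rust 2018 lints it
// and asks for an explicit `dyn`, which is the only change in most of
// the signatures above.
fn describe(policy: &dyn MergePolicy) -> String {
    format!("policy = {}", policy.name())
}

fn boxed_policy() -> Box<dyn MergePolicy> {
    Box::new(NoMerge)
}

fn main() {
    let policy = boxed_policy();
    println!("{}", describe(policy.as_ref()));
}
```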
@@ -773,15 +773,15 @@ mod tests { use super::super::operation::UserOperation; use super::initial_table_size; - use collector::TopDocs; - use directory::error::LockError; - use error::*; - use indexer::NoMergePolicy; - use query::TermQuery; - use schema::{self, IndexRecordOption}; - use Index; - use ReloadPolicy; - use Term; + use crate::collector::TopDocs; + use crate::directory::error::LockError; + use crate::error::*; + use crate::indexer::NoMergePolicy; + use crate::query::TermQuery; + use crate::schema::{self, IndexRecordOption}; + use crate::Index; + use crate::ReloadPolicy; + use crate::Term; #[test] fn test_operations_group() { diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs index 4c0731cfe..7efc7e7b9 100644 --- a/src/indexer/log_merge_policy.rs +++ b/src/indexer/log_merge_policy.rs @@ -1,5 +1,5 @@ use super::merge_policy::{MergeCandidate, MergePolicy}; -use core::SegmentMeta; +use crate::core::SegmentMeta; use std::cmp; use std::f64; @@ -95,8 +95,8 @@ impl Default for LogMergePolicy { #[cfg(test)] mod tests { use super::*; - use core::{SegmentId, SegmentMeta}; - use indexer::merge_policy::MergePolicy; + use crate::core::{SegmentId, SegmentMeta}; + use crate::indexer::merge_policy::MergePolicy; fn test_merge_policy() -> LogMergePolicy { let mut log_merge_policy = LogMergePolicy::default(); diff --git a/src/indexer/merge_operation.rs b/src/indexer/merge_operation.rs index 0474fefd8..2fc2c1ea9 100644 --- a/src/indexer/merge_operation.rs +++ b/src/indexer/merge_operation.rs @@ -1,7 +1,7 @@ +use crate::Opstamp; +use crate::SegmentId; use census::{Inventory, TrackedObject}; use std::collections::HashSet; -use Opstamp; -use SegmentId; #[derive(Default)] pub struct MergeOperationInventory(Inventory); diff --git a/src/indexer/merge_policy.rs b/src/indexer/merge_policy.rs index 8dac58c9f..93499e071 100644 --- a/src/indexer/merge_policy.rs +++ b/src/indexer/merge_policy.rs @@ -1,5 +1,5 @@ -use core::SegmentId; -use core::SegmentMeta; +use crate::core::SegmentId; +use crate::core::SegmentMeta; use std::fmt::Debug; use std::marker; @@ -39,8 +39,8 @@ impl MergePolicy for NoMergePolicy { pub mod tests { use super::*; - use core::SegmentId; - use core::SegmentMeta; + use crate::core::SegmentId; + use crate::core::SegmentMeta; /// `MergePolicy` useful for test purposes. 
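The `log_merge_policy` hunk above touches only imports, but the name hints at the policy's idea: segments are bucketed by the logarithm of their size, so merges are proposed between similarly sized segments. A rough sketch of that bucketing only (tantivy's real policy adds minimum layer sizes and other knobs):

```rust
// Segments are bucketed by the floored log2 of their size; only
// segments falling in the same bucket are merged together.
fn floor_log2(num_docs: u64) -> u32 {
    63 - num_docs.max(1).leading_zeros()
}

fn main() {
    // 10 and 12 share bucket 3; 1000 and 1023 share bucket 9, so the
    // policy would propose (10, 12) and (1000, 1023) as candidates.
    for &num_docs in [10u64, 12, 1000, 1023].iter() {
        println!("{:>5} docs -> bucket {}", num_docs, floor_log2(num_docs));
    }
}
```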
/// diff --git a/src/indexer/merger.rs b/src/indexer/merger.rs index ab3e22132..4d15d7ffa 100644 --- a/src/indexer/merger.rs +++ b/src/indexer/merger.rs @@ -1,31 +1,31 @@ -use common::MAX_DOC_LIMIT; -use core::Segment; -use core::SegmentReader; -use core::SerializableSegment; -use docset::DocSet; -use fastfield::BytesFastFieldReader; -use fastfield::DeleteBitSet; -use fastfield::FastFieldReader; -use fastfield::FastFieldSerializer; -use fastfield::MultiValueIntFastFieldReader; -use fieldnorm::FieldNormReader; -use fieldnorm::FieldNormsSerializer; -use fieldnorm::FieldNormsWriter; -use indexer::SegmentSerializer; +use crate::common::MAX_DOC_LIMIT; +use crate::core::Segment; +use crate::core::SegmentReader; +use crate::core::SerializableSegment; +use crate::docset::DocSet; +use crate::fastfield::BytesFastFieldReader; +use crate::fastfield::DeleteBitSet; +use crate::fastfield::FastFieldReader; +use crate::fastfield::FastFieldSerializer; +use crate::fastfield::MultiValueIntFastFieldReader; +use crate::fieldnorm::FieldNormReader; +use crate::fieldnorm::FieldNormsSerializer; +use crate::fieldnorm::FieldNormsWriter; +use crate::indexer::SegmentSerializer; +use crate::postings::InvertedIndexSerializer; +use crate::postings::Postings; +use crate::schema::Cardinality; +use crate::schema::FieldType; +use crate::schema::{Field, Schema}; +use crate::store::StoreWriter; +use crate::termdict::TermMerger; +use crate::termdict::TermOrdinal; +use crate::DocId; +use crate::Result; +use crate::TantivyError; use itertools::Itertools; -use postings::InvertedIndexSerializer; -use postings::Postings; -use schema::Cardinality; -use schema::FieldType; -use schema::{Field, Schema}; use std::cmp; use std::collections::HashMap; -use store::StoreWriter; -use termdict::TermMerger; -use termdict::TermOrdinal; -use DocId; -use Result; -use TantivyError; fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 { let mut total_tokens = 0u64; @@ -692,28 +692,28 @@ impl SerializableSegment for IndexMerger { #[cfg(test)] mod tests { + use crate::collector::tests::TestCollector; + use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector}; + use crate::collector::{Count, FacetCollector}; + use crate::core::Index; + use crate::query::AllQuery; + use crate::query::BooleanQuery; + use crate::query::TermQuery; + use crate::schema; + use crate::schema::Cardinality; + use crate::schema::Document; + use crate::schema::Facet; + use crate::schema::IndexRecordOption; + use crate::schema::IntOptions; + use crate::schema::Term; + use crate::schema::TextFieldIndexing; + use crate::schema::INDEXED; + use crate::DocAddress; + use crate::IndexWriter; + use crate::Searcher; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; - use collector::tests::TestCollector; - use collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector}; - use collector::{Count, FacetCollector}; - use core::Index; use futures::Future; - use query::AllQuery; - use query::BooleanQuery; - use query::TermQuery; - use schema; - use schema::Cardinality; - use schema::Document; - use schema::Facet; - use schema::IndexRecordOption; - use schema::IntOptions; - use schema::Term; - use schema::TextFieldIndexing; - use schema::INDEXED; use std::io::Cursor; - use DocAddress; - use IndexWriter; - use Searcher; #[test] fn test_index_merger_no_deletes() { diff --git a/src/indexer/operation.rs b/src/indexer/operation.rs index 03bfabb59..597fc4e3e 100644 --- a/src/indexer/operation.rs +++ b/src/indexer/operation.rs @@ -1,6 +1,6 
@@ -use schema::Document; -use schema::Term; -use Opstamp; +use crate::schema::Document; +use crate::schema::Term; +use crate::Opstamp; /// Timestamped Delete operation. #[derive(Clone, Eq, PartialEq, Debug)] diff --git a/src/indexer/prepared_commit.rs b/src/indexer/prepared_commit.rs index 92f47cdfd..bdec577d6 100644 --- a/src/indexer/prepared_commit.rs +++ b/src/indexer/prepared_commit.rs @@ -1,6 +1,6 @@ use super::IndexWriter; -use Opstamp; -use Result; +use crate::Opstamp; +use crate::Result; /// A prepared commit pub struct PreparedCommit<'a> { @@ -10,7 +10,7 @@ pub struct PreparedCommit<'a> { } impl<'a> PreparedCommit<'a> { - pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit { + pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> { PreparedCommit { index_writer, payload: None, diff --git a/src/indexer/segment_entry.rs b/src/indexer/segment_entry.rs index 34bbaf8c2..7f4c8856c 100644 --- a/src/indexer/segment_entry.rs +++ b/src/indexer/segment_entry.rs @@ -1,7 +1,7 @@ +use crate::core::SegmentId; +use crate::core::SegmentMeta; +use crate::indexer::delete_queue::DeleteCursor; use bit_set::BitSet; -use core::SegmentId; -use core::SegmentMeta; -use indexer::delete_queue::DeleteCursor; use std::fmt; /// A segment entry describes the state of @@ -67,7 +67,7 @@ impl SegmentEntry { } impl fmt::Debug for SegmentEntry { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "SegmentEntry({:?})", self.meta) } } diff --git a/src/indexer/segment_manager.rs b/src/indexer/segment_manager.rs index 0087ac12c..e4113a45f 100644 --- a/src/indexer/segment_manager.rs +++ b/src/indexer/segment_manager.rs @@ -1,16 +1,16 @@ use super::segment_register::SegmentRegister; -use core::SegmentId; -use core::SegmentMeta; -use core::META_FILEPATH; -use error::TantivyError; -use indexer::delete_queue::DeleteCursor; -use indexer::SegmentEntry; +use crate::core::SegmentId; +use crate::core::SegmentMeta; +use crate::core::META_FILEPATH; +use crate::error::TantivyError; +use crate::indexer::delete_queue::DeleteCursor; +use crate::indexer::SegmentEntry; +use crate::Result as TantivyResult; use std::collections::hash_set::HashSet; use std::fmt::{self, Debug, Formatter}; use std::path::PathBuf; use std::sync::RwLock; use std::sync::{RwLockReadGuard, RwLockWriteGuard}; -use Result as TantivyResult; #[derive(Default)] struct SegmentRegisters { @@ -29,7 +29,7 @@ pub struct SegmentManager { } impl Debug for SegmentManager { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { let lock = self.read(); write!( f, @@ -91,13 +91,13 @@ impl SegmentManager { // Lock poisoning should never happen : // The lock is acquired and released within this class, // and the operations cannot panic. 
- fn read(&self) -> RwLockReadGuard { + fn read(&self) -> RwLockReadGuard<'_, SegmentRegisters> { self.registers .read() .expect("Failed to acquire read lock on SegmentManager.") } - fn write(&self) -> RwLockWriteGuard { + fn write(&self) -> RwLockWriteGuard<'_, SegmentRegisters> { self.registers .write() .expect("Failed to acquire write lock on SegmentManager.") diff --git a/src/indexer/segment_register.rs b/src/indexer/segment_register.rs index 6d4dc7e03..a8b3232a5 100644 --- a/src/indexer/segment_register.rs +++ b/src/indexer/segment_register.rs @@ -1,7 +1,7 @@ -use core::SegmentId; -use core::SegmentMeta; -use indexer::delete_queue::DeleteCursor; -use indexer::segment_entry::SegmentEntry; +use crate::core::SegmentId; +use crate::core::SegmentMeta; +use crate::indexer::delete_queue::DeleteCursor; +use crate::indexer::segment_entry::SegmentEntry; use std::collections::HashMap; use std::collections::HashSet; use std::fmt::{self, Debug, Formatter}; @@ -20,7 +20,7 @@ pub struct SegmentRegister { } impl Debug for SegmentRegister { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { write!(f, "SegmentRegister(")?; for k in self.segment_states.keys() { write!(f, "{}, ", k.short_uuid_string())?; @@ -93,9 +93,9 @@ impl SegmentRegister { #[cfg(test)] mod tests { use super::*; - use core::SegmentId; - use core::SegmentMeta; - use indexer::delete_queue::*; + use crate::core::SegmentId; + use crate::core::SegmentMeta; + use crate::indexer::delete_queue::*; fn segment_ids(segment_register: &SegmentRegister) -> Vec { segment_register diff --git a/src/indexer/segment_serializer.rs b/src/indexer/segment_serializer.rs index a1425f58f..94c666e43 100644 --- a/src/indexer/segment_serializer.rs +++ b/src/indexer/segment_serializer.rs @@ -1,11 +1,11 @@ -use Result; +use crate::Result; -use core::Segment; -use core::SegmentComponent; -use fastfield::FastFieldSerializer; -use fieldnorm::FieldNormsSerializer; -use postings::InvertedIndexSerializer; -use store::StoreWriter; +use crate::core::Segment; +use crate::core::SegmentComponent; +use crate::fastfield::FastFieldSerializer; +use crate::fieldnorm::FieldNormsSerializer; +use crate::postings::InvertedIndexSerializer; +use crate::store::StoreWriter; /// Segment serializer is in charge of laying out on disk /// the data accumulated and sorted by the `SegmentWriter`. 
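The `Formatter<'_>`, `RwLockReadGuard<'_, ...>` and `RwLockWriteGuard<'_, ...>` rewrites above are the third recurring pattern: Rust 2018's idiom lints push for an explicit `'_` wherever a type silently borrows. A condensed version of the `SegmentManager` shape from these hunks, with the fields simplified for illustration:

```rust
use std::fmt;
use std::sync::{RwLock, RwLockReadGuard};

struct SegmentRegisters {
    segments: Vec<String>,
}

struct SegmentManager {
    registers: RwLock<SegmentRegisters>,
}

impl SegmentManager {
    // `'_` spells out that the guard borrows from `&self`; the 2018
    // idiom lint flags a bare `RwLockReadGuard<SegmentRegisters>`.
    fn read(&self) -> RwLockReadGuard<'_, SegmentRegisters> {
        self.registers.read().expect("poisoned lock")
    }
}

impl fmt::Debug for SegmentManager {
    // Same idiom for `Formatter<'_>`, as in the hunks above.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "SegmentManager({:?})", self.read().segments)
    }
}

fn main() {
    let manager = SegmentManager {
        registers: RwLock::new(SegmentRegisters {
            segments: vec!["seg-1".to_string()],
        }),
    };
    println!("{:?}", manager);
}
```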
diff --git a/src/indexer/segment_updater.rs b/src/indexer/segment_updater.rs index b56842a1d..b98331398 100644 --- a/src/indexer/segment_updater.rs +++ b/src/indexer/segment_updater.rs @@ -1,29 +1,31 @@ use super::segment_manager::{get_mergeable_segments, SegmentManager}; -use core::Index; -use core::IndexMeta; -use core::Segment; -use core::SegmentId; -use core::SegmentMeta; -use core::SerializableSegment; -use core::META_FILEPATH; -use directory::{Directory, DirectoryClone}; -use error::TantivyError; +use crate::core::Index; +use crate::core::IndexMeta; +use crate::core::Segment; +use crate::core::SegmentId; +use crate::core::SegmentMeta; +use crate::core::SerializableSegment; +use crate::core::META_FILEPATH; +use crate::directory::{Directory, DirectoryClone}; +use crate::error::TantivyError; +use crate::indexer::delete_queue::DeleteCursor; +use crate::indexer::index_writer::advance_deletes; +use crate::indexer::merge_operation::MergeOperationInventory; +use crate::indexer::merger::IndexMerger; +use crate::indexer::stamper::Stamper; +use crate::indexer::MergeOperation; +use crate::indexer::SegmentEntry; +use crate::indexer::SegmentSerializer; +use crate::indexer::{DefaultMergePolicy, MergePolicy}; +use crate::schema::Schema; +use crate::Opstamp; +use crate::Result; use futures::oneshot; use futures::sync::oneshot::Receiver; use futures::Future; use futures_cpupool::Builder as CpuPoolBuilder; use futures_cpupool::CpuFuture; use futures_cpupool::CpuPool; -use indexer::delete_queue::DeleteCursor; -use indexer::index_writer::advance_deletes; -use indexer::merge_operation::MergeOperationInventory; -use indexer::merger::IndexMerger; -use indexer::stamper::Stamper; -use indexer::MergeOperation; -use indexer::SegmentEntry; -use indexer::SegmentSerializer; -use indexer::{DefaultMergePolicy, MergePolicy}; -use schema::Schema; use serde_json; use std::borrow::BorrowMut; use std::collections::HashMap; @@ -36,8 +38,6 @@ use std::sync::Arc; use std::sync::RwLock; use std::thread; use std::thread::JoinHandle; -use Opstamp; -use Result; /// Save the index meta file. /// This operation is atomic : @@ -48,7 +48,7 @@ use Result; /// and flushed. /// /// This method is not part of tantivy's public API -pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> { +pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> { save_metas( &IndexMeta { segments: Vec::new(), @@ -69,7 +69,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> { /// and flushed. /// /// This method is not part of tantivy's public API -fn save_metas(metas: &IndexMeta, directory: &mut Directory) -> Result<()> { +fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> { info!("save metas"); let mut buffer = serde_json::to_vec_pretty(metas)?; // Just adding a new line at the end of the buffer. 
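`save_metas` above also gains a `dyn` on its `&mut dyn Directory` parameter, and its doc comment stresses that the save is atomic: either the whole meta file is written or nothing is. The classic way to get that guarantee, sketched here with plain `std::fs` rather than tantivy's `Directory` abstraction, is to write a temporary file and rename it over the target:

```rust
use std::fs;
use std::io::Write;
use std::path::Path;

// Write the serialized metas to a sibling temporary file, flush it,
// then rename it over the real path. The rename is the atomic step on
// POSIX filesystems, so readers see either the old or the new file.
fn save_metas_atomically(meta_json: &str, meta_path: &Path) -> std::io::Result<()> {
    let tmp_path = meta_path.with_extension("tmp");
    let mut tmp_file = fs::File::create(&tmp_path)?;
    tmp_file.write_all(meta_json.as_bytes())?;
    tmp_file.sync_all()?;
    fs::rename(&tmp_path, meta_path)
}

fn main() -> std::io::Result<()> {
    let meta_path = std::env::temp_dir().join("meta.json");
    save_metas_atomically("{\"segments\": []}\n", &meta_path)?;
    println!("wrote {}", meta_path.display());
    Ok(())
}
```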
@@ -142,7 +142,7 @@ struct InnerSegmentUpdater { pool: CpuPool, index: Index, segment_manager: SegmentManager, - merge_policy: RwLock>>, + merge_policy: RwLock>>, merging_thread_id: AtomicUsize, merging_threads: RwLock>>>, generation: AtomicUsize, @@ -179,11 +179,11 @@ impl SegmentUpdater { }))) } - pub fn get_merge_policy(&self) -> Arc> { + pub fn get_merge_policy(&self) -> Arc> { self.0.merge_policy.read().unwrap().clone() } - pub fn set_merge_policy(&self, merge_policy: Box) { + pub fn set_merge_policy(&self, merge_policy: Box) { let arc_merge_policy = Arc::new(merge_policy); *self.0.merge_policy.write().unwrap() = arc_merge_policy; } @@ -533,9 +533,9 @@ impl SegmentUpdater { #[cfg(test)] mod tests { - use indexer::merge_policy::tests::MergeWheneverPossible; - use schema::*; - use Index; + use crate::indexer::merge_policy::tests::MergeWheneverPossible; + use crate::schema::*; + use crate::Index; #[test] fn test_delete_during_merge() { diff --git a/src/indexer/segment_writer.rs b/src/indexer/segment_writer.rs index 0af1a74fd..b8fc00f21 100644 --- a/src/indexer/segment_writer.rs +++ b/src/indexer/segment_writer.rs @@ -1,23 +1,23 @@ use super::operation::AddOperation; -use core::Segment; -use core::SerializableSegment; -use fastfield::FastFieldsWriter; -use fieldnorm::FieldNormsWriter; -use indexer::segment_serializer::SegmentSerializer; -use postings::MultiFieldPostingsWriter; -use schema::FieldEntry; -use schema::FieldType; -use schema::Schema; -use schema::Term; -use schema::Value; +use crate::core::Segment; +use crate::core::SerializableSegment; +use crate::fastfield::FastFieldsWriter; +use crate::fieldnorm::FieldNormsWriter; +use crate::indexer::segment_serializer::SegmentSerializer; +use crate::postings::MultiFieldPostingsWriter; +use crate::schema::FieldEntry; +use crate::schema::FieldType; +use crate::schema::Schema; +use crate::schema::Term; +use crate::schema::Value; +use crate::tokenizer::BoxedTokenizer; +use crate::tokenizer::FacetTokenizer; +use crate::tokenizer::{TokenStream, Tokenizer}; +use crate::DocId; +use crate::Opstamp; +use crate::Result; use std::io; use std::str; -use tokenizer::BoxedTokenizer; -use tokenizer::FacetTokenizer; -use tokenizer::{TokenStream, Tokenizer}; -use DocId; -use Opstamp; -use Result; /// A `SegmentWriter` is in charge of creating segment index from a /// set of documents. @@ -31,7 +31,7 @@ pub struct SegmentWriter { fast_field_writers: FastFieldsWriter, fieldnorms_writer: FieldNormsWriter, doc_opstamps: Vec, - tokenizers: Vec>>, + tokenizers: Vec>>, } impl SegmentWriter { diff --git a/src/indexer/stamper.rs b/src/indexer/stamper.rs index 9385c098c..fde12148f 100644 --- a/src/indexer/stamper.rs +++ b/src/indexer/stamper.rs @@ -1,7 +1,7 @@ +use crate::Opstamp; use std::ops::Range; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; -use Opstamp; /// Stamper provides Opstamps, which is just an auto-increment id to label /// an operation. 
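The stamper doc comment above describes an auto-incrementing operation id. A minimal sketch of that idea, assuming nothing beyond `AtomicU64` (the real `Stamper` also hands out contiguous opstamp ranges for batched operations):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// A shared atomic counter handing out monotonically increasing
// opstamps; cloning is cheap, so every indexing thread can hold one.
#[derive(Clone)]
struct Stamper(Arc<AtomicU64>);

impl Stamper {
    fn new(first_opstamp: u64) -> Stamper {
        Stamper(Arc::new(AtomicU64::new(first_opstamp)))
    }
    fn stamp(&self) -> u64 {
        self.0.fetch_add(1, Ordering::SeqCst)
    }
}

fn main() {
    let stamper = Stamper::new(0);
    let worker_handle = stamper.clone();
    assert_eq!(stamper.stamp(), 0);
    // Both handles share the same counter.
    assert_eq!(worker_handle.stamp(), 1);
}
```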
diff --git a/src/lib.rs b/src/lib.rs index 5f3231d04..8a3e299f4 100755 --- a/src/lib.rs +++ b/src/lib.rs @@ -125,31 +125,8 @@ extern crate failure; #[cfg(feature = "mmap")] extern crate atomicwrites; -extern crate base64; -extern crate bit_set; -extern crate bitpacking; -extern crate byteorder; -extern crate combine; -extern crate crossbeam; -extern crate fnv; -extern crate futures; -extern crate futures_cpupool; -extern crate htmlescape; -extern crate itertools; -extern crate levenshtein_automata; #[cfg(feature = "mmap")] extern crate memmap; -extern crate num_cpus; -extern crate owning_ref; -extern crate regex; -extern crate rust_stemmers; -extern crate scoped_pool; -extern crate serde; -extern crate stable_deref_trait; -extern crate tantivy_fst; -extern crate tempdir; -extern crate tempfile; -extern crate uuid; #[cfg(test)] #[macro_use] @@ -181,14 +158,11 @@ mod functional_test; #[macro_use] mod macros; -pub use error::TantivyError; +pub use crate::error::TantivyError; #[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")] -pub use error::TantivyError as Error; - -extern crate census; -pub extern crate chrono; -extern crate owned_read; +pub use crate::error::TantivyError as Error; +pub use chrono; /// Tantivy result. pub type Result = std::result::Result; @@ -225,15 +199,15 @@ pub use self::snippet::{Snippet, SnippetGenerator}; mod docset; pub use self::docset::{DocSet, SkipResult}; -pub use core::SegmentComponent; -pub use core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; -pub use core::{InvertedIndexReader, SegmentReader}; -pub use directory::Directory; -pub use indexer::IndexWriter; -pub use postings::Postings; -pub use schema::{Document, Term}; +pub use crate::core::SegmentComponent; +pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; +pub use crate::core::{InvertedIndexReader, SegmentReader}; +pub use crate::directory::Directory; +pub use crate::indexer::IndexWriter; +pub use crate::postings::Postings; +pub use crate::schema::{Document, Term}; -pub use common::{i64_to_u64, u64_to_i64}; +pub use crate::common::{i64_to_u64, u64_to_i64}; /// Expose the current version of tantivy, as well /// whether it was compiled with the simd compression. @@ -243,10 +217,10 @@ pub fn version() -> &'static str { /// Defines tantivy's merging strategy pub mod merge_policy { - pub use indexer::DefaultMergePolicy; - pub use indexer::LogMergePolicy; - pub use indexer::MergePolicy; - pub use indexer::NoMergePolicy; + pub use crate::indexer::DefaultMergePolicy; + pub use crate::indexer::LogMergePolicy; + pub use crate::indexer::MergePolicy; + pub use crate::indexer::NoMergePolicy; } /// A `u32` identifying a document within a segment. 
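This `lib.rs` hunk is the payoff of the edition switch: the long `extern crate` block disappears because the `Cargo.toml` entry alone is enough in Rust 2018, and `pub extern crate chrono;` collapses to `pub use chrono;`. The same re-export mechanics in miniature (the `dep` module below stands in for an external crate):

```rust
// Rust 2015 required every dependency to be declared at the crate root:
//
//     extern crate base64;
//     pub extern crate chrono; // re-export
//
// Rust 2018 drops the declarations and re-exports become ordinary
// `pub use` items, so downstream code can write `tantivy::chrono::...`
// without depending on chrono directly.
mod dep {
    pub fn version() -> &'static str {
        "0.4"
    }
}

pub use crate::dep::version as dep_version;

fn main() {
    println!("re-exported dep version: {}", dep_version());
}
```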
@@ -304,20 +278,20 @@ pub struct DocAddress(pub SegmentLocalId, pub DocId); #[cfg(test)] mod tests { - use collector::tests::TestCollector; - use core::SegmentReader; - use docset::DocSet; - use query::BooleanQuery; + use crate::collector::tests::TestCollector; + use crate::core::SegmentReader; + use crate::docset::DocSet; + use crate::query::BooleanQuery; + use crate::schema::*; + use crate::DocAddress; + use crate::Index; + use crate::IndexWriter; + use crate::Postings; + use crate::ReloadPolicy; use rand::distributions::Bernoulli; use rand::distributions::Uniform; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; - use schema::*; - use DocAddress; - use Index; - use IndexWriter; - use Postings; - use ReloadPolicy; pub fn assert_nearly_equals(expected: f32, val: f32) { assert!( @@ -480,7 +454,7 @@ mod tests { } } - fn advance_undeleted(docset: &mut DocSet, reader: &SegmentReader) -> bool { + fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool { while docset.advance() { if !reader.is_deleted(docset.doc()) { return true; diff --git a/src/macros.rs b/src/macros.rs index 7752093ed..640a70c35 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -67,7 +67,7 @@ macro_rules! doc( #[cfg(test)] mod test { - use schema::{Schema, FAST, TEXT}; + use crate::schema::{Schema, FAST, TEXT}; #[test] fn test_doc_basic() { diff --git a/src/positions/mod.rs b/src/positions/mod.rs index 592c18c51..7ac62d075 100644 --- a/src/positions/mod.rs +++ b/src/positions/mod.rs @@ -38,8 +38,8 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a pub mod tests { use super::{PositionReader, PositionSerializer}; - use directory::ReadOnlySource; - use positions::COMPRESSION_BLOCK_SIZE; + use crate::directory::ReadOnlySource; + use crate::positions::COMPRESSION_BLOCK_SIZE; use std::iter; fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) { diff --git a/src/positions/reader.rs b/src/positions/reader.rs index cd8b5f950..5c845c0b3 100644 --- a/src/positions/reader.rs +++ b/src/positions/reader.rs @@ -1,3 +1,9 @@ +use crate::common::{BinarySerializable, FixedSize}; +use crate::directory::ReadOnlySource; +use crate::positions::COMPRESSION_BLOCK_SIZE; +use crate::positions::LONG_SKIP_INTERVAL; +use crate::positions::LONG_SKIP_IN_BLOCKS; +use crate::postings::compression::compressed_block_size; /// Positions works as a long sequence of compressed block. /// All terms are chained one after the other. /// @@ -19,13 +25,7 @@ /// so skipping a block without decompressing it is just a matter of advancing that many /// bytes. 
use bitpacking::{BitPacker, BitPacker4x}; -use common::{BinarySerializable, FixedSize}; -use directory::ReadOnlySource; use owned_read::OwnedRead; -use positions::COMPRESSION_BLOCK_SIZE; -use positions::LONG_SKIP_INTERVAL; -use positions::LONG_SKIP_IN_BLOCKS; -use postings::compression::compressed_block_size; struct Positions { bit_packer: BitPacker4x, diff --git a/src/positions/serializer.rs b/src/positions/serializer.rs index 773be5e14..49cfdda83 100644 --- a/src/positions/serializer.rs +++ b/src/positions/serializer.rs @@ -1,8 +1,8 @@ +use crate::common::BinarySerializable; +use crate::common::CountingWriter; +use crate::positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL}; use bitpacking::BitPacker; use bitpacking::BitPacker4x; -use common::BinarySerializable; -use common::CountingWriter; -use positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL}; use std::io::{self, Write}; pub struct PositionSerializer { diff --git a/src/postings/block_search.rs b/src/postings/block_search.rs index 04da35b93..be6009fd2 100644 --- a/src/postings/block_search.rs +++ b/src/postings/block_search.rs @@ -1,4 +1,4 @@ -use postings::compression::AlignedBuffer; +use crate::postings::compression::AlignedBuffer; /// This modules define the logic used to search for a doc in a given /// block. (at most 128 docs) @@ -8,7 +8,7 @@ use postings::compression::AlignedBuffer; #[cfg(target_arch = "x86_64")] mod sse2 { - use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE}; + use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE}; use std::arch::x86_64::__m128i as DataType; use std::arch::x86_64::_mm_add_epi32 as op_add; use std::arch::x86_64::_mm_cmplt_epi32 as op_lt; @@ -49,7 +49,7 @@ mod sse2 { #[cfg(test)] mod test { use super::linear_search_sse2_128; - use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE}; + use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE}; #[test] fn test_linear_search_sse2_128_u32() { @@ -140,7 +140,7 @@ impl BlockSearcher { ) -> usize { #[cfg(target_arch = "x86_64")] { - use postings::compression::COMPRESSION_BLOCK_SIZE; + use crate::postings::compression::COMPRESSION_BLOCK_SIZE; if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE { return sse2::linear_search_sse2_128(block_docs, target); } @@ -166,7 +166,7 @@ mod tests { use super::exponential_search; use super::linear_search; use super::BlockSearcher; - use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE}; + use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE}; #[test] fn test_linear_search() { diff --git a/src/postings/compression/mod.rs b/src/postings/compression/mod.rs index d1bed4be3..a87f211e1 100644 --- a/src/postings/compression/mod.rs +++ b/src/postings/compression/mod.rs @@ -1,5 +1,5 @@ +use crate::common::FixedSize; use bitpacking::{BitPacker, BitPacker4x}; -use common::FixedSize; pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN; const COMPRESSED_BLOCK_MAX_SIZE: usize = COMPRESSION_BLOCK_SIZE * u32::SIZE_IN_BYTES; diff --git a/src/postings/mod.rs b/src/postings/mod.rs index edc9d6cd8..0981e872b 100644 --- a/src/postings/mod.rs +++ b/src/postings/mod.rs @@ -31,7 +31,7 @@ pub use self::segment_postings::{BlockSegmentPostings, SegmentPostings}; pub(crate) use self::stacker::compute_table_size; -pub use common::HasLen; +pub use crate::common::HasLen; pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32; pub(crate) type UnorderedTermId = u64; @@ -48,24 +48,24 @@ pub(crate) enum 
FreqReadingOption { pub mod tests { use super::*; - use core::Index; - use core::SegmentComponent; - use core::SegmentReader; - use docset::{DocSet, SkipResult}; - use fieldnorm::FieldNormReader; - use indexer::operation::AddOperation; - use indexer::SegmentWriter; - use merge_policy::NoMergePolicy; - use query::Scorer; + use crate::core::Index; + use crate::core::SegmentComponent; + use crate::core::SegmentReader; + use crate::docset::{DocSet, SkipResult}; + use crate::fieldnorm::FieldNormReader; + use crate::indexer::operation::AddOperation; + use crate::indexer::SegmentWriter; + use crate::merge_policy::NoMergePolicy; + use crate::query::Scorer; + use crate::schema::{Document, Schema, Term, INDEXED, STRING, TEXT}; + use crate::schema::{Field, TextOptions}; + use crate::schema::{IndexRecordOption, TextFieldIndexing}; + use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN}; + use crate::DocId; + use crate::Score; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; - use schema::{Document, Schema, Term, INDEXED, STRING, TEXT}; - use schema::{Field, TextOptions}; - use schema::{IndexRecordOption, TextFieldIndexing}; use std::iter; - use tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN}; - use DocId; - use Score; #[test] pub fn test_position_write() { @@ -589,7 +589,7 @@ pub mod tests { } } - pub fn test_skip_against_unoptimized Box>( + pub fn test_skip_against_unoptimized Box>( postings_factory: F, targets: Vec, ) { diff --git a/src/postings/postings.rs b/src/postings/postings.rs index 0d4400e51..d6f76c3f3 100644 --- a/src/postings/postings.rs +++ b/src/postings/postings.rs @@ -1,4 +1,4 @@ -use docset::DocSet; +use crate::docset::DocSet; /// Postings (also called inverted list) /// diff --git a/src/postings/postings_writer.rs b/src/postings/postings_writer.rs index d5d769b33..bff89e453 100644 --- a/src/postings/postings_writer.rs +++ b/src/postings/postings_writer.rs @@ -1,23 +1,23 @@ use super::stacker::{Addr, MemoryArena, TermHashMap}; -use postings::recorder::{ +use crate::postings::recorder::{ BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder, }; -use postings::UnorderedTermId; -use postings::{FieldSerializer, InvertedIndexSerializer}; -use schema::IndexRecordOption; -use schema::{Field, FieldEntry, FieldType, Schema, Term}; +use crate::postings::UnorderedTermId; +use crate::postings::{FieldSerializer, InvertedIndexSerializer}; +use crate::schema::IndexRecordOption; +use crate::schema::{Field, FieldEntry, FieldType, Schema, Term}; +use crate::termdict::TermOrdinal; +use crate::tokenizer::TokenStream; +use crate::tokenizer::{Token, MAX_TOKEN_LEN}; +use crate::DocId; +use crate::Result; use std::collections::HashMap; use std::io; use std::marker::PhantomData; use std::ops::DerefMut; -use termdict::TermOrdinal; -use tokenizer::TokenStream; -use tokenizer::{Token, MAX_TOKEN_LEN}; -use DocId; -use Result; -fn posting_from_field_entry(field_entry: &FieldEntry) -> Box { +fn posting_from_field_entry(field_entry: &FieldEntry) -> Box { match *field_entry.field_type() { FieldType::Str(ref text_options) => text_options .get_indexing_options() @@ -49,7 +49,7 @@ pub struct MultiFieldPostingsWriter { heap: MemoryArena, schema: Schema, term_index: TermHashMap, - per_field_postings_writers: Vec>, + per_field_postings_writers: Vec>, } fn make_field_partition( @@ -99,7 +99,12 @@ impl MultiFieldPostingsWriter { self.term_index.mem_usage() + self.heap.mem_usage() } - pub fn index_text(&mut self, doc: DocId, field: Field, token_stream: &mut TokenStream) -> u32 { + pub fn 
index_text( + &mut self, + doc: DocId, + field: Field, + token_stream: &mut dyn TokenStream, + ) -> u32 { let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut(); postings_writer.index_text( &mut self.term_index, @@ -138,10 +143,10 @@ impl MultiFieldPostingsWriter { FieldType::Str(_) | FieldType::HierarchicalFacet => { // populating the (unordered term ord) -> (ordered term ord) mapping // for the field. - let mut unordered_term_ids = term_offsets[start..stop] + let unordered_term_ids = term_offsets[start..stop] .iter() .map(|&(_, _, bucket)| bucket); - let mut mapping: HashMap = unordered_term_ids + let mapping: HashMap = unordered_term_ids .enumerate() .map(|(term_ord, unord_term_id)| { (unord_term_id as UnorderedTermId, term_ord as TermOrdinal) @@ -194,7 +199,7 @@ pub trait PostingsWriter { fn serialize( &self, term_addrs: &[(&[u8], Addr, UnorderedTermId)], - serializer: &mut FieldSerializer, + serializer: &mut FieldSerializer<'_>, term_heap: &MemoryArena, heap: &MemoryArena, ) -> io::Result<()>; @@ -205,7 +210,7 @@ pub trait PostingsWriter { term_index: &mut TermHashMap, doc_id: DocId, field: Field, - token_stream: &mut TokenStream, + token_stream: &mut dyn TokenStream, heap: &mut MemoryArena, ) -> u32 { let mut term = Term::for_field(field); @@ -246,7 +251,7 @@ impl SpecializedPostingsWriter { } /// Builds a `SpecializedPostingsWriter` storing its data in a heap. - pub fn new_boxed() -> Box { + pub fn new_boxed() -> Box { Box::new(SpecializedPostingsWriter::::new()) } } @@ -283,7 +288,7 @@ impl PostingsWriter for SpecializedPostingsWriter fn serialize( &self, term_addrs: &[(&[u8], Addr, UnorderedTermId)], - serializer: &mut FieldSerializer, + serializer: &mut FieldSerializer<'_>, termdict_heap: &MemoryArena, heap: &MemoryArena, ) -> io::Result<()> { diff --git a/src/postings/recorder.rs b/src/postings/recorder.rs index ad27ac0b5..0a1dd9217 100644 --- a/src/postings/recorder.rs +++ b/src/postings/recorder.rs @@ -1,8 +1,8 @@ use super::stacker::{ExpUnrolledLinkedList, MemoryArena}; -use common::{read_u32_vint, write_u32_vint}; -use postings::FieldSerializer; +use crate::common::{read_u32_vint, write_u32_vint}; +use crate::postings::FieldSerializer; +use crate::DocId; use std::io; -use DocId; const POSITION_END: u32 = 0; @@ -72,7 +72,7 @@ pub(crate) trait Recorder: Copy + 'static { fn serialize( &self, buffer_lender: &mut BufferLender, - serializer: &mut FieldSerializer, + serializer: &mut FieldSerializer<'_>, heap: &MemoryArena, ) -> io::Result<()>; } @@ -108,7 +108,7 @@ impl Recorder for NothingRecorder { fn serialize( &self, buffer_lender: &mut BufferLender, - serializer: &mut FieldSerializer, + serializer: &mut FieldSerializer<'_>, heap: &MemoryArena, ) -> io::Result<()> { let buffer = buffer_lender.lend_u8(); @@ -159,7 +159,7 @@ impl Recorder for TermFrequencyRecorder { fn serialize( &self, buffer_lender: &mut BufferLender, - serializer: &mut FieldSerializer, + serializer: &mut FieldSerializer<'_>, heap: &MemoryArena, ) -> io::Result<()> { let buffer = buffer_lender.lend_u8(); @@ -208,7 +208,7 @@ impl Recorder for TFAndPositionRecorder { fn serialize( &self, buffer_lender: &mut BufferLender, - serializer: &mut FieldSerializer, + serializer: &mut FieldSerializer<'_>, heap: &MemoryArena, ) -> io::Result<()> { let (buffer_u8, buffer_positions) = buffer_lender.lend_all(); diff --git a/src/postings/segment_postings.rs b/src/postings/segment_postings.rs index b189d0241..f70e5f429 100644 --- a/src/postings/segment_postings.rs +++ 
b/src/postings/segment_postings.rs @@ -1,21 +1,21 @@ -use common::BitSet; -use common::HasLen; -use common::{BinarySerializable, VInt}; -use docset::{DocSet, SkipResult}; +use crate::common::BitSet; +use crate::common::HasLen; +use crate::common::{BinarySerializable, VInt}; +use crate::docset::{DocSet, SkipResult}; +use crate::positions::PositionReader; +use crate::postings::compression::{compressed_block_size, AlignedBuffer}; +use crate::postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE}; +use crate::postings::serializer::PostingsSerializer; +use crate::postings::BlockSearcher; +use crate::postings::FreqReadingOption; +use crate::postings::Postings; +use crate::postings::SkipReader; +use crate::postings::USE_SKIP_INFO_LIMIT; +use crate::schema::IndexRecordOption; +use crate::DocId; use owned_read::OwnedRead; -use positions::PositionReader; -use postings::compression::{compressed_block_size, AlignedBuffer}; -use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE}; -use postings::serializer::PostingsSerializer; -use postings::BlockSearcher; -use postings::FreqReadingOption; -use postings::Postings; -use postings::SkipReader; -use postings::USE_SKIP_INFO_LIMIT; -use schema::IndexRecordOption; use std::cmp::Ordering; use tantivy_fst::Streamer; -use DocId; struct PositionComputer { // store the amount of position int @@ -611,17 +611,17 @@ mod tests { use super::BlockSegmentPostings; use super::BlockSegmentPostingsSkipResult; use super::SegmentPostings; - use common::HasLen; - use core::Index; - use docset::DocSet; - use postings::postings::Postings; - use schema::IndexRecordOption; - use schema::Schema; - use schema::Term; - use schema::INDEXED; + use crate::common::HasLen; + use crate::core::Index; + use crate::docset::DocSet; + use crate::postings::postings::Postings; + use crate::schema::IndexRecordOption; + use crate::schema::Schema; + use crate::schema::Term; + use crate::schema::INDEXED; + use crate::DocId; + use crate::SkipResult; use tantivy_fst::Streamer; - use DocId; - use SkipResult; #[test] fn test_empty_segment_postings() { diff --git a/src/postings/serializer.rs b/src/postings/serializer.rs index 846780e2e..9fedc527b 100644 --- a/src/postings/serializer.rs +++ b/src/postings/serializer.rs @@ -1,18 +1,18 @@ use super::TermInfo; -use common::{BinarySerializable, VInt}; -use common::{CompositeWrite, CountingWriter}; -use core::Segment; -use directory::WritePtr; -use positions::PositionSerializer; -use postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE}; -use postings::skip::SkipSerializer; -use postings::USE_SKIP_INFO_LIMIT; -use schema::Schema; -use schema::{Field, FieldEntry, FieldType}; +use crate::common::{BinarySerializable, VInt}; +use crate::common::{CompositeWrite, CountingWriter}; +use crate::core::Segment; +use crate::directory::WritePtr; +use crate::positions::PositionSerializer; +use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE}; +use crate::postings::skip::SkipSerializer; +use crate::postings::USE_SKIP_INFO_LIMIT; +use crate::schema::Schema; +use crate::schema::{Field, FieldEntry, FieldType}; +use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; +use crate::DocId; +use crate::Result; use std::io::{self, Write}; -use termdict::{TermDictionaryBuilder, TermOrdinal}; -use DocId; -use Result; /// `InvertedIndexSerializer` is in charge of serializing /// postings on disk, in the @@ -73,7 +73,7 @@ impl InvertedIndexSerializer { /// Open a new `PostingsSerializer` for 
the given segment pub fn open(segment: &mut Segment) -> Result { - use SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS}; + use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS}; InvertedIndexSerializer::create( CompositeWrite::wrap(segment.open_write(TERMS)?), CompositeWrite::wrap(segment.open_write(POSTINGS)?), @@ -91,7 +91,7 @@ impl InvertedIndexSerializer { &mut self, field: Field, total_num_tokens: u64, - ) -> io::Result { + ) -> io::Result> { let field_entry: &FieldEntry = self.schema.get_field_entry(field); let term_dictionary_write = self.terms_write.for_field(field); let postings_write = self.postings_write.for_field(field); diff --git a/src/postings/skip.rs b/src/postings/skip.rs index ab2dcb6c2..165664847 100644 --- a/src/postings/skip.rs +++ b/src/postings/skip.rs @@ -1,8 +1,8 @@ -use common::BinarySerializable; +use crate::common::BinarySerializable; +use crate::postings::compression::COMPRESSION_BLOCK_SIZE; +use crate::schema::IndexRecordOption; +use crate::DocId; use owned_read::OwnedRead; -use postings::compression::COMPRESSION_BLOCK_SIZE; -use schema::IndexRecordOption; -use DocId; pub struct SkipSerializer { buffer: Vec, diff --git a/src/postings/stacker/expull.rs b/src/postings/stacker/expull.rs index 58fa6e1ba..0c41075dd 100644 --- a/src/postings/stacker/expull.rs +++ b/src/postings/stacker/expull.rs @@ -1,7 +1,7 @@ use super::{Addr, MemoryArena}; -use postings::stacker::memory_arena::load; -use postings::stacker::memory_arena::store; +use crate::postings::stacker::memory_arena::load; +use crate::postings::stacker::memory_arena::store; use std::io; use std::mem; @@ -16,8 +16,8 @@ enum CapacityResult { fn len_to_capacity(len: u32) -> CapacityResult { match len { - 0...15 => CapacityResult::Available(FIRST_BLOCK as u32 - len), - 16...MAX_BLOCK_LEN => { + 0..=15 => CapacityResult::Available(FIRST_BLOCK as u32 - len), + 16..=MAX_BLOCK_LEN => { let cap = 1 << (32u32 - (len - 1u32).leading_zeros()); let available = cap - len; if available == 0 { diff --git a/src/postings/stacker/term_hashmap.rs b/src/postings/stacker/term_hashmap.rs index 50e30e559..f72704880 100644 --- a/src/postings/stacker/term_hashmap.rs +++ b/src/postings/stacker/term_hashmap.rs @@ -1,11 +1,11 @@ -extern crate murmurhash32; +use murmurhash32; use self::murmurhash32::murmurhash2; use super::{Addr, MemoryArena}; +use crate::postings::stacker::memory_arena::store; +use crate::postings::UnorderedTermId; use byteorder::{ByteOrder, NativeEndian}; -use postings::stacker::memory_arena::store; -use postings::UnorderedTermId; use std::iter; use std::mem; use std::slice; @@ -154,7 +154,7 @@ impl TermHashMap { unordered_term_id } - pub fn iter(&self) -> Iter { + pub fn iter(&self) -> Iter<'_> { Iter { inner: self.occupied.iter(), hashmap: &self, diff --git a/src/postings/term_info.rs b/src/postings/term_info.rs index 1f6567ec6..55a414955 100644 --- a/src/postings/term_info.rs +++ b/src/postings/term_info.rs @@ -1,4 +1,4 @@ -use common::{BinarySerializable, FixedSize}; +use crate::common::{BinarySerializable, FixedSize}; use std::io; /// `TermInfo` wraps the metadata associated to a Term. 
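One genuinely syntactic fix hides in the `expull.rs` hunk above: `0...15` becomes `0..=15`, because the old `...` inclusive-range pattern is deprecated in the 2018 edition. For example (the `255` upper bound is invented for this sketch; the diff's `MAX_BLOCK_LEN` value is not shown here):

```rust
// `0...15` (Rust 2015) becomes `0..=15` (Rust 2018); `...` in match
// patterns is deprecated in favor of `..=`.
fn capacity_class(len: u32) -> &'static str {
    match len {
        0..=15 => "first block",
        16..=255 => "exponentially growing blocks",
        _ => "max-size blocks",
    }
}

fn main() {
    assert_eq!(capacity_class(10), "first block");
    assert_eq!(capacity_class(100), "exponentially growing blocks");
    assert_eq!(capacity_class(10_000), "max-size blocks");
}
```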
@@ -45,7 +45,7 @@ impl BinarySerializable for TermInfo { mod tests { use super::TermInfo; - use common::test::fixed_size_test; + use crate::common::test::fixed_size_test; #[test] fn test_fixed_size() { diff --git a/src/query/all_query.rs b/src/query/all_query.rs index 6fa13670e..cdd5cc531 100644 --- a/src/query/all_query.rs +++ b/src/query/all_query.rs @@ -1,11 +1,11 @@ -use core::Searcher; -use core::SegmentReader; -use docset::DocSet; -use query::explanation::does_not_match; -use query::{Explanation, Query, Scorer, Weight}; -use DocId; -use Result; -use Score; +use crate::core::Searcher; +use crate::core::SegmentReader; +use crate::docset::DocSet; +use crate::query::explanation::does_not_match; +use crate::query::{Explanation, Query, Scorer, Weight}; +use crate::DocId; +use crate::Result; +use crate::Score; /// Query that matches all of the documents. /// @@ -14,7 +14,7 @@ use Score; pub struct AllQuery; impl Query for AllQuery { - fn weight(&self, _: &Searcher, _: bool) -> Result> { + fn weight(&self, _: &Searcher, _: bool) -> Result> { Ok(Box::new(AllWeight)) } } @@ -23,7 +23,7 @@ impl Query for AllQuery { pub struct AllWeight; impl Weight for AllWeight { - fn scorer(&self, reader: &SegmentReader) -> Result> { + fn scorer(&self, reader: &SegmentReader) -> Result> { Ok(Box::new(AllScorer { state: State::NotStarted, doc: 0u32, @@ -93,9 +93,9 @@ impl Scorer for AllScorer { mod tests { use super::AllQuery; - use query::Query; - use schema::{Schema, TEXT}; - use Index; + use crate::query::Query; + use crate::schema::{Schema, TEXT}; + use crate::Index; #[test] fn test_all_query() { diff --git a/src/query/automaton_weight.rs b/src/query/automaton_weight.rs index 74037bc43..eefb00390 100644 --- a/src/query/automaton_weight.rs +++ b/src/query/automaton_weight.rs @@ -1,14 +1,14 @@ -use common::BitSet; -use core::SegmentReader; -use query::ConstScorer; -use query::{BitSetDocSet, Explanation}; -use query::{Scorer, Weight}; -use schema::{Field, IndexRecordOption}; +use crate::common::BitSet; +use crate::core::SegmentReader; +use crate::query::ConstScorer; +use crate::query::{BitSetDocSet, Explanation}; +use crate::query::{Scorer, Weight}; +use crate::schema::{Field, IndexRecordOption}; +use crate::termdict::{TermDictionary, TermStreamer}; +use crate::DocId; +use crate::TantivyError; +use crate::{Result, SkipResult}; use tantivy_fst::Automaton; -use termdict::{TermDictionary, TermStreamer}; -use DocId; -use TantivyError; -use {Result, SkipResult}; /// A weight struct for Fuzzy Term and Regex Queries pub struct AutomatonWeight @@ -38,7 +38,7 @@ impl Weight for AutomatonWeight where A: Automaton + Send + Sync + 'static, { - fn scorer(&self, reader: &SegmentReader) -> Result> { + fn scorer(&self, reader: &SegmentReader) -> Result> { let max_doc = reader.max_doc(); let mut doc_bitset = BitSet::with_max_value(max_doc); diff --git a/src/query/bitset/mod.rs b/src/query/bitset/mod.rs index bfffd7091..3b01d9653 100644 --- a/src/query/bitset/mod.rs +++ b/src/query/bitset/mod.rs @@ -1,7 +1,7 @@ -use common::{BitSet, TinySet}; -use docset::{DocSet, SkipResult}; +use crate::common::{BitSet, TinySet}; +use crate::docset::{DocSet, SkipResult}; +use crate::DocId; use std::cmp::Ordering; -use DocId; /// A `BitSetDocSet` makes it possible to iterate through a bitset as if it was a `DocSet`. 
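`BitSetDocSet`, described above, adapts a bitset to the `DocSet` iteration interface. A toy version of the idea, assuming nothing about tantivy's `BitSet` internals: walk the set bits in ascending order and surface them as doc ids.

```rust
// A bitset over doc ids, iterated in ascending order like a DocSet.
struct TinyBitSet {
    words: Vec<u64>,
}

impl TinyBitSet {
    fn with_max_value(max_value: u32) -> TinyBitSet {
        TinyBitSet {
            words: vec![0u64; (max_value as usize + 63) / 64],
        }
    }
    fn insert(&mut self, doc: u32) {
        self.words[(doc / 64) as usize] |= 1u64 << (doc % 64);
    }
    // Walk the set bits word by word and surface them as doc ids.
    fn iter_docs(&self) -> impl Iterator<Item = u32> + '_ {
        self.words.iter().enumerate().flat_map(|(word_idx, &word)| {
            (0..64u32)
                .filter(move |bit| word & (1u64 << bit) != 0)
                .map(move |bit| word_idx as u32 * 64 + bit)
        })
    }
}

fn main() {
    let mut bitset = TinyBitSet::with_max_value(200);
    for &doc in [3u32, 64, 130].iter() {
        bitset.insert(doc);
    }
    let docs: Vec<u32> = bitset.iter_docs().collect();
    assert_eq!(docs, vec![3, 64, 130]);
}
```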
/// @@ -121,9 +121,9 @@ impl DocSet for BitSetDocSet { #[cfg(test)] mod tests { use super::BitSetDocSet; - use common::BitSet; - use docset::{DocSet, SkipResult}; - use DocId; + use crate::common::BitSet; + use crate::docset::{DocSet, SkipResult}; + use crate::DocId; fn create_docbitset(docs: &[DocId], max_doc: DocId) -> BitSetDocSet { let mut docset = BitSet::with_max_value(max_doc); diff --git a/src/query/bm25.rs b/src/query/bm25.rs index 4765ad1de..3f5b416e4 100644 --- a/src/query/bm25.rs +++ b/src/query/bm25.rs @@ -1,8 +1,8 @@ -use fieldnorm::FieldNormReader; -use query::Explanation; -use Score; -use Searcher; -use Term; +use crate::fieldnorm::FieldNormReader; +use crate::query::Explanation; +use crate::Score; +use crate::Searcher; +use crate::Term; const K1: f32 = 1.2; const B: f32 = 0.75; @@ -131,7 +131,7 @@ impl BM25Weight { mod tests { use super::idf; - use tests::assert_nearly_equals; + use crate::tests::assert_nearly_equals; #[test] fn test_idf() { diff --git a/src/query/boolean_query/boolean_query.rs b/src/query/boolean_query/boolean_query.rs index 353c89806..00658aa39 100644 --- a/src/query/boolean_query/boolean_query.rs +++ b/src/query/boolean_query/boolean_query.rs @@ -1,13 +1,13 @@ use super::boolean_weight::BooleanWeight; -use query::Occur; -use query::Query; -use query::TermQuery; -use query::Weight; -use schema::IndexRecordOption; -use schema::Term; +use crate::query::Occur; +use crate::query::Query; +use crate::query::TermQuery; +use crate::query::Weight; +use crate::schema::IndexRecordOption; +use crate::schema::Term; +use crate::Result; +use crate::Searcher; use std::collections::BTreeSet; -use Result; -use Searcher; /// The boolean query combines a set of queries /// @@ -21,7 +21,7 @@ use Searcher; /// a `MustNot` occurence. #[derive(Debug)] pub struct BooleanQuery { - subqueries: Vec<(Occur, Box)>, + subqueries: Vec<(Occur, Box)>, } impl Clone for BooleanQuery { @@ -34,14 +34,14 @@ impl Clone for BooleanQuery { } } -impl From)>> for BooleanQuery { - fn from(subqueries: Vec<(Occur, Box)>) -> BooleanQuery { +impl From)>> for BooleanQuery { + fn from(subqueries: Vec<(Occur, Box)>) -> BooleanQuery { BooleanQuery { subqueries } } } impl Query for BooleanQuery { - fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { + fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { let sub_weights = self .subqueries .iter() @@ -63,10 +63,10 @@ impl BooleanQuery { /// Helper method to create a boolean query matching a given list of terms. /// The resulting query is a disjunction of the terms. pub fn new_multiterms_query(terms: Vec) -> BooleanQuery { - let occur_term_queries: Vec<(Occur, Box)> = terms + let occur_term_queries: Vec<(Occur, Box)> = terms .into_iter() .map(|term| { - let term_query: Box = + let term_query: Box = Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs)); (Occur::Should, term_query) }) @@ -75,7 +75,7 @@ impl BooleanQuery { } /// Deconstructed view of the clauses making up this query. - pub fn clauses(&self) -> &[(Occur, Box)] { + pub fn clauses(&self) -> &[(Occur, Box)] { &self.subqueries[..] 
} } diff --git a/src/query/boolean_query/boolean_weight.rs b/src/query/boolean_query/boolean_weight.rs index 51ddacf11..d44d34862 100644 --- a/src/query/boolean_query/boolean_weight.rs +++ b/src/query/boolean_query/boolean_weight.rs @@ -1,20 +1,20 @@ -use core::SegmentReader; -use query::explanation::does_not_match; -use query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner}; -use query::term_query::TermScorer; -use query::EmptyScorer; -use query::Exclude; -use query::Occur; -use query::RequiredOptionalScorer; -use query::Scorer; -use query::Union; -use query::Weight; -use query::{intersect_scorers, Explanation}; +use crate::core::SegmentReader; +use crate::query::explanation::does_not_match; +use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner}; +use crate::query::term_query::TermScorer; +use crate::query::EmptyScorer; +use crate::query::Exclude; +use crate::query::Occur; +use crate::query::RequiredOptionalScorer; +use crate::query::Scorer; +use crate::query::Union; +use crate::query::Weight; +use crate::query::{intersect_scorers, Explanation}; +use crate::Result; +use crate::{DocId, SkipResult}; use std::collections::HashMap; -use Result; -use {DocId, SkipResult}; -fn scorer_union(scorers: Vec>) -> Box +fn scorer_union(scorers: Vec>) -> Box where TScoreCombiner: ScoreCombiner, { @@ -30,22 +30,23 @@ where .into_iter() .map(|scorer| *(scorer.downcast::().map_err(|_| ()).unwrap())) .collect(); - let scorer: Box = Box::new(Union::::from(scorers)); + let scorer: Box = + Box::new(Union::::from(scorers)); return scorer; } } - let scorer: Box = Box::new(Union::<_, TScoreCombiner>::from(scorers)); + let scorer: Box = Box::new(Union::<_, TScoreCombiner>::from(scorers)); scorer } pub struct BooleanWeight { - weights: Vec<(Occur, Box)>, + weights: Vec<(Occur, Box)>, scoring_enabled: bool, } impl BooleanWeight { - pub fn new(weights: Vec<(Occur, Box)>, scoring_enabled: bool) -> BooleanWeight { + pub fn new(weights: Vec<(Occur, Box)>, scoring_enabled: bool) -> BooleanWeight { BooleanWeight { weights, scoring_enabled, @@ -55,10 +56,10 @@ impl BooleanWeight { fn per_occur_scorers( &self, reader: &SegmentReader, - ) -> Result>>> { - let mut per_occur_scorers: HashMap>> = HashMap::new(); + ) -> Result>>> { + let mut per_occur_scorers: HashMap>> = HashMap::new(); for &(ref occur, ref subweight) in &self.weights { - let sub_scorer: Box = subweight.scorer(reader)?; + let sub_scorer: Box = subweight.scorer(reader)?; per_occur_scorers .entry(*occur) .or_insert_with(Vec::new) @@ -70,22 +71,22 @@ impl BooleanWeight { fn complex_scorer( &self, reader: &SegmentReader, - ) -> Result> { + ) -> Result> { let mut per_occur_scorers = self.per_occur_scorers(reader)?; - let should_scorer_opt: Option> = per_occur_scorers + let should_scorer_opt: Option> = per_occur_scorers .remove(&Occur::Should) .map(scorer_union::); - let exclude_scorer_opt: Option> = per_occur_scorers + let exclude_scorer_opt: Option> = per_occur_scorers .remove(&Occur::MustNot) .map(scorer_union::); - let must_scorer_opt: Option> = per_occur_scorers + let must_scorer_opt: Option> = per_occur_scorers .remove(&Occur::Must) .map(intersect_scorers); - let positive_scorer: Box = match (should_scorer_opt, must_scorer_opt) { + let positive_scorer: Box = match (should_scorer_opt, must_scorer_opt) { (Some(should_scorer), Some(must_scorer)) => { if self.scoring_enabled { Box::new(RequiredOptionalScorer::<_, _, TScoreCombiner>::new( @@ -112,7 +113,7 @@ impl BooleanWeight { } impl Weight for 
BooleanWeight { - fn scorer(&self, reader: &SegmentReader) -> Result> { + fn scorer(&self, reader: &SegmentReader) -> Result> { if self.weights.is_empty() { Ok(Box::new(EmptyScorer)) } else if self.weights.len() == 1 { diff --git a/src/query/boolean_query/mod.rs b/src/query/boolean_query/mod.rs index b7f19805f..04f248322 100644 --- a/src/query/boolean_query/mod.rs +++ b/src/query/boolean_query/mod.rs @@ -7,19 +7,19 @@ pub use self::boolean_query::BooleanQuery; mod tests { use super::*; - use collector::tests::TestCollector; - use query::score_combiner::SumWithCoordsCombiner; - use query::term_query::TermScorer; - use query::Intersection; - use query::Occur; - use query::Query; - use query::QueryParser; - use query::RequiredOptionalScorer; - use query::Scorer; - use query::TermQuery; - use schema::*; - use Index; - use {DocAddress, DocId}; + use crate::collector::tests::TestCollector; + use crate::query::score_combiner::SumWithCoordsCombiner; + use crate::query::term_query::TermScorer; + use crate::query::Intersection; + use crate::query::Occur; + use crate::query::Query; + use crate::query::QueryParser; + use crate::query::RequiredOptionalScorer; + use crate::query::Scorer; + use crate::query::TermQuery; + use crate::schema::*; + use crate::Index; + use crate::{DocAddress, DocId}; fn aux_test_helper() -> (Index, Field) { let mut schema_builder = Schema::builder(); @@ -89,7 +89,7 @@ mod tests { let query = query_parser.parse_query("+a +(b c)").unwrap(); let weight = query.weight(&searcher, true).unwrap(); let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap(); - assert!(scorer.is::>>()); + assert!(scorer.is::>>()); } } @@ -102,8 +102,11 @@ mod tests { let query = query_parser.parse_query("+a b").unwrap(); let weight = query.weight(&searcher, true).unwrap(); let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap(); - assert!(scorer - .is::, Box, SumWithCoordsCombiner>>()); + assert!(scorer.is::, + Box, + SumWithCoordsCombiner, + >>()); } { let query = query_parser.parse_query("+a b").unwrap(); @@ -122,13 +125,13 @@ mod tests { Term::from_field_text(text_field, text), IndexRecordOption::Basic, ); - let query: Box = Box::new(term_query); + let query: Box = Box::new(term_query); query }; let reader = index.reader().unwrap(); - let matching_docs = |boolean_query: &Query| { + let matching_docs = |boolean_query: &dyn Query| { reader .searcher() .search(boolean_query, &TestCollector) @@ -185,11 +188,11 @@ mod tests { Term::from_field_text(text_field, text), IndexRecordOption::Basic, ); - let query: Box = Box::new(term_query); + let query: Box = Box::new(term_query); query }; let reader = index.reader().unwrap(); - let score_docs = |boolean_query: &Query| { + let score_docs = |boolean_query: &dyn Query| { let fruit = reader .searcher() .search(boolean_query, &TestCollector) diff --git a/src/query/empty_query.rs b/src/query/empty_query.rs index 936677318..dd7c0973f 100644 --- a/src/query/empty_query.rs +++ b/src/query/empty_query.rs @@ -1,13 +1,13 @@ use super::Scorer; -use query::explanation::does_not_match; -use query::Weight; -use query::{Explanation, Query}; -use DocId; -use DocSet; -use Result; -use Score; -use Searcher; -use SegmentReader; +use crate::query::explanation::does_not_match; +use crate::query::Weight; +use crate::query::{Explanation, Query}; +use crate::DocId; +use crate::DocSet; +use crate::Result; +use crate::Score; +use crate::Searcher; +use crate::SegmentReader; /// `EmptyQuery` is a dummy `Query` in which no document matches. 
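The `boolean_weight` hunks just above encode the clause semantics: `Must` sub-scorers are intersected, `Should` sub-scorers are unioned, `MustNot` excludes, and when `Must` and `Should` are both present the `Should` side only contributes to scoring (the `RequiredOptionalScorer` path). A sketch of the matching logic alone, on plain sorted doc-id slices instead of scorers:

```rust
// Matching only (scores omitted): inputs are sorted doc-id slices.
fn intersect(left: &[u32], right: &[u32]) -> Vec<u32> {
    left.iter()
        .copied()
        .filter(|doc| right.binary_search(doc).is_ok())
        .collect()
}

fn union(left: &[u32], right: &[u32]) -> Vec<u32> {
    let mut out: Vec<u32> = left.iter().chain(right.iter()).copied().collect();
    out.sort_unstable();
    out.dedup();
    out
}

fn exclude(hits: &[u32], banned: &[u32]) -> Vec<u32> {
    hits.iter()
        .copied()
        .filter(|doc| banned.binary_search(doc).is_err())
        .collect()
}

fn main() {
    let a = vec![1, 3, 5, 7];
    let b = vec![3, 5, 9];
    let c = vec![5];
    // "+a +b -c": intersect the Must clauses, then filter MustNot.
    assert_eq!(exclude(&intersect(&a, &b), &c), vec![3]);
    // "a b" (Should only): the union matches.
    assert_eq!(union(&a, &b), vec![1, 3, 5, 7, 9]);
}
```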
/// @@ -16,7 +16,7 @@ use SegmentReader; pub struct EmptyQuery; impl Query for EmptyQuery { - fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result> { + fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result> { Ok(Box::new(EmptyWeight)) } @@ -30,7 +30,7 @@ impl Query for EmptyQuery { /// It is useful for tests and handling edge cases. pub struct EmptyWeight; impl Weight for EmptyWeight { - fn scorer(&self, _reader: &SegmentReader) -> Result> { + fn scorer(&self, _reader: &SegmentReader) -> Result> { Ok(Box::new(EmptyScorer)) } @@ -69,8 +69,8 @@ impl Scorer for EmptyScorer { #[cfg(test)] mod tests { - use query::EmptyScorer; - use DocSet; + use crate::query::EmptyScorer; + use crate::DocSet; #[test] fn test_empty_scorer() { diff --git a/src/query/exclude.rs b/src/query/exclude.rs index 4fcb71b72..7ddcfedf1 100644 --- a/src/query/exclude.rs +++ b/src/query/exclude.rs @@ -1,7 +1,7 @@ -use docset::{DocSet, SkipResult}; -use query::Scorer; -use DocId; -use Score; +use crate::docset::{DocSet, SkipResult}; +use crate::query::Scorer; +use crate::DocId; +use crate::Score; #[derive(Clone, Copy, Debug)] enum State { @@ -130,9 +130,9 @@ where mod tests { use super::*; - use postings::tests::test_skip_against_unoptimized; - use query::VecDocSet; - use tests::sample_with_seed; + use crate::postings::tests::test_skip_against_unoptimized; + use crate::query::VecDocSet; + use crate::tests::sample_with_seed; #[test] fn test_exclude() { diff --git a/src/query/explanation.rs b/src/query/explanation.rs index ad391239c..cabb08138 100644 --- a/src/query/explanation.rs +++ b/src/query/explanation.rs @@ -1,4 +1,4 @@ -use {DocId, TantivyError}; +use crate::{DocId, TantivyError}; pub(crate) fn does_not_match(doc: DocId) -> TantivyError { TantivyError::InvalidArgument(format!("Document #({}) does not match", doc)) diff --git a/src/query/fuzzy_query.rs b/src/query/fuzzy_query.rs index ea54b789b..982031b7c 100644 --- a/src/query/fuzzy_query.rs +++ b/src/query/fuzzy_query.rs @@ -1,9 +1,9 @@ +use crate::query::{AutomatonWeight, Query, Weight}; +use crate::schema::Term; +use crate::Result; +use crate::Searcher; use levenshtein_automata::{LevenshteinAutomatonBuilder, DFA}; -use query::{AutomatonWeight, Query, Weight}; -use schema::Term; use std::collections::HashMap; -use Result; -use Searcher; lazy_static! 
{ static ref LEV_BUILDER: HashMap<(u8, bool), LevenshteinAutomatonBuilder> = { @@ -109,7 +109,7 @@ impl FuzzyTermQuery { } impl Query for FuzzyTermQuery { - fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<Weight>> { + fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> { Ok(Box::new(self.specialized_weight()?)) } } @@ -117,12 +117,12 @@ impl Query for FuzzyTermQuery { #[cfg(test)] mod test { use super::FuzzyTermQuery; - use collector::TopDocs; - use schema::Schema; - use schema::TEXT; - use tests::assert_nearly_equals; - use Index; - use Term; + use crate::collector::TopDocs; + use crate::schema::Schema; + use crate::schema::TEXT; + use crate::tests::assert_nearly_equals; + use crate::Index; + use crate::Term; #[test] pub fn test_fuzzy_term() { diff --git a/src/query/intersection.rs b/src/query/intersection.rs index a35a8ef6c..e241c1ea7 100644 --- a/src/query/intersection.rs +++ b/src/query/intersection.rs @@ -1,9 +1,9 @@ -use docset::{DocSet, SkipResult}; -use query::term_query::TermScorer; -use query::EmptyScorer; -use query::Scorer; -use DocId; -use Score; +use crate::docset::{DocSet, SkipResult}; +use crate::query::term_query::TermScorer; +use crate::query::EmptyScorer; +use crate::query::Scorer; +use crate::DocId; +use crate::Score; /// Returns the intersection scorer. /// @@ -13,7 +13,7 @@ use Score; /// For better performance, the function uses a /// specialized implementation if the two /// shortest scorers are `TermScorer`s. -pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> { +pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> { if scorers.is_empty() { return Box::new(EmptyScorer); } @@ -46,7 +46,7 @@ pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> { } /// Creates a `DocSet` that iterates through the intersection of two `DocSet`s.
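The `intersect_scorers` signature above is the recurring trait-object case: boxed trait objects in argument and return position must now be written `Box<dyn Scorer>`. A sketch of why the body compiles unchanged otherwise (toy `Scorer` trait, not tantivy's full one):

    trait Scorer {
        fn score(&mut self) -> f32;
    }

    struct EmptyScorer;
    impl Scorer for EmptyScorer {
        fn score(&mut self) -> f32 { 0.0 }
    }

    struct ConstScorer(f32);
    impl Scorer for ConstScorer {
        fn score(&mut self) -> f32 { self.0 }
    }

    // 2015 wrote `-> Box<Scorer>`; 2018 spells the trait object `dyn Scorer`.
    fn intersect(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
        scorers.pop().unwrap_or_else(|| Box::new(EmptyScorer))
    }

    fn main() {
        let mut s = intersect(vec![Box::new(ConstScorer(1.5))]);
        assert_eq!(s.score(), 1.5);
    }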
-pub struct Intersection> { +pub struct Intersection> { left: TDocSet, right: TDocSet, others: Vec, @@ -81,7 +81,7 @@ impl Intersection { } impl Intersection { - pub(crate) fn docset_mut(&mut self, ord: usize) -> &mut DocSet { + pub(crate) fn docset_mut(&mut self, ord: usize) -> &mut dyn DocSet { match ord { 0 => &mut self.left, 1 => &mut self.right, @@ -229,9 +229,9 @@ where #[cfg(test)] mod tests { use super::Intersection; - use docset::{DocSet, SkipResult}; - use postings::tests::test_skip_against_unoptimized; - use query::VecDocSet; + use crate::docset::{DocSet, SkipResult}; + use crate::postings::tests::test_skip_against_unoptimized; + use crate::query::VecDocSet; #[test] fn test_intersection() { diff --git a/src/query/mod.rs b/src/query/mod.rs index bfa0256ca..c03912197 100644 --- a/src/query/mod.rs +++ b/src/query/mod.rs @@ -58,11 +58,11 @@ pub use self::weight::Weight; #[cfg(test)] mod tests { - use query::QueryParser; - use schema::{Schema, TEXT}; + use crate::query::QueryParser; + use crate::schema::{Schema, TEXT}; + use crate::Index; + use crate::Term; use std::collections::BTreeSet; - use Index; - use Term; #[test] fn test_query_terms() { diff --git a/src/query/phrase_query/mod.rs b/src/query/phrase_query/mod.rs index a08e505a4..306ad2730 100644 --- a/src/query/phrase_query/mod.rs +++ b/src/query/phrase_query/mod.rs @@ -10,13 +10,13 @@ pub use self::phrase_weight::PhraseWeight; mod tests { use super::*; - use collector::tests::TestCollector; - use core::Index; - use error::TantivyError; - use schema::{Schema, Term, TEXT}; - use tests::assert_nearly_equals; - use DocAddress; - use DocId; + use crate::collector::tests::TestCollector; + use crate::core::Index; + use crate::error::TantivyError; + use crate::schema::{Schema, Term, TEXT}; + use crate::tests::assert_nearly_equals; + use crate::DocAddress; + use crate::DocId; fn create_index(texts: &[&'static str]) -> Index { let mut schema_builder = Schema::builder(); @@ -71,9 +71,9 @@ mod tests { #[test] pub fn test_phrase_query_no_positions() { let mut schema_builder = Schema::builder(); - use schema::IndexRecordOption; - use schema::TextFieldIndexing; - use schema::TextOptions; + use crate::schema::IndexRecordOption; + use crate::schema::TextFieldIndexing; + use crate::schema::TextOptions; let no_positions = TextOptions::default().set_indexing_options( TextFieldIndexing::default() .set_tokenizer("default") diff --git a/src/query/phrase_query/phrase_query.rs b/src/query/phrase_query/phrase_query.rs index 51bb0d251..a4e9d708b 100644 --- a/src/query/phrase_query/phrase_query.rs +++ b/src/query/phrase_query/phrase_query.rs @@ -1,13 +1,13 @@ use super::PhraseWeight; -use core::searcher::Searcher; -use error::TantivyError; -use query::bm25::BM25Weight; -use query::Query; -use query::Weight; -use schema::IndexRecordOption; -use schema::{Field, Term}; +use crate::core::searcher::Searcher; +use crate::error::TantivyError; +use crate::query::bm25::BM25Weight; +use crate::query::Query; +use crate::query::Weight; +use crate::schema::IndexRecordOption; +use crate::schema::{Field, Term}; +use crate::Result; use std::collections::BTreeSet; -use Result; /// `PhraseQuery` matches a specific sequence of words. /// @@ -78,7 +78,7 @@ impl Query for PhraseQuery { /// Create the weight associated to a query. /// /// See [`Weight`](./trait.Weight.html). 
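`Intersection::docset_mut` above applies the same rule to borrowed trait objects: `&mut DocSet` becomes `&mut dyn DocSet`. A minimal sketch of that accessor pattern, simplified to a single concrete `DocSet` type:

    trait DocSet {
        fn advance(&mut self) -> bool;
    }

    struct VecDocSet { docs: Vec<u32>, cursor: usize }
    impl DocSet for VecDocSet {
        fn advance(&mut self) -> bool {
            self.cursor += 1;
            self.cursor <= self.docs.len()
        }
    }

    struct Intersection {
        left: VecDocSet,
        right: VecDocSet,
        others: Vec<VecDocSet>,
    }

    impl Intersection {
        // 2015 accepted `-> &mut DocSet`; 2018 wants `&mut dyn DocSet`.
        fn docset_mut(&mut self, ord: usize) -> &mut dyn DocSet {
            match ord {
                0 => &mut self.left,
                1 => &mut self.right,
                n => &mut self.others[n - 2],
            }
        }
    }

    fn main() {
        let d = |docs: Vec<u32>| VecDocSet { docs, cursor: 0 };
        let mut inter = Intersection { left: d(vec![1]), right: d(vec![1]), others: vec![] };
        assert!(inter.docset_mut(0).advance());
    }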
- fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { + fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { let schema = searcher.schema(); let field_entry = schema.get_field_entry(self.field); let has_positions = field_entry diff --git a/src/query/phrase_query/phrase_scorer.rs b/src/query/phrase_query/phrase_scorer.rs index 666715fa4..c85cfa8cc 100644 --- a/src/query/phrase_query/phrase_scorer.rs +++ b/src/query/phrase_query/phrase_scorer.rs @@ -1,9 +1,9 @@ -use docset::{DocSet, SkipResult}; -use fieldnorm::FieldNormReader; -use postings::Postings; -use query::bm25::BM25Weight; -use query::{Intersection, Scorer}; -use DocId; +use crate::docset::{DocSet, SkipResult}; +use crate::fieldnorm::FieldNormReader; +use crate::postings::Postings; +use crate::query::bm25::BM25Weight; +use crate::query::{Intersection, Scorer}; +use crate::DocId; struct PostingsWithOffset { offset: u32, diff --git a/src/query/phrase_query/phrase_weight.rs b/src/query/phrase_query/phrase_weight.rs index 2229a52f3..b60ce5a07 100644 --- a/src/query/phrase_query/phrase_weight.rs +++ b/src/query/phrase_query/phrase_weight.rs @@ -1,16 +1,16 @@ use super::PhraseScorer; -use core::SegmentReader; -use fieldnorm::FieldNormReader; -use postings::SegmentPostings; -use query::bm25::BM25Weight; -use query::explanation::does_not_match; -use query::Scorer; -use query::Weight; -use query::{EmptyScorer, Explanation}; -use schema::IndexRecordOption; -use schema::Term; -use {DocId, DocSet}; -use {Result, SkipResult}; +use crate::core::SegmentReader; +use crate::fieldnorm::FieldNormReader; +use crate::postings::SegmentPostings; +use crate::query::bm25::BM25Weight; +use crate::query::explanation::does_not_match; +use crate::query::Scorer; +use crate::query::Weight; +use crate::query::{EmptyScorer, Explanation}; +use crate::schema::IndexRecordOption; +use crate::schema::Term; +use crate::{DocId, DocSet}; +use crate::{Result, SkipResult}; pub struct PhraseWeight { phrase_terms: Vec<(usize, Term)>, @@ -84,7 +84,7 @@ impl PhraseWeight { } impl Weight for PhraseWeight { - fn scorer(&self, reader: &SegmentReader) -> Result> { + fn scorer(&self, reader: &SegmentReader) -> Result> { if let Some(scorer) = self.phrase_scorer(reader)? { Ok(Box::new(scorer)) } else { diff --git a/src/query/query.rs b/src/query/query.rs index 4d8eb4ca6..caa2a974a 100644 --- a/src/query/query.rs +++ b/src/query/query.rs @@ -1,11 +1,11 @@ use super::Weight; -use core::searcher::Searcher; -use query::Explanation; +use crate::core::searcher::Searcher; +use crate::query::Explanation; +use crate::Result; +use crate::Term; +use crate::{downcast_rs, DocAddress}; use std::collections::BTreeSet; use std::fmt; -use Result; -use Term; -use {downcast_rs, DocAddress}; /// The `Query` trait defines a set of documents and a scoring method /// for those documents. @@ -47,7 +47,7 @@ pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug { /// can increase performances. /// /// See [`Weight`](./trait.Weight.html). - fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result>; + fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result>; /// Returns an `Explanation` for the score of the document. 
fn explain(&self, searcher: &Searcher, doc_address: DocAddress) -> Result { @@ -72,20 +72,20 @@ pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug { } pub trait QueryClone { - fn box_clone(&self) -> Box; + fn box_clone(&self) -> Box; } impl QueryClone for T where T: 'static + Query + Clone, { - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box { Box::new(self.clone()) } } -impl Query for Box { - fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { +impl Query for Box { + fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { self.as_ref().weight(searcher, scoring_enabled) } @@ -98,8 +98,8 @@ impl Query for Box { } } -impl QueryClone for Box { - fn box_clone(&self) -> Box { +impl QueryClone for Box { + fn box_clone(&self) -> Box { self.as_ref().box_clone() } } diff --git a/src/query/query_parser/logical_ast.rs b/src/query/query_parser/logical_ast.rs index 608831578..d3a25a1fd 100644 --- a/src/query/query_parser/logical_ast.rs +++ b/src/query/query_parser/logical_ast.rs @@ -1,7 +1,7 @@ -use query::Occur; -use schema::Field; -use schema::Term; -use schema::Type; +use crate::query::Occur; +use crate::schema::Field; +use crate::schema::Term; +use crate::schema::Type; use std::fmt; use std::ops::Bound; @@ -33,7 +33,7 @@ fn occur_letter(occur: Occur) -> &'static str { } impl fmt::Debug for LogicalAST { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match *self { LogicalAST::Clause(ref clause) => { if clause.is_empty() { @@ -60,7 +60,7 @@ impl From for LogicalAST { } impl fmt::Debug for LogicalLiteral { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match *self { LogicalLiteral::Term(ref term) => write!(formatter, "{:?}", term), LogicalLiteral::Phrase(ref terms) => write!(formatter, "\"{:?}\"", terms), diff --git a/src/query/query_parser/query_grammar.rs b/src/query/query_parser/query_grammar.rs index a06845b3f..cd22237b5 100644 --- a/src/query/query_parser/query_grammar.rs +++ b/src/query/query_parser/query_grammar.rs @@ -3,12 +3,12 @@ use super::query_grammar; use super::user_input_ast::*; +use crate::query::occur::Occur; +use crate::query::query_parser::user_input_ast::UserInputBound; use combine::char::*; use combine::error::StreamError; use combine::stream::StreamErrorFor; use combine::*; -use query::occur::Occur; -use query::query_parser::user_input_ast::UserInputBound; parser! 
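The `QueryClone` hunks in src/query/query.rs above keep tantivy's object-safe cloning trick intact while adding `dyn`: `Clone` itself is not object safe, so a helper trait hands out boxed clones. The pattern in miniature (toy `Query` trait with one method):

    trait Query: QueryClone {
        fn name(&self) -> &'static str;
    }

    trait QueryClone {
        fn box_clone(&self) -> Box<dyn Query>;
    }

    // Blanket impl: every cloneable Query gets box_clone for free.
    impl<T> QueryClone for T
    where
        T: 'static + Query + Clone,
    {
        fn box_clone(&self) -> Box<dyn Query> {
            Box::new(self.clone())
        }
    }

    #[derive(Clone)]
    struct TermQuery;
    impl Query for TermQuery {
        fn name(&self) -> &'static str { "term" }
    }

    fn main() {
        let q: Box<dyn Query> = Box::new(TermQuery);
        let q2 = q.box_clone();
        assert_eq!(q2.name(), "term");
    }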
{ fn field[I]()(I) -> String diff --git a/src/query/query_parser/query_parser.rs b/src/query/query_parser/query_parser.rs index 7f8b737d2..eb35a4464 100644 --- a/src/query/query_parser/query_parser.rs +++ b/src/query/query_parser/query_parser.rs @@ -1,26 +1,26 @@ use super::logical_ast::*; use super::query_grammar::parse_to_ast; use super::user_input_ast::*; +use crate::core::Index; +use crate::query::occur::compose_occur; +use crate::query::query_parser::logical_ast::LogicalAST; +use crate::query::AllQuery; +use crate::query::BooleanQuery; +use crate::query::EmptyQuery; +use crate::query::Occur; +use crate::query::PhraseQuery; +use crate::query::Query; +use crate::query::RangeQuery; +use crate::query::TermQuery; +use crate::schema::IndexRecordOption; +use crate::schema::{Field, Schema}; +use crate::schema::{FieldType, Term}; +use crate::tokenizer::TokenizerManager; use combine::Parser; -use core::Index; -use query::occur::compose_occur; -use query::query_parser::logical_ast::LogicalAST; -use query::AllQuery; -use query::BooleanQuery; -use query::EmptyQuery; -use query::Occur; -use query::PhraseQuery; -use query::Query; -use query::RangeQuery; -use query::TermQuery; -use schema::IndexRecordOption; -use schema::{Field, Schema}; -use schema::{FieldType, Term}; use std::borrow::Cow; use std::num::ParseIntError; use std::ops::Bound; use std::str::FromStr; -use tokenizer::TokenizerManager; /// Possible error that may happen when parsing a query. #[derive(Debug, PartialEq, Eq)] @@ -192,7 +192,7 @@ impl QueryParser { /// /// Implementing a lenient mode for this query parser is tracked /// in [Issue 5](https://github.com/fulmicoton/tantivy/issues/5) - pub fn parse_query(&self, query: &str) -> Result, QueryParserError> { + pub fn parse_query(&self, query: &str) -> Result, QueryParserError> { let logical_ast = self.parse_query_to_logical_ast(query)?; Ok(convert_to_query(logical_ast)) } @@ -253,7 +253,7 @@ impl QueryParser { } FieldType::Str(ref str_options) => { if let Some(option) = str_options.get_indexing_options() { - let mut tokenizer = + let tokenizer = self.tokenizer_manager .get(option.tokenizer()) .ok_or_else(|| { @@ -347,7 +347,7 @@ impl QueryParser { fn resolved_fields( &self, given_field: &Option, - ) -> Result, QueryParserError> { + ) -> Result, QueryParserError> { match *given_field { None => { if self.default_fields.is_empty() { @@ -458,7 +458,7 @@ impl QueryParser { } } -fn convert_literal_to_query(logical_literal: LogicalLiteral) -> Box { +fn convert_literal_to_query(logical_literal: LogicalLiteral) -> Box { match logical_literal { LogicalLiteral::Term(term) => Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs)), LogicalLiteral::Phrase(term_with_offsets) => { @@ -476,7 +476,7 @@ fn convert_literal_to_query(logical_literal: LogicalLiteral) -> Box { } } -fn convert_to_query(logical_ast: LogicalAST) -> Box { +fn convert_to_query(logical_ast: LogicalAST) -> Box { match trim_ast(logical_ast) { Some(LogicalAST::Clause(trimmed_clause)) => { let occur_subqueries = trimmed_clause @@ -501,12 +501,14 @@ mod test { use super::super::logical_ast::*; use super::QueryParser; use super::QueryParserError; - use query::Query; - use schema::Field; - use schema::{IndexRecordOption, TextFieldIndexing, TextOptions}; - use schema::{Schema, Term, INDEXED, STORED, STRING, TEXT}; - use tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager}; - use Index; + use crate::query::Query; + use crate::schema::Field; + use crate::schema::{IndexRecordOption, TextFieldIndexing, 
TextOptions}; + use crate::schema::{Schema, Term, INDEXED, STORED, STRING, TEXT}; + use crate::tokenizer::{ + LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager, + }; + use crate::Index; fn make_query_parser() -> QueryParser { let mut schema_builder = Schema::builder(); @@ -570,7 +572,7 @@ mod test { let query_parser = make_query_parser(); let is_not_indexed_err = |query: &str| { - let result: Result, QueryParserError> = query_parser.parse_query(query); + let result: Result, QueryParserError> = query_parser.parse_query(query); if let Err(QueryParserError::FieldNotIndexed(field_name)) = result { Some(field_name.clone()) } else { diff --git a/src/query/query_parser/user_input_ast.rs b/src/query/query_parser/user_input_ast.rs index 52ab4d293..649326e68 100644 --- a/src/query/query_parser/user_input_ast.rs +++ b/src/query/query_parser/user_input_ast.rs @@ -1,7 +1,7 @@ use std::fmt; use std::fmt::{Debug, Formatter}; -use query::Occur; +use crate::query::Occur; pub enum UserInputLeaf { Literal(UserInputLiteral), @@ -14,7 +14,7 @@ pub enum UserInputLeaf { } impl Debug for UserInputLeaf { - fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { match self { UserInputLeaf::Literal(literal) => literal.fmt(formatter), UserInputLeaf::Range { @@ -41,7 +41,7 @@ pub struct UserInputLiteral { } impl fmt::Debug for UserInputLiteral { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match self.field_name { Some(ref field_name) => write!(formatter, "{}:\"{}\"", field_name, self.phrase), None => write!(formatter, "\"{}\"", self.phrase), @@ -55,14 +55,14 @@ pub enum UserInputBound { } impl UserInputBound { - fn display_lower(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn display_lower(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match *self { UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word), UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word), } } - fn display_upper(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn display_upper(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match *self { UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word), UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word), @@ -163,7 +163,7 @@ impl From for UserInputAST { } impl fmt::Debug for UserInputAST { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match *self { UserInputAST::Clause(ref subqueries) => { if subqueries.is_empty() { diff --git a/src/query/range_query.rs b/src/query/range_query.rs index bda442fc5..1ec9d20ce 100644 --- a/src/query/range_query.rs +++ b/src/query/range_query.rs @@ -1,18 +1,18 @@ -use common::BitSet; -use core::Searcher; -use core::SegmentReader; -use error::TantivyError; -use query::explanation::does_not_match; -use query::ConstScorer; -use query::{BitSetDocSet, Explanation}; -use query::{Query, Scorer, Weight}; -use schema::Type; -use schema::{Field, IndexRecordOption, Term}; +use crate::common::BitSet; +use crate::core::Searcher; +use crate::core::SegmentReader; +use crate::error::TantivyError; +use crate::query::explanation::does_not_match; +use crate::query::ConstScorer; +use 
crate::query::{BitSetDocSet, Explanation}; +use crate::query::{Query, Scorer, Weight}; +use crate::schema::Type; +use crate::schema::{Field, IndexRecordOption, Term}; +use crate::termdict::{TermDictionary, TermStreamer}; +use crate::DocId; +use crate::{Result, SkipResult}; use std::collections::Bound; use std::ops::Range; -use termdict::{TermDictionary, TermStreamer}; -use DocId; -use {Result, SkipResult}; fn map_bound TTo>( bound: &Bound, @@ -225,7 +225,7 @@ impl RangeQuery { } impl Query for RangeQuery { - fn weight(&self, searcher: &Searcher, _scoring_enabled: bool) -> Result> { + fn weight(&self, searcher: &Searcher, _scoring_enabled: bool) -> Result> { let schema = searcher.schema(); let value_type = schema.get_field_entry(self.field).field_type().value_type(); if value_type != self.value_type { @@ -268,7 +268,7 @@ impl RangeWeight { } impl Weight for RangeWeight { - fn scorer(&self, reader: &SegmentReader) -> Result> { + fn scorer(&self, reader: &SegmentReader) -> Result> { let max_doc = reader.max_doc(); let mut doc_bitset = BitSet::with_max_value(max_doc); @@ -302,11 +302,11 @@ impl Weight for RangeWeight { mod tests { use super::RangeQuery; - use collector::Count; - use schema::{Document, Field, Schema, INDEXED}; + use crate::collector::Count; + use crate::schema::{Document, Field, Schema, INDEXED}; + use crate::Index; + use crate::Result; use std::collections::Bound; - use Index; - use Result; #[test] fn test_range_query_simple() { diff --git a/src/query/regex_query.rs b/src/query/regex_query.rs index ec7dcceb7..3296a8df1 100644 --- a/src/query/regex_query.rs +++ b/src/query/regex_query.rs @@ -1,10 +1,10 @@ -use error::TantivyError; -use query::{AutomatonWeight, Query, Weight}; -use schema::Field; +use crate::error::TantivyError; +use crate::query::{AutomatonWeight, Query, Weight}; +use crate::schema::Field; +use crate::Result; +use crate::Searcher; use std::clone::Clone; use tantivy_fst::Regex; -use Result; -use Searcher; // A Regex Query matches all of the documents /// containing a specific term that matches @@ -78,7 +78,7 @@ impl RegexQuery { } impl Query for RegexQuery { - fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result> { + fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result> { Ok(Box::new(self.specialized_weight()?)) } } @@ -86,11 +86,11 @@ impl Query for RegexQuery { #[cfg(test)] mod test { use super::RegexQuery; - use collector::TopDocs; - use schema::Schema; - use schema::TEXT; - use tests::assert_nearly_equals; - use Index; + use crate::collector::TopDocs; + use crate::schema::Schema; + use crate::schema::TEXT; + use crate::tests::assert_nearly_equals; + use crate::Index; #[test] pub fn test_regex_query() { diff --git a/src/query/reqopt_scorer.rs b/src/query/reqopt_scorer.rs index 16b367804..56ff2966d 100644 --- a/src/query/reqopt_scorer.rs +++ b/src/query/reqopt_scorer.rs @@ -1,10 +1,10 @@ -use docset::{DocSet, SkipResult}; -use query::score_combiner::ScoreCombiner; -use query::Scorer; +use crate::docset::{DocSet, SkipResult}; +use crate::query::score_combiner::ScoreCombiner; +use crate::query::Scorer; +use crate::DocId; +use crate::Score; use std::cmp::Ordering; use std::marker::PhantomData; -use DocId; -use Score; /// Given a required scorer and an optional scorer /// matches all document from the required scorer @@ -102,13 +102,13 @@ where #[cfg(test)] mod tests { use super::RequiredOptionalScorer; - use docset::DocSet; - use postings::tests::test_skip_against_unoptimized; - use 
query::score_combiner::{DoNothingCombiner, SumCombiner}; - use query::ConstScorer; - use query::Scorer; - use query::VecDocSet; - use tests::sample_with_seed; + use crate::docset::DocSet; + use crate::postings::tests::test_skip_against_unoptimized; + use crate::query::score_combiner::{DoNothingCombiner, SumCombiner}; + use crate::query::ConstScorer; + use crate::query::Scorer; + use crate::query::VecDocSet; + use crate::tests::sample_with_seed; #[test] fn test_reqopt_scorer_empty() { diff --git a/src/query/score_combiner.rs b/src/query/score_combiner.rs index 74c541b37..2e21343ac 100644 --- a/src/query/score_combiner.rs +++ b/src/query/score_combiner.rs @@ -1,5 +1,5 @@ -use query::Scorer; -use Score; +use crate::query::Scorer; +use crate::Score; /// The `ScoreCombiner` trait defines how to compute /// an overall score given a list of scores. diff --git a/src/query/scorer.rs b/src/query/scorer.rs index 55f9ee1c0..2dbae69bb 100644 --- a/src/query/scorer.rs +++ b/src/query/scorer.rs @@ -1,9 +1,9 @@ -use common::BitSet; -use docset::{DocSet, SkipResult}; +use crate::common::BitSet; +use crate::docset::{DocSet, SkipResult}; +use crate::DocId; +use crate::Score; use downcast_rs; use std::ops::DerefMut; -use DocId; -use Score; /// Scored set of documents matching a query within a specific segment. /// @@ -16,7 +16,7 @@ pub trait Scorer: downcast_rs::Downcast + DocSet + 'static { /// Iterates through all of the document matched by the DocSet /// `DocSet` and push the scored documents to the collector. - fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) { + fn for_each(&mut self, callback: &mut dyn FnMut(DocId, Score)) { while self.advance() { callback(self.doc(), self.score()); } @@ -25,12 +25,12 @@ pub trait Scorer: downcast_rs::Downcast + DocSet + 'static { impl_downcast!(Scorer); -impl Scorer for Box { +impl Scorer for Box { fn score(&mut self) -> Score { self.deref_mut().score() } - fn for_each(&mut self, callback: &mut FnMut(DocId, Score)) { + fn for_each(&mut self, callback: &mut dyn FnMut(DocId, Score)) { let scorer = self.deref_mut(); scorer.for_each(callback); } diff --git a/src/query/term_query/mod.rs b/src/query/term_query/mod.rs index 23db31f7a..3eeb7feb4 100644 --- a/src/query/term_query/mod.rs +++ b/src/query/term_query/mod.rs @@ -9,13 +9,13 @@ pub use self::term_weight::TermWeight; #[cfg(test)] mod tests { - use collector::TopDocs; - use docset::DocSet; - use query::{Query, QueryParser, Scorer, TermQuery}; - use schema::{IndexRecordOption, Schema, STRING, TEXT}; - use tests::assert_nearly_equals; - use Index; - use Term; + use crate::collector::TopDocs; + use crate::docset::DocSet; + use crate::query::{Query, QueryParser, Scorer, TermQuery}; + use crate::schema::{IndexRecordOption, Schema, STRING, TEXT}; + use crate::tests::assert_nearly_equals; + use crate::Index; + use crate::Term; #[test] pub fn test_term_query_no_freq() { diff --git a/src/query/term_query/term_query.rs b/src/query/term_query/term_query.rs index 6dc52acb2..424532cd3 100644 --- a/src/query/term_query/term_query.rs +++ b/src/query/term_query/term_query.rs @@ -1,12 +1,12 @@ use super::term_weight::TermWeight; -use query::bm25::BM25Weight; -use query::Query; -use query::Weight; -use schema::IndexRecordOption; +use crate::query::bm25::BM25Weight; +use crate::query::Query; +use crate::query::Weight; +use crate::schema::IndexRecordOption; +use crate::Result; +use crate::Searcher; +use crate::Term; use std::collections::BTreeSet; -use Result; -use Searcher; -use Term; /// A Term query matches all of the 
documents /// containing a specific term. @@ -99,7 +99,7 @@ impl TermQuery { } impl Query for TermQuery { - fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { + fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result> { Ok(Box::new(self.specialized_weight(searcher, scoring_enabled))) } fn query_terms(&self, term_set: &mut BTreeSet) { diff --git a/src/query/term_query/term_scorer.rs b/src/query/term_query/term_scorer.rs index 7e0d4fc56..e950edbac 100644 --- a/src/query/term_query/term_scorer.rs +++ b/src/query/term_query/term_scorer.rs @@ -1,12 +1,12 @@ -use docset::{DocSet, SkipResult}; -use query::{Explanation, Scorer}; -use DocId; -use Score; +use crate::docset::{DocSet, SkipResult}; +use crate::query::{Explanation, Scorer}; +use crate::DocId; +use crate::Score; -use fieldnorm::FieldNormReader; -use postings::Postings; -use postings::SegmentPostings; -use query::bm25::BM25Weight; +use crate::fieldnorm::FieldNormReader; +use crate::postings::Postings; +use crate::postings::SegmentPostings; +use crate::query::bm25::BM25Weight; pub struct TermScorer { postings: SegmentPostings, diff --git a/src/query/term_query/term_weight.rs b/src/query/term_query/term_weight.rs index 950f6fdf5..7da8dbf78 100644 --- a/src/query/term_query/term_weight.rs +++ b/src/query/term_query/term_weight.rs @@ -1,15 +1,15 @@ use super::term_scorer::TermScorer; -use core::SegmentReader; -use docset::DocSet; -use postings::SegmentPostings; -use query::bm25::BM25Weight; -use query::explanation::does_not_match; -use query::Weight; -use query::{Explanation, Scorer}; -use schema::IndexRecordOption; -use DocId; -use Term; -use {Result, SkipResult}; +use crate::core::SegmentReader; +use crate::docset::DocSet; +use crate::postings::SegmentPostings; +use crate::query::bm25::BM25Weight; +use crate::query::explanation::does_not_match; +use crate::query::Weight; +use crate::query::{Explanation, Scorer}; +use crate::schema::IndexRecordOption; +use crate::DocId; +use crate::Term; +use crate::{Result, SkipResult}; pub struct TermWeight { term: Term, @@ -18,7 +18,7 @@ pub struct TermWeight { } impl Weight for TermWeight { - fn scorer(&self, reader: &SegmentReader) -> Result> { + fn scorer(&self, reader: &SegmentReader) -> Result> { let term_scorer = self.scorer_specialized(reader)?; Ok(Box::new(term_scorer)) } diff --git a/src/query/union.rs b/src/query/union.rs index f636f0576..374949d1e 100644 --- a/src/query/union.rs +++ b/src/query/union.rs @@ -1,10 +1,10 @@ -use common::TinySet; -use docset::{DocSet, SkipResult}; -use query::score_combiner::{DoNothingCombiner, ScoreCombiner}; -use query::Scorer; +use crate::common::TinySet; +use crate::docset::{DocSet, SkipResult}; +use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner}; +use crate::query::Scorer; +use crate::DocId; +use crate::Score; use std::cmp::Ordering; -use DocId; -use Score; const HORIZON_NUM_TINYBITSETS: usize = 64; const HORIZON: u32 = 64u32 * HORIZON_NUM_TINYBITSETS as u32; @@ -267,14 +267,14 @@ mod tests { use super::Union; use super::HORIZON; - use docset::{DocSet, SkipResult}; - use postings::tests::test_skip_against_unoptimized; - use query::score_combiner::DoNothingCombiner; - use query::ConstScorer; - use query::VecDocSet; + use crate::docset::{DocSet, SkipResult}; + use crate::postings::tests::test_skip_against_unoptimized; + use crate::query::score_combiner::DoNothingCombiner; + use crate::query::ConstScorer; + use crate::query::VecDocSet; + use crate::tests; + use crate::DocId; use std::collections::BTreeSet; - 
use tests; - use DocId; fn aux_test_union(vals: Vec>) { let mut val_set: BTreeSet = BTreeSet::new(); @@ -334,7 +334,7 @@ mod tests { } } let docset_factory = || { - let res: Box = Box::new(Union::<_, DoNothingCombiner>::from( + let res: Box = Box::new(Union::<_, DoNothingCombiner>::from( docs_list .iter() .map(|docs| docs.clone()) diff --git a/src/query/vec_docset.rs b/src/query/vec_docset.rs index 906d3021a..f06840127 100644 --- a/src/query/vec_docset.rs +++ b/src/query/vec_docset.rs @@ -1,9 +1,9 @@ #![allow(dead_code)] -use common::HasLen; -use docset::DocSet; +use crate::common::HasLen; +use crate::docset::DocSet; +use crate::DocId; use std::num::Wrapping; -use DocId; /// Simulate a `Postings` objects from a `VecPostings`. /// `VecPostings` only exist for testing purposes. @@ -49,8 +49,8 @@ impl HasLen for VecDocSet { pub mod tests { use super::*; - use docset::{DocSet, SkipResult}; - use DocId; + use crate::docset::{DocSet, SkipResult}; + use crate::DocId; #[test] pub fn test_vec_postings() { diff --git a/src/query/weight.rs b/src/query/weight.rs index b0e65f8bd..c51fea554 100644 --- a/src/query/weight.rs +++ b/src/query/weight.rs @@ -1,7 +1,7 @@ use super::Scorer; -use core::SegmentReader; -use query::Explanation; -use {DocId, Result}; +use crate::core::SegmentReader; +use crate::query::Explanation; +use crate::{DocId, Result}; /// A Weight is the specialization of a Query /// for a given set of segments. @@ -10,7 +10,7 @@ use {DocId, Result}; pub trait Weight: Send + Sync + 'static { /// Returns the scorer for the given segment. /// See [`Query`](./trait.Query.html). - fn scorer(&self, reader: &SegmentReader) -> Result>; + fn scorer(&self, reader: &SegmentReader) -> Result>; /// Returns an `Explanation` for the given document. fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result; diff --git a/src/reader/mod.rs b/src/reader/mod.rs index 67d0ba52c..cea65ca43 100644 --- a/src/reader/mod.rs +++ b/src/reader/mod.rs @@ -1,15 +1,15 @@ mod pool; use self::pool::{LeasedItem, Pool}; -use core::Segment; -use directory::Directory; -use directory::WatchHandle; -use directory::META_LOCK; +use crate::core::Segment; +use crate::directory::Directory; +use crate::directory::WatchHandle; +use crate::directory::META_LOCK; +use crate::Index; +use crate::Result; +use crate::Searcher; +use crate::SegmentReader; use std::sync::Arc; -use Index; -use Result; -use Searcher; -use SegmentReader; /// Defines when a new version of the index should be reloaded. /// diff --git a/src/schema/document.rs b/src/schema/document.rs index 687baef1b..678970f62 100644 --- a/src/schema/document.rs +++ b/src/schema/document.rs @@ -1,9 +1,9 @@ use super::*; -use common::BinarySerializable; -use common::VInt; +use crate::common::BinarySerializable; +use crate::common::VInt; +use crate::DateTime; use itertools::Itertools; use std::io::{self, Read, Write}; -use DateTime; /// Tantivy's Document is the object that can /// be indexed and then searched for. 
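The `Scorer::for_each` hunk in src/query/scorer.rs, a few files above, applies `dyn` to closure trait objects as well: `&mut FnMut(DocId, Score)` becomes `&mut dyn FnMut(DocId, Score)`. A runnable sketch of that callback style, with trimmed-down traits:

    type DocId = u32;
    type Score = f32;

    trait DocSet {
        fn advance(&mut self) -> bool;
        fn doc(&self) -> DocId;
    }

    trait Scorer: DocSet {
        fn score(&mut self) -> Score;

        // A closure passed by reference is a trait object too,
        // so 2018 asks for `dyn FnMut`.
        fn for_each(&mut self, callback: &mut dyn FnMut(DocId, Score)) {
            while self.advance() {
                let (doc, score) = (self.doc(), self.score());
                callback(doc, score);
            }
        }
    }

    struct OneDoc { done: bool }
    impl DocSet for OneDoc {
        fn advance(&mut self) -> bool { !std::mem::replace(&mut self.done, true) }
        fn doc(&self) -> DocId { 42 }
    }
    impl Scorer for OneDoc {
        fn score(&mut self) -> Score { 1.0 }
    }

    fn main() {
        let mut hits = Vec::new();
        OneDoc { done: false }.for_each(&mut |d, s| hits.push((d, s)));
        assert_eq!(hits, vec![(42, 1.0)]);
    }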
@@ -163,7 +163,7 @@ impl BinarySerializable for Document { #[cfg(test)] mod tests { - use schema::*; + use crate::schema::*; #[test] fn test_doc() { diff --git a/src/schema/facet.rs b/src/schema/facet.rs index c6efe5ae2..28d2ea81d 100644 --- a/src/schema/facet.rs +++ b/src/schema/facet.rs @@ -1,4 +1,4 @@ -use common::BinarySerializable; +use crate::common::BinarySerializable; use regex::Regex; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::borrow::Borrow; @@ -173,7 +173,7 @@ impl BinarySerializable for Facet { } impl Display for Facet { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { for step in self.0.split(FACET_SEP_CHAR) { write!(f, "/")?; write!(f, "{}", escape_slashes(step))?; @@ -182,7 +182,7 @@ impl Display for Facet { } } -fn escape_slashes(s: &str) -> Cow { +fn escape_slashes(s: &str) -> Cow<'_, str> { lazy_static! { static ref SLASH_PTN: Regex = Regex::new(r"[\\/]").unwrap(); } @@ -208,7 +208,7 @@ impl<'de> Deserialize<'de> for Facet { } impl Debug for Facet { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "Facet({})", self)?; Ok(()) } diff --git a/src/schema/field.rs b/src/schema/field.rs index 557bf36e4..73e55296a 100644 --- a/src/schema/field.rs +++ b/src/schema/field.rs @@ -1,4 +1,4 @@ -use common::BinarySerializable; +use crate::common::BinarySerializable; use std::io; use std::io::Read; use std::io::Write; diff --git a/src/schema/field_entry.rs b/src/schema/field_entry.rs index 89a8b251d..8c47d02ba 100644 --- a/src/schema/field_entry.rs +++ b/src/schema/field_entry.rs @@ -1,7 +1,7 @@ -use schema::IntOptions; -use schema::TextOptions; +use crate::schema::IntOptions; +use crate::schema::TextOptions; -use schema::FieldType; +use crate::schema::FieldType; use serde::de::{self, MapAccess, Visitor}; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -174,7 +174,7 @@ impl<'de> Deserialize<'de> for FieldEntry { impl<'de> Visitor<'de> for FieldEntryVisitor { type Value = FieldEntry; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("struct FieldEntry") } @@ -247,7 +247,7 @@ impl<'de> Deserialize<'de> for FieldEntry { #[cfg(test)] mod tests { use super::*; - use schema::TEXT; + use crate::schema::TEXT; use serde_json; #[test] diff --git a/src/schema/field_type.rs b/src/schema/field_type.rs index 561ba3f81..d30436db3 100644 --- a/src/schema/field_type.rs +++ b/src/schema/field_type.rs @@ -1,11 +1,11 @@ use base64::decode; -use schema::{IntOptions, TextOptions}; +use crate::schema::{IntOptions, TextOptions}; -use schema::Facet; -use schema::IndexRecordOption; -use schema::TextFieldIndexing; -use schema::Value; +use crate::schema::Facet; +use crate::schema::IndexRecordOption; +use crate::schema::TextFieldIndexing; +use crate::schema::Value; use serde_json::Value as JsonValue; /// Possible error that may occur while parsing a field value @@ -166,8 +166,8 @@ impl FieldType { #[cfg(test)] mod tests { use super::FieldType; - use schema::field_type::ValueParsingError; - use schema::Value; + use crate::schema::field_type::ValueParsingError; + use crate::schema::Value; #[test] fn test_bytes_value_from_json() { diff --git a/src/schema/field_value.rs b/src/schema/field_value.rs index eb10c7e6e..78b8b9613 100644 --- a/src/schema/field_value.rs +++ b/src/schema/field_value.rs @@ -1,6 
+1,6 @@ -use common::BinarySerializable; -use schema::Field; -use schema::Value; +use crate::common::BinarySerializable; +use crate::schema::Field; +use crate::schema::Value; use std::io; use std::io::Read; use std::io::Write; diff --git a/src/schema/flags.rs b/src/schema/flags.rs index 104df2b33..33be5f612 100644 --- a/src/schema/flags.rs +++ b/src/schema/flags.rs @@ -1,5 +1,5 @@ -use schema::IntOptions; -use schema::TextOptions; +use crate::schema::IntOptions; +use crate::schema::TextOptions; use std::ops::BitOr; #[derive(Clone)] diff --git a/src/schema/int_options.rs b/src/schema/int_options.rs index a95f236c3..19131d766 100644 --- a/src/schema/int_options.rs +++ b/src/schema/int_options.rs @@ -1,4 +1,4 @@ -use schema::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag}; +use crate::schema::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag}; use std::ops::BitOr; /// Express whether a field is single-value or multi-valued. diff --git a/src/schema/named_field_document.rs b/src/schema/named_field_document.rs index c971499d5..4db8ab0e3 100644 --- a/src/schema/named_field_document.rs +++ b/src/schema/named_field_document.rs @@ -1,4 +1,4 @@ -use schema::Value; +use crate::schema::Value; use std::collections::BTreeMap; /// Internal representation of a document used for JSON diff --git a/src/schema/schema.rs b/src/schema/schema.rs index bacfd7417..781acf5e5 100644 --- a/src/schema/schema.rs +++ b/src/schema/schema.rs @@ -1,4 +1,4 @@ -use schema::field_type::ValueParsingError; +use crate::schema::field_type::ValueParsingError; use std::collections::BTreeMap; use std::collections::HashMap; use std::sync::Arc; @@ -313,7 +313,7 @@ impl<'de> Deserialize<'de> for Schema { impl<'de> Visitor<'de> for SchemaVisitor { type Value = Schema; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("struct Schema") } @@ -353,9 +353,9 @@ pub enum DocParsingError { #[cfg(test)] mod tests { - use schema::field_type::ValueParsingError; - use schema::schema::DocParsingError::NotJSON; - use schema::*; + use crate::schema::field_type::ValueParsingError; + use crate::schema::schema::DocParsingError::NotJSON; + use crate::schema::*; use serde_json; #[test] diff --git a/src/schema/term.rs b/src/schema/term.rs index a7b450fa6..7c85c89c3 100644 --- a/src/schema/term.rs +++ b/src/schema/term.rs @@ -1,11 +1,11 @@ use std::fmt; use super::Field; +use crate::common; +use crate::schema::Facet; +use crate::DateTime; use byteorder::{BigEndian, ByteOrder}; -use common; -use schema::Facet; use std::str; -use DateTime; /// Size (in bytes) of the buffer of a int field. 
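The repeated `fmt::Formatter` edits in facet.rs and schema.rs above are the third mechanical pattern: 2018's `elided_lifetimes_in_paths` style writes the previously invisible lifetime as `<'_>`. Nothing changes semantically, as this small sketch with a toy `Facet` wrapper shows:

    use std::fmt;

    struct Facet(String);

    impl fmt::Display for Facet {
        // 2015 wrote `f: &mut fmt::Formatter`; the `<'_>` only makes
        // the elided lifetime visible.
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "Facet({})", self.0)
        }
    }

    fn main() {
        assert_eq!(Facet("/lang/rust".to_string()).to_string(), "Facet(/lang/rust)");
    }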
const INT_TERM_LEN: usize = 4 + 8; @@ -197,7 +197,7 @@ where } impl fmt::Debug for Term { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Term({:?})", &self.0[..]) } } @@ -205,7 +205,7 @@ impl fmt::Debug for Term { #[cfg(test)] mod tests { - use schema::*; + use crate::schema::*; #[test] pub fn test_term() { diff --git a/src/schema/text_options.rs b/src/schema/text_options.rs index 78f58819a..11ab8accd 100644 --- a/src/schema/text_options.rs +++ b/src/schema/text_options.rs @@ -1,6 +1,6 @@ -use schema::flags::SchemaFlagList; -use schema::flags::StoredFlag; -use schema::IndexRecordOption; +use crate::schema::flags::SchemaFlagList; +use crate::schema::flags::StoredFlag; +use crate::schema::IndexRecordOption; use std::borrow::Cow; use std::ops::BitOr; @@ -151,7 +151,7 @@ where #[cfg(test)] mod tests { - use schema::*; + use crate::schema::*; #[test] fn test_field_options() { diff --git a/src/schema/value.rs b/src/schema/value.rs index cb27c64b3..bb576e982 100644 --- a/src/schema/value.rs +++ b/src/schema/value.rs @@ -1,8 +1,8 @@ -use schema::Facet; +use crate::schema::Facet; +use crate::DateTime; use serde::de::Visitor; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -use DateTime; /// Value represents the value of any field. /// It is an enum over all of the possible field types. @@ -48,7 +48,7 @@ impl<'de> Deserialize<'de> for Value { impl<'de> Visitor<'de> for ValueVisitor { type Value = Value; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a string or u32") } @@ -163,9 +163,9 @@ impl From> for Value { mod binary_serialize { use super::Value; + use crate::common::BinarySerializable; + use crate::schema::Facet; use chrono::{TimeZone, Utc}; - use common::BinarySerializable; - use schema::Facet; use std::io::{self, Read, Write}; const TEXT_CODE: u8 = 0; diff --git a/src/snippet/mod.rs b/src/snippet/mod.rs index 3a8e46a5c..1b1a08012 100644 --- a/src/snippet/mod.rs +++ b/src/snippet/mod.rs @@ -1,15 +1,15 @@ +use crate::query::Query; +use crate::schema::Field; +use crate::schema::Value; +use crate::tokenizer::BoxedTokenizer; +use crate::tokenizer::{Token, TokenStream}; +use crate::Document; +use crate::Result; +use crate::Searcher; use htmlescape::encode_minimal; -use query::Query; -use schema::Field; -use schema::Value; use std::cmp::Ordering; use std::collections::BTreeMap; use std::collections::BTreeSet; -use tokenizer::BoxedTokenizer; -use tokenizer::{Token, TokenStream}; -use Document; -use Result; -use Searcher; const DEFAULT_MAX_NUM_CHARS: usize = 150; @@ -142,7 +142,7 @@ impl Snippet { /// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\ /// has to be a valid string.
fn search_fragments<'a>( - tokenizer: &BoxedTokenizer, + tokenizer: &dyn BoxedTokenizer, text: &'a str, terms: &BTreeMap, max_num_chars: usize, @@ -254,14 +254,18 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str) /// ``` pub struct SnippetGenerator { terms_text: BTreeMap, - tokenizer: Box, + tokenizer: Box, field: Field, max_num_chars: usize, } impl SnippetGenerator { /// Creates a new snippet generator - pub fn create(searcher: &Searcher, query: &Query, field: Field) -> Result { + pub fn create( + searcher: &Searcher, + query: &dyn Query, + field: Field, + ) -> Result { let mut terms = BTreeSet::new(); query.query_terms(&mut terms); let terms_text: BTreeMap = terms @@ -325,13 +329,13 @@ impl SnippetGenerator { #[cfg(test)] mod tests { use super::{search_fragments, select_best_fragment_combination}; - use query::QueryParser; - use schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions, TEXT}; + use crate::query::QueryParser; + use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions, TEXT}; + use crate::tokenizer::{box_tokenizer, SimpleTokenizer}; + use crate::Index; + use crate::SnippetGenerator; use std::collections::BTreeMap; use std::iter::Iterator; - use tokenizer::{box_tokenizer, SimpleTokenizer}; - use Index; - use SnippetGenerator; const TEST_TEXT: &'static str = r#"Rust is a systems programming language sponsored by Mozilla which diff --git a/src/space_usage/mod.rs b/src/space_usage/mod.rs index cccca508d..93c1c953e 100644 --- a/src/space_usage/mod.rs +++ b/src/space_usage/mod.rs @@ -9,9 +9,9 @@ details into consideration. For example, if your file system block size is 4096 under-count actual resultant space usage by up to 4095 bytes per file. */ -use schema::Field; +use crate::schema::Field; +use crate::SegmentComponent; use std::collections::HashMap; -use SegmentComponent; /// Indicates space usage in bytes pub type ByteCount = usize; @@ -119,7 +119,7 @@ impl SegmentSpaceUsage { /// Use the components directly if this is somehow in performance critical code. 
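`SnippetGenerator::create` above now takes `query: &dyn Query`. Passing the trait object by reference keeps the constructor non-generic, so one compiled copy serves every query type at the cost of dynamic dispatch. A sketch under toy types:

    trait Query {
        fn terms(&self) -> Vec<String>;
    }

    struct TermQuery(String);
    impl Query for TermQuery {
        fn terms(&self) -> Vec<String> { vec![self.0.clone()] }
    }

    // 2015 wrote `query: &Query`; 2018 requires `&dyn Query`.
    fn collect_terms(query: &dyn Query) -> Vec<String> {
        query.terms()
    }

    fn main() {
        assert_eq!(collect_terms(&TermQuery("sea".into())), vec!["sea"]);
    }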
pub fn component(&self, component: SegmentComponent) -> ComponentSpaceUsage { use self::ComponentSpaceUsage::*; - use SegmentComponent::*; + use crate::SegmentComponent::*; match component { POSTINGS => PerField(self.postings().clone()), POSITIONS => PerField(self.positions().clone()), @@ -292,13 +292,13 @@ impl FieldUsage { #[cfg(test)] mod test { - use core::Index; - use schema::Field; - use schema::Schema; - use schema::{FAST, INDEXED, STORED, TEXT}; - use space_usage::ByteCount; - use space_usage::PerFieldSpaceUsage; - use Term; + use crate::core::Index; + use crate::schema::Field; + use crate::schema::Schema; + use crate::schema::{FAST, INDEXED, STORED, TEXT}; + use crate::space_usage::ByteCount; + use crate::space_usage::PerFieldSpaceUsage; + use crate::Term; #[test] fn test_empty() { diff --git a/src/store/compression_snap.rs b/src/store/compression_snap.rs index eaf3ad444..b5cc2ded9 100644 --- a/src/store/compression_snap.rs +++ b/src/store/compression_snap.rs @@ -1,4 +1,4 @@ -extern crate snap; +use snap; use std::io::{self, Read, Write}; diff --git a/src/store/mod.rs b/src/store/mod.rs index 5f01fb277..9f9d5aac0 100644 --- a/src/store/mod.rs +++ b/src/store/mod.rs @@ -53,11 +53,11 @@ use self::compression_snap::*; pub mod tests { use super::*; - use directory::{Directory, RAMDirectory, WritePtr}; - use schema::Document; - use schema::FieldValue; - use schema::Schema; - use schema::TextOptions; + use crate::directory::{Directory, RAMDirectory, WritePtr}; + use crate::schema::Document; + use crate::schema::FieldValue; + use crate::schema::Schema; + use crate::schema::TextOptions; use std::path::Path; pub fn write_lorem_ipsum_store(writer: WritePtr, num_docs: usize) -> Schema { diff --git a/src/store/reader.rs b/src/store/reader.rs index 31ae1746c..edad843a0 100644 --- a/src/store/reader.rs +++ b/src/store/reader.rs @@ -1,16 +1,16 @@ -use Result; +use crate::Result; use super::decompress; use super::skiplist::SkipList; -use common::BinarySerializable; -use common::VInt; -use directory::ReadOnlySource; -use schema::Document; -use space_usage::StoreSpaceUsage; +use crate::common::BinarySerializable; +use crate::common::VInt; +use crate::directory::ReadOnlySource; +use crate::schema::Document; +use crate::space_usage::StoreSpaceUsage; +use crate::DocId; use std::cell::RefCell; use std::io; use std::mem::size_of; -use DocId; /// Reads document off tantivy's [`Store`](./index.html) #[derive(Clone)] @@ -35,7 +35,7 @@ impl StoreReader { } } - pub(crate) fn block_index(&self) -> SkipList { + pub(crate) fn block_index(&self) -> SkipList<'_, u64> { SkipList::from(self.offset_index_source.as_slice()) } diff --git a/src/store/skiplist/mod.rs b/src/store/skiplist/mod.rs index bed5f61eb..29dfb4bea 100644 --- a/src/store/skiplist/mod.rs +++ b/src/store/skiplist/mod.rs @@ -17,7 +17,7 @@ mod tests { let mut skip_list_builder: SkipListBuilder = SkipListBuilder::new(8); skip_list_builder.insert(2, &3).unwrap(); skip_list_builder.write::>(&mut output).unwrap(); - let mut skip_list: SkipList = SkipList::from(output.as_slice()); + let mut skip_list: SkipList<'_, u32> = SkipList::from(output.as_slice()); assert_eq!(skip_list.next(), Some((2, 3))); } @@ -26,7 +26,7 @@ mod tests { let mut output: Vec = Vec::new(); let skip_list_builder: SkipListBuilder = SkipListBuilder::new(8); skip_list_builder.write::>(&mut output).unwrap(); - let mut skip_list: SkipList = SkipList::from(output.as_slice()); + let mut skip_list: SkipList<'_, u32> = SkipList::from(output.as_slice()); assert_eq!(skip_list.next(), None); } 
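The `SkipList<u32>` to `SkipList<'_, u32>` edits in these tests make the skip list's borrow of its input buffer visible at the use site. A minimal sketch of a borrowing reader annotated the same way (simplified; the real `SkipList` decodes serialized layers rather than walking a slice):

    struct SkipList<'a, T> {
        data: &'a [T],
        cursor: usize,
    }

    impl<'a, T: Copy> SkipList<'a, T> {
        fn from(data: &'a [T]) -> Self {
            SkipList { data, cursor: 0 }
        }
        fn next(&mut self) -> Option<T> {
            let item = self.data.get(self.cursor).copied();
            self.cursor += 1;
            item
        }
    }

    fn main() {
        let output = vec![2u32, 3, 5];
        // `<'_, u32>` acknowledges the borrow without naming it.
        let mut skip_list: SkipList<'_, u32> = SkipList::from(output.as_slice());
        assert_eq!(skip_list.next(), Some(2));
    }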
@@ -40,7 +40,7 @@ mod tests { skip_list_builder.insert(7, &()).unwrap(); skip_list_builder.insert(9, &()).unwrap(); skip_list_builder.write::>(&mut output).unwrap(); - let mut skip_list: SkipList<()> = SkipList::from(output.as_slice()); + let mut skip_list: SkipList<'_, ()> = SkipList::from(output.as_slice()); assert_eq!(skip_list.next().unwrap(), (2, ())); assert_eq!(skip_list.next().unwrap(), (3, ())); assert_eq!(skip_list.next().unwrap(), (5, ())); @@ -59,7 +59,7 @@ mod tests { skip_list_builder.insert(7, &()).unwrap(); skip_list_builder.insert(9, &()).unwrap(); skip_list_builder.write::>(&mut output).unwrap(); - let mut skip_list: SkipList<()> = SkipList::from(output.as_slice()); + let mut skip_list: SkipList<'_, ()> = SkipList::from(output.as_slice()); assert_eq!(skip_list.next().unwrap(), (2, ())); skip_list.seek(5); assert_eq!(skip_list.next().unwrap(), (5, ())); @@ -77,7 +77,7 @@ mod tests { skip_list_builder.insert(5, &()).unwrap(); skip_list_builder.insert(6, &()).unwrap(); skip_list_builder.write::>(&mut output).unwrap(); - let mut skip_list: SkipList<()> = SkipList::from(output.as_slice()); + let mut skip_list: SkipList<'_, ()> = SkipList::from(output.as_slice()); assert_eq!(skip_list.next().unwrap(), (2, ())); skip_list.seek(6); assert_eq!(skip_list.next().unwrap(), (6, ())); @@ -94,7 +94,7 @@ mod tests { skip_list_builder.insert(7, &()).unwrap(); skip_list_builder.insert(9, &()).unwrap(); skip_list_builder.write::>(&mut output).unwrap(); - let mut skip_list: SkipList<()> = SkipList::from(output.as_slice()); + let mut skip_list: SkipList<'_, ()> = SkipList::from(output.as_slice()); assert_eq!(skip_list.next().unwrap(), (2, ())); skip_list.seek(10); assert_eq!(skip_list.next(), None); @@ -109,7 +109,7 @@ mod tests { } skip_list_builder.insert(1004, &()).unwrap(); skip_list_builder.write::>(&mut output).unwrap(); - let mut skip_list: SkipList<()> = SkipList::from(output.as_slice()); + let mut skip_list: SkipList<'_, ()> = SkipList::from(output.as_slice()); assert_eq!(skip_list.next().unwrap(), (0, ())); skip_list.seek(431); assert_eq!(skip_list.next().unwrap(), (431, ())); diff --git a/src/store/skiplist/skiplist.rs b/src/store/skiplist/skiplist.rs index d7e8ed6a5..f5cbde036 100644 --- a/src/store/skiplist/skiplist.rs +++ b/src/store/skiplist/skiplist.rs @@ -1,4 +1,4 @@ -use common::{BinarySerializable, VInt}; +use crate::common::{BinarySerializable, VInt}; use std::cmp::max; use std::marker::PhantomData; diff --git a/src/store/skiplist/skiplist_builder.rs b/src/store/skiplist/skiplist_builder.rs index 61f04bf34..7c3d8fe1f 100644 --- a/src/store/skiplist/skiplist_builder.rs +++ b/src/store/skiplist/skiplist_builder.rs @@ -1,4 +1,4 @@ -use common::{is_power_of_2, BinarySerializable, VInt}; +use crate::common::{is_power_of_2, BinarySerializable, VInt}; use std::io; use std::io::Write; use std::marker::PhantomData; @@ -15,7 +15,7 @@ impl LayerBuilder { self.buffer.len() } - fn write(&self, output: &mut Write) -> Result<(), io::Error> { + fn write(&self, output: &mut dyn Write) -> Result<(), io::Error> { output.write_all(&self.buffer)?; Ok(()) } diff --git a/src/store/writer.rs b/src/store/writer.rs index 4693b8900..bcb74f99d 100644 --- a/src/store/writer.rs +++ b/src/store/writer.rs @@ -1,12 +1,12 @@ use super::compress; use super::skiplist::SkipListBuilder; use super::StoreReader; -use common::CountingWriter; -use common::{BinarySerializable, VInt}; -use directory::WritePtr; -use schema::Document; +use crate::common::CountingWriter; +use crate::common::{BinarySerializable, VInt}; 
+use crate::directory::WritePtr; +use crate::schema::Document; +use crate::DocId; use std::io::{self, Write}; -use DocId; const BLOCK_SIZE: usize = 16_384; diff --git a/src/termdict/merger.rs b/src/termdict/merger.rs index 407a49e90..bf566b4ae 100644 --- a/src/termdict/merger.rs +++ b/src/termdict/merger.rs @@ -1,8 +1,8 @@ -use schema::Term; +use crate::schema::Term; +use crate::termdict::TermOrdinal; +use crate::termdict::TermStreamer; use std::cmp::Ordering; use std::collections::BinaryHeap; -use termdict::TermOrdinal; -use termdict::TermStreamer; pub struct HeapItem<'a> { pub streamer: TermStreamer<'a>, @@ -60,7 +60,7 @@ impl<'a> TermMerger<'a> { pub(crate) fn matching_segments<'b: 'a>( &'b self, - ) -> Box<'b + Iterator> { + ) -> Box> { Box::new( self.current_streamers .iter() diff --git a/src/termdict/mod.rs b/src/termdict/mod.rs index 89a98f072..c1c387a1b 100644 --- a/src/termdict/mod.rs +++ b/src/termdict/mod.rs @@ -32,10 +32,10 @@ pub use self::termdict::{TermDictionary, TermDictionaryBuilder}; #[cfg(test)] mod tests { use super::{TermDictionary, TermDictionaryBuilder, TermStreamer}; - use core::Index; - use directory::{Directory, RAMDirectory, ReadOnlySource}; - use postings::TermInfo; - use schema::{Document, FieldType, Schema, TEXT}; + use crate::core::Index; + use crate::directory::{Directory, RAMDirectory, ReadOnlySource}; + use crate::postings::TermInfo; + use crate::schema::{Document, FieldType, Schema, TEXT}; use std::path::PathBuf; use std::str; @@ -349,7 +349,7 @@ mod tests { let source = ReadOnlySource::from(buffer); let term_dictionary: TermDictionary = TermDictionary::from_source(&source); - let value_list = |mut streamer: TermStreamer| { + let value_list = |mut streamer: TermStreamer<'_>| { let mut res: Vec = vec![]; while let Some((_, ref v)) = streamer.next() { res.push(v.doc_freq); diff --git a/src/termdict/streamer.rs b/src/termdict/streamer.rs index 6dce8b78d..e5aecc1d6 100644 --- a/src/termdict/streamer.rs +++ b/src/termdict/streamer.rs @@ -1,10 +1,10 @@ use super::TermDictionary; -use postings::TermInfo; +use crate::postings::TermInfo; +use crate::termdict::TermOrdinal; use tantivy_fst::automaton::AlwaysMatch; use tantivy_fst::map::{Stream, StreamBuilder}; use tantivy_fst::Automaton; use tantivy_fst::{IntoStreamer, Streamer}; -use termdict::TermOrdinal; /// `TermStreamerBuilder` is a helper object used to define /// a range of terms that should be streamed. 
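`TermMerger::matching_segments` above carries the one genuinely syntactic rewrite in this patch: the 2015 ordering `Box<'b + Iterator<...>>` is gone, and the lifetime bound now follows the trait, `Box<dyn Iterator<...> + 'b>`. A sketch with a toy item type (tantivy's real iterator yields richer per-segment data):

    struct TermMerger<'a> {
        segment_ords: &'a [usize],
    }

    impl<'a> TermMerger<'a> {
        // The `+ 'b` bound records that the boxed iterator borrows from `self`.
        fn matching_segments<'b: 'a>(&'b self) -> Box<dyn Iterator<Item = usize> + 'b> {
            Box::new(self.segment_ords.iter().copied())
        }
    }

    fn main() {
        let ords = [0usize, 2, 3];
        let merger = TermMerger { segment_ords: &ords };
        assert_eq!(merger.matching_segments().collect::<Vec<_>>(), vec![0, 2, 3]);
    }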
diff --git a/src/termdict/term_info_store.rs b/src/termdict/term_info_store.rs index caac6fef7..b859ccf77 100644 --- a/src/termdict/term_info_store.rs +++ b/src/termdict/term_info_store.rs @@ -1,13 +1,13 @@ +use crate::common::bitpacker::BitPacker; +use crate::common::compute_num_bits; +use crate::common::Endianness; +use crate::common::{BinarySerializable, FixedSize}; +use crate::directory::ReadOnlySource; +use crate::postings::TermInfo; +use crate::termdict::TermOrdinal; use byteorder::{ByteOrder, LittleEndian}; -use common::bitpacker::BitPacker; -use common::compute_num_bits; -use common::Endianness; -use common::{BinarySerializable, FixedSize}; -use directory::ReadOnlySource; -use postings::TermInfo; use std::cmp; use std::io::{self, Read, Write}; -use termdict::TermOrdinal; const BLOCK_LEN: usize = 256; @@ -259,12 +259,12 @@ mod tests { use super::extract_bits; use super::TermInfoBlockMeta; use super::{TermInfoStore, TermInfoStoreWriter}; - use common; - use common::bitpacker::BitPacker; - use common::compute_num_bits; - use common::BinarySerializable; - use directory::ReadOnlySource; - use postings::TermInfo; + use crate::common; + use crate::common::bitpacker::BitPacker; + use crate::common::compute_num_bits; + use crate::common::BinarySerializable; + use crate::directory::ReadOnlySource; + use crate::postings::TermInfo; #[test] fn test_term_info_block() { diff --git a/src/termdict/termdict.rs b/src/termdict/termdict.rs index 43ab2eac5..6bd47ee62 100644 --- a/src/termdict/termdict.rs +++ b/src/termdict/termdict.rs @@ -1,15 +1,15 @@ use super::term_info_store::{TermInfoStore, TermInfoStoreWriter}; use super::{TermStreamer, TermStreamerBuilder}; -use common::BinarySerializable; -use common::CountingWriter; -use directory::ReadOnlySource; -use postings::TermInfo; -use schema::FieldType; +use crate::common::BinarySerializable; +use crate::common::CountingWriter; +use crate::directory::ReadOnlySource; +use crate::postings::TermInfo; +use crate::schema::FieldType; +use crate::termdict::TermOrdinal; use std::io::{self, Write}; use tantivy_fst; use tantivy_fst::raw::Fst; use tantivy_fst::Automaton; -use termdict::TermOrdinal; fn convert_fst_error(e: tantivy_fst::Error) -> io::Error { io::Error::new(io::ErrorKind::Other, e) @@ -186,12 +186,12 @@ impl TermDictionary { /// Returns a range builder, to stream all of the terms /// within an interval. - pub fn range(&self) -> TermStreamerBuilder { + pub fn range(&self) -> TermStreamerBuilder<'_> { TermStreamerBuilder::new(self, self.fst_index.range()) } /// A stream of all the sorted terms. 
    /// [See also `.stream_field()`](#method.stream_field)
-    pub fn stream(&self) -> TermStreamer {
+    pub fn stream(&self) -> TermStreamer<'_> {
        self.range().into_stream()
    }
diff --git a/src/tokenizer/ascii_folding_filter.rs b/src/tokenizer/ascii_folding_filter.rs
index cbd124a8b..7193911ac 100644
--- a/src/tokenizer/ascii_folding_filter.rs
+++ b/src/tokenizer/ascii_folding_filter.rs
@@ -1558,12 +1558,12 @@ fn to_ascii(text: &mut String, output: &mut String) {
 #[cfg(test)]
 mod tests {
     use super::to_ascii;
+    use crate::tokenizer::AsciiFoldingFilter;
+    use crate::tokenizer::RawTokenizer;
+    use crate::tokenizer::SimpleTokenizer;
+    use crate::tokenizer::TokenStream;
+    use crate::tokenizer::Tokenizer;
     use std::iter;
-    use tokenizer::AsciiFoldingFilter;
-    use tokenizer::RawTokenizer;
-    use tokenizer::SimpleTokenizer;
-    use tokenizer::TokenStream;
-    use tokenizer::Tokenizer;
 
     #[test]
     fn test_ascii_folding() {
diff --git a/src/tokenizer/facet_tokenizer.rs b/src/tokenizer/facet_tokenizer.rs
index 31d2701a4..24f26589e 100644
--- a/src/tokenizer/facet_tokenizer.rs
+++ b/src/tokenizer/facet_tokenizer.rs
@@ -1,5 +1,5 @@
 use super::{Token, TokenStream, Tokenizer};
-use schema::FACET_SEP_BYTE;
+use crate::schema::FACET_SEP_BYTE;
 
 /// The `FacetTokenizer` processes a `Facet` binary representation
 /// and emits a token for each of its parents.
@@ -83,8 +83,8 @@ impl<'a> TokenStream for FacetTokenStream<'a> {
 mod tests {
 
     use super::FacetTokenizer;
-    use schema::Facet;
-    use tokenizer::{Token, TokenStream, Tokenizer};
+    use crate::schema::Facet;
+    use crate::tokenizer::{Token, TokenStream, Tokenizer};
 
     #[test]
     fn test_facet_tokenizer() {
diff --git a/src/tokenizer/lower_caser.rs b/src/tokenizer/lower_caser.rs
index 058c056a5..b1d1ead29 100644
--- a/src/tokenizer/lower_caser.rs
+++ b/src/tokenizer/lower_caser.rs
@@ -72,10 +72,10 @@ where
 #[cfg(test)]
 mod tests {
-    use tokenizer::LowerCaser;
-    use tokenizer::SimpleTokenizer;
-    use tokenizer::TokenStream;
-    use tokenizer::Tokenizer;
+    use crate::tokenizer::LowerCaser;
+    use crate::tokenizer::SimpleTokenizer;
+    use crate::tokenizer::TokenStream;
+    use crate::tokenizer::Tokenizer;
 
     #[test]
     fn test_to_lower_case() {
diff --git a/src/tokenizer/ngram_tokenizer.rs b/src/tokenizer/ngram_tokenizer.rs
index a0c53b15d..465e03a5d 100644
--- a/src/tokenizer/ngram_tokenizer.rs
+++ b/src/tokenizer/ngram_tokenizer.rs
@@ -309,9 +309,9 @@ mod tests {
     use super::CodepointFrontiers;
     use super::NgramTokenizer;
     use super::StutteringIterator;
-    use tokenizer::tests::assert_token;
-    use tokenizer::tokenizer::{TokenStream, Tokenizer};
-    use tokenizer::Token;
+    use crate::tokenizer::tests::assert_token;
+    use crate::tokenizer::tokenizer::{TokenStream, Tokenizer};
+    use crate::tokenizer::Token;
 
     fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
         let mut tokens: Vec<Token> = vec![];
diff --git a/src/tokenizer/token_stream_chain.rs b/src/tokenizer/token_stream_chain.rs
index 224d7746c..a0598571a 100644
--- a/src/tokenizer/token_stream_chain.rs
+++ b/src/tokenizer/token_stream_chain.rs
@@ -1,4 +1,4 @@
-use tokenizer::{Token, TokenStream};
+use crate::tokenizer::{Token, TokenStream};
 
 const POSITION_GAP: usize = 2;
diff --git a/src/tokenizer/tokenizer.rs b/src/tokenizer/tokenizer.rs
index 46808d07e..2e2016157 100644
--- a/src/tokenizer/tokenizer.rs
+++ b/src/tokenizer/tokenizer.rs
@@ -1,7 +1,7 @@
+use crate::tokenizer::TokenStreamChain;
 /// The tokenizer module contains all of the tools used to process
 /// text in `tantivy`.
 use std::borrow::{Borrow, BorrowMut};
-use tokenizer::TokenStreamChain;
 
 /// Token
 #[derive(Debug, Clone)]
@@ -82,17 +82,17 @@ pub trait Tokenizer<'a>: Sized + Clone {
 /// A boxed tokenizer
 pub trait BoxedTokenizer: Send + Sync {
     /// Tokenize a `&str`
-    fn token_stream<'a>(&self, text: &'a str) -> Box<TokenStream + 'a>;
+    fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a>;
 
     /// Tokenize an array of `&str`
     ///
     /// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
     /// one concatenated `&str`, with an artificial position gap of `2` between the different fields
     /// to prevent an accidental `PhraseQuery` from matching across two terms.
-    fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<TokenStream + 'b>;
+    fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b>;
 
     /// Return a boxed clone of the tokenizer
-    fn boxed_clone(&self) -> Box<BoxedTokenizer>;
+    fn boxed_clone(&self) -> Box<dyn BoxedTokenizer>;
 }
 
 #[derive(Clone)]
@@ -104,11 +104,11 @@ impl<A> BoxedTokenizer for BoxableTokenizer<A>
 where
     A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
 {
-    fn token_stream<'a>(&self, text: &'a str) -> Box<TokenStream + 'a> {
+    fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
         Box::new(self.0.token_stream(text))
     }
 
-    fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<TokenStream + 'b> {
+    fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
         assert!(!texts.is_empty());
         if texts.len() == 1 {
             Box::new(self.0.token_stream(texts[0]))
@@ -125,31 +125,31 @@ where
         }
     }
 
-    fn boxed_clone(&self) -> Box<BoxedTokenizer> {
+    fn boxed_clone(&self) -> Box<dyn BoxedTokenizer> {
         Box::new(self.clone())
     }
 }
 
-pub(crate) fn box_tokenizer<A>(a: A) -> Box<BoxedTokenizer>
+pub(crate) fn box_tokenizer<A>(a: A) -> Box<dyn BoxedTokenizer>
 where
     A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
 {
     Box::new(BoxableTokenizer(a))
 }
 
-impl<'b> TokenStream for Box<TokenStream + 'b> {
+impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
     fn advance(&mut self) -> bool {
-        let token_stream: &mut TokenStream = self.borrow_mut();
+        let token_stream: &mut dyn TokenStream = self.borrow_mut();
         token_stream.advance()
     }
 
     fn token(&self) -> &Token {
-        let token_stream: &TokenStream = self.borrow();
+        let token_stream: &dyn TokenStream = self.borrow();
         token_stream.token()
     }
 
     fn token_mut(&mut self) -> &mut Token {
-        let token_stream: &mut TokenStream = self.borrow_mut();
+        let token_stream: &mut dyn TokenStream = self.borrow_mut();
         token_stream.token_mut()
     }
 }
@@ -226,7 +226,7 @@ pub trait TokenStream {
 
     /// Helper function to consume the entire `TokenStream`
     /// and push the tokens to a sink function.
-    fn process(&mut self, sink: &mut FnMut(&Token)) -> u32 {
+    fn process(&mut self, sink: &mut dyn FnMut(&Token)) -> u32 {
         let mut num_tokens_pushed = 0u32;
         while self.advance() {
             sink(self.token());
diff --git a/src/tokenizer/tokenizer_manager.rs b/src/tokenizer/tokenizer_manager.rs
index 4c724284c..0f6f6ecbc 100644
--- a/src/tokenizer/tokenizer_manager.rs
+++ b/src/tokenizer/tokenizer_manager.rs
@@ -1,15 +1,15 @@
+use crate::tokenizer::box_tokenizer;
+use crate::tokenizer::stemmer::Language;
+use crate::tokenizer::BoxedTokenizer;
+use crate::tokenizer::LowerCaser;
+use crate::tokenizer::RawTokenizer;
+use crate::tokenizer::RemoveLongFilter;
+use crate::tokenizer::SimpleTokenizer;
+use crate::tokenizer::Stemmer;
+use crate::tokenizer::Tokenizer;
 use std::collections::HashMap;
 use std::ops::Deref;
 use std::sync::{Arc, RwLock};
-use tokenizer::box_tokenizer;
-use tokenizer::stemmer::Language;
-use tokenizer::BoxedTokenizer;
-use tokenizer::LowerCaser;
-use tokenizer::RawTokenizer;
-use tokenizer::RemoveLongFilter;
-use tokenizer::SimpleTokenizer;
-use tokenizer::Stemmer;
-use tokenizer::Tokenizer;
 
 /// The tokenizer manager serves as a store for
 /// all of the pre-configured tokenizer pipelines.
@@ -25,7 +25,7 @@ use tokenizer::Tokenizer;
 /// search engine.
 #[derive(Clone)]
 pub struct TokenizerManager {
-    tokenizers: Arc<RwLock<HashMap<String, Box<BoxedTokenizer>>>>,
+    tokenizers: Arc<RwLock<HashMap<String, Box<dyn BoxedTokenizer>>>>,
 }
 
 impl TokenizerManager {
@@ -42,7 +42,7 @@
     }
 
     /// Accessing a tokenizer given its name.
-    pub fn get(&self, tokenizer_name: &str) -> Option<Box<BoxedTokenizer>> {
+    pub fn get(&self, tokenizer_name: &str) -> Option<Box<dyn BoxedTokenizer>> {
         self.tokenizers
             .read()
             .expect("Acquiring the lock should never fail")
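Putting the tokenizer changes in context, here is a minimal usage sketch of the APIs touched above: `TokenizerManager::get` returning `Option<Box<dyn BoxedTokenizer>>`, and `TokenStream::process` draining a stream into a sink closure. It assumes the default manager registers an `en_stem` pipeline (consistent with the `Stemmer`/`LowerCaser`/`RemoveLongFilter` imports above); the sample text is illustrative.

```rust
use tantivy::tokenizer::{TokenStream, TokenizerManager};

fn main() {
    let manager = TokenizerManager::default();
    // `get` hands back a boxed clone of the registered pipeline.
    let tokenizer = manager
        .get("en_stem")
        .expect("the default manager should register en_stem");

    // `token_stream` returns a `Box<dyn TokenStream + '_>` borrowing the text;
    // `process` advances it to exhaustion and returns the token count.
    let mut stream = tokenizer.token_stream("Migrating tantivy to Rust 2018");
    let num_tokens = stream.process(&mut |token| {
        println!("{:>2}: {}", token.position, token.text);
    });
    println!("{} tokens emitted", num_tokens);
}
```

Note that `process` taking `&mut dyn FnMut(&Token)` is the same `dyn`-trait-object spelling this diff introduces throughout; the closure above coerces to it automatically.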