Compare commits

..

3 Commits

Author SHA1 Message Date
Paul Masurel
d7973892a2 extra commit 2019-12-27 22:53:04 +09:00
Paul Masurel
cd7484c035 Added ReadOnlyDirectory and implemented Bundle Directory 2019-12-27 12:05:39 +09:00
Paul Masurel
7ed6bc8718 Added serialize to bundle in the RAMDirectory. 2019-12-26 10:06:52 +09:00
79 changed files with 1071 additions and 953 deletions

View File

@@ -1,16 +1,3 @@
Tantivy 0.12.0
======================
- Removing static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
## How to update?
Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
minor changes. Check https://github.com/tantivy-search/tantivy/blob/master/examples/custom_tokenizer.rs
to check for some code sample.
Tantivy 0.11.3 Tantivy 0.11.3
======================= =======================
- Fixed DateTime as a fast field (#735) - Fixed DateTime as a fast field (#735)

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tantivy" name = "tantivy"
version = "0.12.0" version = "0.11.3"
authors = ["Paul Masurel <paul.masurel@gmail.com>"] authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = ["database-implementations", "data-structures"] categories = ["database-implementations", "data-structures"]
@@ -18,7 +18,7 @@ byteorder = "1.0"
crc32fast = "1.2.0" crc32fast = "1.2.0"
once_cell = "1.0" once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]} regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.2.1" tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true} memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true} lz4 = {version="1.20", optional=true}
snap = {version="0.2"} snap = {version="0.2"}
@@ -60,6 +60,7 @@ winapi = "0.3"
rand = "0.7" rand = "0.7"
maplit = "1" maplit = "1"
matches = "0.1.8" matches = "0.1.8"
time = "0.1.42"
[dev-dependencies.fail] [dev-dependencies.fail]
version = "0.3" version = "0.3"

View File

@@ -9,7 +9,7 @@
// - import tokenized text straight from json, // - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text // - perform a search on documents with pre-tokenized text
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer}; use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
use tantivy::collector::{Count, TopDocs}; use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery; use tantivy::query::TermQuery;

View File

@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {
// This tokenizer lowers all of the text (to help with stop word matching) // This tokenizer lowers all of the text (to help with stop word matching)
// then removes all instances of `the` and `and` from the corpus // then removes all instances of `the` and `and` from the corpus
let tokenizer = TextAnalyzer::from(SimpleTokenizer) let tokenizer = SimpleTokenizer
.filter(LowerCaser) .filter(LowerCaser)
.filter(StopWordFilter::remove(vec![ .filter(StopWordFilter::remove(vec![
"the".to_string(), "the".to_string(),

View File

@@ -1,6 +1,7 @@
use super::Collector; use super::Collector;
use crate::collector::SegmentCollector; use crate::collector::SegmentCollector;
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::Score; use crate::Score;
use crate::SegmentLocalId; use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
@@ -43,11 +44,7 @@ impl Collector for Count {
type Child = SegmentCountCollector; type Child = SegmentCountCollector;
fn for_segment( fn for_segment(&self, _: SegmentLocalId, _: &SegmentReader) -> Result<SegmentCountCollector> {
&self,
_: SegmentLocalId,
_: &SegmentReader,
) -> crate::Result<SegmentCountCollector> {
Ok(SegmentCountCollector::default()) Ok(SegmentCountCollector::default())
} }
@@ -55,7 +52,7 @@ impl Collector for Count {
false false
} }
fn merge_fruits(&self, segment_counts: Vec<usize>) -> crate::Result<usize> { fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
Ok(segment_counts.into_iter().sum()) Ok(segment_counts.into_iter().sum())
} }
} }

View File

@@ -1,5 +1,6 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector}; use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::Result;
use crate::{DocAddress, DocId, Score, SegmentReader}; use crate::{DocAddress, DocId, Score, SegmentReader};
pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> { pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
@@ -41,7 +42,7 @@ pub trait CustomScorer<TScore>: Sync {
type Child: CustomSegmentScorer<TScore>; type Child: CustomSegmentScorer<TScore>;
/// Builds a child scorer for a specific segment. The child scorer is associated to /// Builds a child scorer for a specific segment. The child scorer is associated to
/// a specific segment. /// a specific segment.
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>; fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
} }
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore> impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
@@ -57,7 +58,7 @@ where
&self, &self,
segment_local_id: u32, segment_local_id: u32,
segment_reader: &SegmentReader, segment_reader: &SegmentReader,
) -> crate::Result<Self::Child> { ) -> Result<Self::Child> {
let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?; let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
let segment_collector = self let segment_collector = self
.collector .collector
@@ -72,7 +73,7 @@ where
false false
} }
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> { fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
self.collector.merge_fruits(segment_fruits) self.collector.merge_fruits(segment_fruits)
} }
} }
@@ -110,7 +111,7 @@ where
{ {
type Child = T; type Child = T;
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> { fn segment_scorer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
Ok((self)(segment_reader)) Ok((self)(segment_reader))
} }
} }

View File

@@ -5,6 +5,7 @@ use crate::fastfield::FacetReader;
use crate::schema::Facet; use crate::schema::Facet;
use crate::schema::Field; use crate::schema::Field;
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::Score; use crate::Score;
use crate::SegmentLocalId; use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
@@ -83,9 +84,9 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// use tantivy::collector::FacetCollector; /// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery; /// use tantivy::query::AllQuery;
/// use tantivy::schema::{Facet, Schema, TEXT}; /// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index}; /// use tantivy::{doc, Index, Result};
/// ///
/// fn example() -> tantivy::Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// ///
/// // Facet have their own specific type. /// // Facet have their own specific type.
@@ -261,7 +262,7 @@ impl Collector for FacetCollector {
&self, &self,
_: SegmentLocalId, _: SegmentLocalId,
reader: &SegmentReader, reader: &SegmentReader,
) -> crate::Result<FacetSegmentCollector> { ) -> Result<FacetSegmentCollector> {
let field_name = reader.schema().get_field_name(self.field); let field_name = reader.schema().get_field_name(self.field);
let facet_reader = reader.facet_reader(self.field).ok_or_else(|| { let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name)) TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
@@ -327,7 +328,7 @@ impl Collector for FacetCollector {
false false
} }
fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> crate::Result<FacetCounts> { fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> Result<FacetCounts> {
let mut facet_counts: BTreeMap<Facet, u64> = BTreeMap::new(); let mut facet_counts: BTreeMap<Facet, u64> = BTreeMap::new();
for segment_facet_counts in segments_facet_counts { for segment_facet_counts in segments_facet_counts {
for (facet, count) in segment_facet_counts.facet_counts { for (facet, count) in segment_facet_counts.facet_counts {

View File

@@ -85,6 +85,7 @@ See the `custom_collector` example.
*/ */
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::Score; use crate::Score;
use crate::SegmentLocalId; use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
@@ -146,14 +147,14 @@ pub trait Collector: Sync {
&self, &self,
segment_local_id: SegmentLocalId, segment_local_id: SegmentLocalId,
segment: &SegmentReader, segment: &SegmentReader,
) -> crate::Result<Self::Child>; ) -> Result<Self::Child>;
/// Returns true iff the collector requires to compute scores for documents. /// Returns true iff the collector requires to compute scores for documents.
fn requires_scoring(&self) -> bool; fn requires_scoring(&self) -> bool;
/// Combines the fruit associated to the collection of each segments /// Combines the fruit associated to the collection of each segments
/// into one fruit. /// into one fruit.
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>; fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit>;
} }
/// The `SegmentCollector` is the trait in charge of defining the /// The `SegmentCollector` is the trait in charge of defining the
@@ -184,11 +185,7 @@ where
type Fruit = (Left::Fruit, Right::Fruit); type Fruit = (Left::Fruit, Right::Fruit);
type Child = (Left::Child, Right::Child); type Child = (Left::Child, Right::Child);
fn for_segment( fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
&self,
segment_local_id: u32,
segment: &SegmentReader,
) -> crate::Result<Self::Child> {
let left = self.0.for_segment(segment_local_id, segment)?; let left = self.0.for_segment(segment_local_id, segment)?;
let right = self.1.for_segment(segment_local_id, segment)?; let right = self.1.for_segment(segment_local_id, segment)?;
Ok((left, right)) Ok((left, right))
@@ -201,7 +198,7 @@ where
fn merge_fruits( fn merge_fruits(
&self, &self,
children: Vec<(Left::Fruit, Right::Fruit)>, children: Vec<(Left::Fruit, Right::Fruit)>,
) -> crate::Result<(Left::Fruit, Right::Fruit)> { ) -> Result<(Left::Fruit, Right::Fruit)> {
let mut left_fruits = vec![]; let mut left_fruits = vec![];
let mut right_fruits = vec![]; let mut right_fruits = vec![];
for (left_fruit, right_fruit) in children { for (left_fruit, right_fruit) in children {
@@ -243,11 +240,7 @@ where
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit); type Fruit = (One::Fruit, Two::Fruit, Three::Fruit);
type Child = (One::Child, Two::Child, Three::Child); type Child = (One::Child, Two::Child, Three::Child);
fn for_segment( fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
&self,
segment_local_id: u32,
segment: &SegmentReader,
) -> crate::Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?; let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?; let two = self.1.for_segment(segment_local_id, segment)?;
let three = self.2.for_segment(segment_local_id, segment)?; let three = self.2.for_segment(segment_local_id, segment)?;
@@ -258,7 +251,7 @@ where
self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring() self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
} }
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> { fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
let mut one_fruits = vec![]; let mut one_fruits = vec![];
let mut two_fruits = vec![]; let mut two_fruits = vec![];
let mut three_fruits = vec![]; let mut three_fruits = vec![];
@@ -306,11 +299,7 @@ where
type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit); type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit);
type Child = (One::Child, Two::Child, Three::Child, Four::Child); type Child = (One::Child, Two::Child, Three::Child, Four::Child);
fn for_segment( fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
&self,
segment_local_id: u32,
segment: &SegmentReader,
) -> crate::Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?; let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?; let two = self.1.for_segment(segment_local_id, segment)?;
let three = self.2.for_segment(segment_local_id, segment)?; let three = self.2.for_segment(segment_local_id, segment)?;
@@ -325,7 +314,7 @@ where
|| self.3.requires_scoring() || self.3.requires_scoring()
} }
fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> { fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
let mut one_fruits = vec![]; let mut one_fruits = vec![];
let mut two_fruits = vec![]; let mut two_fruits = vec![];
let mut three_fruits = vec![]; let mut three_fruits = vec![];

View File

@@ -2,6 +2,7 @@ use super::Collector;
use super::SegmentCollector; use super::SegmentCollector;
use crate::collector::Fruit; use crate::collector::Fruit;
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::Score; use crate::Score;
use crate::SegmentLocalId; use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
@@ -23,7 +24,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
&self, &self,
segment_local_id: u32, segment_local_id: u32,
reader: &SegmentReader, reader: &SegmentReader,
) -> crate::Result<Box<dyn BoxableSegmentCollector>> { ) -> Result<Box<dyn BoxableSegmentCollector>> {
let child = self.0.for_segment(segment_local_id, reader)?; let child = self.0.for_segment(segment_local_id, reader)?;
Ok(Box::new(SegmentCollectorWrapper(child))) Ok(Box::new(SegmentCollectorWrapper(child)))
} }
@@ -32,10 +33,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
self.0.requires_scoring() self.0.requires_scoring()
} }
fn merge_fruits( fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<dyn Fruit>> {
&self,
children: Vec<<Self as Collector>::Fruit>,
) -> crate::Result<Box<dyn Fruit>> {
let typed_fruit: Vec<TCollector::Fruit> = children let typed_fruit: Vec<TCollector::Fruit> = children
.into_iter() .into_iter()
.map(|untyped_fruit| { .map(|untyped_fruit| {
@@ -46,7 +44,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
TantivyError::InvalidArgument("Failed to cast child fruit.".to_string()) TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
}) })
}) })
.collect::<crate::Result<_>>()?; .collect::<Result<_>>()?;
let merged_fruit = self.0.merge_fruits(typed_fruit)?; let merged_fruit = self.0.merge_fruits(typed_fruit)?;
Ok(Box::new(merged_fruit)) Ok(Box::new(merged_fruit))
} }
@@ -177,12 +175,12 @@ impl<'a> Collector for MultiCollector<'a> {
&self, &self,
segment_local_id: SegmentLocalId, segment_local_id: SegmentLocalId,
segment: &SegmentReader, segment: &SegmentReader,
) -> crate::Result<MultiCollectorChild> { ) -> Result<MultiCollectorChild> {
let children = self let children = self
.collector_wrappers .collector_wrappers
.iter() .iter()
.map(|collector_wrapper| collector_wrapper.for_segment(segment_local_id, segment)) .map(|collector_wrapper| collector_wrapper.for_segment(segment_local_id, segment))
.collect::<crate::Result<Vec<_>>>()?; .collect::<Result<Vec<_>>>()?;
Ok(MultiCollectorChild { children }) Ok(MultiCollectorChild { children })
} }
@@ -193,7 +191,7 @@ impl<'a> Collector for MultiCollector<'a> {
.any(Collector::requires_scoring) .any(Collector::requires_scoring)
} }
fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> crate::Result<MultiFruit> { fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len()) let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len())
.map(|_| Vec::with_capacity(segments_multifruits.len())) .map(|_| Vec::with_capacity(segments_multifruits.len()))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
@@ -211,7 +209,7 @@ impl<'a> Collector for MultiCollector<'a> {
.map(|(child_collector, segment_fruits)| { .map(|(child_collector, segment_fruits)| {
Ok(Some(child_collector.merge_fruits(segment_fruits)?)) Ok(Some(child_collector.merge_fruits(segment_fruits)?))
}) })
.collect::<crate::Result<_>>()?; .collect::<Result<_>>()?;
Ok(MultiFruit { sub_fruits }) Ok(MultiFruit { sub_fruits })
} }
} }

View File

@@ -55,7 +55,7 @@ impl Collector for TestCollector {
&self, &self,
segment_id: SegmentLocalId, segment_id: SegmentLocalId,
_reader: &SegmentReader, _reader: &SegmentReader,
) -> crate::Result<TestSegmentCollector> { ) -> Result<TestSegmentCollector> {
Ok(TestSegmentCollector { Ok(TestSegmentCollector {
segment_id, segment_id,
fruit: TestFruit::default(), fruit: TestFruit::default(),
@@ -66,7 +66,7 @@ impl Collector for TestCollector {
self.compute_score self.compute_score
} }
fn merge_fruits(&self, mut children: Vec<TestFruit>) -> crate::Result<TestFruit> { fn merge_fruits(&self, mut children: Vec<TestFruit>) -> Result<TestFruit> {
children.sort_by_key(|fruit| { children.sort_by_key(|fruit| {
if fruit.docs().is_empty() { if fruit.docs().is_empty() {
0 0
@@ -124,7 +124,7 @@ impl Collector for FastFieldTestCollector {
&self, &self,
_: SegmentLocalId, _: SegmentLocalId,
segment_reader: &SegmentReader, segment_reader: &SegmentReader,
) -> crate::Result<FastFieldSegmentCollector> { ) -> Result<FastFieldSegmentCollector> {
let reader = segment_reader let reader = segment_reader
.fast_fields() .fast_fields()
.u64(self.field) .u64(self.field)
@@ -139,7 +139,7 @@ impl Collector for FastFieldTestCollector {
false false
} }
fn merge_fruits(&self, children: Vec<Vec<u64>>) -> crate::Result<Vec<u64>> { fn merge_fruits(&self, children: Vec<Vec<u64>>) -> Result<Vec<u64>> {
Ok(children.into_iter().flat_map(|v| v.into_iter()).collect()) Ok(children.into_iter().flat_map(|v| v.into_iter()).collect())
} }
} }
@@ -184,7 +184,7 @@ impl Collector for BytesFastFieldTestCollector {
&self, &self,
_segment_local_id: u32, _segment_local_id: u32,
segment_reader: &SegmentReader, segment_reader: &SegmentReader,
) -> crate::Result<BytesFastFieldSegmentCollector> { ) -> Result<BytesFastFieldSegmentCollector> {
Ok(BytesFastFieldSegmentCollector { Ok(BytesFastFieldSegmentCollector {
vals: Vec::new(), vals: Vec::new(),
reader: segment_reader reader: segment_reader
@@ -198,7 +198,7 @@ impl Collector for BytesFastFieldTestCollector {
false false
} }
fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> { fn merge_fruits(&self, children: Vec<Vec<u8>>) -> Result<Vec<u8>> {
Ok(children.into_iter().flat_map(|c| c.into_iter()).collect()) Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
} }
} }

View File

@@ -1,5 +1,6 @@
use crate::DocAddress; use crate::DocAddress;
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::SegmentLocalId; use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
use serde::export::PhantomData; use serde::export::PhantomData;
@@ -85,7 +86,7 @@ where
pub fn merge_fruits( pub fn merge_fruits(
&self, &self,
children: Vec<Vec<(T, DocAddress)>>, children: Vec<Vec<(T, DocAddress)>>,
) -> crate::Result<Vec<(T, DocAddress)>> { ) -> Result<Vec<(T, DocAddress)>> {
if self.limit == 0 { if self.limit == 0 {
return Ok(Vec::new()); return Ok(Vec::new());
} }
@@ -112,7 +113,7 @@ where
&self, &self,
segment_id: SegmentLocalId, segment_id: SegmentLocalId,
_: &SegmentReader, _: &SegmentReader,
) -> crate::Result<TopSegmentCollector<F>> { ) -> Result<TopSegmentCollector<F>> {
Ok(TopSegmentCollector::new(segment_id, self.limit)) Ok(TopSegmentCollector::new(segment_id, self.limit))
} }
} }

View File

@@ -6,10 +6,10 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{ use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector, CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
}; };
use crate::fastfield::FastFieldReader;
use crate::schema::Field; use crate::schema::Field;
use crate::DocAddress; use crate::DocAddress;
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::Score; use crate::Score;
use crate::SegmentLocalId; use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
@@ -61,36 +61,6 @@ impl fmt::Debug for TopDocs {
} }
} }
struct ScorerByFastFieldReader {
ff_reader: FastFieldReader<u64>,
}
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&self, doc: DocId) -> u64 {
self.ff_reader.get_u64(u64::from(doc))
}
}
struct ScorerByField {
field: Field,
}
impl CustomScorer<u64> for ScorerByField {
type Child = ScorerByFastFieldReader;
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
let ff_reader = segment_reader
.fast_fields()
.u64(self.field)
.ok_or_else(|| {
crate::TantivyError::SchemaError(format!(
"Field requested is not a i64/u64 fast field."
))
})?;
Ok(ScorerByFastFieldReader { ff_reader })
}
}
impl TopDocs { impl TopDocs {
/// Creates a top score collector, with a number of documents equal to "limit". /// Creates a top score collector, with a number of documents equal to "limit".
/// ///
@@ -104,7 +74,7 @@ impl TopDocs {
/// ///
/// ```rust /// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT}; /// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress}; /// # use tantivy::{doc, Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser}; /// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher; /// use tantivy::Searcher;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
@@ -141,7 +111,7 @@ impl TopDocs {
/// fn docs_sorted_by_rating(searcher: &Searcher, /// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &dyn Query, /// query: &dyn Query,
/// sort_by_field: Field) /// sort_by_field: Field)
/// -> tantivy::Result<Vec<(u64, DocAddress)>> { /// -> Result<Vec<(u64, DocAddress)>> {
/// ///
/// // This is where we build our topdocs collector /// // This is where we build our topdocs collector
/// // /// //
@@ -173,7 +143,14 @@ impl TopDocs {
self, self,
field: Field, field: Field,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> { ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
self.custom_score(ScorerByField { field }) self.custom_score(move |segment_reader: &SegmentReader| {
let ff_reader = segment_reader
.fast_fields()
.u64(field)
.expect("Field requested is not a i64/u64 fast field.");
//TODO error message missmatch actual behavior for i64
move |doc: DocId| ff_reader.get(doc)
})
} }
/// Ranks the documents using a custom score. /// Ranks the documents using a custom score.
@@ -407,7 +384,7 @@ impl Collector for TopDocs {
&self, &self,
segment_local_id: SegmentLocalId, segment_local_id: SegmentLocalId,
reader: &SegmentReader, reader: &SegmentReader,
) -> crate::Result<Self::Child> { ) -> Result<Self::Child> {
let collector = self.0.for_segment(segment_local_id, reader)?; let collector = self.0.for_segment(segment_local_id, reader)?;
Ok(TopScoreSegmentCollector(collector)) Ok(TopScoreSegmentCollector(collector))
} }
@@ -416,10 +393,7 @@ impl Collector for TopDocs {
true true
} }
fn merge_fruits( fn merge_fruits(&self, child_fruits: Vec<Vec<(Score, DocAddress)>>) -> Result<Self::Fruit> {
&self,
child_fruits: Vec<Vec<(Score, DocAddress)>>,
) -> crate::Result<Self::Fruit> {
self.0.merge_fruits(child_fruits) self.0.merge_fruits(child_fruits)
} }
} }
@@ -598,6 +572,7 @@ mod tests {
} }
#[test] #[test]
#[should_panic(expected = "Field requested is not a i64/u64 fast field")]
fn test_field_not_fast_field() { fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT); let title = schema_builder.add_text_field(TITLE, TEXT);
@@ -612,12 +587,7 @@ mod tests {
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size); let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let err = top_collector.for_segment(0, segment); assert!(top_collector.for_segment(0, segment).is_ok());
if let Err(crate::TantivyError::SchemaError(msg)) = err {
assert_eq!(msg, "Field requested is not a i64/u64 fast field.");
} else {
assert!(false);
}
} }
fn index( fn index(

View File

@@ -186,7 +186,7 @@ mod test {
use super::{CompositeFile, CompositeWrite}; use super::{CompositeFile, CompositeWrite};
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::VInt; use crate::common::VInt;
use crate::directory::{Directory, RAMDirectory}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory};
use crate::schema::Field; use crate::schema::Field;
use std::io::Write; use std::io::Write;
use std::path::Path; use std::path::Path;

View File

@@ -1,3 +1,4 @@
use crate::Result;
use crossbeam::channel; use crossbeam::channel;
use rayon::{ThreadPool, ThreadPoolBuilder}; use rayon::{ThreadPool, ThreadPoolBuilder};
@@ -9,9 +10,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
/// API of a dependency, knowing it might conflict with a different version /// API of a dependency, knowing it might conflict with a different version
/// used by the client. Second, we may stop using rayon in the future. /// used by the client. Second, we may stop using rayon in the future.
pub enum Executor { pub enum Executor {
/// Single thread variant of an Executor
SingleThread, SingleThread,
/// Thread pool variant of an Executor
ThreadPool(ThreadPool), ThreadPool(ThreadPool),
} }
@@ -21,8 +20,8 @@ impl Executor {
Executor::SingleThread Executor::SingleThread
} }
/// Creates an Executor that dispatches the tasks in a thread pool. // Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> crate::Result<Executor> { pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> {
let pool = ThreadPoolBuilder::new() let pool = ThreadPoolBuilder::new()
.num_threads(num_threads) .num_threads(num_threads)
.thread_name(move |num| format!("{}{}", prefix, num)) .thread_name(move |num| format!("{}{}", prefix, num))
@@ -30,22 +29,22 @@ impl Executor {
Ok(Executor::ThreadPool(pool)) Ok(Executor::ThreadPool(pool))
} }
/// Perform a map in the thread pool. // Perform a map in the thread pool.
/// //
/// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task // Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
/// will propagate to the caller. // will propagate to the caller.
pub fn map< pub fn map<
A: Send, A: Send,
R: Send, R: Send,
AIterator: Iterator<Item = A>, AIterator: Iterator<Item = A>,
F: Sized + Sync + Fn(A) -> crate::Result<R>, F: Sized + Sync + Fn(A) -> Result<R>,
>( >(
&self, &self,
f: F, f: F,
args: AIterator, args: AIterator,
) -> crate::Result<Vec<R>> { ) -> Result<Vec<R>> {
match self { match self {
Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(), Executor::SingleThread => args.map(f).collect::<Result<_>>(),
Executor::ThreadPool(pool) => { Executor::ThreadPool(pool) => {
let args_with_indices: Vec<(usize, A)> = args.enumerate().collect(); let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
let num_fruits = args_with_indices.len(); let num_fruits = args_with_indices.len();

View File

@@ -20,8 +20,10 @@ use crate::reader::IndexReaderBuilder;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::tokenizer::{TextAnalyzer, TokenizerManager}; use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::TokenizerManager;
use crate::IndexWriter; use crate::IndexWriter;
use crate::Result;
use num_cpus; use num_cpus;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashSet; use std::collections::HashSet;
@@ -30,10 +32,7 @@ use std::fmt;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
fn load_metas( fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {
directory: &dyn Directory,
inventory: &SegmentMetaInventory,
) -> crate::Result<IndexMeta> {
let meta_data = directory.atomic_read(&META_FILEPATH)?; let meta_data = directory.atomic_read(&META_FILEPATH)?;
let meta_string = String::from_utf8_lossy(&meta_data); let meta_string = String::from_utf8_lossy(&meta_data);
IndexMeta::deserialize(&meta_string, &inventory) IndexMeta::deserialize(&meta_string, &inventory)
@@ -74,14 +73,14 @@ impl Index {
/// Replace the default single thread search executor pool /// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads. /// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> { pub fn set_multithread_executor(&mut self, num_threads: usize) -> Result<()> {
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?); self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?);
Ok(()) Ok(())
} }
/// Replace the default single thread search executor pool /// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads. /// by a thread pool with a given number of threads.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> { pub fn set_default_multithread_executor(&mut self) -> Result<()> {
let default_num_threads = num_cpus::get(); let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads) self.set_multithread_executor(default_num_threads)
} }
@@ -100,10 +99,7 @@ impl Index {
/// ///
/// If a previous index was in this directory, then its meta file will be destroyed. /// If a previous index was in this directory, then its meta file will be destroyed.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub fn create_in_dir<P: AsRef<Path>>( pub fn create_in_dir<P: AsRef<Path>>(directory_path: P, schema: Schema) -> Result<Index> {
directory_path: P,
schema: Schema,
) -> crate::Result<Index> {
let mmap_directory = MmapDirectory::open(directory_path)?; let mmap_directory = MmapDirectory::open(directory_path)?;
if Index::exists(&mmap_directory) { if Index::exists(&mmap_directory) {
return Err(TantivyError::IndexAlreadyExists); return Err(TantivyError::IndexAlreadyExists);
@@ -112,7 +108,7 @@ impl Index {
} }
/// Opens or creates a new index in the provided directory /// Opens or creates a new index in the provided directory
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> { pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
if !Index::exists(&dir) { if !Index::exists(&dir) {
return Index::create(dir, schema); return Index::create(dir, schema);
} }
@@ -135,13 +131,13 @@ impl Index {
/// The temp directory is only used for testing the `MmapDirectory`. /// The temp directory is only used for testing the `MmapDirectory`.
/// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`. /// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub fn create_from_tempdir(schema: Schema) -> crate::Result<Index> { pub fn create_from_tempdir(schema: Schema) -> Result<Index> {
let mmap_directory = MmapDirectory::create_from_tempdir()?; let mmap_directory = MmapDirectory::create_from_tempdir()?;
Index::create(mmap_directory, schema) Index::create(mmap_directory, schema)
} }
/// Creates a new index given an implementation of the trait `Directory` /// Creates a new index given an implementation of the trait `Directory`
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> { pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
let directory = ManagedDirectory::wrap(dir)?; let directory = ManagedDirectory::wrap(dir)?;
Index::from_directory(directory, schema) Index::from_directory(directory, schema)
} }
@@ -149,7 +145,7 @@ impl Index {
/// Create a new index from a directory. /// Create a new index from a directory.
/// ///
/// This will overwrite existing meta.json /// This will overwrite existing meta.json
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> { fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
save_new_metas(schema.clone(), directory.borrow_mut())?; save_new_metas(schema.clone(), directory.borrow_mut())?;
let metas = IndexMeta::with_schema(schema); let metas = IndexMeta::with_schema(schema);
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default()) Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
@@ -160,7 +156,7 @@ impl Index {
directory: ManagedDirectory, directory: ManagedDirectory,
metas: &IndexMeta, metas: &IndexMeta,
inventory: SegmentMetaInventory, inventory: SegmentMetaInventory,
) -> crate::Result<Index> { ) -> Result<Index> {
let schema = metas.schema.clone(); let schema = metas.schema.clone();
Ok(Index { Ok(Index {
directory, directory,
@@ -177,11 +173,11 @@ impl Index {
} }
/// Helper to access the tokenizer associated to a specific field. /// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> crate::Result<TextAnalyzer> { pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers(); let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<TextAnalyzer> = match field_type { let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type {
FieldType::Str(text_options) => text_options FieldType::Str(text_options) => text_options
.get_indexing_options() .get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string()) .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -200,7 +196,7 @@ impl Index {
/// Create a default `IndexReader` for the given index. /// Create a default `IndexReader` for the given index.
/// ///
/// See [`Index.reader_builder()`](#method.reader_builder). /// See [`Index.reader_builder()`](#method.reader_builder).
pub fn reader(&self) -> crate::Result<IndexReader> { pub fn reader(&self) -> Result<IndexReader> {
self.reader_builder().try_into() self.reader_builder().try_into()
} }
@@ -215,7 +211,7 @@ impl Index {
/// Opens a new directory from an index path. /// Opens a new directory from an index path.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> crate::Result<Index> { pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> Result<Index> {
let mmap_directory = MmapDirectory::open(directory_path)?; let mmap_directory = MmapDirectory::open(directory_path)?;
Index::open(mmap_directory) Index::open(mmap_directory)
} }
@@ -239,7 +235,7 @@ impl Index {
} }
/// Open the index using the provided directory /// Open the index using the provided directory
pub fn open<D: Directory>(directory: D) -> crate::Result<Index> { pub fn open<D: Directory>(directory: D) -> Result<Index> {
let directory = ManagedDirectory::wrap(directory)?; let directory = ManagedDirectory::wrap(directory)?;
let inventory = SegmentMetaInventory::default(); let inventory = SegmentMetaInventory::default();
let metas = load_metas(&directory, &inventory)?; let metas = load_metas(&directory, &inventory)?;
@@ -247,7 +243,7 @@ impl Index {
} }
/// Reads the index meta file from the directory. /// Reads the index meta file from the directory.
pub fn load_metas(&self) -> crate::Result<IndexMeta> { pub fn load_metas(&self) -> Result<IndexMeta> {
load_metas(self.directory(), &self.inventory) load_metas(self.directory(), &self.inventory)
} }
@@ -275,7 +271,7 @@ impl Index {
&self, &self,
num_threads: usize, num_threads: usize,
overall_heap_size_in_bytes: usize, overall_heap_size_in_bytes: usize,
) -> crate::Result<IndexWriter> { ) -> Result<IndexWriter> {
let directory_lock = self let directory_lock = self
.directory .directory
.acquire_lock(&INDEX_WRITER_LOCK) .acquire_lock(&INDEX_WRITER_LOCK)
@@ -310,7 +306,7 @@ impl Index {
/// If the lockfile already exists, returns `Error::FileAlreadyExists`. /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
/// # Panics /// # Panics
/// If the heap size per thread is too small, panics. /// If the heap size per thread is too small, panics.
pub fn writer(&self, overall_heap_size_in_bytes: usize) -> crate::Result<IndexWriter> { pub fn writer(&self, overall_heap_size_in_bytes: usize) -> Result<IndexWriter> {
let mut num_threads = num_cpus::get(); let mut num_threads = num_cpus::get();
let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads; let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN { if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
@@ -327,7 +323,7 @@ impl Index {
} }
/// Returns the list of segments that are searchable /// Returns the list of segments that are searchable
pub fn searchable_segments(&self) -> crate::Result<Vec<Segment>> { pub fn searchable_segments(&self) -> Result<Vec<Segment>> {
Ok(self Ok(self
.searchable_segment_metas()? .searchable_segment_metas()?
.into_iter() .into_iter()
@@ -342,7 +338,7 @@ impl Index {
/// Creates a new segment. /// Creates a new segment.
pub fn new_segment(&self) -> Segment { pub fn new_segment(&self) -> Segment {
let segment_meta = self let mut segment_meta = self
.inventory .inventory
.new_segment_meta(SegmentId::generate_random(), 0); .new_segment_meta(SegmentId::generate_random(), 0);
self.segment(segment_meta) self.segment(segment_meta)
@@ -360,12 +356,12 @@ impl Index {
/// Reads the meta.json and returns the list of /// Reads the meta.json and returns the list of
/// `SegmentMeta` from the last commit. /// `SegmentMeta` from the last commit.
pub fn searchable_segment_metas(&self) -> crate::Result<Vec<SegmentMeta>> { pub fn searchable_segment_metas(&self) -> Result<Vec<SegmentMeta>> {
Ok(self.load_metas()?.segments) Ok(self.load_metas()?.segments)
} }
/// Returns the list of segment ids that are searchable. /// Returns the list of segment ids that are searchable.
pub fn searchable_segment_ids(&self) -> crate::Result<Vec<SegmentId>> { pub fn searchable_segment_ids(&self) -> Result<Vec<SegmentId>> {
Ok(self Ok(self
.searchable_segment_metas()? .searchable_segment_metas()?
.iter() .iter()
@@ -374,7 +370,7 @@ impl Index {
} }
/// Returns the set of corrupted files /// Returns the set of corrupted files
pub fn validate_checksum(&self) -> crate::Result<HashSet<PathBuf>> { pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
self.directory.list_damaged().map_err(Into::into) self.directory.list_damaged().map_err(Into::into)
} }
} }

View File

@@ -35,6 +35,7 @@ impl SegmentMetaInventory {
segment_id, segment_id,
max_doc, max_doc,
deletes: None, deletes: None,
bundled: false,
}; };
SegmentMeta::from(self.inventory.track(inner)) SegmentMeta::from(self.inventory.track(inner))
} }
@@ -81,6 +82,19 @@ impl SegmentMeta {
self.tracked.segment_id self.tracked.segment_id
} }
pub fn with_bundled(self) -> SegmentMeta {
SegmentMeta::from(self.tracked.map(|inner| InnerSegmentMeta {
segment_id: inner.segment_id,
max_doc: inner.max_doc,
deletes: inner.deletes.clone(),
bundled: true,
}))
}
pub fn is_bundled(&self) -> bool {
self.tracked.bundled
}
/// Returns the number of deleted documents. /// Returns the number of deleted documents.
pub fn num_deleted_docs(&self) -> u32 { pub fn num_deleted_docs(&self) -> u32 {
self.tracked self.tracked
@@ -107,8 +121,12 @@ impl SegmentMeta {
/// It just joins the segment id with the extension /// It just joins the segment id with the extension
/// associated to a segment component. /// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf { pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let mut path = self.id().uuid_string(); let suffix = self.suffix(component);
path.push_str(&*match component { self.relative_path_from_suffix(&suffix)
}
fn suffix(&self, component: SegmentComponent) -> String {
match component {
SegmentComponent::POSTINGS => ".idx".to_string(), SegmentComponent::POSTINGS => ".idx".to_string(),
SegmentComponent::POSITIONS => ".pos".to_string(), SegmentComponent::POSITIONS => ".pos".to_string(),
SegmentComponent::POSITIONSSKIP => ".posidx".to_string(), SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
@@ -117,7 +135,17 @@ impl SegmentMeta {
SegmentComponent::FASTFIELDS => ".fast".to_string(), SegmentComponent::FASTFIELDS => ".fast".to_string(),
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(), SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)), SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
}); }
}
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated to a segment component.
pub fn relative_path_from_suffix(&self, suffix: &str) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(".");
path.push_str(&suffix);
PathBuf::from(path) PathBuf::from(path)
} }
@@ -161,6 +189,7 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id, segment_id: inner_meta.segment_id,
max_doc, max_doc,
deletes: None, deletes: None,
bundled: inner_meta.bundled,
}); });
SegmentMeta { tracked } SegmentMeta { tracked }
} }
@@ -175,6 +204,7 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id, segment_id: inner_meta.segment_id,
max_doc: inner_meta.max_doc, max_doc: inner_meta.max_doc,
deletes: Some(delete_meta), deletes: Some(delete_meta),
bundled: inner_meta.bundled,
}); });
SegmentMeta { tracked } SegmentMeta { tracked }
} }
@@ -185,6 +215,7 @@ struct InnerSegmentMeta {
segment_id: SegmentId, segment_id: SegmentId,
max_doc: u32, max_doc: u32,
deletes: Option<DeleteMeta>, deletes: Option<DeleteMeta>,
bundled: bool,
} }
impl InnerSegmentMeta { impl InnerSegmentMeta {

View File

@@ -60,7 +60,7 @@ impl InvertedIndexReader {
.get_index_record_option() .get_index_record_option()
.unwrap_or(IndexRecordOption::Basic); .unwrap_or(IndexRecordOption::Basic);
InvertedIndexReader { InvertedIndexReader {
termdict: TermDictionary::empty(), termdict: TermDictionary::empty(&field_type),
postings_source: ReadOnlySource::empty(), postings_source: ReadOnlySource::empty(),
positions_source: ReadOnlySource::empty(), positions_source: ReadOnlySource::empty(),
positions_idx_source: ReadOnlySource::empty(), positions_idx_source: ReadOnlySource::empty(),

View File

@@ -14,6 +14,7 @@ use crate::store::StoreReader;
use crate::termdict::TermMerger; use crate::termdict::TermMerger;
use crate::DocAddress; use crate::DocAddress;
use crate::Index; use crate::Index;
use crate::Result;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
@@ -22,7 +23,7 @@ fn collect_segment<C: Collector>(
weight: &dyn Weight, weight: &dyn Weight,
segment_ord: u32, segment_ord: u32,
segment_reader: &SegmentReader, segment_reader: &SegmentReader,
) -> crate::Result<C::Fruit> { ) -> Result<C::Fruit> {
let mut scorer = weight.scorer(segment_reader)?; let mut scorer = weight.scorer(segment_reader)?;
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?; let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
if let Some(delete_bitset) = segment_reader.delete_bitset() { if let Some(delete_bitset) = segment_reader.delete_bitset() {
@@ -77,7 +78,7 @@ impl Searcher {
/// ///
/// The searcher uses the segment ordinal to route the /// The searcher uses the segment ordinal to route the
/// the request to the right `Segment`. /// the request to the right `Segment`.
pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> { pub fn doc(&self, doc_address: DocAddress) -> Result<Document> {
let DocAddress(segment_local_id, doc_id) = doc_address; let DocAddress(segment_local_id, doc_id) = doc_address;
let store_reader = &self.store_readers[segment_local_id as usize]; let store_reader = &self.store_readers[segment_local_id as usize];
store_reader.get(doc_id) store_reader.get(doc_id)
@@ -131,11 +132,7 @@ impl Searcher {
/// ///
/// Finally, the Collector merges each of the child collectors into itself for result usability /// Finally, the Collector merges each of the child collectors into itself for result usability
/// by the caller. /// by the caller.
pub fn search<C: Collector>( pub fn search<C: Collector>(&self, query: &dyn Query, collector: &C) -> Result<C::Fruit> {
&self,
query: &dyn Query,
collector: &C,
) -> crate::Result<C::Fruit> {
let executor = self.index.search_executor(); let executor = self.index.search_executor();
self.search_with_executor(query, collector, executor) self.search_with_executor(query, collector, executor)
} }
@@ -157,7 +154,7 @@ impl Searcher {
query: &dyn Query, query: &dyn Query,
collector: &C, collector: &C,
executor: &Executor, executor: &Executor,
) -> crate::Result<C::Fruit> { ) -> Result<C::Fruit> {
let scoring_enabled = collector.requires_scoring(); let scoring_enabled = collector.requires_scoring();
let weight = query.weight(self, scoring_enabled)?; let weight = query.weight(self, scoring_enabled)?;
let segment_readers = self.segment_readers(); let segment_readers = self.segment_readers();

View File

@@ -4,7 +4,7 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlyDirectory, ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
@@ -90,8 +90,21 @@ impl Segment {
/// Open one of the component file for *regular* write. /// Open one of the component file for *regular* write.
pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> { pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component); let path = self.relative_path(component);
let write = self.index.directory_mut().open_write(&path)?; self.index.directory_mut().open_write(&path)
Ok(write) }
pub fn open_bundle_writer(&mut self) -> Result<WritePtr, OpenWriteError> {
let path = self.meta.relative_path_from_suffix("bundle");
self.index.directory_mut().open_write(&path)
}
pub(crate) fn open_write_in_directory(
&mut self,
component: SegmentComponent,
directory: &mut dyn Directory,
) -> Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component);
directory.open_write(&path)
} }
} }

View File

@@ -16,6 +16,7 @@ use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
use crate::DocId; use crate::DocId;
use crate::Result;
use fail::fail_point; use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
@@ -144,7 +145,7 @@ impl SegmentReader {
} }
/// Open a new segment for reading. /// Open a new segment for reading.
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> { pub fn open(segment: &Segment) -> Result<SegmentReader> {
let termdict_source = segment.open_read(SegmentComponent::TERMS)?; let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
let termdict_composite = CompositeFile::open(&termdict_source)?; let termdict_composite = CompositeFile::open(&termdict_source)?;

View File

@@ -0,0 +1,97 @@
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::OpenReadError;
use crate::directory::ReadOnlySource;
use crate::error::DataCorruption;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
#[derive(Clone)]
struct BundleDirectory {
source_map: Arc<HashMap<PathBuf, ReadOnlySource>>,
}
impl BundleDirectory {
pub fn from_source(source: ReadOnlySource) -> Result<BundleDirectory, DataCorruption> {
let mut index_offset_buf = [0u8; 8];
let (body_idx, footer_offset) = source.split_from_end(8);
index_offset_buf.copy_from_slice(footer_offset.as_slice());
let offset = u64::from_le_bytes(index_offset_buf);
let (body_source, idx_source) = body_idx.split(offset as usize);
let idx: HashMap<PathBuf, (u64, u64)> = serde_json::from_slice(idx_source.as_slice())
.map_err(|err| {
let msg = format!("Failed to read index from bundle. {:?}", err);
DataCorruption::comment_only(msg)
})?;
let source_map: HashMap<PathBuf, ReadOnlySource> = idx
.into_iter()
.map(|(path, (start, stop))| {
let source = body_source.slice(start as usize, stop as usize);
(path, source)
})
.collect();
Ok(BundleDirectory {
source_map: Arc::new(source_map),
})
}
}
impl ReadOnlyDirectory for BundleDirectory {
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.source_map
.get(path)
.cloned()
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))
}
fn exists(&self, path: &Path) -> bool {
self.source_map.contains_key(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let source = self
.source_map
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))?;
Ok(source.as_slice().to_vec())
}
}
#[cfg(test)]
mod tests {
use super::BundleDirectory;
use crate::directory::{RAMDirectory, ReadOnlyDirectory, TerminatingWrite};
use crate::Directory;
use std::io::Write;
use std::path::Path;
#[test]
fn test_bundle_directory() {
let mut ram_directory = RAMDirectory::default();
let test_path_atomic = Path::new("testpath_atomic");
let test_path_wrt = Path::new("testpath_wrt");
assert!(ram_directory
.atomic_write(test_path_atomic, b"titi")
.is_ok());
{
let mut test_wrt = ram_directory.open_write(test_path_wrt).unwrap();
assert!(test_wrt.write_all(b"toto").is_ok());
assert!(test_wrt.terminate().is_ok());
}
let mut dest_directory = RAMDirectory::default();
let bundle_path = Path::new("bundle");
let mut wrt = dest_directory.open_write(bundle_path).unwrap();
assert!(ram_directory.serialize_bundle(&mut wrt).is_ok());
assert!(wrt.terminate().is_ok());
let source = dest_directory.open_read(bundle_path).unwrap();
let bundle_directory = BundleDirectory::from_source(source).unwrap();
assert_eq!(
&bundle_directory.atomic_read(test_path_atomic).unwrap()[..],
b"titi"
);
assert_eq!(
&bundle_directory.open_read(test_path_wrt).unwrap()[..],
b"toto"
);
}
}

View File

@@ -100,17 +100,7 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
} }
} }
/// Write-once read many (WORM) abstraction for where pub trait ReadOnlyDirectory {
/// tantivy's data should be stored.
///
/// There are currently two implementations of `Directory`
///
/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
/// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests.
///
pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// Opens a virtual file for read. /// Opens a virtual file for read.
/// ///
/// Once a virtual file is open, its data may not /// Once a virtual file is open, its data may not
@@ -122,6 +112,31 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// You should only use this to read files create with [Directory::open_write]. /// You should only use this to read files create with [Directory::open_write].
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>; fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;
/// Reads the full content file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files create with [Directory::atomic_write].
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
}
/// Write-once read many (WORM) abstraction for where
/// tantivy's data should be stored.
///
/// There are currently two implementations of `Directory`
///
/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
/// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests.
///
pub trait Directory:
DirectoryClone + ReadOnlyDirectory + fmt::Debug + Send + Sync + 'static
{
/// Removes a file /// Removes a file
/// ///
/// Removing a file will not affect an eventual /// Removing a file will not affect an eventual
@@ -131,9 +146,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// `DeleteError::DoesNotExist`. /// `DeleteError::DoesNotExist`.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>; fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;
/// Opens a writer for the *virtual file* associated with /// Opens a writer for the *virtual file* associated with
/// a Path. /// a Path.
/// ///
@@ -155,14 +167,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// The file may not previously exist. /// The file may not previously exist.
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>; fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
/// Reads the full content file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files create with [Directory::atomic_write].
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data. /// Atomically replace the content of a file with data.
/// ///
/// This calls ensure that reads can never *observe* /// This calls ensure that reads can never *observe*

View File

@@ -10,6 +10,7 @@ use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
use crate::directory::directory::ReadOnlyDirectory;
use crc32fast::Hasher; use crc32fast::Hasher;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
@@ -150,7 +151,7 @@ impl ManagedDirectory {
} }
Err(err) => { Err(err) => {
error!("Failed to acquire lock for GC"); error!("Failed to acquire lock for GC");
return Err(crate::TantivyError::from(err)); return Err(crate::Error::from(err));
} }
} }
} }
@@ -264,14 +265,6 @@ impl ManagedDirectory {
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
@@ -289,18 +282,10 @@ impl Directory for ManagedDirectory {
self.directory.atomic_write(path, data) self.directory.atomic_write(path, data)
} }
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
self.directory.delete(path) self.directory.delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> { fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
self.directory.acquire_lock(lock) self.directory.acquire_lock(lock)
} }
@@ -310,6 +295,24 @@ impl Directory for ManagedDirectory {
} }
} }
impl ReadOnlyDirectory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
}
impl Clone for ManagedDirectory { impl Clone for ManagedDirectory {
fn clone(&self) -> ManagedDirectory { fn clone(&self) -> ManagedDirectory {
ManagedDirectory { ManagedDirectory {
@@ -323,7 +326,9 @@ impl Clone for ManagedDirectory {
#[cfg(test)] #[cfg(test)]
mod tests_mmap_specific { mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite}; use crate::directory::{
Directory, ManagedDirectory, MmapDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::OpenOptions; use std::fs::OpenOptions;
use std::io::Write; use std::io::Write;

View File

@@ -6,6 +6,7 @@ use self::notify::RawEvent;
use self::notify::RecursiveMode; use self::notify::RecursiveMode;
use self::notify::Watcher; use self::notify::Watcher;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::LockError; use crate::directory::error::LockError;
use crate::directory::error::{ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError, DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
@@ -142,7 +143,7 @@ impl MmapCache {
} }
struct WatcherWrapper { struct WatcherWrapper {
_watcher: Mutex<notify::PollWatcher>, _watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: Arc<WatchCallbackList>, watcher_router: Arc<WatchCallbackList>,
} }
@@ -150,7 +151,7 @@ impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> { pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel(); let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the // We need to initialize the
let watcher = notify::poll::PollWatcher::with_delay_ms(tx, 1) let watcher = notify::raw_watcher(tx)
.and_then(|mut watcher| { .and_then(|mut watcher| {
watcher.watch(path, RecursiveMode::Recursive)?; watcher.watch(path, RecursiveMode::Recursive)?;
Ok(watcher) Ok(watcher)
@@ -407,24 +408,6 @@ impl TerminatingWrite for SafeFileWriter {
} }
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
/// Any entry associated to the path in the mmap will be /// Any entry associated to the path in the mmap will be
/// removed before the file is deleted. /// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
@@ -443,11 +426,6 @@ impl Directory for MmapDirectory {
} }
} }
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path); debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -478,25 +456,6 @@ impl Directory for MmapDirectory {
Ok(BufWriter::new(Box::new(writer))) Ok(BufWriter::new(Box::new(writer)))
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path); debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -530,6 +489,50 @@ impl Directory for MmapDirectory {
} }
} }
impl ReadOnlyDirectory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {

View File

@@ -7,6 +7,7 @@ WORM directory abstraction.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_directory; mod mmap_directory;
mod bundle_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod footer; mod footer;
@@ -19,7 +20,7 @@ mod watch_event_router;
pub mod error; pub mod error;
pub use self::directory::DirectoryLock; pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone}; pub use self::directory::{Directory, DirectoryClone, ReadOnlyDirectory};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory; pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;

View File

@@ -1,4 +1,6 @@
use crate::common::CountingWriter;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken; use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
@@ -115,6 +117,22 @@ impl InnerDirectory {
fn total_mem_usage(&self) -> usize { fn total_mem_usage(&self) -> usize {
self.fs.values().map(|f| f.len()).sum() self.fs.values().map(|f| f.len()).sum()
} }
fn serialize_bundle(&self, wrt: &mut WritePtr) -> io::Result<()> {
let mut counting_writer = CountingWriter::wrap(wrt);
let mut file_index: HashMap<PathBuf, (u64, u64)> = HashMap::default();
for (path, source) in &self.fs {
let start = counting_writer.written_bytes();
counting_writer.write_all(source.as_slice())?;
let stop = counting_writer.written_bytes();
file_index.insert(path.to_path_buf(), (start, stop));
}
let index_offset = counting_writer.written_bytes();
serde_json::to_writer(&mut counting_writer, &file_index)?;
let index_offset_buffer = index_offset.to_le_bytes();
counting_writer.write_all(&index_offset_buffer[..])?;
Ok(())
}
} }
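Read back-to-front, `serialize_bundle` defines the bundle layout: the raw file bytes concatenated in iteration order, a JSON map from path to `(start, stop)` byte offsets, and finally that map's own start offset as eight little-endian bytes. A hedged sketch of recovering the index from an in-memory bundle (the helper name and error handling are illustrative, not part of this patch):

```rust
use std::collections::HashMap;
use std::path::PathBuf;

// Given the whole bundle as a byte slice, recover the path -> (start, stop) file index.
fn read_bundle_index(bundle: &[u8]) -> serde_json::Result<HashMap<PathBuf, (u64, u64)>> {
    let len = bundle.len();
    // The trailing 8 bytes hold the little-endian offset at which the JSON index starts.
    let mut offset_bytes = [0u8; 8];
    offset_bytes.copy_from_slice(&bundle[len - 8..]);
    let index_offset = u64::from_le_bytes(offset_bytes) as usize;
    // The JSON index spans from that offset up to the trailing offset bytes.
    serde_json::from_slice(&bundle[index_offset..len - 8])
}
```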
impl fmt::Debug for RAMDirectory { impl fmt::Debug for RAMDirectory {
@@ -144,13 +162,18 @@ impl RAMDirectory {
pub fn total_mem_usage(&self) -> usize { pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage() self.fs.read().unwrap().total_mem_usage()
} }
/// Serialize the RAMDirectory into a bundle.
///
/// This method will fail, write nothing, and return an error if a
/// clone of this directory exists.
pub fn serialize_bundle(self, wrt: &mut WritePtr) -> io::Result<()> {
let inner_directory_rlock = self.fs.read().unwrap();
inner_directory_rlock.serialize_bundle(wrt)
}
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| { fail_point!("RAMDirectory::delete", |_| {
use crate::directory::error::IOError; use crate::directory::error::IOError;
@@ -160,10 +183,6 @@ impl Directory for RAMDirectory {
self.fs.write().unwrap().delete(path) self.fs.write().unwrap().delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap(); let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path); let path_buf = PathBuf::from(path);
@@ -177,10 +196,6 @@ impl Directory for RAMDirectory {
} }
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new( fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
@@ -204,3 +219,17 @@ impl Directory for RAMDirectory {
Ok(self.fs.write().unwrap().watch(watch_callback)) Ok(self.fs.write().unwrap().watch(watch_callback))
} }
} }
impl ReadOnlyDirectory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
}
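Together with `serialize_bundle` above, this lets an in-memory directory be flushed into a single file owned by another directory. A hedged usage sketch (the `RAMDirectory::create()` constructor and the sink directory are assumptions; error plumbing is simplified):

```rust
use std::path::Path;

use tantivy::directory::{Directory, RAMDirectory, TerminatingWrite};

// Fill a RAMDirectory, then dump its files, JSON index and offset into one bundle file.
fn bundle_ram(sink: &mut dyn Directory) -> std::io::Result<()> {
    let mut ram = RAMDirectory::create();
    ram.atomic_write(Path::new("meta.json"), b"{}")?;
    let mut bundle_wrt = sink
        .open_write(Path::new("segment.bundle"))
        .expect("opening the bundle file for write");
    // serialize_bundle consumes the RAMDirectory.
    ram.serialize_bundle(&mut bundle_wrt)?;
    bundle_wrt.terminate()
}
```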

View File

@@ -25,10 +25,10 @@ impl DataCorruption {
} }
} }
pub fn comment_only(comment: String) -> DataCorruption { pub fn comment_only<TS: ToString>(comment: TS) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: None, filepath: None,
comment, comment: comment.to_string(),
} }
} }
} }
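The relaxed bound saves a `.to_string()` at every call site. Two illustrative calls (not taken from this patch) that both compile under the new signature:

```rust
use crate::error::DataCorruption;

// A &str literal and an owned String are now equally acceptable.
fn data_corruption_examples() -> (DataCorruption, DataCorruption) {
    let from_str = DataCorruption::comment_only("unexpected checksum in footer");
    let from_string = DataCorruption::comment_only(format!("{} bytes missing", 12));
    (from_str, from_string)
}
```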

View File

@@ -179,7 +179,7 @@ mod tests {
use super::*; use super::*;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::merge_policy::NoMergePolicy; use crate::merge_policy::NoMergePolicy;
use crate::schema::Field; use crate::schema::Field;

View File

@@ -7,6 +7,9 @@ pub use self::writer::MultiValueIntFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use time;
use self::time::Duration;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::query::QueryParser; use crate::query::QueryParser;
use crate::schema::Cardinality; use crate::schema::Cardinality;
@@ -14,7 +17,6 @@ mod tests {
use crate::schema::IntOptions; use crate::schema::IntOptions;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Index; use crate::Index;
use chrono::Duration;
#[test] #[test]
fn test_multivalued_u64() { fn test_multivalued_u64() {

View File

@@ -4,7 +4,7 @@ use crate::common::compute_num_bits;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;

View File

@@ -4,6 +4,7 @@ use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader}; use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
use crate::schema::{Cardinality, Field, FieldType, Schema}; use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use crate::Result;
use std::collections::HashMap; use std::collections::HashMap;
/// Provides access to all of the FastFieldReader. /// Provides access to all of the FastFieldReader.
@@ -53,7 +54,7 @@ impl FastFieldReaders {
pub(crate) fn load_all( pub(crate) fn load_all(
schema: &Schema, schema: &Schema,
fast_fields_composite: &CompositeFile, fast_fields_composite: &CompositeFile,
) -> crate::Result<FastFieldReaders> { ) -> Result<FastFieldReaders> {
let mut fast_field_readers = FastFieldReaders { let mut fast_field_readers = FastFieldReaders {
fast_field_i64: Default::default(), fast_field_i64: Default::default(),
fast_field_u64: Default::default(), fast_field_u64: Default::default(),

View File

@@ -897,7 +897,7 @@ mod tests {
let index_writer = index.writer(3_000_000).unwrap(); let index_writer = index.writer(3_000_000).unwrap();
assert_eq!( assert_eq!(
format!("{:?}", index_writer.get_merge_policy()), format!("{:?}", index_writer.get_merge_policy()),
"LogMergePolicy { min_merge_size: 8, max_merge_size: 10000000, min_layer_size: 10000, \ "LogMergePolicy { min_merge_size: 8, min_layer_size: 10000, \
level_log_size: 0.75 }" level_log_size: 0.75 }"
); );
let merge_policy = Box::new(NoMergePolicy::default()); let merge_policy = Box::new(NoMergePolicy::default());

View File

@@ -6,14 +6,12 @@ use std::f64;
const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75; const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75;
const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000; const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
const DEFAULT_MIN_MERGE_SIZE: usize = 8; const DEFAULT_MIN_MERGE_SIZE: usize = 8;
const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;
/// `LogMergePolicy` tries to merge segments that have a similar number of /// documents.
/// documents. /// documents.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct LogMergePolicy { pub struct LogMergePolicy {
min_merge_size: usize, min_merge_size: usize,
max_merge_size: usize,
min_layer_size: u32, min_layer_size: u32,
level_log_size: f64, level_log_size: f64,
} }
@@ -28,12 +26,6 @@ impl LogMergePolicy {
self.min_merge_size = min_merge_size; self.min_merge_size = min_merge_size;
} }
/// Set the maximum number docs in a segment for it to be considered for
/// merging.
pub fn set_max_merge_size(&mut self, max_merge_size: usize) {
self.max_merge_size = max_merge_size;
}
/// Set the minimum segment size under which all segments belong /// Set the minimum segment size under which all segments belong
/// to the same level. /// to the same level.
pub fn set_min_layer_size(&mut self, min_layer_size: u32) { pub fn set_min_layer_size(&mut self, min_layer_size: u32) {
@@ -61,7 +53,6 @@ impl MergePolicy for LogMergePolicy {
let mut size_sorted_tuples = segments let mut size_sorted_tuples = segments
.iter() .iter()
.map(SegmentMeta::num_docs) .map(SegmentMeta::num_docs)
.filter(|s| s <= &(self.max_merge_size as u32))
.enumerate() .enumerate()
.collect::<Vec<(usize, u32)>>(); .collect::<Vec<(usize, u32)>>();
@@ -95,7 +86,6 @@ impl Default for LogMergePolicy {
fn default() -> LogMergePolicy { fn default() -> LogMergePolicy {
LogMergePolicy { LogMergePolicy {
min_merge_size: DEFAULT_MIN_MERGE_SIZE, min_merge_size: DEFAULT_MIN_MERGE_SIZE,
max_merge_size: DEFAULT_MAX_MERGE_SIZE,
min_layer_size: DEFAULT_MIN_LAYER_SIZE, min_layer_size: DEFAULT_MIN_LAYER_SIZE,
level_log_size: DEFAULT_LEVEL_LOG_SIZE, level_log_size: DEFAULT_LEVEL_LOG_SIZE,
} }
@@ -114,7 +104,6 @@ mod tests {
fn test_merge_policy() -> LogMergePolicy { fn test_merge_policy() -> LogMergePolicy {
let mut log_merge_policy = LogMergePolicy::default(); let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3); log_merge_policy.set_min_merge_size(3);
log_merge_policy.set_max_merge_size(100_000);
log_merge_policy.set_min_layer_size(2); log_merge_policy.set_min_layer_size(2);
log_merge_policy log_merge_policy
} }
@@ -152,11 +141,11 @@ mod tests {
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(1_000), create_random_segment_meta(1000),
create_random_segment_meta(1_000), create_random_segment_meta(1000),
create_random_segment_meta(1_000), create_random_segment_meta(1000),
create_random_segment_meta(10_000), create_random_segment_meta(10000),
create_random_segment_meta(10_000), create_random_segment_meta(10000),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
@@ -193,19 +182,4 @@ mod tests {
let result_list = test_merge_policy().compute_merge_candidates(&test_input); let result_list = test_merge_policy().compute_merge_candidates(&test_input);
assert_eq!(result_list.len(), 1); assert_eq!(result_list.len(), 1);
} }
#[test]
fn test_large_merge_segments() {
let test_input = vec![
create_random_segment_meta(1_000_000),
create_random_segment_meta(100_001),
create_random_segment_meta(100_000),
create_random_segment_meta(100_000),
create_random_segment_meta(100_000),
];
let result_list = test_merge_policy().compute_merge_candidates(&test_input);
// Do not include large segments
assert_eq!(result_list.len(), 1);
assert_eq!(result_list[0].0.len(), 3)
}
} }
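With `max_merge_size` reverted, the policy is back to two knobs: `min_merge_size` and `min_layer_size`. A minimal sketch of wiring it into a writer (the `tantivy::merge_policy` path and the `set_merge_policy` call are assumptions based on the tests above, not part of this diff):

```rust
use tantivy::merge_policy::LogMergePolicy;
use tantivy::IndexWriter;

// Tune the two remaining knobs and hand the policy to an existing writer.
fn configure_merges(index_writer: &mut IndexWriter) {
    let mut policy = LogMergePolicy::default();
    policy.set_min_merge_size(8); // fewer segments than this in a level: no merge proposed
    policy.set_min_layer_size(10_000); // segments under this doc count share the lowest level
    index_writer.set_merge_policy(Box::new(policy));
}
```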

View File

@@ -21,6 +21,8 @@ use crate::store::StoreWriter;
use crate::termdict::TermMerger; use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::TantivyError;
use itertools::Itertools; use itertools::Itertools;
use std::cmp; use std::cmp;
use std::collections::HashMap; use std::collections::HashMap;
@@ -141,7 +143,7 @@ impl DeltaComputer {
} }
impl IndexMerger { impl IndexMerger {
pub fn open(schema: Schema, segments: &[Segment]) -> crate::Result<IndexMerger> { pub fn open(schema: Schema, segments: &[Segment]) -> Result<IndexMerger> {
let mut readers = vec![]; let mut readers = vec![];
let mut max_doc: u32 = 0u32; let mut max_doc: u32 = 0u32;
for segment in segments { for segment in segments {
@@ -157,7 +159,7 @@ impl IndexMerger {
which exceeds the limit {}.", which exceeds the limit {}.",
max_doc, MAX_DOC_LIMIT max_doc, MAX_DOC_LIMIT
); );
return Err(crate::TantivyError::InvalidArgument(err_msg)); return Err(TantivyError::InvalidArgument(err_msg));
} }
Ok(IndexMerger { Ok(IndexMerger {
schema, schema,
@@ -166,10 +168,7 @@ impl IndexMerger {
}) })
} }
fn write_fieldnorms( fn write_fieldnorms(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> Result<()> {
&self,
fieldnorms_serializer: &mut FieldNormsSerializer,
) -> crate::Result<()> {
let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema); let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema);
let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize); let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
for field in fields { for field in fields {
@@ -190,7 +189,7 @@ impl IndexMerger {
&self, &self,
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>, mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
) -> crate::Result<()> { ) -> Result<()> {
for (field, field_entry) in self.schema.fields() { for (field, field_entry) in self.schema.fields() {
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
match *field_type { match *field_type {
@@ -235,7 +234,7 @@ impl IndexMerger {
&self, &self,
field: Field, field: Field,
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
) -> crate::Result<()> { ) -> Result<()> {
let mut u64_readers = vec![]; let mut u64_readers = vec![];
let mut min_value = u64::max_value(); let mut min_value = u64::max_value();
let mut max_value = u64::min_value(); let mut max_value = u64::min_value();
@@ -285,7 +284,7 @@ impl IndexMerger {
&self, &self,
field: Field, field: Field,
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
) -> crate::Result<()> { ) -> Result<()> {
let mut total_num_vals = 0u64; let mut total_num_vals = 0u64;
let mut u64s_readers: Vec<MultiValueIntFastFieldReader<u64>> = Vec::new(); let mut u64s_readers: Vec<MultiValueIntFastFieldReader<u64>> = Vec::new();
@@ -332,7 +331,7 @@ impl IndexMerger {
field: Field, field: Field,
term_ordinal_mappings: &TermOrdinalMapping, term_ordinal_mappings: &TermOrdinalMapping,
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
) -> crate::Result<()> { ) -> Result<()> {
// Multifastfield consists of 2 fastfields. // Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing. // The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values. // The second contains the actual values.
@@ -372,7 +371,7 @@ impl IndexMerger {
&self, &self,
field: Field, field: Field,
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
) -> crate::Result<()> { ) -> Result<()> {
// Multifastfield consists of 2 fastfields. // Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing. // The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values. // The second contains the actual values.
@@ -437,7 +436,7 @@ impl IndexMerger {
&self, &self,
field: Field, field: Field,
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
) -> crate::Result<()> { ) -> Result<()> {
let mut total_num_vals = 0u64; let mut total_num_vals = 0u64;
let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new(); let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new();
@@ -493,7 +492,7 @@ impl IndexMerger {
indexed_field: Field, indexed_field: Field,
field_type: &FieldType, field_type: &FieldType,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> crate::Result<Option<TermOrdinalMapping>> { ) -> Result<Option<TermOrdinalMapping>> {
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000); let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new(); let mut delta_computer = DeltaComputer::new();
let field_readers = self let field_readers = self
@@ -647,7 +646,7 @@ impl IndexMerger {
fn write_postings( fn write_postings(
&self, &self,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> { ) -> Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new(); let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() { for (field, field_entry) in self.schema.fields() {
if field_entry.is_indexed() { if field_entry.is_indexed() {
@@ -661,7 +660,7 @@ impl IndexMerger {
Ok(term_ordinal_mappings) Ok(term_ordinal_mappings)
} }
fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> { fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> Result<()> {
for reader in &self.readers { for reader in &self.readers {
let store_reader = reader.get_store_reader(); let store_reader = reader.get_store_reader();
if reader.num_deleted_docs() > 0 { if reader.num_deleted_docs() > 0 {
@@ -678,7 +677,7 @@ impl IndexMerger {
} }
impl SerializableSegment for IndexMerger { impl SerializableSegment for IndexMerger {
fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> { fn write(&self, mut serializer: SegmentSerializer) -> Result<u32> {
let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?; let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?;
self.write_fieldnorms(serializer.get_fieldnorms_serializer())?; self.write_fieldnorms(serializer.get_fieldnorms_serializer())?;
self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?; self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;

View File

@@ -19,8 +19,6 @@ pub struct AddOperation {
/// UserOperation is an enum type that encapsulates other operation types. /// UserOperation is an enum type that encapsulates other operation types.
#[derive(Eq, PartialEq, Debug)] #[derive(Eq, PartialEq, Debug)]
pub enum UserOperation { pub enum UserOperation {
/// Add operation
Add(Document), Add(Document),
/// Delete operation
Delete(Term), Delete(Term),
} }

View File

@@ -1,5 +1,6 @@
use super::IndexWriter; use super::IndexWriter;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use futures::executor::block_on; use futures::executor::block_on;
/// A prepared commit /// A prepared commit
@@ -26,11 +27,11 @@ impl<'a> PreparedCommit<'a> {
self.payload = Some(payload.to_string()) self.payload = Some(payload.to_string())
} }
pub fn abort(self) -> crate::Result<Opstamp> { pub fn abort(self) -> Result<Opstamp> {
self.index_writer.rollback() self.index_writer.rollback()
} }
pub fn commit(self) -> crate::Result<Opstamp> { pub fn commit(self) -> Result<Opstamp> {
info!("committing {}", self.opstamp); info!("committing {}", self.opstamp);
let _ = block_on( let _ = block_on(
self.index_writer self.index_writer

View File

@@ -4,6 +4,7 @@ use crate::core::SegmentMeta;
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::Result as TantivyResult;
use std::collections::hash_set::HashSet; use std::collections::hash_set::HashSet;
use std::fmt::{self, Debug, Formatter}; use std::fmt::{self, Debug, Formatter};
use std::sync::RwLock; use std::sync::RwLock;
@@ -48,7 +49,7 @@ pub struct SegmentManager {
} }
impl Debug for SegmentManager { impl Debug for SegmentManager {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
let lock = self.read(); let lock = self.read();
write!( write!(
f, f,
@@ -144,7 +145,7 @@ impl SegmentManager {
/// Returns an error if some segments are missing, or if /// Returns an error if some segments are missing, or if
/// the `segment_ids` are not either all committed or all /// the `segment_ids` are not either all committed or all
/// uncommitted. /// uncommitted.
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> crate::Result<Vec<SegmentEntry>> { pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> {
let registers_lock = self.read(); let registers_lock = self.read();
let mut segment_entries = vec![]; let mut segment_entries = vec![];
if registers_lock.uncommitted.contains_all(segment_ids) { if registers_lock.uncommitted.contains_all(segment_ids) {
@@ -187,7 +188,7 @@ impl SegmentManager {
.segments_status(before_merge_segment_ids) .segments_status(before_merge_segment_ids)
.ok_or_else(|| { .ok_or_else(|| {
warn!("couldn't find segment in SegmentManager"); warn!("couldn't find segment in SegmentManager");
crate::TantivyError::InvalidArgument( crate::Error::InvalidArgument(
"The segments that were merged could not be found in the SegmentManager. \ "The segments that were merged could not be found in the SegmentManager. \
This is not necessarily a bug, and can happen after a rollback for instance." This is not necessarily a bug, and can happen after a rollback for instance."
.to_string(), .to_string(),

View File

@@ -1,8 +1,13 @@
use crate::Directory;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::directory::error::OpenWriteError;
use crate::directory::{DirectoryClone, RAMDirectory, TerminatingWrite, WritePtr};
use crate::fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer; use crate::fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer; use crate::postings::InvertedIndexSerializer;
use crate::schema::Schema;
use crate::store::StoreWriter; use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk /// Segment serializer is in charge of laying out on disk
@@ -12,25 +17,50 @@ pub struct SegmentSerializer {
fast_field_serializer: FastFieldSerializer, fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: FieldNormsSerializer, fieldnorms_serializer: FieldNormsSerializer,
postings_serializer: InvertedIndexSerializer, postings_serializer: InvertedIndexSerializer,
bundle_writer: Option<(RAMDirectory, WritePtr)>,
}
pub(crate) struct SegmentSerializerWriters {
postings_wrt: WritePtr,
positions_skip_wrt: WritePtr,
positions_wrt: WritePtr,
terms_wrt: WritePtr,
fast_field_wrt: WritePtr,
fieldnorms_wrt: WritePtr,
store_wrt: WritePtr,
}
impl SegmentSerializerWriters {
pub(crate) fn for_segment(segment: &mut Segment) -> Result<Self, OpenWriteError> {
Ok(SegmentSerializerWriters {
postings_wrt: segment.open_write(SegmentComponent::POSTINGS)?,
positions_skip_wrt: segment.open_write(SegmentComponent::POSITIONSSKIP)?,
positions_wrt: segment.open_write(SegmentComponent::POSITIONS)?,
terms_wrt: segment.open_write(SegmentComponent::TERMS)?,
fast_field_wrt: segment.open_write(SegmentComponent::FASTFIELDS)?,
fieldnorms_wrt: segment.open_write(SegmentComponent::FIELDNORMS)?,
store_wrt: segment.open_write(SegmentComponent::STORE)?,
})
}
} }
impl SegmentSerializer { impl SegmentSerializer {
/// Creates a new `SegmentSerializer`. pub(crate) fn new(schema: Schema, writers: SegmentSerializerWriters) -> crate::Result<Self> {
pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> { let fast_field_serializer = FastFieldSerializer::from_write(writers.fast_field_wrt)?;
let store_write = segment.open_write(SegmentComponent::STORE)?; let fieldnorms_serializer = FieldNormsSerializer::from_write(writers.fieldnorms_wrt)?;
let postings_serializer = InvertedIndexSerializer::open(
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?; schema,
let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?; writers.terms_wrt,
writers.postings_wrt,
let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?; writers.positions_wrt,
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?; writers.positions_skip_wrt,
);
let postings_serializer = InvertedIndexSerializer::open(segment)?;
Ok(SegmentSerializer { Ok(SegmentSerializer {
store_writer: StoreWriter::new(store_write), store_writer: StoreWriter::new(writers.store_wrt),
fast_field_serializer, fast_field_serializer,
fieldnorms_serializer, fieldnorms_serializer,
postings_serializer, postings_serializer,
bundle_writer: None,
}) })
} }
@@ -55,11 +85,15 @@ impl SegmentSerializer {
} }
/// Finalize the segment serialization. /// Finalize the segment serialization.
pub fn close(self) -> crate::Result<()> { pub fn close(mut self) -> crate::Result<()> {
self.fast_field_serializer.close()?; self.fast_field_serializer.close()?;
self.postings_serializer.close()?; self.postings_serializer.close()?;
self.store_writer.close()?; self.store_writer.close()?;
self.fieldnorms_serializer.close()?; self.fieldnorms_serializer.close()?;
if let Some((ram_directory, mut bundle_wrt)) = self.bundle_writer.take() {
ram_directory.serialize_bundle(&mut bundle_wrt)?;
bundle_wrt.terminate()?;
}
Ok(()) Ok(())
} }
} }

View File

@@ -12,6 +12,7 @@ use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory; use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger; use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::SegmentsStatus; use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::segment_serializer::SegmentSerializerWriters;
use crate::indexer::stamper::Stamper; use crate::indexer::stamper::Stamper;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
@@ -132,7 +133,9 @@ fn merge(
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?; let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
// ... we just serialize this index merger in our new segment to merge the two segments. // ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?; let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut merged_segment)?;
let segment_serializer =
SegmentSerializer::new(merged_segment.schema(), segment_serializer_wrts)?;
let num_docs = merger.write(segment_serializer)?; let num_docs = merger.write(segment_serializer)?;
@@ -173,18 +176,14 @@ impl SegmentUpdater {
.pool_size(1) .pool_size(1)
.create() .create()
.map_err(|_| { .map_err(|_| {
crate::TantivyError::SystemError( crate::Error::SystemError("Failed to spawn segment updater thread".to_string())
"Failed to spawn segment updater thread".to_string(),
)
})?; })?;
let merge_thread_pool = ThreadPoolBuilder::new() let merge_thread_pool = ThreadPoolBuilder::new()
.name_prefix("merge_thread") .name_prefix("merge_thread")
.pool_size(NUM_MERGE_THREADS) .pool_size(NUM_MERGE_THREADS)
.create() .create()
.map_err(|_| { .map_err(|_| {
crate::TantivyError::SystemError( crate::Error::SystemError("Failed to spawn segment merging thread".to_string())
"Failed to spawn segment merging thread".to_string(),
)
})?; })?;
let index_meta = index.load_metas()?; let index_meta = index.load_metas()?;
Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater { Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
@@ -226,7 +225,7 @@ impl SegmentUpdater {
receiver.unwrap_or_else(|_| { receiver.unwrap_or_else(|_| {
let err_msg = let err_msg =
"A segment_updater future did not success. This should never happen.".to_string(); "A segment_updater future did not success. This should never happen.".to_string();
Err(crate::TantivyError::SystemError(err_msg)) Err(crate::Error::SystemError(err_msg))
}) })
} }
@@ -423,7 +422,7 @@ impl SegmentUpdater {
}); });
Ok(merging_future_recv Ok(merging_future_recv
.unwrap_or_else(|_| Err(crate::TantivyError::SystemError("Merge failed".to_string())))) .unwrap_or_else(|_| Err(crate::Error::SystemError("Merge failed".to_string()))))
} }
async fn consider_merge_options(&self) { async fn consider_merge_options(&self) {

View File

@@ -3,7 +3,7 @@ use crate::core::Segment;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter; use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter; use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::{SegmentSerializer, SegmentSerializerWriters};
use crate::postings::compute_table_size; use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldType; use crate::schema::FieldType;
@@ -11,18 +11,21 @@ use crate::schema::Schema;
use crate::schema::Term; use crate::schema::Term;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{Field, FieldEntry}; use crate::schema::{Field, FieldEntry};
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream}; use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::{FacetTokenizer, TextAnalyzer}; use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::{TokenStreamChain, Tokenizer}; use crate::tokenizer::PreTokenizedStream;
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::DocId; use crate::DocId;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use crate::TantivyError;
use std::io; use std::io;
use std::str; use std::str;
/// Computes the initial size of the hash table. /// Computes the initial size of the hash table.
/// ///
/// Returns a number of bits `b`, such that the recommended initial table size is 2^b. /// Returns a number of bits `b`, such that the recommended initial table size is 2^b.
fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> { fn initial_table_size(per_thread_memory_budget: usize) -> Result<usize> {
let table_memory_upper_bound = per_thread_memory_budget / 3; let table_memory_upper_bound = per_thread_memory_budget / 3;
if let Some(limit) = (10..) if let Some(limit) = (10..)
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_memory_upper_bound) .take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_memory_upper_bound)
@@ -30,7 +33,7 @@ fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
{ {
Ok(limit.min(19)) // we cap it at 2^19 = 512K. Ok(limit.min(19)) // we cap it at 2^19 = 512K.
} else { } else {
Err(crate::TantivyError::InvalidArgument( Err(TantivyError::InvalidArgument(
format!("per thread memory budget (={}) is too small. Raise the memory budget or lower the number of threads.", per_thread_memory_budget))) format!("per thread memory budget (={}) is too small. Raise the memory budget or lower the number of threads.", per_thread_memory_budget)))
} }
} }
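For intuition, the sizing rule above takes a third of the per-thread budget, keeps the largest `num_bits` whose table still fits under it, and never exceeds 19 bits (512K slots). A toy model of that search, with a made-up per-slot cost standing in for `compute_table_size` (which is not shown in this hunk):

```rust
// Toy stand-in: pretend each hash-table slot costs 32 bytes (illustrative only).
fn toy_table_size(num_bits: usize) -> usize {
    (1usize << num_bits) * 32
}

// Largest table that fits in a third of the budget, capped at 2^19 entries.
fn toy_initial_table_bits(per_thread_memory_budget: usize) -> Option<usize> {
    let upper_bound = per_thread_memory_budget / 3;
    (10..=19)
        .filter(|&num_bits| toy_table_size(num_bits) < upper_bound)
        .last()
}
```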
@@ -47,7 +50,7 @@ pub struct SegmentWriter {
fast_field_writers: FastFieldsWriter, fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter, fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>, doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<TextAnalyzer>>, tokenizers: Vec<Option<BoxedTokenizer>>,
} }
impl SegmentWriter { impl SegmentWriter {
@@ -64,9 +67,10 @@ impl SegmentWriter {
memory_budget: usize, memory_budget: usize,
mut segment: Segment, mut segment: Segment,
schema: &Schema, schema: &Schema,
) -> crate::Result<SegmentWriter> { ) -> Result<SegmentWriter> {
let table_num_bits = initial_table_size(memory_budget)?; let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?; let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut segment)?;
let segment_serializer = SegmentSerializer::new(segment.schema(), segment_serializer_wrts)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema let tokenizers = schema
.fields() .fields()
@@ -97,7 +101,7 @@ impl SegmentWriter {
/// ///
/// Finalize consumes the `SegmentWriter`, so that it cannot /// Finalize consumes the `SegmentWriter`, so that it cannot
/// be used afterwards. /// be used afterwards.
pub fn finalize(mut self) -> crate::Result<Vec<u64>> { pub fn finalize(mut self) -> Result<Vec<u64>> {
self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc); self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
write( write(
&self.multifield_postings, &self.multifield_postings,
@@ -156,7 +160,7 @@ impl SegmentWriter {
} }
} }
FieldType::Str(_) => { FieldType::Str(_) => {
let mut token_streams: Vec<BoxTokenStream> = vec![]; let mut token_streams: Vec<Box<dyn TokenStream>> = vec![];
let mut offsets = vec![]; let mut offsets = vec![];
let mut total_offset = 0; let mut total_offset = 0;
@@ -169,7 +173,7 @@ impl SegmentWriter {
} }
token_streams token_streams
.push(PreTokenizedStream::from(tok_str.clone()).into()); .push(Box::new(PreTokenizedStream::from(tok_str.clone())));
} }
Value::Str(ref text) => { Value::Str(ref text) => {
if let Some(ref mut tokenizer) = if let Some(ref mut tokenizer) =
@@ -188,7 +192,8 @@ impl SegmentWriter {
let num_tokens = if token_streams.is_empty() { let num_tokens = if token_streams.is_empty() {
0 0
} else { } else {
let mut token_stream = TokenStreamChain::new(offsets, token_streams); let mut token_stream: Box<dyn TokenStream> =
Box::new(TokenStreamChain::new(offsets, token_streams));
self.multifield_postings self.multifield_postings
.index_text(doc_id, field, &mut token_stream) .index_text(doc_id, field, &mut token_stream)
}; };
@@ -279,7 +284,7 @@ fn write(
fast_field_writers: &FastFieldsWriter, fast_field_writers: &FastFieldsWriter,
fieldnorms_writer: &FieldNormsWriter, fieldnorms_writer: &FieldNormsWriter,
mut serializer: SegmentSerializer, mut serializer: SegmentSerializer,
) -> crate::Result<()> { ) -> Result<()> {
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?; let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?; fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?; fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
@@ -288,7 +293,7 @@ fn write(
} }
impl SerializableSegment for SegmentWriter { impl SerializableSegment for SegmentWriter {
fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32> { fn write(&self, serializer: SegmentSerializer) -> Result<u32> {
let max_doc = self.max_doc; let max_doc = self.max_doc;
write( write(
&self.multifield_postings, &self.multifield_postings,

View File

@@ -1,76 +1,18 @@
use crate::Opstamp; use crate::Opstamp;
use std::ops::Range; use std::ops::Range;
use std::sync::atomic::Ordering; use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc; use std::sync::Arc;
#[cfg(not(target_arch = "arm"))]
mod atomic_impl {
use crate::Opstamp;
use std::sync::atomic::{AtomicU64, Ordering};
#[derive(Default)]
pub struct AtomicU64Wrapper(AtomicU64);
impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
}
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
self.0.fetch_add(val as u64, order) as u64
}
pub fn revert(&self, val: u64, order: Ordering) -> u64 {
self.0.store(val, order);
val
}
}
}
#[cfg(target_arch = "arm")]
mod atomic_impl {
use crate::Opstamp;
/// On other architectures, we rely on a lock.
use std::sync::atomic::Ordering;
use std::sync::RwLock;
#[derive(Default)]
pub struct AtomicU64Wrapper(RwLock<u64>);
impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
AtomicU64Wrapper(RwLock::new(first_opstamp))
}
pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
let mut lock = self.0.write().unwrap();
let previous_val = *lock;
*lock = previous_val + incr;
previous_val
}
pub fn revert(&self, val: u64, _order: Ordering) -> u64 {
let mut lock = self.0.write().unwrap();
*lock = val;
val
}
}
}
use self::atomic_impl::AtomicU64Wrapper;
/// Stamper provides Opstamps, which is just an auto-increment id to label /// Stamper provides Opstamps, which is just an auto-increment id to label
/// an operation. /// an operation.
/// ///
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`. /// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub struct Stamper(Arc<AtomicU64Wrapper>); pub struct Stamper(Arc<AtomicU64>);
impl Stamper { impl Stamper {
pub fn new(first_opstamp: Opstamp) -> Stamper { pub fn new(first_opstamp: Opstamp) -> Stamper {
Stamper(Arc::new(AtomicU64Wrapper::new(first_opstamp))) Stamper(Arc::new(AtomicU64::new(first_opstamp)))
} }
pub fn stamp(&self) -> Opstamp { pub fn stamp(&self) -> Opstamp {
@@ -89,7 +31,8 @@ impl Stamper {
/// Reverts the stamper to a given `Opstamp` value and returns it /// Reverts the stamper to a given `Opstamp` value and returns it
pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp { pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
self.0.revert(to_opstamp, Ordering::SeqCst) self.0.store(to_opstamp, Ordering::SeqCst);
to_opstamp
} }
} }
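The simplified `Stamper` is expected to keep the contract the wrapper provided. A hedged sketch of that contract as a test (assuming `stamp()` still fetch-adds by one and returns the previous value, which this hunk does not show):

```rust
#[test]
fn stamper_contract_sketch() {
    // Assumes Stamper is in scope, e.g. via `use super::Stamper;` in the stamper module.
    let stamper = Stamper::new(10);
    assert_eq!(stamper.stamp(), 10); // the previous value is returned
    assert_eq!(stamper.stamp(), 11);
    assert_eq!(stamper.revert(5), 5); // store the given opstamp and echo it back
    assert_eq!(stamper.stamp(), 5);
}
```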

View File

@@ -121,13 +121,13 @@ mod functional_test;
mod macros; mod macros;
pub use crate::error::TantivyError; pub use crate::error::TantivyError;
#[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")]
pub use crate::error::TantivyError as Error;
pub use chrono; pub use chrono;
/// Tantivy result. /// Tantivy result.
/// pub type Result<T> = std::result::Result<T, error::TantivyError>;
/// Within tantivy, please avoid importing `Result` using `use crate::Result`
/// and instead, refer to this as `crate::Result<T>`.
pub type Result<T> = std::result::Result<T, TantivyError>;
/// Tantivy DateTime /// Tantivy DateTime
pub type DateTime = chrono::DateTime<chrono::Utc>; pub type DateTime = chrono::DateTime<chrono::Utc>;
@@ -161,11 +161,10 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, SkipResult}; pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::{Executor, SegmentComponent}; pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
pub use crate::core::{InvertedIndexReader, SegmentReader}; pub use crate::core::{InvertedIndexReader, SegmentReader};
pub use crate::directory::Directory; pub use crate::directory::Directory;
pub use crate::indexer::operation::UserOperation;
pub use crate::indexer::IndexWriter; pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings; pub use crate::postings::Postings;
pub use crate::reader::LeasedItem; pub use crate::reader::LeasedItem;

View File

@@ -75,7 +75,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut segment = index.new_segment(); let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap(); let mut posting_serializer = InvertedIndexSerializer::for_segment(&mut segment).unwrap();
{ {
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap(); let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
field_serializer.new_term("abc".as_bytes()).unwrap(); field_serializer.new_term("abc".as_bytes()).unwrap();

View File

@@ -11,6 +11,7 @@ use crate::termdict::TermOrdinal;
use crate::tokenizer::TokenStream; use crate::tokenizer::TokenStream;
use crate::tokenizer::{Token, MAX_TOKEN_LEN}; use crate::tokenizer::{Token, MAX_TOKEN_LEN};
use crate::DocId; use crate::DocId;
use crate::Result;
use fnv::FnvHashMap; use fnv::FnvHashMap;
use std::collections::HashMap; use std::collections::HashMap;
use std::io; use std::io;
@@ -128,7 +129,7 @@ impl MultiFieldPostingsWriter {
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> { ) -> Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> = let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect(); self.term_index.iter().collect();
term_offsets.sort_unstable_by_key(|&(k, _, _)| k); term_offsets.sort_unstable_by_key(|&(k, _, _)| k);

View File

@@ -10,7 +10,8 @@ use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId; use crate::Result;
use crate::{Directory, DocId};
use std::io::{self, Write}; use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing /// `InvertedIndexSerializer` is in charge of serializing
@@ -53,33 +54,36 @@ pub struct InvertedIndexSerializer {
} }
impl InvertedIndexSerializer { impl InvertedIndexSerializer {
/// Open a new `InvertedIndexSerializer` for the given segment pub(crate) fn for_segment(segment: &mut Segment) -> crate::Result<Self> {
fn create( let schema = segment.schema();
terms_write: CompositeWrite<WritePtr>, use crate::core::SegmentComponent;
postings_write: CompositeWrite<WritePtr>, let terms_wrt = segment.open_write(SegmentComponent::TERMS)?;
positions_write: CompositeWrite<WritePtr>, let postings_wrt = segment.open_write(SegmentComponent::POSTINGS)?;
positionsidx_write: CompositeWrite<WritePtr>, let positions_wrt = segment.open_write(SegmentComponent::POSITIONS)?;
schema: Schema, let positions_idx_wrt = segment.open_write(SegmentComponent::POSITIONSSKIP)?;
) -> crate::Result<InvertedIndexSerializer> { Ok(Self::open(
Ok(InvertedIndexSerializer {
terms_write,
postings_write,
positions_write,
positionsidx_write,
schema, schema,
}) terms_wrt,
postings_wrt,
positions_wrt,
positions_idx_wrt,
))
} }
/// Open a new `PostingsSerializer` for the given segment /// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> { pub(crate) fn open(
use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS}; schema: Schema,
InvertedIndexSerializer::create( terms_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(TERMS)?), postings_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(POSTINGS)?), positions_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(POSITIONS)?), positions_idx_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(POSITIONSSKIP)?), ) -> InvertedIndexSerializer {
segment.schema(), InvertedIndexSerializer {
) terms_write: CompositeWrite::wrap(terms_wrt),
postings_write: CompositeWrite::wrap(postings_wrt),
positions_write: CompositeWrite::wrap(positions_wrt),
positionsidx_write: CompositeWrite::wrap(positions_idx_wrt),
schema,
}
} }
/// Must be called before starting pushing terms of /// Must be called before starting pushing terms of
@@ -147,7 +151,8 @@ impl<'a> FieldSerializer<'a> {
} }
_ => (false, false), _ => (false, false),
}; };
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?; let term_dictionary_builder =
TermDictionaryBuilder::create(term_dictionary_write, &field_type)?;
let postings_serializer = let postings_serializer =
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled); PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if position_enabled { let positions_serializer_opt = if position_enabled {

View File

@@ -4,6 +4,7 @@ use crate::docset::DocSet;
use crate::query::explanation::does_not_match; use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight}; use crate::query::{Explanation, Query, Scorer, Weight};
use crate::DocId; use crate::DocId;
use crate::Result;
use crate::Score; use crate::Score;
/// Query that matches all of the documents. /// Query that matches all of the documents.
@@ -13,7 +14,7 @@ use crate::Score;
pub struct AllQuery; pub struct AllQuery;
impl Query for AllQuery { impl Query for AllQuery {
fn weight(&self, _: &Searcher, _: bool) -> crate::Result<Box<dyn Weight>> { fn weight(&self, _: &Searcher, _: bool) -> Result<Box<dyn Weight>> {
Ok(Box::new(AllWeight)) Ok(Box::new(AllWeight))
} }
} }
@@ -22,7 +23,7 @@ impl Query for AllQuery {
pub struct AllWeight; pub struct AllWeight;
impl Weight for AllWeight { impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
Ok(Box::new(AllScorer { Ok(Box::new(AllScorer {
state: State::NotStarted, state: State::NotStarted,
doc: 0u32, doc: 0u32,
@@ -30,7 +31,7 @@ impl Weight for AllWeight {
})) }))
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
if doc >= reader.max_doc() { if doc >= reader.max_doc() {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
} }

View File

@@ -5,6 +5,7 @@ use crate::query::TermQuery;
use crate::query::Weight; use crate::query::Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::Result;
use crate::Searcher; use crate::Searcher;
use std::collections::BTreeSet; use std::collections::BTreeSet;
@@ -29,9 +30,9 @@ use std::collections::BTreeSet;
///use tantivy::query::{BooleanQuery, Occur, PhraseQuery, Query, TermQuery}; ///use tantivy::query::{BooleanQuery, Occur, PhraseQuery, Query, TermQuery};
///use tantivy::schema::{IndexRecordOption, Schema, TEXT}; ///use tantivy::schema::{IndexRecordOption, Schema, TEXT};
///use tantivy::Term; ///use tantivy::Term;
///use tantivy::Index; ///use tantivy::{Index, Result};
/// ///
///fn main() -> tantivy::Result<()> { ///fn main() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let body = schema_builder.add_text_field("body", TEXT); /// let body = schema_builder.add_text_field("body", TEXT);
@@ -148,14 +149,14 @@ impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery {
} }
impl Query for BooleanQuery { impl Query for BooleanQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> { fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<dyn Weight>> {
let sub_weights = self let sub_weights = self
.subqueries .subqueries
.iter() .iter()
.map(|&(ref occur, ref subquery)| { .map(|&(ref occur, ref subquery)| {
Ok((*occur, subquery.weight(searcher, scoring_enabled)?)) Ok((*occur, subquery.weight(searcher, scoring_enabled)?))
}) })
.collect::<crate::Result<_>>()?; .collect::<Result<_>>()?;
Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled))) Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled)))
} }


@@ -10,6 +10,7 @@ use crate::query::Scorer;
use crate::query::Union; use crate::query::Union;
use crate::query::Weight; use crate::query::Weight;
use crate::query::{intersect_scorers, Explanation}; use crate::query::{intersect_scorers, Explanation};
use crate::Result;
use crate::{DocId, SkipResult}; use crate::{DocId, SkipResult};
use std::collections::HashMap; use std::collections::HashMap;
@@ -55,7 +56,7 @@ impl BooleanWeight {
fn per_occur_scorers( fn per_occur_scorers(
&self, &self,
reader: &SegmentReader, reader: &SegmentReader,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> { ) -> Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new(); let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
for &(ref occur, ref subweight) in &self.weights { for &(ref occur, ref subweight) in &self.weights {
let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader)?; let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader)?;
@@ -70,7 +71,7 @@ impl BooleanWeight {
fn complex_scorer<TScoreCombiner: ScoreCombiner>( fn complex_scorer<TScoreCombiner: ScoreCombiner>(
&self, &self,
reader: &SegmentReader, reader: &SegmentReader,
) -> crate::Result<Box<dyn Scorer>> { ) -> Result<Box<dyn Scorer>> {
let mut per_occur_scorers = self.per_occur_scorers(reader)?; let mut per_occur_scorers = self.per_occur_scorers(reader)?;
let should_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers let should_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
@@ -112,7 +113,7 @@ impl BooleanWeight {
} }
impl Weight for BooleanWeight { impl Weight for BooleanWeight {
fn scorer(&self, reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
if self.weights.is_empty() { if self.weights.is_empty() {
Ok(Box::new(EmptyScorer)) Ok(Box::new(EmptyScorer))
} else if self.weights.len() == 1 { } else if self.weights.len() == 1 {
@@ -129,7 +130,7 @@ impl Weight for BooleanWeight {
} }
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader)?; let mut scorer = self.scorer(reader)?;
if scorer.skip_next(doc) != SkipResult::Reached { if scorer.skip_next(doc) != SkipResult::Reached {
return Err(does_not_match(doc)); return Err(does_not_match(doc));


@@ -4,6 +4,7 @@ use crate::query::Weight;
use crate::query::{Explanation, Query}; use crate::query::{Explanation, Query};
use crate::DocId; use crate::DocId;
use crate::DocSet; use crate::DocSet;
use crate::Result;
use crate::Score; use crate::Score;
use crate::Searcher; use crate::Searcher;
use crate::SegmentReader; use crate::SegmentReader;
@@ -15,15 +16,11 @@ use crate::SegmentReader;
pub struct EmptyQuery; pub struct EmptyQuery;
impl Query for EmptyQuery { impl Query for EmptyQuery {
fn weight( fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
&self,
_searcher: &Searcher,
_scoring_enabled: bool,
) -> crate::Result<Box<dyn Weight>> {
Ok(Box::new(EmptyWeight)) Ok(Box::new(EmptyWeight))
} }
fn count(&self, _searcher: &Searcher) -> crate::Result<usize> { fn count(&self, _searcher: &Searcher) -> Result<usize> {
Ok(0) Ok(0)
} }
} }
@@ -33,11 +30,11 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases. /// It is useful for tests and handling edge cases.
pub struct EmptyWeight; pub struct EmptyWeight;
impl Weight for EmptyWeight { impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, _reader: &SegmentReader) -> Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer)) Ok(Box::new(EmptyScorer))
} }
fn explain(&self, _reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, _reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
Err(does_not_match(doc)) Err(does_not_match(doc))
} }
} }


@@ -1,7 +1,8 @@
use crate::error::TantivyError::InvalidArgument;
use crate::query::{AutomatonWeight, Query, Weight}; use crate::query::{AutomatonWeight, Query, Weight};
use crate::schema::Term; use crate::schema::Term;
use crate::Result;
use crate::Searcher; use crate::Searcher;
use crate::TantivyError::InvalidArgument;
use levenshtein_automata::{LevenshteinAutomatonBuilder, DFA}; use levenshtein_automata::{LevenshteinAutomatonBuilder, DFA};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use std::collections::HashMap; use std::collections::HashMap;
@@ -30,9 +31,9 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// use tantivy::collector::{Count, TopDocs}; /// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::FuzzyTermQuery; /// use tantivy::query::FuzzyTermQuery;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Term}; /// use tantivy::{doc, Index, Result, Term};
/// ///
/// fn example() -> tantivy::Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
@@ -101,7 +102,7 @@ impl FuzzyTermQuery {
} }
} }
fn specialized_weight(&self) -> crate::Result<AutomatonWeight<DFA>> { fn specialized_weight(&self) -> Result<AutomatonWeight<DFA>> {
// LEV_BUILDER is a HashMap, whose `get` method returns an Option // LEV_BUILDER is a HashMap, whose `get` method returns an Option
match LEV_BUILDER.get(&(self.distance, false)) { match LEV_BUILDER.get(&(self.distance, false)) {
// Unwrap the option and build the Ok(AutomatonWeight) // Unwrap the option and build the Ok(AutomatonWeight)
@@ -118,11 +119,7 @@ impl FuzzyTermQuery {
} }
impl Query for FuzzyTermQuery { impl Query for FuzzyTermQuery {
fn weight( fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
&self,
_searcher: &Searcher,
_scoring_enabled: bool,
) -> crate::Result<Box<dyn Weight>> {
Ok(Box::new(self.specialized_weight()?)) Ok(Box::new(self.specialized_weight()?))
} }
} }


@@ -1,10 +1,12 @@
use super::PhraseWeight; use super::PhraseWeight;
use crate::core::searcher::Searcher; use crate::core::searcher::Searcher;
use crate::error::TantivyError;
use crate::query::bm25::BM25Weight; use crate::query::bm25::BM25Weight;
use crate::query::Query; use crate::query::Query;
use crate::query::Weight; use crate::query::Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::{Field, Term}; use crate::schema::{Field, Term};
use crate::Result;
use std::collections::BTreeSet; use std::collections::BTreeSet;
/// `PhraseQuery` matches a specific sequence of words. /// `PhraseQuery` matches a specific sequence of words.
@@ -79,7 +81,7 @@ impl PhraseQuery {
&self, &self,
searcher: &Searcher, searcher: &Searcher,
scoring_enabled: bool, scoring_enabled: bool,
) -> crate::Result<PhraseWeight> { ) -> Result<PhraseWeight> {
let schema = searcher.schema(); let schema = searcher.schema();
let field_entry = schema.get_field_entry(self.field); let field_entry = schema.get_field_entry(self.field);
let has_positions = field_entry let has_positions = field_entry
@@ -89,7 +91,7 @@ impl PhraseQuery {
.unwrap_or(false); .unwrap_or(false);
if !has_positions { if !has_positions {
let field_name = field_entry.name(); let field_name = field_entry.name();
return Err(crate::TantivyError::SchemaError(format!( return Err(TantivyError::SchemaError(format!(
"Applied phrase query on field {:?}, which does not have positions indexed", "Applied phrase query on field {:?}, which does not have positions indexed",
field_name field_name
))); )));
@@ -108,7 +110,7 @@ impl Query for PhraseQuery {
/// Create the weight associated to a query. /// Create the weight associated to a query.
/// ///
/// See [`Weight`](./trait.Weight.html). /// See [`Weight`](./trait.Weight.html).
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> { fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<dyn Weight>> {
let phrase_weight = self.phrase_weight(searcher, scoring_enabled)?; let phrase_weight = self.phrase_weight(searcher, scoring_enabled)?;
Ok(Box::new(phrase_weight)) Ok(Box::new(phrase_weight))
} }
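A small sketch of building the `PhraseQuery` described above; `field` is an assumed TEXT field indexed with positions (otherwise building the weight fails with the `SchemaError` shown in this hunk):

```rust
use tantivy::query::PhraseQuery;
use tantivy::Term;

// "hello world" as an exact sequence of two terms on `field`.
let phrase = PhraseQuery::new(vec![
    Term::from_field_text(field, "hello"),
    Term::from_field_text(field, "world"),
]);
```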


@@ -2,6 +2,7 @@ use super::Weight;
use crate::core::searcher::Searcher; use crate::core::searcher::Searcher;
use crate::query::Explanation; use crate::query::Explanation;
use crate::DocAddress; use crate::DocAddress;
use crate::Result;
use crate::Term; use crate::Term;
use downcast_rs::impl_downcast; use downcast_rs::impl_downcast;
use std::collections::BTreeSet; use std::collections::BTreeSet;
@@ -47,17 +48,17 @@ pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug {
/// can increase performances. /// can increase performances.
/// ///
/// See [`Weight`](./trait.Weight.html). /// See [`Weight`](./trait.Weight.html).
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>>; fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<dyn Weight>>;
/// Returns an `Explanation` for the score of the document. /// Returns an `Explanation` for the score of the document.
fn explain(&self, searcher: &Searcher, doc_address: DocAddress) -> crate::Result<Explanation> { fn explain(&self, searcher: &Searcher, doc_address: DocAddress) -> Result<Explanation> {
let reader = searcher.segment_reader(doc_address.segment_ord()); let reader = searcher.segment_reader(doc_address.segment_ord());
let weight = self.weight(searcher, true)?; let weight = self.weight(searcher, true)?;
weight.explain(reader, doc_address.doc()) weight.explain(reader, doc_address.doc())
} }
/// Returns the number of documents matching the query. /// Returns the number of documents matching the query.
fn count(&self, searcher: &Searcher) -> crate::Result<usize> { fn count(&self, searcher: &Searcher) -> Result<usize> {
let weight = self.weight(searcher, false)?; let weight = self.weight(searcher, false)?;
let mut result = 0; let mut result = 0;
for reader in searcher.segment_readers() { for reader in searcher.segment_readers() {
@@ -85,11 +86,11 @@ where
} }
impl Query for Box<dyn Query> { impl Query for Box<dyn Query> {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> { fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<dyn Weight>> {
self.as_ref().weight(searcher, scoring_enabled) self.as_ref().weight(searcher, scoring_enabled)
} }
fn count(&self, searcher: &Searcher) -> crate::Result<usize> { fn count(&self, searcher: &Searcher) -> Result<usize> {
self.as_ref().count(searcher) self.as_ref().count(searcher)
} }
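A usage sketch of the two default trait methods above; `query`, `searcher`, and `doc_address` (an address returned by an earlier search) are assumptions, inside a function returning `tantivy::Result<()>`:

```rust
// Total number of matching documents across all segments.
let num_hits = query.count(&searcher)?;
println!("{} documents match", num_hits);

// Score breakdown for one specific hit, kept around for inspection.
let _explanation = query.explain(&searcher, doc_address)?;
```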


@@ -533,7 +533,7 @@ mod test {
use crate::schema::{IndexRecordOption, TextFieldIndexing, TextOptions}; use crate::schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
use crate::schema::{Schema, Term, INDEXED, STORED, STRING, TEXT}; use crate::schema::{Schema, Term, INDEXED, STORED, STRING, TEXT};
use crate::tokenizer::{ use crate::tokenizer::{
LowerCaser, SimpleTokenizer, StopWordFilter, TextAnalyzer, TokenizerManager, LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager,
}; };
use crate::Index; use crate::Index;
use matches::assert_matches; use matches::assert_matches;
@@ -563,7 +563,7 @@ mod test {
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register( tokenizer_manager.register(
"en_with_stop_words", "en_with_stop_words",
TextAnalyzer::from(SimpleTokenizer) SimpleTokenizer
.filter(LowerCaser) .filter(LowerCaser)
.filter(StopWordFilter::remove(vec!["the".to_string()])), .filter(StopWordFilter::remove(vec!["the".to_string()])),
); );


@@ -1,6 +1,7 @@
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::query::{AutomatonWeight, Query, Weight}; use crate::query::{AutomatonWeight, Query, Weight};
use crate::schema::Field; use crate::schema::Field;
use crate::Result;
use crate::Searcher; use crate::Searcher;
use std::clone::Clone; use std::clone::Clone;
use std::sync::Arc; use std::sync::Arc;
@@ -57,7 +58,7 @@ pub struct RegexQuery {
impl RegexQuery { impl RegexQuery {
/// Creates a new RegexQuery from a given pattern /// Creates a new RegexQuery from a given pattern
pub fn from_pattern(regex_pattern: &str, field: Field) -> crate::Result<Self> { pub fn from_pattern(regex_pattern: &str, field: Field) -> Result<Self> {
let regex = Regex::new(&regex_pattern) let regex = Regex::new(&regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(regex_pattern.to_string()))?; .map_err(|_| TantivyError::InvalidArgument(regex_pattern.to_string()))?;
Ok(RegexQuery::from_regex(regex, field)) Ok(RegexQuery::from_regex(regex, field))
@@ -77,11 +78,7 @@ impl RegexQuery {
} }
impl Query for RegexQuery { impl Query for RegexQuery {
fn weight( fn weight(&self, _searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
&self,
_searcher: &Searcher,
_scoring_enabled: bool,
) -> crate::Result<Box<dyn Weight>> {
Ok(Box::new(self.specialized_weight())) Ok(Box::new(self.specialized_weight()))
} }
} }
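A minimal sketch of the `RegexQuery::from_pattern` constructor shown above; `field` and `searcher` are assumptions, inside a function returning `tantivy::Result<()>`:

```rust
use tantivy::collector::Count;
use tantivy::query::RegexQuery;

// Matches every document holding a term on `field` that starts with "tant".
let query = RegexQuery::from_pattern("tant.*", field)?;
let num_matches = searcher.search(&query, &Count)?;
println!("{} documents contain a matching term", num_matches);
```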


@@ -3,6 +3,7 @@ use crate::query::bm25::BM25Weight;
use crate::query::Query; use crate::query::Query;
use crate::query::Weight; use crate::query::Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::Result;
use crate::Searcher; use crate::Searcher;
use crate::Term; use crate::Term;
use std::collections::BTreeSet; use std::collections::BTreeSet;
@@ -100,7 +101,7 @@ impl TermQuery {
} }
impl Query for TermQuery { impl Query for TermQuery {
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> { fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<dyn Weight>> {
Ok(Box::new(self.specialized_weight(searcher, scoring_enabled))) Ok(Box::new(self.specialized_weight(searcher, scoring_enabled)))
} }
fn query_terms(&self, term_set: &mut BTreeSet<Term>) { fn query_terms(&self, term_set: &mut BTreeSet<Term>) {
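A usage sketch for `TermQuery`, assuming an indexed text `field`, an existing `searcher`, and a function returning `tantivy::Result<()>`:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::IndexRecordOption;
use tantivy::Term;

// Top 10 documents containing the term "diff" in `field`.
let query = TermQuery::new(
    Term::from_field_text(field, "diff"),
    IndexRecordOption::Basic,
);
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
for (score, doc_address) in top_docs {
    println!("{:?} scored {}", doc_address, score);
}
```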


@@ -7,6 +7,7 @@ use crate::directory::Directory;
use crate::directory::WatchHandle; use crate::directory::WatchHandle;
use crate::directory::META_LOCK; use crate::directory::META_LOCK;
use crate::Index; use crate::Index;
use crate::Result;
use crate::Searcher; use crate::Searcher;
use crate::SegmentReader; use crate::SegmentReader;
use std::sync::Arc; use std::sync::Arc;
@@ -61,7 +62,7 @@ impl IndexReaderBuilder {
/// to open different segment readers. It may take hundreds of milliseconds /// to open different segment readers. It may take hundreds of milliseconds
/// of time and it may return an error. /// of time and it may return an error.
/// TODO(pmasurel) Use the `TryInto` trait once it is available in stable. /// TODO(pmasurel) Use the `TryInto` trait once it is available in stable.
pub fn try_into(self) -> crate::Result<IndexReader> { pub fn try_into(self) -> Result<IndexReader> {
let inner_reader = InnerIndexReader { let inner_reader = InnerIndexReader {
index: self.index, index: self.index,
num_searchers: self.num_searchers, num_searchers: self.num_searchers,
@@ -120,14 +121,14 @@ struct InnerIndexReader {
} }
impl InnerIndexReader { impl InnerIndexReader {
fn reload(&self) -> crate::Result<()> { fn reload(&self) -> Result<()> {
let segment_readers: Vec<SegmentReader> = { let segment_readers: Vec<SegmentReader> = {
let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?; let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?; let searchable_segments = self.searchable_segments()?;
searchable_segments searchable_segments
.iter() .iter()
.map(SegmentReader::open) .map(SegmentReader::open)
.collect::<crate::Result<_>>()? .collect::<Result<_>>()?
}; };
let schema = self.index.schema(); let schema = self.index.schema();
let searchers = (0..self.num_searchers) let searchers = (0..self.num_searchers)
@@ -138,7 +139,7 @@ impl InnerIndexReader {
} }
/// Returns the list of segments that are searchable /// Returns the list of segments that are searchable
fn searchable_segments(&self) -> crate::Result<Vec<Segment>> { fn searchable_segments(&self) -> Result<Vec<Segment>> {
self.index.searchable_segments() self.index.searchable_segments()
} }
@@ -175,7 +176,7 @@ impl IndexReader {
/// ///
/// This automatic reload can take 10s of milliseconds to kick in however, and in unit tests /// This automatic reload can take 10s of milliseconds to kick in however, and in unit tests
/// it can be nice to deterministically force the reload of searchers. /// it can be nice to deterministically force the reload of searchers.
pub fn reload(&self) -> crate::Result<()> { pub fn reload(&self) -> Result<()> {
self.inner.reload() self.inner.reload()
} }
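A sketch of the reader lifecycle touched above, assuming an existing `index` and a function returning `tantivy::Result<()>`:

```rust
// Build the reader explicitly through the builder, as `try_into` above does.
let reader = index.reader_builder().try_into()?;

// Force the searchers to pick up the latest committed segments right away,
// instead of waiting for the automatic reload to kick in.
reader.reload()?;
let searcher = reader.searcher();
println!("{} documents are searchable", searcher.num_docs());
```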


@@ -122,11 +122,6 @@ impl Facet {
pub fn to_path(&self) -> Vec<&str> { pub fn to_path(&self) -> Vec<&str> {
self.encoded_str().split(|c| c == FACET_SEP_CHAR).collect() self.encoded_str().split(|c| c == FACET_SEP_CHAR).collect()
} }
/// This function is the inverse of Facet::from(&str).
pub fn to_path_string(&self) -> String {
format!("{}", self.to_string())
}
} }
impl Borrow<str> for Facet { impl Borrow<str> for Facet {
@@ -270,21 +265,4 @@ mod tests {
let facet = Facet::from_path(v.iter()); let facet = Facet::from_path(v.iter());
assert_eq!(facet.to_path(), v); assert_eq!(facet.to_path(), v);
} }
#[test]
fn test_to_path_string() {
let v = ["first", "second", "third/not_fourth"];
let facet = Facet::from_path(v.iter());
assert_eq!(
facet.to_path_string(),
String::from("/first/second/third\\/not_fourth")
);
}
#[test]
fn test_to_path_string_empty() {
let v: Vec<&str> = vec![];
let facet = Facet::from_path(v.iter());
assert_eq!(facet.to_path_string(), "/");
}
} }
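A round-trip sketch for the facet path helpers exercised by the remaining test; the `to_string` equivalence is an assumption drawn from the body of the removed `to_path_string` helper:

```rust
use tantivy::schema::Facet;

let facet = Facet::from_path(["first", "second", "third/not_fourth"].iter());
assert_eq!(facet.to_path(), vec!["first", "second", "third/not_fourth"]);

// `to_path_string` was a thin wrapper around `to_string`, so Display
// formatting yields the same escaped representation.
assert_eq!(facet.to_string(), "/first/second/third\\/not_fourth");
```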


@@ -1,8 +1,10 @@
use crate::query::Query; use crate::query::Query;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::Value; use crate::schema::Value;
use crate::tokenizer::{TextAnalyzer, Token}; use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::{Token, TokenStream};
use crate::Document; use crate::Document;
use crate::Result;
use crate::Searcher; use crate::Searcher;
use htmlescape::encode_minimal; use htmlescape::encode_minimal;
use std::cmp::Ordering; use std::cmp::Ordering;
@@ -140,7 +142,7 @@ impl Snippet {
/// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\ /// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\
/// has to be a valid string. /// has to be a valid string.
fn search_fragments<'a>( fn search_fragments<'a>(
tokenizer: &TextAnalyzer, tokenizer: &BoxedTokenizer,
text: &'a str, text: &'a str,
terms: &BTreeMap<String, f32>, terms: &BTreeMap<String, f32>,
max_num_chars: usize, max_num_chars: usize,
@@ -249,7 +251,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// ``` /// ```
pub struct SnippetGenerator { pub struct SnippetGenerator {
terms_text: BTreeMap<String, f32>, terms_text: BTreeMap<String, f32>,
tokenizer: TextAnalyzer, tokenizer: BoxedTokenizer,
field: Field, field: Field,
max_num_chars: usize, max_num_chars: usize,
} }
@@ -260,7 +262,7 @@ impl SnippetGenerator {
searcher: &Searcher, searcher: &Searcher,
query: &dyn Query, query: &dyn Query,
field: Field, field: Field,
) -> crate::Result<SnippetGenerator> { ) -> Result<SnippetGenerator> {
let mut terms = BTreeSet::new(); let mut terms = BTreeSet::new();
query.query_terms(&mut terms); query.query_terms(&mut terms);
let terms_text: BTreeMap<String, f32> = terms let terms_text: BTreeMap<String, f32> = terms
@@ -345,11 +347,12 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet() { fn test_snippet() {
let boxed_tokenizer = SimpleTokenizer.into();
let terms = btreemap! { let terms = btreemap! {
String::from("rust") => 1.0, String::from("rust") => 1.0,
String::from("language") => 0.9 String::from("language") => 0.9
}; };
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 100); let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 100);
assert_eq!(fragments.len(), 7); assert_eq!(fragments.len(), 7);
{ {
let first = &fragments[0]; let first = &fragments[0];
@@ -371,12 +374,13 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_scored_fragment() { fn test_snippet_scored_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
{ {
let terms = btreemap! { let terms = btreemap! {
String::from("rust") =>1.0f32, String::from("rust") =>1.0f32,
String::from("language") => 0.9f32 String::from("language") => 0.9f32
}; };
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20); let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
{ {
let first = &fragments[0]; let first = &fragments[0];
assert_eq!(first.score, 1.0); assert_eq!(first.score, 1.0);
@@ -385,12 +389,13 @@ Survey in 2016, 2017, and 2018."#;
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT); let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems") assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems")
} }
let boxed_tokenizer = SimpleTokenizer.into();
{ {
let terms = btreemap! { let terms = btreemap! {
String::from("rust") =>0.9f32, String::from("rust") =>0.9f32,
String::from("language") => 1.0f32 String::from("language") => 1.0f32
}; };
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20); let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
//assert_eq!(fragments.len(), 7); //assert_eq!(fragments.len(), 7);
{ {
let first = &fragments[0]; let first = &fragments[0];
@@ -404,12 +409,14 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_in_second_fragment() { fn test_snippet_in_second_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d e f g"; let text = "a b c d e f g";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("c"), 1.0); terms.insert(String::from("c"), 1.0);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3); let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 1); assert_eq!(fragments.len(), 1);
{ {
@@ -426,12 +433,14 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_term_at_the_end_of_fragment() { fn test_snippet_with_term_at_the_end_of_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d e f f g"; let text = "a b c d e f f g";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("f"), 1.0); terms.insert(String::from("f"), 1.0);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3); let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 2); assert_eq!(fragments.len(), 2);
{ {
@@ -448,13 +457,15 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_second_fragment_has_the_highest_score() { fn test_snippet_with_second_fragment_has_the_highest_score() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d e f g"; let text = "a b c d e f g";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("f"), 1.0); terms.insert(String::from("f"), 1.0);
terms.insert(String::from("a"), 0.9); terms.insert(String::from("a"), 0.9);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 7); let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 7);
assert_eq!(fragments.len(), 2); assert_eq!(fragments.len(), 2);
{ {
@@ -471,12 +482,14 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_term_not_in_text() { fn test_snippet_with_term_not_in_text() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d"; let text = "a b c d";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("z"), 1.0); terms.insert(String::from("z"), 1.0);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3); let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 0); assert_eq!(fragments.len(), 0);
@@ -487,10 +500,12 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_no_terms() { fn test_snippet_with_no_terms() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d"; let text = "a b c d";
let terms = BTreeMap::new(); let terms = BTreeMap::new();
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3); let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
assert_eq!(fragments.len(), 0); assert_eq!(fragments.len(), 0);
let snippet = select_best_fragment_combination(&fragments[..], &text); let snippet = select_best_fragment_combination(&fragments[..], &text);
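A small usage sketch for the `SnippetGenerator` type above; `searcher`, `query`, `field`, and a retrieved `doc` are assumptions, and `snippet_from_doc` is assumed to be the generator's document-level accessor:

```rust
// Build a generator for `query` over `field`, then render one hit as HTML.
let snippet_generator = SnippetGenerator::create(&searcher, &*query, field)?;
let snippet = snippet_generator.snippet_from_doc(&doc);
println!("{}", snippet.to_html());
```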


@@ -57,7 +57,7 @@ use self::compression_snap::{compress, decompress};
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::schema::Document; use crate::schema::Document;
use crate::schema::FieldValue; use crate::schema::FieldValue;
use crate::schema::Schema; use crate::schema::Schema;


@@ -1,3 +1,5 @@
use crate::Result;
use super::decompress; use super::decompress;
use super::skiplist::SkipList; use super::skiplist::SkipList;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
@@ -73,7 +75,7 @@ impl StoreReader {
/// ///
/// It should not be called to score documents /// It should not be called to score documents
/// for instance. /// for instance.
pub fn get(&self, doc_id: DocId) -> crate::Result<Document> { pub fn get(&self, doc_id: DocId) -> Result<Document> {
let (first_doc_id, block_offset) = self.block_offset(doc_id); let (first_doc_id, block_offset) = self.block_offset(doc_id);
self.read_block(block_offset as usize)?; self.read_block(block_offset as usize)?;
let current_block_mut = self.current_block.borrow_mut(); let current_block_mut = self.current_block.borrow_mut();


@@ -36,9 +36,9 @@ pub use self::termdict::{TermDictionary, TermDictionaryBuilder};
mod tests { mod tests {
use super::{TermDictionary, TermDictionaryBuilder, TermStreamer}; use super::{TermDictionary, TermDictionaryBuilder, TermStreamer};
use crate::core::Index; use crate::core::Index;
use crate::directory::{Directory, RAMDirectory, ReadOnlySource}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, ReadOnlySource};
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::schema::{Document, Schema, TEXT}; use crate::schema::{Document, FieldType, Schema, TEXT};
use std::path::PathBuf; use std::path::PathBuf;
use std::str; use std::str;
@@ -52,12 +52,6 @@ mod tests {
} }
} }
#[test]
fn test_empty_term_dictionary() {
let empty = TermDictionary::empty();
assert!(empty.stream().next().is_none());
}
#[test] #[test]
fn test_term_ordinals() { fn test_term_ordinals() {
const COUNTRIES: [&'static str; 7] = [ const COUNTRIES: [&'static str; 7] = [
@@ -73,7 +67,9 @@ mod tests {
let path = PathBuf::from("TermDictionary"); let path = PathBuf::from("TermDictionary");
{ {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap(); let field_type = FieldType::Str(TEXT);
let mut term_dictionary_builder =
TermDictionaryBuilder::create(write, &field_type).unwrap();
for term in COUNTRIES.iter() { for term in COUNTRIES.iter() {
term_dictionary_builder term_dictionary_builder
.insert(term.as_bytes(), &make_term_info(0u64)) .insert(term.as_bytes(), &make_term_info(0u64))
@@ -97,7 +93,9 @@ mod tests {
let path = PathBuf::from("TermDictionary"); let path = PathBuf::from("TermDictionary");
{ {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap(); let field_type = FieldType::Str(TEXT);
let mut term_dictionary_builder =
TermDictionaryBuilder::create(write, &field_type).unwrap();
term_dictionary_builder term_dictionary_builder
.insert("abc".as_bytes(), &make_term_info(34u64)) .insert("abc".as_bytes(), &make_term_info(34u64))
.unwrap(); .unwrap();
@@ -181,8 +179,10 @@ mod tests {
let ids: Vec<_> = (0u32..10_000u32) let ids: Vec<_> = (0u32..10_000u32)
.map(|i| (format!("doc{:0>6}", i), i)) .map(|i| (format!("doc{:0>6}", i), i))
.collect(); .collect();
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap(); let mut term_dictionary_builder =
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for &(ref id, ref i) in &ids { for &(ref id, ref i) in &ids {
term_dictionary_builder term_dictionary_builder
.insert(id.as_bytes(), &make_term_info(*i as u64)) .insert(id.as_bytes(), &make_term_info(*i as u64))
@@ -209,8 +209,10 @@ mod tests {
#[test] #[test]
fn test_stream_high_range_prefix_suffix() { fn test_stream_high_range_prefix_suffix() {
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap(); let mut term_dictionary_builder =
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
// term requires more than 16bits // term requires more than 16bits
term_dictionary_builder term_dictionary_builder
.insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1)) .insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))
@@ -242,8 +244,10 @@ mod tests {
let ids: Vec<_> = (0u32..10_000u32) let ids: Vec<_> = (0u32..10_000u32)
.map(|i| (format!("doc{:0>6}", i), i)) .map(|i| (format!("doc{:0>6}", i), i))
.collect(); .collect();
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap(); let mut term_dictionary_builder =
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for &(ref id, ref i) in &ids { for &(ref id, ref i) in &ids {
term_dictionary_builder term_dictionary_builder
.insert(id.as_bytes(), &make_term_info(*i as u64)) .insert(id.as_bytes(), &make_term_info(*i as u64))
@@ -309,8 +313,10 @@ mod tests {
#[test] #[test]
fn test_empty_string() { fn test_empty_string() {
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap(); let mut term_dictionary_builder =
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
term_dictionary_builder term_dictionary_builder
.insert(&[], &make_term_info(1 as u64)) .insert(&[], &make_term_info(1 as u64))
.unwrap(); .unwrap();
@@ -331,8 +337,10 @@ mod tests {
#[test] #[test]
fn test_stream_range_boundaries() { fn test_stream_range_boundaries() {
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap(); let mut term_dictionary_builder =
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for i in 0u8..10u8 { for i in 0u8..10u8 {
let number_arr = [i; 1]; let number_arr = [i; 1];
term_dictionary_builder term_dictionary_builder
@@ -344,91 +352,41 @@ mod tests {
let source = ReadOnlySource::from(buffer); let source = ReadOnlySource::from(buffer);
let term_dictionary: TermDictionary = TermDictionary::from_source(&source); let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
let value_list = |mut streamer: TermStreamer<'_>, backwards: bool| { let value_list = |mut streamer: TermStreamer<'_>| {
let mut res: Vec<u32> = vec![]; let mut res: Vec<u32> = vec![];
while let Some((_, ref v)) = streamer.next() { while let Some((_, ref v)) = streamer.next() {
res.push(v.doc_freq); res.push(v.doc_freq);
} }
if backwards {
res.reverse();
}
res res
}; };
{
let range = term_dictionary.range().backward().into_stream();
assert_eq!(
value_list(range, true),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
);
}
{ {
let range = term_dictionary.range().ge([2u8]).into_stream(); let range = term_dictionary.range().ge([2u8]).into_stream();
assert_eq!( assert_eq!(
value_list(range, false), value_list(range),
vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
);
}
{
let range = term_dictionary.range().ge([2u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32] vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
); );
} }
{ {
let range = term_dictionary.range().gt([2u8]).into_stream(); let range = term_dictionary.range().gt([2u8]).into_stream();
assert_eq!( assert_eq!(
value_list(range, false), value_list(range),
vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
);
}
{
let range = term_dictionary.range().gt([2u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32] vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
); );
} }
{ {
let range = term_dictionary.range().lt([6u8]).into_stream(); let range = term_dictionary.range().lt([6u8]).into_stream();
assert_eq!( assert_eq!(value_list(range), vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]);
value_list(range, false),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]
);
}
{
let range = term_dictionary.range().lt([6u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]
);
} }
{ {
let range = term_dictionary.range().le([6u8]).into_stream(); let range = term_dictionary.range().le([6u8]).into_stream();
assert_eq!( assert_eq!(
value_list(range, false), value_list(range),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32]
);
}
{
let range = term_dictionary.range().le([6u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32] vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32]
); );
} }
{ {
let range = term_dictionary.range().ge([0u8]).lt([5u8]).into_stream(); let range = term_dictionary.range().ge([0u8]).lt([5u8]).into_stream();
assert_eq!(value_list(range, false), vec![0u32, 1u32, 2u32, 3u32, 4u32]); assert_eq!(value_list(range), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
}
{
let range = term_dictionary
.range()
.ge([0u8])
.lt([5u8])
.backward()
.into_stream();
assert_eq!(value_list(range, true), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
} }
} }
@@ -450,7 +408,9 @@ mod tests {
let path = PathBuf::from("TermDictionary"); let path = PathBuf::from("TermDictionary");
{ {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap(); let field_type = FieldType::Str(TEXT);
let mut term_dictionary_builder =
TermDictionaryBuilder::create(write, &field_type).unwrap();
for term in COUNTRIES.iter() { for term in COUNTRIES.iter() {
term_dictionary_builder term_dictionary_builder
.insert(term.as_bytes(), &make_term_info(0u64)) .insert(term.as_bytes(), &make_term_info(0u64))
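A condensed sketch of the builder setup used throughout these tests; `make_term_info` is the tests' local helper for producing a `TermInfo`, and terms are inserted in sorted order as the underlying FST requires:

```rust
use tantivy::schema::{FieldType, TEXT};
use tantivy::termdict::TermDictionaryBuilder;

// Write the dictionary into an in-memory buffer, as the tests above do.
let field_type = FieldType::Str(TEXT);
let mut builder = TermDictionaryBuilder::create(Vec::<u8>::new(), &field_type)?;
builder.insert("apple".as_bytes(), &make_term_info(0u64))?;
builder.insert("banana".as_bytes(), &make_term_info(1u64))?;
let buffer: Vec<u8> = builder.finish()?;
```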


@@ -51,12 +51,6 @@ where
self self
} }
/// Iterate over the range backwards.
pub fn backward(mut self) -> Self {
self.stream_builder = self.stream_builder.backward();
self
}
/// Creates the stream corresponding to the range /// Creates the stream corresponding to the range
/// of terms defined using the `TermStreamerBuilder`. /// of terms defined using the `TermStreamerBuilder`.
pub fn into_stream(self) -> TermStreamer<'a, A> { pub fn into_stream(self) -> TermStreamer<'a, A> {


@@ -4,8 +4,8 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter; use crate::common::CountingWriter;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::schema::FieldType;
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use once_cell::sync::Lazy;
use std::io::{self, Write}; use std::io::{self, Write};
use tantivy_fst; use tantivy_fst;
use tantivy_fst::raw::Fst; use tantivy_fst::raw::Fst;
@@ -29,7 +29,7 @@ where
W: Write, W: Write,
{ {
/// Creates a new `TermDictionaryBuilder` /// Creates a new `TermDictionaryBuilder`
pub fn create(w: W) -> io::Result<Self> { pub fn create(w: W, _field_type: &FieldType) -> io::Result<Self> {
let fst_builder = tantivy_fst::MapBuilder::new(w).map_err(convert_fst_error)?; let fst_builder = tantivy_fst::MapBuilder::new(w).map_err(convert_fst_error)?;
Ok(TermDictionaryBuilder { Ok(TermDictionaryBuilder {
fst_builder, fst_builder,
@@ -92,14 +92,6 @@ fn open_fst_index(source: ReadOnlySource) -> tantivy_fst::Map<ReadOnlySource> {
tantivy_fst::Map::from(fst) tantivy_fst::Map::from(fst)
} }
static EMPTY_DATA_SOURCE: Lazy<ReadOnlySource> = Lazy::new(|| {
let term_dictionary_data: Vec<u8> = TermDictionaryBuilder::create(Vec::<u8>::new())
.expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
.finish()
.expect("Writing in a Vec<u8> should never fail");
ReadOnlySource::from(term_dictionary_data)
});
/// The term dictionary contains all of the terms in /// The term dictionary contains all of the terms in
/// `tantivy index` in a sorted manner. /// `tantivy index` in a sorted manner.
/// ///
@@ -130,8 +122,14 @@ impl TermDictionary {
} }
/// Creates an empty term dictionary which contains no terms. /// Creates an empty term dictionary which contains no terms.
pub fn empty() -> Self { pub fn empty(field_type: &FieldType) -> Self {
TermDictionary::from_source(&*EMPTY_DATA_SOURCE) let term_dictionary_data: Vec<u8> =
TermDictionaryBuilder::create(Vec::<u8>::new(), &field_type)
.expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
.finish()
.expect("Writing in a Vec<u8> should never fail");
let source = ReadOnlySource::from(term_dictionary_data);
Self::from_source(&source)
} }
/// Returns the number of terms in the dictionary. /// Returns the number of terms in the dictionary.


@@ -2,7 +2,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let tokenizer = TextAnalyzer::from(RawTokenizer) //! let tokenizer = RawTokenizer
//! .filter(AlphaNumOnlyFilter); //! .filter(AlphaNumOnlyFilter);
//! //!
//! let mut stream = tokenizer.token_stream("hello there"); //! let mut stream = tokenizer.token_stream("hello there");
@@ -10,7 +10,7 @@
//! // contains a space //! // contains a space
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! //!
//! let tokenizer = TextAnalyzer::from(SimpleTokenizer) //! let tokenizer = SimpleTokenizer
//! .filter(AlphaNumOnlyFilter); //! .filter(AlphaNumOnlyFilter);
//! //!
//! let mut stream = tokenizer.token_stream("hello there 💣"); //! let mut stream = tokenizer.token_stream("hello there 💣");
@@ -19,30 +19,56 @@
//! // the "emoji" is dropped because its not an alphanum //! // the "emoji" is dropped because its not an alphanum
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! ``` //! ```
use super::{BoxTokenStream, Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
/// `TokenFilter` that removes all tokens that contain non /// `TokenFilter` that removes all tokens that contain non
/// ascii alphanumeric characters. /// ascii alphanumeric characters.
#[derive(Clone)] #[derive(Clone)]
pub struct AlphaNumOnlyFilter; pub struct AlphaNumOnlyFilter;
pub struct AlphaNumOnlyFilterStream<'a> { pub struct AlphaNumOnlyFilterStream<TailTokenStream>
tail: BoxTokenStream<'a>, where
TailTokenStream: TokenStream,
{
tail: TailTokenStream,
} }
impl<'a> AlphaNumOnlyFilterStream<'a> { impl<TailTokenStream> AlphaNumOnlyFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn predicate(&self, token: &Token) -> bool { fn predicate(&self, token: &Token) -> bool {
token.text.chars().all(|c| c.is_ascii_alphanumeric()) token.text.chars().all(|c| c.is_ascii_alphanumeric())
} }
}
impl TokenFilter for AlphaNumOnlyFilter { fn wrap(tail: TailTokenStream) -> AlphaNumOnlyFilterStream<TailTokenStream> {
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> { AlphaNumOnlyFilterStream { tail }
BoxTokenStream::from(AlphaNumOnlyFilterStream { tail: token_stream })
} }
} }
impl<'a> TokenStream for AlphaNumOnlyFilterStream<'a> { impl<TailTokenStream> TokenFilter<TailTokenStream> for AlphaNumOnlyFilter
where
TailTokenStream: TokenStream,
{
type ResultTokenStream = AlphaNumOnlyFilterStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
AlphaNumOnlyFilterStream::wrap(token_stream)
}
}
impl<TailTokenStream> TokenStream for AlphaNumOnlyFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.tail.advance() { while self.tail.advance() {
if self.predicate(self.tail.token()) { if self.predicate(self.tail.token()) {
@@ -52,12 +78,4 @@ impl<'a> TokenStream for AlphaNumOnlyFilterStream<'a> {
false false
} }
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
} }


@@ -1,4 +1,4 @@
use super::{BoxTokenStream, Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use std::mem; use std::mem;
/// This class converts alphabetic, numeric, and symbolic Unicode characters /// This class converts alphabetic, numeric, and symbolic Unicode characters
@@ -7,21 +7,26 @@ use std::mem;
#[derive(Clone)] #[derive(Clone)]
pub struct AsciiFoldingFilter; pub struct AsciiFoldingFilter;
impl TokenFilter for AsciiFoldingFilter { impl<TailTokenStream> TokenFilter<TailTokenStream> for AsciiFoldingFilter
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> { where
From::from(AsciiFoldingFilterTokenStream { TailTokenStream: TokenStream,
tail: token_stream, {
buffer: String::with_capacity(100), type ResultTokenStream = AsciiFoldingFilterTokenStream<TailTokenStream>;
})
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
AsciiFoldingFilterTokenStream::wrap(token_stream)
} }
} }
pub struct AsciiFoldingFilterTokenStream<'a> { pub struct AsciiFoldingFilterTokenStream<TailTokenStream> {
buffer: String, buffer: String,
tail: BoxTokenStream<'a>, tail: TailTokenStream,
} }
impl<'a> TokenStream for AsciiFoldingFilterTokenStream<'a> { impl<TailTokenStream> TokenStream for AsciiFoldingFilterTokenStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
if !self.tail.advance() { if !self.tail.advance() {
return false; return false;
@@ -43,6 +48,18 @@ impl<'a> TokenStream for AsciiFoldingFilterTokenStream<'a> {
} }
} }
impl<TailTokenStream> AsciiFoldingFilterTokenStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn wrap(tail: TailTokenStream) -> AsciiFoldingFilterTokenStream<TailTokenStream> {
AsciiFoldingFilterTokenStream {
tail,
buffer: String::with_capacity(100),
}
}
}
// Returns a string that represents the ascii folded version of // Returns a string that represents the ascii folded version of
// the character. If the `char` does not require ascii folding // the character. If the `char` does not require ascii folding
// (e.g. simple ASCII chars like `A`) or if the `char` // (e.g. simple ASCII chars like `A`) or if the `char`
@@ -1544,7 +1561,8 @@ mod tests {
use crate::tokenizer::AsciiFoldingFilter; use crate::tokenizer::AsciiFoldingFilter;
use crate::tokenizer::RawTokenizer; use crate::tokenizer::RawTokenizer;
use crate::tokenizer::SimpleTokenizer; use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::TextAnalyzer; use crate::tokenizer::TokenStream;
use crate::tokenizer::Tokenizer;
use std::iter; use std::iter;
#[test] #[test]
@@ -1561,7 +1579,7 @@ mod tests {
fn folding_helper(text: &str) -> Vec<String> { fn folding_helper(text: &str) -> Vec<String> {
let mut tokens = Vec::new(); let mut tokens = Vec::new();
TextAnalyzer::from(SimpleTokenizer) SimpleTokenizer
.filter(AsciiFoldingFilter) .filter(AsciiFoldingFilter)
.token_stream(text) .token_stream(text)
.process(&mut |token| { .process(&mut |token| {
@@ -1571,9 +1589,7 @@ mod tests {
} }
fn folding_using_raw_tokenizer_helper(text: &str) -> String { fn folding_using_raw_tokenizer_helper(text: &str) -> String {
let mut token_stream = TextAnalyzer::from(RawTokenizer) let mut token_stream = RawTokenizer.filter(AsciiFoldingFilter).token_stream(text);
.filter(AsciiFoldingFilter)
.token_stream(text);
token_stream.advance(); token_stream.advance();
token_stream.token().text.clone() token_stream.token().text.clone()
} }


@@ -1,4 +1,4 @@
use super::{BoxTokenStream, Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use crate::schema::FACET_SEP_BYTE; use crate::schema::FACET_SEP_BYTE;
/// The `FacetTokenizer` process a `Facet` binary representation /// The `FacetTokenizer` process a `Facet` binary representation
@@ -25,14 +25,15 @@ pub struct FacetTokenStream<'a> {
token: Token, token: Token,
} }
impl Tokenizer for FacetTokenizer { impl<'a> Tokenizer<'a> for FacetTokenizer {
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> { type TokenStreamImpl = FacetTokenStream<'a>;
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
FacetTokenStream { FacetTokenStream {
text, text,
state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet. state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet.
token: Token::default(), token: Token::default(),
} }
.into()
} }
} }
@@ -83,7 +84,7 @@ mod tests {
use super::FacetTokenizer; use super::FacetTokenizer;
use crate::schema::Facet; use crate::schema::Facet;
use crate::tokenizer::{Token, Tokenizer}; use crate::tokenizer::{Token, TokenStream, Tokenizer};
#[test] #[test]
fn test_facet_tokenizer() { fn test_facet_tokenizer() {


@@ -1,23 +1,24 @@
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
use std::mem; use std::mem;
impl TokenFilter for LowerCaser {
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
BoxTokenStream::from(LowerCaserTokenStream {
tail: token_stream,
buffer: String::with_capacity(100),
})
}
}
/// Token filter that lowercase terms. /// Token filter that lowercase terms.
#[derive(Clone)] #[derive(Clone)]
pub struct LowerCaser; pub struct LowerCaser;
pub struct LowerCaserTokenStream<'a> { impl<TailTokenStream> TokenFilter<TailTokenStream> for LowerCaser
where
TailTokenStream: TokenStream,
{
type ResultTokenStream = LowerCaserTokenStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
LowerCaserTokenStream::wrap(token_stream)
}
}
pub struct LowerCaserTokenStream<TailTokenStream> {
buffer: String, buffer: String,
tail: BoxTokenStream<'a>, tail: TailTokenStream,
} }
// writes a lowercased version of text into output. // writes a lowercased version of text into output.
@@ -30,7 +31,18 @@ fn to_lowercase_unicode(text: &mut String, output: &mut String) {
} }
} }
impl<'a> TokenStream for LowerCaserTokenStream<'a> { impl<TailTokenStream> TokenStream for LowerCaserTokenStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
if !self.tail.advance() { if !self.tail.advance() {
return false; return false;
@@ -44,19 +56,26 @@ impl<'a> TokenStream for LowerCaserTokenStream<'a> {
} }
true true
} }
}
fn token(&self) -> &Token { impl<TailTokenStream> LowerCaserTokenStream<TailTokenStream>
self.tail.token() where
} TailTokenStream: TokenStream,
{
fn token_mut(&mut self) -> &mut Token { fn wrap(tail: TailTokenStream) -> LowerCaserTokenStream<TailTokenStream> {
self.tail.token_mut() LowerCaserTokenStream {
tail,
buffer: String::with_capacity(100),
}
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer}; use crate::tokenizer::LowerCaser;
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::TokenStream;
use crate::tokenizer::Tokenizer;
#[test] #[test]
fn test_to_lower_case() { fn test_to_lower_case() {
@@ -68,9 +87,7 @@ mod tests {
fn lowercase_helper(text: &str) -> Vec<String> { fn lowercase_helper(text: &str) -> Vec<String> {
let mut tokens = vec![]; let mut tokens = vec![];
let mut token_stream = TextAnalyzer::from(SimpleTokenizer) let mut token_stream = SimpleTokenizer.filter(LowerCaser).token_stream(text);
.filter(LowerCaser)
.token_stream(text);
while token_stream.advance() { while token_stream.advance() {
let token_text = token_stream.token().text.clone(); let token_text = token_stream.token().text.clone();
tokens.push(token_text); tokens.push(token_text);


@@ -64,7 +64,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let en_stem = TextAnalyzer::from(SimpleTokenizer) //! let en_stem = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(40)) //! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser) //! .filter(LowerCaser)
//! .filter(Stemmer::new(Language::English)); //! .filter(Stemmer::new(Language::English));
@@ -109,7 +109,7 @@
//! let index = Index::create_in_ram(schema); //! let index = Index::create_in_ram(schema);
//! //!
//! // We need to register our tokenizer : //! // We need to register our tokenizer :
//! let custom_en_tokenizer = TextAnalyzer::from(SimpleTokenizer) //! let custom_en_tokenizer = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(40)) //! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser); //! .filter(LowerCaser);
//! index //! index
@@ -143,11 +143,10 @@ pub use self::simple_tokenizer::SimpleTokenizer;
pub use self::stemmer::{Language, Stemmer}; pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter; pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain; pub(crate) use self::token_stream_chain::TokenStreamChain;
pub use self::tokenizer::BoxedTokenizer;
pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString}; pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
pub use self::tokenizer::{ pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
BoxTokenFilter, BoxTokenStream, TextAnalyzer, Token, TokenFilter, TokenStream, Tokenizer,
};
pub use self::tokenizer_manager::TokenizerManager; pub use self::tokenizer_manager::TokenizerManager;
@@ -161,9 +160,9 @@ pub const MAX_TOKEN_LEN: usize = u16::max_value() as usize - 4;
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::{ use super::{
Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, TokenizerManager, Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, Tokenizer,
TokenizerManager,
}; };
use crate::tokenizer::TextAnalyzer;
/// This is a function that can be used in tests and doc tests /// This is a function that can be used in tests and doc tests
/// to assert a token's correctness. /// to assert a token's correctness.
@@ -230,7 +229,7 @@ pub mod tests {
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register( tokenizer_manager.register(
"el_stem", "el_stem",
TextAnalyzer::from(SimpleTokenizer) SimpleTokenizer
.filter(RemoveLongFilter::limit(40)) .filter(RemoveLongFilter::limit(40))
.filter(LowerCaser) .filter(LowerCaser)
.filter(Stemmer::new(Language::Greek)), .filter(Stemmer::new(Language::Greek)),
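Registering an analyzer only takes effect once a field refers to it by name; a sketch of wiring the `"el_stem"` tokenizer registered above into a schema (the field name and options are illustrative):

```rust
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};

// Index the "body" field with the custom "el_stem" analyzer registered above.
let text_indexing = TextFieldIndexing::default()
    .set_tokenizer("el_stem")
    .set_index_option(IndexRecordOption::WithFreqsAndPositions);
let text_options = TextOptions::default()
    .set_indexing_options(text_indexing)
    .set_stored();

let mut schema_builder = Schema::builder();
schema_builder.add_text_field("body", text_options);
let schema = schema_builder.build();
```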


@@ -1,5 +1,4 @@
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use crate::tokenizer::BoxTokenStream;
/// Tokenize the text by splitting words into n-grams of the given size(s) /// Tokenize the text by splitting words into n-grams of the given size(s)
/// ///
@@ -130,9 +129,11 @@ pub struct NgramTokenStream<'a> {
token: Token, token: Token,
} }
impl Tokenizer for NgramTokenizer { impl<'a> Tokenizer<'a> for NgramTokenizer {
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> { type TokenStreamImpl = NgramTokenStream<'a>;
From::from(NgramTokenStream {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
NgramTokenStream {
ngram_charidx_iterator: StutteringIterator::new( ngram_charidx_iterator: StutteringIterator::new(
CodepointFrontiers::for_str(text), CodepointFrontiers::for_str(text),
self.min_gram, self.min_gram,
@@ -141,7 +142,7 @@ impl Tokenizer for NgramTokenizer {
prefix_only: self.prefix_only, prefix_only: self.prefix_only,
text, text,
token: Token::default(), token: Token::default(),
}) }
} }
} }
@@ -307,10 +308,10 @@ mod tests {
use super::NgramTokenizer; use super::NgramTokenizer;
use super::StutteringIterator; use super::StutteringIterator;
use crate::tokenizer::tests::assert_token; use crate::tokenizer::tests::assert_token;
use crate::tokenizer::tokenizer::Tokenizer; use crate::tokenizer::tokenizer::{TokenStream, Tokenizer};
use crate::tokenizer::{BoxTokenStream, Token}; use crate::tokenizer::Token;
fn test_helper(mut tokenizer: BoxTokenStream) -> Vec<Token> { fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
let mut tokens: Vec<Token> = vec![]; let mut tokens: Vec<Token> = vec![];
tokenizer.process(&mut |token: &Token| tokens.push(token.clone())); tokenizer.process(&mut |token: &Token| tokens.push(token.clone()));
tokens tokens


@@ -1,5 +1,4 @@
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use crate::tokenizer::BoxTokenStream;
/// For each value of the field, emit a single unprocessed token. /// For each value of the field, emit a single unprocessed token.
#[derive(Clone)] #[derive(Clone)]
@@ -10,8 +9,10 @@ pub struct RawTokenStream {
has_token: bool, has_token: bool,
} }
impl Tokenizer for RawTokenizer { impl<'a> Tokenizer<'a> for RawTokenizer {
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> { type TokenStreamImpl = RawTokenStream;
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
let token = Token { let token = Token {
offset_from: 0, offset_from: 0,
offset_to: text.len(), offset_to: text.len(),
@@ -23,7 +24,6 @@ impl Tokenizer for RawTokenizer {
token, token,
has_token: true, has_token: true,
} }
.into()
} }
} }


@@ -2,7 +2,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let tokenizer = TextAnalyzer::from(SimpleTokenizer) //! let tokenizer = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(5)); //! .filter(RemoveLongFilter::limit(5));
//! //!
//! let mut stream = tokenizer.token_stream("toolong nice"); //! let mut stream = tokenizer.token_stream("toolong nice");
@@ -13,7 +13,6 @@
//! ``` //! ```
//! //!
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
/// `RemoveLongFilter` removes tokens that are longer /// `RemoveLongFilter` removes tokens that are longer
/// than a given number of bytes (in UTF-8 representation). /// than a given number of bytes (in UTF-8 representation).
@@ -32,27 +31,56 @@ impl RemoveLongFilter {
} }
} }
impl<'a> RemoveLongFilterStream<'a> { impl<TailTokenStream> RemoveLongFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn predicate(&self, token: &Token) -> bool { fn predicate(&self, token: &Token) -> bool {
token.text.len() < self.token_length_limit token.text.len() < self.token_length_limit
} }
}
impl TokenFilter for RemoveLongFilter { fn wrap(
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> { token_length_limit: usize,
BoxTokenStream::from(RemoveLongFilterStream { tail: TailTokenStream,
token_length_limit: self.length_limit, ) -> RemoveLongFilterStream<TailTokenStream> {
tail: token_stream, RemoveLongFilterStream {
}) token_length_limit,
tail,
}
} }
} }
pub struct RemoveLongFilterStream<'a> { impl<TailTokenStream> TokenFilter<TailTokenStream> for RemoveLongFilter
token_length_limit: usize, where
tail: BoxTokenStream<'a>, TailTokenStream: TokenStream,
{
type ResultTokenStream = RemoveLongFilterStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
RemoveLongFilterStream::wrap(self.length_limit, token_stream)
}
} }
impl<'a> TokenStream for RemoveLongFilterStream<'a> { pub struct RemoveLongFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
token_length_limit: usize,
tail: TailTokenStream,
}
impl<TailTokenStream> TokenStream for RemoveLongFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.tail.advance() { while self.tail.advance() {
if self.predicate(self.tail.token()) { if self.predicate(self.tail.token()) {
@@ -61,12 +89,4 @@ impl<'a> TokenStream for RemoveLongFilterStream<'a> {
} }
false false
} }
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
} }
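To make the new trait shape concrete, here is a hedged sketch of a user-defined filter against the `TokenFilter`/`TokenStream` traits on the right-hand side of this hunk. `AsciiOnlyFilter` and its behaviour are invented for illustration; the wrap-and-predicate structure mirrors `RemoveLongFilterStream` above.

    use tantivy::tokenizer::{Token, TokenFilter, TokenStream};

    // Illustrative only: drops every token that contains non-ASCII characters.
    #[derive(Clone)]
    struct AsciiOnlyFilter;

    struct AsciiOnlyFilterStream<TailTokenStream: TokenStream> {
        tail: TailTokenStream,
    }

    impl<TailTokenStream: TokenStream> TokenFilter<TailTokenStream> for AsciiOnlyFilter {
        type ResultTokenStream = AsciiOnlyFilterStream<TailTokenStream>;

        fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
            AsciiOnlyFilterStream { tail: token_stream }
        }
    }

    impl<TailTokenStream: TokenStream> TokenStream for AsciiOnlyFilterStream<TailTokenStream> {
        fn advance(&mut self) -> bool {
            // Keep pulling from the wrapped stream until a token passes the predicate.
            while self.tail.advance() {
                if self.tail.token().text.is_ascii() {
                    return true;
                }
            }
            false
        }

        fn token(&self) -> &Token {
            self.tail.token()
        }

        fn token_mut(&mut self) -> &mut Token {
            self.tail.token_mut()
        }
    }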

View File

@@ -1,4 +1,3 @@
use super::BoxTokenStream;
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use std::str::CharIndices; use std::str::CharIndices;
@@ -12,13 +11,15 @@ pub struct SimpleTokenStream<'a> {
token: Token, token: Token,
} }
impl Tokenizer for SimpleTokenizer { impl<'a> Tokenizer<'a> for SimpleTokenizer {
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> { type TokenStreamImpl = SimpleTokenStream<'a>;
BoxTokenStream::from(SimpleTokenStream {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
SimpleTokenStream {
text, text,
chars: text.char_indices(), chars: text.char_indices(),
token: Token::default(), token: Token::default(),
}) }
} }
} }

View File

@@ -1,5 +1,4 @@
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
use rust_stemmers::{self, Algorithm}; use rust_stemmers::{self, Algorithm};
/// Available stemmer languages. /// Available stemmer languages.
@@ -76,22 +75,38 @@ impl Default for Stemmer {
} }
} }
impl TokenFilter for Stemmer { impl<TailTokenStream> TokenFilter<TailTokenStream> for Stemmer
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> { where
TailTokenStream: TokenStream,
{
type ResultTokenStream = StemmerTokenStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
let inner_stemmer = rust_stemmers::Stemmer::create(self.stemmer_algorithm); let inner_stemmer = rust_stemmers::Stemmer::create(self.stemmer_algorithm);
BoxTokenStream::from(StemmerTokenStream { StemmerTokenStream::wrap(inner_stemmer, token_stream)
tail: token_stream,
stemmer: inner_stemmer,
})
} }
} }
pub struct StemmerTokenStream<'a> { pub struct StemmerTokenStream<TailTokenStream>
tail: BoxTokenStream<'a>, where
TailTokenStream: TokenStream,
{
tail: TailTokenStream,
stemmer: rust_stemmers::Stemmer, stemmer: rust_stemmers::Stemmer,
} }
impl<'a> TokenStream for StemmerTokenStream<'a> { impl<TailTokenStream> TokenStream for StemmerTokenStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
if !self.tail.advance() { if !self.tail.advance() {
return false; return false;
@@ -102,12 +117,16 @@ impl<'a> TokenStream for StemmerTokenStream<'a> {
self.token_mut().text.push_str(&stemmed_str); self.token_mut().text.push_str(&stemmed_str);
true true
} }
}
fn token(&self) -> &Token { impl<TailTokenStream> StemmerTokenStream<TailTokenStream>
self.tail.token() where
} TailTokenStream: TokenStream,
{
fn token_mut(&mut self) -> &mut Token { fn wrap(
self.tail.token_mut() stemmer: rust_stemmers::Stemmer,
tail: TailTokenStream,
) -> StemmerTokenStream<TailTokenStream> {
StemmerTokenStream { tail, stemmer }
} }
} }

View File

@@ -2,7 +2,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let tokenizer = TextAnalyzer::from(SimpleTokenizer) //! let tokenizer = SimpleTokenizer
//! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()])); //! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
//! //!
//! let mut stream = tokenizer.token_stream("the fox is crafty"); //! let mut stream = tokenizer.token_stream("the fox is crafty");
@@ -11,7 +11,6 @@
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! ``` //! ```
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
use fnv::FnvHasher; use fnv::FnvHasher;
use std::collections::HashSet; use std::collections::HashSet;
use std::hash::BuildHasherDefault; use std::hash::BuildHasherDefault;
@@ -49,27 +48,53 @@ impl StopWordFilter {
} }
} }
pub struct StopWordFilterStream<'a> { pub struct StopWordFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
words: StopWordHashSet, words: StopWordHashSet,
tail: BoxTokenStream<'a>, tail: TailTokenStream,
} }
impl TokenFilter for StopWordFilter { impl<TailTokenStream> TokenFilter<TailTokenStream> for StopWordFilter
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> { where
BoxTokenStream::from(StopWordFilterStream { TailTokenStream: TokenStream,
words: self.words.clone(), {
tail: token_stream, type ResultTokenStream = StopWordFilterStream<TailTokenStream>;
})
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
StopWordFilterStream::wrap(self.words.clone(), token_stream)
} }
} }
impl<'a> StopWordFilterStream<'a> { impl<TailTokenStream> StopWordFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn predicate(&self, token: &Token) -> bool { fn predicate(&self, token: &Token) -> bool {
!self.words.contains(&token.text) !self.words.contains(&token.text)
} }
fn wrap(
words: StopWordHashSet,
tail: TailTokenStream,
) -> StopWordFilterStream<TailTokenStream> {
StopWordFilterStream { words, tail }
}
} }
impl<'a> TokenStream for StopWordFilterStream<'a> { impl<TailTokenStream> TokenStream for StopWordFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.tail.advance() { while self.tail.advance() {
if self.predicate(self.tail.token()) { if self.predicate(self.tail.token()) {
@@ -78,14 +103,6 @@ impl<'a> TokenStream for StopWordFilterStream<'a> {
} }
false false
} }
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
} }
impl Default for StopWordFilter { impl Default for StopWordFilter {

View File

@@ -1,21 +1,23 @@
use crate::tokenizer::{BoxTokenStream, Token, TokenStream}; use crate::tokenizer::{Token, TokenStream};
use std::ops::DerefMut;
const POSITION_GAP: usize = 2; const POSITION_GAP: usize = 2;
pub(crate) struct TokenStreamChain<'a> { pub(crate) struct TokenStreamChain<TTokenStream: TokenStream> {
offsets: Vec<usize>, offsets: Vec<usize>,
token_streams: Vec<BoxTokenStream<'a>>, token_streams: Vec<TTokenStream>,
position_shift: usize, position_shift: usize,
stream_idx: usize, stream_idx: usize,
token: Token, token: Token,
} }
impl<'a> TokenStreamChain<'a> { impl<'a, TTokenStream> TokenStreamChain<TTokenStream>
where
TTokenStream: TokenStream,
{
pub fn new( pub fn new(
offsets: Vec<usize>, offsets: Vec<usize>,
token_streams: Vec<BoxTokenStream<'a>>, token_streams: Vec<TTokenStream>,
) -> TokenStreamChain<'a> { ) -> TokenStreamChain<TTokenStream> {
TokenStreamChain { TokenStreamChain {
offsets, offsets,
stream_idx: 0, stream_idx: 0,
@@ -26,10 +28,13 @@ impl<'a> TokenStreamChain<'a> {
} }
} }
impl<'a> TokenStream for TokenStreamChain<'a> { impl<'a, TTokenStream> TokenStream for TokenStreamChain<TTokenStream>
where
TTokenStream: TokenStream,
{
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.stream_idx < self.token_streams.len() { while self.stream_idx < self.token_streams.len() {
let token_stream = self.token_streams[self.stream_idx].deref_mut(); let token_stream = &mut self.token_streams[self.stream_idx];
if token_stream.advance() { if token_stream.advance() {
let token = token_stream.token(); let token = token_stream.token();
let offset_offset = self.offsets[self.stream_idx]; let offset_offset = self.offsets[self.stream_idx];

View File

@@ -1,4 +1,4 @@
use crate::tokenizer::{BoxTokenStream, Token, TokenStream, TokenStreamChain}; use crate::tokenizer::{Token, TokenStream, TokenStreamChain};
use std::cmp::Ordering; use std::cmp::Ordering;
/// Struct representing pre-tokenized text /// Struct representing pre-tokenized text
@@ -41,9 +41,9 @@ impl PreTokenizedStream {
/// Creates a TokenStream from a PreTokenizedString array /// Creates a TokenStream from a PreTokenizedString array
pub fn chain_tokenized_strings<'a>( pub fn chain_tokenized_strings<'a>(
tok_strings: &'a [&'a PreTokenizedString], tok_strings: &'a [&'a PreTokenizedString],
) -> BoxTokenStream { ) -> Box<dyn TokenStream + 'a> {
if tok_strings.len() == 1 { if tok_strings.len() == 1 {
PreTokenizedStream::from((*tok_strings[0]).clone()).into() Box::new(PreTokenizedStream::from((*tok_strings[0]).clone()))
} else { } else {
let mut offsets = vec![]; let mut offsets = vec![];
let mut total_offset = 0; let mut total_offset = 0;
@@ -53,12 +53,11 @@ impl PreTokenizedStream {
total_offset += last_token.offset_to; total_offset += last_token.offset_to;
} }
} }
// TODO remove the string cloning. let token_streams: Vec<_> = tok_strings
let token_streams: Vec<BoxTokenStream<'static>> = tok_strings
.iter() .iter()
.map(|&tok_string| PreTokenizedStream::from((*tok_string).clone()).into()) .map(|tok_string| PreTokenizedStream::from((*tok_string).clone()))
.collect(); .collect();
TokenStreamChain::new(offsets, token_streams).into() Box::new(TokenStreamChain::new(offsets, token_streams))
} }
} }
} }
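A short sketch of driving the pre-tokenized path by hand. It assumes `PreTokenizedStream` is exported from the tokenizer module alongside `PreTokenizedString` (this compare only shows it being used internally), and the sample tokens are made up.

    use tantivy::tokenizer::{PreTokenizedStream, PreTokenizedString, Token, TokenStream};

    fn main() {
        // The caller supplies both the original text and the tokens;
        // pre-tokenized values are not re-tokenized at indexing time.
        let pre_tokenized = PreTokenizedString {
            text: "hello world".to_string(),
            tokens: vec![
                Token {
                    offset_from: 0,
                    offset_to: 5,
                    position: 0,
                    text: "hello".to_string(),
                    position_length: 1,
                },
                Token {
                    offset_from: 6,
                    offset_to: 11,
                    position: 1,
                    text: "world".to_string(),
                    position_length: 1,
                },
            ],
        };
        let mut stream = PreTokenizedStream::from(pre_tokenized);
        while stream.advance() {
            println!("{:?}", stream.token());
        }
    }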

View File

@@ -2,7 +2,6 @@ use crate::tokenizer::TokenStreamChain;
/// The tokenizer module contains all of the tools used to process /// The tokenizer module contains all of the tools used to process
/// text in `tantivy`. /// text in `tantivy`.
use std::borrow::{Borrow, BorrowMut}; use std::borrow::{Borrow, BorrowMut};
use std::ops::{Deref, DerefMut};
/// Token /// Token
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
@@ -34,31 +33,20 @@ impl Default for Token {
} }
} }
/// `TextAnalyzer` tokenizes an input text into tokens and modifies the resulting `TokenStream`. /// A `Tokenizer` is in charge of splitting text into a stream of tokens
/// before indexing.
/// ///
/// It simply wraps a `Tokenizer` and a list of `TokenFilter` that are applied sequentially. /// See the [module documentation](./index.html) for more detail.
pub struct TextAnalyzer { ///
tokenizer: Box<dyn Tokenizer>, /// # Warning
token_filters: Vec<BoxTokenFilter>, ///
} /// This API may change to use associated types.
pub trait Tokenizer<'a>: Sized + Clone {
/// Type associated to the resulting token stream.
type TokenStreamImpl: TokenStream;
impl<T: Tokenizer> From<T> for TextAnalyzer { /// Creates a token stream for a given `str`.
fn from(tokenizer: T) -> Self { fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl;
TextAnalyzer::new(tokenizer, Vec::new())
}
}
impl TextAnalyzer {
/// Creates a new `TextAnalyzer` given a tokenizer and a vector of `BoxTokenFilter`.
///
/// When creating a `TextAnalyzer` from a `Tokenizer` alone, prefer using
/// `TextAnalyzer::from(tokenizer)`.
pub fn new<T: Tokenizer>(tokenizer: T, token_filters: Vec<BoxTokenFilter>) -> TextAnalyzer {
TextAnalyzer {
tokenizer: Box::new(tokenizer),
token_filters,
}
}
/// Appends a token filter to the current tokenizer. /// Appends a token filter to the current tokenizer.
/// ///
@@ -70,26 +58,90 @@ impl TextAnalyzer {
/// ```rust /// ```rust
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// let en_stem = TextAnalyzer::from(SimpleTokenizer) /// let en_stem = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser) /// .filter(LowerCaser)
/// .filter(Stemmer::default()); /// .filter(Stemmer::default());
/// ``` /// ```
/// ///
pub fn filter<F: Into<BoxTokenFilter>>(mut self, token_filter: F) -> Self { fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self>
self.token_filters.push(token_filter.into()); where
self NewFilter: TokenFilter<<Self as Tokenizer<'a>>::TokenStreamImpl>,
{
ChainTokenizer {
head: new_filter,
tail: self,
}
}
}
/// A boxed tokenizer
trait BoxedTokenizerTrait: Send + Sync {
/// Tokenize a `&str`
fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a>;
/// Tokenize an array of `&str`
///
/// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
/// one concatenated `&str`, with an artificial position gap of `2` between the different fields
/// to prevent an accidental `PhraseQuery` from matching across two terms.
fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b>;
/// Return a boxed clone of the tokenizer
fn boxed_clone(&self) -> BoxedTokenizer;
}
/// A boxed tokenizer
pub struct BoxedTokenizer(Box<dyn BoxedTokenizerTrait>);
impl<T> From<T> for BoxedTokenizer
where
T: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
fn from(tokenizer: T) -> BoxedTokenizer {
BoxedTokenizer(Box::new(BoxableTokenizer(tokenizer)))
}
}
impl BoxedTokenizer {
/// Tokenize a `&str`
pub fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
self.0.token_stream(text)
} }
/// Tokenize an array of `&str` /// Tokenize an array of `&str`
/// ///
/// The resulting `BoxTokenStream` is equivalent to what would be obtained if the &str were /// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
/// one concatenated `&str`, with an artificial position gap of `2` between the different fields /// one concatenated `&str`, with an artificial position gap of `2` between the different fields
/// to prevent an accidental `PhraseQuery` from matching across two terms. /// to prevent an accidental `PhraseQuery` from matching across two terms.
pub fn token_stream_texts<'a>(&self, texts: &'a [&'a str]) -> BoxTokenStream<'a> { pub fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
self.0.token_stream_texts(texts)
}
}
impl Clone for BoxedTokenizer {
fn clone(&self) -> BoxedTokenizer {
self.0.boxed_clone()
}
}
#[derive(Clone)]
struct BoxableTokenizer<A>(A)
where
A: for<'a> Tokenizer<'a> + Send + Sync;
impl<A> BoxedTokenizerTrait for BoxableTokenizer<A>
where
A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
Box::new(self.0.token_stream(text))
}
fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
assert!(!texts.is_empty()); assert!(!texts.is_empty());
if texts.len() == 1 { if texts.len() == 1 {
self.token_stream(texts[0]) Box::new(self.0.token_stream(texts[0]))
} else { } else {
let mut offsets = vec![]; let mut offsets = vec![];
let mut total_offset = 0; let mut total_offset = 0;
@@ -97,124 +149,34 @@ impl TextAnalyzer {
offsets.push(total_offset); offsets.push(total_offset);
total_offset += text.len(); total_offset += text.len();
} }
let token_streams: Vec<BoxTokenStream<'a>> = texts let token_streams: Vec<_> =
.iter() texts.iter().map(|text| self.0.token_stream(text)).collect();
.cloned() Box::new(TokenStreamChain::new(offsets, token_streams))
.map(|text| self.token_stream(text))
.collect();
From::from(TokenStreamChain::new(offsets, token_streams))
} }
} }
/// Creates a token stream for a given `str`. fn boxed_clone(&self) -> BoxedTokenizer {
pub fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> { self.0.clone().into()
let mut token_stream = self.tokenizer.token_stream(text);
for token_filter in &self.token_filters {
token_stream = token_filter.transform(token_stream);
}
token_stream
} }
} }
impl Clone for TextAnalyzer { impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
fn clone(&self) -> Self {
TextAnalyzer {
tokenizer: self.tokenizer.box_clone(),
token_filters: self
.token_filters
.iter()
.map(|token_filter| token_filter.box_clone())
.collect(),
}
}
}
/// A `Tokenizer` is in charge of splitting text into a stream of tokens
/// before indexing.
///
/// See the [module documentation](./index.html) for more detail.
///
/// # Warning
///
/// This API may change to use associated types.
pub trait Tokenizer: 'static + Send + Sync + TokenizerClone {
/// Creates a token stream for a given `str`.
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a>;
}
pub trait TokenizerClone {
fn box_clone(&self) -> Box<dyn Tokenizer>;
}
impl<T: Tokenizer + Clone> TokenizerClone for T {
fn box_clone(&self) -> Box<dyn Tokenizer> {
Box::new(self.clone())
}
}
impl<'a> TokenStream for Box<dyn TokenStream + 'a> {
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
let token_stream: &mut dyn TokenStream = self.borrow_mut(); let token_stream: &mut dyn TokenStream = self.borrow_mut();
token_stream.advance() token_stream.advance()
} }
fn token<'b>(&'b self) -> &'b Token { fn token(&self) -> &Token {
let token_stream: &'b (dyn TokenStream + 'a) = self.borrow(); let token_stream: &dyn TokenStream = self.borrow();
token_stream.token() token_stream.token()
} }
fn token_mut<'b>(&'b mut self) -> &'b mut Token { fn token_mut(&mut self) -> &mut Token {
let token_stream: &'b mut (dyn TokenStream + 'a) = self.borrow_mut(); let token_stream: &mut dyn TokenStream = self.borrow_mut();
token_stream.token_mut() token_stream.token_mut()
} }
} }
/// Simple wrapper of `Box<dyn TokenStream + 'a>`.
///
/// See `TokenStream` for more information.
pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>);
impl<'a, T> From<T> for BoxTokenStream<'a>
where
T: TokenStream + 'a,
{
fn from(token_stream: T) -> BoxTokenStream<'a> {
BoxTokenStream(Box::new(token_stream))
}
}
impl<'a> Deref for BoxTokenStream<'a> {
type Target = dyn TokenStream + 'a;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
impl<'a> DerefMut for BoxTokenStream<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut *self.0
}
}
/// Simple wrapper of `Box<dyn TokenFilter + 'a>`.
///
/// See `TokenStream` for more information.
pub struct BoxTokenFilter(Box<dyn TokenFilter>);
impl Deref for BoxTokenFilter {
type Target = dyn TokenFilter;
fn deref(&self) -> &dyn TokenFilter {
&*self.0
}
}
impl<T: TokenFilter> From<T> for BoxTokenFilter {
fn from(tokenizer: T) -> BoxTokenFilter {
BoxTokenFilter(Box::new(tokenizer))
}
}
/// `TokenStream` is the result of the tokenization. /// `TokenStream` is the result of the tokenization.
/// ///
/// It consists of a consumable stream of `Token`s. /// It consists of a consumable stream of `Token`s.
@@ -224,7 +186,7 @@ impl<T: TokenFilter> From<T> for BoxTokenFilter {
/// ``` /// ```
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// let tokenizer = TextAnalyzer::from(SimpleTokenizer) /// let tokenizer = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser); /// .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer"); /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
@@ -263,7 +225,7 @@ pub trait TokenStream {
/// ``` /// ```
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// let tokenizer = TextAnalyzer::from(SimpleTokenizer) /// let tokenizer = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser); /// .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer"); /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
@@ -281,8 +243,6 @@ pub trait TokenStream {
/// Helper function to consume the entire `TokenStream` /// Helper function to consume the entire `TokenStream`
/// and push the tokens to a sink function. /// and push the tokens to a sink function.
///
/// Remove this.
fn process(&mut self, sink: &mut dyn FnMut(&Token)) -> u32 { fn process(&mut self, sink: &mut dyn FnMut(&Token)) -> u32 {
let mut num_tokens_pushed = 0u32; let mut num_tokens_pushed = 0u32;
while self.advance() { while self.advance() {
@@ -293,20 +253,33 @@ pub trait TokenStream {
} }
} }
pub trait TokenFilterClone { #[derive(Clone)]
fn box_clone(&self) -> BoxTokenFilter; pub struct ChainTokenizer<HeadTokenFilterFactory, TailTokenizer> {
head: HeadTokenFilterFactory,
tail: TailTokenizer,
}
impl<'a, HeadTokenFilterFactory, TailTokenizer> Tokenizer<'a>
for ChainTokenizer<HeadTokenFilterFactory, TailTokenizer>
where
HeadTokenFilterFactory: TokenFilter<TailTokenizer::TokenStreamImpl>,
TailTokenizer: Tokenizer<'a>,
{
type TokenStreamImpl = HeadTokenFilterFactory::ResultTokenStream;
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
let tail_token_stream = self.tail.token_stream(text);
self.head.transform(tail_token_stream)
}
} }
/// Trait for the pluggable components of `Tokenizer`s. /// Trait for the pluggable components of `Tokenizer`s.
pub trait TokenFilter: 'static + Send + Sync + TokenFilterClone { pub trait TokenFilter<TailTokenStream: TokenStream>: Clone {
/// Wraps a token stream and returns the modified one. /// The resulting `TokenStream` type.
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a>; type ResultTokenStream: TokenStream;
}
impl<T: TokenFilter + Clone> TokenFilterClone for T { /// Wraps a token stream and returns the modified one.
fn box_clone(&self) -> BoxTokenFilter { fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream;
BoxTokenFilter::from(self.clone())
}
} }
#[cfg(test)] #[cfg(test)]
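For completeness, a hedged sketch of a user-defined tokenizer under the associated-type `Tokenizer` trait on the right-hand side of this file. `LowercaseWholeFieldTokenizer` is invented for illustration and follows the same single-token pattern as `RawTokenizer` earlier in this compare.

    use tantivy::tokenizer::{Token, TokenStream, Tokenizer};

    // Illustrative only: emits the whole field as one lowercased token.
    #[derive(Clone)]
    struct LowercaseWholeFieldTokenizer;

    struct SingleTokenStream {
        token: Token,
        has_token: bool,
    }

    impl<'a> Tokenizer<'a> for LowercaseWholeFieldTokenizer {
        type TokenStreamImpl = SingleTokenStream;

        fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
            let token = Token {
                offset_from: 0,
                offset_to: text.len(),
                position: 0,
                text: text.to_lowercase(),
                position_length: 1,
            };
            SingleTokenStream {
                token,
                has_token: true,
            }
        }
    }

    impl TokenStream for SingleTokenStream {
        fn advance(&mut self) -> bool {
            // Yield the single token once, then report exhaustion.
            let had_token = self.has_token;
            self.has_token = false;
            had_token
        }

        fn token(&self) -> &Token {
            &self.token
        }

        fn token_mut(&mut self) -> &mut Token {
            &mut self.token
        }
    }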

View File

@@ -1,10 +1,11 @@
use crate::tokenizer::stemmer::Language; use crate::tokenizer::stemmer::Language;
use crate::tokenizer::tokenizer::TextAnalyzer; use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::LowerCaser; use crate::tokenizer::LowerCaser;
use crate::tokenizer::RawTokenizer; use crate::tokenizer::RawTokenizer;
use crate::tokenizer::RemoveLongFilter; use crate::tokenizer::RemoveLongFilter;
use crate::tokenizer::SimpleTokenizer; use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::Stemmer; use crate::tokenizer::Stemmer;
use crate::tokenizer::Tokenizer;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
@@ -22,16 +23,16 @@ use std::sync::{Arc, RwLock};
/// search engine. /// search engine.
#[derive(Clone)] #[derive(Clone)]
pub struct TokenizerManager { pub struct TokenizerManager {
tokenizers: Arc<RwLock<HashMap<String, TextAnalyzer>>>, tokenizers: Arc<RwLock<HashMap<String, BoxedTokenizer>>>,
} }
impl TokenizerManager { impl TokenizerManager {
/// Registers a new tokenizer associated with a given name. /// Registers a new tokenizer associated with a given name.
pub fn register<T>(&self, tokenizer_name: &str, tokenizer: T) pub fn register<A>(&self, tokenizer_name: &str, tokenizer: A)
where where
TextAnalyzer: From<T>, A: Into<BoxedTokenizer>,
{ {
let boxed_tokenizer: TextAnalyzer = TextAnalyzer::from(tokenizer); let boxed_tokenizer = tokenizer.into();
self.tokenizers self.tokenizers
.write() .write()
.expect("Acquiring the lock should never fail") .expect("Acquiring the lock should never fail")
@@ -39,7 +40,7 @@ impl TokenizerManager {
} }
/// Accessing a tokenizer given its name. /// Accessing a tokenizer given its name.
pub fn get(&self, tokenizer_name: &str) -> Option<TextAnalyzer> { pub fn get(&self, tokenizer_name: &str) -> Option<BoxedTokenizer> {
self.tokenizers self.tokenizers
.read() .read()
.expect("Acquiring the lock should never fail") .expect("Acquiring the lock should never fail")
@@ -61,13 +62,13 @@ impl Default for TokenizerManager {
manager.register("raw", RawTokenizer); manager.register("raw", RawTokenizer);
manager.register( manager.register(
"default", "default",
TextAnalyzer::from(SimpleTokenizer) SimpleTokenizer
.filter(RemoveLongFilter::limit(40)) .filter(RemoveLongFilter::limit(40))
.filter(LowerCaser), .filter(LowerCaser),
); );
manager.register( manager.register(
"en_stem", "en_stem",
TextAnalyzer::from(SimpleTokenizer) SimpleTokenizer
.filter(RemoveLongFilter::limit(40)) .filter(RemoveLongFilter::limit(40))
.filter(LowerCaser) .filter(LowerCaser)
.filter(Stemmer::new(Language::English)), .filter(Stemmer::new(Language::English)),
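Likewise, a sketch of registering and retrieving a custom analyzer through the manager, using the right-hand-side form where the filter chain is built directly on the tokenizer; the "short_lc" name and the particular filters are illustrative.

    use tantivy::tokenizer::{
        LowerCaser, RemoveLongFilter, SimpleTokenizer, TokenStream, Tokenizer, TokenizerManager,
    };

    fn main() {
        let manager = TokenizerManager::default();
        // Register under a custom name, then fetch it back by that name.
        manager.register(
            "short_lc",
            SimpleTokenizer
                .filter(RemoveLongFilter::limit(20))
                .filter(LowerCaser),
        );
        let tokenizer = manager.get("short_lc").expect("registered above");
        let mut stream = tokenizer.token_stream("Veni, Vidi, Vici");
        while stream.advance() {
            println!("{}", stream.token().text);
        }
    }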

View File

@@ -1,6 +1,8 @@
use fail; use fail;
use std::path::Path; use std::path::Path;
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite}; use tantivy::directory::{
Directory, ManagedDirectory, RAMDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use tantivy::doc; use tantivy::doc;
use tantivy::schema::{Schema, TEXT}; use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term}; use tantivy::{Index, Term};