Compare commits


17 Commits

Author SHA1 Message Date
Paul Masurel
548129fc6d Merge branch 'query-boost' of https://github.com/audunhalland/tantivy into audunhalland-query-boost 2020-01-31 15:51:59 +09:00
Alexander
55f5658d40 Make Executor public so Searcher::search_in_executor method now can be used (#769)
* Make Executor public so Searcher::search_in_executor method now can be used

* Fixed cargo fmt
2020-01-31 15:50:26 +09:00
Paul Masurel
3ae6363462 Updated CHANGELOG 2020-01-30 10:16:56 +09:00
Halvor Fladsrud Bø
9e20d7f8a5 Maximum size of segment to be considered for merge (#765)
* Replicated changes from dead PR

* Ran formatter.
2020-01-30 10:14:34 +09:00
Halvor Fladsrud Bø
ab13ffe377 Facet path string (#759)
* Added to_path_string

* Fixed logic. Found strange behavior with string comparisons.

* ran formatter

* Fixed test

* Fixed format

* Fixed comment
2020-01-30 10:11:29 +09:00
Paul Masurel
039138ed50 Added the empty dictionary item in the CHANGELOG 2020-01-30 10:10:34 +09:00
Paul Masurel
6227a0555a Added unit test for empty dictionaries. 2020-01-30 10:08:27 +09:00
Audun Halland
f85d0a522a Optimize TermDictionary::empty by precomputed data source (#767) 2020-01-30 10:04:58 +09:00
Halvor Fladsrud Bø
5795488ba7 Backward iteration for termdict range (#757)
* Added backwards iteration to termdict

* Ran formatter

* Updated fst dependency

* Updated dependency

* Changelog and version

* Fixed version

* Made it part of 12.0
2020-01-30 09:59:21 +09:00
Paul Masurel
c3045dfb5c Remove time dev-deps by relying on chrono::Duration reexport. 2020-01-29 23:25:03 +09:00
Paul Masurel
811fd0cb9e Dynamic analyzer (#755)
* Removed generics in tokenizers

* lowercaser

* Added TokenizerExt

* Introducing BoxedTokenizer

* Introducing BoxXXXXX helper struct

* Closes #762.

* Introducing a TextAnalyzer
2020-01-29 18:23:37 +09:00
Audun Halland
9f04f42b64 Add boost_by to FuzzyQuery, RegexQuery 2020-01-21 00:30:34 +01:00
Audun Halland
aeb8ae3ef0 Add TermQuery, PhraseQuery boost_by 2020-01-21 00:11:22 +01:00
dependabot-preview[bot]
f6847c46d7 Update tantivy-fst requirement from 0.1 to 0.2 (#750)
Updates the requirements on [tantivy-fst](https://github.com/tantivy-search/fst) to permit the latest version.
- [Release notes](https://github.com/tantivy-search/fst/releases)
- [Commits](https://github.com/tantivy-search/fst/compare/0.1.1...0.2.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-21 07:57:39 +09:00
Paul Masurel
92dac7af5c Return an error instead of panicking when sorting by a non fast field. (#748)
Closes #747
2020-01-08 13:41:02 +09:00
Paul Masurel
801905d77f Davide romanini arm atomic mutex (#746)
* Add atomic mutex implementation for ARM.

* Applied rustfmt.

* rustfmt

Co-authored-by: davide-romanini <davide.romanini@gmail.com>
2019-12-30 23:42:11 +09:00
Paul Horn
8f5ac86f30 Expose UserOperation as a public type. (#744)
In order to make `IndexWriter::run` callable from outside of the crate,
the `UserOperation` type needs to be publicly available.
Since the `indexer` module is private, we just export the `UserOperation`
type directly.
2019-12-29 22:37:13 +09:00
58 changed files with 1071 additions and 1271 deletions


@@ -1,8 +1,15 @@
Tantivy 0.12.0 Tantivy 0.12.0
========================== ======================
- By default IndexReader are in `Manual` mode. - Removing static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
## How to update?
Crates relying on a custom tokenizer, or registering tokenizers in the manager, will require some
minor changes. See https://github.com/tantivy-search/tantivy/blob/master/examples/custom_tokenizer.rs
for a code sample, and the short sketch right after this changelog excerpt.
Tantivy 0.11.3 Tantivy 0.11.3
======================= =======================
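For crates that assemble their own analyzer chain, the update roughly amounts to wrapping the base tokenizer in a `TextAnalyzer`, as in the minimal sketch below (it assumes the 0.12 API shown in the stop_words example diff further down; the `register` call on the `TokenizerManager` is an assumption and may need adjusting):

```rust
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, TextAnalyzer};

fn build_analyzer() -> TextAnalyzer {
    // 0.12-style chain: the base tokenizer is wrapped in a TextAnalyzer and
    // filters are attached to it, instead of building a generic static chain.
    TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(StopWordFilter::remove(vec![
            "the".to_string(),
            "and".to_string(),
        ]))
}

// Registration is assumed to accept the analyzer directly, e.g.:
// index.tokenizers().register("my_analyzer", build_analyzer());
```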


@@ -1,6 +1,6 @@
[package] [package]
name = "tantivy" name = "tantivy"
version = "0.11.3" version = "0.12.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"] authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = ["database-implementations", "data-structures"] categories = ["database-implementations", "data-structures"]
@@ -18,7 +18,7 @@ byteorder = "1.0"
crc32fast = "1.2.0" crc32fast = "1.2.0"
once_cell = "1.0" once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]} regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.1" tantivy-fst = "0.2.1"
memmap = {version = "0.7", optional=true} memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true} lz4 = {version="1.20", optional=true}
snap = {version="0.2"} snap = {version="0.2"}
@@ -60,7 +60,6 @@ winapi = "0.3"
rand = "0.7" rand = "0.7"
maplit = "1" maplit = "1"
matches = "0.1.8" matches = "0.1.8"
time = "0.1.42"
[dev-dependencies.fail] [dev-dependencies.fail]
version = "0.3" version = "0.3"


@@ -9,7 +9,7 @@
// - import tokenized text straight from json, // - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text // - perform a search on documents with pre-tokenized text
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer}; use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::collector::{Count, TopDocs}; use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery; use tantivy::query::TermQuery;


@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {
// This tokenizer lowers all of the text (to help with stop word matching) // This tokenizer lowers all of the text (to help with stop word matching)
// then removes all instances of `the` and `and` from the corpus // then removes all instances of `the` and `and` from the corpus
let tokenizer = SimpleTokenizer let tokenizer = TextAnalyzer::from(SimpleTokenizer)
.filter(LowerCaser) .filter(LowerCaser)
.filter(StopWordFilter::remove(vec![ .filter(StopWordFilter::remove(vec![
"the".to_string(), "the".to_string(),


@@ -6,6 +6,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{ use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector, CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
}; };
use crate::fastfield::FastFieldReader;
use crate::schema::Field; use crate::schema::Field;
use crate::DocAddress; use crate::DocAddress;
use crate::DocId; use crate::DocId;
@@ -61,6 +62,34 @@ impl fmt::Debug for TopDocs {
} }
} }
struct ScorerByFastFieldReader {
ff_reader: FastFieldReader<u64>,
}
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&self, doc: DocId) -> u64 {
self.ff_reader.get_u64(u64::from(doc))
}
}
struct ScorerByField {
field: Field,
}
impl CustomScorer<u64> for ScorerByField {
type Child = ScorerByFastFieldReader;
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
let ff_reader = segment_reader
.fast_fields()
.u64(self.field)
.ok_or_else(|| {
crate::Error::SchemaError(format!("Field requested is not a i64/u64 fast field."))
})?;
Ok(ScorerByFastFieldReader { ff_reader })
}
}
impl TopDocs { impl TopDocs {
/// Creates a top score collector, with a number of documents equal to "limit". /// Creates a top score collector, with a number of documents equal to "limit".
/// ///
@@ -143,14 +172,7 @@ impl TopDocs {
self, self,
field: Field, field: Field,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> { ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
self.custom_score(move |segment_reader: &SegmentReader| { self.custom_score(ScorerByField { field })
let ff_reader = segment_reader
.fast_fields()
.u64(field)
.expect("Field requested is not a i64/u64 fast field.");
//TODO error message missmatch actual behavior for i64
move |doc: DocId| ff_reader.get(doc)
})
} }
/// Ranks the documents using a custom score. /// Ranks the documents using a custom score.
@@ -572,7 +594,6 @@ mod tests {
} }
#[test] #[test]
#[should_panic(expected = "Field requested is not a i64/u64 fast field")]
fn test_field_not_fast_field() { fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT); let title = schema_builder.add_text_field(TITLE, TEXT);
@@ -587,7 +608,12 @@ mod tests {
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size); let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
assert!(top_collector.for_segment(0, segment).is_ok()); let err = top_collector.for_segment(0, segment);
if let Err(crate::Error::SchemaError(msg)) = err {
assert_eq!(msg, "Field requested is not a i64/u64 fast field.");
} else {
assert!(false);
}
} }
fn index( fn index(
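The hunk above (from #748) replaces the panic in `order_by_u64_field` with a `SchemaError` surfaced when the collector is bound to a segment. A hedged sketch of how a caller might handle it; `field` and `segment` are placeholders assumed to come from the surrounding application:

```rust
use tantivy::collector::{Collector, TopDocs};
use tantivy::schema::Field;
use tantivy::SegmentReader;

fn try_order_by(field: Field, segment: &SegmentReader) {
    let collector = TopDocs::with_limit(4).order_by_u64_field(field);
    match collector.for_segment(0, segment) {
        // The field is a u64/i64 fast field: proceed with collection as usual.
        Ok(_segment_collector) => {}
        // New behaviour: an error is returned instead of a panic.
        Err(tantivy::Error::SchemaError(msg)) => eprintln!("cannot sort by this field: {}", msg),
        Err(other) => eprintln!("unexpected error: {:?}", other),
    }
}
```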


@@ -10,7 +10,9 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
/// API of a dependency, knowing it might conflict with a different version /// API of a dependency, knowing it might conflict with a different version
/// used by the client. Second, we may stop using rayon in the future. /// used by the client. Second, we may stop using rayon in the future.
pub enum Executor { pub enum Executor {
/// Single thread variant of an Executor
SingleThread, SingleThread,
/// Thread pool variant of an Executor
ThreadPool(ThreadPool), ThreadPool(ThreadPool),
} }
@@ -20,7 +22,7 @@ impl Executor {
Executor::SingleThread Executor::SingleThread
} }
// Creates an Executor that dispatches the tasks in a thread pool. /// Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> { pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> {
let pool = ThreadPoolBuilder::new() let pool = ThreadPoolBuilder::new()
.num_threads(num_threads) .num_threads(num_threads)
@@ -29,10 +31,10 @@ impl Executor {
Ok(Executor::ThreadPool(pool)) Ok(Executor::ThreadPool(pool))
} }
// Perform a map in the thread pool. /// Perform a map in the thread pool.
// ///
// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task /// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
// will propagate to the caller. /// will propagate to the caller.
pub fn map< pub fn map<
A: Send, A: Send,
R: Send, R: Send,
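With #769 the `Executor` enum itself becomes public, so callers can build one and hand it to `Searcher::search_in_executor`. A minimal sketch, assuming the type is re-exported at the crate root (adjust the path to wherever the release actually exposes it):

```rust
use tantivy::Executor; // assumed re-export path for the newly public type

fn make_executors() -> tantivy::Result<()> {
    // Runs every task on the calling thread.
    let _single = Executor::single_thread();
    // Dispatches tasks on a rayon thread pool of 4 workers; the &'static str
    // prefix is used to name the pool's threads (see multi_thread above).
    let _pool = Executor::multi_thread(4, "tantivy-search-")?;
    // Either value can then be passed to Searcher::search_in_executor (#769).
    Ok(())
}
```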


@@ -1,3 +1,4 @@
use super::segment::create_segment;
use super::segment::Segment; use super::segment::Segment;
use crate::core::Executor; use crate::core::Executor;
use crate::core::IndexMeta; use crate::core::IndexMeta;
@@ -19,8 +20,7 @@ use crate::reader::IndexReaderBuilder;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::tokenizer::BoxedTokenizer; use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::tokenizer::TokenizerManager;
use crate::IndexWriter; use crate::IndexWriter;
use crate::Result; use crate::Result;
use num_cpus; use num_cpus;
@@ -172,11 +172,11 @@ impl Index {
} }
/// Helper to access the tokenizer associated to a specific field. /// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> { pub fn tokenizer_for_field(&self, field: Field) -> Result<TextAnalyzer> {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers(); let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type { let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
FieldType::Str(text_options) => text_options FieldType::Str(text_options) => text_options
.get_indexing_options() .get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string()) .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -330,8 +330,9 @@ impl Index {
.collect()) .collect())
} }
pub(crate) fn segment(&self, segment_meta: SegmentMeta) -> Segment { #[doc(hidden)]
Segment::for_index(self.clone(), segment_meta) pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
create_segment(self.clone(), segment_meta)
} }
/// Creates a new segment. /// Creates a new segment.
@@ -342,13 +343,6 @@ impl Index {
self.segment(segment_meta) self.segment(segment_meta)
} }
/// Creates a new segment.
pub(crate) fn new_segment_unpersisted(&self) -> Segment {
let meta = self
.inventory
.new_segment_meta(SegmentId::generate_random(), 0);
Segment::new_volatile(meta, self.schema())
}
/// Return a reference to the index directory. /// Return a reference to the index directory.
pub fn directory(&self) -> &ManagedDirectory { pub fn directory(&self) -> &ManagedDirectory {
&self.directory &self.directory
@@ -471,7 +465,7 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader); test_index_on_commit_reload_policy_aux(field, &index, &reader);
} }
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
@@ -495,7 +489,7 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader); test_index_on_commit_reload_policy_aux(field, &index, &reader);
} }
#[test] #[test]
@@ -537,11 +531,12 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, read_index, &write_index, &reader); test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
} }
} }
fn test_index_on_commit_reload_policy_aux(field: Field, mut reader_index: Index, index: &Index, reader: &IndexReader) { fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
let mut reader_index = reader.index();
let (sender, receiver) = crossbeam::channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || { let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
let _ = sender.send(()); let _ = sender.send(());
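`tokenizer_for_field` now hands back a `TextAnalyzer` instead of a `BoxedTokenizer`; a small sketch of the updated call site (the `index` and `field` values are assumed to come from the caller):

```rust
use tantivy::schema::Field;
use tantivy::tokenizer::TextAnalyzer;
use tantivy::Index;

// Returns the analyzer configured for a text field, with the post-#755 return type.
fn analyzer_for(index: &Index, field: Field) -> tantivy::Result<TextAnalyzer> {
    index.tokenizer_for_field(field)
}
```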


@@ -60,7 +60,7 @@ impl InvertedIndexReader {
.get_index_record_option() .get_index_record_option()
.unwrap_or(IndexRecordOption::Basic); .unwrap_or(IndexRecordOption::Basic);
InvertedIndexReader { InvertedIndexReader {
termdict: TermDictionary::empty(&field_type), termdict: TermDictionary::empty(),
postings_source: ReadOnlySource::empty(), postings_source: ReadOnlySource::empty(),
positions_source: ReadOnlySource::empty(), positions_source: ReadOnlySource::empty(),
positions_idx_source: ReadOnlySource::empty(), positions_idx_source: ReadOnlySource::empty(),


@@ -3,62 +3,21 @@ use crate::core::Index;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::{Directory, ManagedDirectory, RAMDirectory}; use crate::directory::Directory;
use crate::directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use crate::Result;
use failure::_core::ops::DerefMut;
use std::fmt; use std::fmt;
use std::ops::Deref;
use std::path::PathBuf; use std::path::PathBuf;
use std::result; use std::result;
#[derive(Clone)]
pub(crate) enum SegmentDirectory {
Persisted(ManagedDirectory),
Volatile(RAMDirectory),
}
impl SegmentDirectory {
pub fn new_volatile() -> SegmentDirectory {
SegmentDirectory::Volatile(RAMDirectory::default())
}
}
impl From<ManagedDirectory> for SegmentDirectory {
fn from(directory: ManagedDirectory) -> Self {
SegmentDirectory::Persisted(directory)
}
}
impl Deref for SegmentDirectory {
type Target = dyn Directory;
fn deref(&self) -> &Self::Target {
match self {
SegmentDirectory::Volatile(dir) => dir,
SegmentDirectory::Persisted(dir) => dir,
}
}
}
impl DerefMut for SegmentDirectory {
fn deref_mut(&mut self) -> &mut Self::Target {
match self {
SegmentDirectory::Volatile(dir) => dir,
SegmentDirectory::Persisted(dir) => dir,
}
}
}
/// A segment is a piece of the index. /// A segment is a piece of the index.
#[derive(Clone)] #[derive(Clone)]
pub struct Segment { pub struct Segment {
schema: Schema, index: Index,
meta: SegmentMeta, meta: SegmentMeta,
directory: SegmentDirectory,
} }
impl fmt::Debug for Segment { impl fmt::Debug for Segment {
@@ -67,56 +26,23 @@ impl fmt::Debug for Segment {
} }
} }
/// Creates a new segment given an `Index` and a `SegmentId`
///
/// The function is here to make it private outside `tantivy`.
/// #[doc(hidden)]
pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
Segment { index, meta }
}
impl Segment { impl Segment {
/// Returns the index the segment belongs to.
pub fn index(&self) -> &Index {
&self.index
}
/// Returns our index's schema. /// Returns our index's schema.
// TODO return a ref.
pub fn schema(&self) -> Schema { pub fn schema(&self) -> Schema {
self.schema.clone() self.index.schema()
}
pub(crate) fn new_persisted(
meta: SegmentMeta,
directory: ManagedDirectory,
schema: Schema,
) -> Segment {
Segment {
meta,
schema,
directory: SegmentDirectory::from(directory),
}
}
/// Creates a new segment that embeds its own `RAMDirectory`.
///
/// That segment is entirely dissociated from the index directory.
/// It will be persisted by a background thread in charge of IO.
pub fn new_volatile(meta: SegmentMeta, schema: Schema) -> Segment {
Segment {
schema,
meta,
directory: SegmentDirectory::new_volatile(),
}
}
/// Creates a new segment given an `Index` and a `SegmentId`
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
Segment {
directory: SegmentDirectory::Persisted(index.directory().clone()),
schema: index.schema(),
meta,
}
}
pub fn persist(&mut self, mut dest_directory: ManagedDirectory) -> crate::Result<()> {
if let SegmentDirectory::Persisted(_) = self.directory {
// this segment is already persisted.
return Ok(());
}
if let SegmentDirectory::Volatile(ram_directory) = &self.directory {
ram_directory.persist(&mut dest_directory)?;
}
self.directory = SegmentDirectory::Persisted(dest_directory);
Ok(())
} }
/// Returns the segment meta-information /// Returns the segment meta-information
@@ -130,8 +56,7 @@ impl Segment {
/// as we finalize a fresh new segment. /// as we finalize a fresh new segment.
pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment { pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
Segment { Segment {
directory: self.directory, index: self.index,
schema: self.schema,
meta: self.meta.with_max_doc(max_doc), meta: self.meta.with_max_doc(max_doc),
} }
} }
@@ -139,8 +64,7 @@ impl Segment {
#[doc(hidden)] #[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
Segment { Segment {
directory: self.directory, index: self.index,
schema: self.schema,
meta: self.meta.with_delete_meta(num_deleted_docs, opstamp), meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
} }
} }
@@ -164,7 +88,7 @@ impl Segment {
component: SegmentComponent, component: SegmentComponent,
) -> result::Result<ReadOnlySource, OpenReadError> { ) -> result::Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component); let path = self.relative_path(component);
let source = self.directory.open_read(&path)?; let source = self.index.directory().open_read(&path)?;
Ok(source) Ok(source)
} }
@@ -174,7 +98,7 @@ impl Segment {
component: SegmentComponent, component: SegmentComponent,
) -> result::Result<WritePtr, OpenWriteError> { ) -> result::Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component); let path = self.relative_path(component);
let write = self.directory.open_write(&path)?; let write = self.index.directory_mut().open_write(&path)?;
Ok(write) Ok(write)
} }
} }


@@ -57,68 +57,6 @@ pub struct SegmentReader {
} }
impl SegmentReader { impl SegmentReader {
/// Open a new segment for reading.
pub fn open(segment: &Segment) -> Result<SegmentReader> {
let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
let termdict_composite = CompositeFile::open(&termdict_source)?;
let store_source = segment.open_read(SegmentComponent::STORE)?;
fail_point!("SegmentReader::open#middle");
let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
let postings_composite = CompositeFile::open(&postings_source)?;
let positions_composite = {
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
CompositeFile::open(&source)?
} else {
CompositeFile::empty()
}
};
let positions_idx_composite = {
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
CompositeFile::open(&source)?
} else {
CompositeFile::empty()
}
};
let schema = segment.schema();
let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
let fast_field_readers =
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
Some(DeleteBitSet::open(delete_data))
} else {
None
};
Ok(SegmentReader {
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
max_doc: segment.meta().max_doc(),
num_docs: segment.meta().num_docs(),
termdict_composite,
postings_composite,
fast_fields_readers: fast_field_readers,
fieldnorms_composite,
segment_id: segment.id(),
store_source,
delete_bitset_opt,
positions_composite,
positions_idx_composite,
schema,
})
}
/// Returns the highest document id ever attributed in /// Returns the highest document id ever attributed in
/// this segment + 1. /// this segment + 1.
/// Today, `tantivy` does not handle deletes, so it happens /// Today, `tantivy` does not handle deletes, so it happens
@@ -206,6 +144,68 @@ impl SegmentReader {
StoreReader::from_source(self.store_source.clone()) StoreReader::from_source(self.store_source.clone())
} }
/// Open a new segment for reading.
pub fn open(segment: &Segment) -> Result<SegmentReader> {
let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
let termdict_composite = CompositeFile::open(&termdict_source)?;
let store_source = segment.open_read(SegmentComponent::STORE)?;
fail_point!("SegmentReader::open#middle");
let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
let postings_composite = CompositeFile::open(&postings_source)?;
let positions_composite = {
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
CompositeFile::open(&source)?
} else {
CompositeFile::empty()
}
};
let positions_idx_composite = {
if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
CompositeFile::open(&source)?
} else {
CompositeFile::empty()
}
};
let schema = segment.schema();
let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
let fast_field_readers =
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
Some(DeleteBitSet::open(delete_data))
} else {
None
};
Ok(SegmentReader {
inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
max_doc: segment.meta().max_doc(),
num_docs: segment.meta().num_docs(),
termdict_composite,
postings_composite,
fast_fields_readers: fast_field_readers,
fieldnorms_composite,
segment_id: segment.id(),
store_source,
delete_bitset_opt,
positions_composite,
positions_idx_composite,
schema,
})
}
/// Returns a field reader associated to the field given in argument. /// Returns a field reader associated to the field given in argument.
/// If the field was not present in the index during indexing time, /// If the field was not present in the index during indexing time,
/// the InvertedIndexReader is empty. /// the InvertedIndexReader is empty.


@@ -144,16 +144,6 @@ impl RAMDirectory {
pub fn total_mem_usage(&self) -> usize { pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage() self.fs.read().unwrap().total_mem_usage()
} }
pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
let wlock = self.fs.write().unwrap();
for (path, source) in wlock.fs.iter() {
let mut dest_wrt = dest.open_write(path)?;
dest_wrt.write_all(source.as_slice())?;
dest_wrt.terminate()?;
}
Ok(())
}
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {


@@ -7,9 +7,6 @@ pub use self::writer::MultiValueIntFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use time;
use self::time::Duration;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::query::QueryParser; use crate::query::QueryParser;
use crate::schema::Cardinality; use crate::schema::Cardinality;
@@ -17,6 +14,7 @@ mod tests {
use crate::schema::IntOptions; use crate::schema::IntOptions;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Index; use crate::Index;
use chrono::Duration;
#[test] #[test]
fn test_multivalued_u64() { fn test_multivalued_u64() {


@@ -8,33 +8,30 @@ use crate::core::SegmentComponent;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::directory::TerminatingWrite;
use crate::directory::{DirectoryLock, GarbageCollectionResult}; use crate::directory::{DirectoryLock, GarbageCollectionResult};
use crate::directory::{TerminatingWrite, WatchCallbackList};
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset; use crate::fastfield::write_delete_bitset;
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue}; use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping; use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
use crate::indexer::operation::DeleteOperation; use crate::indexer::operation::DeleteOperation;
use crate::indexer::segment_manager::SegmentRegisters;
use crate::indexer::segment_register::SegmentRegister;
use crate::indexer::stamper::Stamper; use crate::indexer::stamper::Stamper;
use crate::indexer::MergePolicy; use crate::indexer::MergePolicy;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::indexer::SegmentWriter; use crate::indexer::SegmentWriter;
use crate::reader::NRTReader;
use crate::schema::Document; use crate::schema::Document;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::tokenizer::TokenizerManager; use crate::Opstamp;
use crate::{IndexReader, Opstamp};
use crossbeam::channel; use crossbeam::channel;
use futures::executor::block_on; use futures::executor::block_on;
use futures::future::Future; use futures::future::Future;
use smallvec::{smallvec, SmallVec}; use smallvec::smallvec;
use smallvec::SmallVec;
use std::mem; use std::mem;
use std::ops::Range; use std::ops::Range;
use std::sync::{Arc, RwLock}; use std::sync::Arc;
use std::thread; use std::thread;
use std::thread::JoinHandle; use std::thread::JoinHandle;
@@ -71,8 +68,6 @@ pub struct IndexWriter {
// lifetime of the lock with that of the IndexWriter. // lifetime of the lock with that of the IndexWriter.
_directory_lock: Option<DirectoryLock>, _directory_lock: Option<DirectoryLock>,
segment_registers: Arc<RwLock<SegmentRegisters>>,
index: Index, index: Index,
heap_size_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
@@ -92,8 +87,6 @@ pub struct IndexWriter {
stamper: Stamper, stamper: Stamper,
committed_opstamp: Opstamp, committed_opstamp: Opstamp,
on_commit: WatchCallbackList,
} }
fn compute_deleted_bitset( fn compute_deleted_bitset(
@@ -140,6 +133,7 @@ fn compute_deleted_bitset(
/// For instance, there was no delete operation between the state of the `segment_entry` and /// For instance, there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, `segment_entry` is not updated. /// the `target_opstamp`, `segment_entry` is not updated.
pub(crate) fn advance_deletes( pub(crate) fn advance_deletes(
mut segment: Segment,
segment_entry: &mut SegmentEntry, segment_entry: &mut SegmentEntry,
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> crate::Result<()> { ) -> crate::Result<()> {
@@ -148,38 +142,28 @@ pub(crate) fn advance_deletes(
return Ok(()); return Ok(());
} }
let delete_bitset_opt = segment_entry.take_delete_bitset(); if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() {
// We avoid directly advancing the `SegmentEntry` delete cursor, because
// we do not want to end up in an invalid state if the delete bitset
// serialization fails.
let mut delete_cursor = segment_entry.delete_cursor();
if delete_bitset_opt.is_none() && delete_cursor.get().is_none() {
// There has been no `DeleteOperation` between the segment status and `target_opstamp`. // There has been no `DeleteOperation` between the segment status and `target_opstamp`.
return Ok(()); return Ok(());
} }
// We open our current serialized segment to compute the new deleted bitset.
let segment = segment_entry.segment().clone();
let segment_reader = SegmentReader::open(&segment)?; let segment_reader = SegmentReader::open(&segment)?;
let max_doc = segment_reader.max_doc(); let max_doc = segment_reader.max_doc();
let mut delete_bitset: BitSet = let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
delete_bitset_opt.unwrap_or_else(|| BitSet::with_max_value(max_doc)); Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_max_value(max_doc),
let num_deleted_docs_before = segment.meta().num_deleted_docs(); };
compute_deleted_bitset( compute_deleted_bitset(
&mut delete_bitset, &mut delete_bitset,
&segment_reader, &segment_reader,
&mut delete_cursor, segment_entry.delete_cursor(),
&DocToOpstampMapping::None, &DocToOpstampMapping::None,
target_opstamp, target_opstamp,
)?; )?;
// TODO optimize... We are simply manipulating bitsets here. // TODO optimize
// We should be able to compute the union much faster.
if let Some(seg_delete_bitset) = segment_reader.delete_bitset() { if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
for doc in 0u32..max_doc { for doc in 0u32..max_doc {
if seg_delete_bitset.is_deleted(doc) { if seg_delete_bitset.is_deleted(doc) {
@@ -188,23 +172,15 @@ pub(crate) fn advance_deletes(
} }
} }
let num_deleted_docs = delete_bitset.len() as u32; let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 {
if num_deleted_docs > num_deleted_docs_before { segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
// We need to write a new delete file. let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
let mut delete_file = segment
.with_delete_meta(num_deleted_docs, target_opstamp)
.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?; write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
delete_file.terminate()?; delete_file.terminate()?;
segment_entry.reset_delete_meta(num_deleted_docs as u32, target_opstamp);
} }
// Regardless of whether we did end up having to write a new file or not segment_entry.set_meta(segment.meta().clone());
// we advance the `delete_cursor`. This is an optimisation. We want to ensure we do not
// check that a given deleted term does not match any of our docs more than once.
segment_entry.set_delete_cursor(delete_cursor);
Ok(()) Ok(())
} }
@@ -213,12 +189,11 @@ fn index_documents(
segment: Segment, segment: Segment,
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>, grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
segment_updater: &mut SegmentUpdater, segment_updater: &mut SegmentUpdater,
tokenizers: &TokenizerManager,
mut delete_cursor: DeleteCursor, mut delete_cursor: DeleteCursor,
) -> crate::Result<bool> { ) -> crate::Result<bool> {
let mut segment_writer =
SegmentWriter::for_segment(memory_budget, segment.clone(), tokenizers)?;
let schema = segment.schema(); let schema = segment.schema();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
for document_group in grouped_document_iterator { for document_group in grouped_document_iterator {
for doc in document_group { for doc in document_group {
segment_writer.add_document(doc, &schema)?; segment_writer.add_document(doc, &schema)?;
@@ -256,7 +231,11 @@ fn index_documents(
last_docstamp, last_docstamp,
)?; )?;
let segment_entry = SegmentEntry::new(segment_with_max_doc, delete_cursor, delete_bitset_opt); let segment_entry = SegmentEntry::new(
segment_with_max_doc.meta().clone(),
delete_cursor,
delete_bitset_opt,
);
block_on(segment_updater.schedule_add_segment(segment_entry))?; block_on(segment_updater.schedule_add_segment(segment_entry))?;
Ok(true) Ok(true)
} }
@@ -328,24 +307,16 @@ impl IndexWriter {
let delete_queue = DeleteQueue::new(); let delete_queue = DeleteQueue::new();
let meta = index.load_metas()?; let current_opstamp = index.load_metas()?.opstamp;
let stamper = Stamper::new(meta.opstamp); let stamper = Stamper::new(current_opstamp);
let commited_segments = SegmentRegister::new(
index.directory(),
&index.schema(),
meta.segments,
&delete_queue.cursor(),
);
let segment_registers = Arc::new(RwLock::new(SegmentRegisters::new(commited_segments)));
let segment_updater = let segment_updater =
SegmentUpdater::create(segment_registers.clone(), index.clone(), stamper.clone())?; SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;
let mut index_writer = IndexWriter { let mut index_writer = IndexWriter {
_directory_lock: Some(directory_lock), _directory_lock: Some(directory_lock),
segment_registers,
heap_size_in_bytes_per_thread, heap_size_in_bytes_per_thread,
index: index.clone(), index: index.clone(),
@@ -359,12 +330,10 @@ impl IndexWriter {
delete_queue, delete_queue,
committed_opstamp: meta.opstamp, committed_opstamp: current_opstamp,
stamper, stamper,
worker_id: 0, worker_id: 0,
on_commit: Default::default(),
}; };
index_writer.start_workers()?; index_writer.start_workers()?;
Ok(index_writer) Ok(index_writer)
@@ -404,6 +373,13 @@ impl IndexWriter {
result result
} }
#[doc(hidden)]
pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
let delete_cursor = self.delete_queue.cursor();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
block_on(self.segment_updater.schedule_add_segment(segment_entry))
}
/// Creates a new segment. /// Creates a new segment.
/// ///
/// This method is useful only for users trying to do complex /// This method is useful only for users trying to do complex
@@ -452,13 +428,12 @@ impl IndexWriter {
// was dropped. // was dropped.
return Ok(()); return Ok(());
} }
let segment = index.new_segment_unpersisted(); let segment = index.new_segment();
index_documents( index_documents(
mem_budget, mem_budget,
segment, segment,
&mut document_iterator, &mut document_iterator,
&mut segment_updater, &mut segment_updater,
index.tokenizers(),
delete_cursor.clone(), delete_cursor.clone(),
)?; )?;
} }
@@ -485,21 +460,6 @@ impl IndexWriter {
Ok(()) Ok(())
} }
// TODO move me
pub(crate) fn trigger_commit(&self) -> impl Future<Output = ()> {
self.on_commit.broadcast()
}
pub fn reader(&self, num_searchers: usize) -> crate::Result<IndexReader> {
let nrt_reader = NRTReader::create(
num_searchers,
self.index.clone(),
self.segment_registers.clone(),
&self.on_commit,
)?;
Ok(IndexReader::NRT(nrt_reader))
}
/// Detects and removes the files that are not used by the index anymore. /// Detects and removes the files that are not used by the index anymore.
pub fn garbage_collect_files( pub fn garbage_collect_files(
&self, &self,
@@ -643,7 +603,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit` /// It is also possible to add a payload to the `commit`
/// using this API. /// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html) /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self, soft_commit: bool) -> crate::Result<PreparedCommit> { pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
// Here, because we join all of the worker threads, // Here, because we join all of the worker threads,
// all of the segment update for this commit have been // all of the segment update for this commit have been
// sent. // sent.
@@ -671,7 +631,7 @@ impl IndexWriter {
} }
let commit_opstamp = self.stamper.stamp(); let commit_opstamp = self.stamper.stamp();
let prepared_commit = PreparedCommit::new(self, commit_opstamp, soft_commit); let prepared_commit = PreparedCommit::new(self, commit_opstamp);
info!("Prepared commit {}", commit_opstamp); info!("Prepared commit {}", commit_opstamp);
Ok(prepared_commit) Ok(prepared_commit)
} }
@@ -691,11 +651,7 @@ impl IndexWriter {
/// that made it in the commit. /// that made it in the commit.
/// ///
pub fn commit(&mut self) -> crate::Result<Opstamp> { pub fn commit(&mut self) -> crate::Result<Opstamp> {
self.prepare_commit(false)?.commit() self.prepare_commit()?.commit()
}
pub fn soft_commit(&mut self) -> crate::Result<Opstamp> {
self.prepare_commit(true)?.commit()
} }
pub(crate) fn segment_updater(&self) -> &SegmentUpdater { pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
@@ -820,6 +776,7 @@ impl Drop for IndexWriter {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::super::operation::UserOperation; use super::super::operation::UserOperation;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::directory::error::LockError; use crate::directory::error::LockError;
@@ -940,7 +897,7 @@ mod tests {
let index_writer = index.writer(3_000_000).unwrap(); let index_writer = index.writer(3_000_000).unwrap();
assert_eq!( assert_eq!(
format!("{:?}", index_writer.get_merge_policy()), format!("{:?}", index_writer.get_merge_policy()),
"LogMergePolicy { min_merge_size: 8, min_layer_size: 10000, \ "LogMergePolicy { min_merge_size: 8, max_merge_size: 10000000, min_layer_size: 10000, \
level_log_size: 0.75 }" level_log_size: 0.75 }"
); );
let merge_policy = Box::new(NoMergePolicy::default()); let merge_policy = Box::new(NoMergePolicy::default());
@@ -1052,8 +1009,7 @@ mod tests {
index_writer.add_document(doc!(text_field => "a")); index_writer.add_document(doc!(text_field => "a"));
} }
{ {
let mut prepared_commit = let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
index_writer.prepare_commit(false).expect("commit failed");
prepared_commit.set_payload("first commit"); prepared_commit.set_payload("first commit");
prepared_commit.commit().expect("commit failed"); prepared_commit.commit().expect("commit failed");
} }
@@ -1086,8 +1042,7 @@ mod tests {
index_writer.add_document(doc!(text_field => "a")); index_writer.add_document(doc!(text_field => "a"));
} }
{ {
let mut prepared_commit = let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
index_writer.prepare_commit(false).expect("commit failed");
prepared_commit.set_payload("first commit"); prepared_commit.set_payload("first commit");
prepared_commit.abort().expect("commit failed"); prepared_commit.abort().expect("commit failed");
} }
@@ -1262,43 +1217,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid")); index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.commit().is_ok()); let commit = index_writer.commit();
} assert!(commit.is_ok());
#[test]
fn test_index_writer_reader() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.commit().is_ok());
let reader = index_writer.reader(2).unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 1u64);
index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.commit().is_ok());
assert_eq!(reader.searcher().num_docs(), 2u64);
assert_eq!(searcher.num_docs(), 1u64);
}
#[test]
fn test_index_writer_reader_soft_commit() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.soft_commit().is_ok());
let nrt_reader = index_writer.reader(2).unwrap();
let normal_reader = index.reader_builder().try_into().unwrap();
assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
assert_eq!(normal_reader.searcher().num_docs(), 0u64);
assert!(index_writer.commit().is_ok());
assert!(normal_reader.reload().is_ok());
assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
assert_eq!(normal_reader.searcher().num_docs(), 1u64);
} }
} }
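With `soft_commit` gone, `prepare_commit` takes no arguments again. A short sketch of the two-phase commit with a payload, mirroring the tests in the hunk above:

```rust
use tantivy::{IndexWriter, Opstamp};

fn commit_with_payload(index_writer: &mut IndexWriter) -> tantivy::Result<Opstamp> {
    // Start a two-phase commit, attach a payload, then finalize it.
    let mut prepared = index_writer.prepare_commit()?;
    prepared.set_payload("first commit");
    prepared.commit()
}
```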


@@ -6,12 +6,14 @@ use std::f64;
const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75; const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75;
const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000; const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
const DEFAULT_MIN_MERGE_SIZE: usize = 8; const DEFAULT_MIN_MERGE_SIZE: usize = 8;
const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;
/// `LogMergePolicy` tries tries to merge segments that have a similar number of /// `LogMergePolicy` tries tries to merge segments that have a similar number of
/// documents. /// documents.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct LogMergePolicy { pub struct LogMergePolicy {
min_merge_size: usize, min_merge_size: usize,
max_merge_size: usize,
min_layer_size: u32, min_layer_size: u32,
level_log_size: f64, level_log_size: f64,
} }
@@ -26,6 +28,12 @@ impl LogMergePolicy {
self.min_merge_size = min_merge_size; self.min_merge_size = min_merge_size;
} }
/// Set the maximum number of docs in a segment for it to be considered for
/// merging.
pub fn set_max_merge_size(&mut self, max_merge_size: usize) {
self.max_merge_size = max_merge_size;
}
/// Set the minimum segment size under which all segment belong /// Set the minimum segment size under which all segment belong
/// to the same level. /// to the same level.
pub fn set_min_layer_size(&mut self, min_layer_size: u32) { pub fn set_min_layer_size(&mut self, min_layer_size: u32) {
@@ -53,6 +61,7 @@ impl MergePolicy for LogMergePolicy {
let mut size_sorted_tuples = segments let mut size_sorted_tuples = segments
.iter() .iter()
.map(SegmentMeta::num_docs) .map(SegmentMeta::num_docs)
.filter(|s| s <= &(self.max_merge_size as u32))
.enumerate() .enumerate()
.collect::<Vec<(usize, u32)>>(); .collect::<Vec<(usize, u32)>>();
@@ -86,6 +95,7 @@ impl Default for LogMergePolicy {
fn default() -> LogMergePolicy { fn default() -> LogMergePolicy {
LogMergePolicy { LogMergePolicy {
min_merge_size: DEFAULT_MIN_MERGE_SIZE, min_merge_size: DEFAULT_MIN_MERGE_SIZE,
max_merge_size: DEFAULT_MAX_MERGE_SIZE,
min_layer_size: DEFAULT_MIN_LAYER_SIZE, min_layer_size: DEFAULT_MIN_LAYER_SIZE,
level_log_size: DEFAULT_LEVEL_LOG_SIZE, level_log_size: DEFAULT_LEVEL_LOG_SIZE,
} }
@@ -104,6 +114,7 @@ mod tests {
fn test_merge_policy() -> LogMergePolicy { fn test_merge_policy() -> LogMergePolicy {
let mut log_merge_policy = LogMergePolicy::default(); let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3); log_merge_policy.set_min_merge_size(3);
log_merge_policy.set_max_merge_size(100_000);
log_merge_policy.set_min_layer_size(2); log_merge_policy.set_min_layer_size(2);
log_merge_policy log_merge_policy
} }
@@ -141,11 +152,11 @@ mod tests {
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(1000), create_random_segment_meta(1_000),
create_random_segment_meta(1000), create_random_segment_meta(1_000),
create_random_segment_meta(1000), create_random_segment_meta(1_000),
create_random_segment_meta(10000), create_random_segment_meta(10_000),
create_random_segment_meta(10000), create_random_segment_meta(10_000),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
create_random_segment_meta(10), create_random_segment_meta(10),
@@ -182,4 +193,19 @@ mod tests {
let result_list = test_merge_policy().compute_merge_candidates(&test_input); let result_list = test_merge_policy().compute_merge_candidates(&test_input);
assert_eq!(result_list.len(), 1); assert_eq!(result_list.len(), 1);
} }
#[test]
fn test_large_merge_segments() {
let test_input = vec![
create_random_segment_meta(1_000_000),
create_random_segment_meta(100_001),
create_random_segment_meta(100_000),
create_random_segment_meta(100_000),
create_random_segment_meta(100_000),
];
let result_list = test_merge_policy().compute_merge_candidates(&test_input);
// Do not include large segments
assert_eq!(result_list.len(), 1);
assert_eq!(result_list[0].0.len(), 3)
}
} }
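The new `max_merge_size` knob (10M docs by default) keeps large segments out of merge candidates. A hedged sketch of wiring it into an `IndexWriter`, using the same values as the test policy above; the `tantivy::merge_policy` module path is assumed:

```rust
use tantivy::merge_policy::LogMergePolicy;
use tantivy::IndexWriter;

fn limit_merges(index_writer: &IndexWriter) {
    let mut policy = LogMergePolicy::default();
    policy.set_min_merge_size(3);
    // Segments above this number of docs are no longer considered for merging.
    policy.set_max_merge_size(100_000);
    policy.set_min_layer_size(2);
    index_writer.set_merge_policy(Box::new(policy));
}
```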


@@ -23,7 +23,6 @@ pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
pub use self::prepared_commit::PreparedCommit; pub use self::prepared_commit::PreparedCommit;
pub use self::segment_entry::SegmentEntry; pub use self::segment_entry::SegmentEntry;
pub use self::segment_manager::SegmentManager; pub use self::segment_manager::SegmentManager;
pub(crate) use self::segment_manager::SegmentRegisters;
pub use self::segment_serializer::SegmentSerializer; pub use self::segment_serializer::SegmentSerializer;
pub use self::segment_writer::SegmentWriter; pub use self::segment_writer::SegmentWriter;


@@ -19,6 +19,8 @@ pub struct AddOperation {
/// UserOperation is an enum type that encapsulates other operation types. /// UserOperation is an enum type that encapsulates other operation types.
#[derive(Eq, PartialEq, Debug)] #[derive(Eq, PartialEq, Debug)]
pub enum UserOperation { pub enum UserOperation {
/// Add operation
Add(Document), Add(Document),
/// Delete operation
Delete(Term), Delete(Term),
} }
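Since `UserOperation` is now exported directly (#744), callers outside the crate can build atomic batches for `IndexWriter::run`. A hedged sketch; `title` is assumed to be a text `Field` from the application's schema, and the opstamp returned by `run` is ignored:

```rust
use tantivy::schema::Field;
use tantivy::{doc, IndexWriter, Term, UserOperation};

fn apply_batch(writer: &mut IndexWriter, title: Field) {
    let ops = vec![
        UserOperation::Add(doc!(title => "new document")),
        UserOperation::Delete(Term::from_field_text(title, "old document")),
    ];
    // The batch is applied as a single operation group.
    let _ = writer.run(ops);
}
```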


@@ -8,20 +8,14 @@ pub struct PreparedCommit<'a> {
index_writer: &'a mut IndexWriter, index_writer: &'a mut IndexWriter,
payload: Option<String>, payload: Option<String>,
opstamp: Opstamp, opstamp: Opstamp,
soft_commit: bool,
} }
impl<'a> PreparedCommit<'a> { impl<'a> PreparedCommit<'a> {
pub(crate) fn new( pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> {
index_writer: &'a mut IndexWriter,
opstamp: Opstamp,
soft_commit: bool,
) -> PreparedCommit {
PreparedCommit { PreparedCommit {
index_writer, index_writer,
payload: None, payload: None,
opstamp, opstamp,
soft_commit,
} }
} }
@@ -39,12 +33,11 @@ impl<'a> PreparedCommit<'a> {
pub fn commit(self) -> Result<Opstamp> { pub fn commit(self) -> Result<Opstamp> {
info!("committing {}", self.opstamp); info!("committing {}", self.opstamp);
block_on(self.index_writer.segment_updater().schedule_commit( let _ = block_on(
self.opstamp, self.index_writer
self.payload, .segment_updater()
self.soft_commit, .schedule_commit(self.opstamp, self.payload),
))?; );
block_on(self.index_writer.trigger_commit());
Ok(self.opstamp) Ok(self.opstamp)
} }
} }


@@ -1,9 +1,7 @@
use crate::common::BitSet; use crate::common::BitSet;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::ManagedDirectory;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use crate::{Opstamp, Segment};
use std::fmt; use std::fmt;
/// A segment entry describes the state of /// A segment entry describes the state of
@@ -21,81 +19,55 @@ use std::fmt;
/// in the .del file or in the `delete_bitset`. /// in the .del file or in the `delete_bitset`.
#[derive(Clone)] #[derive(Clone)]
pub struct SegmentEntry { pub struct SegmentEntry {
segment: Segment, meta: SegmentMeta,
delete_bitset: Option<BitSet>, delete_bitset: Option<BitSet>,
delete_cursor: DeleteCursor, delete_cursor: DeleteCursor,
} }
impl SegmentEntry { impl SegmentEntry {
/// Create a new `SegmentEntry` /// Create a new `SegmentEntry`
pub(crate) fn new( pub fn new(
segment: Segment, segment_meta: SegmentMeta,
delete_cursor: DeleteCursor, delete_cursor: DeleteCursor,
delete_bitset: Option<BitSet>, delete_bitset: Option<BitSet>,
) -> SegmentEntry { ) -> SegmentEntry {
SegmentEntry { SegmentEntry {
segment, meta: segment_meta,
delete_bitset, delete_bitset,
delete_cursor, delete_cursor,
} }
} }
pub fn persist(&mut self, dest_directory: ManagedDirectory) -> crate::Result<()> { /// Return a reference to the segment entry deleted bitset.
// TODO take in account delete bitset?
self.segment.persist(dest_directory)?;
Ok(())
}
pub fn segment(&self) -> &Segment {
&self.segment
}
/// `Takes` (as in Option::take) the delete bitset of
/// a segment entry.
/// ///
/// `DocId` in this bitset are flagged as deleted. /// `DocId` in this bitset are flagged as deleted.
pub fn take_delete_bitset(&mut self) -> Option<BitSet> { pub fn delete_bitset(&self) -> Option<&BitSet> {
self.delete_bitset.take() self.delete_bitset.as_ref()
} }
/// Reset the delete informmationo in this segment. /// Set the `SegmentMeta` for this segment.
/// pub fn set_meta(&mut self, segment_meta: SegmentMeta) {
/// The `SegmentEntry` segment's `SegmentMeta` gets updated, and self.meta = segment_meta;
/// any delete bitset is drop and set to None.
pub fn reset_delete_meta(&mut self, num_deleted_docs: u32, target_opstamp: Opstamp) {
self.segment = self
.segment
.clone()
.with_delete_meta(num_deleted_docs, target_opstamp);
self.delete_bitset = None;
} }
pub fn set_delete_cursor(&mut self, delete_cursor: DeleteCursor) {
self.delete_cursor = delete_cursor;
}
/// Return a reference to the segment_entry's delete cursor /// Return a reference to the segment_entry's delete cursor
pub fn delete_cursor(&mut self) -> DeleteCursor { pub fn delete_cursor(&mut self) -> &mut DeleteCursor {
self.delete_cursor.clone() &mut self.delete_cursor
} }
/// Returns the segment id. /// Returns the segment id.
pub fn segment_id(&self) -> SegmentId { pub fn segment_id(&self) -> SegmentId {
self.segment.id() self.meta.id()
} }
/// Accessor to the `SegmentMeta` /// Accessor to the `SegmentMeta`
pub fn meta(&self) -> &SegmentMeta { pub fn meta(&self) -> &SegmentMeta {
self.segment.meta() &self.meta
} }
} }
impl fmt::Debug for SegmentEntry { impl fmt::Debug for SegmentEntry {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
let num_deletes = self.delete_bitset.as_ref().map(|bitset| bitset.len()); write!(formatter, "SegmentEntry({:?})", self.meta)
write!(
formatter,
"SegmentEntry(seg={:?}, ndel={:?})",
self.segment, num_deletes
)
} }
} }


@@ -1,14 +1,17 @@
use super::segment_register::SegmentRegister; use super::segment_register::SegmentRegister;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::Segment; use crate::Result as TantivyResult;
use std::collections::hash_set::HashSet; use std::collections::hash_set::HashSet;
use std::sync::{Arc, RwLock}; use std::fmt::{self, Debug, Formatter};
use std::sync::RwLock;
use std::sync::{RwLockReadGuard, RwLockWriteGuard}; use std::sync::{RwLockReadGuard, RwLockWriteGuard};
#[derive(Default)] #[derive(Default)]
pub(crate) struct SegmentRegisters { struct SegmentRegisters {
uncommitted: SegmentRegister, uncommitted: SegmentRegister,
committed: SegmentRegister, committed: SegmentRegister,
} }
@@ -20,17 +23,6 @@ pub(crate) enum SegmentsStatus {
} }
impl SegmentRegisters { impl SegmentRegisters {
pub fn new(committed: SegmentRegister) -> SegmentRegisters {
SegmentRegisters {
uncommitted: Default::default(),
committed,
}
}
pub fn committed_segment(&self) -> Vec<Segment> {
self.committed.segments()
}
/// Check if all the segments are committed or uncommited. /// Check if all the segments are committed or uncommited.
/// ///
/// If some segment is missing or segments are in a different state (this should not happen /// If some segment is missing or segments are in a different state (this should not happen
@@ -53,7 +45,18 @@ impl SegmentRegisters {
/// changes (merges especially) /// changes (merges especially)
#[derive(Default)] #[derive(Default)]
pub struct SegmentManager { pub struct SegmentManager {
registers: Arc<RwLock<SegmentRegisters>>, registers: RwLock<SegmentRegisters>,
}
impl Debug for SegmentManager {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
let lock = self.read();
write!(
f,
"{{ uncommitted: {:?}, committed: {:?} }}",
lock.uncommitted, lock.committed
)
}
} }
pub fn get_mergeable_segments( pub fn get_mergeable_segments(
@@ -72,8 +75,16 @@ pub fn get_mergeable_segments(
} }
impl SegmentManager { impl SegmentManager {
pub(crate) fn new(registers: Arc<RwLock<SegmentRegisters>>) -> SegmentManager { pub fn from_segments(
SegmentManager { registers } segment_metas: Vec<SegmentMeta>,
delete_cursor: &DeleteCursor,
) -> SegmentManager {
SegmentManager {
registers: RwLock::new(SegmentRegisters {
uncommitted: SegmentRegister::default(),
committed: SegmentRegister::new(segment_metas, delete_cursor),
}),
}
} }
/// Returns all of the segment entries (committed or uncommitted) /// Returns all of the segment entries (committed or uncommitted)
@@ -134,7 +145,7 @@ impl SegmentManager {
/// Returns an error if some segments are missing, or if /// Returns an error if some segments are missing, or if
/// the `segment_ids` are not either all committed or all /// the `segment_ids` are not either all committed or all
/// uncommitted. /// uncommitted.
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> crate::Result<Vec<SegmentEntry>> { pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> {
let registers_lock = self.read(); let registers_lock = self.read();
let mut segment_entries = vec![]; let mut segment_entries = vec![];
if registers_lock.uncommitted.contains_all(segment_ids) { if registers_lock.uncommitted.contains_all(segment_ids) {
@@ -155,7 +166,7 @@ impl SegmentManager {
let error_msg = "Merge operation sent for segments that are not \ let error_msg = "Merge operation sent for segments that are not \
all uncommited or commited." all uncommited or commited."
.to_string(); .to_string();
return Err(crate::Error::InvalidArgument(error_msg)); return Err(TantivyError::InvalidArgument(error_msg));
} }
Ok(segment_entries) Ok(segment_entries)
} }

View File

@@ -1,10 +1,7 @@
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::ManagedDirectory;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::segment_entry::SegmentEntry; use crate::indexer::segment_entry::SegmentEntry;
use crate::schema::Schema;
use crate::Segment;
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt::{self, Debug, Formatter}; use std::fmt::{self, Debug, Formatter};
@@ -49,13 +46,6 @@ impl SegmentRegister {
.collect() .collect()
} }
pub fn segments(&self) -> Vec<Segment> {
self.segment_states
.values()
.map(|segment_entry| segment_entry.segment().clone())
.collect()
}
pub fn segment_entries(&self) -> Vec<SegmentEntry> { pub fn segment_entries(&self) -> Vec<SegmentEntry> {
self.segment_states.values().cloned().collect() self.segment_states.values().cloned().collect()
} }
@@ -89,17 +79,11 @@ impl SegmentRegister {
self.segment_states.get(segment_id).cloned() self.segment_states.get(segment_id).cloned()
} }
pub fn new( pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister {
directory: &ManagedDirectory,
schema: &Schema,
segment_metas: Vec<SegmentMeta>,
delete_cursor: &DeleteCursor,
) -> SegmentRegister {
let mut segment_states = HashMap::new(); let mut segment_states = HashMap::new();
for meta in segment_metas { for segment_meta in segment_metas {
let segment_id = meta.id(); let segment_id = segment_meta.id();
let segment = Segment::new_persisted(meta, directory.clone(), schema.clone()); let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None);
let segment_entry = SegmentEntry::new(segment, delete_cursor.clone(), None);
segment_states.insert(segment_id, segment_entry); segment_states.insert(segment_id, segment_entry);
} }
SegmentRegister { segment_states } SegmentRegister { segment_states }
@@ -111,7 +95,6 @@ mod tests {
use super::*; use super::*;
use crate::core::{SegmentId, SegmentMetaInventory}; use crate::core::{SegmentId, SegmentMetaInventory};
use crate::indexer::delete_queue::*; use crate::indexer::delete_queue::*;
use crate::schema::Schema;
fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> { fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
segment_register segment_register
@@ -125,7 +108,6 @@ mod tests {
fn test_segment_register() { fn test_segment_register() {
let inventory = SegmentMetaInventory::default(); let inventory = SegmentMetaInventory::default();
let delete_queue = DeleteQueue::new(); let delete_queue = DeleteQueue::new();
let schema = Schema::builder().build();
let mut segment_register = SegmentRegister::default(); let mut segment_register = SegmentRegister::default();
let segment_id_a = SegmentId::generate_random(); let segment_id_a = SegmentId::generate_random();
@@ -133,24 +115,21 @@ mod tests {
let segment_id_merged = SegmentId::generate_random(); let segment_id_merged = SegmentId::generate_random();
{ {
let meta = inventory.new_segment_meta(segment_id_a, 0u32); let segment_meta = inventory.new_segment_meta(segment_id_a, 0u32);
let segment = Segment::new_volatile(meta, schema.clone()); let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry); segment_register.add_segment_entry(segment_entry);
} }
assert_eq!(segment_ids(&segment_register), vec![segment_id_a]); assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
{ {
let segment_meta = inventory.new_segment_meta(segment_id_b, 0u32); let segment_meta = inventory.new_segment_meta(segment_id_b, 0u32);
let segment = Segment::new_volatile(segment_meta, schema.clone()); let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry); segment_register.add_segment_entry(segment_entry);
} }
segment_register.remove_segment(&segment_id_a); segment_register.remove_segment(&segment_id_a);
segment_register.remove_segment(&segment_id_b); segment_register.remove_segment(&segment_id_b);
{ {
let segment_meta_merged = inventory.new_segment_meta(segment_id_merged, 0u32); let segment_meta_merged = inventory.new_segment_meta(segment_id_merged, 0u32);
let segment = Segment::new_volatile(segment_meta_merged, schema); let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None);
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry); segment_register.add_segment_entry(segment_entry);
} }
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]); assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);

View File

@@ -7,10 +7,11 @@ use crate::core::SegmentMeta;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult}; use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes; use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory; use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger; use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::{SegmentRegisters, SegmentsStatus}; use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::stamper::Stamper; use crate::indexer::stamper::Stamper;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
@@ -116,7 +117,8 @@ fn merge(
// First we apply all of the deletes to the merged segment, up to the target opstamp. // First we apply all of the deletes to the merged segment, up to the target opstamp.
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
advance_deletes(segment_entry, target_opstamp)?; let segment = index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
} }
let delete_cursor = segment_entries[0].delete_cursor().clone(); let delete_cursor = segment_entries[0].delete_cursor().clone();
@@ -132,13 +134,11 @@ fn merge(
// ... we just serialize this index merger in our new segment to merge the two segments. // ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?; let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
let max_doc = merger.write(segment_serializer)?; let num_docs = merger.write(segment_serializer)?;
Ok(SegmentEntry::new( let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
merged_segment.with_max_doc(max_doc),
delete_cursor, Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
None,
))
} }
pub(crate) struct InnerSegmentUpdater { pub(crate) struct InnerSegmentUpdater {
@@ -162,11 +162,12 @@ pub(crate) struct InnerSegmentUpdater {
impl SegmentUpdater { impl SegmentUpdater {
pub fn create( pub fn create(
segment_registers: Arc<RwLock<SegmentRegisters>>,
index: Index, index: Index,
stamper: Stamper, stamper: Stamper,
delete_cursor: &DeleteCursor,
) -> crate::Result<SegmentUpdater> { ) -> crate::Result<SegmentUpdater> {
let segment_manager = SegmentManager::new(segment_registers); let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new() let pool = ThreadPoolBuilder::new()
.name_prefix("segment_updater") .name_prefix("segment_updater")
.pool_size(1) .pool_size(1)
@@ -229,7 +230,6 @@ impl SegmentUpdater {
&self, &self,
segment_entry: SegmentEntry, segment_entry: SegmentEntry,
) -> impl Future<Output = crate::Result<()>> { ) -> impl Future<Output = crate::Result<()>> {
// TODO temporary: serializing the segment at this point.
let segment_updater = self.clone(); let segment_updater = self.clone();
self.schedule_future(async move { self.schedule_future(async move {
segment_updater.segment_manager.add_segment(segment_entry); segment_updater.segment_manager.add_segment(segment_entry);
@@ -258,7 +258,8 @@ impl SegmentUpdater {
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> { fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
let mut segment_entries = self.segment_manager.segment_entries(); let mut segment_entries = self.segment_manager.segment_entries();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
advance_deletes(segment_entry, target_opstamp)?; let segment = self.index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
} }
Ok(segment_entries) Ok(segment_entries)
} }
@@ -326,21 +327,12 @@ impl SegmentUpdater {
&self, &self,
opstamp: Opstamp, opstamp: Opstamp,
payload: Option<String>, payload: Option<String>,
soft_commit: bool,
) -> impl Future<Output = crate::Result<()>> { ) -> impl Future<Output = crate::Result<()>> {
let segment_updater: SegmentUpdater = self.clone(); let segment_updater: SegmentUpdater = self.clone();
let directory = self.index.directory().clone();
self.schedule_future(async move { self.schedule_future(async move {
let mut segment_entries = segment_updater.purge_deletes(opstamp)?; let segment_entries = segment_updater.purge_deletes(opstamp)?;
if !soft_commit {
for segment_entry in &mut segment_entries {
segment_entry.persist(directory.clone())?;
}
}
segment_updater.segment_manager.commit(segment_entries); segment_updater.segment_manager.commit(segment_entries);
if !soft_commit { segment_updater.save_metas(opstamp, payload)?;
segment_updater.save_metas(opstamp, payload)?;
}
let _ = garbage_collect_files(segment_updater.clone()).await; let _ = garbage_collect_files(segment_updater.clone()).await;
segment_updater.consider_merge_options().await; segment_updater.consider_merge_options().await;
Ok(()) Ok(())
@@ -478,14 +470,17 @@ impl SegmentUpdater {
let end_merge_future = self.schedule_future(async move { let end_merge_future = self.schedule_future(async move {
info!("End merge {:?}", after_merge_segment_entry.meta()); info!("End merge {:?}", after_merge_segment_entry.meta());
{ {
let mut delete_cursor = after_merge_segment_entry.delete_cursor(); let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
if let Some(delete_operation) = delete_cursor.get() { if let Some(delete_operation) = delete_cursor.get() {
let committed_opstamp = segment_updater.load_metas().opstamp; let committed_opstamp = segment_updater.load_metas().opstamp;
if delete_operation.opstamp < committed_opstamp { if delete_operation.opstamp < committed_opstamp {
let _index = &segment_updater.index; let index = &segment_updater.index;
if let Err(e) = let segment = index.segment(after_merge_segment_entry.meta().clone());
advance_deletes(&mut after_merge_segment_entry, committed_opstamp) if let Err(e) = advance_deletes(
{ segment,
&mut after_merge_segment_entry,
committed_opstamp,
) {
error!( error!(
"Merge of {:?} was cancelled (advancing deletes failed): {:?}", "Merge of {:?} was cancelled (advancing deletes failed): {:?}",
merge_operation.segment_ids(), merge_operation.segment_ids(),

View File

@@ -11,10 +11,9 @@ use crate::schema::Schema;
use crate::schema::Term; use crate::schema::Term;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{Field, FieldEntry}; use crate::schema::{Field, FieldEntry};
use crate::tokenizer::FacetTokenizer; use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::PreTokenizedStream; use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{BoxedTokenizer, TokenizerManager}; use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::DocId; use crate::DocId;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use crate::Result;
@@ -50,7 +49,7 @@ pub struct SegmentWriter {
fast_field_writers: FastFieldsWriter, fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter, fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>, doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<BoxedTokenizer>>, tokenizers: Vec<Option<TextAnalyzer>>,
} }
impl SegmentWriter { impl SegmentWriter {
@@ -66,12 +65,11 @@ impl SegmentWriter {
pub fn for_segment( pub fn for_segment(
memory_budget: usize, memory_budget: usize,
mut segment: Segment, mut segment: Segment,
tokenizers: &TokenizerManager, schema: &Schema,
) -> Result<SegmentWriter> { ) -> Result<SegmentWriter> {
let schema = segment.schema();
let table_num_bits = initial_table_size(memory_budget)?; let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?; let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(&schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema let tokenizers = schema
.fields() .fields()
.map( .map(
@@ -80,7 +78,7 @@ impl SegmentWriter {
.get_indexing_options() .get_indexing_options()
.and_then(|text_index_option| { .and_then(|text_index_option| {
let tokenizer_name = &text_index_option.tokenizer(); let tokenizer_name = &text_index_option.tokenizer();
tokenizers.get(tokenizer_name) segment.index().tokenizers().get(tokenizer_name)
}), }),
_ => None, _ => None,
}, },
@@ -89,9 +87,9 @@ impl SegmentWriter {
Ok(SegmentWriter { Ok(SegmentWriter {
max_doc: 0, max_doc: 0,
multifield_postings, multifield_postings,
fieldnorms_writer: FieldNormsWriter::for_schema(&schema), fieldnorms_writer: FieldNormsWriter::for_schema(schema),
segment_serializer, segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema(&schema), fast_field_writers: FastFieldsWriter::from_schema(schema),
doc_opstamps: Vec::with_capacity(1_000), doc_opstamps: Vec::with_capacity(1_000),
tokenizers, tokenizers,
}) })
@@ -160,7 +158,7 @@ impl SegmentWriter {
} }
} }
FieldType::Str(_) => { FieldType::Str(_) => {
let mut token_streams: Vec<Box<dyn TokenStream>> = vec![]; let mut token_streams: Vec<BoxTokenStream> = vec![];
let mut offsets = vec![]; let mut offsets = vec![];
let mut total_offset = 0; let mut total_offset = 0;
@@ -173,7 +171,7 @@ impl SegmentWriter {
} }
token_streams token_streams
.push(Box::new(PreTokenizedStream::from(tok_str.clone()))); .push(PreTokenizedStream::from(tok_str.clone()).into());
} }
Value::Str(ref text) => { Value::Str(ref text) => {
if let Some(ref mut tokenizer) = if let Some(ref mut tokenizer) =
@@ -192,8 +190,7 @@ impl SegmentWriter {
let num_tokens = if token_streams.is_empty() { let num_tokens = if token_streams.is_empty() {
0 0
} else { } else {
let mut token_stream: Box<dyn TokenStream> = let mut token_stream = TokenStreamChain::new(offsets, token_streams);
Box::new(TokenStreamChain::new(offsets, token_streams));
self.multifield_postings self.multifield_postings
.index_text(doc_id, field, &mut token_stream) .index_text(doc_id, field, &mut token_stream)
}; };

View File

@@ -1,18 +1,76 @@
use crate::Opstamp; use crate::Opstamp;
use std::ops::Range; use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::atomic::Ordering;
use std::sync::Arc; use std::sync::Arc;
#[cfg(not(target_arch = "arm"))]
mod atomic_impl {
use crate::Opstamp;
use std::sync::atomic::{AtomicU64, Ordering};
#[derive(Default)]
pub struct AtomicU64Wrapper(AtomicU64);
impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
}
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
self.0.fetch_add(val as u64, order) as u64
}
pub fn revert(&self, val: u64, order: Ordering) -> u64 {
self.0.store(val, order);
val
}
}
}
#[cfg(target_arch = "arm")]
mod atomic_impl {
use crate::Opstamp;
/// On architectures without native 64-bit atomics (such as ARM), we rely on an `RwLock` instead.
use std::sync::atomic::Ordering;
use std::sync::RwLock;
#[derive(Default)]
pub struct AtomicU64Wrapper(RwLock<u64>);
impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
AtomicU64Wrapper(RwLock::new(first_opstamp))
}
pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
let mut lock = self.0.write().unwrap();
let previous_val = *lock;
*lock = previous_val + incr;
previous_val
}
pub fn revert(&self, val: u64, _order: Ordering) -> u64 {
let mut lock = self.0.write().unwrap();
*lock = val;
val
}
}
}
use self::atomic_impl::AtomicU64Wrapper;
/// Stamper provides Opstamps, which are just auto-incremented ids used to label /// Stamper provides Opstamps, which are just auto-incremented ids used to label
/// operations. /// operations.
/// ///
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`. /// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub struct Stamper(Arc<AtomicU64>); pub struct Stamper(Arc<AtomicU64Wrapper>);
impl Stamper { impl Stamper {
pub fn new(first_opstamp: Opstamp) -> Stamper { pub fn new(first_opstamp: Opstamp) -> Stamper {
Stamper(Arc::new(AtomicU64::new(first_opstamp))) Stamper(Arc::new(AtomicU64Wrapper::new(first_opstamp)))
} }
pub fn stamp(&self) -> Opstamp { pub fn stamp(&self) -> Opstamp {
@@ -31,8 +89,7 @@ impl Stamper {
/// Reverts the stamper to a given `Opstamp` value and returns it /// Reverts the stamper to a given `Opstamp` value and returns it
pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp { pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
self.0.store(to_opstamp, Ordering::SeqCst); self.0.revert(to_opstamp, Ordering::SeqCst)
to_opstamp
} }
} }
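The ARM fallback above swaps the `AtomicU64` for an `RwLock<u64>` while keeping the same `fetch_add`/`revert` contract that `Stamper` relies on. Below is a minimal standalone sketch of that contract (illustrative names, not the crate's code):

```rust
use std::sync::RwLock;

struct LockedCounter(RwLock<u64>);

impl LockedCounter {
    fn new(first: u64) -> Self {
        LockedCounter(RwLock::new(first))
    }
    /// Returns the previous value and advances the counter, like `AtomicU64::fetch_add`.
    fn fetch_add(&self, incr: u64) -> u64 {
        let mut guard = self.0.write().unwrap();
        let previous = *guard;
        *guard = previous + incr;
        previous
    }
    /// Resets the counter to `val`, like the `revert` used by `Stamper::revert`.
    fn revert(&self, val: u64) -> u64 {
        let mut guard = self.0.write().unwrap();
        *guard = val;
        val
    }
}

fn main() {
    let stamper = LockedCounter::new(10);
    assert_eq!(stamper.fetch_add(1), 10); // first opstamp handed out
    assert_eq!(stamper.fetch_add(1), 11);
    assert_eq!(stamper.revert(10), 10); // rollback after an aborted batch
    assert_eq!(stamper.fetch_add(1), 10);
}
```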

View File

@@ -161,10 +161,11 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, SkipResult}; pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent; pub use crate::core::{Executor, SegmentComponent};
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
pub use crate::core::{InvertedIndexReader, SegmentReader}; pub use crate::core::{InvertedIndexReader, SegmentReader};
pub use crate::directory::Directory; pub use crate::directory::Directory;
pub use crate::indexer::operation::UserOperation;
pub use crate::indexer::IndexWriter; pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings; pub use crate::postings::Postings;
pub use crate::reader::LeasedItem; pub use crate::reader::LeasedItem;
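Re-exporting `UserOperation` at the crate root makes batched writes through `IndexWriter::run` usable from outside the crate. A hedged sketch follows; the `UserOperation::Add`/`UserOperation::Delete` variants and the exact `run` signature are assumptions here, since they live in the private `indexer` module:

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, Term, UserOperation};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

    // Assumed variants: a delete and an add submitted as one batch.
    let ops = vec![
        UserOperation::Delete(Term::from_field_text(title, "obsolete")),
        UserOperation::Add(doc!(title => "fresh document")),
    ];
    // Assumed signature: run(Vec<UserOperation>) returning the batch opstamp.
    let _opstamp = writer.run(ops);
    writer.commit()?;
    Ok(())
}
```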

View File

@@ -220,7 +220,7 @@ pub mod tests {
{ {
let mut segment_writer = let mut segment_writer =
SegmentWriter::for_segment(3_000_000, segment.clone(), index.tokenizers()).unwrap(); SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
{ {
let mut doc = Document::default(); let mut doc = Document::default();
// checking that position works if the field has two values // checking that position works if the field has two values

View File

@@ -148,8 +148,7 @@ impl<'a> FieldSerializer<'a> {
} }
_ => (false, false), _ => (false, false),
}; };
let term_dictionary_builder = let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
TermDictionaryBuilder::create(term_dictionary_write, &field_type)?;
let postings_serializer = let postings_serializer =
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled); PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if position_enabled { let positions_serializer_opt = if position_enabled {

View File

@@ -15,6 +15,7 @@ use tantivy_fst::Automaton;
pub struct AutomatonWeight<A> { pub struct AutomatonWeight<A> {
field: Field, field: Field,
automaton: Arc<A>, automaton: Arc<A>,
boost: f32,
} }
impl<A> AutomatonWeight<A> impl<A> AutomatonWeight<A>
@@ -26,9 +27,15 @@ where
AutomatonWeight { AutomatonWeight {
field, field,
automaton: automaton.into(), automaton: automaton.into(),
boost: 1.0,
} }
} }
/// Boost the scorer by the given factor.
pub fn boost_by(self, boost: f32) -> Self {
Self { boost, ..self }
}
fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> { fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> {
let automaton: &A = &*self.automaton; let automaton: &A = &*self.automaton;
let term_stream_builder = term_dict.search(automaton); let term_stream_builder = term_dict.search(automaton);
@@ -58,7 +65,7 @@ where
} }
} }
let doc_bitset = BitSetDocSet::from(doc_bitset); let doc_bitset = BitSetDocSet::from(doc_bitset);
Ok(Box::new(ConstScorer::new(doc_bitset))) Ok(Box::new(ConstScorer::with_score(doc_bitset, self.boost)))
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {

View File

@@ -34,7 +34,7 @@ pub struct BM25Weight {
} }
impl BM25Weight { impl BM25Weight {
pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight { pub fn for_terms(searcher: &Searcher, terms: &[Term], boost: f32) -> BM25Weight {
assert!(!terms.is_empty(), "BM25 requires at least one term"); assert!(!terms.is_empty(), "BM25 requires at least one term");
let field = terms[0].field(); let field = terms[0].field();
for term in &terms[1..] { for term in &terms[1..] {
@@ -75,11 +75,11 @@ impl BM25Weight {
.sum::<f32>(); .sum::<f32>();
idf_explain = Explanation::new("idf", idf); idf_explain = Explanation::new("idf", idf);
} }
BM25Weight::new(idf_explain, average_fieldnorm) BM25Weight::new(idf_explain, average_fieldnorm, boost)
} }
fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight { fn new(idf_explain: Explanation, average_fieldnorm: f32, boost: f32) -> BM25Weight {
let weight = idf_explain.value() * (1f32 + K1); let weight = idf_explain.value() * (1f32 + K1) * boost;
BM25Weight { BM25Weight {
idf_explain, idf_explain,
weight, weight,

View File

@@ -79,6 +79,7 @@ pub struct FuzzyTermQuery {
transposition_cost_one: bool, transposition_cost_one: bool,
/// ///
prefix: bool, prefix: bool,
boost: f32,
} }
impl FuzzyTermQuery { impl FuzzyTermQuery {
@@ -89,6 +90,7 @@ impl FuzzyTermQuery {
distance, distance,
transposition_cost_one, transposition_cost_one,
prefix: false, prefix: false,
boost: 1.0,
} }
} }
@@ -99,16 +101,22 @@ impl FuzzyTermQuery {
distance, distance,
transposition_cost_one, transposition_cost_one,
prefix: true, prefix: true,
boost: 1.0,
} }
} }
/// Boost the query score by the given factor.
pub fn boost_by(self, boost: f32) -> Self {
Self { boost, ..self }
}
fn specialized_weight(&self) -> Result<AutomatonWeight<DFA>> { fn specialized_weight(&self) -> Result<AutomatonWeight<DFA>> {
// LEV_BUILDER is a HashMap, whose `get` method returns an Option // LEV_BUILDER is a HashMap, whose `get` method returns an Option
match LEV_BUILDER.get(&(self.distance, false)) { match LEV_BUILDER.get(&(self.distance, false)) {
// Unwrap the option and build the Ok(AutomatonWeight) // Unwrap the option and build the Ok(AutomatonWeight)
Some(automaton_builder) => { Some(automaton_builder) => {
let automaton = automaton_builder.build_dfa(self.term.text()); let automaton = automaton_builder.build_dfa(self.term.text());
Ok(AutomatonWeight::new(self.term.field(), automaton)) Ok(AutomatonWeight::new(self.term.field(), automaton).boost_by(self.boost))
} }
None => Err(InvalidArgument(format!( None => Err(InvalidArgument(format!(
"Levenshtein distance of {} is not allowed. Choose a value in the {:?} range", "Levenshtein distance of {} is not allowed. Choose a value in the {:?} range",

View File

@@ -27,6 +27,7 @@ use std::collections::BTreeSet;
pub struct PhraseQuery { pub struct PhraseQuery {
field: Field, field: Field,
phrase_terms: Vec<(usize, Term)>, phrase_terms: Vec<(usize, Term)>,
boost: f32,
} }
impl PhraseQuery { impl PhraseQuery {
@@ -57,9 +58,15 @@ impl PhraseQuery {
PhraseQuery { PhraseQuery {
field, field,
phrase_terms: terms, phrase_terms: terms,
boost: 1.0,
} }
} }
/// Boost the query score by the given factor.
pub fn boost_by(self, boost: f32) -> Self {
Self { boost, ..self }
}
/// The `Field` this `PhraseQuery` is targeting. /// The `Field` this `PhraseQuery` is targeting.
pub fn field(&self) -> Field { pub fn field(&self) -> Field {
self.field self.field
@@ -97,7 +104,7 @@ impl PhraseQuery {
))); )));
} }
let terms = self.phrase_terms(); let terms = self.phrase_terms();
let bm25_weight = BM25Weight::for_terms(searcher, &terms); let bm25_weight = BM25Weight::for_terms(searcher, &terms, self.boost);
Ok(PhraseWeight::new( Ok(PhraseWeight::new(
self.phrase_terms.clone(), self.phrase_terms.clone(),
bm25_weight, bm25_weight,

View File

@@ -533,7 +533,7 @@ mod test {
use crate::schema::{IndexRecordOption, TextFieldIndexing, TextOptions}; use crate::schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
use crate::schema::{Schema, Term, INDEXED, STORED, STRING, TEXT}; use crate::schema::{Schema, Term, INDEXED, STORED, STRING, TEXT};
use crate::tokenizer::{ use crate::tokenizer::{
LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager, LowerCaser, SimpleTokenizer, StopWordFilter, TextAnalyzer, TokenizerManager,
}; };
use crate::Index; use crate::Index;
use matches::assert_matches; use matches::assert_matches;
@@ -563,7 +563,7 @@ mod test {
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register( tokenizer_manager.register(
"en_with_stop_words", "en_with_stop_words",
SimpleTokenizer TextAnalyzer::from(SimpleTokenizer)
.filter(LowerCaser) .filter(LowerCaser)
.filter(StopWordFilter::remove(vec!["the".to_string()])), .filter(StopWordFilter::remove(vec!["the".to_string()])),
); );
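For reference, the same `TextAnalyzer` chain can be registered directly on an index, so that a schema field can refer to it by name. A brief sketch mirroring the test above (the field name and analyzer name are illustrative):

```rust
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, TextAnalyzer};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    let text_options = TextOptions::default().set_indexing_options(
        TextFieldIndexing::default()
            .set_tokenizer("en_with_stop_words") // must match the registered name
            .set_index_option(IndexRecordOption::WithFreqsAndPositions),
    );
    schema_builder.add_text_field("body", text_options);
    let index = Index::create_in_ram(schema_builder.build());

    // Register the analyzer under the name referenced by the schema.
    index.tokenizers().register(
        "en_with_stop_words",
        TextAnalyzer::from(SimpleTokenizer)
            .filter(LowerCaser)
            .filter(StopWordFilter::remove(vec!["the".to_string()])),
    );
}
```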

View File

@@ -54,6 +54,7 @@ use tantivy_fst::Regex;
pub struct RegexQuery { pub struct RegexQuery {
regex: Arc<Regex>, regex: Arc<Regex>,
field: Field, field: Field,
boost: f32,
} }
impl RegexQuery { impl RegexQuery {
@@ -69,11 +70,17 @@ impl RegexQuery {
RegexQuery { RegexQuery {
regex: regex.into(), regex: regex.into(),
field, field,
boost: 1.0,
} }
} }
/// Boost the query score by the given factor.
pub fn boost_by(self, boost: f32) -> Self {
Self { boost, ..self }
}
fn specialized_weight(&self) -> AutomatonWeight<Regex> { fn specialized_weight(&self) -> AutomatonWeight<Regex> {
AutomatonWeight::new(self.field, self.regex.clone()) AutomatonWeight::new(self.field, self.regex.clone()).boost_by(self.boost)
} }
} }
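The new `boost_by` builder on `FuzzyTermQuery`, `RegexQuery`, and `TermQuery` simply scales the scores produced by the resulting weight (for the automaton-based queries, via `ConstScorer::with_score`). A hedged usage sketch, assuming `FuzzyTermQuery::new(term, distance, transposition_cost_one)` and the `TopDocs` collector from `tantivy::collector`:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::FuzzyTermQuery;
use tantivy::schema::{Schema, STRING};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let name = schema_builder.add_text_field("name", STRING);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
    writer.add_document(doc!(name => "tantivy"));
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // One edit away from "tantivy"; the constant score of the fuzzy scorer is scaled by 2.0.
    let query =
        FuzzyTermQuery::new(Term::from_field_text(name, "tantivi"), 1, true).boost_by(2.0);
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    for (score, doc_address) in top_docs {
        println!("score {} for {:?}", score, doc_address);
    }
    Ok(())
}
```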

View File

@@ -56,6 +56,11 @@ impl<TDocSet: DocSet> ConstScorer<TDocSet> {
} }
} }
/// Creates a new `ConstScorer` with a custom score value
pub fn with_score(docset: TDocSet, score: f32) -> ConstScorer<TDocSet> {
ConstScorer { docset, score }
}
/// Sets the constant score to a different value. /// Sets the constant score to a different value.
pub fn set_score(&mut self, score: Score) { pub fn set_score(&mut self, score: Score) {
self.score = score; self.score = score;

View File

@@ -45,6 +45,35 @@ mod tests {
assert_eq!(term_scorer.score(), 0.28768212); assert_eq!(term_scorer.score(), 0.28768212);
} }
#[test]
pub fn test_term_query_boost_by() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
let doc = doc!(text_field => "a");
index_writer.add_document(doc);
}
assert!(index_writer.commit().is_ok());
}
let searcher = index.reader().unwrap().searcher();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "a"),
IndexRecordOption::Basic,
)
.boost_by(42.0);
let term_weight = term_query.weight(&searcher, true).unwrap();
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader).unwrap();
assert!(term_scorer.advance());
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals(0.28768212 * 42.0, term_scorer.score());
}
#[test] #[test]
pub fn test_term_weight() { pub fn test_term_weight() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -112,7 +141,6 @@ mod tests {
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
let term_query = TermQuery::new(term_a, IndexRecordOption::Basic); let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
assert_eq!(reader.searcher().segment_readers().len(), 1);
assert_eq!(term_query.count(&*reader.searcher()).unwrap(), 1); assert_eq!(term_query.count(&*reader.searcher()).unwrap(), 1);
} }

View File

@@ -61,6 +61,7 @@ use std::fmt;
pub struct TermQuery { pub struct TermQuery {
term: Term, term: Term,
index_record_option: IndexRecordOption, index_record_option: IndexRecordOption,
boost: f32,
} }
impl fmt::Debug for TermQuery { impl fmt::Debug for TermQuery {
@@ -75,9 +76,15 @@ impl TermQuery {
TermQuery { TermQuery {
term, term,
index_record_option: segment_postings_options, index_record_option: segment_postings_options,
boost: 1.0,
} }
} }
/// Boost the query score by the given factor.
pub fn boost_by(self, boost: f32) -> Self {
Self { boost, ..self }
}
/// The `Term` this query is built out of. /// The `Term` this query is built out of.
pub fn term(&self) -> &Term { pub fn term(&self) -> &Term {
&self.term &self.term
@@ -90,7 +97,7 @@ impl TermQuery {
/// This is useful for optimization purpose. /// This is useful for optimization purpose.
pub fn specialized_weight(&self, searcher: &Searcher, scoring_enabled: bool) -> TermWeight { pub fn specialized_weight(&self, searcher: &Searcher, scoring_enabled: bool) -> TermWeight {
let term = self.term.clone(); let term = self.term.clone();
let bm25_weight = BM25Weight::for_terms(searcher, &[term]); let bm25_weight = BM25Weight::for_terms(searcher, &[term], self.boost);
let index_record_option = if scoring_enabled { let index_record_option = if scoring_enabled {
self.index_record_option self.index_record_option
} else { } else {

View File

@@ -1,83 +0,0 @@
use crate::directory::{WatchCallbackList, WatchHandle};
use crate::indexer::SegmentRegisters;
use crate::reader::pool::Pool;
use crate::{Index, LeasedItem, Searcher, Segment, SegmentReader};
use std::iter::repeat_with;
use std::sync::{Arc, RwLock, Weak};
struct InnerNRTReader {
num_searchers: usize,
index: Index,
searcher_pool: Pool<Searcher>,
segment_registers: Arc<RwLock<SegmentRegisters>>,
}
impl InnerNRTReader {
fn load_segment_readers(&self) -> crate::Result<Vec<SegmentReader>> {
let segments: Vec<Segment> = {
let rlock = self.segment_registers.read().unwrap();
rlock.committed_segment()
};
segments
.iter()
.map(SegmentReader::open)
.collect::<crate::Result<Vec<SegmentReader>>>()
}
pub fn reload(&self) -> crate::Result<()> {
let segment_readers: Vec<SegmentReader> = self.load_segment_readers()?;
let schema = self.index.schema();
let searchers = repeat_with(|| {
Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
})
.take(self.num_searchers)
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
pub fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
}
#[derive(Clone)]
pub struct NRTReader {
inner: Arc<InnerNRTReader>,
watch_handle: WatchHandle,
}
impl NRTReader {
pub fn reload(&self) -> crate::Result<()> {
self.inner.reload()
}
pub fn searcher(&self) -> LeasedItem<Searcher> {
self.inner.searcher()
}
pub(crate) fn create(
num_searchers: usize,
index: Index,
segment_registers: Arc<RwLock<SegmentRegisters>>,
watch_callback_list: &WatchCallbackList,
) -> crate::Result<Self> {
let inner_reader: Arc<InnerNRTReader> = Arc::new(InnerNRTReader {
num_searchers,
index,
searcher_pool: Pool::new(),
segment_registers,
});
let inner_reader_weak: Weak<InnerNRTReader> = Arc::downgrade(&inner_reader);
let watch_handle = watch_callback_list.subscribe(Box::new(move || {
if let Some(nrt_reader_arc) = inner_reader_weak.upgrade() {
let _ = nrt_reader_arc.reload();
}
}));
inner_reader.reload()?;
Ok(NRTReader {
inner: inner_reader,
watch_handle,
})
}
}

View File

@@ -1,180 +0,0 @@
use super::pool::Pool;
use crate::core::Segment;
use crate::directory::Directory;
use crate::directory::WatchHandle;
use crate::directory::META_LOCK;
use crate::Searcher;
use crate::SegmentReader;
use crate::{Index, LeasedItem};
use crate::{IndexReader, Result};
use std::iter::repeat_with;
use std::sync::Arc;
/// Defines when a new version of the index should be reloaded.
///
/// Regardless of whether you search and index in the same process, tantivy does not necessarily
/// reflect the changes that are committed to your index. `ReloadPolicy` helps you define precisely
/// when you want your index to be reloaded.
#[derive(Clone, Copy)]
pub enum ReloadPolicy {
/// The index is entirely reloaded manually.
/// All updates of the index should be manual.
///
/// No change is reflected automatically. You are required to call `IndexReader::reload()` manually.
Manual,
/// The index is reloaded within milliseconds after a new commit is available.
/// This is made possible by watching changes in the `meta.json` file.
OnCommit, // TODO add NEAR_REAL_TIME(target_ms)
}
/// `IndexReader` builder
///
/// It makes it possible to set the following values.
///
/// - `num_searchers` (by default, the number of detected CPU threads):
///
/// When `num_searchers` searchers are already in use, any extra search request will block
/// until one of the searchers currently in use is released.
/// - `reload_policy` (by default `ReloadPolicy::OnCommit`):
///
/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
#[derive(Clone)]
pub struct IndexReaderBuilder {
num_searchers: usize,
reload_policy: ReloadPolicy,
index: Index,
}
impl IndexReaderBuilder {
pub(crate) fn new(index: Index) -> IndexReaderBuilder {
IndexReaderBuilder {
num_searchers: num_cpus::get(),
reload_policy: ReloadPolicy::Manual,
index,
}
}
/// Builds the reader.
///
/// Building the reader is a non-trivial operation that requires
/// opening the different segment readers. It may take hundreds of milliseconds
/// of time and it may return an error.
/// TODO(pmasurel) Use the `TryInto` trait once it is available in stable.
pub fn try_into(self) -> Result<IndexReader> {
let inner_reader = MetaFileIndexReaderInner {
index: self.index,
num_searchers: self.num_searchers,
searcher_pool: Pool::new(),
};
inner_reader.reload()?;
let inner_reader_arc = Arc::new(inner_reader);
let watch_handle_opt: Option<WatchHandle>;
match self.reload_policy {
ReloadPolicy::Manual => {
// No need to set anything...
watch_handle_opt = None;
}
ReloadPolicy::OnCommit => {
let inner_reader_arc_clone = inner_reader_arc.clone();
let callback = move || {
if let Err(err) = inner_reader_arc_clone.reload() {
error!(
"Error while loading searcher after commit was detected. {:?}",
err
);
}
};
let watch_handle = inner_reader_arc
.index
.directory()
.watch(Box::new(callback))?;
watch_handle_opt = Some(watch_handle);
}
}
Ok(IndexReader::from(MetaFileIndexReader {
inner: inner_reader_arc,
watch_handle_opt,
}))
}
/// Sets the reload_policy.
///
/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
self.reload_policy = reload_policy;
self
}
/// Sets the number of `Searcher` in the searcher pool.
pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
self.num_searchers = num_searchers;
self
}
}
struct MetaFileIndexReaderInner {
num_searchers: usize,
searcher_pool: Pool<Searcher>,
index: Index,
}
impl MetaFileIndexReaderInner {
fn load_segment_readers(&self) -> crate::Result<Vec<SegmentReader>> {
// We keep the lock until we have effectively finished opening the
// `SegmentReader`s, because it prevents a different process
// from garbage collecting these files while we open them.
//
// Once opened, on linux & mac, the mmap will remain valid after
// the file has been deleted
// On windows, the file deletion will fail.
let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?;
searchable_segments
.iter()
.map(SegmentReader::open)
.collect::<Result<_>>()
}
fn reload(&self) -> crate::Result<()> {
let segment_readers: Vec<SegmentReader> = self.load_segment_readers()?;
let schema = self.index.schema();
let searchers = repeat_with(|| {
Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
})
.take(self.num_searchers)
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
/// Returns the list of segments that are searchable
fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
self.index.searchable_segments()
}
fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
}
/// `IndexReader` is your entry point to read and search the index.
///
/// It controls when a new version of the index should be loaded and lends
/// you instances of `Searcher` for the last loaded version.
///
/// `Clone` does not clone the underlying pool of searchers. `IndexReader`
/// just wraps an `Arc`.
#[derive(Clone)]
pub struct MetaFileIndexReader {
inner: Arc<MetaFileIndexReaderInner>,
watch_handle_opt: Option<WatchHandle>,
}
impl MetaFileIndexReader {
pub fn reload(&self) -> crate::Result<()> {
self.inner.reload()
}
pub fn searcher(&self) -> LeasedItem<Searcher> {
self.inner.searcher()
}
}

View File

@@ -1,20 +1,172 @@
mod index_writer_reader;
mod meta_file_reader;
mod pool; mod pool;
use self::meta_file_reader::MetaFileIndexReader;
pub use self::meta_file_reader::{IndexReaderBuilder, ReloadPolicy};
pub use self::pool::LeasedItem; pub use self::pool::LeasedItem;
pub(crate) use crate::reader::index_writer_reader::{NRTReader}; use self::pool::Pool;
use crate::core::Segment;
use crate::directory::Directory;
use crate::directory::WatchHandle;
use crate::directory::META_LOCK;
use crate::Index;
use crate::Result;
use crate::Searcher; use crate::Searcher;
use crate::SegmentReader;
use std::sync::Arc;
/// Defines when a new version of the index should be reloaded.
///
/// Regardless of whether you search and index in the same process, tantivy does not necessarily
/// reflect the changes that are committed to your index. `ReloadPolicy` helps you define precisely
/// when you want your index to be reloaded.
#[derive(Clone, Copy)]
pub enum ReloadPolicy {
/// The index is entirely reloaded manually.
/// All updates of the index should be manual.
///
/// No change is reflected automatically. You are required to call `IndexReader::reload()` manually.
Manual,
/// The index is reloaded within milliseconds after a new commit is available.
/// This is made possible by watching changes in the `meta.json` file.
OnCommit, // TODO add NEAR_REAL_TIME(target_ms)
}
/// `IndexReader` builder
///
/// It makes it possible to set the following values.
///
/// - `num_searchers` (by default, the number of detected CPU threads):
///
/// When `num_searchers` searchers are already in use, any extra search request will block
/// until one of the searchers currently in use is released.
/// - `reload_policy` (by default `ReloadPolicy::OnCommit`):
///
/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
#[derive(Clone)] #[derive(Clone)]
pub enum IndexReader { pub struct IndexReaderBuilder {
FromMetaFile(MetaFileIndexReader), num_searchers: usize,
NRT(NRTReader), reload_policy: ReloadPolicy,
index: Index,
}
impl IndexReaderBuilder {
pub(crate) fn new(index: Index) -> IndexReaderBuilder {
IndexReaderBuilder {
num_searchers: num_cpus::get(),
reload_policy: ReloadPolicy::OnCommit,
index,
}
}
/// Builds the reader.
///
/// Building the reader is a non-trivial operation that requires
/// opening the different segment readers. It may take hundreds of milliseconds
/// of time and it may return an error.
/// TODO(pmasurel) Use the `TryInto` trait once it is available in stable.
pub fn try_into(self) -> Result<IndexReader> {
let inner_reader = InnerIndexReader {
index: self.index,
num_searchers: self.num_searchers,
searcher_pool: Pool::new(),
};
inner_reader.reload()?;
let inner_reader_arc = Arc::new(inner_reader);
let watch_handle_opt: Option<WatchHandle>;
match self.reload_policy {
ReloadPolicy::Manual => {
// No need to set anything...
watch_handle_opt = None;
}
ReloadPolicy::OnCommit => {
let inner_reader_arc_clone = inner_reader_arc.clone();
let callback = move || {
if let Err(err) = inner_reader_arc_clone.reload() {
error!(
"Error while loading searcher after commit was detected. {:?}",
err
);
}
};
let watch_handle = inner_reader_arc
.index
.directory()
.watch(Box::new(callback))?;
watch_handle_opt = Some(watch_handle);
}
}
Ok(IndexReader {
inner: inner_reader_arc,
watch_handle_opt,
})
}
/// Sets the reload_policy.
///
/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
self.reload_policy = reload_policy;
self
}
/// Sets the number of `Searcher` in the searcher pool.
pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
self.num_searchers = num_searchers;
self
}
}
struct InnerIndexReader {
num_searchers: usize,
searcher_pool: Pool<Searcher>,
index: Index,
}
impl InnerIndexReader {
fn reload(&self) -> Result<()> {
let segment_readers: Vec<SegmentReader> = {
let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?;
searchable_segments
.iter()
.map(SegmentReader::open)
.collect::<Result<_>>()?
};
let schema = self.index.schema();
let searchers = (0..self.num_searchers)
.map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
/// Returns the list of segments that are searchable
fn searchable_segments(&self) -> Result<Vec<Segment>> {
self.index.searchable_segments()
}
fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
}
/// `IndexReader` is your entry point to read and search the index.
///
/// It controls when a new version of the index should be loaded and lends
/// you instances of `Searcher` for the last loaded version.
///
/// `Clone` does not clone the underlying pool of searchers. `IndexReader`
/// just wraps an `Arc`.
#[derive(Clone)]
pub struct IndexReader {
inner: Arc<InnerIndexReader>,
watch_handle_opt: Option<WatchHandle>,
} }
impl IndexReader { impl IndexReader {
#[cfg(test)]
pub(crate) fn index(&self) -> Index {
self.inner.index.clone()
}
/// Update searchers so that they reflect the state of the last /// Update searchers so that they reflect the state of the last
/// `.commit()`. /// `.commit()`.
/// ///
@@ -24,11 +176,8 @@ impl IndexReader {
/// ///
/// This automatic reload can take 10s of milliseconds to kick in however, and in unit tests /// This automatic reload can take 10s of milliseconds to kick in however, and in unit tests
/// it can be nice to deterministically force the reload of searchers. /// it can be nice to deterministically force the reload of searchers.
pub fn reload(&self) -> crate::Result<()> { pub fn reload(&self) -> Result<()> {
match self { self.inner.reload()
IndexReader::FromMetaFile(meta_file_reader) => meta_file_reader.reload(),
IndexReader::NRT(nrt_reader) => nrt_reader.reload(),
}
} }
/// Returns a searcher /// Returns a searcher
@@ -42,21 +191,6 @@ impl IndexReader {
/// The same searcher must be used for a given query, as it ensures /// The same searcher must be used for a given query, as it ensures
/// the use of a consistent segment set. /// the use of a consistent segment set.
pub fn searcher(&self) -> LeasedItem<Searcher> { pub fn searcher(&self) -> LeasedItem<Searcher> {
match self { self.inner.searcher()
IndexReader::FromMetaFile(meta_file_reader) => meta_file_reader.searcher(),
IndexReader::NRT(nrt_reader) => nrt_reader.searcher(),
}
}
}
impl From<MetaFileIndexReader> for IndexReader {
fn from(meta_file_reader: MetaFileIndexReader) -> Self {
IndexReader::FromMetaFile(meta_file_reader)
}
}
impl From<NRTReader> for IndexReader {
fn from(nrt_reader: NRTReader) -> Self {
IndexReader::NRT(nrt_reader)
} }
} }
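In practice the builder above is reached from the index itself. A short sketch of manual reloading (this assumes `Index::reader_builder()` as the public entry point and `ReloadPolicy` being re-exported at the crate root):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, ReloadPolicy};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // Manual reloading: searchers only see new commits after an explicit reload().
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .num_searchers(2)
        .try_into()?;

    let _searcher = reader.searcher();
    // ... commit from an IndexWriter elsewhere ...
    reader.reload()?; // pick up the latest committed segments
    Ok(())
}
```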

View File

@@ -122,6 +122,11 @@ impl Facet {
pub fn to_path(&self) -> Vec<&str> { pub fn to_path(&self) -> Vec<&str> {
self.encoded_str().split(|c| c == FACET_SEP_CHAR).collect() self.encoded_str().split(|c| c == FACET_SEP_CHAR).collect()
} }
/// This function is the inverse of Facet::from(&str).
pub fn to_path_string(&self) -> String {
format!("{}", self.to_string())
}
} }
impl Borrow<str> for Facet { impl Borrow<str> for Facet {
@@ -265,4 +270,21 @@ mod tests {
let facet = Facet::from_path(v.iter()); let facet = Facet::from_path(v.iter());
assert_eq!(facet.to_path(), v); assert_eq!(facet.to_path(), v);
} }
#[test]
fn test_to_path_string() {
let v = ["first", "second", "third/not_fourth"];
let facet = Facet::from_path(v.iter());
assert_eq!(
facet.to_path_string(),
String::from("/first/second/third\\/not_fourth")
);
}
#[test]
fn test_to_path_string_empty() {
let v: Vec<&str> = vec![];
let facet = Facet::from_path(v.iter());
assert_eq!(facet.to_path_string(), "/");
}
} }
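A small round-trip sketch of `to_path_string`, mirroring the tests above (the facet path is illustrative):

```rust
use tantivy::schema::Facet;

fn main() {
    // `to_path_string` is documented as the inverse of `Facet::from(&str)`.
    let facet = Facet::from("/category/electronics/cameras");
    assert_eq!(facet.to_path(), vec!["category", "electronics", "cameras"]);
    assert_eq!(facet.to_path_string(), "/category/electronics/cameras");

    // Path segments containing '/' are escaped, exactly as in the test above.
    let escaped = Facet::from_path(["first", "second", "third/not_fourth"].iter());
    assert_eq!(escaped.to_path_string(), "/first/second/third\\/not_fourth");
}
```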

View File

@@ -1,8 +1,7 @@
use crate::query::Query; use crate::query::Query;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::Value; use crate::schema::Value;
use crate::tokenizer::BoxedTokenizer; use crate::tokenizer::{TextAnalyzer, Token};
use crate::tokenizer::{Token, TokenStream};
use crate::Document; use crate::Document;
use crate::Result; use crate::Result;
use crate::Searcher; use crate::Searcher;
@@ -142,7 +141,7 @@ impl Snippet {
/// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\ /// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\
/// has to be a valid string. /// has to be a valid string.
fn search_fragments<'a>( fn search_fragments<'a>(
tokenizer: &BoxedTokenizer, tokenizer: &TextAnalyzer,
text: &'a str, text: &'a str,
terms: &BTreeMap<String, f32>, terms: &BTreeMap<String, f32>,
max_num_chars: usize, max_num_chars: usize,
@@ -251,7 +250,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// ``` /// ```
pub struct SnippetGenerator { pub struct SnippetGenerator {
terms_text: BTreeMap<String, f32>, terms_text: BTreeMap<String, f32>,
tokenizer: BoxedTokenizer, tokenizer: TextAnalyzer,
field: Field, field: Field,
max_num_chars: usize, max_num_chars: usize,
} }
@@ -347,12 +346,11 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet() { fn test_snippet() {
let boxed_tokenizer = SimpleTokenizer.into();
let terms = btreemap! { let terms = btreemap! {
String::from("rust") => 1.0, String::from("rust") => 1.0,
String::from("language") => 0.9 String::from("language") => 0.9
}; };
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 100); let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 100);
assert_eq!(fragments.len(), 7); assert_eq!(fragments.len(), 7);
{ {
let first = &fragments[0]; let first = &fragments[0];
@@ -374,13 +372,12 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_scored_fragment() { fn test_snippet_scored_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
{ {
let terms = btreemap! { let terms = btreemap! {
String::from("rust") =>1.0f32, String::from("rust") =>1.0f32,
String::from("language") => 0.9f32 String::from("language") => 0.9f32
}; };
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20); let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
{ {
let first = &fragments[0]; let first = &fragments[0];
assert_eq!(first.score, 1.0); assert_eq!(first.score, 1.0);
@@ -389,13 +386,12 @@ Survey in 2016, 2017, and 2018."#;
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT); let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems") assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems")
} }
let boxed_tokenizer = SimpleTokenizer.into();
{ {
let terms = btreemap! { let terms = btreemap! {
String::from("rust") =>0.9f32, String::from("rust") =>0.9f32,
String::from("language") => 1.0f32 String::from("language") => 1.0f32
}; };
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20); let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
//assert_eq!(fragments.len(), 7); //assert_eq!(fragments.len(), 7);
{ {
let first = &fragments[0]; let first = &fragments[0];
@@ -409,14 +405,12 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_in_second_fragment() { fn test_snippet_in_second_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d e f g"; let text = "a b c d e f g";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("c"), 1.0); terms.insert(String::from("c"), 1.0);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3); let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
assert_eq!(fragments.len(), 1); assert_eq!(fragments.len(), 1);
{ {
@@ -433,14 +427,12 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_term_at_the_end_of_fragment() { fn test_snippet_with_term_at_the_end_of_fragment() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d e f f g"; let text = "a b c d e f f g";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("f"), 1.0); terms.insert(String::from("f"), 1.0);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3); let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
assert_eq!(fragments.len(), 2); assert_eq!(fragments.len(), 2);
{ {
@@ -457,15 +449,13 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_second_fragment_has_the_highest_score() { fn test_snippet_with_second_fragment_has_the_highest_score() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d e f g"; let text = "a b c d e f g";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("f"), 1.0); terms.insert(String::from("f"), 1.0);
terms.insert(String::from("a"), 0.9); terms.insert(String::from("a"), 0.9);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 7); let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 7);
assert_eq!(fragments.len(), 2); assert_eq!(fragments.len(), 2);
{ {
@@ -482,14 +472,12 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_term_not_in_text() { fn test_snippet_with_term_not_in_text() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d"; let text = "a b c d";
let mut terms = BTreeMap::new(); let mut terms = BTreeMap::new();
terms.insert(String::from("z"), 1.0); terms.insert(String::from("z"), 1.0);
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3); let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
assert_eq!(fragments.len(), 0); assert_eq!(fragments.len(), 0);
@@ -500,12 +488,10 @@ Survey in 2016, 2017, and 2018."#;
#[test] #[test]
fn test_snippet_with_no_terms() { fn test_snippet_with_no_terms() {
let boxed_tokenizer = SimpleTokenizer.into();
let text = "a b c d"; let text = "a b c d";
let terms = BTreeMap::new(); let terms = BTreeMap::new();
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3); let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
assert_eq!(fragments.len(), 0); assert_eq!(fragments.len(), 0);
let snippet = select_best_fragment_combination(&fragments[..], &text); let snippet = select_best_fragment_combination(&fragments[..], &text);

View File

@@ -38,7 +38,7 @@ mod tests {
use crate::core::Index; use crate::core::Index;
use crate::directory::{Directory, RAMDirectory, ReadOnlySource}; use crate::directory::{Directory, RAMDirectory, ReadOnlySource};
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::schema::{Document, FieldType, Schema, TEXT}; use crate::schema::{Document, Schema, TEXT};
use std::path::PathBuf; use std::path::PathBuf;
use std::str; use std::str;
@@ -52,6 +52,12 @@ mod tests {
} }
} }
#[test]
fn test_empty_term_dictionary() {
let empty = TermDictionary::empty();
assert!(empty.stream().next().is_none());
}
#[test] #[test]
fn test_term_ordinals() { fn test_term_ordinals() {
const COUNTRIES: [&'static str; 7] = [ const COUNTRIES: [&'static str; 7] = [
@@ -67,9 +73,7 @@ mod tests {
let path = PathBuf::from("TermDictionary"); let path = PathBuf::from("TermDictionary");
{ {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let field_type = FieldType::Str(TEXT); let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
let mut term_dictionary_builder =
TermDictionaryBuilder::create(write, &field_type).unwrap();
for term in COUNTRIES.iter() { for term in COUNTRIES.iter() {
term_dictionary_builder term_dictionary_builder
.insert(term.as_bytes(), &make_term_info(0u64)) .insert(term.as_bytes(), &make_term_info(0u64))
@@ -93,9 +97,7 @@ mod tests {
let path = PathBuf::from("TermDictionary"); let path = PathBuf::from("TermDictionary");
{ {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let field_type = FieldType::Str(TEXT); let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
let mut term_dictionary_builder =
TermDictionaryBuilder::create(write, &field_type).unwrap();
term_dictionary_builder term_dictionary_builder
.insert("abc".as_bytes(), &make_term_info(34u64)) .insert("abc".as_bytes(), &make_term_info(34u64))
.unwrap(); .unwrap();
@@ -179,10 +181,8 @@ mod tests {
let ids: Vec<_> = (0u32..10_000u32) let ids: Vec<_> = (0u32..10_000u32)
.map(|i| (format!("doc{:0>6}", i), i)) .map(|i| (format!("doc{:0>6}", i), i))
.collect(); .collect();
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for &(ref id, ref i) in &ids { for &(ref id, ref i) in &ids {
term_dictionary_builder term_dictionary_builder
.insert(id.as_bytes(), &make_term_info(*i as u64)) .insert(id.as_bytes(), &make_term_info(*i as u64))
@@ -209,10 +209,8 @@ mod tests {
#[test] #[test]
fn test_stream_high_range_prefix_suffix() { fn test_stream_high_range_prefix_suffix() {
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
// term requires more than 16bits // term requires more than 16bits
term_dictionary_builder term_dictionary_builder
.insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1)) .insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))
@@ -244,10 +242,8 @@ mod tests {
let ids: Vec<_> = (0u32..10_000u32) let ids: Vec<_> = (0u32..10_000u32)
.map(|i| (format!("doc{:0>6}", i), i)) .map(|i| (format!("doc{:0>6}", i), i))
.collect(); .collect();
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for &(ref id, ref i) in &ids { for &(ref id, ref i) in &ids {
term_dictionary_builder term_dictionary_builder
.insert(id.as_bytes(), &make_term_info(*i as u64)) .insert(id.as_bytes(), &make_term_info(*i as u64))
@@ -313,10 +309,8 @@ mod tests {
#[test] #[test]
fn test_empty_string() { fn test_empty_string() {
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
term_dictionary_builder term_dictionary_builder
.insert(&[], &make_term_info(1 as u64)) .insert(&[], &make_term_info(1 as u64))
.unwrap(); .unwrap();
@@ -337,10 +331,8 @@ mod tests {
#[test] #[test]
fn test_stream_range_boundaries() { fn test_stream_range_boundaries() {
let field_type = FieldType::Str(TEXT);
let buffer: Vec<u8> = { let buffer: Vec<u8> = {
let mut term_dictionary_builder = let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
for i in 0u8..10u8 { for i in 0u8..10u8 {
let number_arr = [i; 1]; let number_arr = [i; 1];
term_dictionary_builder term_dictionary_builder
@@ -352,41 +344,91 @@ mod tests {
let source = ReadOnlySource::from(buffer); let source = ReadOnlySource::from(buffer);
let term_dictionary: TermDictionary = TermDictionary::from_source(&source); let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
let value_list = |mut streamer: TermStreamer<'_>| { let value_list = |mut streamer: TermStreamer<'_>, backwards: bool| {
let mut res: Vec<u32> = vec![]; let mut res: Vec<u32> = vec![];
while let Some((_, ref v)) = streamer.next() { while let Some((_, ref v)) = streamer.next() {
res.push(v.doc_freq); res.push(v.doc_freq);
} }
if backwards {
res.reverse();
}
res res
}; };
{
let range = term_dictionary.range().backward().into_stream();
assert_eq!(
value_list(range, true),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
);
}
{ {
let range = term_dictionary.range().ge([2u8]).into_stream(); let range = term_dictionary.range().ge([2u8]).into_stream();
assert_eq!( assert_eq!(
value_list(range), value_list(range, false),
vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
);
}
{
let range = term_dictionary.range().ge([2u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32] vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
); );
} }
{ {
let range = term_dictionary.range().gt([2u8]).into_stream(); let range = term_dictionary.range().gt([2u8]).into_stream();
assert_eq!( assert_eq!(
value_list(range), value_list(range, false),
vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
);
}
{
let range = term_dictionary.range().gt([2u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32] vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
); );
} }
{ {
let range = term_dictionary.range().lt([6u8]).into_stream(); let range = term_dictionary.range().lt([6u8]).into_stream();
assert_eq!(value_list(range), vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]); assert_eq!(
value_list(range, false),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]
);
}
{
let range = term_dictionary.range().lt([6u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]
);
} }
{ {
let range = term_dictionary.range().le([6u8]).into_stream(); let range = term_dictionary.range().le([6u8]).into_stream();
assert_eq!( assert_eq!(
value_list(range), value_list(range, false),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32]
);
}
{
let range = term_dictionary.range().le([6u8]).backward().into_stream();
assert_eq!(
value_list(range, true),
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32] vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32]
); );
} }
{ {
let range = term_dictionary.range().ge([0u8]).lt([5u8]).into_stream(); let range = term_dictionary.range().ge([0u8]).lt([5u8]).into_stream();
assert_eq!(value_list(range), vec![0u32, 1u32, 2u32, 3u32, 4u32]); assert_eq!(value_list(range, false), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
}
{
let range = term_dictionary
.range()
.ge([0u8])
.lt([5u8])
.backward()
.into_stream();
assert_eq!(value_list(range, true), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
} }
} }
@@ -408,9 +450,7 @@ mod tests {
let path = PathBuf::from("TermDictionary"); let path = PathBuf::from("TermDictionary");
{ {
let write = directory.open_write(&path).unwrap(); let write = directory.open_write(&path).unwrap();
let field_type = FieldType::Str(TEXT); let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
let mut term_dictionary_builder =
TermDictionaryBuilder::create(write, &field_type).unwrap();
for term in COUNTRIES.iter() { for term in COUNTRIES.iter() {
term_dictionary_builder term_dictionary_builder
.insert(term.as_bytes(), &make_term_info(0u64)) .insert(term.as_bytes(), &make_term_info(0u64))
@@ -51,6 +51,12 @@ where
self self
} }
/// Iterate over the range backwards.
pub fn backward(mut self) -> Self {
self.stream_builder = self.stream_builder.backward();
self
}
/// Creates the stream corresponding to the range /// Creates the stream corresponding to the range
/// of terms defined using the `TermStreamerBuilder`. /// of terms defined using the `TermStreamerBuilder`.
pub fn into_stream(self) -> TermStreamer<'a, A> { pub fn into_stream(self) -> TermStreamer<'a, A> {
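A minimal usage sketch of the new flag, assuming `term_dict` is a `TermDictionary` opened elsewhere (e.g. from a segment's inverted index reader); the key values are illustrative. The bounds behave exactly as before, `backward()` only flips the iteration order:

```rust
// Stream the terms in [b"doc000100", b"doc000200") in reverse lexicographic order.
let mut stream = term_dict
    .range()
    .ge(b"doc000100")
    .lt(b"doc000200")
    .backward()
    .into_stream();
while let Some((term_bytes, term_info)) = stream.next() {
    println!("{:?}: doc_freq={}", term_bytes, term_info.doc_freq);
}
```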
@@ -4,8 +4,8 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter; use crate::common::CountingWriter;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::schema::FieldType;
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use once_cell::sync::Lazy;
use std::io::{self, Write}; use std::io::{self, Write};
use tantivy_fst; use tantivy_fst;
use tantivy_fst::raw::Fst; use tantivy_fst::raw::Fst;
@@ -29,7 +29,7 @@ where
W: Write, W: Write,
{ {
/// Creates a new `TermDictionaryBuilder` /// Creates a new `TermDictionaryBuilder`
pub fn create(w: W, _field_type: &FieldType) -> io::Result<Self> { pub fn create(w: W) -> io::Result<Self> {
let fst_builder = tantivy_fst::MapBuilder::new(w).map_err(convert_fst_error)?; let fst_builder = tantivy_fst::MapBuilder::new(w).map_err(convert_fst_error)?;
Ok(TermDictionaryBuilder { Ok(TermDictionaryBuilder {
fst_builder, fst_builder,
@@ -92,6 +92,14 @@ fn open_fst_index(source: ReadOnlySource) -> tantivy_fst::Map<ReadOnlySource> {
tantivy_fst::Map::from(fst) tantivy_fst::Map::from(fst)
} }
static EMPTY_DATA_SOURCE: Lazy<ReadOnlySource> = Lazy::new(|| {
let term_dictionary_data: Vec<u8> = TermDictionaryBuilder::create(Vec::<u8>::new())
.expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
.finish()
.expect("Writing in a Vec<u8> should never fail");
ReadOnlySource::from(term_dictionary_data)
});
/// The term dictionary contains all of the terms in /// The term dictionary contains all of the terms in
/// `tantivy index` in a sorted manner. /// `tantivy index` in a sorted manner.
/// ///
@@ -122,14 +130,8 @@ impl TermDictionary {
} }
/// Creates an empty term dictionary which contains no terms. /// Creates an empty term dictionary which contains no terms.
pub fn empty(field_type: &FieldType) -> Self { pub fn empty() -> Self {
let term_dictionary_data: Vec<u8> = TermDictionary::from_source(&*EMPTY_DATA_SOURCE)
TermDictionaryBuilder::create(Vec::<u8>::new(), &field_type)
.expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
.finish()
.expect("Writing in a Vec<u8> should never fail");
let source = ReadOnlySource::from(term_dictionary_data);
Self::from_source(&source)
} }
/// Returns the number of terms in the dictionary. /// Returns the number of terms in the dictionary.
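For reference, a sketch of the new call site: `empty()` no longer takes a `FieldType`, and the serialized empty dictionary is built once behind `once_cell`'s `Lazy` and shared across calls.

```rust
// Before: TermDictionary::empty(&FieldType::Str(TEXT))
// After (0.12): no argument; repeated calls reuse the same precomputed data source.
let empty = TermDictionary::empty();
assert!(empty.stream().next().is_none());
```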
@@ -2,7 +2,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let tokenizer = RawTokenizer //! let tokenizer = TextAnalyzer::from(RawTokenizer)
//! .filter(AlphaNumOnlyFilter); //! .filter(AlphaNumOnlyFilter);
//! //!
//! let mut stream = tokenizer.token_stream("hello there"); //! let mut stream = tokenizer.token_stream("hello there");
@@ -10,7 +10,7 @@
//! // contains a space //! // contains a space
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! //!
//! let tokenizer = SimpleTokenizer //! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
//! .filter(AlphaNumOnlyFilter); //! .filter(AlphaNumOnlyFilter);
//! //!
//! let mut stream = tokenizer.token_stream("hello there 💣"); //! let mut stream = tokenizer.token_stream("hello there 💣");
@@ -19,56 +19,30 @@
//! // the "emoji" is dropped because its not an alphanum //! // the "emoji" is dropped because its not an alphanum
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! ``` //! ```
use super::{Token, TokenFilter, TokenStream}; use super::{BoxTokenStream, Token, TokenFilter, TokenStream};
/// `TokenFilter` that removes all tokens that contain non /// `TokenFilter` that removes all tokens that contain non
/// ascii alphanumeric characters. /// ascii alphanumeric characters.
#[derive(Clone)] #[derive(Clone)]
pub struct AlphaNumOnlyFilter; pub struct AlphaNumOnlyFilter;
pub struct AlphaNumOnlyFilterStream<TailTokenStream> pub struct AlphaNumOnlyFilterStream<'a> {
where tail: BoxTokenStream<'a>,
TailTokenStream: TokenStream,
{
tail: TailTokenStream,
} }
impl<TailTokenStream> AlphaNumOnlyFilterStream<TailTokenStream> impl<'a> AlphaNumOnlyFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
fn predicate(&self, token: &Token) -> bool { fn predicate(&self, token: &Token) -> bool {
token.text.chars().all(|c| c.is_ascii_alphanumeric()) token.text.chars().all(|c| c.is_ascii_alphanumeric())
} }
}
fn wrap(tail: TailTokenStream) -> AlphaNumOnlyFilterStream<TailTokenStream> { impl TokenFilter for AlphaNumOnlyFilter {
AlphaNumOnlyFilterStream { tail } fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
BoxTokenStream::from(AlphaNumOnlyFilterStream { tail: token_stream })
} }
} }
impl<TailTokenStream> TokenFilter<TailTokenStream> for AlphaNumOnlyFilter impl<'a> TokenStream for AlphaNumOnlyFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
type ResultTokenStream = AlphaNumOnlyFilterStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
AlphaNumOnlyFilterStream::wrap(token_stream)
}
}
impl<TailTokenStream> TokenStream for AlphaNumOnlyFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.tail.advance() { while self.tail.advance() {
if self.predicate(self.tail.token()) { if self.predicate(self.tail.token()) {
@@ -78,4 +52,12 @@ where
false false
} }
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
} }
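The same migration pattern applies to user-defined filters. Below is a sketch of a hypothetical `MinLengthFilter` (the name and the length rule are illustrative, not part of tantivy): `transform` boxes the wrapped stream, and the stream struct holds its tail as a `BoxTokenStream` instead of being generic over it.

```rust
use tantivy::tokenizer::{BoxTokenStream, Token, TokenFilter, TokenStream};

/// Hypothetical filter that drops tokens shorter than `min_len` bytes.
#[derive(Clone)]
pub struct MinLengthFilter {
    min_len: usize,
}

pub struct MinLengthFilterStream<'a> {
    min_len: usize,
    tail: BoxTokenStream<'a>,
}

impl TokenFilter for MinLengthFilter {
    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
        BoxTokenStream::from(MinLengthFilterStream {
            min_len: self.min_len,
            tail: token_stream,
        })
    }
}

impl<'a> TokenStream for MinLengthFilterStream<'a> {
    fn advance(&mut self) -> bool {
        // Skip tokens that are too short; stop at the first one that is long enough.
        while self.tail.advance() {
            if self.tail.token().text.len() >= self.min_len {
                return true;
            }
        }
        false
    }

    fn token(&self) -> &Token {
        self.tail.token()
    }

    fn token_mut(&mut self) -> &mut Token {
        self.tail.token_mut()
    }
}
```

Such a filter plugs in like the built-in ones, e.g. `TextAnalyzer::from(SimpleTokenizer).filter(MinLengthFilter { min_len: 3 })`.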
@@ -1,4 +1,4 @@
use super::{Token, TokenFilter, TokenStream}; use super::{BoxTokenStream, Token, TokenFilter, TokenStream};
use std::mem; use std::mem;
/// This class converts alphabetic, numeric, and symbolic Unicode characters /// This class converts alphabetic, numeric, and symbolic Unicode characters
@@ -7,26 +7,21 @@ use std::mem;
#[derive(Clone)] #[derive(Clone)]
pub struct AsciiFoldingFilter; pub struct AsciiFoldingFilter;
impl<TailTokenStream> TokenFilter<TailTokenStream> for AsciiFoldingFilter impl TokenFilter for AsciiFoldingFilter {
where fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
TailTokenStream: TokenStream, From::from(AsciiFoldingFilterTokenStream {
{ tail: token_stream,
type ResultTokenStream = AsciiFoldingFilterTokenStream<TailTokenStream>; buffer: String::with_capacity(100),
})
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
AsciiFoldingFilterTokenStream::wrap(token_stream)
} }
} }
pub struct AsciiFoldingFilterTokenStream<TailTokenStream> { pub struct AsciiFoldingFilterTokenStream<'a> {
buffer: String, buffer: String,
tail: TailTokenStream, tail: BoxTokenStream<'a>,
} }
impl<TailTokenStream> TokenStream for AsciiFoldingFilterTokenStream<TailTokenStream> impl<'a> TokenStream for AsciiFoldingFilterTokenStream<'a> {
where
TailTokenStream: TokenStream,
{
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
if !self.tail.advance() { if !self.tail.advance() {
return false; return false;
@@ -48,18 +43,6 @@ where
} }
} }
impl<TailTokenStream> AsciiFoldingFilterTokenStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn wrap(tail: TailTokenStream) -> AsciiFoldingFilterTokenStream<TailTokenStream> {
AsciiFoldingFilterTokenStream {
tail,
buffer: String::with_capacity(100),
}
}
}
// Returns a string that represents the ascii folded version of // Returns a string that represents the ascii folded version of
// the character. If the `char` does not require ascii folding // the character. If the `char` does not require ascii folding
// (e.g. simple ASCII chars like `A`) or if the `char` // (e.g. simple ASCII chars like `A`) or if the `char`
@@ -1561,8 +1544,7 @@ mod tests {
use crate::tokenizer::AsciiFoldingFilter; use crate::tokenizer::AsciiFoldingFilter;
use crate::tokenizer::RawTokenizer; use crate::tokenizer::RawTokenizer;
use crate::tokenizer::SimpleTokenizer; use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::TokenStream; use crate::tokenizer::TextAnalyzer;
use crate::tokenizer::Tokenizer;
use std::iter; use std::iter;
#[test] #[test]
@@ -1579,7 +1561,7 @@ mod tests {
fn folding_helper(text: &str) -> Vec<String> { fn folding_helper(text: &str) -> Vec<String> {
let mut tokens = Vec::new(); let mut tokens = Vec::new();
SimpleTokenizer TextAnalyzer::from(SimpleTokenizer)
.filter(AsciiFoldingFilter) .filter(AsciiFoldingFilter)
.token_stream(text) .token_stream(text)
.process(&mut |token| { .process(&mut |token| {
@@ -1589,7 +1571,9 @@ mod tests {
} }
fn folding_using_raw_tokenizer_helper(text: &str) -> String { fn folding_using_raw_tokenizer_helper(text: &str) -> String {
let mut token_stream = RawTokenizer.filter(AsciiFoldingFilter).token_stream(text); let mut token_stream = TextAnalyzer::from(RawTokenizer)
.filter(AsciiFoldingFilter)
.token_stream(text);
token_stream.advance(); token_stream.advance();
token_stream.token().text.clone() token_stream.token().text.clone()
} }
@@ -1,4 +1,4 @@
use super::{Token, TokenStream, Tokenizer}; use super::{BoxTokenStream, Token, TokenStream, Tokenizer};
use crate::schema::FACET_SEP_BYTE; use crate::schema::FACET_SEP_BYTE;
/// The `FacetTokenizer` process a `Facet` binary representation /// The `FacetTokenizer` process a `Facet` binary representation
@@ -25,15 +25,14 @@ pub struct FacetTokenStream<'a> {
token: Token, token: Token,
} }
impl<'a> Tokenizer<'a> for FacetTokenizer { impl Tokenizer for FacetTokenizer {
type TokenStreamImpl = FacetTokenStream<'a>; fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
FacetTokenStream { FacetTokenStream {
text, text,
state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet. state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet.
token: Token::default(), token: Token::default(),
} }
.into()
} }
} }
@@ -84,7 +83,7 @@ mod tests {
use super::FacetTokenizer; use super::FacetTokenizer;
use crate::schema::Facet; use crate::schema::Facet;
use crate::tokenizer::{Token, TokenStream, Tokenizer}; use crate::tokenizer::{Token, Tokenizer};
#[test] #[test]
fn test_facet_tokenizer() { fn test_facet_tokenizer() {
@@ -1,24 +1,23 @@
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
use std::mem; use std::mem;
impl TokenFilter for LowerCaser {
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
BoxTokenStream::from(LowerCaserTokenStream {
tail: token_stream,
buffer: String::with_capacity(100),
})
}
}
/// Token filter that lowercase terms. /// Token filter that lowercase terms.
#[derive(Clone)] #[derive(Clone)]
pub struct LowerCaser; pub struct LowerCaser;
impl<TailTokenStream> TokenFilter<TailTokenStream> for LowerCaser pub struct LowerCaserTokenStream<'a> {
where
TailTokenStream: TokenStream,
{
type ResultTokenStream = LowerCaserTokenStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
LowerCaserTokenStream::wrap(token_stream)
}
}
pub struct LowerCaserTokenStream<TailTokenStream> {
buffer: String, buffer: String,
tail: TailTokenStream, tail: BoxTokenStream<'a>,
} }
// writes a lowercased version of text into output. // writes a lowercased version of text into output.
@@ -31,18 +30,7 @@ fn to_lowercase_unicode(text: &mut String, output: &mut String) {
} }
} }
impl<TailTokenStream> TokenStream for LowerCaserTokenStream<TailTokenStream> impl<'a> TokenStream for LowerCaserTokenStream<'a> {
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
if !self.tail.advance() { if !self.tail.advance() {
return false; return false;
@@ -56,26 +44,19 @@ where
} }
true true
} }
}
impl<TailTokenStream> LowerCaserTokenStream<TailTokenStream> fn token(&self) -> &Token {
where self.tail.token()
TailTokenStream: TokenStream, }
{
fn wrap(tail: TailTokenStream) -> LowerCaserTokenStream<TailTokenStream> { fn token_mut(&mut self) -> &mut Token {
LowerCaserTokenStream { self.tail.token_mut()
tail,
buffer: String::with_capacity(100),
}
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::tokenizer::LowerCaser; use crate::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::TokenStream;
use crate::tokenizer::Tokenizer;
#[test] #[test]
fn test_to_lower_case() { fn test_to_lower_case() {
@@ -87,7 +68,9 @@ mod tests {
fn lowercase_helper(text: &str) -> Vec<String> { fn lowercase_helper(text: &str) -> Vec<String> {
let mut tokens = vec![]; let mut tokens = vec![];
let mut token_stream = SimpleTokenizer.filter(LowerCaser).token_stream(text); let mut token_stream = TextAnalyzer::from(SimpleTokenizer)
.filter(LowerCaser)
.token_stream(text);
while token_stream.advance() { while token_stream.advance() {
let token_text = token_stream.token().text.clone(); let token_text = token_stream.token().text.clone();
tokens.push(token_text); tokens.push(token_text);
@@ -64,7 +64,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let en_stem = SimpleTokenizer //! let en_stem = TextAnalyzer::from(SimpleTokenizer)
//! .filter(RemoveLongFilter::limit(40)) //! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser) //! .filter(LowerCaser)
//! .filter(Stemmer::new(Language::English)); //! .filter(Stemmer::new(Language::English));
@@ -109,7 +109,7 @@
//! let index = Index::create_in_ram(schema); //! let index = Index::create_in_ram(schema);
//! //!
//! // We need to register our tokenizer : //! // We need to register our tokenizer :
//! let custom_en_tokenizer = SimpleTokenizer //! let custom_en_tokenizer = TextAnalyzer::from(SimpleTokenizer)
//! .filter(RemoveLongFilter::limit(40)) //! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser); //! .filter(LowerCaser);
//! index //! index
@@ -143,10 +143,11 @@ pub use self::simple_tokenizer::SimpleTokenizer;
pub use self::stemmer::{Language, Stemmer}; pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter; pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain; pub(crate) use self::token_stream_chain::TokenStreamChain;
pub use self::tokenizer::BoxedTokenizer;
pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString}; pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer}; pub use self::tokenizer::{
BoxTokenFilter, BoxTokenStream, TextAnalyzer, Token, TokenFilter, TokenStream, Tokenizer,
};
pub use self::tokenizer_manager::TokenizerManager; pub use self::tokenizer_manager::TokenizerManager;
@@ -160,9 +161,9 @@ pub const MAX_TOKEN_LEN: usize = u16::max_value() as usize - 4;
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::{ use super::{
Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, Tokenizer, Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, TokenizerManager,
TokenizerManager,
}; };
use crate::tokenizer::TextAnalyzer;
/// This is a function that can be used in tests and doc tests /// This is a function that can be used in tests and doc tests
/// to assert a token's correctness. /// to assert a token's correctness.
@@ -229,7 +230,7 @@ pub mod tests {
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register( tokenizer_manager.register(
"el_stem", "el_stem",
SimpleTokenizer TextAnalyzer::from(SimpleTokenizer)
.filter(RemoveLongFilter::limit(40)) .filter(RemoveLongFilter::limit(40))
.filter(LowerCaser) .filter(LowerCaser)
.filter(Stemmer::new(Language::Greek)), .filter(Stemmer::new(Language::Greek)),
@@ -1,4 +1,5 @@
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use crate::tokenizer::BoxTokenStream;
/// Tokenize the text by splitting words into n-grams of the given size(s) /// Tokenize the text by splitting words into n-grams of the given size(s)
/// ///
@@ -129,11 +130,9 @@ pub struct NgramTokenStream<'a> {
token: Token, token: Token,
} }
impl<'a> Tokenizer<'a> for NgramTokenizer { impl Tokenizer for NgramTokenizer {
type TokenStreamImpl = NgramTokenStream<'a>; fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
From::from(NgramTokenStream {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
NgramTokenStream {
ngram_charidx_iterator: StutteringIterator::new( ngram_charidx_iterator: StutteringIterator::new(
CodepointFrontiers::for_str(text), CodepointFrontiers::for_str(text),
self.min_gram, self.min_gram,
@@ -142,7 +141,7 @@ impl<'a> Tokenizer<'a> for NgramTokenizer {
prefix_only: self.prefix_only, prefix_only: self.prefix_only,
text, text,
token: Token::default(), token: Token::default(),
} })
} }
} }
@@ -308,10 +307,10 @@ mod tests {
use super::NgramTokenizer; use super::NgramTokenizer;
use super::StutteringIterator; use super::StutteringIterator;
use crate::tokenizer::tests::assert_token; use crate::tokenizer::tests::assert_token;
use crate::tokenizer::tokenizer::{TokenStream, Tokenizer}; use crate::tokenizer::tokenizer::Tokenizer;
use crate::tokenizer::Token; use crate::tokenizer::{BoxTokenStream, Token};
fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> { fn test_helper(mut tokenizer: BoxTokenStream) -> Vec<Token> {
let mut tokens: Vec<Token> = vec![]; let mut tokens: Vec<Token> = vec![];
tokenizer.process(&mut |token: &Token| tokens.push(token.clone())); tokenizer.process(&mut |token: &Token| tokens.push(token.clone()));
tokens tokens
@@ -1,4 +1,5 @@
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use crate::tokenizer::BoxTokenStream;
/// For each value of the field, emit a single unprocessed token. /// For each value of the field, emit a single unprocessed token.
#[derive(Clone)] #[derive(Clone)]
@@ -9,10 +10,8 @@ pub struct RawTokenStream {
has_token: bool, has_token: bool,
} }
impl<'a> Tokenizer<'a> for RawTokenizer { impl Tokenizer for RawTokenizer {
type TokenStreamImpl = RawTokenStream; fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
let token = Token { let token = Token {
offset_from: 0, offset_from: 0,
offset_to: text.len(), offset_to: text.len(),
@@ -24,6 +23,7 @@ impl<'a> Tokenizer<'a> for RawTokenizer {
token, token,
has_token: true, has_token: true,
} }
.into()
} }

@@ -2,7 +2,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let tokenizer = SimpleTokenizer //! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
//! .filter(RemoveLongFilter::limit(5)); //! .filter(RemoveLongFilter::limit(5));
//! //!
//! let mut stream = tokenizer.token_stream("toolong nice"); //! let mut stream = tokenizer.token_stream("toolong nice");
@@ -13,6 +13,7 @@
//! ``` //! ```
//! //!
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
/// `RemoveLongFilter` removes tokens that are longer /// `RemoveLongFilter` removes tokens that are longer
/// than a given number of bytes (in UTF-8 representation). /// than a given number of bytes (in UTF-8 representation).
@@ -31,56 +32,27 @@ impl RemoveLongFilter {
} }
} }
impl<TailTokenStream> RemoveLongFilterStream<TailTokenStream> impl<'a> RemoveLongFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
fn predicate(&self, token: &Token) -> bool { fn predicate(&self, token: &Token) -> bool {
token.text.len() < self.token_length_limit token.text.len() < self.token_length_limit
} }
}
fn wrap( impl TokenFilter for RemoveLongFilter {
token_length_limit: usize, fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
tail: TailTokenStream, BoxTokenStream::from(RemoveLongFilterStream {
) -> RemoveLongFilterStream<TailTokenStream> { token_length_limit: self.length_limit,
RemoveLongFilterStream { tail: token_stream,
token_length_limit, })
tail,
}
} }
} }
impl<TailTokenStream> TokenFilter<TailTokenStream> for RemoveLongFilter pub struct RemoveLongFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
type ResultTokenStream = RemoveLongFilterStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
RemoveLongFilterStream::wrap(self.length_limit, token_stream)
}
}
pub struct RemoveLongFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
token_length_limit: usize, token_length_limit: usize,
tail: TailTokenStream, tail: BoxTokenStream<'a>,
} }
impl<TailTokenStream> TokenStream for RemoveLongFilterStream<TailTokenStream> impl<'a> TokenStream for RemoveLongFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.tail.advance() { while self.tail.advance() {
if self.predicate(self.tail.token()) { if self.predicate(self.tail.token()) {
@@ -89,4 +61,12 @@ where
} }
false false
} }
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}

@@ -1,3 +1,4 @@
use super::BoxTokenStream;
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use std::str::CharIndices; use std::str::CharIndices;
@@ -11,15 +12,13 @@ pub struct SimpleTokenStream<'a> {
token: Token, token: Token,
} }
impl<'a> Tokenizer<'a> for SimpleTokenizer { impl Tokenizer for SimpleTokenizer {
type TokenStreamImpl = SimpleTokenStream<'a>; fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
BoxTokenStream::from(SimpleTokenStream {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
SimpleTokenStream {
text, text,
chars: text.char_indices(), chars: text.char_indices(),
token: Token::default(), token: Token::default(),
} })
} }
} }
@@ -1,4 +1,5 @@
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
use rust_stemmers::{self, Algorithm}; use rust_stemmers::{self, Algorithm};
/// Available stemmer languages. /// Available stemmer languages.
@@ -75,38 +76,22 @@ impl Default for Stemmer {
} }
} }
impl<TailTokenStream> TokenFilter<TailTokenStream> for Stemmer impl TokenFilter for Stemmer {
where fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
TailTokenStream: TokenStream,
{
type ResultTokenStream = StemmerTokenStream<TailTokenStream>;
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
let inner_stemmer = rust_stemmers::Stemmer::create(self.stemmer_algorithm); let inner_stemmer = rust_stemmers::Stemmer::create(self.stemmer_algorithm);
StemmerTokenStream::wrap(inner_stemmer, token_stream) BoxTokenStream::from(StemmerTokenStream {
tail: token_stream,
stemmer: inner_stemmer,
})
} }
} }
pub struct StemmerTokenStream<TailTokenStream> pub struct StemmerTokenStream<'a> {
where tail: BoxTokenStream<'a>,
TailTokenStream: TokenStream,
{
tail: TailTokenStream,
stemmer: rust_stemmers::Stemmer, stemmer: rust_stemmers::Stemmer,
} }
impl<TailTokenStream> TokenStream for StemmerTokenStream<TailTokenStream> impl<'a> TokenStream for StemmerTokenStream<'a> {
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
if !self.tail.advance() { if !self.tail.advance() {
return false; return false;
@@ -117,16 +102,12 @@ where
self.token_mut().text.push_str(&stemmed_str); self.token_mut().text.push_str(&stemmed_str);
true true
} }
}
impl<TailTokenStream> StemmerTokenStream<TailTokenStream> fn token(&self) -> &Token {
where self.tail.token()
TailTokenStream: TokenStream, }
{
fn wrap( fn token_mut(&mut self) -> &mut Token {
stemmer: rust_stemmers::Stemmer, self.tail.token_mut()
tail: TailTokenStream,
) -> StemmerTokenStream<TailTokenStream> {
StemmerTokenStream { tail, stemmer }
} }
} }
@@ -2,7 +2,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! let tokenizer = SimpleTokenizer //! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
//! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()])); //! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
//! //!
//! let mut stream = tokenizer.token_stream("the fox is crafty"); //! let mut stream = tokenizer.token_stream("the fox is crafty");
@@ -11,6 +11,7 @@
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! ``` //! ```
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;
use fnv::FnvHasher; use fnv::FnvHasher;
use std::collections::HashSet; use std::collections::HashSet;
use std::hash::BuildHasherDefault; use std::hash::BuildHasherDefault;
@@ -48,53 +49,27 @@ impl StopWordFilter {
} }
} }
pub struct StopWordFilterStream<TailTokenStream> pub struct StopWordFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
words: StopWordHashSet, words: StopWordHashSet,
tail: TailTokenStream, tail: BoxTokenStream<'a>,
} }
impl<TailTokenStream> TokenFilter<TailTokenStream> for StopWordFilter impl TokenFilter for StopWordFilter {
where fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
TailTokenStream: TokenStream, BoxTokenStream::from(StopWordFilterStream {
{ words: self.words.clone(),
type ResultTokenStream = StopWordFilterStream<TailTokenStream>; tail: token_stream,
})
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
StopWordFilterStream::wrap(self.words.clone(), token_stream)
} }
} }
impl<TailTokenStream> StopWordFilterStream<TailTokenStream> impl<'a> StopWordFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
fn predicate(&self, token: &Token) -> bool { fn predicate(&self, token: &Token) -> bool {
!self.words.contains(&token.text) !self.words.contains(&token.text)
} }
fn wrap(
words: StopWordHashSet,
tail: TailTokenStream,
) -> StopWordFilterStream<TailTokenStream> {
StopWordFilterStream { words, tail }
}
} }
impl<TailTokenStream> TokenStream for StopWordFilterStream<TailTokenStream> impl<'a> TokenStream for StopWordFilterStream<'a> {
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.tail.advance() { while self.tail.advance() {
if self.predicate(self.tail.token()) { if self.predicate(self.tail.token()) {
@@ -103,6 +78,14 @@ where
} }
false false
} }
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
} }
impl Default for StopWordFilter { impl Default for StopWordFilter {
@@ -1,23 +1,21 @@
use crate::tokenizer::{Token, TokenStream}; use crate::tokenizer::{BoxTokenStream, Token, TokenStream};
use std::ops::DerefMut;
const POSITION_GAP: usize = 2; const POSITION_GAP: usize = 2;
pub(crate) struct TokenStreamChain<TTokenStream: TokenStream> { pub(crate) struct TokenStreamChain<'a> {
offsets: Vec<usize>, offsets: Vec<usize>,
token_streams: Vec<TTokenStream>, token_streams: Vec<BoxTokenStream<'a>>,
position_shift: usize, position_shift: usize,
stream_idx: usize, stream_idx: usize,
token: Token, token: Token,
} }
impl<'a, TTokenStream> TokenStreamChain<TTokenStream> impl<'a> TokenStreamChain<'a> {
where
TTokenStream: TokenStream,
{
pub fn new( pub fn new(
offsets: Vec<usize>, offsets: Vec<usize>,
token_streams: Vec<TTokenStream>, token_streams: Vec<BoxTokenStream<'a>>,
) -> TokenStreamChain<TTokenStream> { ) -> TokenStreamChain<'a> {
TokenStreamChain { TokenStreamChain {
offsets, offsets,
stream_idx: 0, stream_idx: 0,
@@ -28,13 +26,10 @@ where
} }
} }
impl<'a, TTokenStream> TokenStream for TokenStreamChain<TTokenStream> impl<'a> TokenStream for TokenStreamChain<'a> {
where
TTokenStream: TokenStream,
{
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
while self.stream_idx < self.token_streams.len() { while self.stream_idx < self.token_streams.len() {
let token_stream = &mut self.token_streams[self.stream_idx]; let token_stream = self.token_streams[self.stream_idx].deref_mut();
if token_stream.advance() { if token_stream.advance() {
let token = token_stream.token(); let token = token_stream.token();
let offset_offset = self.offsets[self.stream_idx]; let offset_offset = self.offsets[self.stream_idx];
@@ -1,4 +1,4 @@
use crate::tokenizer::{Token, TokenStream, TokenStreamChain}; use crate::tokenizer::{BoxTokenStream, Token, TokenStream, TokenStreamChain};
use std::cmp::Ordering; use std::cmp::Ordering;
/// Struct representing pre-tokenized text /// Struct representing pre-tokenized text
@@ -41,9 +41,9 @@ impl PreTokenizedStream {
/// Creates a TokenStream from PreTokenizedString array /// Creates a TokenStream from PreTokenizedString array
pub fn chain_tokenized_strings<'a>( pub fn chain_tokenized_strings<'a>(
tok_strings: &'a [&'a PreTokenizedString], tok_strings: &'a [&'a PreTokenizedString],
) -> Box<dyn TokenStream + 'a> { ) -> BoxTokenStream {
if tok_strings.len() == 1 { if tok_strings.len() == 1 {
Box::new(PreTokenizedStream::from((*tok_strings[0]).clone())) PreTokenizedStream::from((*tok_strings[0]).clone()).into()
} else { } else {
let mut offsets = vec![]; let mut offsets = vec![];
let mut total_offset = 0; let mut total_offset = 0;
@@ -53,11 +53,12 @@ impl PreTokenizedStream {
total_offset += last_token.offset_to; total_offset += last_token.offset_to;
} }
} }
let token_streams: Vec<_> = tok_strings // TODO remove the string cloning.
let token_streams: Vec<BoxTokenStream<'static>> = tok_strings
.iter() .iter()
.map(|tok_string| PreTokenizedStream::from((*tok_string).clone())) .map(|&tok_string| PreTokenizedStream::from((*tok_string).clone()).into())
.collect(); .collect();
Box::new(TokenStreamChain::new(offsets, token_streams)) TokenStreamChain::new(offsets, token_streams).into()
} }
} }
} }
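A short sketch of chaining pre-tokenized values with the new boxed stream type, assuming `a` and `b` are `PreTokenizedString` values built elsewhere:

```rust
use tantivy::tokenizer::{PreTokenizedStream, TokenStream};

// `a` and `b` are assumed to be PreTokenizedString values built elsewhere.
let tok_strings = [&a, &b];
let mut stream = PreTokenizedStream::chain_tokenized_strings(&tok_strings[..]);
while stream.advance() {
    // Tokens of `b` come after those of `a`, with a position gap of 2 between them.
    println!("{:?}", stream.token());
}
```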
@@ -2,6 +2,7 @@ use crate::tokenizer::TokenStreamChain;
/// The tokenizer module contains all of the tools used to process /// The tokenizer module contains all of the tools used to process
/// text in `tantivy`. /// text in `tantivy`.
use std::borrow::{Borrow, BorrowMut}; use std::borrow::{Borrow, BorrowMut};
use std::ops::{Deref, DerefMut};
/// Token /// Token
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
@@ -33,20 +34,31 @@ impl Default for Token {
} }
} }
/// `Tokenizer` are in charge of splitting text into a stream of token /// `TextAnalyzer` tokenizes an input text into tokens and modifies the resulting `TokenStream`.
/// before indexing.
/// ///
/// See the [module documentation](./index.html) for more detail. /// It simply wraps a `Tokenizer` and a list of `TokenFilter` that are applied sequentially.
/// pub struct TextAnalyzer {
/// # Warning tokenizer: Box<dyn Tokenizer>,
/// token_filters: Vec<BoxTokenFilter>,
/// This API may change to use associated types. }
pub trait Tokenizer<'a>: Sized + Clone {
/// Type associated to the resulting tokenstream tokenstream.
type TokenStreamImpl: TokenStream;
/// Creates a token stream for a given `str`. impl<T: Tokenizer> From<T> for TextAnalyzer {
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl; fn from(tokenizer: T) -> Self {
TextAnalyzer::new(tokenizer, Vec::new())
}
}
impl TextAnalyzer {
/// Creates a new `TextAnalyzer` given a tokenizer and a vector of `BoxTokenFilter`.
///
/// When creating a `TextAnalyzer` from a `Tokenizer` alone, prefer using
/// `TextAnalyzer::from(tokenizer)`.
pub fn new<T: Tokenizer>(tokenizer: T, token_filters: Vec<BoxTokenFilter>) -> TextAnalyzer {
TextAnalyzer {
tokenizer: Box::new(tokenizer),
token_filters,
}
}
/// Appends a token filter to the current tokenizer. /// Appends a token filter to the current tokenizer.
/// ///
@@ -58,90 +70,26 @@ pub trait Tokenizer<'a>: Sized + Clone {
/// ```rust /// ```rust
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// let en_stem = SimpleTokenizer /// let en_stem = TextAnalyzer::from(SimpleTokenizer)
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser) /// .filter(LowerCaser)
/// .filter(Stemmer::default()); /// .filter(Stemmer::default());
/// ``` /// ```
/// ///
fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self> pub fn filter<F: Into<BoxTokenFilter>>(mut self, token_filter: F) -> Self {
where self.token_filters.push(token_filter.into());
NewFilter: TokenFilter<<Self as Tokenizer<'a>>::TokenStreamImpl>, self
{
ChainTokenizer {
head: new_filter,
tail: self,
}
}
}
/// A boxed tokenizer
trait BoxedTokenizerTrait: Send + Sync {
/// Tokenize a `&str`
fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a>;
/// Tokenize an array`&str`
///
/// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
/// one concatenated `&str`, with an artificial position gap of `2` between the different fields
/// to prevent accidental `PhraseQuery` to match accross two terms.
fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b>;
/// Return a boxed clone of the tokenizer
fn boxed_clone(&self) -> BoxedTokenizer;
}
/// A boxed tokenizer
pub struct BoxedTokenizer(Box<dyn BoxedTokenizerTrait>);
impl<T> From<T> for BoxedTokenizer
where
T: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
fn from(tokenizer: T) -> BoxedTokenizer {
BoxedTokenizer(Box::new(BoxableTokenizer(tokenizer)))
}
}
impl BoxedTokenizer {
/// Tokenize a `&str`
pub fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
self.0.token_stream(text)
} }
/// Tokenize an array`&str` /// Tokenize an array`&str`
/// ///
/// The resulting `TokenStream` is equivalent to what would be obtained if the &str were /// The resulting `BoxTokenStream` is equivalent to what would be obtained if the &str were
/// one concatenated `&str`, with an artificial position gap of `2` between the different fields /// one concatenated `&str`, with an artificial position gap of `2` between the different fields
/// to prevent an accidental `PhraseQuery` from matching across two terms. /// to prevent an accidental `PhraseQuery` from matching across two terms.
pub fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> { pub fn token_stream_texts<'a>(&self, texts: &'a [&'a str]) -> BoxTokenStream<'a> {
self.0.token_stream_texts(texts)
}
}
impl Clone for BoxedTokenizer {
fn clone(&self) -> BoxedTokenizer {
self.0.boxed_clone()
}
}
#[derive(Clone)]
struct BoxableTokenizer<A>(A)
where
A: for<'a> Tokenizer<'a> + Send + Sync;
impl<A> BoxedTokenizerTrait for BoxableTokenizer<A>
where
A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
Box::new(self.0.token_stream(text))
}
fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
assert!(!texts.is_empty()); assert!(!texts.is_empty());
if texts.len() == 1 { if texts.len() == 1 {
Box::new(self.0.token_stream(texts[0])) self.token_stream(texts[0])
} else { } else {
let mut offsets = vec![]; let mut offsets = vec![];
let mut total_offset = 0; let mut total_offset = 0;
@@ -149,34 +97,124 @@ where
offsets.push(total_offset); offsets.push(total_offset);
total_offset += text.len(); total_offset += text.len();
} }
let token_streams: Vec<_> = let token_streams: Vec<BoxTokenStream<'a>> = texts
texts.iter().map(|text| self.0.token_stream(text)).collect(); .iter()
Box::new(TokenStreamChain::new(offsets, token_streams)) .cloned()
.map(|text| self.token_stream(text))
.collect();
From::from(TokenStreamChain::new(offsets, token_streams))
} }
} }
fn boxed_clone(&self) -> BoxedTokenizer { /// Creates a token stream for a given `str`.
self.0.clone().into() pub fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
let mut token_stream = self.tokenizer.token_stream(text);
for token_filter in &self.token_filters {
token_stream = token_filter.transform(token_stream);
}
token_stream
} }
} }
impl<'b> TokenStream for Box<dyn TokenStream + 'b> { impl Clone for TextAnalyzer {
fn clone(&self) -> Self {
TextAnalyzer {
tokenizer: self.tokenizer.box_clone(),
token_filters: self
.token_filters
.iter()
.map(|token_filter| token_filter.box_clone())
.collect(),
}
}
}
/// `Tokenizer` are in charge of splitting text into a stream of token
/// before indexing.
///
/// See the [module documentation](./index.html) for more detail.
///
/// # Warning
///
/// This API may change to use associated types.
pub trait Tokenizer: 'static + Send + Sync + TokenizerClone {
/// Creates a token stream for a given `str`.
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a>;
}
pub trait TokenizerClone {
fn box_clone(&self) -> Box<dyn Tokenizer>;
}
impl<T: Tokenizer + Clone> TokenizerClone for T {
fn box_clone(&self) -> Box<dyn Tokenizer> {
Box::new(self.clone())
}
}
impl<'a> TokenStream for Box<dyn TokenStream + 'a> {
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
let token_stream: &mut dyn TokenStream = self.borrow_mut(); let token_stream: &mut dyn TokenStream = self.borrow_mut();
token_stream.advance() token_stream.advance()
} }
fn token(&self) -> &Token { fn token<'b>(&'b self) -> &'b Token {
let token_stream: &dyn TokenStream = self.borrow(); let token_stream: &'b (dyn TokenStream + 'a) = self.borrow();
token_stream.token() token_stream.token()
} }
fn token_mut(&mut self) -> &mut Token { fn token_mut<'b>(&'b mut self) -> &'b mut Token {
let token_stream: &mut dyn TokenStream = self.borrow_mut(); let token_stream: &'b mut (dyn TokenStream + 'a) = self.borrow_mut();
token_stream.token_mut() token_stream.token_mut()
} }
} }
/// Simple wrapper of `Box<dyn TokenStream + 'a>`.
///
/// See `TokenStream` for more information.
pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>);
impl<'a, T> From<T> for BoxTokenStream<'a>
where
T: TokenStream + 'a,
{
fn from(token_stream: T) -> BoxTokenStream<'a> {
BoxTokenStream(Box::new(token_stream))
}
}
impl<'a> Deref for BoxTokenStream<'a> {
type Target = dyn TokenStream + 'a;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
impl<'a> DerefMut for BoxTokenStream<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut *self.0
}
}
/// Simple wrapper of `Box<dyn TokenFilter + 'a>`.
///
/// See `TokenStream` for more information.
pub struct BoxTokenFilter(Box<dyn TokenFilter>);
impl Deref for BoxTokenFilter {
type Target = dyn TokenFilter;
fn deref(&self) -> &dyn TokenFilter {
&*self.0
}
}
impl<T: TokenFilter> From<T> for BoxTokenFilter {
fn from(tokenizer: T) -> BoxTokenFilter {
BoxTokenFilter(Box::new(tokenizer))
}
}
/// `TokenStream` is the result of the tokenization. /// `TokenStream` is the result of the tokenization.
/// ///
/// It consists of a consumable stream of `Token`s. /// It consists of a consumable stream of `Token`s.
@@ -186,7 +224,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// ``` /// ```
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// let tokenizer = SimpleTokenizer /// let tokenizer = TextAnalyzer::from(SimpleTokenizer)
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser); /// .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer"); /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
@@ -225,7 +263,7 @@ pub trait TokenStream {
/// ``` /// ```
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// let tokenizer = SimpleTokenizer /// let tokenizer = TextAnalyzer::from(SimpleTokenizer)
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser); /// .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer"); /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
@@ -243,6 +281,8 @@ pub trait TokenStream {
/// Helper function to consume the entire `TokenStream` /// Helper function to consume the entire `TokenStream`
/// and push the tokens to a sink function. /// and push the tokens to a sink function.
///
/// Remove this.
fn process(&mut self, sink: &mut dyn FnMut(&Token)) -> u32 { fn process(&mut self, sink: &mut dyn FnMut(&Token)) -> u32 {
let mut num_tokens_pushed = 0u32; let mut num_tokens_pushed = 0u32;
while self.advance() { while self.advance() {
@@ -253,33 +293,20 @@ pub trait TokenStream {
} }
} }
#[derive(Clone)] pub trait TokenFilterClone {
pub struct ChainTokenizer<HeadTokenFilterFactory, TailTokenizer> { fn box_clone(&self) -> BoxTokenFilter;
head: HeadTokenFilterFactory,
tail: TailTokenizer,
}
impl<'a, HeadTokenFilterFactory, TailTokenizer> Tokenizer<'a>
for ChainTokenizer<HeadTokenFilterFactory, TailTokenizer>
where
HeadTokenFilterFactory: TokenFilter<TailTokenizer::TokenStreamImpl>,
TailTokenizer: Tokenizer<'a>,
{
type TokenStreamImpl = HeadTokenFilterFactory::ResultTokenStream;
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
let tail_token_stream = self.tail.token_stream(text);
self.head.transform(tail_token_stream)
}
} }
/// Trait for the pluggable components of `Tokenizer`s. /// Trait for the pluggable components of `Tokenizer`s.
pub trait TokenFilter<TailTokenStream: TokenStream>: Clone { pub trait TokenFilter: 'static + Send + Sync + TokenFilterClone {
/// The resulting `TokenStream` type.
type ResultTokenStream: TokenStream;
/// Wraps a token stream and returns the modified one. /// Wraps a token stream and returns the modified one.
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream; fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a>;
}
impl<T: TokenFilter + Clone> TokenFilterClone for T {
fn box_clone(&self) -> BoxTokenFilter {
BoxTokenFilter::from(self.clone())
}
} }
#[cfg(test)] #[cfg(test)]
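To make the two construction paths above concrete, a sketch showing that the builder style and the explicit `TextAnalyzer::new` call build the same pipeline (the filter choice is illustrative):

```rust
use tantivy::tokenizer::*;

// Builder style: start from a tokenizer and chain filters.
let analyzer_a = TextAnalyzer::from(SimpleTokenizer)
    .filter(RemoveLongFilter::limit(40))
    .filter(LowerCaser);

// Explicit style: pass the boxed filters up front; both produce the same pipeline.
let analyzer_b = TextAnalyzer::new(
    SimpleTokenizer,
    vec![
        BoxTokenFilter::from(RemoveLongFilter::limit(40)),
        BoxTokenFilter::from(LowerCaser),
    ],
);

let mut stream = analyzer_a.token_stream("Hello, Happy Tax Payer");
while stream.advance() {
    println!("{:?}", stream.token());
}
// analyzer_b.token_stream(...) behaves identically.
```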
@@ -1,11 +1,10 @@
use crate::tokenizer::stemmer::Language; use crate::tokenizer::stemmer::Language;
use crate::tokenizer::BoxedTokenizer; use crate::tokenizer::tokenizer::TextAnalyzer;
use crate::tokenizer::LowerCaser; use crate::tokenizer::LowerCaser;
use crate::tokenizer::RawTokenizer; use crate::tokenizer::RawTokenizer;
use crate::tokenizer::RemoveLongFilter; use crate::tokenizer::RemoveLongFilter;
use crate::tokenizer::SimpleTokenizer; use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::Stemmer; use crate::tokenizer::Stemmer;
use crate::tokenizer::Tokenizer;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
@@ -23,16 +22,16 @@ use std::sync::{Arc, RwLock};
/// search engine. /// search engine.
#[derive(Clone)] #[derive(Clone)]
pub struct TokenizerManager { pub struct TokenizerManager {
tokenizers: Arc<RwLock<HashMap<String, BoxedTokenizer>>>, tokenizers: Arc<RwLock<HashMap<String, TextAnalyzer>>>,
} }
impl TokenizerManager { impl TokenizerManager {
/// Registers a new tokenizer associated with a given name. /// Registers a new tokenizer associated with a given name.
pub fn register<A>(&self, tokenizer_name: &str, tokenizer: A) pub fn register<T>(&self, tokenizer_name: &str, tokenizer: T)
where where
A: Into<BoxedTokenizer>, TextAnalyzer: From<T>,
{ {
let boxed_tokenizer = tokenizer.into(); let boxed_tokenizer: TextAnalyzer = TextAnalyzer::from(tokenizer);
self.tokenizers self.tokenizers
.write() .write()
.expect("Acquiring the lock should never fail") .expect("Acquiring the lock should never fail")
@@ -40,7 +39,7 @@ impl TokenizerManager {
} }
/// Accessing a tokenizer given its name. /// Accessing a tokenizer given its name.
pub fn get(&self, tokenizer_name: &str) -> Option<BoxedTokenizer> { pub fn get(&self, tokenizer_name: &str) -> Option<TextAnalyzer> {
self.tokenizers self.tokenizers
.read() .read()
.expect("Acquiring the lock should never fail") .expect("Acquiring the lock should never fail")
@@ -62,13 +61,13 @@ impl Default for TokenizerManager {
manager.register("raw", RawTokenizer); manager.register("raw", RawTokenizer);
manager.register( manager.register(
"default", "default",
SimpleTokenizer TextAnalyzer::from(SimpleTokenizer)
.filter(RemoveLongFilter::limit(40)) .filter(RemoveLongFilter::limit(40))
.filter(LowerCaser), .filter(LowerCaser),
); );
manager.register( manager.register(
"en_stem", "en_stem",
SimpleTokenizer TextAnalyzer::from(SimpleTokenizer)
.filter(RemoveLongFilter::limit(40)) .filter(RemoveLongFilter::limit(40))
.filter(LowerCaser) .filter(LowerCaser)
.filter(Stemmer::new(Language::English)), .filter(Stemmer::new(Language::English)),
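A usage sketch of the manager with the new `TextAnalyzer` type (the input text is illustrative):

```rust
use tantivy::tokenizer::{TokenStream, TokenizerManager};

let manager = TokenizerManager::default();
// `get` now hands back a TextAnalyzer rather than a BoxedTokenizer.
let en_stem = manager.get("en_stem").expect("registered by default");
let mut stream = en_stem.token_stream("The quick brown foxes jumped");
while stream.advance() {
    println!("{} (pos {})", stream.token().text, stream.token().position);
}
```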