Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2026-01-01 23:12:54 +00:00)
Compare commits: bundle ... audunhalla (17 commits)
Commits (SHA1):
- 548129fc6d
- 55f5658d40
- 3ae6363462
- 9e20d7f8a5
- ab13ffe377
- 039138ed50
- 6227a0555a
- f85d0a522a
- 5795488ba7
- c3045dfb5c
- 811fd0cb9e
- 9f04f42b64
- aeb8ae3ef0
- f6847c46d7
- 92dac7af5c
- 801905d77f
- 8f5ac86f30
CHANGELOG.md (13 changed lines)
@@ -1,3 +1,16 @@
Tantivy 0.12.0
======================
- Removing static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)

## How to update?

Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
minor changes. Check https://github.com/tantivy-search/tantivy/blob/master/examples/custom_tokenizer.rs
to check for some code sample.

Tantivy 0.11.3
=======================
- Fixed DateTime as a fast field (#735)
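To illustrate the tokenizer change mentioned in "How to update?": a custom analyzer is now built by wrapping the base tokenizer in a `TextAnalyzer` before chaining filters, and then registered as before. A minimal sketch based on the example and test hunks further down (the tokenizer name is illustrative):

```rust
use tantivy::tokenizer::{
    LowerCaser, SimpleTokenizer, StopWordFilter, TextAnalyzer, TokenizerManager,
};

fn register_stop_word_tokenizer(tokenizer_manager: &TokenizerManager) {
    // 0.12: wrap the base tokenizer in a TextAnalyzer before adding filters.
    let analyzer = TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(StopWordFilter::remove(vec!["the".to_string()]));
    tokenizer_manager.register("en_with_stop_words", analyzer);
}
```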
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.11.3"
version = "0.12.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -18,7 +18,7 @@ byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.1"
tantivy-fst = "0.2.1"
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
snap = {version="0.2"}
@@ -60,7 +60,6 @@ winapi = "0.3"
rand = "0.7"
maplit = "1"
matches = "0.1.8"
time = "0.1.42"

[dev-dependencies.fail]
version = "0.3"
@@ -9,7 +9,7 @@
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text

use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};

use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;

@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {

// This tokenizer lowers all of the text (to help with stop word matching)
// then removes all instances of `the` and `and` from the corpus
let tokenizer = SimpleTokenizer
let tokenizer = TextAnalyzer::from(SimpleTokenizer)
.filter(LowerCaser)
.filter(StopWordFilter::remove(vec![
"the".to_string(),
@@ -6,6 +6,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
};
use crate::fastfield::FastFieldReader;
use crate::schema::Field;
use crate::DocAddress;
use crate::DocId;
@@ -61,6 +62,34 @@ impl fmt::Debug for TopDocs {
}
}

struct ScorerByFastFieldReader {
ff_reader: FastFieldReader<u64>,
}

impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&self, doc: DocId) -> u64 {
self.ff_reader.get_u64(u64::from(doc))
}
}

struct ScorerByField {
field: Field,
}

impl CustomScorer<u64> for ScorerByField {
type Child = ScorerByFastFieldReader;

fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
let ff_reader = segment_reader
.fast_fields()
.u64(self.field)
.ok_or_else(|| {
crate::Error::SchemaError(format!("Field requested is not a i64/u64 fast field."))
})?;
Ok(ScorerByFastFieldReader { ff_reader })
}
}

impl TopDocs {
/// Creates a top score collector, with a number of documents equal to "limit".
///
@@ -143,14 +172,7 @@ impl TopDocs {
self,
field: Field,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
self.custom_score(move |segment_reader: &SegmentReader| {
let ff_reader = segment_reader
.fast_fields()
.u64(field)
.expect("Field requested is not a i64/u64 fast field.");
//TODO error message missmatch actual behavior for i64
move |doc: DocId| ff_reader.get(doc)
})
self.custom_score(ScorerByField { field })
}

/// Ranks the documents using a custom score.
@@ -572,7 +594,6 @@ mod tests {
}

#[test]
#[should_panic(expected = "Field requested is not a i64/u64 fast field")]
fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
@@ -587,7 +608,12 @@ mod tests {
let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
assert!(top_collector.for_segment(0, segment).is_ok());
let err = top_collector.for_segment(0, segment);
if let Err(crate::Error::SchemaError(msg)) = err {
assert_eq!(msg, "Field requested is not a i64/u64 fast field.");
} else {
assert!(false);
}
}

fn index(
@@ -186,7 +186,7 @@ mod test {
use super::{CompositeFile, CompositeWrite};
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory};
use crate::directory::{Directory, RAMDirectory};
use crate::schema::Field;
use std::io::Write;
use std::path::Path;
@@ -10,7 +10,9 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
/// API of a dependency, knowing it might conflict with a different version
/// used by the client. Second, we may stop using rayon in the future.
pub enum Executor {
/// Single thread variant of an Executor
SingleThread,
/// Thread pool variant of an Executor
ThreadPool(ThreadPool),
}

@@ -20,7 +22,7 @@ impl Executor {
Executor::SingleThread
}

// Creates an Executor that dispatches the tasks in a thread pool.
/// Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> {
let pool = ThreadPoolBuilder::new()
.num_threads(num_threads)
@@ -29,10 +31,10 @@ impl Executor {
Ok(Executor::ThreadPool(pool))
}

// Perform a map in the thread pool.
//
// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
// will propagate to the caller.
/// Perform a map in the thread pool.
///
/// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
/// will propagate to the caller.
pub fn map<
A: Send,
R: Send,
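Since `Executor` becomes part of the public API in this change set (see the `lib.rs` hunk further down), a minimal usage sketch of the constructor shown above; the thread count and name prefix are illustrative values:

```rust
use tantivy::Executor;

fn build_search_executor() -> tantivy::Result<Executor> {
    // Builds a rayon thread pool with 4 workers whose thread names share the given prefix.
    Executor::multi_thread(4, "tantivy-search-")
}
```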
@@ -20,8 +20,7 @@ use crate::reader::IndexReaderBuilder;
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::TokenizerManager;
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter;
use crate::Result;
use num_cpus;
@@ -173,11 +172,11 @@ impl Index {
}

/// Helper to access the tokenizer associated to a specific field.
pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> {
pub fn tokenizer_for_field(&self, field: Field) -> Result<TextAnalyzer> {
let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type();
let tokenizer_manager: &TokenizerManager = self.tokenizers();
let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type {
let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
FieldType::Str(text_options) => text_options
.get_indexing_options()
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -338,7 +337,7 @@ impl Index {

/// Creates a new segment.
pub fn new_segment(&self) -> Segment {
let mut segment_meta = self
let segment_meta = self
.inventory
.new_segment_meta(SegmentId::generate_random(), 0);
self.segment(segment_meta)
@@ -35,7 +35,6 @@ impl SegmentMetaInventory {
segment_id,
max_doc,
deletes: None,
bundled: false,
};
SegmentMeta::from(self.inventory.track(inner))
}
@@ -82,19 +81,6 @@ impl SegmentMeta {
self.tracked.segment_id
}

pub fn with_bundled(self) -> SegmentMeta {
SegmentMeta::from(self.tracked.map(|inner| InnerSegmentMeta {
segment_id: inner.segment_id,
max_doc: inner.max_doc,
deletes: inner.deletes.clone(),
bundled: true,
}))
}

pub fn is_bundled(&self) -> bool {
self.tracked.bundled
}

/// Returns the number of deleted documents.
pub fn num_deleted_docs(&self) -> u32 {
self.tracked
@@ -121,12 +107,8 @@ impl SegmentMeta {
/// It just joins the segment id with the extension
/// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let suffix = self.suffix(component);
self.relative_path_from_suffix(&suffix)
}

fn suffix(&self, component: SegmentComponent) -> String {
match component {
let mut path = self.id().uuid_string();
path.push_str(&*match component {
SegmentComponent::POSTINGS => ".idx".to_string(),
SegmentComponent::POSITIONS => ".pos".to_string(),
SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
@@ -135,17 +117,7 @@ impl SegmentMeta {
SegmentComponent::FASTFIELDS => ".fast".to_string(),
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
}
}

/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated to a segment component.
pub fn relative_path_from_suffix(&self, suffix: &str) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(".");
path.push_str(&suffix);
});
PathBuf::from(path)
}

@@ -189,7 +161,6 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id,
max_doc,
deletes: None,
bundled: inner_meta.bundled,
});
SegmentMeta { tracked }
}
@@ -204,7 +175,6 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id,
max_doc: inner_meta.max_doc,
deletes: Some(delete_meta),
bundled: inner_meta.bundled,
});
SegmentMeta { tracked }
}
@@ -215,7 +185,6 @@ struct InnerSegmentMeta {
segment_id: SegmentId,
max_doc: u32,
deletes: Option<DeleteMeta>,
bundled: bool,
}

impl InnerSegmentMeta {
@@ -60,7 +60,7 @@ impl InvertedIndexReader {
.get_index_record_option()
.unwrap_or(IndexRecordOption::Basic);
InvertedIndexReader {
termdict: TermDictionary::empty(&field_type),
termdict: TermDictionary::empty(),
postings_source: ReadOnlySource::empty(),
positions_source: ReadOnlySource::empty(),
positions_idx_source: ReadOnlySource::empty(),
@@ -4,12 +4,14 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory;
use crate::directory::{ReadOnlyDirectory, ReadOnlySource, WritePtr};
use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema;
use crate::Opstamp;
use crate::Result;
use std::fmt;
use std::path::PathBuf;
use std::result;

/// A segment is a piece of the index.
#[derive(Clone)]
@@ -81,30 +83,23 @@ impl Segment {
}

/// Open one of the component file for a *regular* read.
pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
pub fn open_read(
&self,
component: SegmentComponent,
) -> result::Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component);
let source = self.index.directory().open_read(&path)?;
Ok(source)
}

/// Open one of the component file for *regular* write.
pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component);
self.index.directory_mut().open_write(&path)
}

pub fn open_bundle_writer(&mut self) -> Result<WritePtr, OpenWriteError> {
let path = self.meta.relative_path_from_suffix("bundle");
self.index.directory_mut().open_write(&path)
}

pub(crate) fn open_write_in_directory(
pub fn open_write(
&mut self,
component: SegmentComponent,
directory: &mut dyn Directory,
) -> Result<WritePtr, OpenWriteError> {
) -> result::Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component);
directory.open_write(&path)
let write = self.index.directory_mut().open_write(&path)?;
Ok(write)
}
}

@@ -114,5 +109,5 @@ pub trait SerializableSegment {
///
/// # Returns
/// The number of documents in the segment.
fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32>;
fn write(&self, serializer: SegmentSerializer) -> Result<u32>;
}
@@ -1,97 +0,0 @@
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::OpenReadError;
use crate::directory::ReadOnlySource;
use crate::error::DataCorruption;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;

#[derive(Clone)]
struct BundleDirectory {
source_map: Arc<HashMap<PathBuf, ReadOnlySource>>,
}

impl BundleDirectory {
pub fn from_source(source: ReadOnlySource) -> Result<BundleDirectory, DataCorruption> {
let mut index_offset_buf = [0u8; 8];
let (body_idx, footer_offset) = source.split_from_end(8);
index_offset_buf.copy_from_slice(footer_offset.as_slice());
let offset = u64::from_le_bytes(index_offset_buf);
let (body_source, idx_source) = body_idx.split(offset as usize);
let idx: HashMap<PathBuf, (u64, u64)> = serde_json::from_slice(idx_source.as_slice())
.map_err(|err| {
let msg = format!("Failed to read index from bundle. {:?}", err);
DataCorruption::comment_only(msg)
})?;
let source_map: HashMap<PathBuf, ReadOnlySource> = idx
.into_iter()
.map(|(path, (start, stop))| {
let source = body_source.slice(start as usize, stop as usize);
(path, source)
})
.collect();
Ok(BundleDirectory {
source_map: Arc::new(source_map),
})
}
}

impl ReadOnlyDirectory for BundleDirectory {
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.source_map
.get(path)
.cloned()
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))
}

fn exists(&self, path: &Path) -> bool {
self.source_map.contains_key(path)
}

fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let source = self
.source_map
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))?;
Ok(source.as_slice().to_vec())
}
}

#[cfg(test)]
mod tests {
use super::BundleDirectory;
use crate::directory::{RAMDirectory, ReadOnlyDirectory, TerminatingWrite};
use crate::Directory;
use std::io::Write;
use std::path::Path;

#[test]
fn test_bundle_directory() {
let mut ram_directory = RAMDirectory::default();
let test_path_atomic = Path::new("testpath_atomic");
let test_path_wrt = Path::new("testpath_wrt");
assert!(ram_directory
.atomic_write(test_path_atomic, b"titi")
.is_ok());
{
let mut test_wrt = ram_directory.open_write(test_path_wrt).unwrap();
assert!(test_wrt.write_all(b"toto").is_ok());
assert!(test_wrt.terminate().is_ok());
}
let mut dest_directory = RAMDirectory::default();
let bundle_path = Path::new("bundle");
let mut wrt = dest_directory.open_write(bundle_path).unwrap();
assert!(ram_directory.serialize_bundle(&mut wrt).is_ok());
assert!(wrt.terminate().is_ok());
let source = dest_directory.open_read(bundle_path).unwrap();
let bundle_directory = BundleDirectory::from_source(source).unwrap();
assert_eq!(
&bundle_directory.atomic_read(test_path_atomic).unwrap()[..],
b"titi"
);
assert_eq!(
&bundle_directory.open_read(test_path_wrt).unwrap()[..],
b"toto"
);
}
}
@@ -100,7 +100,17 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
}
}

pub trait ReadOnlyDirectory {
/// Write-once read many (WORM) abstraction for where
/// tantivy's data should be stored.
///
/// There are currently two implementations of `Directory`
///
/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
/// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests.
///
pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// Opens a virtual file for read.
///
/// Once a virtual file is open, its data may not
@@ -112,31 +122,6 @@ pub trait ReadOnlyDirectory {
/// You should only use this to read files create with [Directory::open_write].
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;

/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;

/// Reads the full content file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files create with [Directory::atomic_write].
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
}

/// Write-once read many (WORM) abstraction for where
/// tantivy's data should be stored.
///
/// There are currently two implementations of `Directory`
///
/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
/// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests.
///
pub trait Directory:
DirectoryClone + ReadOnlyDirectory + fmt::Debug + Send + Sync + 'static
{
/// Removes a file
///
/// Removing a file will not affect an eventual
@@ -146,6 +131,9 @@ pub trait Directory:
/// `DeleteError::DoesNotExist`.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;

/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;

/// Opens a writer for the *virtual file* associated with
/// a Path.
///
@@ -167,6 +155,14 @@ pub trait Directory:
/// The file may not previously exist.
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;

/// Reads the full content file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files create with [Directory::atomic_write].
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;

/// Atomically replace the content of a file with data.
///
/// This calls ensure that reads can never *observe*
@@ -10,7 +10,6 @@ use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;

use crate::directory::directory::ReadOnlyDirectory;
use crc32fast::Hasher;
use serde_json;
use std::collections::HashSet;
@@ -265,6 +264,14 @@ impl ManagedDirectory {
}

impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}

fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
@@ -282,10 +289,18 @@ impl Directory for ManagedDirectory {
self.directory.atomic_write(path, data)
}

fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}

fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
self.directory.delete(path)
}

fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}

fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
self.directory.acquire_lock(lock)
}
@@ -295,24 +310,6 @@ impl Directory for ManagedDirectory {
}
}

impl ReadOnlyDirectory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}

fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}

fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
}

impl Clone for ManagedDirectory {
fn clone(&self) -> ManagedDirectory {
ManagedDirectory {
@@ -326,9 +323,7 @@ impl Clone for ManagedDirectory {
#[cfg(test)]
mod tests_mmap_specific {

use crate::directory::{
Directory, ManagedDirectory, MmapDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io::Write;
@@ -6,7 +6,6 @@ use self::notify::RawEvent;
use self::notify::RecursiveMode;
use self::notify::Watcher;
use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::LockError;
use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
@@ -408,6 +407,24 @@ impl TerminatingWrite for SafeFileWriter {
}

impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);

let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}

/// Any entry associated to the path in the mmap will be
/// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
@@ -426,6 +443,11 @@ impl Directory for MmapDirectory {
}
}

fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}

fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path);
@@ -456,6 +478,25 @@ impl Directory for MmapDirectory {
Ok(BufWriter::new(Box::new(writer)))
}

fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}

fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path);
@@ -489,50 +530,6 @@ impl Directory for MmapDirectory {
}
}

impl ReadOnlyDirectory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);

let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}

fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}

fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
}

#[cfg(test)]
mod tests {
@@ -7,7 +7,6 @@ WORM directory abstraction.
#[cfg(feature = "mmap")]
mod mmap_directory;

mod bundle_directory;
mod directory;
mod directory_lock;
mod footer;
@@ -20,7 +19,7 @@ mod watch_event_router;
pub mod error;

pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone, ReadOnlyDirectory};
pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
@@ -1,6 +1,4 @@
use crate::common::CountingWriter;
use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
@@ -117,22 +115,6 @@ impl InnerDirectory {
fn total_mem_usage(&self) -> usize {
self.fs.values().map(|f| f.len()).sum()
}

fn serialize_bundle(&self, wrt: &mut WritePtr) -> io::Result<()> {
let mut counting_writer = CountingWriter::wrap(wrt);
let mut file_index: HashMap<PathBuf, (u64, u64)> = HashMap::default();
for (path, source) in &self.fs {
let start = counting_writer.written_bytes();
counting_writer.write_all(source.as_slice())?;
let stop = counting_writer.written_bytes();
file_index.insert(path.to_path_buf(), (start, stop));
}
let index_offset = counting_writer.written_bytes();
serde_json::to_writer(&mut counting_writer, &file_index)?;
let index_offset_buffer = index_offset.to_le_bytes();
counting_writer.write_all(&index_offset_buffer[..])?;
Ok(())
}
}

impl fmt::Debug for RAMDirectory {
@@ -162,18 +144,13 @@ impl RAMDirectory {
pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage()
}

/// Serialize the RAMDirectory into a bundle.
///
/// This method will fail, write nothing, and return an error if a
/// clone of this repository exists.
pub fn serialize_bundle(self, wrt: &mut WritePtr) -> io::Result<()> {
let inner_directory_rlock = self.fs.read().unwrap();
inner_directory_rlock.serialize_bundle(wrt)
}
}

impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}

fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| {
use crate::directory::error::IOError;
@@ -183,6 +160,10 @@ impl Directory for RAMDirectory {
self.fs.write().unwrap().delete(path)
}

fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}

fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path);
@@ -196,6 +177,10 @@ impl Directory for RAMDirectory {
}
}

fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}

fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other,
@@ -219,17 +204,3 @@ impl Directory for RAMDirectory {
Ok(self.fs.write().unwrap().watch(watch_callback))
}
}

impl ReadOnlyDirectory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}

fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}

fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
}
@@ -25,10 +25,10 @@ impl DataCorruption {
}
}

pub fn comment_only<TS: ToString>(comment: TS) -> DataCorruption {
pub fn comment_only(comment: String) -> DataCorruption {
DataCorruption {
filepath: None,
comment: comment.to_string(),
comment,
}
}
}
@@ -179,7 +179,7 @@ mod tests {

use super::*;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use crate::merge_policy::NoMergePolicy;
use crate::schema::Field;
@@ -7,9 +7,6 @@ pub use self::writer::MultiValueIntFastFieldWriter;
#[cfg(test)]
mod tests {

use time;

use self::time::Duration;
use crate::collector::TopDocs;
use crate::query::QueryParser;
use crate::schema::Cardinality;
@@ -17,6 +14,7 @@ mod tests {
use crate::schema::IntOptions;
use crate::schema::Schema;
use crate::Index;
use chrono::Duration;

#[test]
fn test_multivalued_u64() {
@@ -4,7 +4,7 @@ use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
@@ -897,7 +897,7 @@ mod tests {
let index_writer = index.writer(3_000_000).unwrap();
assert_eq!(
format!("{:?}", index_writer.get_merge_policy()),
"LogMergePolicy { min_merge_size: 8, min_layer_size: 10000, \
"LogMergePolicy { min_merge_size: 8, max_merge_size: 10000000, min_layer_size: 10000, \
level_log_size: 0.75 }"
);
let merge_policy = Box::new(NoMergePolicy::default());
@@ -6,12 +6,14 @@ use std::f64;
const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75;
const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
const DEFAULT_MIN_MERGE_SIZE: usize = 8;
const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;

/// `LogMergePolicy` tries tries to merge segments that have a similar number of
/// documents.
#[derive(Debug, Clone)]
pub struct LogMergePolicy {
min_merge_size: usize,
max_merge_size: usize,
min_layer_size: u32,
level_log_size: f64,
}
@@ -26,6 +28,12 @@ impl LogMergePolicy {
self.min_merge_size = min_merge_size;
}

/// Set the maximum number docs in a segment for it to be considered for
/// merging.
pub fn set_max_merge_size(&mut self, max_merge_size: usize) {
self.max_merge_size = max_merge_size;
}

/// Set the minimum segment size under which all segment belong
/// to the same level.
pub fn set_min_layer_size(&mut self, min_layer_size: u32) {
@@ -53,6 +61,7 @@ impl MergePolicy for LogMergePolicy {
let mut size_sorted_tuples = segments
.iter()
.map(SegmentMeta::num_docs)
.filter(|s| s <= &(self.max_merge_size as u32))
.enumerate()
.collect::<Vec<(usize, u32)>>();

@@ -86,6 +95,7 @@ impl Default for LogMergePolicy {
fn default() -> LogMergePolicy {
LogMergePolicy {
min_merge_size: DEFAULT_MIN_MERGE_SIZE,
max_merge_size: DEFAULT_MAX_MERGE_SIZE,
min_layer_size: DEFAULT_MIN_LAYER_SIZE,
level_log_size: DEFAULT_LEVEL_LOG_SIZE,
}
@@ -104,6 +114,7 @@ mod tests {
fn test_merge_policy() -> LogMergePolicy {
let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3);
log_merge_policy.set_max_merge_size(100_000);
log_merge_policy.set_min_layer_size(2);
log_merge_policy
}
@@ -141,11 +152,11 @@ mod tests {
create_random_segment_meta(10),
create_random_segment_meta(10),
create_random_segment_meta(10),
create_random_segment_meta(1000),
create_random_segment_meta(1000),
create_random_segment_meta(1000),
create_random_segment_meta(10000),
create_random_segment_meta(10000),
create_random_segment_meta(1_000),
create_random_segment_meta(1_000),
create_random_segment_meta(1_000),
create_random_segment_meta(10_000),
create_random_segment_meta(10_000),
create_random_segment_meta(10),
create_random_segment_meta(10),
create_random_segment_meta(10),
@@ -182,4 +193,19 @@ mod tests {
let result_list = test_merge_policy().compute_merge_candidates(&test_input);
assert_eq!(result_list.len(), 1);
}

#[test]
fn test_large_merge_segments() {
let test_input = vec![
create_random_segment_meta(1_000_000),
create_random_segment_meta(100_001),
create_random_segment_meta(100_000),
create_random_segment_meta(100_000),
create_random_segment_meta(100_000),
];
let result_list = test_merge_policy().compute_merge_candidates(&test_input);
// Do not include large segments
assert_eq!(result_list.len(), 1);
assert_eq!(result_list[0].0.len(), 3)
}
}
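A short sketch of tuning the new `max_merge_size` knob from application code; attaching the policy to the writer through `set_merge_policy` is assumed here and is not part of this diff:

```rust
use tantivy::merge_policy::LogMergePolicy;
use tantivy::IndexWriter;

fn configure_merge_policy(index_writer: &IndexWriter) {
    let mut merge_policy = LogMergePolicy::default();
    // Segments with more docs than this are skipped by compute_merge_candidates
    // (the default cap introduced here is 10_000_000 docs).
    merge_policy.set_max_merge_size(100_000);
    merge_policy.set_min_merge_size(3);
    index_writer.set_merge_policy(Box::new(merge_policy));
}
```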
@@ -19,6 +19,8 @@ pub struct AddOperation {
/// UserOperation is an enum type that encapsulates other operation types.
#[derive(Eq, PartialEq, Debug)]
pub enum UserOperation {
/// Add operation
Add(Document),
/// Delete operation
Delete(Term),
}
@@ -1,13 +1,10 @@
use crate::Directory;
use crate::Result;

use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::directory::error::OpenWriteError;
use crate::directory::{DirectoryClone, RAMDirectory, TerminatingWrite, WritePtr};
use crate::fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::schema::Schema;
use crate::store::StoreWriter;

/// Segment serializer is in charge of laying out on disk
@@ -17,50 +14,25 @@ pub struct SegmentSerializer {
fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: FieldNormsSerializer,
postings_serializer: InvertedIndexSerializer,
bundle_writer: Option<(RAMDirectory, WritePtr)>,
}

pub(crate) struct SegmentSerializerWriters {
postings_wrt: WritePtr,
positions_skip_wrt: WritePtr,
positions_wrt: WritePtr,
terms_wrt: WritePtr,
fast_field_wrt: WritePtr,
fieldnorms_wrt: WritePtr,
store_wrt: WritePtr,
}

impl SegmentSerializerWriters {
pub(crate) fn for_segment(segment: &mut Segment) -> Result<Self, OpenWriteError> {
Ok(SegmentSerializerWriters {
postings_wrt: segment.open_write(SegmentComponent::POSTINGS)?,
positions_skip_wrt: segment.open_write(SegmentComponent::POSITIONS)?,
positions_wrt: segment.open_write(SegmentComponent::POSITIONSSKIP)?,
terms_wrt: segment.open_write(SegmentComponent::TERMS)?,
fast_field_wrt: segment.open_write(SegmentComponent::FASTFIELDS)?,
fieldnorms_wrt: segment.open_write(SegmentComponent::FIELDNORMS)?,
store_wrt: segment.open_write(SegmentComponent::STORE)?,
})
}
}

impl SegmentSerializer {
pub(crate) fn new(schema: Schema, writers: SegmentSerializerWriters) -> crate::Result<Self> {
let fast_field_serializer = FastFieldSerializer::from_write(writers.fast_field_wrt)?;
let fieldnorms_serializer = FieldNormsSerializer::from_write(writers.fieldnorms_wrt)?;
let postings_serializer = InvertedIndexSerializer::open(
schema,
writers.terms_wrt,
writers.postings_wrt,
writers.positions_wrt,
writers.positions_skip_wrt,
);
/// Creates a new `SegmentSerializer`.
pub fn for_segment(segment: &mut Segment) -> Result<SegmentSerializer> {
let store_write = segment.open_write(SegmentComponent::STORE)?;

let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?;

let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;

let postings_serializer = InvertedIndexSerializer::open(segment)?;
Ok(SegmentSerializer {
store_writer: StoreWriter::new(writers.store_wrt),
store_writer: StoreWriter::new(store_write),
fast_field_serializer,
fieldnorms_serializer,
postings_serializer,
bundle_writer: None,
})
}

@@ -85,15 +57,11 @@ impl SegmentSerializer {
}

/// Finalize the segment serialization.
pub fn close(mut self) -> crate::Result<()> {
pub fn close(self) -> Result<()> {
self.fast_field_serializer.close()?;
self.postings_serializer.close()?;
self.store_writer.close()?;
self.fieldnorms_serializer.close()?;
if let Some((ram_directory, mut bundle_wrt)) = self.bundle_writer.take() {
ram_directory.serialize_bundle(&mut bundle_wrt)?;
bundle_wrt.terminate()?;
}
Ok(())
}
}
@@ -12,7 +12,6 @@ use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::segment_serializer::SegmentSerializerWriters;
use crate::indexer::stamper::Stamper;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer;
@@ -133,9 +132,7 @@ fn merge(
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;

// ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut merged_segment)?;
let segment_serializer =
SegmentSerializer::new(merged_segment.schema(), segment_serializer_wrts)?;
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;

let num_docs = merger.write(segment_serializer)?;
@@ -3,7 +3,7 @@ use crate::core::Segment;
use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::{SegmentSerializer, SegmentSerializerWriters};
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldType;
@@ -11,10 +11,9 @@ use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::PreTokenizedStream;
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::Result;
@@ -50,7 +49,7 @@ pub struct SegmentWriter {
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<BoxedTokenizer>>,
tokenizers: Vec<Option<TextAnalyzer>>,
}

impl SegmentWriter {
@@ -69,8 +68,7 @@ impl SegmentWriter {
schema: &Schema,
) -> Result<SegmentWriter> {
let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut segment)?;
let segment_serializer = SegmentSerializer::new(segment.schema(), segment_serializer_wrts)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema
.fields()
@@ -160,7 +158,7 @@ impl SegmentWriter {
}
}
FieldType::Str(_) => {
let mut token_streams: Vec<Box<dyn TokenStream>> = vec![];
let mut token_streams: Vec<BoxTokenStream> = vec![];
let mut offsets = vec![];
let mut total_offset = 0;

@@ -173,7 +171,7 @@ impl SegmentWriter {
}

token_streams
.push(Box::new(PreTokenizedStream::from(tok_str.clone())));
.push(PreTokenizedStream::from(tok_str.clone()).into());
}
Value::Str(ref text) => {
if let Some(ref mut tokenizer) =
@@ -192,8 +190,7 @@ impl SegmentWriter {
let num_tokens = if token_streams.is_empty() {
0
} else {
let mut token_stream: Box<dyn TokenStream> =
Box::new(TokenStreamChain::new(offsets, token_streams));
let mut token_stream = TokenStreamChain::new(offsets, token_streams);
self.multifield_postings
.index_text(doc_id, field, &mut token_stream)
};
@@ -1,18 +1,76 @@
use crate::Opstamp;
use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::atomic::Ordering;
use std::sync::Arc;

#[cfg(not(target_arch = "arm"))]
mod atomic_impl {

use crate::Opstamp;
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Default)]
pub struct AtomicU64Wrapper(AtomicU64);

impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
}

pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
self.0.fetch_add(val as u64, order) as u64
}

pub fn revert(&self, val: u64, order: Ordering) -> u64 {
self.0.store(val, order);
val
}
}
}

#[cfg(target_arch = "arm")]
mod atomic_impl {

use crate::Opstamp;
/// Under other architecture, we rely on a mutex.
use std::sync::atomic::Ordering;
use std::sync::RwLock;

#[derive(Default)]
pub struct AtomicU64Wrapper(RwLock<u64>);

impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
AtomicU64Wrapper(RwLock::new(first_opstamp))
}

pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
let mut lock = self.0.write().unwrap();
let previous_val = *lock;
*lock = previous_val + incr;
previous_val
}

pub fn revert(&self, val: u64, _order: Ordering) -> u64 {
let mut lock = self.0.write().unwrap();
*lock = val;
val
}
}
}

use self::atomic_impl::AtomicU64Wrapper;

/// Stamper provides Opstamps, which is just an auto-increment id to label
/// an operation.
///
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
#[derive(Clone, Default)]
pub struct Stamper(Arc<AtomicU64>);
pub struct Stamper(Arc<AtomicU64Wrapper>);

impl Stamper {
pub fn new(first_opstamp: Opstamp) -> Stamper {
Stamper(Arc::new(AtomicU64::new(first_opstamp)))
Stamper(Arc::new(AtomicU64Wrapper::new(first_opstamp)))
}

pub fn stamp(&self) -> Opstamp {
@@ -31,8 +89,7 @@ impl Stamper {

/// Reverts the stamper to a given `Opstamp` value and returns it
pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
self.0.store(to_opstamp, Ordering::SeqCst);
to_opstamp
self.0.revert(to_opstamp, Ordering::SeqCst)
}
}
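The cfg split above exists because some 32-bit ARM targets do not provide 64-bit atomics, so `AtomicU64` is unavailable there and the stamper falls back to an `RwLock<u64>` behind the same `fetch_add`/`revert` interface. A condensed, self-contained sketch of that pattern (not the tantivy module itself):

```rust
#[cfg(not(target_arch = "arm"))]
mod counter {
    use std::sync::atomic::{AtomicU64, Ordering};

    #[derive(Default)]
    pub struct Counter(AtomicU64);

    impl Counter {
        // Returns the previous value, like AtomicU64::fetch_add.
        pub fn fetch_add(&self, incr: u64) -> u64 {
            self.0.fetch_add(incr, Ordering::SeqCst)
        }
    }
}

#[cfg(target_arch = "arm")]
mod counter {
    use std::sync::RwLock;

    #[derive(Default)]
    pub struct Counter(RwLock<u64>);

    impl Counter {
        // Same contract, emulated with a lock where 64-bit atomics are missing.
        pub fn fetch_add(&self, incr: u64) -> u64 {
            let mut guard = self.0.write().unwrap();
            let previous = *guard;
            *guard = previous + incr;
            previous
        }
    }
}
```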
@@ -161,10 +161,11 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset;
pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent;
pub use crate::core::{Executor, SegmentComponent};
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
pub use crate::core::{InvertedIndexReader, SegmentReader};
pub use crate::directory::Directory;
pub use crate::indexer::operation::UserOperation;
pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings;
pub use crate::reader::LeasedItem;
@@ -75,7 +75,7 @@ pub mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::for_segment(&mut segment).unwrap();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
{
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
field_serializer.new_term("abc".as_bytes()).unwrap();
@@ -10,8 +10,8 @@ use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId;
use crate::Result;
use crate::{Directory, DocId};
use std::io::{self, Write};

/// `InvertedIndexSerializer` is in charge of serializing
@@ -54,36 +54,33 @@ pub struct InvertedIndexSerializer {
}

impl InvertedIndexSerializer {
pub(crate) fn for_segment(segment: &mut Segment) -> crate::Result<Self> {
let schema = segment.schema();
use crate::core::SegmentComponent;
let terms_wrt = segment.open_write(SegmentComponent::TERMS)?;
let postings_wrt = segment.open_write(SegmentComponent::POSTINGS)?;
let positions_wrt = segment.open_write(SegmentComponent::POSITIONS)?;
let positions_idx_wrt = segment.open_write(SegmentComponent::POSITIONSSKIP)?;
Ok(Self::open(
schema,
terms_wrt,
postings_wrt,
positions_wrt,
positions_idx_wrt,
))
}
/// Open a new `PostingsSerializer` for the given segment
pub(crate) fn open(
/// Open a new `InvertedIndexSerializer` for the given segment
fn create(
terms_write: CompositeWrite<WritePtr>,
postings_write: CompositeWrite<WritePtr>,
positions_write: CompositeWrite<WritePtr>,
positionsidx_write: CompositeWrite<WritePtr>,
schema: Schema,
terms_wrt: WritePtr,
postings_wrt: WritePtr,
positions_wrt: WritePtr,
positions_idx_wrt: WritePtr,
) -> InvertedIndexSerializer {
InvertedIndexSerializer {
terms_write: CompositeWrite::wrap(terms_wrt),
postings_write: CompositeWrite::wrap(postings_wrt),
positions_write: CompositeWrite::wrap(positions_wrt),
positionsidx_write: CompositeWrite::wrap(positions_idx_wrt),
) -> Result<InvertedIndexSerializer> {
Ok(InvertedIndexSerializer {
terms_write,
postings_write,
positions_write,
positionsidx_write,
schema,
}
})
}

/// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> {
use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
InvertedIndexSerializer::create(
CompositeWrite::wrap(segment.open_write(TERMS)?),
CompositeWrite::wrap(segment.open_write(POSTINGS)?),
CompositeWrite::wrap(segment.open_write(POSITIONS)?),
CompositeWrite::wrap(segment.open_write(POSITIONSSKIP)?),
segment.schema(),
)
}

/// Must be called before starting pushing terms of
@@ -151,8 +148,7 @@ impl<'a> FieldSerializer<'a> {
}
_ => (false, false),
};
let term_dictionary_builder =
TermDictionaryBuilder::create(term_dictionary_write, &field_type)?;
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let postings_serializer =
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if position_enabled {
@@ -15,6 +15,7 @@ use tantivy_fst::Automaton;
pub struct AutomatonWeight<A> {
field: Field,
automaton: Arc<A>,
boost: f32,
}

impl<A> AutomatonWeight<A>
@@ -26,9 +27,15 @@ where
AutomatonWeight {
field,
automaton: automaton.into(),
boost: 1.0,
}
}

/// Boost the scorer by the given factor.
pub fn boost_by(self, boost: f32) -> Self {
Self { boost, ..self }
}

fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> {
let automaton: &A = &*self.automaton;
let term_stream_builder = term_dict.search(automaton);
@@ -58,7 +65,7 @@ where
}
}
let doc_bitset = BitSetDocSet::from(doc_bitset);
Ok(Box::new(ConstScorer::new(doc_bitset)))
Ok(Box::new(ConstScorer::with_score(doc_bitset, self.boost)))
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
@@ -34,7 +34,7 @@ pub struct BM25Weight {
}

impl BM25Weight {
pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight {
pub fn for_terms(searcher: &Searcher, terms: &[Term], boost: f32) -> BM25Weight {
assert!(!terms.is_empty(), "BM25 requires at least one term");
let field = terms[0].field();
for term in &terms[1..] {
@@ -75,11 +75,11 @@ impl BM25Weight {
.sum::<f32>();
idf_explain = Explanation::new("idf", idf);
}
BM25Weight::new(idf_explain, average_fieldnorm)
BM25Weight::new(idf_explain, average_fieldnorm, boost)
}

fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
let weight = idf_explain.value() * (1f32 + K1);
fn new(idf_explain: Explanation, average_fieldnorm: f32, boost: f32) -> BM25Weight {
let weight = idf_explain.value() * (1f32 + K1) * boost;
BM25Weight {
idf_explain,
weight,
@@ -79,6 +79,7 @@ pub struct FuzzyTermQuery {
|
||||
transposition_cost_one: bool,
|
||||
///
|
||||
prefix: bool,
|
||||
boost: f32,
|
||||
}
|
||||
|
||||
impl FuzzyTermQuery {
|
||||
@@ -89,6 +90,7 @@ impl FuzzyTermQuery {
|
||||
distance,
|
||||
transposition_cost_one,
|
||||
prefix: false,
|
||||
boost: 1.0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,16 +101,22 @@ impl FuzzyTermQuery {
|
||||
distance,
|
||||
transposition_cost_one,
|
||||
prefix: true,
|
||||
boost: 1.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Boost the query score by the given factor.
|
||||
pub fn boost_by(self, boost: f32) -> Self {
|
||||
Self { boost, ..self }
|
||||
}
|
||||
|
||||
fn specialized_weight(&self) -> Result<AutomatonWeight<DFA>> {
|
||||
// LEV_BUILDER is a HashMap, whose `get` method returns an Option
|
||||
match LEV_BUILDER.get(&(self.distance, false)) {
|
||||
// Unwrap the option and build the Ok(AutomatonWeight)
|
||||
Some(automaton_builder) => {
|
||||
let automaton = automaton_builder.build_dfa(self.term.text());
|
||||
Ok(AutomatonWeight::new(self.term.field(), automaton))
|
||||
Ok(AutomatonWeight::new(self.term.field(), automaton).boost_by(self.boost))
|
||||
}
|
||||
None => Err(InvalidArgument(format!(
|
||||
"Levenshtein distance of {} is not allowed. Choose a value in the {:?} range",
|
||||
|
||||
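A hedged usage sketch of the new builder-style boost on a fuzzy query; the field, term text and distance are illustrative, and `FuzzyTermQuery::new` is assumed to keep its existing `(term, distance, transposition_cost_one)` signature.

// Sketch only:
let term = Term::from_field_text(title_field, "tantivi");
let query = FuzzyTermQuery::new(term, 1, true).boost_by(2.0);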
@@ -27,6 +27,7 @@ use std::collections::BTreeSet;
|
||||
pub struct PhraseQuery {
|
||||
field: Field,
|
||||
phrase_terms: Vec<(usize, Term)>,
|
||||
boost: f32,
|
||||
}
|
||||
|
||||
impl PhraseQuery {
|
||||
@@ -57,9 +58,15 @@ impl PhraseQuery {
|
||||
PhraseQuery {
|
||||
field,
|
||||
phrase_terms: terms,
|
||||
boost: 1.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Boost the query score by the given factor.
|
||||
pub fn boost_by(self, boost: f32) -> Self {
|
||||
Self { boost, ..self }
|
||||
}
|
||||
|
||||
/// The `Field` this `PhraseQuery` is targeting.
|
||||
pub fn field(&self) -> Field {
|
||||
self.field
|
||||
@@ -97,7 +104,7 @@ impl PhraseQuery {
|
||||
)));
|
||||
}
|
||||
let terms = self.phrase_terms();
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &terms);
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &terms, self.boost);
|
||||
Ok(PhraseWeight::new(
|
||||
self.phrase_terms.clone(),
|
||||
bm25_weight,
|
||||
|
||||
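The same pattern applies to phrase queries. A hedged sketch, assuming the existing `PhraseQuery::new` constructor over a vector of terms:

// Sketch only:
let query = PhraseQuery::new(vec![
    Term::from_field_text(text_field, "hello"),
    Term::from_field_text(text_field, "world"),
])
.boost_by(3.0);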
@@ -533,7 +533,7 @@ mod test {
|
||||
use crate::schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
|
||||
use crate::schema::{Schema, Term, INDEXED, STORED, STRING, TEXT};
|
||||
use crate::tokenizer::{
|
||||
LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager,
|
||||
LowerCaser, SimpleTokenizer, StopWordFilter, TextAnalyzer, TokenizerManager,
|
||||
};
|
||||
use crate::Index;
|
||||
use matches::assert_matches;
|
||||
@@ -563,7 +563,7 @@ mod test {
|
||||
let tokenizer_manager = TokenizerManager::default();
|
||||
tokenizer_manager.register(
|
||||
"en_with_stop_words",
|
||||
SimpleTokenizer
|
||||
TextAnalyzer::from(SimpleTokenizer)
|
||||
.filter(LowerCaser)
|
||||
.filter(StopWordFilter::remove(vec!["the".to_string()])),
|
||||
);
|
||||
|
||||
@@ -54,6 +54,7 @@ use tantivy_fst::Regex;
|
||||
pub struct RegexQuery {
|
||||
regex: Arc<Regex>,
|
||||
field: Field,
|
||||
boost: f32,
|
||||
}
|
||||
|
||||
impl RegexQuery {
|
||||
@@ -69,11 +70,17 @@ impl RegexQuery {
|
||||
RegexQuery {
|
||||
regex: regex.into(),
|
||||
field,
|
||||
boost: 1.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Boost the query score by the given factor.
|
||||
pub fn boost_by(self, boost: f32) -> Self {
|
||||
Self { boost, ..self }
|
||||
}
|
||||
|
||||
fn specialized_weight(&self) -> AutomatonWeight<Regex> {
|
||||
AutomatonWeight::new(self.field, self.regex.clone())
|
||||
AutomatonWeight::new(self.field, self.regex.clone()).boost_by(self.boost)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -56,6 +56,11 @@ impl<TDocSet: DocSet> ConstScorer<TDocSet> {
}
}

/// Creates a new `ConstScorer` with a custom score value
pub fn with_score(docset: TDocSet, score: f32) -> ConstScorer<TDocSet> {
ConstScorer { docset, score }
}

/// Sets the constant score to a different value.
pub fn set_score(&mut self, score: Score) {
self.score = score;

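A hedged sketch of the two new entry points; the `docset` value is assumed to be built elsewhere (e.g. a `BitSetDocSet`).

// Sketch only:
let mut scorer = ConstScorer::with_score(docset, 2.5);
// The constant score can still be adjusted afterwards:
scorer.set_score(0.5);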
@@ -45,6 +45,35 @@ mod tests {
|
||||
assert_eq!(term_scorer.score(), 0.28768212);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_term_query_boost_by() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", STRING);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field => "a");
|
||||
index_writer.add_document(doc);
|
||||
}
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let term_query = TermQuery::new(
|
||||
Term::from_field_text(text_field, "a"),
|
||||
IndexRecordOption::Basic,
|
||||
)
|
||||
.boost_by(42.0);
|
||||
let term_weight = term_query.weight(&searcher, true).unwrap();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let mut term_scorer = term_weight.scorer(segment_reader).unwrap();
|
||||
assert!(term_scorer.advance());
|
||||
assert_eq!(term_scorer.doc(), 0);
|
||||
assert_nearly_equals(0.28768212 * 42.0, term_scorer.score());
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_term_weight() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
|
||||
@@ -61,6 +61,7 @@ use std::fmt;
|
||||
pub struct TermQuery {
|
||||
term: Term,
|
||||
index_record_option: IndexRecordOption,
|
||||
boost: f32,
|
||||
}
|
||||
|
||||
impl fmt::Debug for TermQuery {
|
||||
@@ -75,9 +76,15 @@ impl TermQuery {
|
||||
TermQuery {
|
||||
term,
|
||||
index_record_option: segment_postings_options,
|
||||
boost: 1.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Boost the query score by the given factor.
|
||||
pub fn boost_by(self, boost: f32) -> Self {
|
||||
Self { boost, ..self }
|
||||
}
|
||||
|
||||
/// The `Term` this query is built out of.
|
||||
pub fn term(&self) -> &Term {
|
||||
&self.term
|
||||
@@ -90,7 +97,7 @@ impl TermQuery {
|
||||
/// This is useful for optimization purpose.
|
||||
pub fn specialized_weight(&self, searcher: &Searcher, scoring_enabled: bool) -> TermWeight {
|
||||
let term = self.term.clone();
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &[term]);
|
||||
let bm25_weight = BM25Weight::for_terms(searcher, &[term], self.boost);
|
||||
let index_record_option = if scoring_enabled {
|
||||
self.index_record_option
|
||||
} else {
|
||||
|
||||
@@ -122,6 +122,11 @@ impl Facet {
pub fn to_path(&self) -> Vec<&str> {
self.encoded_str().split(|c| c == FACET_SEP_CHAR).collect()
}

/// This function is the inverse of Facet::from(&str).
pub fn to_path_string(&self) -> String {
format!("{}", self.to_string())
}
}

impl Borrow<str> for Facet {
@@ -265,4 +270,21 @@ mod tests {
let facet = Facet::from_path(v.iter());
assert_eq!(facet.to_path(), v);
}

#[test]
fn test_to_path_string() {
let v = ["first", "second", "third/not_fourth"];
let facet = Facet::from_path(v.iter());
assert_eq!(
facet.to_path_string(),
String::from("/first/second/third\\/not_fourth")
);
}

#[test]
fn test_to_path_string_empty() {
let v: Vec<&str> = vec![];
let facet = Facet::from_path(v.iter());
assert_eq!(facet.to_path_string(), "/");
}
}

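A hedged round-trip sketch of the new accessor; the facet path is illustrative and `Facet::from(&str)` is the existing constructor the doc comment refers to.

// Sketch only:
let facet = Facet::from("/category/electronics");
assert_eq!(facet.to_path(), vec!["category", "electronics"]);
assert_eq!(facet.to_path_string(), "/category/electronics");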
@@ -1,8 +1,7 @@
|
||||
use crate::query::Query;
|
||||
use crate::schema::Field;
|
||||
use crate::schema::Value;
|
||||
use crate::tokenizer::BoxedTokenizer;
|
||||
use crate::tokenizer::{Token, TokenStream};
|
||||
use crate::tokenizer::{TextAnalyzer, Token};
|
||||
use crate::Document;
|
||||
use crate::Result;
|
||||
use crate::Searcher;
|
||||
@@ -142,7 +141,7 @@ impl Snippet {
|
||||
/// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\
|
||||
/// has to be a valid string.
|
||||
fn search_fragments<'a>(
|
||||
tokenizer: &BoxedTokenizer,
|
||||
tokenizer: &TextAnalyzer,
|
||||
text: &'a str,
|
||||
terms: &BTreeMap<String, f32>,
|
||||
max_num_chars: usize,
|
||||
@@ -251,7 +250,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
|
||||
/// ```
|
||||
pub struct SnippetGenerator {
|
||||
terms_text: BTreeMap<String, f32>,
|
||||
tokenizer: BoxedTokenizer,
|
||||
tokenizer: TextAnalyzer,
|
||||
field: Field,
|
||||
max_num_chars: usize,
|
||||
}
|
||||
@@ -347,12 +346,11 @@ Survey in 2016, 2017, and 2018."#;
|
||||
|
||||
#[test]
|
||||
fn test_snippet() {
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
let terms = btreemap! {
|
||||
String::from("rust") => 1.0,
|
||||
String::from("language") => 0.9
|
||||
};
|
||||
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 100);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 100);
|
||||
assert_eq!(fragments.len(), 7);
|
||||
{
|
||||
let first = &fragments[0];
|
||||
@@ -374,13 +372,12 @@ Survey in 2016, 2017, and 2018."#;
|
||||
|
||||
#[test]
|
||||
fn test_snippet_scored_fragment() {
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
{
|
||||
let terms = btreemap! {
|
||||
String::from("rust") =>1.0f32,
|
||||
String::from("language") => 0.9f32
|
||||
};
|
||||
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
|
||||
{
|
||||
let first = &fragments[0];
|
||||
assert_eq!(first.score, 1.0);
|
||||
@@ -389,13 +386,12 @@ Survey in 2016, 2017, and 2018."#;
|
||||
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
|
||||
assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems")
|
||||
}
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
{
|
||||
let terms = btreemap! {
|
||||
String::from("rust") =>0.9f32,
|
||||
String::from("language") => 1.0f32
|
||||
};
|
||||
let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
|
||||
//assert_eq!(fragments.len(), 7);
|
||||
{
|
||||
let first = &fragments[0];
|
||||
@@ -409,14 +405,12 @@ Survey in 2016, 2017, and 2018."#;
|
||||
|
||||
#[test]
|
||||
fn test_snippet_in_second_fragment() {
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
|
||||
let text = "a b c d e f g";
|
||||
|
||||
let mut terms = BTreeMap::new();
|
||||
terms.insert(String::from("c"), 1.0);
|
||||
|
||||
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
|
||||
|
||||
assert_eq!(fragments.len(), 1);
|
||||
{
|
||||
@@ -433,14 +427,12 @@ Survey in 2016, 2017, and 2018."#;
|
||||
|
||||
#[test]
|
||||
fn test_snippet_with_term_at_the_end_of_fragment() {
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
|
||||
let text = "a b c d e f f g";
|
||||
|
||||
let mut terms = BTreeMap::new();
|
||||
terms.insert(String::from("f"), 1.0);
|
||||
|
||||
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
|
||||
|
||||
assert_eq!(fragments.len(), 2);
|
||||
{
|
||||
@@ -457,15 +449,13 @@ Survey in 2016, 2017, and 2018."#;
|
||||
|
||||
#[test]
|
||||
fn test_snippet_with_second_fragment_has_the_highest_score() {
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
|
||||
let text = "a b c d e f g";
|
||||
|
||||
let mut terms = BTreeMap::new();
|
||||
terms.insert(String::from("f"), 1.0);
|
||||
terms.insert(String::from("a"), 0.9);
|
||||
|
||||
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 7);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 7);
|
||||
|
||||
assert_eq!(fragments.len(), 2);
|
||||
{
|
||||
@@ -482,14 +472,12 @@ Survey in 2016, 2017, and 2018."#;
|
||||
|
||||
#[test]
|
||||
fn test_snippet_with_term_not_in_text() {
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
|
||||
let text = "a b c d";
|
||||
|
||||
let mut terms = BTreeMap::new();
|
||||
terms.insert(String::from("z"), 1.0);
|
||||
|
||||
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
|
||||
|
||||
assert_eq!(fragments.len(), 0);
|
||||
|
||||
@@ -500,12 +488,10 @@ Survey in 2016, 2017, and 2018."#;
|
||||
|
||||
#[test]
|
||||
fn test_snippet_with_no_terms() {
|
||||
let boxed_tokenizer = SimpleTokenizer.into();
|
||||
|
||||
let text = "a b c d";
|
||||
|
||||
let terms = BTreeMap::new();
|
||||
let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
|
||||
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
|
||||
assert_eq!(fragments.len(), 0);
|
||||
|
||||
let snippet = select_best_fragment_combination(&fragments[..], &text);
|
||||
|
||||
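A hedged sketch of the updated (module-private) helper signature: `search_fragments` now takes a `&TextAnalyzer` instead of a `&BoxedTokenizer`, so a filtered analyzer can be passed directly; everything besides the signature is illustrative and mirrors the tests above.

// Sketch only:
let analyzer = TextAnalyzer::from(SimpleTokenizer).filter(LowerCaser);
let fragments = search_fragments(&analyzer, TEST_TEXT, &terms, 100);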
@@ -57,7 +57,7 @@ use self::compression_snap::{compress, decompress};
|
||||
pub mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
|
||||
use crate::directory::{Directory, RAMDirectory, WritePtr};
|
||||
use crate::schema::Document;
|
||||
use crate::schema::FieldValue;
|
||||
use crate::schema::Schema;
|
||||
|
||||
@@ -36,9 +36,9 @@ pub use self::termdict::{TermDictionary, TermDictionaryBuilder};
|
||||
mod tests {
|
||||
use super::{TermDictionary, TermDictionaryBuilder, TermStreamer};
|
||||
use crate::core::Index;
|
||||
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, ReadOnlySource};
|
||||
use crate::directory::{Directory, RAMDirectory, ReadOnlySource};
|
||||
use crate::postings::TermInfo;
|
||||
use crate::schema::{Document, FieldType, Schema, TEXT};
|
||||
use crate::schema::{Document, Schema, TEXT};
|
||||
use std::path::PathBuf;
|
||||
use std::str;
|
||||
|
||||
@@ -52,6 +52,12 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_term_dictionary() {
|
||||
let empty = TermDictionary::empty();
|
||||
assert!(empty.stream().next().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_term_ordinals() {
|
||||
const COUNTRIES: [&'static str; 7] = [
|
||||
@@ -67,9 +73,7 @@ mod tests {
|
||||
let path = PathBuf::from("TermDictionary");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(write, &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
|
||||
for term in COUNTRIES.iter() {
|
||||
term_dictionary_builder
|
||||
.insert(term.as_bytes(), &make_term_info(0u64))
|
||||
@@ -93,9 +97,7 @@ mod tests {
|
||||
let path = PathBuf::from("TermDictionary");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(write, &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
|
||||
term_dictionary_builder
|
||||
.insert("abc".as_bytes(), &make_term_info(34u64))
|
||||
.unwrap();
|
||||
@@ -179,10 +181,8 @@ mod tests {
|
||||
let ids: Vec<_> = (0u32..10_000u32)
|
||||
.map(|i| (format!("doc{:0>6}", i), i))
|
||||
.collect();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
for &(ref id, ref i) in &ids {
|
||||
term_dictionary_builder
|
||||
.insert(id.as_bytes(), &make_term_info(*i as u64))
|
||||
@@ -209,10 +209,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_stream_high_range_prefix_suffix() {
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
// term requires more than 16bits
|
||||
term_dictionary_builder
|
||||
.insert("abcdefghijklmnopqrstuvwxy", &make_term_info(1))
|
||||
@@ -244,10 +242,8 @@ mod tests {
|
||||
let ids: Vec<_> = (0u32..10_000u32)
|
||||
.map(|i| (format!("doc{:0>6}", i), i))
|
||||
.collect();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
for &(ref id, ref i) in &ids {
|
||||
term_dictionary_builder
|
||||
.insert(id.as_bytes(), &make_term_info(*i as u64))
|
||||
@@ -313,10 +309,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_empty_string() {
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
term_dictionary_builder
|
||||
.insert(&[], &make_term_info(1 as u64))
|
||||
.unwrap();
|
||||
@@ -337,10 +331,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_stream_range_boundaries() {
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let buffer: Vec<u8> = {
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(vec![], &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
|
||||
for i in 0u8..10u8 {
|
||||
let number_arr = [i; 1];
|
||||
term_dictionary_builder
|
||||
@@ -352,41 +344,91 @@ mod tests {
|
||||
let source = ReadOnlySource::from(buffer);
|
||||
let term_dictionary: TermDictionary = TermDictionary::from_source(&source);
|
||||
|
||||
let value_list = |mut streamer: TermStreamer<'_>| {
|
||||
let value_list = |mut streamer: TermStreamer<'_>, backwards: bool| {
|
||||
let mut res: Vec<u32> = vec![];
|
||||
while let Some((_, ref v)) = streamer.next() {
|
||||
res.push(v.doc_freq);
|
||||
}
|
||||
if backwards {
|
||||
res.reverse();
|
||||
}
|
||||
res
|
||||
};
|
||||
{
|
||||
let range = term_dictionary.range().backward().into_stream();
|
||||
assert_eq!(
|
||||
value_list(range, true),
|
||||
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().ge([2u8]).into_stream();
|
||||
assert_eq!(
|
||||
value_list(range),
|
||||
value_list(range, false),
|
||||
vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().ge([2u8]).backward().into_stream();
|
||||
assert_eq!(
|
||||
value_list(range, true),
|
||||
vec![2u32, 3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().gt([2u8]).into_stream();
|
||||
assert_eq!(
|
||||
value_list(range),
|
||||
value_list(range, false),
|
||||
vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().gt([2u8]).backward().into_stream();
|
||||
assert_eq!(
|
||||
value_list(range, true),
|
||||
vec![3u32, 4u32, 5u32, 6u32, 7u32, 8u32, 9u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().lt([6u8]).into_stream();
|
||||
assert_eq!(value_list(range), vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]);
|
||||
assert_eq!(
|
||||
value_list(range, false),
|
||||
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().lt([6u8]).backward().into_stream();
|
||||
assert_eq!(
|
||||
value_list(range, true),
|
||||
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().le([6u8]).into_stream();
|
||||
assert_eq!(
|
||||
value_list(range),
|
||||
value_list(range, false),
|
||||
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().le([6u8]).backward().into_stream();
|
||||
assert_eq!(
|
||||
value_list(range, true),
|
||||
vec![0u32, 1u32, 2u32, 3u32, 4u32, 5u32, 6u32]
|
||||
);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary.range().ge([0u8]).lt([5u8]).into_stream();
|
||||
assert_eq!(value_list(range), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
|
||||
assert_eq!(value_list(range, false), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
|
||||
}
|
||||
{
|
||||
let range = term_dictionary
|
||||
.range()
|
||||
.ge([0u8])
|
||||
.lt([5u8])
|
||||
.backward()
|
||||
.into_stream();
|
||||
assert_eq!(value_list(range, true), vec![0u32, 1u32, 2u32, 3u32, 4u32]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -408,9 +450,7 @@ mod tests {
|
||||
let path = PathBuf::from("TermDictionary");
|
||||
{
|
||||
let write = directory.open_write(&path).unwrap();
|
||||
let field_type = FieldType::Str(TEXT);
|
||||
let mut term_dictionary_builder =
|
||||
TermDictionaryBuilder::create(write, &field_type).unwrap();
|
||||
let mut term_dictionary_builder = TermDictionaryBuilder::create(write).unwrap();
|
||||
for term in COUNTRIES.iter() {
|
||||
term_dictionary_builder
|
||||
.insert(term.as_bytes(), &make_term_info(0u64))
|
||||
|
||||
@@ -51,6 +51,12 @@
self
}

/// Iterate over the range backwards.
pub fn backward(mut self) -> Self {
self.stream_builder = self.stream_builder.backward();
self
}

/// Creates the stream corresponding to the range
/// of terms defined using the `TermStreamerBuilder`.
pub fn into_stream(self) -> TermStreamer<'a, A> {

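A hedged usage sketch of the new backward iteration; it mirrors `test_stream_range_boundaries` above, with `term_dict` an existing `TermDictionary`.

// Sketch only: stream the terms >= [2u8] in reverse order.
let mut stream = term_dict.range().ge([2u8]).backward().into_stream();
while let Some((term_bytes, term_info)) = stream.next() {
    // Terms arrive from the highest matching key down to [2u8].
    println!("{:?} -> doc_freq {}", term_bytes, term_info.doc_freq);
}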
@@ -4,8 +4,8 @@ use crate::common::BinarySerializable;
|
||||
use crate::common::CountingWriter;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::postings::TermInfo;
|
||||
use crate::schema::FieldType;
|
||||
use crate::termdict::TermOrdinal;
|
||||
use once_cell::sync::Lazy;
|
||||
use std::io::{self, Write};
|
||||
use tantivy_fst;
|
||||
use tantivy_fst::raw::Fst;
|
||||
@@ -29,7 +29,7 @@ where
|
||||
W: Write,
|
||||
{
|
||||
/// Creates a new `TermDictionaryBuilder`
|
||||
pub fn create(w: W, _field_type: &FieldType) -> io::Result<Self> {
|
||||
pub fn create(w: W) -> io::Result<Self> {
|
||||
let fst_builder = tantivy_fst::MapBuilder::new(w).map_err(convert_fst_error)?;
|
||||
Ok(TermDictionaryBuilder {
|
||||
fst_builder,
|
||||
@@ -92,6 +92,14 @@ fn open_fst_index(source: ReadOnlySource) -> tantivy_fst::Map<ReadOnlySource> {
|
||||
tantivy_fst::Map::from(fst)
|
||||
}
|
||||
|
||||
static EMPTY_DATA_SOURCE: Lazy<ReadOnlySource> = Lazy::new(|| {
|
||||
let term_dictionary_data: Vec<u8> = TermDictionaryBuilder::create(Vec::<u8>::new())
|
||||
.expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
|
||||
.finish()
|
||||
.expect("Writing in a Vec<u8> should never fail");
|
||||
ReadOnlySource::from(term_dictionary_data)
|
||||
});
|
||||
|
||||
/// The term dictionary contains all of the terms in
|
||||
/// `tantivy index` in a sorted manner.
|
||||
///
|
||||
@@ -122,14 +130,8 @@ impl TermDictionary {
|
||||
}
|
||||
|
||||
/// Creates an empty term dictionary which contains no terms.
|
||||
pub fn empty(field_type: &FieldType) -> Self {
|
||||
let term_dictionary_data: Vec<u8> =
|
||||
TermDictionaryBuilder::create(Vec::<u8>::new(), &field_type)
|
||||
.expect("Creating a TermDictionaryBuilder in a Vec<u8> should never fail")
|
||||
.finish()
|
||||
.expect("Writing in a Vec<u8> should never fail");
|
||||
let source = ReadOnlySource::from(term_dictionary_data);
|
||||
Self::from_source(&source)
|
||||
pub fn empty() -> Self {
|
||||
TermDictionary::from_source(&*EMPTY_DATA_SOURCE)
|
||||
}
|
||||
|
||||
/// Returns the number of terms in the dictionary.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//! ```rust
|
||||
//! use tantivy::tokenizer::*;
|
||||
//!
|
||||
//! let tokenizer = RawTokenizer
|
||||
//! let tokenizer = TextAnalyzer::from(RawTokenizer)
|
||||
//! .filter(AlphaNumOnlyFilter);
|
||||
//!
|
||||
//! let mut stream = tokenizer.token_stream("hello there");
|
||||
@@ -10,7 +10,7 @@
|
||||
//! // contains a space
|
||||
//! assert!(stream.next().is_none());
|
||||
//!
|
||||
//! let tokenizer = SimpleTokenizer
|
||||
//! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
|
||||
//! .filter(AlphaNumOnlyFilter);
|
||||
//!
|
||||
//! let mut stream = tokenizer.token_stream("hello there 💣");
|
||||
@@ -19,56 +19,30 @@
|
||||
//! // the "emoji" is dropped because its not an alphanum
|
||||
//! assert!(stream.next().is_none());
|
||||
//! ```
|
||||
use super::{Token, TokenFilter, TokenStream};
|
||||
use super::{BoxTokenStream, Token, TokenFilter, TokenStream};
|
||||
|
||||
/// `TokenFilter` that removes all tokens that contain non
|
||||
/// ascii alphanumeric characters.
|
||||
#[derive(Clone)]
|
||||
pub struct AlphaNumOnlyFilter;
|
||||
|
||||
pub struct AlphaNumOnlyFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
tail: TailTokenStream,
|
||||
pub struct AlphaNumOnlyFilterStream<'a> {
|
||||
tail: BoxTokenStream<'a>,
|
||||
}
|
||||
|
||||
impl<TailTokenStream> AlphaNumOnlyFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
impl<'a> AlphaNumOnlyFilterStream<'a> {
|
||||
fn predicate(&self, token: &Token) -> bool {
|
||||
token.text.chars().all(|c| c.is_ascii_alphanumeric())
|
||||
}
|
||||
}
|
||||
|
||||
fn wrap(tail: TailTokenStream) -> AlphaNumOnlyFilterStream<TailTokenStream> {
|
||||
AlphaNumOnlyFilterStream { tail }
|
||||
impl TokenFilter for AlphaNumOnlyFilter {
|
||||
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
|
||||
BoxTokenStream::from(AlphaNumOnlyFilterStream { tail: token_stream })
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenFilter<TailTokenStream> for AlphaNumOnlyFilter
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
type ResultTokenStream = AlphaNumOnlyFilterStream<TailTokenStream>;
|
||||
|
||||
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
|
||||
AlphaNumOnlyFilterStream::wrap(token_stream)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenStream for AlphaNumOnlyFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
|
||||
impl<'a> TokenStream for AlphaNumOnlyFilterStream<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
while self.tail.advance() {
|
||||
if self.predicate(self.tail.token()) {
|
||||
@@ -78,4 +52,12 @@ where
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
}
|
||||
|
||||
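For contrast with the old static-dispatch version above, a hedged sketch of what a user-defined filter looks like under the new object-safe `TokenFilter` trait. `UpperCaseFilter` and its behaviour are hypothetical; the shape mirrors `AlphaNumOnlyFilter`.

// Sketch only, assuming the usual `use tantivy::tokenizer::{BoxTokenStream, Token, TokenFilter, TokenStream};`
#[derive(Clone)]
struct UpperCaseFilter;

struct UpperCaseFilterStream<'a> {
    tail: BoxTokenStream<'a>,
}

impl TokenFilter for UpperCaseFilter {
    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
        BoxTokenStream::from(UpperCaseFilterStream { tail: token_stream })
    }
}

impl<'a> TokenStream for UpperCaseFilterStream<'a> {
    fn advance(&mut self) -> bool {
        if !self.tail.advance() {
            return false;
        }
        // Rewrite the token text in place after each advance.
        let upper = self.tail.token().text.to_uppercase();
        self.tail.token_mut().text = upper;
        true
    }

    fn token(&self) -> &Token {
        self.tail.token()
    }

    fn token_mut(&mut self) -> &mut Token {
        self.tail.token_mut()
    }
}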
@@ -1,4 +1,4 @@
|
||||
use super::{Token, TokenFilter, TokenStream};
|
||||
use super::{BoxTokenStream, Token, TokenFilter, TokenStream};
|
||||
use std::mem;
|
||||
|
||||
/// This class converts alphabetic, numeric, and symbolic Unicode characters
|
||||
@@ -7,26 +7,21 @@ use std::mem;
|
||||
#[derive(Clone)]
|
||||
pub struct AsciiFoldingFilter;
|
||||
|
||||
impl<TailTokenStream> TokenFilter<TailTokenStream> for AsciiFoldingFilter
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
type ResultTokenStream = AsciiFoldingFilterTokenStream<TailTokenStream>;
|
||||
|
||||
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
|
||||
AsciiFoldingFilterTokenStream::wrap(token_stream)
|
||||
impl TokenFilter for AsciiFoldingFilter {
|
||||
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
|
||||
From::from(AsciiFoldingFilterTokenStream {
|
||||
tail: token_stream,
|
||||
buffer: String::with_capacity(100),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AsciiFoldingFilterTokenStream<TailTokenStream> {
|
||||
pub struct AsciiFoldingFilterTokenStream<'a> {
|
||||
buffer: String,
|
||||
tail: TailTokenStream,
|
||||
tail: BoxTokenStream<'a>,
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenStream for AsciiFoldingFilterTokenStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
impl<'a> TokenStream for AsciiFoldingFilterTokenStream<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
if !self.tail.advance() {
|
||||
return false;
|
||||
@@ -48,18 +43,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> AsciiFoldingFilterTokenStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn wrap(tail: TailTokenStream) -> AsciiFoldingFilterTokenStream<TailTokenStream> {
|
||||
AsciiFoldingFilterTokenStream {
|
||||
tail,
|
||||
buffer: String::with_capacity(100),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a string that represents the ascii folded version of
|
||||
// the character. If the `char` does not require ascii folding
|
||||
// (e.g. simple ASCII chars like `A`) or if the `char`
|
||||
@@ -1561,8 +1544,7 @@ mod tests {
|
||||
use crate::tokenizer::AsciiFoldingFilter;
|
||||
use crate::tokenizer::RawTokenizer;
|
||||
use crate::tokenizer::SimpleTokenizer;
|
||||
use crate::tokenizer::TokenStream;
|
||||
use crate::tokenizer::Tokenizer;
|
||||
use crate::tokenizer::TextAnalyzer;
|
||||
use std::iter;
|
||||
|
||||
#[test]
|
||||
@@ -1579,7 +1561,7 @@ mod tests {
|
||||
|
||||
fn folding_helper(text: &str) -> Vec<String> {
|
||||
let mut tokens = Vec::new();
|
||||
SimpleTokenizer
|
||||
TextAnalyzer::from(SimpleTokenizer)
|
||||
.filter(AsciiFoldingFilter)
|
||||
.token_stream(text)
|
||||
.process(&mut |token| {
|
||||
@@ -1589,7 +1571,9 @@ mod tests {
|
||||
}
|
||||
|
||||
fn folding_using_raw_tokenizer_helper(text: &str) -> String {
|
||||
let mut token_stream = RawTokenizer.filter(AsciiFoldingFilter).token_stream(text);
|
||||
let mut token_stream = TextAnalyzer::from(RawTokenizer)
|
||||
.filter(AsciiFoldingFilter)
|
||||
.token_stream(text);
|
||||
token_stream.advance();
|
||||
token_stream.token().text.clone()
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use super::{Token, TokenStream, Tokenizer};
|
||||
use super::{BoxTokenStream, Token, TokenStream, Tokenizer};
|
||||
use crate::schema::FACET_SEP_BYTE;
|
||||
|
||||
/// The `FacetTokenizer` process a `Facet` binary representation
|
||||
@@ -25,15 +25,14 @@ pub struct FacetTokenStream<'a> {
|
||||
token: Token,
|
||||
}
|
||||
|
||||
impl<'a> Tokenizer<'a> for FacetTokenizer {
|
||||
type TokenStreamImpl = FacetTokenStream<'a>;
|
||||
|
||||
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
|
||||
impl Tokenizer for FacetTokenizer {
|
||||
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
|
||||
FacetTokenStream {
|
||||
text,
|
||||
state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet.
|
||||
token: Token::default(),
|
||||
}
|
||||
.into()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +83,7 @@ mod tests {
|
||||
|
||||
use super::FacetTokenizer;
|
||||
use crate::schema::Facet;
|
||||
use crate::tokenizer::{Token, TokenStream, Tokenizer};
|
||||
use crate::tokenizer::{Token, Tokenizer};
|
||||
|
||||
#[test]
|
||||
fn test_facet_tokenizer() {
|
||||
|
||||
@@ -1,24 +1,23 @@
|
||||
use super::{Token, TokenFilter, TokenStream};
|
||||
use crate::tokenizer::BoxTokenStream;
|
||||
use std::mem;
|
||||
|
||||
impl TokenFilter for LowerCaser {
|
||||
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
|
||||
BoxTokenStream::from(LowerCaserTokenStream {
|
||||
tail: token_stream,
|
||||
buffer: String::with_capacity(100),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Token filter that lowercase terms.
|
||||
#[derive(Clone)]
|
||||
pub struct LowerCaser;
|
||||
|
||||
impl<TailTokenStream> TokenFilter<TailTokenStream> for LowerCaser
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
type ResultTokenStream = LowerCaserTokenStream<TailTokenStream>;
|
||||
|
||||
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
|
||||
LowerCaserTokenStream::wrap(token_stream)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LowerCaserTokenStream<TailTokenStream> {
|
||||
pub struct LowerCaserTokenStream<'a> {
|
||||
buffer: String,
|
||||
tail: TailTokenStream,
|
||||
tail: BoxTokenStream<'a>,
|
||||
}
|
||||
|
||||
// writes a lowercased version of text into output.
|
||||
@@ -31,18 +30,7 @@ fn to_lowercase_unicode(text: &mut String, output: &mut String) {
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenStream for LowerCaserTokenStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
|
||||
impl<'a> TokenStream for LowerCaserTokenStream<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
if !self.tail.advance() {
|
||||
return false;
|
||||
@@ -56,26 +44,19 @@ where
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> LowerCaserTokenStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn wrap(tail: TailTokenStream) -> LowerCaserTokenStream<TailTokenStream> {
|
||||
LowerCaserTokenStream {
|
||||
tail,
|
||||
buffer: String::with_capacity(100),
|
||||
}
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::tokenizer::LowerCaser;
|
||||
use crate::tokenizer::SimpleTokenizer;
|
||||
use crate::tokenizer::TokenStream;
|
||||
use crate::tokenizer::Tokenizer;
|
||||
use crate::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};
|
||||
|
||||
#[test]
|
||||
fn test_to_lower_case() {
|
||||
@@ -87,7 +68,9 @@ mod tests {
|
||||
|
||||
fn lowercase_helper(text: &str) -> Vec<String> {
|
||||
let mut tokens = vec![];
|
||||
let mut token_stream = SimpleTokenizer.filter(LowerCaser).token_stream(text);
|
||||
let mut token_stream = TextAnalyzer::from(SimpleTokenizer)
|
||||
.filter(LowerCaser)
|
||||
.token_stream(text);
|
||||
while token_stream.advance() {
|
||||
let token_text = token_stream.token().text.clone();
|
||||
tokens.push(token_text);
|
||||
|
||||
@@ -64,7 +64,7 @@
|
||||
//! ```rust
|
||||
//! use tantivy::tokenizer::*;
|
||||
//!
|
||||
//! let en_stem = SimpleTokenizer
|
||||
//! let en_stem = TextAnalyzer::from(SimpleTokenizer)
|
||||
//! .filter(RemoveLongFilter::limit(40))
|
||||
//! .filter(LowerCaser)
|
||||
//! .filter(Stemmer::new(Language::English));
|
||||
@@ -109,7 +109,7 @@
|
||||
//! let index = Index::create_in_ram(schema);
|
||||
//!
|
||||
//! // We need to register our tokenizer :
|
||||
//! let custom_en_tokenizer = SimpleTokenizer
|
||||
//! let custom_en_tokenizer = TextAnalyzer::from(SimpleTokenizer)
|
||||
//! .filter(RemoveLongFilter::limit(40))
|
||||
//! .filter(LowerCaser);
|
||||
//! index
|
||||
@@ -143,10 +143,11 @@ pub use self::simple_tokenizer::SimpleTokenizer;
|
||||
pub use self::stemmer::{Language, Stemmer};
|
||||
pub use self::stop_word_filter::StopWordFilter;
|
||||
pub(crate) use self::token_stream_chain::TokenStreamChain;
|
||||
pub use self::tokenizer::BoxedTokenizer;
|
||||
|
||||
pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
|
||||
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
|
||||
pub use self::tokenizer::{
|
||||
BoxTokenFilter, BoxTokenStream, TextAnalyzer, Token, TokenFilter, TokenStream, Tokenizer,
|
||||
};
|
||||
|
||||
pub use self::tokenizer_manager::TokenizerManager;
|
||||
|
||||
@@ -160,9 +161,9 @@ pub const MAX_TOKEN_LEN: usize = u16::max_value() as usize - 4;
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::{
|
||||
Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, Tokenizer,
|
||||
TokenizerManager,
|
||||
Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, TokenizerManager,
|
||||
};
|
||||
use crate::tokenizer::TextAnalyzer;
|
||||
|
||||
/// This is a function that can be used in tests and doc tests
|
||||
/// to assert a token's correctness.
|
||||
@@ -229,7 +230,7 @@ pub mod tests {
|
||||
let tokenizer_manager = TokenizerManager::default();
|
||||
tokenizer_manager.register(
|
||||
"el_stem",
|
||||
SimpleTokenizer
|
||||
TextAnalyzer::from(SimpleTokenizer)
|
||||
.filter(RemoveLongFilter::limit(40))
|
||||
.filter(LowerCaser)
|
||||
.filter(Stemmer::new(Language::Greek)),
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use super::{Token, TokenStream, Tokenizer};
|
||||
use crate::tokenizer::BoxTokenStream;
|
||||
|
||||
/// Tokenize the text by splitting words into n-grams of the given size(s)
|
||||
///
|
||||
@@ -129,11 +130,9 @@ pub struct NgramTokenStream<'a> {
|
||||
token: Token,
|
||||
}
|
||||
|
||||
impl<'a> Tokenizer<'a> for NgramTokenizer {
|
||||
type TokenStreamImpl = NgramTokenStream<'a>;
|
||||
|
||||
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
|
||||
NgramTokenStream {
|
||||
impl Tokenizer for NgramTokenizer {
|
||||
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
|
||||
From::from(NgramTokenStream {
|
||||
ngram_charidx_iterator: StutteringIterator::new(
|
||||
CodepointFrontiers::for_str(text),
|
||||
self.min_gram,
|
||||
@@ -142,7 +141,7 @@ impl<'a> Tokenizer<'a> for NgramTokenizer {
|
||||
prefix_only: self.prefix_only,
|
||||
text,
|
||||
token: Token::default(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -308,10 +307,10 @@ mod tests {
|
||||
use super::NgramTokenizer;
|
||||
use super::StutteringIterator;
|
||||
use crate::tokenizer::tests::assert_token;
|
||||
use crate::tokenizer::tokenizer::{TokenStream, Tokenizer};
|
||||
use crate::tokenizer::Token;
|
||||
use crate::tokenizer::tokenizer::Tokenizer;
|
||||
use crate::tokenizer::{BoxTokenStream, Token};
|
||||
|
||||
fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
|
||||
fn test_helper(mut tokenizer: BoxTokenStream) -> Vec<Token> {
|
||||
let mut tokens: Vec<Token> = vec![];
|
||||
tokenizer.process(&mut |token: &Token| tokens.push(token.clone()));
|
||||
tokens
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use super::{Token, TokenStream, Tokenizer};
|
||||
use crate::tokenizer::BoxTokenStream;
|
||||
|
||||
/// For each value of the field, emit a single unprocessed token.
|
||||
#[derive(Clone)]
|
||||
@@ -9,10 +10,8 @@ pub struct RawTokenStream {
|
||||
has_token: bool,
|
||||
}
|
||||
|
||||
impl<'a> Tokenizer<'a> for RawTokenizer {
|
||||
type TokenStreamImpl = RawTokenStream;
|
||||
|
||||
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
|
||||
impl Tokenizer for RawTokenizer {
|
||||
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
|
||||
let token = Token {
|
||||
offset_from: 0,
|
||||
offset_to: text.len(),
|
||||
@@ -24,6 +23,7 @@ impl<'a> Tokenizer<'a> for RawTokenizer {
|
||||
token,
|
||||
has_token: true,
|
||||
}
|
||||
.into()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//! ```rust
|
||||
//! use tantivy::tokenizer::*;
|
||||
//!
|
||||
//! let tokenizer = SimpleTokenizer
|
||||
//! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
|
||||
//! .filter(RemoveLongFilter::limit(5));
|
||||
//!
|
||||
//! let mut stream = tokenizer.token_stream("toolong nice");
|
||||
@@ -13,6 +13,7 @@
|
||||
//! ```
|
||||
//!
|
||||
use super::{Token, TokenFilter, TokenStream};
|
||||
use crate::tokenizer::BoxTokenStream;
|
||||
|
||||
/// `RemoveLongFilter` removes tokens that are longer
|
||||
/// than a given number of bytes (in UTF-8 representation).
|
||||
@@ -31,56 +32,27 @@ impl RemoveLongFilter {
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> RemoveLongFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
impl<'a> RemoveLongFilterStream<'a> {
|
||||
fn predicate(&self, token: &Token) -> bool {
|
||||
token.text.len() < self.token_length_limit
|
||||
}
|
||||
}
|
||||
|
||||
fn wrap(
|
||||
token_length_limit: usize,
|
||||
tail: TailTokenStream,
|
||||
) -> RemoveLongFilterStream<TailTokenStream> {
|
||||
RemoveLongFilterStream {
|
||||
token_length_limit,
|
||||
tail,
|
||||
}
|
||||
impl TokenFilter for RemoveLongFilter {
|
||||
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
|
||||
BoxTokenStream::from(RemoveLongFilterStream {
|
||||
token_length_limit: self.length_limit,
|
||||
tail: token_stream,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenFilter<TailTokenStream> for RemoveLongFilter
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
type ResultTokenStream = RemoveLongFilterStream<TailTokenStream>;
|
||||
|
||||
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
|
||||
RemoveLongFilterStream::wrap(self.length_limit, token_stream)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RemoveLongFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
pub struct RemoveLongFilterStream<'a> {
|
||||
token_length_limit: usize,
|
||||
tail: TailTokenStream,
|
||||
tail: BoxTokenStream<'a>,
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenStream for RemoveLongFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
|
||||
impl<'a> TokenStream for RemoveLongFilterStream<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
while self.tail.advance() {
|
||||
if self.predicate(self.tail.token()) {
|
||||
@@ -89,4 +61,12 @@ where
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use super::BoxTokenStream;
|
||||
use super::{Token, TokenStream, Tokenizer};
|
||||
use std::str::CharIndices;
|
||||
|
||||
@@ -11,15 +12,13 @@ pub struct SimpleTokenStream<'a> {
|
||||
token: Token,
|
||||
}
|
||||
|
||||
impl<'a> Tokenizer<'a> for SimpleTokenizer {
|
||||
type TokenStreamImpl = SimpleTokenStream<'a>;
|
||||
|
||||
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
|
||||
SimpleTokenStream {
|
||||
impl Tokenizer for SimpleTokenizer {
|
||||
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
|
||||
BoxTokenStream::from(SimpleTokenStream {
|
||||
text,
|
||||
chars: text.char_indices(),
|
||||
token: Token::default(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use super::{Token, TokenFilter, TokenStream};
|
||||
use crate::tokenizer::BoxTokenStream;
|
||||
use rust_stemmers::{self, Algorithm};
|
||||
|
||||
/// Available stemmer languages.
|
||||
@@ -75,38 +76,22 @@ impl Default for Stemmer {
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenFilter<TailTokenStream> for Stemmer
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
type ResultTokenStream = StemmerTokenStream<TailTokenStream>;
|
||||
|
||||
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
|
||||
impl TokenFilter for Stemmer {
|
||||
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
|
||||
let inner_stemmer = rust_stemmers::Stemmer::create(self.stemmer_algorithm);
|
||||
StemmerTokenStream::wrap(inner_stemmer, token_stream)
|
||||
BoxTokenStream::from(StemmerTokenStream {
|
||||
tail: token_stream,
|
||||
stemmer: inner_stemmer,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct StemmerTokenStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
tail: TailTokenStream,
|
||||
pub struct StemmerTokenStream<'a> {
|
||||
tail: BoxTokenStream<'a>,
|
||||
stemmer: rust_stemmers::Stemmer,
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenStream for StemmerTokenStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
|
||||
impl<'a> TokenStream for StemmerTokenStream<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
if !self.tail.advance() {
|
||||
return false;
|
||||
@@ -117,16 +102,12 @@ where
|
||||
self.token_mut().text.push_str(&stemmed_str);
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> StemmerTokenStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn wrap(
|
||||
stemmer: rust_stemmers::Stemmer,
|
||||
tail: TailTokenStream,
|
||||
) -> StemmerTokenStream<TailTokenStream> {
|
||||
StemmerTokenStream { tail, stemmer }
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//! ```rust
|
||||
//! use tantivy::tokenizer::*;
|
||||
//!
|
||||
//! let tokenizer = SimpleTokenizer
|
||||
//! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
|
||||
//! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
|
||||
//!
|
||||
//! let mut stream = tokenizer.token_stream("the fox is crafty");
|
||||
@@ -11,6 +11,7 @@
|
||||
//! assert!(stream.next().is_none());
|
||||
//! ```
|
||||
use super::{Token, TokenFilter, TokenStream};
|
||||
use crate::tokenizer::BoxTokenStream;
|
||||
use fnv::FnvHasher;
|
||||
use std::collections::HashSet;
|
||||
use std::hash::BuildHasherDefault;
|
||||
@@ -48,53 +49,27 @@ impl StopWordFilter {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct StopWordFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
pub struct StopWordFilterStream<'a> {
|
||||
words: StopWordHashSet,
|
||||
tail: TailTokenStream,
|
||||
tail: BoxTokenStream<'a>,
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenFilter<TailTokenStream> for StopWordFilter
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
type ResultTokenStream = StopWordFilterStream<TailTokenStream>;
|
||||
|
||||
fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
|
||||
StopWordFilterStream::wrap(self.words.clone(), token_stream)
|
||||
impl TokenFilter for StopWordFilter {
|
||||
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
|
||||
BoxTokenStream::from(StopWordFilterStream {
|
||||
words: self.words.clone(),
|
||||
tail: token_stream,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> StopWordFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
impl<'a> StopWordFilterStream<'a> {
|
||||
fn predicate(&self, token: &Token) -> bool {
|
||||
!self.words.contains(&token.text)
|
||||
}
|
||||
|
||||
fn wrap(
|
||||
words: StopWordHashSet,
|
||||
tail: TailTokenStream,
|
||||
) -> StopWordFilterStream<TailTokenStream> {
|
||||
StopWordFilterStream { words, tail }
|
||||
}
|
||||
}
|
||||
|
||||
impl<TailTokenStream> TokenStream for StopWordFilterStream<TailTokenStream>
|
||||
where
|
||||
TailTokenStream: TokenStream,
|
||||
{
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
|
||||
impl<'a> TokenStream for StopWordFilterStream<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
while self.tail.advance() {
|
||||
if self.predicate(self.tail.token()) {
|
||||
@@ -103,6 +78,14 @@ where
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn token(&self) -> &Token {
|
||||
self.tail.token()
|
||||
}
|
||||
|
||||
fn token_mut(&mut self) -> &mut Token {
|
||||
self.tail.token_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for StopWordFilter {
|
||||
|
||||
@@ -1,23 +1,21 @@
|
||||
use crate::tokenizer::{Token, TokenStream};
|
||||
use crate::tokenizer::{BoxTokenStream, Token, TokenStream};
|
||||
use std::ops::DerefMut;
|
||||
|
||||
const POSITION_GAP: usize = 2;
|
||||
|
||||
pub(crate) struct TokenStreamChain<TTokenStream: TokenStream> {
|
||||
pub(crate) struct TokenStreamChain<'a> {
|
||||
offsets: Vec<usize>,
|
||||
token_streams: Vec<TTokenStream>,
|
||||
token_streams: Vec<BoxTokenStream<'a>>,
|
||||
position_shift: usize,
|
||||
stream_idx: usize,
|
||||
token: Token,
|
||||
}
|
||||
|
||||
impl<'a, TTokenStream> TokenStreamChain<TTokenStream>
|
||||
where
|
||||
TTokenStream: TokenStream,
|
||||
{
|
||||
impl<'a> TokenStreamChain<'a> {
|
||||
pub fn new(
|
||||
offsets: Vec<usize>,
|
||||
token_streams: Vec<TTokenStream>,
|
||||
) -> TokenStreamChain<TTokenStream> {
|
||||
token_streams: Vec<BoxTokenStream<'a>>,
|
||||
) -> TokenStreamChain<'a> {
|
||||
TokenStreamChain {
|
||||
offsets,
|
||||
stream_idx: 0,
|
||||
@@ -28,13 +26,10 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, TTokenStream> TokenStream for TokenStreamChain<TTokenStream>
|
||||
where
|
||||
TTokenStream: TokenStream,
|
||||
{
|
||||
impl<'a> TokenStream for TokenStreamChain<'a> {
|
||||
fn advance(&mut self) -> bool {
|
||||
while self.stream_idx < self.token_streams.len() {
|
||||
let token_stream = &mut self.token_streams[self.stream_idx];
|
||||
let token_stream = self.token_streams[self.stream_idx].deref_mut();
|
||||
if token_stream.advance() {
|
||||
let token = token_stream.token();
|
||||
let offset_offset = self.offsets[self.stream_idx];
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use crate::tokenizer::{Token, TokenStream, TokenStreamChain};
|
||||
use crate::tokenizer::{BoxTokenStream, Token, TokenStream, TokenStreamChain};
|
||||
use std::cmp::Ordering;
|
||||
|
||||
/// Struct representing pre-tokenized text
|
||||
@@ -41,9 +41,9 @@ impl PreTokenizedStream {
|
||||
/// Creates a TokenStream from PreTokenizedString array
|
||||
pub fn chain_tokenized_strings<'a>(
|
||||
tok_strings: &'a [&'a PreTokenizedString],
|
||||
) -> Box<dyn TokenStream + 'a> {
|
||||
) -> BoxTokenStream {
|
||||
if tok_strings.len() == 1 {
|
||||
Box::new(PreTokenizedStream::from((*tok_strings[0]).clone()))
|
||||
PreTokenizedStream::from((*tok_strings[0]).clone()).into()
|
||||
} else {
|
||||
let mut offsets = vec![];
|
||||
let mut total_offset = 0;
|
||||
@@ -53,11 +53,12 @@ impl PreTokenizedStream {
|
||||
total_offset += last_token.offset_to;
|
||||
}
|
||||
}
|
||||
let token_streams: Vec<_> = tok_strings
|
||||
// TODO remove the string cloning.
|
||||
let token_streams: Vec<BoxTokenStream<'static>> = tok_strings
|
||||
.iter()
|
||||
.map(|tok_string| PreTokenizedStream::from((*tok_string).clone()))
|
||||
.map(|&tok_string| PreTokenizedStream::from((*tok_string).clone()).into())
|
||||
.collect();
|
||||
Box::new(TokenStreamChain::new(offsets, token_streams))
|
||||
TokenStreamChain::new(offsets, token_streams).into()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
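A hedged usage sketch of the updated return type; the two `PreTokenizedString` values (`tok_str_a`, `tok_str_b`) are assumed to be built beforehand, for instance parsed from JSON as in the pre-tokenized text example.

// Sketch only: chaining now yields a BoxTokenStream instead of a Box<dyn TokenStream>.
let token_strings = [&tok_str_a, &tok_str_b];
let mut stream: BoxTokenStream = PreTokenizedStream::chain_tokenized_strings(&token_strings);
while stream.advance() {
    // Positions of the second string are shifted by the usual gap of 2.
    println!("{:?}", stream.token());
}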
@@ -2,6 +2,7 @@ use crate::tokenizer::TokenStreamChain;
|
||||
/// The tokenizer module contains all of the tools used to process
|
||||
/// text in `tantivy`.
|
||||
use std::borrow::{Borrow, BorrowMut};
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
/// Token
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
@@ -33,20 +34,31 @@ impl Default for Token {
|
||||
}
|
||||
}
|
||||
|
||||
/// `Tokenizer` are in charge of splitting text into a stream of token
|
||||
/// before indexing.
|
||||
/// `TextAnalyzer` tokenizes an input text into tokens and modifies the resulting `TokenStream`.
|
||||
///
|
||||
/// See the [module documentation](./index.html) for more detail.
|
||||
///
|
||||
/// # Warning
|
||||
///
|
||||
/// This API may change to use associated types.
|
||||
pub trait Tokenizer<'a>: Sized + Clone {
|
||||
/// Type associated to the resulting tokenstream tokenstream.
|
||||
type TokenStreamImpl: TokenStream;
|
||||
/// It simply wraps a `Tokenizer` and a list of `TokenFilter` that are applied sequentially.
|
||||
pub struct TextAnalyzer {
|
||||
tokenizer: Box<dyn Tokenizer>,
|
||||
token_filters: Vec<BoxTokenFilter>,
|
||||
}
|
||||
|
||||
/// Creates a token stream for a given `str`.
|
||||
fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl;
|
||||
impl<T: Tokenizer> From<T> for TextAnalyzer {
|
||||
fn from(tokenizer: T) -> Self {
|
||||
TextAnalyzer::new(tokenizer, Vec::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl TextAnalyzer {
|
||||
/// Creates a new `TextAnalyzer` given a tokenizer and a vector of `BoxTokenFilter`.
|
||||
///
|
||||
/// When creating a `TextAnalyzer` from a `Tokenizer` alone, prefer using
|
||||
/// `TextAnalyzer::from(tokenizer)`.
|
||||
pub fn new<T: Tokenizer>(tokenizer: T, token_filters: Vec<BoxTokenFilter>) -> TextAnalyzer {
|
||||
TextAnalyzer {
|
||||
tokenizer: Box::new(tokenizer),
|
||||
token_filters,
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends a token filter to the current tokenizer.
|
||||
///
|
||||
@@ -58,90 +70,26 @@ pub trait Tokenizer<'a>: Sized + Clone {
|
||||
/// ```rust
|
||||
/// use tantivy::tokenizer::*;
|
||||
///
|
||||
/// let en_stem = SimpleTokenizer
|
||||
/// let en_stem = TextAnalyzer::from(SimpleTokenizer)
|
||||
/// .filter(RemoveLongFilter::limit(40))
|
||||
/// .filter(LowerCaser)
|
||||
/// .filter(Stemmer::default());
|
||||
/// ```
|
||||
///
|
||||
fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self>
|
||||
where
|
||||
NewFilter: TokenFilter<<Self as Tokenizer<'a>>::TokenStreamImpl>,
|
||||
{
|
||||
ChainTokenizer {
|
||||
head: new_filter,
|
||||
tail: self,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A boxed tokenizer
|
||||
trait BoxedTokenizerTrait: Send + Sync {
|
||||
/// Tokenize a `&str`
|
||||
fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a>;
|
||||
|
||||
/// Tokenize an array`&str`
|
||||
///
|
||||
/// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
|
||||
/// one concatenated `&str`, with an artificial position gap of `2` between the different fields
|
||||
/// to prevent accidental `PhraseQuery` to match accross two terms.
|
||||
fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b>;
|
||||
|
||||
/// Return a boxed clone of the tokenizer
|
||||
fn boxed_clone(&self) -> BoxedTokenizer;
|
||||
}
|
||||
|
||||
/// A boxed tokenizer
|
||||
pub struct BoxedTokenizer(Box<dyn BoxedTokenizerTrait>);
|
||||
|
||||
impl<T> From<T> for BoxedTokenizer
|
||||
where
|
||||
T: 'static + Send + Sync + for<'a> Tokenizer<'a>,
|
||||
{
|
||||
fn from(tokenizer: T) -> BoxedTokenizer {
|
||||
BoxedTokenizer(Box::new(BoxableTokenizer(tokenizer)))
|
||||
}
|
||||
}
|
||||
|
||||
impl BoxedTokenizer {
|
||||
/// Tokenize a `&str`
|
||||
pub fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
|
||||
self.0.token_stream(text)
|
||||
pub fn filter<F: Into<BoxTokenFilter>>(mut self, token_filter: F) -> Self {
|
||||
self.token_filters.push(token_filter.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Tokenize an array`&str`
|
||||
///
|
||||
/// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
|
||||
/// The resulting `BoxTokenStream` is equivalent to what would be obtained if the &str were
|
||||
/// one concatenated `&str`, with an artificial position gap of `2` between the different fields
|
||||
/// to prevent accidental `PhraseQuery` to match accross two terms.
|
||||
pub fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
|
||||
self.0.token_stream_texts(texts)
|
||||
}
|
||||
}

impl Clone for BoxedTokenizer {
    fn clone(&self) -> BoxedTokenizer {
        self.0.boxed_clone()
    }
}

#[derive(Clone)]
struct BoxableTokenizer<A>(A)
where
    A: for<'a> Tokenizer<'a> + Send + Sync;

impl<A> BoxedTokenizerTrait for BoxableTokenizer<A>
where
    A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
{
    fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
        Box::new(self.0.token_stream(text))
    }

    fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
    pub fn token_stream_texts<'a>(&self, texts: &'a [&'a str]) -> BoxTokenStream<'a> {
        assert!(!texts.is_empty());
        if texts.len() == 1 {
            Box::new(self.0.token_stream(texts[0]))
            self.token_stream(texts[0])
        } else {
            let mut offsets = vec![];
            let mut total_offset = 0;
@@ -149,34 +97,124 @@ where
                offsets.push(total_offset);
                total_offset += text.len();
            }
            let token_streams: Vec<_> =
                texts.iter().map(|text| self.0.token_stream(text)).collect();
            Box::new(TokenStreamChain::new(offsets, token_streams))
            let token_streams: Vec<BoxTokenStream<'a>> = texts
                .iter()
                .cloned()
                .map(|text| self.token_stream(text))
                .collect();
            From::from(TokenStreamChain::new(offsets, token_streams))
        }
    }

    fn boxed_clone(&self) -> BoxedTokenizer {
        self.0.clone().into()
    /// Creates a token stream for a given `str`.
    pub fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
        let mut token_stream = self.tokenizer.token_stream(text);
        for token_filter in &self.token_filters {
            token_stream = token_filter.transform(token_stream);
        }
        token_stream
    }
}
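For reference, a sketch of how the analyzer assembled with `filter(...)` is driven from user code; the filters run in the order they were added, mirroring the loop in `token_stream` above:

```rust
use tantivy::tokenizer::{LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer};

fn main() {
    // RemoveLongFilter runs first, then LowerCaser, in registration order.
    let analyzer = TextAnalyzer::from(SimpleTokenizer)
        .filter(RemoveLongFilter::limit(40))
        .filter(LowerCaser);

    let mut stream = analyzer.token_stream("Hello, Happy Tax Payer!");
    let mut tokens = Vec::new();
    while stream.advance() {
        tokens.push(stream.token().text.clone());
    }
    assert_eq!(tokens, vec!["hello", "happy", "tax", "payer"]);
}
```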

impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
impl Clone for TextAnalyzer {
    fn clone(&self) -> Self {
        TextAnalyzer {
            tokenizer: self.tokenizer.box_clone(),
            token_filters: self
                .token_filters
                .iter()
                .map(|token_filter| token_filter.box_clone())
                .collect(),
        }
    }
}

/// `Tokenizer`s are in charge of splitting text into a stream of tokens
/// before indexing.
///
/// See the [module documentation](./index.html) for more detail.
///
/// # Warning
///
/// This API may change to use associated types.
pub trait Tokenizer: 'static + Send + Sync + TokenizerClone {
    /// Creates a token stream for a given `str`.
    fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a>;
}

pub trait TokenizerClone {
    fn box_clone(&self) -> Box<dyn Tokenizer>;
}

impl<T: Tokenizer + Clone> TokenizerClone for T {
    fn box_clone(&self) -> Box<dyn Tokenizer> {
        Box::new(self.clone())
    }
}
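Since `Tokenizer` is now object safe, a custom tokenizer is just a `Clone + Send + Sync` type that returns a `BoxTokenStream`. A rough sketch under those assumptions; `WholeTextTokenizer` is an illustrative name, not part of the crate, and it simply emits the whole input as one token, much like `RawTokenizer`:

```rust
use tantivy::tokenizer::{BoxTokenStream, Token, TokenStream, Tokenizer};

/// Emits the entire input as a single token.
#[derive(Clone)]
struct WholeTextTokenizer;

struct WholeTextStream {
    token: Token,
    has_advanced: bool,
}

impl TokenStream for WholeTextStream {
    fn advance(&mut self) -> bool {
        // Yield exactly one token, then report exhaustion.
        if self.has_advanced {
            return false;
        }
        self.has_advanced = true;
        true
    }

    fn token(&self) -> &Token {
        &self.token
    }

    fn token_mut(&mut self) -> &mut Token {
        &mut self.token
    }
}

impl Tokenizer for WholeTextTokenizer {
    fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
        let token = Token {
            offset_from: 0,
            offset_to: text.len(),
            position: 0,
            text: text.to_string(),
            ..Token::default()
        };
        BoxTokenStream::from(WholeTextStream {
            token,
            has_advanced: false,
        })
    }
}
```

The `#[derive(Clone)]` is what makes the blanket `TokenizerClone` impl above apply.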

impl<'a> TokenStream for Box<dyn TokenStream + 'a> {
    fn advance(&mut self) -> bool {
        let token_stream: &mut dyn TokenStream = self.borrow_mut();
        token_stream.advance()
    }

    fn token(&self) -> &Token {
        let token_stream: &dyn TokenStream = self.borrow();
    fn token<'b>(&'b self) -> &'b Token {
        let token_stream: &'b (dyn TokenStream + 'a) = self.borrow();
        token_stream.token()
    }

    fn token_mut(&mut self) -> &mut Token {
        let token_stream: &mut dyn TokenStream = self.borrow_mut();
    fn token_mut<'b>(&'b mut self) -> &'b mut Token {
        let token_stream: &'b mut (dyn TokenStream + 'a) = self.borrow_mut();
        token_stream.token_mut()
    }
}

/// Simple wrapper of `Box<dyn TokenStream + 'a>`.
///
/// See `TokenStream` for more information.
pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>);

impl<'a, T> From<T> for BoxTokenStream<'a>
where
    T: TokenStream + 'a,
{
    fn from(token_stream: T) -> BoxTokenStream<'a> {
        BoxTokenStream(Box::new(token_stream))
    }
}

impl<'a> Deref for BoxTokenStream<'a> {
    type Target = dyn TokenStream + 'a;

    fn deref(&self) -> &Self::Target {
        &*self.0
    }
}
impl<'a> DerefMut for BoxTokenStream<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.0
    }
}

/// Simple wrapper of `Box<dyn TokenFilter>`.
///
/// See `TokenFilter` for more information.
pub struct BoxTokenFilter(Box<dyn TokenFilter>);

impl Deref for BoxTokenFilter {
    type Target = dyn TokenFilter;

    fn deref(&self) -> &dyn TokenFilter {
        &*self.0
    }
}

impl<T: TokenFilter> From<T> for BoxTokenFilter {
    fn from(tokenizer: T) -> BoxTokenFilter {
        BoxTokenFilter(Box::new(tokenizer))
    }
}

/// `TokenStream` is the result of the tokenization.
///
/// It consists of a consumable stream of `Token`s.
@@ -186,7 +224,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// ```
/// use tantivy::tokenizer::*;
///
/// let tokenizer = SimpleTokenizer
/// let tokenizer = TextAnalyzer::from(SimpleTokenizer)
///     .filter(RemoveLongFilter::limit(40))
///     .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
@@ -225,7 +263,7 @@ pub trait TokenStream {
/// ```
/// use tantivy::tokenizer::*;
///
/// let tokenizer = SimpleTokenizer
/// let tokenizer = TextAnalyzer::from(SimpleTokenizer)
///     .filter(RemoveLongFilter::limit(40))
///     .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
@@ -243,6 +281,8 @@ pub trait TokenStream {

    /// Helper function to consume the entire `TokenStream`
    /// and push the tokens to a sink function.
    ///
    /// Remove this.
    fn process(&mut self, sink: &mut dyn FnMut(&Token)) -> u32 {
        let mut num_tokens_pushed = 0u32;
        while self.advance() {
@@ -253,33 +293,20 @@ pub trait TokenStream {
        }
    }
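The diff tags `process` with a "Remove this." note, but in 0.12 it still advances the stream, hands each `Token` to the sink closure, and returns the number of tokens pushed. A usage sketch:

```rust
use tantivy::tokenizer::{SimpleTokenizer, TextAnalyzer};

fn main() {
    let analyzer = TextAnalyzer::from(SimpleTokenizer);
    let mut stream = analyzer.token_stream("bonjour le monde");
    let mut texts = Vec::new();
    // The sink closure receives every token in order.
    let num_tokens = stream.process(&mut |token| texts.push(token.text.clone()));
    assert_eq!(num_tokens, 3);
    assert_eq!(texts, vec!["bonjour", "le", "monde"]);
}
```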

#[derive(Clone)]
pub struct ChainTokenizer<HeadTokenFilterFactory, TailTokenizer> {
    head: HeadTokenFilterFactory,
    tail: TailTokenizer,
}

impl<'a, HeadTokenFilterFactory, TailTokenizer> Tokenizer<'a>
    for ChainTokenizer<HeadTokenFilterFactory, TailTokenizer>
where
    HeadTokenFilterFactory: TokenFilter<TailTokenizer::TokenStreamImpl>,
    TailTokenizer: Tokenizer<'a>,
{
    type TokenStreamImpl = HeadTokenFilterFactory::ResultTokenStream;

    fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
        let tail_token_stream = self.tail.token_stream(text);
        self.head.transform(tail_token_stream)
    }
pub trait TokenFilterClone {
    fn box_clone(&self) -> BoxTokenFilter;
}

/// Trait for the pluggable components of `Tokenizer`s.
pub trait TokenFilter<TailTokenStream: TokenStream>: Clone {
    /// The resulting `TokenStream` type.
    type ResultTokenStream: TokenStream;

pub trait TokenFilter: 'static + Send + Sync + TokenFilterClone {
    /// Wraps a token stream and returns the modified one.
    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream;
    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a>;
}

impl<T: TokenFilter + Clone> TokenFilterClone for T {
    fn box_clone(&self) -> BoxTokenFilter {
        BoxTokenFilter::from(self.clone())
    }
}
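Under the reworked `TokenFilter` trait, a filter wraps the incoming `BoxTokenStream` in a stream of its own and boxes it again. A rough sketch; `AsciiLowerCaser` is an illustrative name, and the real `LowerCaser` also handles non-ASCII input:

```rust
use tantivy::tokenizer::{BoxTokenStream, Token, TokenFilter, TokenStream};

/// Lowercases the ASCII characters of every token it sees.
#[derive(Clone)]
struct AsciiLowerCaser;

struct AsciiLowerCaserStream<'a> {
    tail: BoxTokenStream<'a>,
}

impl<'a> TokenStream for AsciiLowerCaserStream<'a> {
    fn advance(&mut self) -> bool {
        if !self.tail.advance() {
            return false;
        }
        // Mutate the underlying token in place before handing it on.
        self.tail.token_mut().text.make_ascii_lowercase();
        true
    }

    fn token(&self) -> &Token {
        self.tail.token()
    }

    fn token_mut(&mut self) -> &mut Token {
        self.tail.token_mut()
    }
}

impl TokenFilter for AsciiLowerCaser {
    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
        BoxTokenStream::from(AsciiLowerCaserStream { tail: token_stream })
    }
}
```

Thanks to the blanket `From<T: TokenFilter> for BoxTokenFilter` impl above, such a filter can be plugged in with `.filter(AsciiLowerCaser)`.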

#[cfg(test)]

@@ -1,11 +1,10 @@
use crate::tokenizer::stemmer::Language;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::tokenizer::TextAnalyzer;
use crate::tokenizer::LowerCaser;
use crate::tokenizer::RawTokenizer;
use crate::tokenizer::RemoveLongFilter;
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::Stemmer;
use crate::tokenizer::Tokenizer;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

@@ -23,16 +22,16 @@ use std::sync::{Arc, RwLock};
/// search engine.
#[derive(Clone)]
pub struct TokenizerManager {
    tokenizers: Arc<RwLock<HashMap<String, BoxedTokenizer>>>,
    tokenizers: Arc<RwLock<HashMap<String, TextAnalyzer>>>,
}

impl TokenizerManager {
    /// Registers a new tokenizer associated with a given name.
    pub fn register<A>(&self, tokenizer_name: &str, tokenizer: A)
    pub fn register<T>(&self, tokenizer_name: &str, tokenizer: T)
    where
        A: Into<BoxedTokenizer>,
        TextAnalyzer: From<T>,
    {
        let boxed_tokenizer = tokenizer.into();
        let boxed_tokenizer: TextAnalyzer = TextAnalyzer::from(tokenizer);
        self.tokenizers
            .write()
            .expect("Acquiring the lock should never fail")
@@ -40,7 +39,7 @@ impl TokenizerManager {
    }

    /// Accessing a tokenizer given its name.
    pub fn get(&self, tokenizer_name: &str) -> Option<BoxedTokenizer> {
    pub fn get(&self, tokenizer_name: &str) -> Option<TextAnalyzer> {
        self.tokenizers
            .read()
            .expect("Acquiring the lock should never fail")
@@ -62,13 +61,13 @@ impl Default for TokenizerManager {
        manager.register("raw", RawTokenizer);
        manager.register(
            "default",
            SimpleTokenizer
            TextAnalyzer::from(SimpleTokenizer)
                .filter(RemoveLongFilter::limit(40))
                .filter(LowerCaser),
        );
        manager.register(
            "en_stem",
            SimpleTokenizer
            TextAnalyzer::from(SimpleTokenizer)
                .filter(RemoveLongFilter::limit(40))
                .filter(LowerCaser)
                .filter(Stemmer::new(Language::English)),
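From the caller's side, the updated `TokenizerManager` API is used by registering a `TextAnalyzer` under a name and fetching it back as a clone; registering through `Index::tokenizers()` should work the same way. A sketch (the `"commoncrawl"` name is just an example):

```rust
use tantivy::tokenizer::{
    LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer, TokenizerManager,
};

fn main() {
    let manager = TokenizerManager::default();
    manager.register(
        "commoncrawl",
        TextAnalyzer::from(SimpleTokenizer)
            .filter(RemoveLongFilter::limit(40))
            .filter(LowerCaser),
    );
    // `get` hands back a clone of the registered analyzer.
    let analyzer = manager
        .get("commoncrawl")
        .expect("tokenizer was just registered");
    let mut stream = analyzer.token_stream("Some Text");
    assert!(stream.advance());
}
```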

@@ -1,8 +1,6 @@
use fail;
use std::path::Path;
use tantivy::directory::{
    Directory, ManagedDirectory, RAMDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite};
use tantivy::doc;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term};