Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-27 20:42:54 +00:00)

Compare commits: issue/681 ... remove-tru (7 commits)
| SHA1 |
|---|
| 4fc8712f1a |
| 6e4fdfd4bf |
| 0519056bd8 |
| 7305ad575e |
| 79f64ac2f4 |
| 67bce6cbf2 |
| e5316a4388 |
.github/FUNDING.yml (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
# These are supported funding model platforms

github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
@@ -10,7 +10,10 @@ Tantivy 0.11.0
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)

- Added handling of pre-tokenized text fields (#642), which will enable users to
  load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)

## How to update?

- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
Cargo.toml (12 lines changed)
@@ -50,10 +50,10 @@ owned-read = "0.4"
failure = "0.1"
htmlescape = "0.3.1"
fail = "0.3"
scoped-pool = "1.0"
murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "0.6"
smallvec = "1.0"
rayon = "1"

[target.'cfg(windows)'.dependencies]
winapi = "0.3"
@@ -64,6 +64,10 @@ maplit = "1"
matches = "0.1.8"
time = "0.1.42"

[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]

[profile.release]
opt-level = 3
debug = false
@@ -87,10 +91,6 @@ members = ["query-grammar"]
[badges]
travis-ci = { repository = "tantivy-search/tantivy" }

[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]

# Following the "fail" crate best practices, we isolate
# tests that define specific behavior in fail check points
# in a different binary.
examples/pre_tokenized_text.rs (new file, 140 lines)
@@ -0,0 +1,140 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes you might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text

use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};

use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn pre_tokenize_text(text: &str) -> Vec<Token> {
    let mut token_stream = SimpleTokenizer.token_stream(text);
    let mut tokens = vec![];
    while token_stream.advance() {
        tokens.push(token_stream.token().clone());
    }
    tokens
}

fn main() -> tantivy::Result<()> {
    let index_path = TempDir::new()?;

    let mut schema_builder = Schema::builder();

    schema_builder.add_text_field("title", TEXT | STORED);
    schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

    let index = Index::create_in_dir(&index_path, schema.clone())?;

    let mut index_writer = index.writer(50_000_000)?;

    // We can create a document manually, by setting the fields
    // one by one in a Document object.
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let title_text = "The Old Man and the Sea";
    let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";

    // Content of our first document
    // We create a `PreTokenizedString`, which contains the original text and a vector of tokens
    let title_tok = PreTokenizedString {
        text: String::from(title_text),
        tokens: pre_tokenize_text(title_text),
    };

    println!(
        "Original text: \"{}\" and tokens: {:?}",
        title_tok.text, title_tok.tokens
    );

    let body_tok = PreTokenizedString {
        text: String::from(body_text),
        tokens: pre_tokenize_text(body_text),
    };

    // Now let's create a document and add our `PreTokenizedString` using
    // the `add_pre_tokenized_text` method of `Document`
    let mut old_man_doc = Document::default();
    old_man_doc.add_pre_tokenized_text(title, &title_tok);
    old_man_doc.add_pre_tokenized_text(body, &body_tok);

    // ... now let's just add it to the IndexWriter
    index_writer.add_document(old_man_doc);

    // Pre-tokenized text can also be fed as JSON
    let short_man_json = r#"{
        "title":[{
            "text":"The Old Man",
            "tokens":[
                {"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
                {"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
                {"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
            ]
        }]
    }"#;

    let short_man_doc = schema.parse_document(&short_man_json)?;

    index_writer.add_document(short_man_doc);

    // Let's commit changes
    index_writer.commit()?;

    // ... and now is the time to query our index

    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    let searcher = reader.searcher();

    // We want to get documents with the token "Man"; we will use a TermQuery to do it.
    // Using PreTokenizedString means the tokens are stored as-is, avoiding stemming
    // and lowercasing, which preserves full words in their original form.
    let query = TermQuery::new(
        Term::from_field_text(title, "Man"),
        IndexRecordOption::Basic,
    );

    let (top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 2);

    for (_score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("Document: {}", schema.to_json(&retrieved_doc));
    }

    // Contrary to the previous query, when we search for the "man" term we
    // should get no results, as it's not one of the indexed tokens. SimpleTokenizer
    // only splits text on whitespace / punctuation.

    let query = TermQuery::new(
        Term::from_field_text(title, "man"),
        IndexRecordOption::Basic,
    );

    let (_top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 0);

    Ok(())
}
@@ -1,6 +1,6 @@
use crate::Result;
use crossbeam::channel;
use scoped_pool::{Pool, ThreadConfig};
use rayon::{ThreadPool, ThreadPoolBuilder};

/// Search executor, whether search requests are single thread or multithread.
///
@@ -11,7 +11,7 @@ use scoped_pool::{Pool, ThreadConfig};
/// used by the client. Second, we may stop using rayon in the future.
pub enum Executor {
    SingleThread,
    ThreadPool(Pool),
    ThreadPool(ThreadPool),
}

impl Executor {
@@ -21,10 +21,12 @@ impl Executor {
    }

    // Creates an Executor that dispatches the tasks in a thread pool.
    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
        let thread_config = ThreadConfig::new().prefix(prefix);
        let pool = Pool::with_thread_config(num_threads, thread_config);
        Executor::ThreadPool(pool)
    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> {
        let pool = ThreadPoolBuilder::new()
            .num_threads(num_threads)
            .thread_name(move |num| format!("{}{}", prefix, num))
            .build()?;
        Ok(Executor::ThreadPool(pool))
    }

    // Perform a map in the thread pool.
@@ -48,9 +50,9 @@ impl Executor {
                let num_fruits = args_with_indices.len();
                let fruit_receiver = {
                    let (fruit_sender, fruit_receiver) = channel::unbounded();
                    pool.scoped(|scope| {
                    pool.scope(|scope| {
                        for arg_with_idx in args_with_indices {
                            scope.execute(|| {
                            scope.spawn(|_| {
                                let (idx, arg) = arg_with_idx;
                                let fruit = f(arg);
                                if let Err(err) = fruit_sender.send((idx, fruit)) {
@@ -103,6 +105,7 @@ mod tests {
    #[should_panic] //< unfortunately the panic message is not propagated
    fn test_panic_propagates_multi_thread() {
        let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
            .unwrap()
            .map(
                |_| {
                    panic!("panic should propagate");
@@ -126,6 +129,7 @@ mod tests {
    #[test]
    fn test_map_multithread() {
        let result: Vec<usize> = Executor::multi_thread(3, "search-test")
            .unwrap()
            .map(|i| Ok(i * 2), 0..10)
            .unwrap();
        assert_eq!(result.len(), 10);
@@ -73,15 +73,16 @@ impl Index {
    /// Replace the default single thread search executor pool
    /// by a thread pool with a given number of threads.
    pub fn set_multithread_executor(&mut self, num_threads: usize) {
        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
    pub fn set_multithread_executor(&mut self, num_threads: usize) -> Result<()> {
        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?);
        Ok(())
    }

    /// Replace the default single thread search executor pool
    /// by a thread pool with a given number of threads.
    pub fn set_default_multithread_executor(&mut self) {
    pub fn set_default_multithread_executor(&mut self) -> Result<()> {
        let default_num_threads = num_cpus::get();
        self.set_multithread_executor(default_num_threads);
        self.set_multithread_executor(default_num_threads)
    }

/// Creates a new index using the `RAMDirectory`.
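Since both setters now return `Result<()>`, callers that previously ignored the return value have to propagate or handle the error. A minimal caller-side sketch (not part of this diff; it only assumes the public `tantivy::Index` API and the `tantivy::Result` alias used throughout the crate):

```rust
use tantivy::{Index, Result};

/// Hypothetical helper: switch `index` to a 4-thread search executor,
/// propagating a thread-pool construction failure as a TantivyError.
fn enable_multithread_search(index: &mut Index) -> Result<()> {
    index.set_multithread_executor(4)?;
    Ok(())
}
```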
@@ -150,6 +150,21 @@ impl SegmentMeta {
        self.num_deleted_docs() > 0
    }

    /// Updates the max_doc value from the `SegmentMeta`.
    ///
    /// This method is only used when updating `max_doc` from 0
    /// as we finalize a fresh new segment.
    pub(crate) fn with_max_doc(self, max_doc: u32) -> SegmentMeta {
        assert_eq!(self.tracked.max_doc, 0);
        assert!(self.tracked.deletes.is_none());
        let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
            segment_id: inner_meta.segment_id,
            max_doc,
            deletes: None,
        });
        SegmentMeta { tracked }
    }

    #[doc(hidden)]
    pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
        let delete_meta = DeleteMeta {
@@ -50,6 +50,17 @@ impl Segment {
        &self.meta
    }

    /// Updates the max_doc value from the `SegmentMeta`.
    ///
    /// This method is only used when updating `max_doc` from 0
    /// as we finalize a fresh new segment.
    pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
        Segment {
            index: self.index,
            meta: self.meta.with_max_doc(max_doc),
        }
    }

    #[doc(hidden)]
    pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
        Segment {
@@ -170,3 +170,9 @@ impl From<serde_json::Error> for TantivyError {
        TantivyError::IOError(io_err.into())
    }
}

impl From<rayon::ThreadPoolBuildError> for TantivyError {
    fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
        TantivyError::SystemError(error.to_string())
    }
}
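This `From` impl is what lets the `?` on `ThreadPoolBuilder::build()` in `Executor::multi_thread` above convert a rayon pool-construction failure into a `TantivyError::SystemError`. A standalone sketch of the same propagation pattern, using only the rayon calls already shown in the diff (the `SystemError` wrapper below is a local stand-in, not tantivy's type):

```rust
use rayon::{ThreadPool, ThreadPoolBuilder};

// Stand-in for TantivyError::SystemError(String), kept local so the sketch compiles on its own.
#[derive(Debug)]
struct SystemError(String);

impl From<rayon::ThreadPoolBuildError> for SystemError {
    fn from(error: rayon::ThreadPoolBuildError) -> SystemError {
        SystemError(error.to_string())
    }
}

fn build_search_pool(num_threads: usize, prefix: &'static str) -> Result<ThreadPool, SystemError> {
    // The `?` below relies on the From impl, mirroring the new multi_thread constructor.
    let pool = ThreadPoolBuilder::new()
        .num_threads(num_threads)
        .thread_name(move |num| format!("{}{}", prefix, num))
        .build()?;
    Ok(pool)
}
```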
@@ -10,11 +10,14 @@ use std::io::Write;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
    let max_doc = delete_bitset.capacity();
pub fn write_delete_bitset(
    delete_bitset: &BitSet,
    max_doc: u32,
    writer: &mut WritePtr,
) -> io::Result<()> {
    let mut byte = 0u8;
    let mut shift = 0u8;
    for doc in 0..max_doc {
    for doc in 0..(max_doc as usize) {
        if delete_bitset.contains(doc) {
            byte |= 1 << shift;
        }
@@ -86,18 +89,17 @@ mod tests {
    use bit_set::BitSet;
    use std::path::PathBuf;

    fn test_delete_bitset_helper(bitset: &BitSet) {
    fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
        let test_path = PathBuf::from("test");
        let mut directory = RAMDirectory::create();
        {
            let mut writer = directory.open_write(&*test_path).unwrap();
            write_delete_bitset(bitset, &mut writer).unwrap();
            write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
        }
        {
            let source = directory.open_read(&test_path).unwrap();
            let delete_bitset = DeleteBitSet::open(source);
            let n = bitset.capacity();
            for doc in 0..n {
            for doc in 0..max_doc as usize {
                assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
            }
            assert_eq!(delete_bitset.len(), bitset.len());
@@ -110,7 +112,7 @@ mod tests {
            let mut bitset = BitSet::with_capacity(10);
            bitset.insert(1);
            bitset.insert(9);
            test_delete_bitset_helper(&bitset);
            test_delete_bitset_helper(&bitset, 10);
        }
        {
            let mut bitset = BitSet::with_capacity(8);
@@ -119,7 +121,7 @@ mod tests {
            bitset.insert(3);
            bitset.insert(5);
            bitset.insert(7);
            test_delete_bitset_helper(&bitset);
            test_delete_bitset_helper(&bitset, 8);
        }
    }
}
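To make the new explicit `max_doc` parameter concrete: the writer emits one byte per group of 8 documents and sets bit `doc % 8` for each deleted doc below `max_doc` (previously the loop bound came from `BitSet::capacity()`, which may exceed the real document count). A standalone sketch of that bit packing, not the crate's code, reproducing the first test case above (`max_doc = 10`, docs 1 and 9 deleted):

```rust
/// Pack deleted doc ids into the same on-disk layout as write_delete_bitset:
/// one byte per 8 documents, bit (doc % 8) set when the doc is deleted.
fn pack_delete_bitset(deleted_docs: &[u32], max_doc: u32) -> Vec<u8> {
    let mut bytes = vec![0u8; ((max_doc as usize) + 7) / 8];
    for &doc in deleted_docs {
        assert!(doc < max_doc, "deleted doc id must be below max_doc");
        bytes[(doc / 8) as usize] |= 1u8 << (doc % 8);
    }
    bytes
}

fn main() {
    // max_doc = 10 with docs 1 and 9 deleted -> two bytes, bit 1 set in each.
    assert_eq!(pack_delete_bitset(&[1, 9], 10), vec![0b0000_0010, 0b0000_0010]);
}
```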
@@ -148,7 +148,6 @@ pub(crate) fn advance_deletes(
    };

    let delete_cursor = segment_entry.delete_cursor();

    compute_deleted_bitset(
        &mut delete_bitset,
        &segment_reader,
@@ -168,7 +167,7 @@ pub(crate) fn advance_deletes(
    if num_deleted_docs > 0 {
        segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
        let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
        write_delete_bitset(&delete_bitset, &mut delete_file)?;
        write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
        delete_file.terminate()?;
    }
}
@@ -178,13 +177,13 @@ pub(crate) fn advance_deletes(
fn index_documents(
    memory_budget: usize,
    segment: &Segment,
    segment: Segment,
    grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
    segment_updater: &mut SegmentUpdater,
    mut delete_cursor: DeleteCursor,
) -> Result<bool> {
    let schema = segment.schema();
    let segment_id = segment.id();

    let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
    for document_group in grouped_document_iterator {
        for doc in document_group {
@@ -204,22 +203,32 @@ fn index_documents(
        return Ok(false);
    }

    let num_docs = segment_writer.max_doc();
    let max_doc = segment_writer.max_doc();

    // this is ensured by the call to peek before starting
    // the worker thread.
    assert!(num_docs > 0);
    assert!(max_doc > 0);

    let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
    let segment_meta = segment.index().new_segment_meta(segment_id, num_docs);

    let segment_with_max_doc = segment.with_max_doc(max_doc);

    let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());

    let delete_bitset_opt =
        apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
    let delete_bitset_opt = apply_deletes(
        &segment_with_max_doc,
        &mut delete_cursor,
        &doc_opstamps,
        last_docstamp,
    )?;

    let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, delete_bitset_opt);
    Ok(segment_updater.add_segment(segment_entry))
    let segment_entry = SegmentEntry::new(
        segment_with_max_doc.meta().clone(),
        delete_cursor,
        delete_bitset_opt,
    );
    segment_updater.add_segment(segment_entry);
    Ok(true)
}

fn apply_deletes(
@@ -235,7 +244,9 @@ fn apply_deletes(
    }
    let segment_reader = SegmentReader::open(segment)?;
    let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
    let mut deleted_bitset = BitSet::with_capacity(segment_reader.max_doc() as usize);

    let max_doc = segment.meta().max_doc();
    let mut deleted_bitset = BitSet::with_capacity(max_doc as usize);
    let may_have_deletes = compute_deleted_bitset(
        &mut deleted_bitset,
        &segment_reader,
@@ -407,7 +418,7 @@ impl IndexWriter {
                let segment = index.new_segment();
                index_documents(
                    mem_budget,
                    &segment,
                    segment,
                    &mut document_iterator,
                    &mut segment_updater,
                    delete_cursor.clone(),
@@ -28,3 +28,25 @@ pub use self::segment_writer::SegmentWriter;
/// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy;

#[cfg(test)]
mod tests {
    use crate::schema::{self, Schema};
    use crate::{Index, Term};
    #[test]
    fn test_advance_delete_bug() {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", schema::TEXT);
        let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        // there must be one deleted document in the segment
        index_writer.add_document(doc!(text_field=>"b"));
        index_writer.delete_term(Term::from_field_text(text_field, "b"));
        // we need enough data to trigger the bug (at least 32 documents)
        for _ in 0..32 {
            index_writer.add_document(doc!(text_field=>"c"));
        }
        index_writer.commit().unwrap();
        index_writer.commit().unwrap();
    }
}
@@ -199,14 +199,12 @@ impl SegmentUpdater {
        self.0.pool.spawn_fn(move || Ok(f(me_clone)))
    }

    pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
    pub fn add_segment(&self, segment_entry: SegmentEntry) {
        self.run_async(|segment_updater| {
            segment_updater.0.segment_manager.add_segment(segment_entry);
            segment_updater.consider_merge_options();
            true
        })
        .forget();
        true
    }

    /// Orders `SegmentManager` to remove all segments
@@ -13,7 +13,8 @@ use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::{TokenStream, Tokenizer};
use crate::tokenizer::PreTokenizedStream;
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::Result;
@@ -158,26 +159,44 @@ impl SegmentWriter {
                    }
                }
                FieldType::Str(_) => {
                    let num_tokens = if let Some(ref mut tokenizer) =
                        self.tokenizers[field.field_id() as usize]
                    {
                        let texts: Vec<&str> = field_values
                            .iter()
                            .flat_map(|field_value| match *field_value.value() {
                                Value::Str(ref text) => Some(text.as_str()),
                                _ => None,
                            })
                            .collect();
                        if texts.is_empty() {
                            0
                        } else {
                            let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
                            self.multifield_postings
                                .index_text(doc_id, field, &mut token_stream)
                    let mut token_streams: Vec<Box<dyn TokenStream>> = vec![];
                    let mut offsets = vec![];
                    let mut total_offset = 0;

                    for field_value in field_values {
                        match field_value.value() {
                            Value::PreTokStr(tok_str) => {
                                offsets.push(total_offset);
                                if let Some(last_token) = tok_str.tokens.last() {
                                    total_offset += last_token.offset_to;
                                }

                                token_streams
                                    .push(Box::new(PreTokenizedStream::from(tok_str.clone())));
                            }
                            Value::Str(ref text) => {
                                if let Some(ref mut tokenizer) =
                                    self.tokenizers[field.field_id() as usize]
                                {
                                    offsets.push(total_offset);
                                    total_offset += text.len();

                                    token_streams.push(tokenizer.token_stream(text));
                                }
                            }
                            _ => (),
                        }
                        } else {
                    }

                    let num_tokens = if token_streams.is_empty() {
                        0
                    } else {
                        let mut token_stream: Box<dyn TokenStream> =
                            Box::new(TokenStreamChain::new(offsets, token_streams));
                        self.multifield_postings
                            .index_text(doc_id, field, &mut token_stream)
                    };

                    self.fieldnorms_writer.record(doc_id, field, num_tokens);
                }
                FieldType::U64(ref int_option) => {
@@ -1,6 +1,7 @@
use super::*;
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime;
use itertools::Itertools;
use std::io::{self, Read, Write};
@@ -29,8 +30,8 @@ impl From<Vec<FieldValue>> for Document {
impl PartialEq for Document {
    fn eq(&self, other: &Document) -> bool {
        // super slow, but only here for tests
        let mut self_field_values = self.field_values.clone();
        let mut other_field_values = other.field_values.clone();
        let mut self_field_values: Vec<&_> = self.field_values.iter().collect();
        let mut other_field_values: Vec<&_> = other.field_values.iter().collect();
        self_field_values.sort();
        other_field_values.sort();
        self_field_values.eq(&other_field_values)
@@ -78,6 +79,16 @@ impl Document {
        self.add(FieldValue::new(field, value));
    }

    /// Add a pre-tokenized text field.
    pub fn add_pre_tokenized_text(
        &mut self,
        field: Field,
        pre_tokenized_text: &PreTokenizedString,
    ) {
        let value = Value::PreTokStr(pre_tokenized_text.clone());
        self.add(FieldValue::new(field, value));
    }

    /// Add a u64 field
    pub fn add_u64(&mut self, field: Field, value: u64) {
        self.add(FieldValue::new(field, Value::U64(value)));
@@ -15,6 +15,7 @@ impl Field {
    }

    /// Returns a u32 identifying uniquely a field within a schema.
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn field_id(&self) -> u32 {
        self.0
    }
@@ -1,11 +1,11 @@
use base64::decode;

use crate::schema::{IntOptions, TextOptions};

use crate::schema::Facet;
use crate::schema::IndexRecordOption;
use crate::schema::TextFieldIndexing;
use crate::schema::Value;
use crate::schema::{IntOptions, TextOptions};
use crate::tokenizer::PreTokenizedString;
use serde_json::Value as JsonValue;

/// Possible error that may occur while parsing a field value
@@ -169,6 +169,28 @@ impl FieldType {
                    Err(ValueParsingError::TypeError(msg))
                }
            },
            JsonValue::Object(_) => match *self {
                FieldType::Str(_) => {
                    if let Ok(tok_str_val) =
                        serde_json::from_value::<PreTokenizedString>(json.clone())
                    {
                        Ok(Value::PreTokStr(tok_str_val))
                    } else {
                        let msg = format!(
                            "Json value {:?} cannot be translated to PreTokenizedString.",
                            json
                        );
                        Err(ValueParsingError::TypeError(msg))
                    }
                }
                _ => {
                    let msg = format!(
                        "Json value not supported error {:?}. Expected {:?}",
                        json, self
                    );
                    Err(ValueParsingError::TypeError(msg))
                }
            },
            _ => {
                let msg = format!(
                    "Json value not supported error {:?}. Expected {:?}",
@@ -184,7 +206,9 @@ impl FieldType {
mod tests {
    use super::FieldType;
    use crate::schema::field_type::ValueParsingError;
    use crate::schema::TextOptions;
    use crate::schema::Value;
    use crate::tokenizer::{PreTokenizedString, Token};

    #[test]
    fn test_bytes_value_from_json() {
@@ -205,4 +229,71 @@ mod tests {
            _ => panic!("Expected parse failure for invalid base64"),
        }
    }

    #[test]
    fn test_pre_tok_str_value_from_json() {
        let pre_tokenized_string_json = r#"{
  "text": "The Old Man",
  "tokens": [
    {
      "offset_from": 0,
      "offset_to": 3,
      "position": 0,
      "text": "The",
      "position_length": 1
    },
    {
      "offset_from": 4,
      "offset_to": 7,
      "position": 1,
      "text": "Old",
      "position_length": 1
    },
    {
      "offset_from": 8,
      "offset_to": 11,
      "position": 2,
      "text": "Man",
      "position_length": 1
    }
  ]
}"#;

        let expected_value = Value::PreTokStr(PreTokenizedString {
            text: String::from("The Old Man"),
            tokens: vec![
                Token {
                    offset_from: 0,
                    offset_to: 3,
                    position: 0,
                    text: String::from("The"),
                    position_length: 1,
                },
                Token {
                    offset_from: 4,
                    offset_to: 7,
                    position: 1,
                    text: String::from("Old"),
                    position_length: 1,
                },
                Token {
                    offset_from: 8,
                    offset_to: 11,
                    position: 2,
                    text: String::from("Man"),
                    position_length: 1,
                },
            ],
        });

        let deserialized_value = FieldType::Str(TextOptions::default())
            .value_from_json(&serde_json::from_str(pre_tokenized_string_json).unwrap())
            .unwrap();

        assert_eq!(deserialized_value, expected_value);

        let serialized_value_json = serde_json::to_string_pretty(&expected_value).unwrap();

        assert_eq!(serialized_value_json, pre_tokenized_string_json);
    }
}
@@ -1,4 +1,5 @@
use crate::schema::Facet;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -10,6 +11,8 @@ use std::{cmp::Ordering, fmt};
pub enum Value {
    /// The str type is used for any text information.
    Str(String),
    /// Pre-tokenized str type,
    PreTokStr(PreTokenizedString),
    /// Unsigned 64-bits Integer `u64`
    U64(u64),
    /// Signed 64-bits Integer `i64`
@@ -29,6 +32,7 @@ impl Ord for Value {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            (Value::Str(l), Value::Str(r)) => l.cmp(r),
            (Value::PreTokStr(l), Value::PreTokStr(r)) => l.cmp(r),
            (Value::U64(l), Value::U64(r)) => l.cmp(r),
            (Value::I64(l), Value::I64(r)) => l.cmp(r),
            (Value::Date(l), Value::Date(r)) => l.cmp(r),
@@ -44,6 +48,8 @@ impl Ord for Value {
            }
            (Value::Str(_), _) => Ordering::Less,
            (_, Value::Str(_)) => Ordering::Greater,
            (Value::PreTokStr(_), _) => Ordering::Less,
            (_, Value::PreTokStr(_)) => Ordering::Greater,
            (Value::U64(_), _) => Ordering::Less,
            (_, Value::U64(_)) => Ordering::Greater,
            (Value::I64(_), _) => Ordering::Less,
@@ -65,6 +71,7 @@ impl Serialize for Value {
    {
        match *self {
            Value::Str(ref v) => serializer.serialize_str(v),
            Value::PreTokStr(ref v) => v.serialize(serializer),
            Value::U64(u) => serializer.serialize_u64(u),
            Value::I64(u) => serializer.serialize_i64(u),
            Value::F64(u) => serializer.serialize_f64(u),
@@ -124,6 +131,15 @@ impl Value {
        }
    }

    /// Returns the tokenized text, provided the value is of the `PreTokStr` type.
    /// (Returns None if the value is not of the `PreTokStr` type).
    pub fn tokenized_text(&self) -> Option<&PreTokenizedString> {
        match *self {
            Value::PreTokStr(ref tok_text) => Some(tok_text),
            _ => None,
        }
    }

    /// Returns the u64-value, provided the value is of the `U64` type.
    ///
    /// # Panics
@@ -221,6 +237,7 @@ mod binary_serialize {
    use super::Value;
    use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
    use crate::schema::Facet;
    use crate::tokenizer::PreTokenizedString;
    use chrono::{TimeZone, Utc};
    use std::io::{self, Read, Write};
@@ -231,6 +248,11 @@ mod binary_serialize {
    const BYTES_CODE: u8 = 4;
    const DATE_CODE: u8 = 5;
    const F64_CODE: u8 = 6;
    const EXT_CODE: u8 = 7;

    // extended types

    const TOK_STR_CODE: u8 = 0;

    impl BinarySerializable for Value {
        fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -239,6 +261,18 @@ mod binary_serialize {
                    TEXT_CODE.serialize(writer)?;
                    text.serialize(writer)
                }
                Value::PreTokStr(ref tok_str) => {
                    EXT_CODE.serialize(writer)?;
                    TOK_STR_CODE.serialize(writer)?;
                    if let Ok(text) = serde_json::to_string(tok_str) {
                        text.serialize(writer)
                    } else {
                        Err(io::Error::new(
                            io::ErrorKind::Other,
                            "Failed to dump Value::PreTokStr(_) to json.",
                        ))
                    }
                }
                Value::U64(ref val) => {
                    U64_CODE.serialize(writer)?;
                    val.serialize(writer)
@@ -290,6 +324,30 @@ mod binary_serialize {
                }
                HIERARCHICAL_FACET_CODE => Ok(Value::Facet(Facet::deserialize(reader)?)),
                BYTES_CODE => Ok(Value::Bytes(Vec::<u8>::deserialize(reader)?)),
                EXT_CODE => {
                    let ext_type_code = u8::deserialize(reader)?;
                    match ext_type_code {
                        TOK_STR_CODE => {
                            let str_val = String::deserialize(reader)?;
                            if let Ok(value) = serde_json::from_str::<PreTokenizedString>(&str_val)
                            {
                                Ok(Value::PreTokStr(value))
                            } else {
                                Err(io::Error::new(
                                    io::ErrorKind::Other,
                                    "Failed to parse string data as Value::PreTokStr(_).",
                                ))
                            }
                        }
                        _ => Err(io::Error::new(
                            io::ErrorKind::InvalidData,
                            format!(
                                "No extened field type is associated with code {:?}",
                                ext_type_code
                            ),
                        )),
                    }
                }
                _ => Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("No field type is associated with code {:?}", type_code),
@@ -136,6 +136,7 @@ mod simple_tokenizer;
mod stemmer;
mod stop_word_filter;
mod token_stream_chain;
mod tokenized_string;
mod tokenizer;
mod tokenizer_manager;
@@ -152,7 +153,9 @@ pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain;
pub use self::tokenizer::BoxedTokenizer;

pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};

pub use self::tokenizer_manager::TokenizerManager;

/// Maximum authorized len (in bytes) for a token.
src/tokenizer/tokenized_string.rs (new file, 189 lines)
@@ -0,0 +1,189 @@
use crate::tokenizer::{Token, TokenStream, TokenStreamChain};
use std::cmp::Ordering;

/// Struct representing pre-tokenized text
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct PreTokenizedString {
    /// Original text
    pub text: String,
    /// Tokens derived from the text
    pub tokens: Vec<Token>,
}

impl Ord for PreTokenizedString {
    fn cmp(&self, other: &Self) -> Ordering {
        self.text.cmp(&other.text)
    }
}

impl PartialOrd for PreTokenizedString {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// TokenStream implementation which wraps PreTokenizedString
pub struct PreTokenizedStream {
    tokenized_string: PreTokenizedString,
    current_token: i64,
}

impl From<PreTokenizedString> for PreTokenizedStream {
    fn from(s: PreTokenizedString) -> PreTokenizedStream {
        PreTokenizedStream {
            tokenized_string: s,
            current_token: -1,
        }
    }
}

impl PreTokenizedStream {
    /// Creates a TokenStream from PreTokenizedString array
    pub fn chain_tokenized_strings<'a>(
        tok_strings: &'a [&'a PreTokenizedString],
    ) -> Box<dyn TokenStream + 'a> {
        if tok_strings.len() == 1 {
            Box::new(PreTokenizedStream::from((*tok_strings[0]).clone()))
        } else {
            let mut offsets = vec![];
            let mut total_offset = 0;
            for &tok_string in tok_strings {
                offsets.push(total_offset);
                if let Some(last_token) = tok_string.tokens.last() {
                    total_offset += last_token.offset_to;
                }
            }
            let token_streams: Vec<_> = tok_strings
                .iter()
                .map(|tok_string| PreTokenizedStream::from((*tok_string).clone()))
                .collect();
            Box::new(TokenStreamChain::new(offsets, token_streams))
        }
    }
}

impl TokenStream for PreTokenizedStream {
    fn advance(&mut self) -> bool {
        self.current_token += 1;
        self.current_token < self.tokenized_string.tokens.len() as i64
    }

    fn token(&self) -> &Token {
        assert!(
            self.current_token >= 0,
            "TokenStream not initialized. You should call advance() at least once."
        );
        &self.tokenized_string.tokens[self.current_token as usize]
    }

    fn token_mut(&mut self) -> &mut Token {
        assert!(
            self.current_token >= 0,
            "TokenStream not initialized. You should call advance() at least once."
        );
        &mut self.tokenized_string.tokens[self.current_token as usize]
    }
}

#[cfg(test)]
mod tests {

    use super::*;

    use crate::tokenizer::Token;

    #[test]
    fn test_tokenized_stream() {
        let tok_text = PreTokenizedString {
            text: String::from("A a"),
            tokens: vec![
                Token {
                    offset_from: 0,
                    offset_to: 1,
                    position: 0,
                    text: String::from("A"),
                    position_length: 1,
                },
                Token {
                    offset_from: 2,
                    offset_to: 3,
                    position: 1,
                    text: String::from("a"),
                    position_length: 1,
                },
            ],
        };

        let mut token_stream = PreTokenizedStream::from(tok_text.clone());

        for expected_token in tok_text.tokens {
            assert!(token_stream.advance());
            assert_eq!(token_stream.token(), &expected_token);
        }
        assert!(!token_stream.advance());
    }

    #[test]
    fn test_chain_tokenized_strings() {
        let tok_text = PreTokenizedString {
            text: String::from("A a"),
            tokens: vec![
                Token {
                    offset_from: 0,
                    offset_to: 1,
                    position: 0,
                    text: String::from("A"),
                    position_length: 1,
                },
                Token {
                    offset_from: 2,
                    offset_to: 3,
                    position: 1,
                    text: String::from("a"),
                    position_length: 1,
                },
            ],
        };

        let chain_parts = vec![&tok_text, &tok_text];

        let mut token_stream = PreTokenizedStream::chain_tokenized_strings(&chain_parts[..]);

        let expected_tokens = vec![
            Token {
                offset_from: 0,
                offset_to: 1,
                position: 0,
                text: String::from("A"),
                position_length: 1,
            },
            Token {
                offset_from: 2,
                offset_to: 3,
                position: 1,
                text: String::from("a"),
                position_length: 1,
            },
            Token {
                offset_from: 3,
                offset_to: 4,
                position: 3,
                text: String::from("A"),
                position_length: 1,
            },
            Token {
                offset_from: 5,
                offset_to: 6,
                position: 4,
                text: String::from("a"),
                position_length: 1,
            },
        ];

        for expected_token in expected_tokens {
            assert!(token_stream.advance());
            assert_eq!(token_stream.token(), &expected_token);
        }
        assert!(!token_stream.advance());
    }
}
@@ -4,7 +4,7 @@ use crate::tokenizer::TokenStreamChain;
use std::borrow::{Borrow, BorrowMut};

/// Token
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct Token {
    /// Offset (byte index) of the first character of the token.
    /// Offsets shall not be modified by token filters.