Compare commits

1 Commit

Author: Pascal Seitz
SHA1: 1d72745bf5
Message: add From impl to BoxTokenStream
Date: 2023-06-23 16:39:31 +08:00
43 changed files with 228 additions and 787 deletions
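The commit title mentions a `From` impl for `BoxTokenStream`, which the excerpts below do not show. The following is only a hedged, self-contained sketch of what such a blanket conversion usually looks like; the `TokenStream` trait and `BoxTokenStream` wrapper here are simplified stand-ins, not the actual `tokenizer-api` definitions.

```rust
// Hedged sketch only: simplified stand-ins for the tokenizer-api types.
pub trait TokenStream {
    fn advance(&mut self) -> bool;
}

// A boxed, type-erased token stream, similar in spirit to tantivy's BoxTokenStream.
pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>);

// The kind of blanket conversion the commit title refers to: any concrete
// token stream can be turned into the boxed form with `.into()`.
impl<'a, T: TokenStream + 'a> From<T> for BoxTokenStream<'a> {
    fn from(token_stream: T) -> Self {
        BoxTokenStream(Box::new(token_stream))
    }
}

struct CountingStream {
    remaining: usize,
}

impl TokenStream for CountingStream {
    fn advance(&mut self) -> bool {
        if self.remaining == 0 {
            return false;
        }
        self.remaining -= 1;
        true
    }
}

fn main() {
    let mut boxed: BoxTokenStream = CountingStream { remaining: 2 }.into();
    let mut count = 0;
    while boxed.0.advance() {
        count += 1;
    }
    assert_eq!(count, 2);
}
```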

View File

@@ -1,14 +1,5 @@
Tantivy 0.20.2 Tantivy 0.20 [Unreleased]
================================
- Align numerical type priority order on the search side. [#2088](https://github.com/quickwit-oss/tantivy/issues/2088) (@fmassot)
- Fix is_child_of function not considering the root facet. [#2086](https://github.com/quickwit-oss/tantivy/issues/2086) (@adamreichhold)
Tantivy 0.20.1
================================
- Fix building on windows with mmap [#2070](https://github.com/quickwit-oss/tantivy/issues/2070) (@ChillFish8)
Tantivy 0.20
================================ ================================
#### Bugfixes #### Bugfixes
- Fix phrase queries with slop (slop supports now transpositions, algorithm that carries slop so far for num terms > 2) [#2031](https://github.com/quickwit-oss/tantivy/issues/2031)[#2020](https://github.com/quickwit-oss/tantivy/issues/2020)(@PSeitz) - Fix phrase queries with slop (slop supports now transpositions, algorithm that carries slop so far for num terms > 2) [#2031](https://github.com/quickwit-oss/tantivy/issues/2031)[#2020](https://github.com/quickwit-oss/tantivy/issues/2020)(@PSeitz)
@@ -47,14 +38,12 @@ Tantivy 0.20
- Add aggregation support for JSON type [#1888](https://github.com/quickwit-oss/tantivy/issues/1888) (@PSeitz) - Add aggregation support for JSON type [#1888](https://github.com/quickwit-oss/tantivy/issues/1888) (@PSeitz)
- Mixed types support on JSON fields in aggs [#1971](https://github.com/quickwit-oss/tantivy/issues/1971) (@PSeitz) - Mixed types support on JSON fields in aggs [#1971](https://github.com/quickwit-oss/tantivy/issues/1971) (@PSeitz)
- Perf: Fetch blocks of vals in aggregation for all cardinality [#1950](https://github.com/quickwit-oss/tantivy/issues/1950) (@PSeitz) - Perf: Fetch blocks of vals in aggregation for all cardinality [#1950](https://github.com/quickwit-oss/tantivy/issues/1950) (@PSeitz)
- Allow histogram bounds to be passed as Rfc3339 [#2076](https://github.com/quickwit-oss/tantivy/issues/2076) (@PSeitz)
- `Searcher` with disabled scoring via `EnableScoring::Disabled` [#1780](https://github.com/quickwit-oss/tantivy/issues/1780) (@shikhar) - `Searcher` with disabled scoring via `EnableScoring::Disabled` [#1780](https://github.com/quickwit-oss/tantivy/issues/1780) (@shikhar)
- Enable tokenizer on json fields [#2053](https://github.com/quickwit-oss/tantivy/issues/2053) (@PSeitz) - Enable tokenizer on json fields [#2053](https://github.com/quickwit-oss/tantivy/issues/2053) (@PSeitz)
- Enforcing "NOT" and "-" queries consistency in UserInputAst [#1609](https://github.com/quickwit-oss/tantivy/issues/1609) (@bazhenov) - Enforcing "NOT" and "-" queries consistency in UserInputAst [#1609](https://github.com/quickwit-oss/tantivy/issues/1609) (@bazhenov)
- Faster indexing - Faster indexing
- Refactor tokenization pipeline to use GATs [#1924](https://github.com/quickwit-oss/tantivy/issues/1924) (@trinity-1686a) - Refactor tokenization pipeline to use GATs [#1924](https://github.com/quickwit-oss/tantivy/issues/1924) (@trinity-1686a)
- Faster term hash map [#2058](https://github.com/quickwit-oss/tantivy/issues/2058)[#1940](https://github.com/quickwit-oss/tantivy/issues/1940) (@PSeitz) - Faster term hash map [#2058](https://github.com/quickwit-oss/tantivy/issues/2058)[#1940](https://github.com/quickwit-oss/tantivy/issues/1940) (@PSeitz)
- tokenizer-api: reduce Tokenizer allocation overhead [#2062](https://github.com/quickwit-oss/tantivy/issues/2062) (@PSeitz)
- Refactor vint [#2010](https://github.com/quickwit-oss/tantivy/issues/2010) (@PSeitz) - Refactor vint [#2010](https://github.com/quickwit-oss/tantivy/issues/2010) (@PSeitz)
- Faster search - Faster search
- Work in batches of docs on the SegmentCollector (Only for cases without score for now) [#1937](https://github.com/quickwit-oss/tantivy/issues/1937) (@PSeitz) - Work in batches of docs on the SegmentCollector (Only for cases without score for now) [#1937](https://github.com/quickwit-oss/tantivy/issues/1937) (@PSeitz)

View File

@@ -23,8 +23,8 @@ once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] } regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
aho-corasick = "1.0" aho-corasick = "1.0"
tantivy-fst = "0.4.0" tantivy-fst = "0.4.0"
memmap2 = { version = "0.7.1", optional = true } memmap2 = { version = "0.6.0", optional = true }
lz4_flex = { version = "0.11", default-features = false, optional = true } lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true } brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.12", optional = true, default-features = false } zstd = { version = "0.12", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true } snap = { version = "1.0.5", optional = true }
@@ -44,14 +44,14 @@ census = "0.4.0"
rustc-hash = "1.1.0" rustc-hash = "1.1.0"
thiserror = "1.0.30" thiserror = "1.0.30"
htmlescape = "0.3.1" htmlescape = "0.3.1"
fail = { version = "0.5.0", optional = true } fail = "0.5.0"
murmurhash32 = "0.3.0" murmurhash32 = "0.3.0"
time = { version = "0.3.10", features = ["serde-well-known"] } time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0" smallvec = "1.8.0"
rayon = "1.5.2" rayon = "1.5.2"
lru = "0.11.0" lru = "0.10.0"
fastdivide = "0.4.0" fastdivide = "0.4.0"
itertools = "0.11.0" itertools = "0.10.3"
measure_time = "0.8.2" measure_time = "0.8.2"
async-trait = "0.1.53" async-trait = "0.1.53"
arc-swap = "1.5.0" arc-swap = "1.5.0"
@@ -112,7 +112,7 @@ lz4-compression = ["lz4_flex"]
snappy-compression = ["snap"] snappy-compression = ["snap"]
zstd-compression = ["zstd"] zstd-compression = ["zstd"]
failpoints = ["fail", "fail/failpoints"] failpoints = ["fail/failpoints"]
unstable = [] # useful for benches. unstable = [] # useful for benches.
quickwit = ["sstable", "futures-util"] quickwit = ["sstable", "futures-util"]
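One side of the hunk above gates the `fail` dependency behind the `failpoints` feature (`failpoints = ["fail", "fail/failpoints"]` with `fail` marked optional), which matches the `fail_point!` call-site changes further down. A hedged sketch of the usual pattern for that setup follows; the macro below is illustrative, assumes `fail` as an optional dependency, and is not necessarily tantivy's exact definition.

```rust
// Illustrative only: forward to the `fail` crate when the feature is enabled,
// otherwise compile the fail point away entirely.
#[cfg(feature = "failpoints")]
#[macro_export]
macro_rules! fail_point {
    ($name:expr) => {
        fail::fail_point!($name)
    };
    ($name:expr, $e:expr) => {
        fail::fail_point!($name, $e)
    };
}

#[cfg(not(feature = "failpoints"))]
#[macro_export]
macro_rules! fail_point {
    ($name:expr) => {};
    ($name:expr, $e:expr) => {};
}
```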

View File

@@ -1,7 +1,5 @@
use criterion::{criterion_group, criterion_main, Criterion}; use criterion::{criterion_group, criterion_main, Criterion};
use tantivy::tokenizer::{ use tantivy::tokenizer::TokenizerManager;
LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer, TokenizerManager,
};
const ALICE_TXT: &str = include_str!("alice.txt"); const ALICE_TXT: &str = include_str!("alice.txt");
@@ -18,26 +16,7 @@ pub fn criterion_benchmark(c: &mut Criterion) {
assert_eq!(word_count, 30_731); assert_eq!(word_count, 30_731);
}) })
}); });
let mut dynamic_analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
.dynamic()
.filter_dynamic(RemoveLongFilter::limit(40))
.filter_dynamic(LowerCaser)
.build();
c.bench_function("dynamic-tokenize-alice", |b| {
b.iter(|| {
let mut word_count = 0;
let mut token_stream = dynamic_analyzer.token_stream(ALICE_TXT);
while token_stream.advance() {
word_count += 1;
}
assert_eq!(word_count, 30_731);
})
});
} }
criterion_group! { criterion_group!(benches, criterion_benchmark);
name = benches;
config = Criterion::default().sample_size(200);
targets = criterion_benchmark
}
criterion_main!(benches); criterion_main!(benches);
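One side of the benchmark above exercises the dynamic tokenizer pipeline (`.dynamic()` / `.filter_dynamic(...)`). For comparison, a hedged usage sketch of the statically typed builder chain, assuming the tantivy 0.20 crate as a dependency and that the builder API is as shown in the hunk; the input string and token assertions are made up for illustration.

```rust
use tantivy::tokenizer::{LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer};

fn main() {
    // Static builder chain: each filter is part of the analyzer's concrete type.
    let mut analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
        .filter(RemoveLongFilter::limit(40))
        .filter(LowerCaser)
        .build();

    // Same advance() loop as the benchmark, collecting token texts instead of counting.
    let mut token_stream = analyzer.token_stream("Hello, Tantivy!");
    let mut tokens = Vec::new();
    while token_stream.advance() {
        tokens.push(token_stream.token().text.clone());
    }
    assert_eq!(tokens, vec!["hello", "tantivy"]);
}
```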

View File

@@ -1,6 +1,6 @@
use std::ops::RangeInclusive; use std::ops::RangeInclusive;
#[cfg(target_arch = "x86_64")] #[cfg(any(target_arch = "x86_64"))]
mod avx2; mod avx2;
mod scalar; mod scalar;

View File

@@ -5,11 +5,11 @@ edition = "2021"
license = "MIT" license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy" homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy" repository = "https://github.com/quickwit-oss/tantivy"
description = "column oriented storage for tantivy" desciption = "column oriented storage for tantivy"
categories = ["database-implementations", "data-structures", "compression"] categories = ["database-implementations", "data-structures", "compression"]
[dependencies] [dependencies]
itertools = "0.11.0" itertools = "0.10.5"
fnv = "1.0.7" fnv = "1.0.7"
fastdivide = "0.4.0" fastdivide = "0.4.0"

View File

@@ -168,9 +168,8 @@ mod tests {
) )
.into(); .into();
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order); let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else { let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
panic!("Excpected a multivalued index") else { panic!("Excpected a multivalued index") };
};
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect(); let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5]); assert_eq!(&start_indexes, &[0, 3, 5]);
} }
@@ -201,9 +200,8 @@ mod tests {
) )
.into(); .into();
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order); let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else { let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
panic!("Excpected a multivalued index") else { panic!("Excpected a multivalued index") };
};
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect(); let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5, 6]); assert_eq!(&start_indexes, &[0, 3, 5, 6]);
} }
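This hunk (and several similar ones below) only reformats Rust `let ... else` bindings. For readers unfamiliar with the construct, a minimal standalone illustration with hypothetical types follows; `ColumnIndex` here is an invented stand-in, not the crate's `SerializableColumnIndex`.

```rust
// Hypothetical enum standing in for SerializableColumnIndex; not a tantivy type.
enum ColumnIndex {
    Multivalued(Vec<u32>),
    Full,
}

fn start_offsets(index: ColumnIndex) -> Vec<u32> {
    // `let ... else`: bind on the matching variant, or diverge in the else block
    // (panic!, return, continue, ...).
    let ColumnIndex::Multivalued(start_offsets) = index else {
        panic!("expected a multivalued index");
    };
    start_offsets
}

fn main() {
    let offsets = start_offsets(ColumnIndex::Multivalued(vec![0, 3, 5]));
    assert_eq!(offsets, vec![0, 3, 5]);
    let _unused = ColumnIndex::Full; // construct the other variant to avoid a dead-code warning
}
```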

View File

@@ -157,13 +157,7 @@ mod tests {
Cardinality::Optional, Cardinality::Optional,
&shuffle_merge_order, &shuffle_merge_order,
); );
let SerializableColumnIndex::Optional { let SerializableColumnIndex::Optional { non_null_row_ids, num_rows } = serializable_index else { panic!() };
non_null_row_ids,
num_rows,
} = serializable_index
else {
panic!()
};
assert_eq!(num_rows, 2); assert_eq!(num_rows, 2);
let non_null_rows: Vec<RowId> = non_null_row_ids.boxed_iter().collect(); let non_null_rows: Vec<RowId> = non_null_row_ids.boxed_iter().collect();
assert_eq!(&non_null_rows, &[1]); assert_eq!(&non_null_rows, &[1]);

View File

@@ -2,7 +2,7 @@
//! # `fastfield_codecs` //! # `fastfield_codecs`
//! //!
//! - Columnar storage of data for tantivy [`crate::Column`]. //! - Columnar storage of data for tantivy [`Column`].
//! - Encode data in different codecs. //! - Encode data in different codecs.
//! - Monotonically map values to u64/u128 //! - Monotonically map values to u64/u128

View File

@@ -83,8 +83,7 @@ impl ColumnValues for BitpackedReader {
doc_id_range: Range<u32>, doc_id_range: Range<u32>,
positions: &mut Vec<u32>, positions: &mut Vec<u32>,
) { ) {
let Some(transformed_range) = let Some(transformed_range) = transform_range_before_linear_transformation(&self.stats, range)
transform_range_before_linear_transformation(&self.stats, range)
else { else {
positions.clear(); positions.clear();
return; return;

View File

@@ -52,8 +52,8 @@ pub enum MergeRowOrder {
/// Columnar tables are simply stacked one above the other. /// Columnar tables are simply stacked one above the other.
/// If the i-th columnar_readers has n_rows_i rows, then /// If the i-th columnar_readers has n_rows_i rows, then
/// in the resulting columnar, /// in the resulting columnar,
/// rows [r0..n_row_0) contains the row of `columnar_readers[0]`, in ordder /// rows [r0..n_row_0) contains the row of columnar_readers[0], in ordder
/// rows [n_row_0..n_row_0 + n_row_1 contains the row of `columnar_readers[1]`, in order. /// rows [n_row_0..n_row_0 + n_row_1 contains the row of columnar_readers[1], in order.
/// .. /// ..
/// No documents is deleted. /// No documents is deleted.
Stack(StackMergeOrder), Stack(StackMergeOrder),
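The doc comment above describes the stacked layout: reader 0 occupies rows [0..n_row_0), reader 1 occupies [n_row_0..n_row_0 + n_row_1), and so on. A tiny sketch of that arithmetic, using an invented helper rather than the crate's API:

```rust
// Illustrative only: stacking columnars shifts each reader's rows by the
// total number of rows contributed by the readers before it.
fn stacked_row_offsets(num_rows_per_columnar: &[u32]) -> Vec<u32> {
    let mut offsets = Vec::with_capacity(num_rows_per_columnar.len());
    let mut cumulated = 0u32;
    for &num_rows in num_rows_per_columnar {
        offsets.push(cumulated);
        cumulated += num_rows;
    }
    offsets
}

fn main() {
    // Three columnars with 2, 3 and 1 rows: their rows land at [0..2), [2..5), [5..6).
    assert_eq!(stacked_row_offsets(&[2, 3, 1]), vec![0, 2, 5]);
}
```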

View File

@@ -244,9 +244,7 @@ fn test_merge_columnar_numbers() {
assert_eq!(columnar_reader.num_columns(), 1); assert_eq!(columnar_reader.num_columns(), 1);
let cols = columnar_reader.read_columns("numbers").unwrap(); let cols = columnar_reader.read_columns("numbers").unwrap();
let dynamic_column = cols[0].open().unwrap(); let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::F64(vals) = dynamic_column else { let DynamicColumn::F64(vals) = dynamic_column else { panic!() };
panic!()
};
assert_eq!(vals.get_cardinality(), Cardinality::Optional); assert_eq!(vals.get_cardinality(), Cardinality::Optional);
assert_eq!(vals.first(0u32), Some(-1f64)); assert_eq!(vals.first(0u32), Some(-1f64));
assert_eq!(vals.first(1u32), None); assert_eq!(vals.first(1u32), None);
@@ -272,9 +270,7 @@ fn test_merge_columnar_texts() {
assert_eq!(columnar_reader.num_columns(), 1); assert_eq!(columnar_reader.num_columns(), 1);
let cols = columnar_reader.read_columns("texts").unwrap(); let cols = columnar_reader.read_columns("texts").unwrap();
let dynamic_column = cols[0].open().unwrap(); let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
panic!()
};
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional); assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
let get_str_for_ord = |ord| { let get_str_for_ord = |ord| {
@@ -321,9 +317,7 @@ fn test_merge_columnar_byte() {
assert_eq!(columnar_reader.num_columns(), 1); assert_eq!(columnar_reader.num_columns(), 1);
let cols = columnar_reader.read_columns("bytes").unwrap(); let cols = columnar_reader.read_columns("bytes").unwrap();
let dynamic_column = cols[0].open().unwrap(); let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Bytes(vals) = dynamic_column else { let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
panic!()
};
let get_bytes_for_ord = |ord| { let get_bytes_for_ord = |ord| {
let mut out = Vec::new(); let mut out = Vec::new();
vals.ord_to_bytes(ord, &mut out).unwrap(); vals.ord_to_bytes(ord, &mut out).unwrap();
@@ -377,9 +371,7 @@ fn test_merge_columnar_byte_with_missing() {
assert_eq!(columnar_reader.num_columns(), 2); assert_eq!(columnar_reader.num_columns(), 2);
let cols = columnar_reader.read_columns("col").unwrap(); let cols = columnar_reader.read_columns("col").unwrap();
let dynamic_column = cols[0].open().unwrap(); let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Bytes(vals) = dynamic_column else { let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
panic!()
};
let get_bytes_for_ord = |ord| { let get_bytes_for_ord = |ord| {
let mut out = Vec::new(); let mut out = Vec::new();
vals.ord_to_bytes(ord, &mut out).unwrap(); vals.ord_to_bytes(ord, &mut out).unwrap();
@@ -431,9 +423,7 @@ fn test_merge_columnar_different_types() {
// numeric column // numeric column
let dynamic_column = cols[0].open().unwrap(); let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::I64(vals) = dynamic_column else { let DynamicColumn::I64(vals) = dynamic_column else { panic!() };
panic!()
};
assert_eq!(vals.get_cardinality(), Cardinality::Optional); assert_eq!(vals.get_cardinality(), Cardinality::Optional);
assert_eq!(vals.values_for_doc(0).collect_vec(), vec![]); assert_eq!(vals.values_for_doc(0).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(1).collect_vec(), vec![]); assert_eq!(vals.values_for_doc(1).collect_vec(), vec![]);
@@ -443,9 +433,7 @@ fn test_merge_columnar_different_types() {
// text column // text column
let dynamic_column = cols[1].open().unwrap(); let dynamic_column = cols[1].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
panic!()
};
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional); assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
let get_str_for_ord = |ord| { let get_str_for_ord = |ord| {
let mut out = String::new(); let mut out = String::new();

View File

@@ -98,11 +98,9 @@ impl ColumnarWriter {
/// ///
/// The sort applied is stable. /// The sort applied is stable.
pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> { pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> {
let Some(numerical_col_writer) = self let Some(numerical_col_writer) =
.numerical_field_hash_map self.numerical_field_hash_map.get::<NumericalColumnWriter>(sort_field.as_bytes()) else {
.get::<NumericalColumnWriter>(sort_field.as_bytes()) return Vec::new();
else {
return Vec::new();
}; };
let mut symbols_buffer = Vec::new(); let mut symbols_buffer = Vec::new();
let mut values = Vec::new(); let mut values = Vec::new();

View File

@@ -57,9 +57,7 @@ fn test_dataframe_writer_bool() {
assert_eq!(cols[0].num_bytes(), 22); assert_eq!(cols[0].num_bytes(), 22);
assert_eq!(cols[0].column_type(), ColumnType::Bool); assert_eq!(cols[0].column_type(), ColumnType::Bool);
let dyn_bool_col = cols[0].open().unwrap(); let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::Bool(bool_col) = dyn_bool_col else { let DynamicColumn::Bool(bool_col) = dyn_bool_col else { panic!(); };
panic!();
};
let vals: Vec<Option<bool>> = (0..5).map(|row_id| bool_col.first(row_id)).collect(); let vals: Vec<Option<bool>> = (0..5).map(|row_id| bool_col.first(row_id)).collect();
assert_eq!(&vals, &[None, Some(false), None, Some(true), None,]); assert_eq!(&vals, &[None, Some(false), None, Some(true), None,]);
} }
@@ -81,9 +79,7 @@ fn test_dataframe_writer_u64_multivalued() {
assert_eq!(cols.len(), 1); assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 29); assert_eq!(cols[0].num_bytes(), 29);
let dyn_i64_col = cols[0].open().unwrap(); let dyn_i64_col = cols[0].open().unwrap();
let DynamicColumn::I64(divisor_col) = dyn_i64_col else { let DynamicColumn::I64(divisor_col) = dyn_i64_col else { panic!(); };
panic!();
};
assert_eq!( assert_eq!(
divisor_col.get_cardinality(), divisor_col.get_cardinality(),
crate::Cardinality::Multivalued crate::Cardinality::Multivalued
@@ -105,9 +101,7 @@ fn test_dataframe_writer_ip_addr() {
assert_eq!(cols[0].num_bytes(), 42); assert_eq!(cols[0].num_bytes(), 42);
assert_eq!(cols[0].column_type(), ColumnType::IpAddr); assert_eq!(cols[0].column_type(), ColumnType::IpAddr);
let dyn_bool_col = cols[0].open().unwrap(); let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else { let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else { panic!(); };
panic!();
};
let vals: Vec<Option<Ipv6Addr>> = (0..5).map(|row_id| ip_col.first(row_id)).collect(); let vals: Vec<Option<Ipv6Addr>> = (0..5).map(|row_id| ip_col.first(row_id)).collect();
assert_eq!( assert_eq!(
&vals, &vals,
@@ -140,9 +134,7 @@ fn test_dataframe_writer_numerical() {
// - null footer 6 bytes // - null footer 6 bytes
assert_eq!(cols[0].num_bytes(), 33); assert_eq!(cols[0].num_bytes(), 33);
let column = cols[0].open().unwrap(); let column = cols[0].open().unwrap();
let DynamicColumn::I64(column_i64) = column else { let DynamicColumn::I64(column_i64) = column else { panic!(); };
panic!();
};
assert_eq!(column_i64.index.get_cardinality(), Cardinality::Optional); assert_eq!(column_i64.index.get_cardinality(), Cardinality::Optional);
assert_eq!(column_i64.first(0), None); assert_eq!(column_i64.first(0), None);
assert_eq!(column_i64.first(1), Some(12i64)); assert_eq!(column_i64.first(1), Some(12i64));
@@ -206,9 +198,7 @@ fn test_dictionary_encoded_str() {
assert_eq!(columnar_reader.num_columns(), 2); assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap(); let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1); assert_eq!(col_handles.len(), 1);
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else { let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else { panic!(); };
panic!();
};
let index: Vec<Option<u64>> = (0..5).map(|row_id| str_col.ords().first(row_id)).collect(); let index: Vec<Option<u64>> = (0..5).map(|row_id| str_col.ords().first(row_id)).collect();
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]); assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
assert_eq!(str_col.num_rows(), 5); assert_eq!(str_col.num_rows(), 5);
@@ -240,9 +230,7 @@ fn test_dictionary_encoded_bytes() {
assert_eq!(columnar_reader.num_columns(), 2); assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap(); let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1); assert_eq!(col_handles.len(), 1);
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else { let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else { panic!(); };
panic!();
};
let index: Vec<Option<u64>> = (0..5) let index: Vec<Option<u64>> = (0..5)
.map(|row_id| bytes_col.ords().first(row_id)) .map(|row_id| bytes_col.ords().first(row_id))
.collect(); .collect();
@@ -545,36 +533,28 @@ trait AssertEqualToColumnValue {
impl AssertEqualToColumnValue for bool { impl AssertEqualToColumnValue for bool {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) { fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::Bool(val) = column_value else { let ColumnValue::Bool(val) = column_value else { panic!() };
panic!()
};
assert_eq!(self, val); assert_eq!(self, val);
} }
} }
impl AssertEqualToColumnValue for Ipv6Addr { impl AssertEqualToColumnValue for Ipv6Addr {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) { fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::IpAddr(val) = column_value else { let ColumnValue::IpAddr(val) = column_value else { panic!() };
panic!()
};
assert_eq!(self, val); assert_eq!(self, val);
} }
} }
impl<T: Coerce + PartialEq + Debug + Into<NumericalValue>> AssertEqualToColumnValue for T { impl<T: Coerce + PartialEq + Debug + Into<NumericalValue>> AssertEqualToColumnValue for T {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) { fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::Numerical(num) = column_value else { let ColumnValue::Numerical(num) = column_value else { panic!() };
panic!()
};
assert_eq!(self, &T::coerce(*num)); assert_eq!(self, &T::coerce(*num));
} }
} }
impl AssertEqualToColumnValue for DateTime { impl AssertEqualToColumnValue for DateTime {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) { fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::DateTime(dt) = column_value else { let ColumnValue::DateTime(dt) = column_value else { panic!() };
panic!()
};
assert_eq!(self, dt); assert_eq!(self, dt);
} }
} }

View File

@@ -53,7 +53,7 @@ fn main() -> tantivy::Result<()> {
// this will store tokens of 3 characters each // this will store tokens of 3 characters each
index index
.tokenizers() .tokenizers()
.register("ngram3", NgramTokenizer::new(3, 3, false).unwrap()); .register("ngram3", NgramTokenizer::new(3, 3, false));
// To insert document we need an index writer. // To insert document we need an index writer.
// There must be only one writer at a time. // There must be only one writer at a time.

View File

@@ -6,14 +6,12 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, TEXT}; use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{ use tantivy::{
doc, DocAddress, DocId, Index, Opstamp, Searcher, SearcherGeneration, SegmentId, SegmentReader, doc, DocAddress, DocId, Index, IndexReader, Opstamp, Searcher, SearcherGeneration, SegmentId,
Warmer, SegmentReader, Warmer,
}; };
// This example shows how warmers can be used to // This example shows how warmers can be used to
// load values from an external sources and // load a values from an external sources using the Warmer API.
// tie their lifecycle to that of the index segments
// using the Warmer API.
// //
// In this example, we assume an e-commerce search engine. // In this example, we assume an e-commerce search engine.
@@ -25,11 +23,9 @@ pub trait PriceFetcher: Send + Sync + 'static {
fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price>; fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price>;
} }
type SegmentKey = (SegmentId, Option<Opstamp>);
struct DynamicPriceColumn { struct DynamicPriceColumn {
field: String, field: String,
price_cache: RwLock<HashMap<SegmentKey, Arc<Vec<Price>>>>, price_cache: RwLock<HashMap<(SegmentId, Option<Opstamp>), Arc<Vec<Price>>>>,
price_fetcher: Box<dyn PriceFetcher>, price_fetcher: Box<dyn PriceFetcher>,
} }
@@ -50,6 +46,7 @@ impl DynamicPriceColumn {
impl Warmer for DynamicPriceColumn { impl Warmer for DynamicPriceColumn {
fn warm(&self, searcher: &Searcher) -> tantivy::Result<()> { fn warm(&self, searcher: &Searcher) -> tantivy::Result<()> {
for segment in searcher.segment_readers() { for segment in searcher.segment_readers() {
let key = (segment.segment_id(), segment.delete_opstamp());
let product_id_reader = segment let product_id_reader = segment
.fast_fields() .fast_fields()
.u64(&self.field)? .u64(&self.field)?
@@ -58,40 +55,37 @@ impl Warmer for DynamicPriceColumn {
.doc_ids_alive() .doc_ids_alive()
.map(|doc| product_id_reader.get_val(doc)) .map(|doc| product_id_reader.get_val(doc))
.collect(); .collect();
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
let mut prices = self.price_fetcher.fetch_prices(&product_ids).into_iter(); let mut price_vals: Vec<Price> = Vec::new();
for doc in 0..segment.max_doc() {
let prices: Vec<Price> = (0..segment.max_doc()) if segment.is_deleted(doc) {
.map(|doc| { price_vals.push(0);
if !segment.is_deleted(doc) { } else {
prices.next().unwrap() price_vals.push(prices_it.next().unwrap())
} else { }
0 }
}
})
.collect();
let key = (segment.segment_id(), segment.delete_opstamp());
self.price_cache self.price_cache
.write() .write()
.unwrap() .unwrap()
.insert(key, Arc::new(prices)); .insert(key, Arc::new(price_vals));
} }
Ok(()) Ok(())
} }
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) { fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
let live_keys: HashSet<SegmentKey> = live_generations let live_segment_id_and_delete_ops: HashSet<(SegmentId, Option<Opstamp>)> =
.iter() live_generations
.flat_map(|gen| gen.segments()) .iter()
.map(|(&segment_id, &opstamp)| (segment_id, opstamp)) .flat_map(|gen| gen.segments())
.map(|(&segment_id, &opstamp)| (segment_id, opstamp))
.collect();
let mut price_cache_wrt = self.price_cache.write().unwrap();
// let price_cache = std::mem::take(&mut *price_cache_wrt);
// Drain would be nicer here.
*price_cache_wrt = std::mem::take(&mut *price_cache_wrt)
.into_iter()
.filter(|(seg_id_and_op, _)| !live_segment_id_and_delete_ops.contains(seg_id_and_op))
.collect(); .collect();
self.price_cache
.write()
.unwrap()
.retain(|key, _| live_keys.contains(key));
} }
} }
@@ -106,17 +100,17 @@ pub struct ExternalPriceTable {
impl ExternalPriceTable { impl ExternalPriceTable {
pub fn update_price(&self, product_id: ProductId, price: Price) { pub fn update_price(&self, product_id: ProductId, price: Price) {
self.prices.write().unwrap().insert(product_id, price); let mut prices_wrt = self.prices.write().unwrap();
prices_wrt.insert(product_id, price);
} }
} }
impl PriceFetcher for ExternalPriceTable { impl PriceFetcher for ExternalPriceTable {
fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price> { fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price> {
let prices = self.prices.read().unwrap(); let prices_read = self.prices.read().unwrap();
product_ids product_ids
.iter() .iter()
.map(|product_id| prices.get(product_id).cloned().unwrap_or(0)) .map(|product_id| prices_read.get(product_id).cloned().unwrap_or(0))
.collect() .collect()
} }
} }
@@ -149,8 +143,11 @@ fn main() -> tantivy::Result<()> {
writer.add_document(doc!(product_id=>SNEAKERS, text=>"uber sweet sneakers"))?; writer.add_document(doc!(product_id=>SNEAKERS, text=>"uber sweet sneakers"))?;
writer.commit()?; writer.commit()?;
let warmers = vec![Arc::downgrade(&price_dynamic_column) as Weak<dyn Warmer>]; let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade(
let reader = index.reader_builder().warmers(warmers).try_into()?; &(price_dynamic_column.clone() as Arc<dyn Warmer>),
)];
let reader: IndexReader = index.reader_builder().warmers(warmers).try_into()?;
reader.reload()?;
let query_parser = QueryParser::for_index(&index, vec![text]); let query_parser = QueryParser::for_index(&index, vec![text]);
let query = query_parser.parse_query("cooking")?; let query = query_parser.parse_query("cooking")?;
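The warmer example above caches per-segment price columns keyed by segment identity plus delete opstamp and garbage-collects entries whose key is no longer referenced by a live searcher generation. A condensed, hedged sketch of that cache lifecycle with simplified stand-in types (plain `u64` keys instead of `SegmentId`/`Opstamp`):

```rust
use std::collections::{HashMap, HashSet};

// Stand-in for (SegmentId, Option<Opstamp>); not the tantivy types.
type SegmentKey = (u64, Option<u64>);

struct PriceCache {
    per_segment: HashMap<SegmentKey, Vec<u64>>,
}

impl PriceCache {
    // Keep only entries whose segment key is still used by a live generation.
    fn garbage_collect(&mut self, live_keys: &HashSet<SegmentKey>) {
        self.per_segment.retain(|key, _| live_keys.contains(key));
    }
}

fn main() {
    let mut cache = PriceCache { per_segment: HashMap::new() };
    cache.per_segment.insert((1, None), vec![10, 20]);
    cache.per_segment.insert((2, Some(7)), vec![30]);

    let live: HashSet<SegmentKey> = [(2, Some(7))].into_iter().collect();
    cache.garbage_collect(&live);

    assert!(cache.per_segment.contains_key(&(2, Some(7))));
    assert_eq!(cache.per_segment.len(), 1);
}
```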

View File

@@ -15,12 +15,6 @@
//! Results of final buckets are [`BucketResult`](super::agg_result::BucketResult). //! Results of final buckets are [`BucketResult`](super::agg_result::BucketResult).
//! Results of intermediate buckets are //! Results of intermediate buckets are
//! [`IntermediateBucketResult`](super::intermediate_agg_result::IntermediateBucketResult) //! [`IntermediateBucketResult`](super::intermediate_agg_result::IntermediateBucketResult)
//!
//! ## Supported Bucket Aggregations
//! - [Histogram](HistogramAggregation)
//! - [DateHistogram](DateHistogramAggregationReq)
//! - [Range](RangeAggregation)
//! - [Terms](TermsAggregation)
mod histogram; mod histogram;
mod range; mod range;

View File

@@ -6,15 +6,6 @@
//! Some aggregations output a single numeric metric (e.g. Average) and are called //! Some aggregations output a single numeric metric (e.g. Average) and are called
//! single-value numeric metrics aggregation, others generate multiple metrics (e.g. Stats) and are //! single-value numeric metrics aggregation, others generate multiple metrics (e.g. Stats) and are
//! called multi-value numeric metrics aggregation. //! called multi-value numeric metrics aggregation.
//!
//! ## Supported Metric Aggregations
//! - [Average](AverageAggregation)
//! - [Stats](StatsAggregation)
//! - [Min](MinAggregation)
//! - [Max](MaxAggregation)
//! - [Sum](SumAggregation)
//! - [Count](CountAggregation)
//! - [Percentiles](PercentilesAggregationReq)
mod average; mod average;
mod count; mod count;

View File

@@ -6,35 +6,32 @@
// //
// Of course, you can have a look at the tantivy's built-in collectors // Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples. // such as the `CountCollector` for more examples.
use std::fmt::Debug;
use std::marker::PhantomData;
use columnar::{BytesColumn, Column, DynamicColumn, HasAssociatedColumnType}; // ---
// Importing tantivy...
use std::marker::PhantomData;
use std::sync::Arc;
use columnar::{ColumnValues, DynamicColumn, HasAssociatedColumnType};
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::schema::Field; use crate::schema::Field;
use crate::{DocId, Score, SegmentReader, TantivyError}; use crate::{Score, SegmentReader, TantivyError};
/// The `FilterCollector` filters docs using a fast field value and a predicate. /// The `FilterCollector` filters docs using a fast field value and a predicate.
/// /// Only the documents for which the predicate returned "true" will be passed on to the next
/// Only the documents containing at least one value for which the predicate returns `true` /// collector.
/// will be passed on to the next collector.
///
/// In other words,
/// - documents with no values are filtered out.
/// - documents with several values are accepted if at least one value matches the predicate.
///
/// ///
/// ```rust /// ```rust
/// use tantivy::collector::{TopDocs, FilterCollector}; /// use tantivy::collector::{TopDocs, FilterCollector};
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT, FAST}; /// use tantivy::schema::{Schema, TEXT, INDEXED, FAST};
/// use tantivy::{doc, DocAddress, Index}; /// use tantivy::{doc, DocAddress, Index};
/// ///
/// # fn main() -> tantivy::Result<()> { /// # fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
/// let price = schema_builder.add_u64_field("price", FAST); /// let price = schema_builder.add_u64_field("price", INDEXED | FAST);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
/// ///
@@ -50,24 +47,20 @@ use crate::{DocId, Score, SegmentReader, TantivyError};
/// ///
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?; /// let query = query_parser.parse_query("diary")?;
/// let no_filter_collector = FilterCollector::new(price, |value: u64| value > 20_120u64, TopDocs::with_limit(2)); /// let no_filter_collector = FilterCollector::new(price, &|value: u64| value > 20_120u64, TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &no_filter_collector)?; /// let top_docs = searcher.search(&query, &no_filter_collector)?;
/// ///
/// assert_eq!(top_docs.len(), 1); /// assert_eq!(top_docs.len(), 1);
/// assert_eq!(top_docs[0].1, DocAddress::new(0, 1)); /// assert_eq!(top_docs[0].1, DocAddress::new(0, 1));
/// ///
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, |value| value < 5u64, TopDocs::with_limit(2)); /// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
/// let filtered_top_docs = searcher.search(&query, &filter_all_collector)?; /// let filtered_top_docs = searcher.search(&query, &filter_all_collector)?;
/// ///
/// assert_eq!(filtered_top_docs.len(), 0); /// assert_eq!(filtered_top_docs.len(), 0);
/// # Ok(()) /// # Ok(())
/// # } /// # }
/// ``` /// ```
/// pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: Default>
/// Note that this is limited to fast fields which implement the
/// [`FastValue`][crate::fastfield::FastValue] trait, e.g. `u64` but not `&[u8]`.
/// To filter based on a bytes fast field, use a [`BytesFilterCollector`] instead.
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue>
where TPredicate: 'static + Clone where TPredicate: 'static + Clone
{ {
field: Field, field: Field,
@@ -76,15 +69,19 @@ where TPredicate: 'static + Clone
t_predicate_value: PhantomData<TPredicateValue>, t_predicate_value: PhantomData<TPredicateValue>,
} }
impl<TCollector, TPredicate, TPredicateValue> impl<TCollector, TPredicate, TPredicateValue: Default>
FilterCollector<TCollector, TPredicate, TPredicateValue> FilterCollector<TCollector, TPredicate, TPredicateValue>
where where
TCollector: Collector + Send + Sync, TCollector: Collector + Send + Sync,
TPredicate: Fn(TPredicateValue) -> bool + Send + Sync + Clone, TPredicate: Fn(TPredicateValue) -> bool + Send + Sync + Clone,
{ {
/// Create a new `FilterCollector`. /// Create a new FilterCollector.
pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self { pub fn new(
Self { field: Field,
predicate: TPredicate,
collector: TCollector,
) -> FilterCollector<TCollector, TPredicate, TPredicateValue> {
FilterCollector {
field, field,
predicate, predicate,
collector, collector,
@@ -93,7 +90,7 @@ where
} }
} }
impl<TCollector, TPredicate, TPredicateValue> Collector impl<TCollector, TPredicate, TPredicateValue: Default> Collector
for FilterCollector<TCollector, TPredicate, TPredicateValue> for FilterCollector<TCollector, TPredicate, TPredicateValue>
where where
TCollector: Collector + Send + Sync, TCollector: Collector + Send + Sync,
@@ -101,6 +98,8 @@ where
TPredicateValue: HasAssociatedColumnType, TPredicateValue: HasAssociatedColumnType,
DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>, DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>,
{ {
// That's the type of our result.
// Our standard deviation will be a float.
type Fruit = TCollector::Fruit; type Fruit = TCollector::Fruit;
type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>; type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>;
@@ -109,7 +108,7 @@ where
&self, &self,
segment_local_id: u32, segment_local_id: u32,
segment_reader: &SegmentReader, segment_reader: &SegmentReader,
) -> crate::Result<Self::Child> { ) -> crate::Result<FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>> {
let schema = segment_reader.schema(); let schema = segment_reader.schema();
let field_entry = schema.get_field_entry(self.field); let field_entry = schema.get_field_entry(self.field);
if !field_entry.is_fast() { if !field_entry.is_fast() {
@@ -119,16 +118,16 @@ where
))); )));
} }
let column_opt = segment_reader let fast_field_reader = segment_reader
.fast_fields() .fast_fields()
.column_opt(field_entry.name())?; .column_first_or_default(schema.get_field_name(self.field))?;
let segment_collector = self let segment_collector = self
.collector .collector
.for_segment(segment_local_id, segment_reader)?; .for_segment(segment_local_id, segment_reader)?;
Ok(FilterSegmentCollector { Ok(FilterSegmentCollector {
column_opt, fast_field_reader,
segment_collector, segment_collector,
predicate: self.predicate.clone(), predicate: self.predicate.clone(),
t_predicate_value: PhantomData, t_predicate_value: PhantomData,
@@ -147,208 +146,35 @@ where
} }
} }
pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue> { pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
column_opt: Option<Column<TPredicateValue>>, where
TPredicate: 'static,
DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>,
{
fast_field_reader: Arc<dyn ColumnValues<TPredicateValue>>,
segment_collector: TSegmentCollector, segment_collector: TSegmentCollector,
predicate: TPredicate, predicate: TPredicate,
t_predicate_value: PhantomData<TPredicateValue>, t_predicate_value: PhantomData<TPredicateValue>,
} }
impl<TSegmentCollector, TPredicate, TPredicateValue>
FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TPredicateValue: PartialOrd + Copy + Debug + Send + Sync + 'static,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
{
#[inline]
fn accept_document(&self, doc_id: DocId) -> bool {
if let Some(column) = &self.column_opt {
for val in column.values_for_doc(doc_id) {
if (self.predicate)(val) {
return true;
}
}
}
false
}
}
impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector
for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue> for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where where
TSegmentCollector: SegmentCollector, TSegmentCollector: SegmentCollector,
TPredicateValue: HasAssociatedColumnType, TPredicateValue: HasAssociatedColumnType,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync, /* DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>> */ TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>,
{ {
type Fruit = TSegmentCollector::Fruit; type Fruit = TSegmentCollector::Fruit;
fn collect(&mut self, doc: u32, score: Score) { fn collect(&mut self, doc: u32, score: Score) {
if self.accept_document(doc) { let value = self.fast_field_reader.get_val(doc);
self.segment_collector.collect(doc, score); if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
} }
} }
fn harvest(self) -> TSegmentCollector::Fruit { fn harvest(self) -> <TSegmentCollector as SegmentCollector>::Fruit {
self.segment_collector.harvest()
}
}
/// A variant of the [`FilterCollector`] specialized for bytes fast fields, i.e.
/// it transparently wraps an inner [`Collector`] but filters documents
/// based on the result of applying the predicate to the bytes fast field.
///
/// A document is accepted if and only if the predicate returns `true` for at least one value.
///
/// In other words,
/// - documents with no values are filtered out.
/// - documents with several values are accepted if at least one value matches the predicate.
///
/// ```rust
/// use tantivy::collector::{TopDocs, BytesFilterCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT, FAST};
/// use tantivy::{doc, DocAddress, Index};
///
/// # fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let barcode = schema_builder.add_bytes_field("barcode", FAST);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// index_writer.add_document(doc!(title => "The Name of the Wind", barcode => &b"010101"[..]))?;
/// index_writer.add_document(doc!(title => "The Diary of Muadib", barcode => &b"110011"[..]))?;
/// index_writer.add_document(doc!(title => "A Dairy Cow", barcode => &b"110111"[..]))?;
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl", barcode => &b"011101"[..]))?;
/// index_writer.add_document(doc!(title => "Bridget Jones's Diary"))?;
/// index_writer.commit()?;
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let filter_collector = BytesFilterCollector::new(barcode, |bytes: &[u8]| bytes.starts_with(b"01"), TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &filter_collector)?;
///
/// assert_eq!(top_docs.len(), 1);
/// assert_eq!(top_docs[0].1, DocAddress::new(0, 3));
/// # Ok(())
/// # }
/// ```
pub struct BytesFilterCollector<TCollector, TPredicate>
where TPredicate: 'static + Clone
{
field: Field,
collector: TCollector,
predicate: TPredicate,
}
impl<TCollector, TPredicate> BytesFilterCollector<TCollector, TPredicate>
where
TCollector: Collector + Send + Sync,
TPredicate: Fn(&[u8]) -> bool + Send + Sync + Clone,
{
/// Create a new `BytesFilterCollector`.
pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self {
Self {
field,
predicate,
collector,
}
}
}
impl<TCollector, TPredicate> Collector for BytesFilterCollector<TCollector, TPredicate>
where
TCollector: Collector + Send + Sync,
TPredicate: 'static + Fn(&[u8]) -> bool + Send + Sync + Clone,
{
type Fruit = TCollector::Fruit;
type Child = BytesFilterSegmentCollector<TCollector::Child, TPredicate>;
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> crate::Result<Self::Child> {
let schema = segment_reader.schema();
let field_name = schema.get_field_name(self.field);
let column_opt = segment_reader.fast_fields().bytes(field_name)?;
let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;
Ok(BytesFilterSegmentCollector {
column_opt,
segment_collector,
predicate: self.predicate.clone(),
buffer: Vec::new(),
})
}
fn requires_scoring(&self) -> bool {
self.collector.requires_scoring()
}
fn merge_fruits(
&self,
segment_fruits: Vec<<TCollector::Child as SegmentCollector>::Fruit>,
) -> crate::Result<TCollector::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct BytesFilterSegmentCollector<TSegmentCollector, TPredicate>
where TPredicate: 'static
{
column_opt: Option<BytesColumn>,
segment_collector: TSegmentCollector,
predicate: TPredicate,
buffer: Vec<u8>,
}
impl<TSegmentCollector, TPredicate> BytesFilterSegmentCollector<TSegmentCollector, TPredicate>
where
TSegmentCollector: SegmentCollector,
TPredicate: 'static + Fn(&[u8]) -> bool + Send + Sync,
{
#[inline]
fn accept_document(&mut self, doc_id: DocId) -> bool {
if let Some(column) = &self.column_opt {
for ord in column.term_ords(doc_id) {
self.buffer.clear();
let found = column.ord_to_bytes(ord, &mut self.buffer).unwrap_or(false);
if found && (self.predicate)(&self.buffer) {
return true;
}
}
}
false
}
}
impl<TSegmentCollector, TPredicate> SegmentCollector
for BytesFilterSegmentCollector<TSegmentCollector, TPredicate>
where
TSegmentCollector: SegmentCollector,
TPredicate: 'static + Fn(&[u8]) -> bool + Send + Sync,
{
type Fruit = TSegmentCollector::Fruit;
fn collect(&mut self, doc: u32, score: Score) {
if self.accept_document(doc) {
self.segment_collector.collect(doc, score);
}
}
fn harvest(self) -> TSegmentCollector::Fruit {
self.segment_collector.harvest() self.segment_collector.harvest()
} }
} }
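The filter-collector docs above spell out the acceptance rule for multi-valued fast fields: a document with no value is filtered out, and a document with several values passes if at least one value matches the predicate. A standalone restatement of just that rule; the real collectors read values from a fast field column rather than a slice.

```rust
// Illustrative helper, not the collector's actual code path.
fn accept_document<T: Copy>(values: &[T], predicate: impl Fn(T) -> bool) -> bool {
    // No value: filtered out. Several values: one matching value is enough.
    values.iter().copied().any(predicate)
}

fn main() {
    let above_threshold = |price: u64| price > 20_120;
    assert!(!accept_document(&[], above_threshold)); // no value: rejected
    assert!(accept_document(&[19_000, 30_000], above_threshold)); // one match: accepted
}
```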

View File

@@ -112,7 +112,7 @@ mod docset_collector;
pub use self::docset_collector::DocSetCollector; pub use self::docset_collector::DocSetCollector;
mod filter_collector_wrapper; mod filter_collector_wrapper;
pub use self::filter_collector_wrapper::{BytesFilterCollector, FilterCollector}; pub use self::filter_collector_wrapper::FilterCollector;
/// `Fruit` is the type for the result of our collection. /// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector. /// e.g. `usize` for the `Count` collector.

View File

@@ -14,7 +14,7 @@ use crate::collector::{
}; };
use crate::fastfield::{FastFieldNotAvailableError, FastValue}; use crate::fastfield::{FastFieldNotAvailableError, FastValue};
use crate::query::Weight; use crate::query::Weight;
use crate::{DocAddress, DocId, Order, Score, SegmentOrdinal, SegmentReader, TantivyError}; use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
struct FastFieldConvertCollector< struct FastFieldConvertCollector<
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>, TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
@@ -23,7 +23,6 @@ struct FastFieldConvertCollector<
pub collector: TCollector, pub collector: TCollector,
pub field: String, pub field: String,
pub fast_value: std::marker::PhantomData<TFastValue>, pub fast_value: std::marker::PhantomData<TFastValue>,
order: Order,
} }
impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue> impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
@@ -71,13 +70,7 @@ where
let raw_result = self.collector.merge_fruits(segment_fruits)?; let raw_result = self.collector.merge_fruits(segment_fruits)?;
let transformed_result = raw_result let transformed_result = raw_result
.into_iter() .into_iter()
.map(|(score, doc_address)| { .map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
if self.order.is_desc() {
(TFastValue::from_u64(score), doc_address)
} else {
(TFastValue::from_u64(u64::MAX - score), doc_address)
}
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
Ok(transformed_result) Ok(transformed_result)
} }
@@ -138,23 +131,16 @@ impl fmt::Debug for TopDocs {
struct ScorerByFastFieldReader { struct ScorerByFastFieldReader {
sort_column: Arc<dyn ColumnValues<u64>>, sort_column: Arc<dyn ColumnValues<u64>>,
order: Order,
} }
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader { impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 { fn score(&mut self, doc: DocId) -> u64 {
let value = self.sort_column.get_val(doc); self.sort_column.get_val(doc)
if self.order.is_desc() {
value
} else {
u64::MAX - value
}
} }
} }
struct ScorerByField { struct ScorerByField {
field: String, field: String,
order: Order,
} }
impl CustomScorer<u64> for ScorerByField { impl CustomScorer<u64> for ScorerByField {
@@ -171,13 +157,8 @@ impl CustomScorer<u64> for ScorerByField {
sort_column_opt.ok_or_else(|| FastFieldNotAvailableError { sort_column_opt.ok_or_else(|| FastFieldNotAvailableError {
field_name: self.field.clone(), field_name: self.field.clone(),
})?; })?;
let mut default_value = 0u64;
if self.order.is_asc() {
default_value = u64::MAX;
}
Ok(ScorerByFastFieldReader { Ok(ScorerByFastFieldReader {
sort_column: sort_column.first_or_default_col(default_value), sort_column: sort_column.first_or_default_col(0u64),
order: self.order.clone(),
}) })
} }
} }
@@ -249,7 +230,7 @@ impl TopDocs {
/// ///
/// ```rust /// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT}; /// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress, Order}; /// # use tantivy::{doc, Index, DocAddress};
/// # use tantivy::query::{Query, QueryParser}; /// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher; /// use tantivy::Searcher;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
@@ -287,7 +268,7 @@ impl TopDocs {
/// // Note the `rating_field` needs to be a FAST field here. /// // Note the `rating_field` needs to be a FAST field here.
/// let top_books_by_rating = TopDocs /// let top_books_by_rating = TopDocs
/// ::with_limit(10) /// ::with_limit(10)
/// .order_by_fast_field("rating", Order::Desc); /// .order_by_u64_field("rating");
/// ///
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for /// // The `u64` in the pair is the value of our fast field for
@@ -307,15 +288,13 @@ impl TopDocs {
/// ///
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to /// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
/// the [.order_by_fast_field(...)](TopDocs::order_by_fast_field) method. /// the [.order_by_fast_field(...)](TopDocs::order_by_fast_field) method.
fn order_by_u64_field( pub fn order_by_u64_field(
self, self,
field: impl ToString, field: impl ToString,
order: Order,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> { ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
CustomScoreTopCollector::new( CustomScoreTopCollector::new(
ScorerByField { ScorerByField {
field: field.to_string(), field: field.to_string(),
order,
}, },
self.0.into_tscore(), self.0.into_tscore(),
) )
@@ -337,7 +316,7 @@ impl TopDocs {
/// ///
/// ```rust /// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT}; /// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress,Order}; /// # use tantivy::{doc, Index, DocAddress};
/// # use tantivy::query::{Query, AllQuery}; /// # use tantivy::query::{Query, AllQuery};
/// use tantivy::Searcher; /// use tantivy::Searcher;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
@@ -375,7 +354,7 @@ impl TopDocs {
/// // type `sort_by_field`. revenue_field here is a FAST i64 field. /// // type `sort_by_field`. revenue_field here is a FAST i64 field.
/// let top_company_by_revenue = TopDocs /// let top_company_by_revenue = TopDocs
/// ::with_limit(2) /// ::with_limit(2)
/// .order_by_fast_field("revenue", Order::Desc); /// .order_by_fast_field("revenue");
/// ///
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `i64` in the pair is the value of our fast field for /// // The `i64` in the pair is the value of our fast field for
@@ -393,17 +372,15 @@ impl TopDocs {
pub fn order_by_fast_field<TFastValue>( pub fn order_by_fast_field<TFastValue>(
self, self,
fast_field: impl ToString, fast_field: impl ToString,
order: Order,
) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>> ) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
where where
TFastValue: FastValue, TFastValue: FastValue,
{ {
let u64_collector = self.order_by_u64_field(fast_field.to_string(), order.clone()); let u64_collector = self.order_by_u64_field(fast_field.to_string());
FastFieldConvertCollector { FastFieldConvertCollector {
collector: u64_collector, collector: u64_collector,
field: fast_field.to_string(), field: fast_field.to_string(),
fast_value: PhantomData, fast_value: PhantomData,
order,
} }
} }
@@ -744,7 +721,7 @@ mod tests {
use crate::schema::{Field, Schema, FAST, STORED, TEXT}; use crate::schema::{Field, Schema, FAST, STORED, TEXT};
use crate::time::format_description::well_known::Rfc3339; use crate::time::format_description::well_known::Rfc3339;
use crate::time::OffsetDateTime; use crate::time::OffsetDateTime;
use crate::{DateTime, DocAddress, DocId, Index, IndexWriter, Order, Score, SegmentReader}; use crate::{DateTime, DocAddress, DocId, Index, IndexWriter, Score, SegmentReader};
fn make_index() -> crate::Result<Index> { fn make_index() -> crate::Result<Index> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -905,7 +882,7 @@ mod tests {
}); });
let searcher = index.reader()?.searcher(); let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE, Order::Desc); let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector)?; let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector)?;
assert_eq!( assert_eq!(
&top_docs[..], &top_docs[..],
@@ -944,7 +921,7 @@ mod tests {
))?; ))?;
index_writer.commit()?; index_writer.commit()?;
let searcher = index.reader()?.searcher(); let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field("birthday", Order::Desc); let top_collector = TopDocs::with_limit(3).order_by_fast_field("birthday");
let top_docs: Vec<(DateTime, DocAddress)> = searcher.search(&AllQuery, &top_collector)?; let top_docs: Vec<(DateTime, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!( assert_eq!(
&top_docs[..], &top_docs[..],
@@ -974,7 +951,7 @@ mod tests {
))?; ))?;
index_writer.commit()?; index_writer.commit()?;
let searcher = index.reader()?.searcher(); let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude", Order::Desc); let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude");
let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?; let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!( assert_eq!(
&top_docs[..], &top_docs[..],
@@ -1004,7 +981,7 @@ mod tests {
))?; ))?;
index_writer.commit()?; index_writer.commit()?;
let searcher = index.reader()?.searcher(); let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude", Order::Desc); let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude");
let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?; let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!( assert_eq!(
&top_docs[..], &top_docs[..],
@@ -1032,7 +1009,7 @@ mod tests {
.unwrap(); .unwrap();
}); });
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field("missing_field", Order::Desc); let top_collector = TopDocs::with_limit(4).order_by_u64_field("missing_field");
let segment_reader = searcher.segment_reader(0u32); let segment_reader = searcher.segment_reader(0u32);
top_collector top_collector
.for_segment(0, segment_reader) .for_segment(0, segment_reader)
@@ -1050,7 +1027,7 @@ mod tests {
index_writer.commit()?; index_writer.commit()?;
let searcher = index.reader()?.searcher(); let searcher = index.reader()?.searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE, Order::Desc); let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE);
let err = top_collector.for_segment(0, segment).err().unwrap(); let err = top_collector.for_segment(0, segment).err().unwrap();
assert!(matches!(err, crate::TantivyError::InvalidArgument(_))); assert!(matches!(err, crate::TantivyError::InvalidArgument(_)));
Ok(()) Ok(())
@@ -1067,7 +1044,7 @@ mod tests {
index_writer.commit()?; index_writer.commit()?;
let searcher = index.reader()?.searcher(); let searcher = index.reader()?.searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(SIZE, Order::Desc); let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(SIZE);
let err = top_collector.for_segment(0, segment).err().unwrap(); let err = top_collector.for_segment(0, segment).err().unwrap();
assert!( assert!(
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.") matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
@@ -1129,50 +1106,4 @@ mod tests {
let query = query_parser.parse_query(query).unwrap(); let query = query_parser.parse_query(query).unwrap();
(index, query) (index, query)
} }
#[test]
fn test_fast_field_ascending_order() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, query) = index("beer", title, schema, |index_writer| {
index_writer
.add_document(doc!(
title => "bottle of beer",
size => 12u64,
))
.unwrap();
index_writer
.add_document(doc!(
title => "growler of beer",
size => 64u64,
))
.unwrap();
index_writer
.add_document(doc!(
title => "pint of beer",
size => 16u64,
))
.unwrap();
index_writer
.add_document(doc!(
title => "empty beer",
))
.unwrap();
});
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(4).order_by_fast_field(SIZE, Order::Asc);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector)?;
assert_eq!(
&top_docs[..],
&[
(12, DocAddress::new(0, 0)),
(16, DocAddress::new(0, 2)),
(64, DocAddress::new(0, 1)),
(18446744073709551615, DocAddress::new(0, 3)),
]
);
Ok(())
}
} }
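The hunks above only toggle whether `TopDocs::order_by_fast_field` takes an explicit `Order` argument. For orientation, here is a minimal sketch of the collector on the side of the diff that passes the order explicitly; the `rating` field and the tiny in-RAM index are illustrative and not taken from this change:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, DocAddress, Index, Order};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let rating = schema_builder.add_u64_field("rating", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer_with_num_threads(1, 20_000_000)?;
    writer.add_document(doc!(rating => 3u64))?;
    writer.add_document(doc!(rating => 7u64))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // Top two documents, sorted by the `rating` fast field in descending order.
    let collector = TopDocs::with_limit(2).order_by_fast_field::<u64>("rating", Order::Desc);
    let top_docs: Vec<(u64, DocAddress)> = searcher.search(&AllQuery, &collector)?;
    assert_eq!(top_docs[0].0, 7);
    Ok(())
}
```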


@@ -259,7 +259,7 @@ pub(crate) fn set_string_and_get_terms(
/// Writes a value of a JSON field to a `Term`. /// Writes a value of a JSON field to a `Term`.
/// The Term format is as follows: /// The Term format is as follows:
/// `[JSON_TYPE][JSON_PATH][JSON_END_OF_PATH][VALUE_BYTES]` /// [JSON_TYPE][JSON_PATH][JSON_END_OF_PATH][VALUE_BYTES]
pub struct JsonTermWriter<'a> { pub struct JsonTermWriter<'a> {
term_buffer: &'a mut Term, term_buffer: &'a mut Term,
path_stack: Vec<usize>, path_stack: Vec<usize>,


@@ -2,6 +2,8 @@ use std::collections::HashMap;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::{fmt, io}; use std::{fmt, io};
use fail::fail_point;
use crate::core::{InvertedIndexReader, Segment, SegmentComponent, SegmentId}; use crate::core::{InvertedIndexReader, Segment, SegmentComponent, SegmentId};
use crate::directory::{CompositeFile, FileSlice}; use crate::directory::{CompositeFile, FileSlice};
use crate::error::DataCorruption; use crate::error::DataCorruption;
@@ -149,7 +151,7 @@ impl SegmentReader {
let store_file = segment.open_read(SegmentComponent::Store)?; let store_file = segment.open_read(SegmentComponent::Store)?;
crate::fail_point!("SegmentReader::open#middle"); fail_point!("SegmentReader::open#middle");
let postings_file = segment.open_read(SegmentComponent::Postings)?; let postings_file = segment.open_read(SegmentComponent::Postings)?;
let postings_composite = CompositeFile::open(&postings_file)?; let postings_composite = CompositeFile::open(&postings_file)?;


@@ -5,6 +5,7 @@ use std::sync::{Arc, RwLock};
use std::{fmt, result}; use std::{fmt, result};
use common::HasLen; use common::HasLen;
use fail::fail_point;
use super::FileHandle; use super::FileHandle;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
@@ -183,7 +184,7 @@ impl Directory for RamDirectory {
} }
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
crate::fail_point!("RamDirectory::delete", |_| { fail_point!("RamDirectory::delete", |_| {
Err(DeleteError::IoError { Err(DeleteError::IoError {
io_error: Arc::new(io::Error::from(io::ErrorKind::Other)), io_error: Arc::new(io::Error::from(io::ErrorKind::Other)),
filepath: path.to_path_buf(), filepath: path.to_path_buf(),


@@ -1291,28 +1291,4 @@ mod tests {
let vals: Vec<i64> = column.values_for_doc(0u32).collect(); let vals: Vec<i64> = column.values_for_doc(0u32).collect();
assert_eq!(&vals, &[33]); assert_eq!(&vals, &[33]);
} }
#[test]
fn check_num_columnar_fields() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_text_field("id", FAST);
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_with_num_threads(1, 20_000_000)?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
id_field => 1u64,
))?;
index_writer.commit()?;
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let ff_reader = searcher.segment_reader(0).fast_fields();
let fields = ff_reader.u64_lenient_for_type_all(None, "id").unwrap();
assert_eq!(fields.len(), 1);
Ok(())
}
} }


@@ -88,7 +88,7 @@ impl FastFieldReaders {
let Some((field, path)): Option<(Field, &str)> = self let Some((field, path)): Option<(Field, &str)> = self
.schema .schema
.find_field_with_default(field_name, default_field_opt) .find_field_with_default(field_name, default_field_opt)
else { else{
return Ok(None); return Ok(None);
}; };
let field_entry: &FieldEntry = self.schema.get_field_entry(field); let field_entry: &FieldEntry = self.schema.get_field_entry(field);
@@ -120,8 +120,7 @@ impl FastFieldReaders {
T: HasAssociatedColumnType, T: HasAssociatedColumnType,
DynamicColumn: Into<Option<Column<T>>>, DynamicColumn: Into<Option<Column<T>>>,
{ {
let Some(dynamic_column_handle) = let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, T::column_type())?
self.dynamic_column_handle(field_name, T::column_type())?
else { else {
return Ok(None); return Ok(None);
}; };
@@ -197,8 +196,7 @@ impl FastFieldReaders {
/// Returns a `str` column. /// Returns a `str` column.
pub fn str(&self, field_name: &str) -> crate::Result<Option<StrColumn>> { pub fn str(&self, field_name: &str) -> crate::Result<Option<StrColumn>> {
let Some(dynamic_column_handle) = let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Str)?
self.dynamic_column_handle(field_name, ColumnType::Str)?
else { else {
return Ok(None); return Ok(None);
}; };
@@ -208,8 +206,7 @@ impl FastFieldReaders {
/// Returns a `bytes` column. /// Returns a `bytes` column.
pub fn bytes(&self, field_name: &str) -> crate::Result<Option<BytesColumn>> { pub fn bytes(&self, field_name: &str) -> crate::Result<Option<BytesColumn>> {
let Some(dynamic_column_handle) = let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Bytes)?
self.dynamic_column_handle(field_name, ColumnType::Bytes)?
else { else {
return Ok(None); return Ok(None);
}; };


@@ -6,6 +6,7 @@ use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use fail::fail_point;
use rayon::{ThreadPool, ThreadPoolBuilder}; use rayon::{ThreadPool, ThreadPoolBuilder};
use super::segment_manager::SegmentManager; use super::segment_manager::SegmentManager;
@@ -42,7 +43,7 @@ pub(crate) fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate:
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
writeln!(&mut buffer)?; writeln!(&mut buffer)?;
crate::fail_point!("save_metas", |msg| Err(crate::TantivyError::from( fail_point!("save_metas", |msg| Err(crate::TantivyError::from(
std::io::Error::new( std::io::Error::new(
std::io::ErrorKind::Other, std::io::ErrorKind::Other,
msg.unwrap_or_else(|| "Undefined".to_string()) msg.unwrap_or_else(|| "Undefined".to_string())


@@ -1,6 +1,5 @@
use columnar::MonotonicallyMappableToU64; use columnar::MonotonicallyMappableToU64;
use itertools::Itertools; use itertools::Itertools;
use tokenizer_api::BoxTokenStream;
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping}; use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
use super::operation::AddOperation; use super::operation::AddOperation;
@@ -16,7 +15,7 @@ use crate::postings::{
use crate::schema::{FieldEntry, FieldType, Schema, Term, Value, DATE_TIME_PRECISION_INDEXED}; use crate::schema::{FieldEntry, FieldType, Schema, Term, Value, DATE_TIME_PRECISION_INDEXED};
use crate::store::{StoreReader, StoreWriter}; use crate::store::{StoreReader, StoreWriter};
use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer}; use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer};
use crate::{DocId, Document, Opstamp, SegmentComponent, TantivyError}; use crate::{DocId, Document, Opstamp, SegmentComponent};
/// Computes the initial size of the hash table. /// Computes the initial size of the hash table.
/// ///
@@ -99,18 +98,14 @@ impl SegmentWriter {
} }
_ => None, _ => None,
}; };
let tokenizer_name = text_options text_options
.map(|text_index_option| text_index_option.tokenizer()) .and_then(|text_index_option| {
.unwrap_or("default"); let tokenizer_name = &text_index_option.tokenizer();
tokenizer_manager.get(tokenizer_name)
tokenizer_manager.get(tokenizer_name).ok_or_else(|| { })
TantivyError::SchemaError(format!( .unwrap_or_default()
"Error getting tokenizer for field: {}",
field_entry.name()
))
})
}) })
.collect::<Result<Vec<_>, _>>()?; .collect();
Ok(SegmentWriter { Ok(SegmentWriter {
max_doc: 0, max_doc: 0,
ctx: IndexingContext::new(table_size), ctx: IndexingContext::new(table_size),
@@ -210,7 +205,7 @@ impl SegmentWriter {
for value in values { for value in values {
let mut token_stream = match value { let mut token_stream = match value {
Value::PreTokStr(tok_str) => { Value::PreTokStr(tok_str) => {
BoxTokenStream::new(PreTokenizedStream::from(tok_str.clone())) PreTokenizedStream::from(tok_str.clone()).into()
} }
Value::Str(ref text) => { Value::Str(ref text) => {
let text_analyzer = let text_analyzer =
@@ -443,9 +438,7 @@ fn remap_and_write(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::path::{Path, PathBuf}; use std::path::Path;
use tempfile::TempDir;
use super::compute_initial_table_size; use super::compute_initial_table_size;
use crate::collector::Count; use crate::collector::Count;
@@ -453,9 +446,7 @@ mod tests {
use crate::directory::RamDirectory; use crate::directory::RamDirectory;
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::query::PhraseQuery; use crate::query::PhraseQuery;
use crate::schema::{ use crate::schema::{IndexRecordOption, Schema, Type, STORED, STRING, TEXT};
IndexRecordOption, Schema, TextFieldIndexing, TextOptions, Type, STORED, STRING, TEXT,
};
use crate::store::{Compressor, StoreReader, StoreWriter}; use crate::store::{Compressor, StoreReader, StoreWriter};
use crate::time::format_description::well_known::Rfc3339; use crate::time::format_description::well_known::Rfc3339;
use crate::time::OffsetDateTime; use crate::time::OffsetDateTime;
@@ -909,32 +900,4 @@ mod tests {
postings.positions(&mut positions); postings.positions(&mut positions);
assert_eq!(positions, &[4]); //< as opposed to 3 if we had a position length of 1. assert_eq!(positions, &[4]); //< as opposed to 3 if we had a position length of 1.
} }
#[test]
fn test_show_error_when_tokenizer_not_registered() {
let text_field_indexing = TextFieldIndexing::default()
.set_tokenizer("custom_en")
.set_index_option(IndexRecordOption::WithFreqsAndPositions);
let text_options = TextOptions::default()
.set_indexing_options(text_field_indexing)
.set_stored();
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", text_options);
let schema = schema_builder.build();
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
Index::create_in_dir(&tempdir_path, schema).unwrap();
let index = Index::open_in_dir(tempdir_path).unwrap();
let schema = index.schema();
let mut index_writer = index.writer(50_000_000).unwrap();
let title = schema.get_field("title").unwrap();
let mut document = Document::default();
document.add_text(title, "The Old Man and the Sea");
index_writer.add_document(document).unwrap();
let error = index_writer.commit().unwrap_err();
assert_eq!(
error.to_string(),
"Schema error: 'Error getting tokenizer for field: title'"
);
}
} }
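The removed test above pins down the stricter behaviour on one side of this diff: if the schema names a tokenizer that was never registered, `commit()` surfaces a `SchemaError` instead of silently falling back to the default analyzer. A minimal sketch of the registration that avoids that error, assuming the usual `TokenizerManager` API; the `custom_en` name mirrors the test, while the analyzer chain itself is illustrative:

```rust
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let text_field_indexing = TextFieldIndexing::default()
        .set_tokenizer("custom_en")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    let text_options = TextOptions::default()
        .set_indexing_options(text_field_indexing)
        .set_stored();

    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", text_options);
    let index = Index::create_in_ram(schema_builder.build());

    // Register the analyzer under the name the schema refers to, before indexing,
    // so the segment writer can resolve "custom_en" at commit time.
    index.tokenizers().register(
        "custom_en",
        TextAnalyzer::builder(SimpleTokenizer::default())
            .filter(LowerCaser)
            .build(),
    );

    let mut index_writer = index.writer_with_num_threads(1, 20_000_000)?;
    index_writer.add_document(doc!(title => "The Old Man and the Sea"))?;
    index_writer.commit()?;
    Ok(())
}
```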


@@ -101,7 +101,6 @@ mod test {
use super::Stamper; use super::Stamper;
#[allow(clippy::redundant_clone)]
#[test] #[test]
fn test_stamper() { fn test_stamper() {
let stamper = Stamper::new(7u64); let stamper = Stamper::new(7u64);
@@ -117,7 +116,6 @@ mod test {
assert_eq!(stamper.stamp(), 15u64); assert_eq!(stamper.stamp(), 15u64);
} }
#[allow(clippy::redundant_clone)]
#[test] #[test]
fn test_stamper_revert() { fn test_stamper_revert() {
let stamper = Stamper::new(7u64); let stamper = Stamper::new(7u64);


@@ -299,35 +299,6 @@ pub struct DocAddress {
pub doc_id: DocId, pub doc_id: DocId,
} }
#[macro_export]
/// Enable fail_point if feature is enabled.
macro_rules! fail_point {
($name:expr) => {{
#[cfg(feature = "failpoints")]
{
fail::eval($name, |_| {
panic!("Return is not supported for the fail point \"{}\"", $name);
});
}
}};
($name:expr, $e:expr) => {{
#[cfg(feature = "failpoints")]
{
if let Some(res) = fail::eval($name, $e) {
return res;
}
}
}};
($name:expr, $cond:expr, $e:expr) => {{
#[cfg(feature = "failpoints")]
{
if $cond {
fail::fail_point!($name, $e);
}
}
}};
}
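For context on the macro being dropped above: it is a thin, feature-gated wrapper around the `fail` crate, and the call sites in this diff switch to `fail::fail_point!` directly. A small, self-contained sketch of the same pattern, assuming the `fail` crate with its `failpoints` feature enabled; the `save_data` function and failpoint name are illustrative, not tantivy's:

```rust
use std::io;

// Mirrors the removed macro's two-argument form: if the failpoint is armed,
// evaluate the closure and return its result, otherwise fall through.
fn save_data() -> io::Result<()> {
    if let Some(res) = fail::eval("save_data", |msg| {
        Err(io::Error::new(
            io::ErrorKind::Other,
            msg.unwrap_or_else(|| "injected failure".to_string()),
        ))
    }) {
        return res;
    }
    // ... normal persistence work would go here ...
    Ok(())
}

#[cfg(test)]
mod tests {
    #[test]
    fn save_data_fails_when_failpoint_is_armed() {
        let scenario = fail::FailScenario::setup();
        // Arm the failpoint so the closure above runs and injects an error.
        fail::cfg("save_data", "return(simulated)").unwrap();
        assert!(super::save_data().is_err());
        scenario.teardown();
    }
}
```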
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use common::{BinarySerializable, FixedSize}; use common::{BinarySerializable, FixedSize};
@@ -905,8 +876,8 @@ pub mod tests {
}"#, }"#,
) )
.unwrap(); .unwrap();
let doc = doc!(json_field=>json_val); let doc = doc!(json_field=>json_val.clone());
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema.clone());
let mut writer = index.writer_for_tests().unwrap(); let mut writer = index.writer_for_tests().unwrap();
writer.add_document(doc).unwrap(); writer.add_document(doc).unwrap();
writer.commit().unwrap(); writer.commit().unwrap();


@@ -2,6 +2,7 @@ use std::cmp::Ordering;
use std::io::{self, Write}; use std::io::{self, Write};
use common::{BinarySerializable, CountingWriter, VInt}; use common::{BinarySerializable, CountingWriter, VInt};
use fail::fail_point;
use super::TermInfo; use super::TermInfo;
use crate::core::Segment; use crate::core::Segment;
@@ -204,7 +205,7 @@ impl<'a> FieldSerializer<'a> {
/// If the current block is incomplete, it needs to be encoded /// If the current block is incomplete, it needs to be encoded
/// using `VInt` encoding. /// using `VInt` encoding.
pub fn close_term(&mut self) -> io::Result<()> { pub fn close_term(&mut self) -> io::Result<()> {
crate::fail_point!("FieldSerializer::close_term", |msg: Option<String>| { fail_point!("FieldSerializer::close_term", |msg: Option<String>| {
Err(io::Error::new(io::ErrorKind::Other, format!("{msg:?}"))) Err(io::Error::new(io::ErrorKind::Other, format!("{msg:?}")))
}); });
if self.term_open { if self.term_open {


@@ -4,7 +4,9 @@ use std::collections::{BinaryHeap, HashMap};
use crate::query::bm25::idf; use crate::query::bm25::idf;
use crate::query::{BooleanQuery, BoostQuery, Occur, Query, TermQuery}; use crate::query::{BooleanQuery, BoostQuery, Occur, Query, TermQuery};
use crate::schema::{Field, FieldType, IndexRecordOption, Term, Value}; use crate::schema::{Field, FieldType, IndexRecordOption, Term, Value};
use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TokenStream, Tokenizer}; use crate::tokenizer::{
BoxTokenStream, FacetTokenizer, PreTokenizedStream, TokenStream, Tokenizer,
};
use crate::{DocAddress, Result, Searcher, TantivyError}; use crate::{DocAddress, Result, Searcher, TantivyError};
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@@ -204,7 +206,8 @@ impl MoreLikeThis {
for value in values { for value in values {
match value { match value {
Value::PreTokStr(tok_str) => { Value::PreTokStr(tok_str) => {
let mut token_stream = PreTokenizedStream::from(tok_str.clone()); let mut token_stream: BoxTokenStream =
PreTokenizedStream::from(tok_str.clone()).into();
token_stream.process(&mut |token| { token_stream.process(&mut |token| {
if !self.is_noise_word(token.text.clone()) { if !self.is_noise_word(token.text.clone()) {
let term = Term::from_field_text(field, &token.text); let term = Term::from_field_text(field, &token.text);


@@ -472,7 +472,6 @@ mod tests {
use super::RangeQuery; use super::RangeQuery;
use crate::collector::{Count, TopDocs}; use crate::collector::{Count, TopDocs};
use crate::indexer::NoMergePolicy;
use crate::query::QueryParser; use crate::query::QueryParser;
use crate::schema::{Document, Field, IntoIpv6Addr, Schema, FAST, INDEXED, STORED, TEXT}; use crate::schema::{Document, Field, IntoIpv6Addr, Schema, FAST, INDEXED, STORED, TEXT};
use crate::{doc, Index}; use crate::{doc, Index};
@@ -548,8 +547,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_with_num_threads(1, 60_000_000)?; let mut index_writer = index.writer_with_num_threads(2, 60_000_000)?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
for i in 1..100 { for i in 1..100 {
let mut doc = Document::new(); let mut doc = Document::new();
@@ -559,9 +557,6 @@ mod tests {
} }
} }
index_writer.add_document(doc)?; index_writer.add_document(doc)?;
if i == 10 {
index_writer.commit()?;
}
} }
index_writer.commit()?; index_writer.commit()?;


@@ -31,10 +31,9 @@ impl IPFastFieldRangeWeight {
impl Weight for IPFastFieldRangeWeight { impl Weight for IPFastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let Some(ip_addr_column): Option<Column<Ipv6Addr>> = let Some(ip_addr_column): Option<Column<Ipv6Addr>> = reader.fast_fields()
reader.fast_fields().column_opt(&self.field)? .column_opt(&self.field)? else {
else { return Ok(Box::new(EmptyScorer))
return Ok(Box::new(EmptyScorer));
}; };
let value_range = bound_to_value_range( let value_range = bound_to_value_range(
&self.lower_bound, &self.lower_bound,


@@ -71,9 +71,7 @@ impl Weight for FastFieldRangeWeight {
let column_type_opt_ref: Option<&[ColumnType]> = column_type_opt let column_type_opt_ref: Option<&[ColumnType]> = column_type_opt
.as_ref() .as_ref()
.map(|column_types| column_types.as_slice()); .map(|column_types| column_types.as_slice());
let Some((column, _)) = let Some((column, _)) = fast_field_reader.u64_lenient_for_type(column_type_opt_ref, &self.field)? else {
fast_field_reader.u64_lenient_for_type(column_type_opt_ref, &self.field)?
else {
return Ok(Box::new(EmptyScorer)); return Ok(Box::new(EmptyScorer));
}; };
let value_range = bound_to_value_range( let value_range = bound_to_value_range(


@@ -693,7 +693,7 @@ Survey in 2016, 2017, and 2018."#;
terms.insert(String::from("bc"), 1.0); terms.insert(String::from("bc"), 1.0);
let fragments = search_fragments( let fragments = search_fragments(
&mut From::from(NgramTokenizer::all_ngrams(2, 2).unwrap()), &mut From::from(NgramTokenizer::all_ngrams(2, 2)),
text, text,
&terms, &terms,
3, 3,


@@ -426,7 +426,7 @@ mod tests {
assert_eq!(store.cache_stats().cache_hits, 1); assert_eq!(store.cache_stats().cache_hits, 1);
assert_eq!(store.cache_stats().cache_misses, 2); assert_eq!(store.cache_stats().cache_misses, 2);
assert_eq!(store.cache.peek_lru(), Some(11207)); assert_eq!(store.cache.peek_lru(), Some(11163));
Ok(()) Ok(())
} }


@@ -154,7 +154,7 @@ pub use self::split_compound_words::SplitCompoundWords;
pub use self::stemmer::{Language, Stemmer}; pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter; pub use self::stop_word_filter::StopWordFilter;
pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString}; pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
pub use self::tokenizer::{TextAnalyzer, TextAnalyzerBuilder}; pub use self::tokenizer::TextAnalyzer;
pub use self::tokenizer_manager::TokenizerManager; pub use self::tokenizer_manager::TokenizerManager;
pub use self::whitespace_tokenizer::WhitespaceTokenizer; pub use self::whitespace_tokenizer::WhitespaceTokenizer;


@@ -1,5 +1,4 @@
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
use crate::TantivyError;
/// Tokenize the text by splitting words into n-grams of the given size(s) /// Tokenize the text by splitting words into n-grams of the given size(s)
/// ///
@@ -34,7 +33,7 @@ use crate::TantivyError;
/// ```rust /// ```rust
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// let mut tokenizer = NgramTokenizer::new(2, 3, false).unwrap(); /// let mut tokenizer = NgramTokenizer::new(2, 3, false);
/// let mut stream = tokenizer.token_stream("hello"); /// let mut stream = tokenizer.token_stream("hello");
/// { /// {
/// let token = stream.next().unwrap(); /// let token = stream.next().unwrap();
@@ -80,7 +79,7 @@ use crate::TantivyError;
/// } /// }
/// assert!(stream.next().is_none()); /// assert!(stream.next().is_none());
/// ``` /// ```
#[derive(Clone, Debug)] #[derive(Clone)]
pub struct NgramTokenizer { pub struct NgramTokenizer {
/// min size of the n-gram /// min size of the n-gram
min_gram: usize, min_gram: usize,
@@ -93,39 +92,30 @@ pub struct NgramTokenizer {
impl NgramTokenizer { impl NgramTokenizer {
/// Configures a new Ngram tokenizer /// Configures a new Ngram tokenizer
pub fn new( pub fn new(min_gram: usize, max_gram: usize, prefix_only: bool) -> NgramTokenizer {
min_gram: usize, assert!(min_gram > 0, "min_gram must be greater than 0");
max_gram: usize, assert!(
prefix_only: bool, min_gram <= max_gram,
) -> crate::Result<NgramTokenizer> { "min_gram must not be greater than max_gram"
if min_gram == 0 { );
return Err(TantivyError::InvalidArgument( NgramTokenizer {
"min_gram must be greater than 0".to_string(),
));
}
if min_gram > max_gram {
return Err(TantivyError::InvalidArgument(
"min_gram must not be greater than max_gram".to_string(),
));
}
Ok(NgramTokenizer {
min_gram, min_gram,
max_gram, max_gram,
prefix_only, prefix_only,
token: Token::default(), token: Token::default(),
}) }
} }
/// Create a `NGramTokenizer` which generates tokens for all inner ngrams. /// Create a `NGramTokenizer` which generates tokens for all inner ngrams.
/// ///
/// This is as opposed to only prefix ngrams . /// This is as opposed to only prefix ngrams .
pub fn all_ngrams(min_gram: usize, max_gram: usize) -> crate::Result<NgramTokenizer> { pub fn all_ngrams(min_gram: usize, max_gram: usize) -> NgramTokenizer {
Self::new(min_gram, max_gram, false) Self::new(min_gram, max_gram, false)
} }
/// Create a `NGramTokenizer` which only generates tokens for the /// Create a `NGramTokenizer` which only generates tokens for the
/// prefix ngrams. /// prefix ngrams.
pub fn prefix_only(min_gram: usize, max_gram: usize) -> crate::Result<NgramTokenizer> { pub fn prefix_only(min_gram: usize, max_gram: usize) -> NgramTokenizer {
Self::new(min_gram, max_gram, true) Self::new(min_gram, max_gram, true)
} }
} }
@@ -359,11 +349,7 @@ mod tests {
#[test] #[test]
fn test_ngram_tokenizer_1_2_false() { fn test_ngram_tokenizer_1_2_false() {
let tokens = test_helper( let tokens = test_helper(NgramTokenizer::all_ngrams(1, 2).token_stream("hello"));
NgramTokenizer::all_ngrams(1, 2)
.unwrap()
.token_stream("hello"),
);
assert_eq!(tokens.len(), 9); assert_eq!(tokens.len(), 9);
assert_token(&tokens[0], 0, "h", 0, 1); assert_token(&tokens[0], 0, "h", 0, 1);
assert_token(&tokens[1], 0, "he", 0, 2); assert_token(&tokens[1], 0, "he", 0, 2);
@@ -378,11 +364,7 @@ mod tests {
#[test] #[test]
fn test_ngram_tokenizer_min_max_equal() { fn test_ngram_tokenizer_min_max_equal() {
let tokens = test_helper( let tokens = test_helper(NgramTokenizer::all_ngrams(3, 3).token_stream("hello"));
NgramTokenizer::all_ngrams(3, 3)
.unwrap()
.token_stream("hello"),
);
assert_eq!(tokens.len(), 3); assert_eq!(tokens.len(), 3);
assert_token(&tokens[0], 0, "hel", 0, 3); assert_token(&tokens[0], 0, "hel", 0, 3);
assert_token(&tokens[1], 0, "ell", 1, 4); assert_token(&tokens[1], 0, "ell", 1, 4);
@@ -391,11 +373,7 @@ mod tests {
#[test] #[test]
fn test_ngram_tokenizer_2_5_prefix() { fn test_ngram_tokenizer_2_5_prefix() {
let tokens = test_helper( let tokens = test_helper(NgramTokenizer::prefix_only(2, 5).token_stream("frankenstein"));
NgramTokenizer::prefix_only(2, 5)
.unwrap()
.token_stream("frankenstein"),
);
assert_eq!(tokens.len(), 4); assert_eq!(tokens.len(), 4);
assert_token(&tokens[0], 0, "fr", 0, 2); assert_token(&tokens[0], 0, "fr", 0, 2);
assert_token(&tokens[1], 0, "fra", 0, 3); assert_token(&tokens[1], 0, "fra", 0, 3);
@@ -405,11 +383,7 @@ mod tests {
#[test] #[test]
fn test_ngram_non_ascii_1_2() { fn test_ngram_non_ascii_1_2() {
let tokens = test_helper( let tokens = test_helper(NgramTokenizer::all_ngrams(1, 2).token_stream("hεllo"));
NgramTokenizer::all_ngrams(1, 2)
.unwrap()
.token_stream("hεllo"),
);
assert_eq!(tokens.len(), 9); assert_eq!(tokens.len(), 9);
assert_token(&tokens[0], 0, "h", 0, 1); assert_token(&tokens[0], 0, "h", 0, 1);
assert_token(&tokens[1], 0, "hε", 0, 3); assert_token(&tokens[1], 0, "hε", 0, 3);
@@ -424,11 +398,7 @@ mod tests {
#[test] #[test]
fn test_ngram_non_ascii_2_5_prefix() { fn test_ngram_non_ascii_2_5_prefix() {
let tokens = test_helper( let tokens = test_helper(NgramTokenizer::prefix_only(2, 5).token_stream("hεllo"));
NgramTokenizer::prefix_only(2, 5)
.unwrap()
.token_stream("hεllo"),
);
assert_eq!(tokens.len(), 4); assert_eq!(tokens.len(), 4);
assert_token(&tokens[0], 0, "hε", 0, 3); assert_token(&tokens[0], 0, "hε", 0, 3);
assert_token(&tokens[1], 0, "hεl", 0, 4); assert_token(&tokens[1], 0, "hεl", 0, 4);
@@ -438,26 +408,22 @@ mod tests {
#[test] #[test]
fn test_ngram_empty() { fn test_ngram_empty() {
let tokens = test_helper(NgramTokenizer::all_ngrams(1, 5).unwrap().token_stream("")); let tokens = test_helper(NgramTokenizer::all_ngrams(1, 5).token_stream(""));
assert!(tokens.is_empty()); assert!(tokens.is_empty());
let tokens = test_helper(NgramTokenizer::all_ngrams(2, 5).unwrap().token_stream("")); let tokens = test_helper(NgramTokenizer::all_ngrams(2, 5).token_stream(""));
assert!(tokens.is_empty()); assert!(tokens.is_empty());
} }
#[test] #[test]
#[should_panic(expected = "min_gram must be greater than 0")] #[should_panic(expected = "min_gram must be greater than 0")]
fn test_ngram_min_max_interval_empty() { fn test_ngram_min_max_interval_empty() {
test_helper( test_helper(NgramTokenizer::all_ngrams(0, 2).token_stream("hellossss"));
NgramTokenizer::all_ngrams(0, 2)
.unwrap()
.token_stream("hellossss"),
);
} }
#[test] #[test]
#[should_panic(expected = "min_gram must not be greater than max_gram")] #[should_panic(expected = "min_gram must not be greater than max_gram")]
fn test_invalid_interval_should_panic_if_smaller() { fn test_invalid_interval_should_panic_if_smaller() {
NgramTokenizer::all_ngrams(2, 1).unwrap(); NgramTokenizer::all_ngrams(2, 1);
} }
#[test] #[test]
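The hunks above move `NgramTokenizer` between panicking assertions and a fallible constructor. A short sketch of the `Result`-returning side of the diff; the text being tokenized and the printed output are illustrative:

```rust
use tantivy::tokenizer::{NgramTokenizer, TokenStream, Tokenizer};

fn main() -> tantivy::Result<()> {
    // Bounds are validated at construction time: 0 < min_gram <= max_gram.
    let mut tokenizer = NgramTokenizer::all_ngrams(2, 3)?;
    let mut stream = tokenizer.token_stream("hello");
    while let Some(token) = stream.next() {
        println!("{:?} at bytes {}..{}", token.text, token.offset_from, token.offset_to);
    }

    // Invalid bounds come back as errors instead of panics on this side of the diff.
    assert!(NgramTokenizer::all_ngrams(0, 2).is_err());
    assert!(NgramTokenizer::all_ngrams(2, 1).is_err());
    Ok(())
}
```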


@@ -86,8 +86,6 @@ impl TokenFilter for SplitCompoundWords {
SplitCompoundWordsFilter { SplitCompoundWordsFilter {
dict: self.dict, dict: self.dict,
inner: tokenizer, inner: tokenizer,
cuts: Vec::new(),
parts: Vec::new(),
} }
} }
} }
@@ -96,33 +94,29 @@ impl TokenFilter for SplitCompoundWords {
pub struct SplitCompoundWordsFilter<T> { pub struct SplitCompoundWordsFilter<T> {
dict: AhoCorasick, dict: AhoCorasick,
inner: T, inner: T,
cuts: Vec<usize>,
parts: Vec<Token>,
} }
impl<T: Tokenizer> Tokenizer for SplitCompoundWordsFilter<T> { impl<T: Tokenizer> Tokenizer for SplitCompoundWordsFilter<T> {
type TokenStream<'a> = SplitCompoundWordsTokenStream<'a, T::TokenStream<'a>>; type TokenStream<'a> = SplitCompoundWordsTokenStream<T::TokenStream<'a>>;
fn token_stream<'a>(&'a mut self, text: &'a str) -> Self::TokenStream<'a> { fn token_stream<'a>(&'a mut self, text: &'a str) -> Self::TokenStream<'a> {
self.cuts.clear();
self.parts.clear();
SplitCompoundWordsTokenStream { SplitCompoundWordsTokenStream {
dict: self.dict.clone(), dict: self.dict.clone(),
tail: self.inner.token_stream(text), tail: self.inner.token_stream(text),
cuts: &mut self.cuts, cuts: Vec::new(),
parts: &mut self.parts, parts: Vec::new(),
} }
} }
} }
pub struct SplitCompoundWordsTokenStream<'a, T> { pub struct SplitCompoundWordsTokenStream<T> {
dict: AhoCorasick, dict: AhoCorasick,
tail: T, tail: T,
cuts: &'a mut Vec<usize>, cuts: Vec<usize>,
parts: &'a mut Vec<Token>, parts: Vec<Token>,
} }
impl<'a, T: TokenStream> SplitCompoundWordsTokenStream<'a, T> { impl<T: TokenStream> SplitCompoundWordsTokenStream<T> {
// Will use `self.cuts` to fill `self.parts` if `self.tail.token()` // Will use `self.cuts` to fill `self.parts` if `self.tail.token()`
// can fully be split into consecutive matches against `self.dict`. // can fully be split into consecutive matches against `self.dict`.
fn split(&mut self) { fn split(&mut self) {
@@ -158,7 +152,7 @@ impl<'a, T: TokenStream> SplitCompoundWordsTokenStream<'a, T> {
} }
} }
impl<'a, T: TokenStream> TokenStream for SplitCompoundWordsTokenStream<'a, T> { impl<T: TokenStream> TokenStream for SplitCompoundWordsTokenStream<T> {
fn advance(&mut self) -> bool { fn advance(&mut self) -> bool {
self.parts.pop(); self.parts.pop();


@@ -5,32 +5,12 @@ use tokenizer_api::{BoxTokenStream, TokenFilter, Tokenizer};
use crate::tokenizer::empty_tokenizer::EmptyTokenizer; use crate::tokenizer::empty_tokenizer::EmptyTokenizer;
/// `TextAnalyzer` tokenizes an input text into tokens and modifies the resulting `TokenStream`. /// `TextAnalyzer` tokenizes an input text into tokens and modifies the resulting `TokenStream`.
#[derive(Clone)]
pub struct TextAnalyzer { pub struct TextAnalyzer {
tokenizer: Box<dyn BoxableTokenizer>, tokenizer: Box<dyn BoxableTokenizer>,
} }
impl Tokenizer for Box<dyn BoxableTokenizer> {
type TokenStream<'a> = BoxTokenStream<'a>;
// Note: we want to call `box_token_stream` on the concrete `Tokenizer`
// implementation, not the `BoxableTokenizer` one as it will cause
// a recursive call (and a stack overflow).
fn token_stream<'a>(&'a mut self, text: &'a str) -> Self::TokenStream<'a> {
(**self).box_token_stream(text)
}
}
impl Clone for Box<dyn BoxableTokenizer> {
// Note: we want to call `box_clone` on the concrete `Tokenizer`
// implementation in order to clone the concrete `Tokenizer`.
fn clone(&self) -> Self {
(**self).box_clone()
}
}
/// A boxable `Tokenizer`, with its `TokenStream` type erased. /// A boxable `Tokenizer`, with its `TokenStream` type erased.
pub trait BoxableTokenizer: 'static + Send + Sync { trait BoxableTokenizer: 'static + Send + Sync {
/// Creates a boxed token stream for a given `str`. /// Creates a boxed token stream for a given `str`.
fn box_token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a>; fn box_token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a>;
/// Clone this tokenizer. /// Clone this tokenizer.
@@ -39,13 +19,21 @@ pub trait BoxableTokenizer: 'static + Send + Sync {
impl<T: Tokenizer> BoxableTokenizer for T { impl<T: Tokenizer> BoxableTokenizer for T {
fn box_token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a> { fn box_token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a> {
BoxTokenStream::new(self.token_stream(text)) self.token_stream(text).into()
} }
fn box_clone(&self) -> Box<dyn BoxableTokenizer> { fn box_clone(&self) -> Box<dyn BoxableTokenizer> {
Box::new(self.clone()) Box::new(self.clone())
} }
} }
impl Clone for TextAnalyzer {
fn clone(&self) -> Self {
TextAnalyzer {
tokenizer: self.tokenizer.box_clone(),
}
}
}
impl Default for TextAnalyzer { impl Default for TextAnalyzer {
fn default() -> TextAnalyzer { fn default() -> TextAnalyzer {
TextAnalyzer::from(EmptyTokenizer) TextAnalyzer::from(EmptyTokenizer)
@@ -66,12 +54,12 @@ impl TextAnalyzer {
/// Creates a token stream for a given `str`. /// Creates a token stream for a given `str`.
pub fn token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a> { pub fn token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a> {
self.tokenizer.token_stream(text) self.tokenizer.box_token_stream(text)
} }
} }
/// Builder helper for [`TextAnalyzer`] /// Builder helper for [`TextAnalyzer`]
pub struct TextAnalyzerBuilder<T = Box<dyn BoxableTokenizer>> { pub struct TextAnalyzerBuilder<T> {
tokenizer: T, tokenizer: T,
} }
@@ -95,23 +83,6 @@ impl<T: Tokenizer> TextAnalyzerBuilder<T> {
} }
} }
/// Boxes the internal tokenizer. This is useful for adding dynamic filters.
/// Note: this will be less performant than the non boxed version.
pub fn dynamic(self) -> TextAnalyzerBuilder {
let boxed_tokenizer = Box::new(self.tokenizer);
TextAnalyzerBuilder {
tokenizer: boxed_tokenizer,
}
}
/// Appends a token filter to the current builder and returns a boxed version of the
/// tokenizer. This is useful when you want to build a `TextAnalyzer` dynamically.
/// Prefer using `TextAnalyzer::builder(tokenizer).filter(token_filter).build()` if
/// possible as it will be more performant and create less boxes.
pub fn filter_dynamic<F: TokenFilter>(self, token_filter: F) -> TextAnalyzerBuilder {
self.filter(token_filter).dynamic()
}
/// Finalize building the TextAnalyzer /// Finalize building the TextAnalyzer
pub fn build(self) -> TextAnalyzer { pub fn build(self) -> TextAnalyzer {
TextAnalyzer { TextAnalyzer {
@@ -119,57 +90,3 @@ impl<T: Tokenizer> TextAnalyzerBuilder<T> {
} }
} }
} }
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::{LowerCaser, RemoveLongFilter, SimpleTokenizer};
#[test]
fn test_text_analyzer_builder() {
let mut analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
.filter(RemoveLongFilter::limit(40))
.filter(LowerCaser)
.build();
let mut stream = analyzer.token_stream("- first bullet point");
assert_eq!(stream.next().unwrap().text, "first");
assert_eq!(stream.next().unwrap().text, "bullet");
}
#[test]
fn test_text_analyzer_with_filters_boxed() {
// This test shows how one can build a TextAnalyzer dynamically, by stacking a list
// of parametrizable token filters.
//
// The following enum is the thing that would be serializable.
// Note that token filters can have their own parameters, too, like the RemoveLongFilter
enum SerializableTokenFilterEnum {
LowerCaser(LowerCaser),
RemoveLongFilter(RemoveLongFilter),
}
// Note that everything below is dynamic.
let filters: Vec<SerializableTokenFilterEnum> = vec![
SerializableTokenFilterEnum::LowerCaser(LowerCaser),
SerializableTokenFilterEnum::RemoveLongFilter(RemoveLongFilter::limit(12)),
];
let mut analyzer_builder: TextAnalyzerBuilder =
TextAnalyzer::builder(SimpleTokenizer::default())
.filter_dynamic(RemoveLongFilter::limit(40))
.filter_dynamic(LowerCaser);
for filter in filters {
analyzer_builder = match filter {
SerializableTokenFilterEnum::LowerCaser(lower_caser) => {
analyzer_builder.filter_dynamic(lower_caser)
}
SerializableTokenFilterEnum::RemoveLongFilter(remove_long_filter) => {
analyzer_builder.filter_dynamic(remove_long_filter)
}
}
}
let mut analyzer = analyzer_builder.build();
let mut stream = analyzer.token_stream("first bullet point");
assert_eq!(stream.next().unwrap().text, "first");
assert_eq!(stream.next().unwrap().text, "bullet");
}
}
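The `BoxableTokenizer` hunks above differ mainly in where `Clone` lives: one side implements `Clone for Box<dyn BoxableTokenizer>` directly, the other routes `Clone for TextAnalyzer` through `box_clone`. The underlying trick is the same either way, so here is a minimal, dependency-free sketch of that object-safe cloning pattern; the `Greeter`/`Analyzer` names are hypothetical stand-ins, not tantivy types:

```rust
// Hypothetical stand-in for `Tokenizer`: a clonable trait we want to box.
trait Greeter: Clone + Send + Sync + 'static {
    fn greet(&self, name: &str) -> String;
}

// Object-safe companion trait, in the spirit of `BoxableTokenizer`:
// `Clone` is not object safe, so cloning is exposed as `box_clone` instead.
trait BoxableGreeter: Send + Sync {
    fn boxed_greet(&self, name: &str) -> String;
    fn box_clone(&self) -> Box<dyn BoxableGreeter>;
}

impl<T: Greeter> BoxableGreeter for T {
    fn boxed_greet(&self, name: &str) -> String {
        self.greet(name)
    }
    fn box_clone(&self) -> Box<dyn BoxableGreeter> {
        Box::new(self.clone())
    }
}

// Stand-in for `TextAnalyzer`: owns a type-erased greeter but stays clonable.
struct Analyzer {
    greeter: Box<dyn BoxableGreeter>,
}

impl Clone for Analyzer {
    fn clone(&self) -> Self {
        Analyzer {
            greeter: self.greeter.box_clone(),
        }
    }
}

#[derive(Clone)]
struct Hello;

impl Greeter for Hello {
    fn greet(&self, name: &str) -> String {
        format!("hello, {name}")
    }
}

fn main() {
    let analyzer = Analyzer {
        greeter: Box::new(Hello),
    };
    let cloned = analyzer.clone();
    println!("{}", cloned.greeter.boxed_greet("tantivy"));
}
```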


@@ -7,7 +7,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy" repository = "https://github.com/quickwit-oss/tantivy"
keywords = ["search", "information", "retrieval", "sstable"] keywords = ["search", "information", "retrieval", "sstable"]
categories = ["database-implementations", "data-structures", "compression"] categories = ["database-implementations", "data-structures", "compression"]
description = "sstables for tantivy" desciption = "sstables for tantivy"
[dependencies] [dependencies]
common = {version= "0.5", path="../common", package="tantivy-common"} common = {version= "0.5", path="../common", package="tantivy-common"}


@@ -44,7 +44,7 @@ pub fn fast_short_slice_copy(src: &[u8], dst: &mut [u8]) {
return; return;
} }
// The code will use the vmovdqu instruction to copy 32 bytes at a time. /// The code will use the vmovdqu instruction to copy 32 bytes at a time.
#[cfg(target_feature = "avx")] #[cfg(target_feature = "avx")]
{ {
if len <= 64 { if len <= 64 {


@@ -63,22 +63,17 @@ pub trait Tokenizer: 'static + Clone + Send + Sync {
/// Simple wrapper of `Box<dyn TokenStream + 'a>`. /// Simple wrapper of `Box<dyn TokenStream + 'a>`.
pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>); pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>);
impl<'a> TokenStream for BoxTokenStream<'a> { impl<'a> From<BoxTokenStream<'a>> for Box<dyn TokenStream + 'a> {
fn advance(&mut self) -> bool { fn from(token_stream: BoxTokenStream<'a>) -> Self {
self.0.advance() token_stream.0
}
fn token(&self) -> &Token {
self.0.token()
}
fn token_mut(&mut self) -> &mut Token {
self.0.token_mut()
} }
} }
impl<'a> BoxTokenStream<'a> { impl<'a, T> From<T> for BoxTokenStream<'a>
pub fn new<T: TokenStream + 'a>(token_stream: T) -> BoxTokenStream<'a> { where
T: TokenStream + 'a,
{
fn from(token_stream: T) -> BoxTokenStream<'a> {
BoxTokenStream(Box::new(token_stream)) BoxTokenStream(Box::new(token_stream))
} }
} }
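This last hunk is the change the commit title refers to: instead of constructing a `BoxTokenStream` through `BoxTokenStream::new(...)`, any concrete `TokenStream` can be type-erased with the blanket `From` impl. A sketch of what a call site looks like on that side of the diff; the pre-tokenized input is illustrative:

```rust
use tantivy::tokenizer::{
    BoxTokenStream, PreTokenizedStream, PreTokenizedString, Token, TokenStream,
};

fn main() {
    let pre_tokenized = PreTokenizedString {
        text: "hello world".to_string(),
        tokens: vec![
            Token {
                offset_from: 0,
                offset_to: 5,
                position: 0,
                text: "hello".to_string(),
                position_length: 1,
            },
            Token {
                offset_from: 6,
                offset_to: 11,
                position: 1,
                text: "world".to_string(),
                position_length: 1,
            },
        ],
    };

    // With the blanket `From<T: TokenStream>` impl, `.into()` erases the concrete
    // stream type, where the other side of the diff calls `BoxTokenStream::new`.
    let mut stream: BoxTokenStream = PreTokenizedStream::from(pre_tokenized).into();
    while stream.advance() {
        println!("{}", stream.token().text);
    }
}
```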