Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-28 04:52:55 +00:00.

Compare commits: `addconvers` ... `numeric_wi` (27 commits).

Commits (SHA1): 18de6f477b, 1e7cd48cfa, 7f51d85bbd, ad76e32398, 7575f9bf1c, 67bdf3f5f6, 3c300666ad, b91d3f6be4, a8e76513bb, 0a23201338, 81330aaf89, 98a3b01992, d341520938, 5c9af73e41, ad4c940fa3, 910b0b0c61, 3fef052bf1, 040554f2f9, 17186ca9c9, 212d59c9ab, 1a1f252a3f, d73706dede, 44850e1036, 3b0cbf8102, 4aa131c3db, 59962097d0, ebc78127f3
CHANGELOG.md — 13 changed lines

```
@@ -1,5 +1,14 @@
Tantivy 0.20 [Unreleased]
Tantivy 0.20.2
================================
- Align numerical type priority order on the search side. [#2088](https://github.com/quickwit-oss/tantivy/issues/2088) (@fmassot)
- Fix is_child_of function not considering the root facet. [#2086](https://github.com/quickwit-oss/tantivy/issues/2086) (@adamreichhold)

Tantivy 0.20.1
================================
- Fix building on windows with mmap [#2070](https://github.com/quickwit-oss/tantivy/issues/2070) (@ChillFish8)

Tantivy 0.20
================================
#### Bugfixes
- Fix phrase queries with slop (slop supports now transpositions, algorithm that carries slop so far for num terms > 2) [#2031](https://github.com/quickwit-oss/tantivy/issues/2031)[#2020](https://github.com/quickwit-oss/tantivy/issues/2020)(@PSeitz)
@@ -38,12 +47,14 @@ Tantivy 0.20 [Unreleased]
- Add aggregation support for JSON type [#1888](https://github.com/quickwit-oss/tantivy/issues/1888) (@PSeitz)
- Mixed types support on JSON fields in aggs [#1971](https://github.com/quickwit-oss/tantivy/issues/1971) (@PSeitz)
- Perf: Fetch blocks of vals in aggregation for all cardinality [#1950](https://github.com/quickwit-oss/tantivy/issues/1950) (@PSeitz)
- Allow histogram bounds to be passed as Rfc3339 [#2076](https://github.com/quickwit-oss/tantivy/issues/2076) (@PSeitz)
- `Searcher` with disabled scoring via `EnableScoring::Disabled` [#1780](https://github.com/quickwit-oss/tantivy/issues/1780) (@shikhar)
- Enable tokenizer on json fields [#2053](https://github.com/quickwit-oss/tantivy/issues/2053) (@PSeitz)
- Enforcing "NOT" and "-" queries consistency in UserInputAst [#1609](https://github.com/quickwit-oss/tantivy/issues/1609) (@bazhenov)
- Faster indexing
- Refactor tokenization pipeline to use GATs [#1924](https://github.com/quickwit-oss/tantivy/issues/1924) (@trinity-1686a)
- Faster term hash map [#2058](https://github.com/quickwit-oss/tantivy/issues/2058)[#1940](https://github.com/quickwit-oss/tantivy/issues/1940) (@PSeitz)
- tokenizer-api: reduce Tokenizer allocation overhead [#2062](https://github.com/quickwit-oss/tantivy/issues/2062) (@PSeitz)
- Refactor vint [#2010](https://github.com/quickwit-oss/tantivy/issues/2010) (@PSeitz)
- Faster search
- Work in batches of docs on the SegmentCollector (Only for cases without score for now) [#1937](https://github.com/quickwit-oss/tantivy/issues/1937) (@PSeitz)
```
Cargo.toml — 12 changed lines

```
@@ -23,8 +23,8 @@ once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
aho-corasick = "1.0"
tantivy-fst = "0.4.0"
memmap2 = { version = "0.6.0", optional = true }
lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true }
memmap2 = { version = "0.7.1", optional = true }
lz4_flex = { version = "0.11", default-features = false, optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.12", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true }
@@ -44,14 +44,14 @@ census = "0.4.0"
rustc-hash = "1.1.0"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.5.0"
fail = { version = "0.5.0", optional = true }
murmurhash32 = "0.3.0"
time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.10.0"
lru = "0.11.0"
fastdivide = "0.4.0"
itertools = "0.10.3"
itertools = "0.11.0"
measure_time = "0.8.2"
async-trait = "0.1.53"
arc-swap = "1.5.0"
@@ -112,7 +112,7 @@ lz4-compression = ["lz4_flex"]
snappy-compression = ["snap"]
zstd-compression = ["zstd"]

failpoints = ["fail/failpoints"]
failpoints = ["fail", "fail/failpoints"]
unstable = [] # useful for benches.

quickwit = ["sstable", "futures-util"]
```
```
@@ -1,5 +1,7 @@
use criterion::{criterion_group, criterion_main, Criterion};
use tantivy::tokenizer::TokenizerManager;
use tantivy::tokenizer::{
LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer, TokenizerManager,
};

const ALICE_TXT: &str = include_str!("alice.txt");

@@ -16,7 +18,26 @@ pub fn criterion_benchmark(c: &mut Criterion) {
assert_eq!(word_count, 30_731);
})
});
let mut dynamic_analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
.dynamic()
.filter_dynamic(RemoveLongFilter::limit(40))
.filter_dynamic(LowerCaser)
.build();
c.bench_function("dynamic-tokenize-alice", |b| {
b.iter(|| {
let mut word_count = 0;
let mut token_stream = dynamic_analyzer.token_stream(ALICE_TXT);
while token_stream.advance() {
word_count += 1;
}
assert_eq!(word_count, 30_731);
})
});
}

criterion_group!(benches, criterion_benchmark);
criterion_group! {
name = benches;
config = Criterion::default().sample_size(200);
targets = criterion_benchmark
}
criterion_main!(benches);
```
```
@@ -1,6 +1,6 @@
use std::ops::RangeInclusive;

#[cfg(any(target_arch = "x86_64"))]
#[cfg(target_arch = "x86_64")]
mod avx2;

mod scalar;
```
```
@@ -5,11 +5,11 @@ edition = "2021"
license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
desciption = "column oriented storage for tantivy"
description = "column oriented storage for tantivy"
categories = ["database-implementations", "data-structures", "compression"]

[dependencies]
itertools = "0.10.5"
itertools = "0.11.0"
fnv = "1.0.7"
fastdivide = "0.4.0"
```
```
@@ -168,8 +168,9 @@ mod tests {
)
.into();
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
else { panic!("Excpected a multivalued index") };
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
panic!("Excpected a multivalued index")
};
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5]);
}
@@ -200,8 +201,9 @@ mod tests {
)
.into();
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
else { panic!("Excpected a multivalued index") };
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
panic!("Excpected a multivalued index")
};
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5, 6]);
}
```
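This hunk, and several similar ones below, only re-wrap `let ... else` bindings, presumably to the layout rustfmt now applies to the construct. For readers who have not seen the syntax (stable since Rust 1.65), a minimal standalone illustration, unrelated to tantivy's code:

```rust
fn first_even(values: &[i32]) -> i32 {
    // `let ... else` either binds the pattern or takes the diverging else branch.
    let Some(first_even) = values.iter().copied().find(|v| v % 2 == 0) else {
        return -1;
    };
    first_even
}

fn main() {
    assert_eq!(first_even(&[3, 5, 8, 9]), 8);
    assert_eq!(first_even(&[1, 3]), -1);
}
```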
```
@@ -157,7 +157,13 @@ mod tests {
Cardinality::Optional,
&shuffle_merge_order,
);
let SerializableColumnIndex::Optional { non_null_row_ids, num_rows } = serializable_index else { panic!() };
let SerializableColumnIndex::Optional {
non_null_row_ids,
num_rows,
} = serializable_index
else {
panic!()
};
assert_eq!(num_rows, 2);
let non_null_rows: Vec<RowId> = non_null_row_ids.boxed_iter().collect();
assert_eq!(&non_null_rows, &[1]);
```
```
@@ -2,7 +2,7 @@

//! # `fastfield_codecs`
//!
//! - Columnar storage of data for tantivy [`Column`].
//! - Columnar storage of data for tantivy [`crate::Column`].
//! - Encode data in different codecs.
//! - Monotonically map values to u64/u128
```
```
@@ -83,7 +83,8 @@ impl ColumnValues for BitpackedReader {
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
let Some(transformed_range) = transform_range_before_linear_transformation(&self.stats, range)
let Some(transformed_range) =
transform_range_before_linear_transformation(&self.stats, range)
else {
positions.clear();
return;
```
```
@@ -52,8 +52,8 @@ pub enum MergeRowOrder {
/// Columnar tables are simply stacked one above the other.
/// If the i-th columnar_readers has n_rows_i rows, then
/// in the resulting columnar,
/// rows [r0..n_row_0) contains the row of columnar_readers[0], in ordder
/// rows [n_row_0..n_row_0 + n_row_1 contains the row of columnar_readers[1], in order.
/// rows [r0..n_row_0) contains the row of `columnar_readers[0]`, in ordder
/// rows [n_row_0..n_row_0 + n_row_1 contains the row of `columnar_readers[1]`, in order.
/// ..
/// No documents is deleted.
Stack(StackMergeOrder),
```
```
@@ -244,7 +244,9 @@ fn test_merge_columnar_numbers() {
assert_eq!(columnar_reader.num_columns(), 1);
let cols = columnar_reader.read_columns("numbers").unwrap();
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::F64(vals) = dynamic_column else { panic!() };
let DynamicColumn::F64(vals) = dynamic_column else {
panic!()
};
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
assert_eq!(vals.first(0u32), Some(-1f64));
assert_eq!(vals.first(1u32), None);
@@ -270,7 +272,9 @@ fn test_merge_columnar_texts() {
assert_eq!(columnar_reader.num_columns(), 1);
let cols = columnar_reader.read_columns("texts").unwrap();
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
let DynamicColumn::Str(vals) = dynamic_column else {
panic!()
};
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);

let get_str_for_ord = |ord| {
@@ -317,7 +321,9 @@ fn test_merge_columnar_byte() {
assert_eq!(columnar_reader.num_columns(), 1);
let cols = columnar_reader.read_columns("bytes").unwrap();
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
let DynamicColumn::Bytes(vals) = dynamic_column else {
panic!()
};
let get_bytes_for_ord = |ord| {
let mut out = Vec::new();
vals.ord_to_bytes(ord, &mut out).unwrap();
@@ -371,7 +377,9 @@ fn test_merge_columnar_byte_with_missing() {
assert_eq!(columnar_reader.num_columns(), 2);
let cols = columnar_reader.read_columns("col").unwrap();
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
let DynamicColumn::Bytes(vals) = dynamic_column else {
panic!()
};
let get_bytes_for_ord = |ord| {
let mut out = Vec::new();
vals.ord_to_bytes(ord, &mut out).unwrap();
@@ -423,7 +431,9 @@ fn test_merge_columnar_different_types() {

// numeric column
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::I64(vals) = dynamic_column else { panic!() };
let DynamicColumn::I64(vals) = dynamic_column else {
panic!()
};
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
assert_eq!(vals.values_for_doc(0).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(1).collect_vec(), vec![]);
@@ -433,7 +443,9 @@ fn test_merge_columnar_different_types() {

// text column
let dynamic_column = cols[1].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
let DynamicColumn::Str(vals) = dynamic_column else {
panic!()
};
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
let get_str_for_ord = |ord| {
let mut out = String::new();
```
```
@@ -98,9 +98,11 @@ impl ColumnarWriter {
///
/// The sort applied is stable.
pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> {
let Some(numerical_col_writer) =
self.numerical_field_hash_map.get::<NumericalColumnWriter>(sort_field.as_bytes()) else {
return Vec::new();
let Some(numerical_col_writer) = self
.numerical_field_hash_map
.get::<NumericalColumnWriter>(sort_field.as_bytes())
else {
return Vec::new();
};
let mut symbols_buffer = Vec::new();
let mut values = Vec::new();
```
```
@@ -57,7 +57,9 @@ fn test_dataframe_writer_bool() {
assert_eq!(cols[0].num_bytes(), 22);
assert_eq!(cols[0].column_type(), ColumnType::Bool);
let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::Bool(bool_col) = dyn_bool_col else { panic!(); };
let DynamicColumn::Bool(bool_col) = dyn_bool_col else {
panic!();
};
let vals: Vec<Option<bool>> = (0..5).map(|row_id| bool_col.first(row_id)).collect();
assert_eq!(&vals, &[None, Some(false), None, Some(true), None,]);
}
@@ -79,7 +81,9 @@ fn test_dataframe_writer_u64_multivalued() {
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 29);
let dyn_i64_col = cols[0].open().unwrap();
let DynamicColumn::I64(divisor_col) = dyn_i64_col else { panic!(); };
let DynamicColumn::I64(divisor_col) = dyn_i64_col else {
panic!();
};
assert_eq!(
divisor_col.get_cardinality(),
crate::Cardinality::Multivalued
@@ -101,7 +105,9 @@ fn test_dataframe_writer_ip_addr() {
assert_eq!(cols[0].num_bytes(), 42);
assert_eq!(cols[0].column_type(), ColumnType::IpAddr);
let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else { panic!(); };
let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else {
panic!();
};
let vals: Vec<Option<Ipv6Addr>> = (0..5).map(|row_id| ip_col.first(row_id)).collect();
assert_eq!(
&vals,
@@ -134,7 +140,9 @@ fn test_dataframe_writer_numerical() {
// - null footer 6 bytes
assert_eq!(cols[0].num_bytes(), 33);
let column = cols[0].open().unwrap();
let DynamicColumn::I64(column_i64) = column else { panic!(); };
let DynamicColumn::I64(column_i64) = column else {
panic!();
};
assert_eq!(column_i64.index.get_cardinality(), Cardinality::Optional);
assert_eq!(column_i64.first(0), None);
assert_eq!(column_i64.first(1), Some(12i64));
@@ -198,7 +206,9 @@ fn test_dictionary_encoded_str() {
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1);
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else { panic!(); };
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else {
panic!();
};
let index: Vec<Option<u64>> = (0..5).map(|row_id| str_col.ords().first(row_id)).collect();
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
assert_eq!(str_col.num_rows(), 5);
@@ -230,7 +240,9 @@ fn test_dictionary_encoded_bytes() {
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1);
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else { panic!(); };
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else {
panic!();
};
let index: Vec<Option<u64>> = (0..5)
.map(|row_id| bytes_col.ords().first(row_id))
.collect();
@@ -533,28 +545,36 @@ trait AssertEqualToColumnValue {

impl AssertEqualToColumnValue for bool {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::Bool(val) = column_value else { panic!() };
let ColumnValue::Bool(val) = column_value else {
panic!()
};
assert_eq!(self, val);
}
}

impl AssertEqualToColumnValue for Ipv6Addr {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::IpAddr(val) = column_value else { panic!() };
let ColumnValue::IpAddr(val) = column_value else {
panic!()
};
assert_eq!(self, val);
}
}

impl<T: Coerce + PartialEq + Debug + Into<NumericalValue>> AssertEqualToColumnValue for T {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::Numerical(num) = column_value else { panic!() };
let ColumnValue::Numerical(num) = column_value else {
panic!()
};
assert_eq!(self, &T::coerce(*num));
}
}

impl AssertEqualToColumnValue for DateTime {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::DateTime(dt) = column_value else { panic!() };
let ColumnValue::DateTime(dt) = column_value else {
panic!()
};
assert_eq!(self, dt);
}
}
```
```
@@ -53,7 +53,7 @@ fn main() -> tantivy::Result<()> {
// this will store tokens of 3 characters each
index
.tokenizers()
.register("ngram3", NgramTokenizer::new(3, 3, false));
.register("ngram3", NgramTokenizer::new(3, 3, false).unwrap());

// To insert document we need an index writer.
// There must be only one writer at a time.
```
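As the added `.unwrap()` suggests, `NgramTokenizer::new` now returns a `Result` so that invalid parameters (presumably e.g. a zero `min_gram`) surface as an error rather than a panic deep inside the tokenizer. A small hedged sketch of handling that error explicitly instead of unwrapping:

```rust
use tantivy::tokenizer::NgramTokenizer;

// Build a 3-gram tokenizer, reporting a configuration error instead of unwrapping.
fn build_ngram3() -> NgramTokenizer {
    match NgramTokenizer::new(3, 3, false) {
        Ok(tokenizer) => tokenizer,
        Err(err) => panic!("invalid n-gram configuration: {err:?}"),
    }
}
```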
```
@@ -6,12 +6,14 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{
doc, DocAddress, DocId, Index, IndexReader, Opstamp, Searcher, SearcherGeneration, SegmentId,
SegmentReader, Warmer,
doc, DocAddress, DocId, Index, Opstamp, Searcher, SearcherGeneration, SegmentId, SegmentReader,
Warmer,
};

// This example shows how warmers can be used to
// load a values from an external sources using the Warmer API.
// load values from an external sources and
// tie their lifecycle to that of the index segments
// using the Warmer API.
//
// In this example, we assume an e-commerce search engine.

@@ -23,9 +25,11 @@ pub trait PriceFetcher: Send + Sync + 'static {
fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price>;
}

type SegmentKey = (SegmentId, Option<Opstamp>);

struct DynamicPriceColumn {
field: String,
price_cache: RwLock<HashMap<(SegmentId, Option<Opstamp>), Arc<Vec<Price>>>>,
price_cache: RwLock<HashMap<SegmentKey, Arc<Vec<Price>>>>,
price_fetcher: Box<dyn PriceFetcher>,
}

@@ -46,7 +50,6 @@ impl DynamicPriceColumn {
impl Warmer for DynamicPriceColumn {
fn warm(&self, searcher: &Searcher) -> tantivy::Result<()> {
for segment in searcher.segment_readers() {
let key = (segment.segment_id(), segment.delete_opstamp());
let product_id_reader = segment
.fast_fields()
.u64(&self.field)?
@@ -55,37 +58,40 @@ impl Warmer for DynamicPriceColumn {
.doc_ids_alive()
.map(|doc| product_id_reader.get_val(doc))
.collect();
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
let mut price_vals: Vec<Price> = Vec::new();
for doc in 0..segment.max_doc() {
if segment.is_deleted(doc) {
price_vals.push(0);
} else {
price_vals.push(prices_it.next().unwrap())
}
}

let mut prices = self.price_fetcher.fetch_prices(&product_ids).into_iter();

let prices: Vec<Price> = (0..segment.max_doc())
.map(|doc| {
if !segment.is_deleted(doc) {
prices.next().unwrap()
} else {
0
}
})
.collect();

let key = (segment.segment_id(), segment.delete_opstamp());
self.price_cache
.write()
.unwrap()
.insert(key, Arc::new(price_vals));
.insert(key, Arc::new(prices));
}

Ok(())
}

fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
let live_segment_id_and_delete_ops: HashSet<(SegmentId, Option<Opstamp>)> =
live_generations
.iter()
.flat_map(|gen| gen.segments())
.map(|(&segment_id, &opstamp)| (segment_id, opstamp))
.collect();
let mut price_cache_wrt = self.price_cache.write().unwrap();
// let price_cache = std::mem::take(&mut *price_cache_wrt);
// Drain would be nicer here.
*price_cache_wrt = std::mem::take(&mut *price_cache_wrt)
.into_iter()
.filter(|(seg_id_and_op, _)| !live_segment_id_and_delete_ops.contains(seg_id_and_op))
let live_keys: HashSet<SegmentKey> = live_generations
.iter()
.flat_map(|gen| gen.segments())
.map(|(&segment_id, &opstamp)| (segment_id, opstamp))
.collect();

self.price_cache
.write()
.unwrap()
.retain(|key, _| live_keys.contains(key));
}
}

@@ -100,17 +106,17 @@ pub struct ExternalPriceTable {

impl ExternalPriceTable {
pub fn update_price(&self, product_id: ProductId, price: Price) {
let mut prices_wrt = self.prices.write().unwrap();
prices_wrt.insert(product_id, price);
self.prices.write().unwrap().insert(product_id, price);
}
}

impl PriceFetcher for ExternalPriceTable {
fn fetch_prices(&self, product_ids: &[ProductId]) -> Vec<Price> {
let prices_read = self.prices.read().unwrap();
let prices = self.prices.read().unwrap();

product_ids
.iter()
.map(|product_id| prices_read.get(product_id).cloned().unwrap_or(0))
.map(|product_id| prices.get(product_id).cloned().unwrap_or(0))
.collect()
}
}
@@ -143,11 +149,8 @@ fn main() -> tantivy::Result<()> {
writer.add_document(doc!(product_id=>SNEAKERS, text=>"uber sweet sneakers"))?;
writer.commit()?;

let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade(
&(price_dynamic_column.clone() as Arc<dyn Warmer>),
)];
let reader: IndexReader = index.reader_builder().warmers(warmers).try_into()?;
reader.reload()?;
let warmers = vec![Arc::downgrade(&price_dynamic_column) as Weak<dyn Warmer>];
let reader = index.reader_builder().warmers(warmers).try_into()?;

let query_parser = QueryParser::for_index(&index, vec![text]);
let query = query_parser.parse_query("cooking")?;
```
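The rewritten example obtains the `Weak<dyn Warmer>` through unsized coercion instead of cloning the `Arc` and casting it first. A minimal standalone illustration of that coercion; the types here are stand-ins, not tantivy's:

```rust
use std::sync::{Arc, Weak};

trait Warmer: Send + Sync {}

struct DynamicPriceColumn;
impl Warmer for DynamicPriceColumn {}

fn main() {
    let price_column = Arc::new(DynamicPriceColumn);
    // Coerce Weak<DynamicPriceColumn> into Weak<dyn Warmer> at the binding site;
    // the diff spells the same coercion explicitly with `as Weak<dyn Warmer>`.
    let weak_warmer: Weak<dyn Warmer> = Arc::downgrade(&price_column);
    assert!(weak_warmer.upgrade().is_some());
}
```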
```
@@ -15,6 +15,12 @@
//! Results of final buckets are [`BucketResult`](super::agg_result::BucketResult).
//! Results of intermediate buckets are
//! [`IntermediateBucketResult`](super::intermediate_agg_result::IntermediateBucketResult)
//!
//! ## Supported Bucket Aggregations
//! - [Histogram](HistogramAggregation)
//! - [DateHistogram](DateHistogramAggregationReq)
//! - [Range](RangeAggregation)
//! - [Terms](TermsAggregation)

mod histogram;
mod range;
```
```
@@ -6,6 +6,15 @@
//! Some aggregations output a single numeric metric (e.g. Average) and are called
//! single-value numeric metrics aggregation, others generate multiple metrics (e.g. Stats) and are
//! called multi-value numeric metrics aggregation.
//!
//! ## Supported Metric Aggregations
//! - [Average](AverageAggregation)
//! - [Stats](StatsAggregation)
//! - [Min](MinAggregation)
//! - [Max](MaxAggregation)
//! - [Sum](SumAggregation)
//! - [Count](CountAggregation)
//! - [Percentiles](PercentilesAggregationReq)

mod average;
mod count;
```
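For readers unfamiliar with how the aggregations listed in these module docs are requested: tantivy's aggregation module accepts Elasticsearch-style JSON requests. A hedged sketch, assuming that request format; the field name "price" and the aggregation name are made up for illustration:

```rust
use tantivy::aggregation::agg_req::Aggregations;

// Deserialize a single-metric (average) aggregation request from JSON.
fn average_price_request() -> serde_json::Result<Aggregations> {
    serde_json::from_value(serde_json::json!({
        "avg_price": { "avg": { "field": "price" } }
    }))
}
```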
```
@@ -6,32 +6,35 @@
//
// Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples.

// ---
// Importing tantivy...
use std::fmt::Debug;
use std::marker::PhantomData;
use std::sync::Arc;

use columnar::{ColumnValues, DynamicColumn, HasAssociatedColumnType};
use columnar::{BytesColumn, Column, DynamicColumn, HasAssociatedColumnType};

use crate::collector::{Collector, SegmentCollector};
use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError};
use crate::{DocId, Score, SegmentReader, TantivyError};

/// The `FilterCollector` filters docs using a fast field value and a predicate.
/// Only the documents for which the predicate returned "true" will be passed on to the next
/// collector.
///
/// Only the documents containing at least one value for which the predicate returns `true`
/// will be passed on to the next collector.
///
/// In other words,
/// - documents with no values are filtered out.
/// - documents with several values are accepted if at least one value matches the predicate.
///
///
/// ```rust
/// use tantivy::collector::{TopDocs, FilterCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT, INDEXED, FAST};
/// use tantivy::schema::{Schema, TEXT, FAST};
/// use tantivy::{doc, DocAddress, Index};
///
/// # fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let price = schema_builder.add_u64_field("price", INDEXED | FAST);
/// let price = schema_builder.add_u64_field("price", FAST);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
@@ -47,20 +50,24 @@ use crate::{Score, SegmentReader, TantivyError};
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let no_filter_collector = FilterCollector::new(price, &|value: u64| value > 20_120u64, TopDocs::with_limit(2));
/// let no_filter_collector = FilterCollector::new(price, |value: u64| value > 20_120u64, TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &no_filter_collector)?;
///
/// assert_eq!(top_docs.len(), 1);
/// assert_eq!(top_docs[0].1, DocAddress::new(0, 1));
///
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, |value| value < 5u64, TopDocs::with_limit(2));
/// let filtered_top_docs = searcher.search(&query, &filter_all_collector)?;
///
/// assert_eq!(filtered_top_docs.len(), 0);
/// # Ok(())
/// # }
/// ```
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: Default>
///
/// Note that this is limited to fast fields which implement the
/// [`FastValue`][crate::fastfield::FastValue] trait, e.g. `u64` but not `&[u8]`.
/// To filter based on a bytes fast field, use a [`BytesFilterCollector`] instead.
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue>
where TPredicate: 'static + Clone
{
field: Field,
@@ -69,19 +76,15 @@ where TPredicate: 'static + Clone
t_predicate_value: PhantomData<TPredicateValue>,
}

impl<TCollector, TPredicate, TPredicateValue: Default>
impl<TCollector, TPredicate, TPredicateValue>
FilterCollector<TCollector, TPredicate, TPredicateValue>
where
TCollector: Collector + Send + Sync,
TPredicate: Fn(TPredicateValue) -> bool + Send + Sync + Clone,
{
/// Create a new FilterCollector.
pub fn new(
field: Field,
predicate: TPredicate,
collector: TCollector,
) -> FilterCollector<TCollector, TPredicate, TPredicateValue> {
FilterCollector {
/// Create a new `FilterCollector`.
pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self {
Self {
field,
predicate,
collector,
@@ -90,7 +93,7 @@ where
}
}

impl<TCollector, TPredicate, TPredicateValue: Default> Collector
impl<TCollector, TPredicate, TPredicateValue> Collector
for FilterCollector<TCollector, TPredicate, TPredicateValue>
where
TCollector: Collector + Send + Sync,
@@ -98,8 +101,6 @@ where
TPredicateValue: HasAssociatedColumnType,
DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>,
{
// That's the type of our result.
// Our standard deviation will be a float.
type Fruit = TCollector::Fruit;

type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>;
@@ -108,7 +109,7 @@ where
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> crate::Result<FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>> {
) -> crate::Result<Self::Child> {
let schema = segment_reader.schema();
let field_entry = schema.get_field_entry(self.field);
if !field_entry.is_fast() {
@@ -118,16 +119,16 @@ where
)));
}

let fast_field_reader = segment_reader
let column_opt = segment_reader
.fast_fields()
.column_first_or_default(schema.get_field_name(self.field))?;
.column_opt(field_entry.name())?;

let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;

Ok(FilterSegmentCollector {
fast_field_reader,
column_opt,
segment_collector,
predicate: self.predicate.clone(),
t_predicate_value: PhantomData,
@@ -146,35 +147,208 @@ where
}
}

pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TPredicate: 'static,
DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>,
{
fast_field_reader: Arc<dyn ColumnValues<TPredicateValue>>,
pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue> {
column_opt: Option<Column<TPredicateValue>>,
segment_collector: TSegmentCollector,
predicate: TPredicate,
t_predicate_value: PhantomData<TPredicateValue>,
}

impl<TSegmentCollector, TPredicate, TPredicateValue>
FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TPredicateValue: PartialOrd + Copy + Debug + Send + Sync + 'static,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
{
#[inline]
fn accept_document(&self, doc_id: DocId) -> bool {
if let Some(column) = &self.column_opt {
for val in column.values_for_doc(doc_id) {
if (self.predicate)(val) {
return true;
}
}
}
false
}
}

impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector
for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TSegmentCollector: SegmentCollector,
TPredicateValue: HasAssociatedColumnType,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync, /* DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>> */
{
type Fruit = TSegmentCollector::Fruit;

fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get_val(doc);
if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
if self.accept_document(doc) {
self.segment_collector.collect(doc, score);
}
}

fn harvest(self) -> <TSegmentCollector as SegmentCollector>::Fruit {
fn harvest(self) -> TSegmentCollector::Fruit {
self.segment_collector.harvest()
}
}

/// A variant of the [`FilterCollector`] specialized for bytes fast fields, i.e.
/// it transparently wraps an inner [`Collector`] but filters documents
/// based on the result of applying the predicate to the bytes fast field.
///
/// A document is accepted if and only if the predicate returns `true` for at least one value.
///
/// In other words,
/// - documents with no values are filtered out.
/// - documents with several values are accepted if at least one value matches the predicate.
///
/// ```rust
/// use tantivy::collector::{TopDocs, BytesFilterCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT, FAST};
/// use tantivy::{doc, DocAddress, Index};
///
/// # fn main() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let barcode = schema_builder.add_bytes_field("barcode", FAST);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// index_writer.add_document(doc!(title => "The Name of the Wind", barcode => &b"010101"[..]))?;
/// index_writer.add_document(doc!(title => "The Diary of Muadib", barcode => &b"110011"[..]))?;
/// index_writer.add_document(doc!(title => "A Dairy Cow", barcode => &b"110111"[..]))?;
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl", barcode => &b"011101"[..]))?;
/// index_writer.add_document(doc!(title => "Bridget Jones's Diary"))?;
/// index_writer.commit()?;
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let filter_collector = BytesFilterCollector::new(barcode, |bytes: &[u8]| bytes.starts_with(b"01"), TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &filter_collector)?;
///
/// assert_eq!(top_docs.len(), 1);
/// assert_eq!(top_docs[0].1, DocAddress::new(0, 3));
/// # Ok(())
/// # }
/// ```
pub struct BytesFilterCollector<TCollector, TPredicate>
where TPredicate: 'static + Clone
{
field: Field,
collector: TCollector,
predicate: TPredicate,
}

impl<TCollector, TPredicate> BytesFilterCollector<TCollector, TPredicate>
where
TCollector: Collector + Send + Sync,
TPredicate: Fn(&[u8]) -> bool + Send + Sync + Clone,
{
/// Create a new `BytesFilterCollector`.
pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self {
Self {
field,
predicate,
collector,
}
}
}

impl<TCollector, TPredicate> Collector for BytesFilterCollector<TCollector, TPredicate>
where
TCollector: Collector + Send + Sync,
TPredicate: 'static + Fn(&[u8]) -> bool + Send + Sync + Clone,
{
type Fruit = TCollector::Fruit;

type Child = BytesFilterSegmentCollector<TCollector::Child, TPredicate>;

fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> crate::Result<Self::Child> {
let schema = segment_reader.schema();
let field_name = schema.get_field_name(self.field);

let column_opt = segment_reader.fast_fields().bytes(field_name)?;

let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;

Ok(BytesFilterSegmentCollector {
column_opt,
segment_collector,
predicate: self.predicate.clone(),
buffer: Vec::new(),
})
}

fn requires_scoring(&self) -> bool {
self.collector.requires_scoring()
}

fn merge_fruits(
&self,
segment_fruits: Vec<<TCollector::Child as SegmentCollector>::Fruit>,
) -> crate::Result<TCollector::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}

pub struct BytesFilterSegmentCollector<TSegmentCollector, TPredicate>
where TPredicate: 'static
{
column_opt: Option<BytesColumn>,
segment_collector: TSegmentCollector,
predicate: TPredicate,
buffer: Vec<u8>,
}

impl<TSegmentCollector, TPredicate> BytesFilterSegmentCollector<TSegmentCollector, TPredicate>
where
TSegmentCollector: SegmentCollector,
TPredicate: 'static + Fn(&[u8]) -> bool + Send + Sync,
{
#[inline]
fn accept_document(&mut self, doc_id: DocId) -> bool {
if let Some(column) = &self.column_opt {
for ord in column.term_ords(doc_id) {
self.buffer.clear();

let found = column.ord_to_bytes(ord, &mut self.buffer).unwrap_or(false);

if found && (self.predicate)(&self.buffer) {
return true;
}
}
}
false
}
}

impl<TSegmentCollector, TPredicate> SegmentCollector
for BytesFilterSegmentCollector<TSegmentCollector, TPredicate>
where
TSegmentCollector: SegmentCollector,
TPredicate: 'static + Fn(&[u8]) -> bool + Send + Sync,
{
type Fruit = TSegmentCollector::Fruit;

fn collect(&mut self, doc: u32, score: Score) {
if self.accept_document(doc) {
self.segment_collector.collect(doc, score);
}
}

fn harvest(self) -> TSegmentCollector::Fruit {
self.segment_collector.harvest()
}
}
```
```
@@ -112,7 +112,7 @@ mod docset_collector;
pub use self::docset_collector::DocSetCollector;

mod filter_collector_wrapper;
pub use self::filter_collector_wrapper::FilterCollector;
pub use self::filter_collector_wrapper::{BytesFilterCollector, FilterCollector};

/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
```
```
@@ -14,7 +14,7 @@ use crate::collector::{
};
use crate::fastfield::{FastFieldNotAvailableError, FastValue};
use crate::query::Weight;
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
use crate::{DocAddress, DocId, Order, Score, SegmentOrdinal, SegmentReader, TantivyError};

struct FastFieldConvertCollector<
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
@@ -23,6 +23,7 @@ struct FastFieldConvertCollector<
pub collector: TCollector,
pub field: String,
pub fast_value: std::marker::PhantomData<TFastValue>,
order: Order,
}

impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
@@ -70,7 +71,13 @@ where
let raw_result = self.collector.merge_fruits(segment_fruits)?;
let transformed_result = raw_result
.into_iter()
.map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
.map(|(score, doc_address)| {
if self.order.is_desc() {
(TFastValue::from_u64(score), doc_address)
} else {
(TFastValue::from_u64(u64::MAX - score), doc_address)
}
})
.collect::<Vec<_>>();
Ok(transformed_result)
}
@@ -131,16 +138,23 @@ impl fmt::Debug for TopDocs {

struct ScorerByFastFieldReader {
sort_column: Arc<dyn ColumnValues<u64>>,
order: Order,
}

impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 {
self.sort_column.get_val(doc)
let value = self.sort_column.get_val(doc);
if self.order.is_desc() {
value
} else {
u64::MAX - value
}
}
}

struct ScorerByField {
field: String,
order: Order,
}

impl CustomScorer<u64> for ScorerByField {
@@ -157,8 +171,13 @@ impl CustomScorer<u64> for ScorerByField {
sort_column_opt.ok_or_else(|| FastFieldNotAvailableError {
field_name: self.field.clone(),
})?;
let mut default_value = 0u64;
if self.order.is_asc() {
default_value = u64::MAX;
}
Ok(ScorerByFastFieldReader {
sort_column: sort_column.first_or_default_col(0u64),
sort_column: sort_column.first_or_default_col(default_value),
order: self.order.clone(),
})
}
}
@@ -230,7 +249,7 @@ impl TopDocs {
///
/// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress};
/// # use tantivy::{doc, Index, DocAddress, Order};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
@@ -268,7 +287,7 @@ impl TopDocs {
/// // Note the `rating_field` needs to be a FAST field here.
/// let top_books_by_rating = TopDocs
/// ::with_limit(10)
/// .order_by_u64_field("rating");
/// .order_by_fast_field("rating", Order::Desc);
///
/// // ... and here are our documents. Note this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for
@@ -288,13 +307,15 @@ impl TopDocs {
///
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
/// the [.order_by_fast_field(...)](TopDocs::order_by_fast_field) method.
pub fn order_by_u64_field(
fn order_by_u64_field(
self,
field: impl ToString,
order: Order,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
CustomScoreTopCollector::new(
ScorerByField {
field: field.to_string(),
order,
},
self.0.into_tscore(),
)
@@ -316,7 +337,7 @@ impl TopDocs {
///
/// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress};
/// # use tantivy::{doc, Index, DocAddress,Order};
/// # use tantivy::query::{Query, AllQuery};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
@@ -354,7 +375,7 @@ impl TopDocs {
/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
/// let top_company_by_revenue = TopDocs
/// ::with_limit(2)
/// .order_by_fast_field("revenue");
/// .order_by_fast_field("revenue", Order::Desc);
///
/// // ... and here are our documents. Note this is a simple vec.
/// // The `i64` in the pair is the value of our fast field for
@@ -372,15 +393,17 @@ impl TopDocs {
pub fn order_by_fast_field<TFastValue>(
self,
fast_field: impl ToString,
order: Order,
) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
where
TFastValue: FastValue,
{
let u64_collector = self.order_by_u64_field(fast_field.to_string());
let u64_collector = self.order_by_u64_field(fast_field.to_string(), order.clone());
FastFieldConvertCollector {
collector: u64_collector,
field: fast_field.to_string(),
fast_value: PhantomData,
order,
}
}

@@ -721,7 +744,7 @@ mod tests {
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
use crate::time::format_description::well_known::Rfc3339;
use crate::time::OffsetDateTime;
use crate::{DateTime, DocAddress, DocId, Index, IndexWriter, Score, SegmentReader};
use crate::{DateTime, DocAddress, DocId, Index, IndexWriter, Order, Score, SegmentReader};

fn make_index() -> crate::Result<Index> {
let mut schema_builder = Schema::builder();
@@ -882,7 +905,7 @@ mod tests {
});
let searcher = index.reader()?.searcher();

let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE, Order::Desc);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector)?;
assert_eq!(
&top_docs[..],
@@ -921,7 +944,7 @@ mod tests {
))?;
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field("birthday");
let top_collector = TopDocs::with_limit(3).order_by_fast_field("birthday", Order::Desc);
let top_docs: Vec<(DateTime, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
@@ -951,7 +974,7 @@ mod tests {
))?;
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude");
let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude", Order::Desc);
let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
@@ -981,7 +1004,7 @@ mod tests {
))?;
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude");
let top_collector = TopDocs::with_limit(3).order_by_fast_field("altitude", Order::Desc);
let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
@@ -1009,7 +1032,7 @@ mod tests {
.unwrap();
});
let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field("missing_field");
let top_collector = TopDocs::with_limit(4).order_by_u64_field("missing_field", Order::Desc);
let segment_reader = searcher.segment_reader(0u32);
top_collector
.for_segment(0, segment_reader)
@@ -1027,7 +1050,7 @@ mod tests {
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE, Order::Desc);
let err = top_collector.for_segment(0, segment).err().unwrap();
assert!(matches!(err, crate::TantivyError::InvalidArgument(_)));
Ok(())
@@ -1044,7 +1067,7 @@ mod tests {
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(SIZE);
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(SIZE, Order::Desc);
let err = top_collector.for_segment(0, segment).err().unwrap();
assert!(
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
@@ -1106,4 +1129,50 @@ mod tests {
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
#[test]
fn test_fast_field_ascending_order() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, FAST);
let schema = schema_builder.build();
let (index, query) = index("beer", title, schema, |index_writer| {
index_writer
.add_document(doc!(
title => "bottle of beer",
size => 12u64,
))
.unwrap();
index_writer
.add_document(doc!(
title => "growler of beer",
size => 64u64,
))
.unwrap();
index_writer
.add_document(doc!(
title => "pint of beer",
size => 16u64,
))
.unwrap();
index_writer
.add_document(doc!(
title => "empty beer",
))
.unwrap();
});
let searcher = index.reader()?.searcher();

let top_collector = TopDocs::with_limit(4).order_by_fast_field(SIZE, Order::Asc);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector)?;
assert_eq!(
&top_docs[..],
&[
(12, DocAddress::new(0, 0)),
(16, DocAddress::new(0, 2)),
(64, DocAddress::new(0, 1)),
(18446744073709551615, DocAddress::new(0, 3)),
]
);
Ok(())
}
}
```
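The hunks above replace the public `order_by_u64_field` entry point with an `Order`-aware `order_by_fast_field`. A hedged sketch of the resulting call shape; "rating" is just an assumed FAST u64 field name taken from the doc example:

```rust
use tantivy::collector::{Collector, TopDocs};
use tantivy::{DocAddress, Order};

// Build a top-10 collector sorted by the "rating" fast field in the requested direction.
fn top_by_rating(order: Order) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
    TopDocs::with_limit(10).order_by_fast_field::<u64>("rating", order)
}

// e.g. `top_by_rating(Order::Desc)` for highest-rated first,
//      `top_by_rating(Order::Asc)` for lowest-rated first.
```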
```
@@ -259,7 +259,7 @@ pub(crate) fn set_string_and_get_terms(

/// Writes a value of a JSON field to a `Term`.
/// The Term format is as follows:
/// [JSON_TYPE][JSON_PATH][JSON_END_OF_PATH][VALUE_BYTES]
/// `[JSON_TYPE][JSON_PATH][JSON_END_OF_PATH][VALUE_BYTES]`
pub struct JsonTermWriter<'a> {
term_buffer: &'a mut Term,
path_stack: Vec<usize>,
```
```
@@ -2,8 +2,6 @@ use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use std::{fmt, io};

use fail::fail_point;

use crate::core::{InvertedIndexReader, Segment, SegmentComponent, SegmentId};
use crate::directory::{CompositeFile, FileSlice};
use crate::error::DataCorruption;
@@ -151,7 +149,7 @@ impl SegmentReader {

let store_file = segment.open_read(SegmentComponent::Store)?;

fail_point!("SegmentReader::open#middle");
crate::fail_point!("SegmentReader::open#middle");

let postings_file = segment.open_read(SegmentComponent::Postings)?;
let postings_composite = CompositeFile::open(&postings_file)?;
```
```
@@ -5,7 +5,6 @@ use std::sync::{Arc, RwLock};
use std::{fmt, result};

use common::HasLen;
use fail::fail_point;

use super::FileHandle;
use crate::core::META_FILEPATH;
@@ -184,7 +183,7 @@ impl Directory for RamDirectory {
}

fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RamDirectory::delete", |_| {
crate::fail_point!("RamDirectory::delete", |_| {
Err(DeleteError::IoError {
io_error: Arc::new(io::Error::from(io::ErrorKind::Other)),
filepath: path.to_path_buf(),
```
```
@@ -1291,4 +1291,28 @@ mod tests {
let vals: Vec<i64> = column.values_for_doc(0u32).collect();
assert_eq!(&vals, &[33]);
}

#[test]
fn check_num_columnar_fields() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_text_field("id", FAST);
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_with_num_threads(1, 20_000_000)?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
id_field => 1u64,
))?;
index_writer.commit()?;
}

let reader = index.reader().unwrap();

let searcher = reader.searcher();
let ff_reader = searcher.segment_reader(0).fast_fields();
let fields = ff_reader.u64_lenient_for_type_all(None, "id").unwrap();
assert_eq!(fields.len(), 1);

Ok(())
}
}
```
```
@@ -88,7 +88,7 @@ impl FastFieldReaders {
let Some((field, path)): Option<(Field, &str)> = self
.schema
.find_field_with_default(field_name, default_field_opt)
else{
else {
return Ok(None);
};
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
@@ -120,7 +120,8 @@ impl FastFieldReaders {
T: HasAssociatedColumnType,
DynamicColumn: Into<Option<Column<T>>>,
{
let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, T::column_type())?
let Some(dynamic_column_handle) =
self.dynamic_column_handle(field_name, T::column_type())?
else {
return Ok(None);
};
@@ -196,7 +197,8 @@ impl FastFieldReaders {

/// Returns a `str` column.
pub fn str(&self, field_name: &str) -> crate::Result<Option<StrColumn>> {
let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Str)?
let Some(dynamic_column_handle) =
self.dynamic_column_handle(field_name, ColumnType::Str)?
else {
return Ok(None);
};
@@ -206,7 +208,8 @@ impl FastFieldReaders {

/// Returns a `bytes` column.
pub fn bytes(&self, field_name: &str) -> crate::Result<Option<BytesColumn>> {
let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Bytes)?
let Some(dynamic_column_handle) =
self.dynamic_column_handle(field_name, ColumnType::Bytes)?
else {
return Ok(None);
};
```
```
@@ -6,7 +6,6 @@ use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};

use fail::fail_point;
use rayon::{ThreadPool, ThreadPoolBuilder};

use super::segment_manager::SegmentManager;
@@ -43,7 +42,7 @@ pub(crate) fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate:
let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer.
writeln!(&mut buffer)?;
fail_point!("save_metas", |msg| Err(crate::TantivyError::from(
crate::fail_point!("save_metas", |msg| Err(crate::TantivyError::from(
std::io::Error::new(
std::io::ErrorKind::Other,
msg.unwrap_or_else(|| "Undefined".to_string())
```
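Together with the Cargo.toml hunk that made `fail` optional, these call-site changes from `fail_point!` to `crate::fail_point!` imply a crate-local, feature-gated wrapper macro. A hypothetical sketch of what such a wrapper could look like; tantivy's actual definition may differ:

```rust
// When the optional `failpoints` feature is enabled (pulling in the `fail`
// dependency), forward to `fail::fail_point!`; otherwise compile the call away.
#[cfg(feature = "failpoints")]
#[macro_export]
macro_rules! fail_point {
    ($($arg:tt)*) => {
        fail::fail_point!($($arg)*)
    };
}

#[cfg(not(feature = "failpoints"))]
#[macro_export]
macro_rules! fail_point {
    ($($arg:tt)*) => {};
}
```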
@@ -1,5 +1,6 @@
|
||||
use columnar::MonotonicallyMappableToU64;
|
||||
use itertools::Itertools;
|
||||
use tokenizer_api::BoxTokenStream;
|
||||
|
||||
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
|
||||
use super::operation::AddOperation;
|
||||
@@ -15,7 +16,7 @@ use crate::postings::{
|
||||
use crate::schema::{FieldEntry, FieldType, Schema, Term, Value, DATE_TIME_PRECISION_INDEXED};
|
||||
use crate::store::{StoreReader, StoreWriter};
|
||||
use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer};
|
||||
use crate::{DocId, Document, Opstamp, SegmentComponent};
|
||||
use crate::{DocId, Document, Opstamp, SegmentComponent, TantivyError};
|
||||
|
||||
/// Computes the initial size of the hash table.
|
||||
///
|
||||
@@ -98,14 +99,18 @@ impl SegmentWriter {
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
text_options
|
||||
.and_then(|text_index_option| {
|
||||
let tokenizer_name = &text_index_option.tokenizer();
|
||||
tokenizer_manager.get(tokenizer_name)
|
||||
})
|
||||
.unwrap_or_default()
|
||||
let tokenizer_name = text_options
|
||||
.map(|text_index_option| text_index_option.tokenizer())
|
||||
.unwrap_or("default");
|
||||
|
||||
tokenizer_manager.get(tokenizer_name).ok_or_else(|| {
|
||||
TantivyError::SchemaError(format!(
|
||||
"Error getting tokenizer for field: {}",
|
||||
field_entry.name()
|
||||
))
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
Ok(SegmentWriter {
|
||||
max_doc: 0,
|
||||
ctx: IndexingContext::new(table_size),
|
||||
@@ -205,7 +210,7 @@ impl SegmentWriter {
|
||||
for value in values {
|
||||
let mut token_stream = match value {
|
||||
Value::PreTokStr(tok_str) => {
|
||||
PreTokenizedStream::from(tok_str.clone()).into()
|
||||
BoxTokenStream::new(PreTokenizedStream::from(tok_str.clone()))
|
||||
}
|
||||
Value::Str(ref text) => {
|
||||
let text_analyzer =
|
||||
@@ -438,7 +443,9 @@ fn remap_and_write(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::Path;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use tempfile::TempDir;
|
||||
|
||||
use super::compute_initial_table_size;
|
||||
use crate::collector::Count;
|
||||
@@ -446,7 +453,9 @@ mod tests {
|
||||
use crate::directory::RamDirectory;
|
||||
use crate::postings::TermInfo;
|
||||
use crate::query::PhraseQuery;
|
||||
use crate::schema::{IndexRecordOption, Schema, Type, STORED, STRING, TEXT};
|
||||
use crate::schema::{
|
||||
IndexRecordOption, Schema, TextFieldIndexing, TextOptions, Type, STORED, STRING, TEXT,
|
||||
};
|
||||
use crate::store::{Compressor, StoreReader, StoreWriter};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::OffsetDateTime;
|
||||
@@ -900,4 +909,32 @@ mod tests {
|
||||
postings.positions(&mut positions);
|
||||
assert_eq!(positions, &[4]); //< as opposed to 3 if we had a position length of 1.
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_show_error_when_tokenizer_not_registered() {
|
||||
let text_field_indexing = TextFieldIndexing::default()
|
||||
.set_tokenizer("custom_en")
|
||||
.set_index_option(IndexRecordOption::WithFreqsAndPositions);
|
||||
let text_options = TextOptions::default()
|
||||
.set_indexing_options(text_field_indexing)
|
||||
.set_stored();
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_text_field("title", text_options);
|
||||
let schema = schema_builder.build();
|
||||
let tempdir = TempDir::new().unwrap();
|
||||
let tempdir_path = PathBuf::from(tempdir.path());
|
||||
Index::create_in_dir(&tempdir_path, schema).unwrap();
|
||||
let index = Index::open_in_dir(tempdir_path).unwrap();
|
||||
let schema = index.schema();
|
||||
let mut index_writer = index.writer(50_000_000).unwrap();
|
||||
let title = schema.get_field("title").unwrap();
|
||||
let mut document = Document::default();
|
||||
document.add_text(title, "The Old Man and the Sea");
|
||||
index_writer.add_document(document).unwrap();
|
||||
let error = index_writer.commit().unwrap_err();
|
||||
assert_eq!(
|
||||
error.to_string(),
|
||||
"Schema error: 'Error getting tokenizer for field: title'"
|
||||
);
|
||||
}
|
||||
}
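For context, a hedged sketch of what the new test above is guarding against: the schema references a `custom_en` tokenizer, so indexing only succeeds once a tokenizer of that name has been registered on the index. The analyzer chain below is illustrative, not something this diff ships.

```rust
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};

fn register_custom_en(index: &tantivy::Index) {
    let analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
        .filter(LowerCaser)
        .build();
    // Without this registration, committing a document for the "title" field
    // now fails with the SchemaError asserted in the test above.
    index.tokenizers().register("custom_en", analyzer);
}
```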
|
||||
|
||||
@@ -101,6 +101,7 @@ mod test {
|
||||
|
||||
use super::Stamper;
|
||||
|
||||
#[allow(clippy::redundant_clone)]
|
||||
#[test]
|
||||
fn test_stamper() {
|
||||
let stamper = Stamper::new(7u64);
|
||||
@@ -116,6 +117,7 @@ mod test {
|
||||
assert_eq!(stamper.stamp(), 15u64);
|
||||
}
|
||||
|
||||
#[allow(clippy::redundant_clone)]
|
||||
#[test]
|
||||
fn test_stamper_revert() {
|
||||
let stamper = Stamper::new(7u64);
|
||||
|
||||
33 src/lib.rs
@@ -299,6 +299,35 @@ pub struct DocAddress {
|
||||
pub doc_id: DocId,
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
/// Enable fail_point if feature is enabled.
|
||||
macro_rules! fail_point {
|
||||
($name:expr) => {{
|
||||
#[cfg(feature = "failpoints")]
|
||||
{
|
||||
fail::eval($name, |_| {
|
||||
panic!("Return is not supported for the fail point \"{}\"", $name);
|
||||
});
|
||||
}
|
||||
}};
|
||||
($name:expr, $e:expr) => {{
|
||||
#[cfg(feature = "failpoints")]
|
||||
{
|
||||
if let Some(res) = fail::eval($name, $e) {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
}};
|
||||
($name:expr, $cond:expr, $e:expr) => {{
|
||||
#[cfg(feature = "failpoints")]
|
||||
{
|
||||
if $cond {
|
||||
fail::fail_point!($name, $e);
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
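A minimal sketch of how this new crate-local macro is invoked, mirroring the `save_metas` and `close_term` call sites shown elsewhere in this diff. The fail point name `write_something` and the surrounding function are hypothetical; with the `failpoints` feature disabled the macro expands to nothing.

```rust
// Hypothetical call site following the pattern used for "save_metas" above.
fn write_something() -> crate::Result<()> {
    crate::fail_point!("write_something", |msg: Option<String>| {
        Err(crate::TantivyError::from(std::io::Error::new(
            std::io::ErrorKind::Other,
            msg.unwrap_or_else(|| "Undefined".to_string()),
        )))
    });
    Ok(())
}
```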
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use common::{BinarySerializable, FixedSize};
|
||||
@@ -876,8 +905,8 @@ pub mod tests {
|
||||
}"#,
|
||||
)
|
||||
.unwrap();
|
||||
let doc = doc!(json_field=>json_val.clone());
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let doc = doc!(json_field=>json_val);
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut writer = index.writer_for_tests().unwrap();
|
||||
writer.add_document(doc).unwrap();
|
||||
writer.commit().unwrap();
|
||||
|
||||
@@ -2,7 +2,6 @@ use std::cmp::Ordering;
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, VInt};
|
||||
use fail::fail_point;
|
||||
|
||||
use super::TermInfo;
|
||||
use crate::core::Segment;
|
||||
@@ -205,7 +204,7 @@ impl<'a> FieldSerializer<'a> {
|
||||
/// If the current block is incomplete, it needs to be encoded
|
||||
/// using `VInt` encoding.
|
||||
pub fn close_term(&mut self) -> io::Result<()> {
|
||||
fail_point!("FieldSerializer::close_term", |msg: Option<String>| {
|
||||
crate::fail_point!("FieldSerializer::close_term", |msg: Option<String>| {
|
||||
Err(io::Error::new(io::ErrorKind::Other, format!("{msg:?}")))
|
||||
});
|
||||
if self.term_open {
|
||||
|
||||
@@ -4,9 +4,7 @@ use std::collections::{BinaryHeap, HashMap};
|
||||
use crate::query::bm25::idf;
|
||||
use crate::query::{BooleanQuery, BoostQuery, Occur, Query, TermQuery};
|
||||
use crate::schema::{Field, FieldType, IndexRecordOption, Term, Value};
|
||||
use crate::tokenizer::{
|
||||
BoxTokenStream, FacetTokenizer, PreTokenizedStream, TokenStream, Tokenizer,
|
||||
};
|
||||
use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TokenStream, Tokenizer};
|
||||
use crate::{DocAddress, Result, Searcher, TantivyError};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@@ -206,8 +204,7 @@ impl MoreLikeThis {
|
||||
for value in values {
|
||||
match value {
|
||||
Value::PreTokStr(tok_str) => {
|
||||
let mut token_stream: BoxTokenStream =
|
||||
PreTokenizedStream::from(tok_str.clone()).into();
|
||||
let mut token_stream = PreTokenizedStream::from(tok_str.clone());
|
||||
token_stream.process(&mut |token| {
|
||||
if !self.is_noise_word(token.text.clone()) {
|
||||
let term = Term::from_field_text(field, &token.text);
|
||||
|
||||
@@ -472,6 +472,7 @@ mod tests {
|
||||
|
||||
use super::RangeQuery;
|
||||
use crate::collector::{Count, TopDocs};
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::QueryParser;
|
||||
use crate::schema::{Document, Field, IntoIpv6Addr, Schema, FAST, INDEXED, STORED, TEXT};
|
||||
use crate::{doc, Index};
|
||||
@@ -547,7 +548,8 @@ mod tests {
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(2, 60_000_000)?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 60_000_000)?;
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
|
||||
for i in 1..100 {
|
||||
let mut doc = Document::new();
|
||||
@@ -557,6 +559,9 @@ mod tests {
|
||||
}
|
||||
}
|
||||
index_writer.add_document(doc)?;
|
||||
if i == 10 {
|
||||
index_writer.commit()?;
|
||||
}
|
||||
}
|
||||
|
||||
index_writer.commit()?;
|
||||
|
||||
@@ -31,9 +31,10 @@ impl IPFastFieldRangeWeight {
|
||||
|
||||
impl Weight for IPFastFieldRangeWeight {
|
||||
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
|
||||
let Some(ip_addr_column): Option<Column<Ipv6Addr>> = reader.fast_fields()
|
||||
.column_opt(&self.field)? else {
|
||||
return Ok(Box::new(EmptyScorer))
|
||||
let Some(ip_addr_column): Option<Column<Ipv6Addr>> =
|
||||
reader.fast_fields().column_opt(&self.field)?
|
||||
else {
|
||||
return Ok(Box::new(EmptyScorer));
|
||||
};
|
||||
let value_range = bound_to_value_range(
|
||||
&self.lower_bound,
|
||||
|
||||
@@ -71,7 +71,9 @@ impl Weight for FastFieldRangeWeight {
|
||||
let column_type_opt_ref: Option<&[ColumnType]> = column_type_opt
|
||||
.as_ref()
|
||||
.map(|column_types| column_types.as_slice());
|
||||
let Some((column, _)) = fast_field_reader.u64_lenient_for_type(column_type_opt_ref, &self.field)? else {
|
||||
let Some((column, _)) =
|
||||
fast_field_reader.u64_lenient_for_type(column_type_opt_ref, &self.field)?
|
||||
else {
|
||||
return Ok(Box::new(EmptyScorer));
|
||||
};
|
||||
let value_range = bound_to_value_range(
|
||||
|
||||
@@ -693,7 +693,7 @@ Survey in 2016, 2017, and 2018."#;
terms.insert(String::from("bc"), 1.0);

let fragments = search_fragments(
&mut From::from(NgramTokenizer::all_ngrams(2, 2)),
&mut From::from(NgramTokenizer::all_ngrams(2, 2).unwrap()),
text,
&terms,
3,

@@ -426,7 +426,7 @@ mod tests {
assert_eq!(store.cache_stats().cache_hits, 1);
assert_eq!(store.cache_stats().cache_misses, 2);

assert_eq!(store.cache.peek_lru(), Some(11163));
assert_eq!(store.cache.peek_lru(), Some(11207));

Ok(())
}

@@ -154,7 +154,7 @@ pub use self::split_compound_words::SplitCompoundWords;
pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter;
pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
pub use self::tokenizer::TextAnalyzer;
pub use self::tokenizer::{TextAnalyzer, TextAnalyzerBuilder};
pub use self::tokenizer_manager::TokenizerManager;
pub use self::whitespace_tokenizer::WhitespaceTokenizer;

@@ -1,4 +1,5 @@
|
||||
use super::{Token, TokenStream, Tokenizer};
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Tokenize the text by splitting words into n-grams of the given size(s)
|
||||
///
|
||||
@@ -33,7 +34,7 @@ use super::{Token, TokenStream, Tokenizer};
|
||||
/// ```rust
|
||||
/// use tantivy::tokenizer::*;
|
||||
///
|
||||
/// let mut tokenizer = NgramTokenizer::new(2, 3, false);
|
||||
/// let mut tokenizer = NgramTokenizer::new(2, 3, false).unwrap();
|
||||
/// let mut stream = tokenizer.token_stream("hello");
|
||||
/// {
|
||||
/// let token = stream.next().unwrap();
|
||||
@@ -79,7 +80,7 @@ use super::{Token, TokenStream, Tokenizer};
|
||||
/// }
|
||||
/// assert!(stream.next().is_none());
|
||||
/// ```
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct NgramTokenizer {
|
||||
/// min size of the n-gram
|
||||
min_gram: usize,
|
||||
@@ -92,30 +93,39 @@ pub struct NgramTokenizer {
|
||||
|
||||
impl NgramTokenizer {
|
||||
/// Configures a new Ngram tokenizer
|
||||
pub fn new(min_gram: usize, max_gram: usize, prefix_only: bool) -> NgramTokenizer {
|
||||
assert!(min_gram > 0, "min_gram must be greater than 0");
|
||||
assert!(
|
||||
min_gram <= max_gram,
|
||||
"min_gram must not be greater than max_gram"
|
||||
);
|
||||
NgramTokenizer {
|
||||
pub fn new(
|
||||
min_gram: usize,
|
||||
max_gram: usize,
|
||||
prefix_only: bool,
|
||||
) -> crate::Result<NgramTokenizer> {
|
||||
if min_gram == 0 {
|
||||
return Err(TantivyError::InvalidArgument(
|
||||
"min_gram must be greater than 0".to_string(),
|
||||
));
|
||||
}
|
||||
if min_gram > max_gram {
|
||||
return Err(TantivyError::InvalidArgument(
|
||||
"min_gram must not be greater than max_gram".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(NgramTokenizer {
|
||||
min_gram,
|
||||
max_gram,
|
||||
prefix_only,
|
||||
token: Token::default(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a `NGramTokenizer` which generates tokens for all inner ngrams.
|
||||
///
|
||||
/// This is as opposed to only prefix ngrams.
|
||||
pub fn all_ngrams(min_gram: usize, max_gram: usize) -> NgramTokenizer {
|
||||
pub fn all_ngrams(min_gram: usize, max_gram: usize) -> crate::Result<NgramTokenizer> {
|
||||
Self::new(min_gram, max_gram, false)
|
||||
}
|
||||
|
||||
/// Create a `NGramTokenizer` which only generates tokens for the
|
||||
/// prefix ngrams.
|
||||
pub fn prefix_only(min_gram: usize, max_gram: usize) -> NgramTokenizer {
|
||||
pub fn prefix_only(min_gram: usize, max_gram: usize) -> crate::Result<NgramTokenizer> {
|
||||
Self::new(min_gram, max_gram, true)
|
||||
}
|
||||
}
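A hedged sketch of the new fallible constructors from the caller's side: invalid bounds now surface as `TantivyError::InvalidArgument` instead of a panic. The helper function and its error handling are illustrative only.

```rust
use tantivy::tokenizer::{NgramTokenizer, TokenStream, Tokenizer};

fn ngrams_of(text: &str) -> tantivy::Result<Vec<String>> {
    // `new`, `all_ngrams` and `prefix_only` now return `Result<NgramTokenizer>`.
    let mut tokenizer = NgramTokenizer::all_ngrams(2, 3)?;
    let mut stream = tokenizer.token_stream(text);
    let mut out = Vec::new();
    while stream.advance() {
        out.push(stream.token().text.clone());
    }
    // Invalid bounds are reported as an error instead of panicking.
    assert!(NgramTokenizer::new(0, 2, false).is_err());
    Ok(out)
}
```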
|
||||
@@ -349,7 +359,11 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_ngram_tokenizer_1_2_false() {
|
||||
let tokens = test_helper(NgramTokenizer::all_ngrams(1, 2).token_stream("hello"));
|
||||
let tokens = test_helper(
|
||||
NgramTokenizer::all_ngrams(1, 2)
|
||||
.unwrap()
|
||||
.token_stream("hello"),
|
||||
);
|
||||
assert_eq!(tokens.len(), 9);
|
||||
assert_token(&tokens[0], 0, "h", 0, 1);
|
||||
assert_token(&tokens[1], 0, "he", 0, 2);
|
||||
@@ -364,7 +378,11 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_ngram_tokenizer_min_max_equal() {
|
||||
let tokens = test_helper(NgramTokenizer::all_ngrams(3, 3).token_stream("hello"));
|
||||
let tokens = test_helper(
|
||||
NgramTokenizer::all_ngrams(3, 3)
|
||||
.unwrap()
|
||||
.token_stream("hello"),
|
||||
);
|
||||
assert_eq!(tokens.len(), 3);
|
||||
assert_token(&tokens[0], 0, "hel", 0, 3);
|
||||
assert_token(&tokens[1], 0, "ell", 1, 4);
|
||||
@@ -373,7 +391,11 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_ngram_tokenizer_2_5_prefix() {
|
||||
let tokens = test_helper(NgramTokenizer::prefix_only(2, 5).token_stream("frankenstein"));
|
||||
let tokens = test_helper(
|
||||
NgramTokenizer::prefix_only(2, 5)
|
||||
.unwrap()
|
||||
.token_stream("frankenstein"),
|
||||
);
|
||||
assert_eq!(tokens.len(), 4);
|
||||
assert_token(&tokens[0], 0, "fr", 0, 2);
|
||||
assert_token(&tokens[1], 0, "fra", 0, 3);
|
||||
@@ -383,7 +405,11 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_ngram_non_ascii_1_2() {
|
||||
let tokens = test_helper(NgramTokenizer::all_ngrams(1, 2).token_stream("hεllo"));
|
||||
let tokens = test_helper(
|
||||
NgramTokenizer::all_ngrams(1, 2)
|
||||
.unwrap()
|
||||
.token_stream("hεllo"),
|
||||
);
|
||||
assert_eq!(tokens.len(), 9);
|
||||
assert_token(&tokens[0], 0, "h", 0, 1);
|
||||
assert_token(&tokens[1], 0, "hε", 0, 3);
|
||||
@@ -398,7 +424,11 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_ngram_non_ascii_2_5_prefix() {
|
||||
let tokens = test_helper(NgramTokenizer::prefix_only(2, 5).token_stream("hεllo"));
|
||||
let tokens = test_helper(
|
||||
NgramTokenizer::prefix_only(2, 5)
|
||||
.unwrap()
|
||||
.token_stream("hεllo"),
|
||||
);
|
||||
assert_eq!(tokens.len(), 4);
|
||||
assert_token(&tokens[0], 0, "hε", 0, 3);
|
||||
assert_token(&tokens[1], 0, "hεl", 0, 4);
|
||||
@@ -408,22 +438,26 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_ngram_empty() {
|
||||
let tokens = test_helper(NgramTokenizer::all_ngrams(1, 5).token_stream(""));
|
||||
let tokens = test_helper(NgramTokenizer::all_ngrams(1, 5).unwrap().token_stream(""));
|
||||
assert!(tokens.is_empty());
|
||||
let tokens = test_helper(NgramTokenizer::all_ngrams(2, 5).token_stream(""));
|
||||
let tokens = test_helper(NgramTokenizer::all_ngrams(2, 5).unwrap().token_stream(""));
|
||||
assert!(tokens.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "min_gram must be greater than 0")]
|
||||
fn test_ngram_min_max_interval_empty() {
|
||||
test_helper(NgramTokenizer::all_ngrams(0, 2).token_stream("hellossss"));
|
||||
test_helper(
|
||||
NgramTokenizer::all_ngrams(0, 2)
|
||||
.unwrap()
|
||||
.token_stream("hellossss"),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "min_gram must not be greater than max_gram")]
|
||||
fn test_invalid_interval_should_panic_if_smaller() {
|
||||
NgramTokenizer::all_ngrams(2, 1);
|
||||
NgramTokenizer::all_ngrams(2, 1).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -86,6 +86,8 @@ impl TokenFilter for SplitCompoundWords {
|
||||
SplitCompoundWordsFilter {
|
||||
dict: self.dict,
|
||||
inner: tokenizer,
|
||||
cuts: Vec::new(),
|
||||
parts: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -94,29 +96,33 @@ impl TokenFilter for SplitCompoundWords {
|
||||
pub struct SplitCompoundWordsFilter<T> {
|
||||
dict: AhoCorasick,
|
||||
inner: T,
|
||||
}
|
||||
|
||||
impl<T: Tokenizer> Tokenizer for SplitCompoundWordsFilter<T> {
|
||||
type TokenStream<'a> = SplitCompoundWordsTokenStream<T::TokenStream<'a>>;
|
||||
|
||||
fn token_stream<'a>(&'a mut self, text: &'a str) -> Self::TokenStream<'a> {
|
||||
SplitCompoundWordsTokenStream {
|
||||
dict: self.dict.clone(),
|
||||
tail: self.inner.token_stream(text),
|
||||
cuts: Vec::new(),
|
||||
parts: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SplitCompoundWordsTokenStream<T> {
|
||||
dict: AhoCorasick,
|
||||
tail: T,
|
||||
cuts: Vec<usize>,
|
||||
parts: Vec<Token>,
|
||||
}
|
||||
|
||||
impl<T: TokenStream> SplitCompoundWordsTokenStream<T> {
|
||||
impl<T: Tokenizer> Tokenizer for SplitCompoundWordsFilter<T> {
|
||||
type TokenStream<'a> = SplitCompoundWordsTokenStream<'a, T::TokenStream<'a>>;
|
||||
|
||||
fn token_stream<'a>(&'a mut self, text: &'a str) -> Self::TokenStream<'a> {
|
||||
self.cuts.clear();
|
||||
self.parts.clear();
|
||||
SplitCompoundWordsTokenStream {
|
||||
dict: self.dict.clone(),
|
||||
tail: self.inner.token_stream(text),
|
||||
cuts: &mut self.cuts,
|
||||
parts: &mut self.parts,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SplitCompoundWordsTokenStream<'a, T> {
|
||||
dict: AhoCorasick,
|
||||
tail: T,
|
||||
cuts: &'a mut Vec<usize>,
|
||||
parts: &'a mut Vec<Token>,
|
||||
}
|
||||
|
||||
impl<'a, T: TokenStream> SplitCompoundWordsTokenStream<'a, T> {
|
||||
// Will use `self.cuts` to fill `self.parts` if `self.tail.token()`
|
||||
// can fully be split into consecutive matches against `self.dict`.
|
||||
fn split(&mut self) {
|
||||
@@ -152,7 +158,7 @@ impl<T: TokenStream> SplitCompoundWordsTokenStream<T> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TokenStream> TokenStream for SplitCompoundWordsTokenStream<T> {
|
||||
impl<'a, T: TokenStream> TokenStream for SplitCompoundWordsTokenStream<'a, T> {
|
||||
fn advance(&mut self) -> bool {
|
||||
self.parts.pop();
|
||||
|
||||
|
||||
@@ -5,12 +5,32 @@ use tokenizer_api::{BoxTokenStream, TokenFilter, Tokenizer};
|
||||
use crate::tokenizer::empty_tokenizer::EmptyTokenizer;
|
||||
|
||||
/// `TextAnalyzer` tokenizes an input text into tokens and modifies the resulting `TokenStream`.
|
||||
#[derive(Clone)]
|
||||
pub struct TextAnalyzer {
|
||||
tokenizer: Box<dyn BoxableTokenizer>,
|
||||
}
|
||||
|
||||
impl Tokenizer for Box<dyn BoxableTokenizer> {
|
||||
type TokenStream<'a> = BoxTokenStream<'a>;
|
||||
|
||||
// Note: we want to call `box_token_stream` on the concrete `Tokenizer`
|
||||
// implementation, not the `BoxableTokenizer` one as it will cause
|
||||
// a recursive call (and a stack overflow).
|
||||
fn token_stream<'a>(&'a mut self, text: &'a str) -> Self::TokenStream<'a> {
|
||||
(**self).box_token_stream(text)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Box<dyn BoxableTokenizer> {
|
||||
// Note: we want to call `box_clone` on the concrete `Tokenizer`
|
||||
// implementation in order to clone the concrete `Tokenizer`.
|
||||
fn clone(&self) -> Self {
|
||||
(**self).box_clone()
|
||||
}
|
||||
}
|
||||
|
||||
/// A boxable `Tokenizer`, with its `TokenStream` type erased.
|
||||
trait BoxableTokenizer: 'static + Send + Sync {
|
||||
pub trait BoxableTokenizer: 'static + Send + Sync {
|
||||
/// Creates a boxed token stream for a given `str`.
|
||||
fn box_token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a>;
|
||||
/// Clone this tokenizer.
|
||||
@@ -19,21 +39,13 @@ trait BoxableTokenizer: 'static + Send + Sync {
|
||||
|
||||
impl<T: Tokenizer> BoxableTokenizer for T {
|
||||
fn box_token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a> {
|
||||
self.token_stream(text).into()
|
||||
BoxTokenStream::new(self.token_stream(text))
|
||||
}
|
||||
fn box_clone(&self) -> Box<dyn BoxableTokenizer> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for TextAnalyzer {
|
||||
fn clone(&self) -> Self {
|
||||
TextAnalyzer {
|
||||
tokenizer: self.tokenizer.box_clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TextAnalyzer {
|
||||
fn default() -> TextAnalyzer {
|
||||
TextAnalyzer::from(EmptyTokenizer)
|
||||
@@ -54,12 +66,12 @@ impl TextAnalyzer {
|
||||
|
||||
/// Creates a token stream for a given `str`.
|
||||
pub fn token_stream<'a>(&'a mut self, text: &'a str) -> BoxTokenStream<'a> {
|
||||
self.tokenizer.box_token_stream(text)
|
||||
self.tokenizer.token_stream(text)
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder helper for [`TextAnalyzer`]
|
||||
pub struct TextAnalyzerBuilder<T> {
|
||||
pub struct TextAnalyzerBuilder<T = Box<dyn BoxableTokenizer>> {
|
||||
tokenizer: T,
|
||||
}
|
||||
|
||||
@@ -83,6 +95,23 @@ impl<T: Tokenizer> TextAnalyzerBuilder<T> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Boxes the internal tokenizer. This is useful for adding dynamic filters.
|
||||
/// Note: this will be less performant than the non boxed version.
|
||||
pub fn dynamic(self) -> TextAnalyzerBuilder {
|
||||
let boxed_tokenizer = Box::new(self.tokenizer);
|
||||
TextAnalyzerBuilder {
|
||||
tokenizer: boxed_tokenizer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends a token filter to the current builder and returns a boxed version of the
|
||||
/// tokenizer. This is useful when you want to build a `TextAnalyzer` dynamically.
|
||||
/// Prefer using `TextAnalyzer::builder(tokenizer).filter(token_filter).build()` if
|
||||
/// possible as it will be more performant and create less boxes.
|
||||
pub fn filter_dynamic<F: TokenFilter>(self, token_filter: F) -> TextAnalyzerBuilder {
|
||||
self.filter(token_filter).dynamic()
|
||||
}
|
||||
|
||||
/// Finalize building the TextAnalyzer
|
||||
pub fn build(self) -> TextAnalyzer {
|
||||
TextAnalyzer {
|
||||
@@ -90,3 +119,57 @@ impl<T: Tokenizer> TextAnalyzerBuilder<T> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::tokenizer::{LowerCaser, RemoveLongFilter, SimpleTokenizer};
|
||||
|
||||
#[test]
|
||||
fn test_text_analyzer_builder() {
|
||||
let mut analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
|
||||
.filter(RemoveLongFilter::limit(40))
|
||||
.filter(LowerCaser)
|
||||
.build();
|
||||
let mut stream = analyzer.token_stream("- first bullet point");
|
||||
assert_eq!(stream.next().unwrap().text, "first");
|
||||
assert_eq!(stream.next().unwrap().text, "bullet");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_analyzer_with_filters_boxed() {
|
||||
// This test shows how one can build a TextAnalyzer dynamically, by stacking a list
|
||||
// of parametrizable token filters.
|
||||
//
|
||||
// The following enum is the thing that would be serializable.
|
||||
// Note that token filters can have their own parameters, too, like the RemoveLongFilter
|
||||
enum SerializableTokenFilterEnum {
|
||||
LowerCaser(LowerCaser),
|
||||
RemoveLongFilter(RemoveLongFilter),
|
||||
}
|
||||
// Note that everything below is dynamic.
|
||||
let filters: Vec<SerializableTokenFilterEnum> = vec![
|
||||
SerializableTokenFilterEnum::LowerCaser(LowerCaser),
|
||||
SerializableTokenFilterEnum::RemoveLongFilter(RemoveLongFilter::limit(12)),
|
||||
];
|
||||
let mut analyzer_builder: TextAnalyzerBuilder =
|
||||
TextAnalyzer::builder(SimpleTokenizer::default())
|
||||
.filter_dynamic(RemoveLongFilter::limit(40))
|
||||
.filter_dynamic(LowerCaser);
|
||||
for filter in filters {
|
||||
analyzer_builder = match filter {
|
||||
SerializableTokenFilterEnum::LowerCaser(lower_caser) => {
|
||||
analyzer_builder.filter_dynamic(lower_caser)
|
||||
}
|
||||
SerializableTokenFilterEnum::RemoveLongFilter(remove_long_filter) => {
|
||||
analyzer_builder.filter_dynamic(remove_long_filter)
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut analyzer = analyzer_builder.build();
|
||||
let mut stream = analyzer.token_stream("first bullet point");
|
||||
assert_eq!(stream.next().unwrap().text, "first");
|
||||
assert_eq!(stream.next().unwrap().text, "bullet");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
keywords = ["search", "information", "retrieval", "sstable"]
categories = ["database-implementations", "data-structures", "compression"]
desciption = "sstables for tantivy"
description = "sstables for tantivy"

[dependencies]
common = {version= "0.5", path="../common", package="tantivy-common"}

@@ -44,7 +44,7 @@ pub fn fast_short_slice_copy(src: &[u8], dst: &mut [u8]) {
return;
}

/// The code will use the vmovdqu instruction to copy 32 bytes at a time.
// The code will use the vmovdqu instruction to copy 32 bytes at a time.
#[cfg(target_feature = "avx")]
{
if len <= 64 {

@@ -63,10 +63,22 @@ pub trait Tokenizer: 'static + Clone + Send + Sync {
/// Simple wrapper of `Box<dyn TokenStream + 'a>`.
pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>);

impl<'a, T> From<T> for BoxTokenStream<'a>
where T: TokenStream + 'a
{
fn from(token_stream: T) -> BoxTokenStream<'a> {
impl<'a> TokenStream for BoxTokenStream<'a> {
fn advance(&mut self) -> bool {
self.0.advance()
}

fn token(&self) -> &Token {
self.0.token()
}

fn token_mut(&mut self) -> &mut Token {
self.0.token_mut()
}
}

impl<'a> BoxTokenStream<'a> {
pub fn new<T: TokenStream + 'a>(token_stream: T) -> BoxTokenStream<'a> {
BoxTokenStream(Box::new(token_stream))
}
}
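A small sketch of the call-site change implied here: with the `From` impl removed, a concrete stream is boxed explicitly via `BoxTokenStream::new`, as the `PreTokenizedStream` sites earlier in this diff now do. The counting helper is made up for illustration.

```rust
use tantivy::tokenizer::{BoxTokenStream, SimpleTokenizer, TokenStream, Tokenizer};

fn count_tokens(text: &str) -> usize {
    let mut tokenizer = SimpleTokenizer::default();
    // Before: `let stream: BoxTokenStream = tokenizer.token_stream(text).into();`
    let mut stream = BoxTokenStream::new(tokenizer.token_stream(text));
    let mut count = 0;
    while stream.advance() {
        count += 1;
    }
    count
}
```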