Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2026-01-03 07:42:54 +00:00)

Compare commits: no-column-... → trinity/yo (6 commits)

Commits:
- bcff3eb2d2
- 85f2588875
- db6cf65d53
- 654aa7f42c
- 951a898633
- 003722d831
@@ -1,7 +1,6 @@
Tantivy 0.19
================================

- Skip score calculation when no scoring is required [#1646](https://github.com/quickwit-oss/tantivy/pull/1646) (@PSeitz)
- Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
- Major bugfix: Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
@@ -61,6 +61,7 @@ measure_time = "0.8.2"
ciborium = { version = "0.2", optional = true}
async-trait = "0.1.53"
arc-swap = "1.5.0"
yoke = { version = "0.6.2", features = ["derive"] }

[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"

@@ -71,10 +72,10 @@ maplit = "1.0.2"
matches = "0.1.9"
pretty_assertions = "1.2.1"
proptest = "1.0.0"
criterion = "0.4"
criterion = "0.3.5"
test-log = "0.2.10"
env_logger = "0.9.0"
pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
futures = "0.3.21"

[dev-dependencies.fail]
100000 benches/hdfs_with_array.json (new file): file diff suppressed because it is too large.
@@ -1,116 +1,159 @@
use criterion::{criterion_group, criterion_main, Criterion};
use itertools::Itertools;
use pprof::criterion::{Output, PProfProfiler};
use tantivy::schema::{INDEXED, STORED, STRING, TEXT};
use tantivy::Index;
use serde_json::{self, Value as JsonValue};
use tantivy::directory::RamDirectory;
use tantivy::schema::{
    FieldValue, TextFieldIndexing, TextOptions, Value, INDEXED, STORED, STRING, TEXT,
};
use tantivy::{Document, Index, IndexBuilder};

const HDFS_LOGS: &str = include_str!("hdfs.json");
const NUM_REPEATS: usize = 2;
const NUM_REPEATS: usize = 20;

pub fn hdfs_index_benchmark(c: &mut Criterion) {
    let schema = {
        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
        schema_builder.add_u64_field("timestamp", INDEXED);
        schema_builder.add_text_field("body", TEXT);
        schema_builder.add_text_field("severity", STRING);
        schema_builder.build()
    };
    let schema_with_store = {
        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
        schema_builder.add_u64_field("timestamp", INDEXED | STORED);
        schema_builder.add_text_field("body", TEXT | STORED);
        schema_builder.add_text_field("severity", STRING | STORED);
        schema_builder.build()
    };
    let dynamic_schema = {
        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
        schema_builder.add_json_field("json", TEXT);
        schema_builder.build()
    };
    let mut schema_builder = tantivy::schema::SchemaBuilder::new();
    let text_indexing_options = TextFieldIndexing::default()
        .set_tokenizer("default")
        .set_fieldnorms(false)
        .set_index_option(tantivy::schema::IndexRecordOption::WithFreqsAndPositions);
    let mut text_options = TextOptions::default().set_indexing_options(text_indexing_options);
    let text_field = schema_builder.add_text_field("body", text_options);
    let schema = schema_builder.build();

    // prepare doc
    let mut documents_no_array = Vec::new();
    let mut documents_with_array = Vec::new();
    for doc_json in HDFS_LOGS.trim().split("\n") {
        let json_obj: serde_json::Map<String, JsonValue> = serde_json::from_str(doc_json).unwrap();
        let text = json_obj.get("body").unwrap().as_str().unwrap();
        let mut doc_no_array = Document::new();
        doc_no_array.add_text(text_field, text);
        documents_no_array.push(doc_no_array);
        let mut doc_with_array = Document::new();
        doc_with_array.add_borrowed_values(text.to_owned(), |text| {
            text.split(' ')
                .map(|text| FieldValue::new(text_field, text.into()))
                .collect()
        });
        documents_with_array.push(doc_with_array);
    }

    let mut group = c.benchmark_group("index-hdfs");
    group.sample_size(20);
    group.bench_function("index-hdfs-no-commit", |b| {
        b.iter(|| {
            let index = Index::create_in_ram(schema.clone());
            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
            let ram_directory = RamDirectory::create();
            let mut index_writer = IndexBuilder::new()
                .schema(schema.clone())
                .single_segment_index_writer(ram_directory, 100_000_000)
                .unwrap();
            for _ in 0..NUM_REPEATS {
                for doc_json in HDFS_LOGS.trim().split("\n") {
                    let doc = schema.parse_document(doc_json).unwrap();
                let documents_cloned = documents_no_array.clone();
                for doc in documents_cloned {
                    index_writer.add_document(doc).unwrap();
                }
            }
        })
    });
    group.bench_function("index-hdfs-with-commit", |b| {
    group.bench_function("index-hdfs-with-array-no-commit", |b| {
        b.iter(|| {
            let index = Index::create_in_ram(schema.clone());
            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
            let ram_directory = RamDirectory::create();
            let mut index_writer = IndexBuilder::new()
                .schema(schema.clone())
                .single_segment_index_writer(ram_directory, 100_000_000)
                .unwrap();
            for _ in 0..NUM_REPEATS {
                for doc_json in HDFS_LOGS.trim().split("\n") {
                    let doc = schema.parse_document(doc_json).unwrap();
                    index_writer.add_document(doc).unwrap();
                }
            }
            index_writer.commit().unwrap();
        })
    });
    group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
        b.iter(|| {
            let index = Index::create_in_ram(schema_with_store.clone());
            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
            for _ in 0..NUM_REPEATS {
                for doc_json in HDFS_LOGS.trim().split("\n") {
                    let doc = schema.parse_document(doc_json).unwrap();
                let documents_with_array_cloned = documents_with_array.clone();
                for doc in documents_with_array_cloned {
                    index_writer.add_document(doc).unwrap();
                }
            }
        })
    });
    group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
        b.iter(|| {
            let index = Index::create_in_ram(schema_with_store.clone());
            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
            for _ in 0..NUM_REPEATS {
                for doc_json in HDFS_LOGS.trim().split("\n") {
                    let doc = schema.parse_document(doc_json).unwrap();
                    index_writer.add_document(doc).unwrap();
                }
            }
            index_writer.commit().unwrap();
        })
    });
    group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
        b.iter(|| {
            let index = Index::create_in_ram(dynamic_schema.clone());
            let json_field = dynamic_schema.get_field("json").unwrap();
            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
            for _ in 0..NUM_REPEATS {
                for doc_json in HDFS_LOGS.trim().split("\n") {
                    let json_val: serde_json::Map<String, serde_json::Value> =
                        serde_json::from_str(doc_json).unwrap();
                    let doc = tantivy::doc!(json_field=>json_val);
                    index_writer.add_document(doc).unwrap();
                }
            }
            index_writer.commit().unwrap();
        })
    });
    group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
        b.iter(|| {
            let index = Index::create_in_ram(dynamic_schema.clone());
            let json_field = dynamic_schema.get_field("json").unwrap();
            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
            for _ in 0..NUM_REPEATS {
                for doc_json in HDFS_LOGS.trim().split("\n") {
                    let json_val: serde_json::Map<String, serde_json::Value> =
                        serde_json::from_str(doc_json).unwrap();
                    let doc = tantivy::doc!(json_field=>json_val);
                    index_writer.add_document(doc).unwrap();
                }
            }
            index_writer.commit().unwrap();
        })
    });
    // group.bench_function("index-hdfs-with-commit", |b| {
    //     b.iter(|| {
    //         let ram_directory = RamDirectory::create();
    //         let mut index_writer = IndexBuilder::new()
    //             .schema(schema.clone())
    //             .single_segment_index_writer(ram_directory, 100_000_000)
    //             .unwrap();
    //         for _ in 0..NUM_REPEATS {
    //             for doc_json in HDFS_LOGS.trim().split("\n") {
    //                 let doc = schema.parse_document(doc_json).unwrap();
    //                 index_writer.add_document(doc).unwrap();
    //             }
    //         }
    //         index_writer.commit().unwrap();
    //     })
    // });
    // group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
    //     b.iter(|| {
    //         let ram_directory = RamDirectory::create();
    //         let mut index_writer = IndexBuilder::new()
    //             .schema(schema.clone())
    //             .single_segment_index_writer(ram_directory, 100_000_000)
    //             .unwrap();
    //         for _ in 0..NUM_REPEATS {
    //             for doc_json in HDFS_LOGS.trim().split("\n") {
    //                 let doc = schema.parse_document(doc_json).unwrap();
    //                 index_writer.add_document(doc).unwrap();
    //             }
    //         }
    //     })
    // });
    // group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
    //     b.iter(|| {
    //         let ram_directory = RamDirectory::create();
    //         let mut index_writer = IndexBuilder::new()
    //             .schema(schema.clone())
    //             .single_segment_index_writer(ram_directory, 100_000_000)
    //             .unwrap();
    //         for _ in 0..NUM_REPEATS {
    //             for doc_json in HDFS_LOGS.trim().split("\n") {
    //                 let doc = schema.parse_document(doc_json).unwrap();
    //                 index_writer.add_document(doc).unwrap();
    //             }
    //         }
    //         index_writer.commit().unwrap();
    //     })
    // });
    // group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
    //     b.iter(|| {
    //         let ram_directory = RamDirectory::create();
    //         let mut index_writer = IndexBuilder::new()
    //             .schema(schema.clone())
    //             .single_segment_index_writer(ram_directory, 100_000_000)
    //             .unwrap();
    //         for _ in 0..NUM_REPEATS {
    //             for doc_json in HDFS_LOGS.trim().split("\n") {
    //                 let json_val: serde_json::Map<String, serde_json::Value> =
    //                     serde_json::from_str(doc_json).unwrap();
    //                 let doc = tantivy::doc!(json_field=>json_val);
    //                 index_writer.add_document(doc).unwrap();
    //             }
    //         }
    //         index_writer.commit().unwrap();
    //     })
    // });
    // group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
    //     b.iter(|| {
    //         let ram_directory = RamDirectory::create();
    //         let mut index_writer = IndexBuilder::new()
    //             .schema(schema.clone())
    //             .single_segment_index_writer(ram_directory, 100_000_000)
    //             .unwrap();
    //         for _ in 0..NUM_REPEATS {
    //             for doc_json in HDFS_LOGS.trim().split("\n") {
    //                 let json_val: serde_json::Map<String, serde_json::Value> =
    //                     serde_json::from_str(doc_json).unwrap();
    //                 let doc = tantivy::doc!(json_field=>json_val);
    //                 index_writer.add_document(doc).unwrap();
    //             }
    //         }
    //         index_writer.commit().unwrap();
    //     })
    //});
}

criterion_group! {
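The two writer setups interleaved above are the heart of this benchmark change: a regular one-thread `IndexWriter` over an in-RAM index versus a single-segment writer built via `IndexBuilder::single_segment_index_writer` into a `RamDirectory`. A minimal sketch of both, using only calls that appear in this diff; exact signatures and the `commit()` call on the single-segment writer follow the diff's own usage and should be treated as assumptions:

use tantivy::directory::RamDirectory;
use tantivy::schema::{SchemaBuilder, TEXT};
use tantivy::{doc, Index, IndexBuilder};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = SchemaBuilder::new();
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    // Variant A: regular writer over an in-RAM index (one indexing thread).
    let index = Index::create_in_ram(schema.clone());
    let mut index_writer = index.writer_with_num_threads(1, 100_000_000)?;
    index_writer.add_document(doc!(body => "a log line"))?;
    index_writer.commit()?;

    // Variant B: single-segment writer straight into a RamDirectory.
    let ram_directory = RamDirectory::create();
    let mut single_segment_writer = IndexBuilder::new()
        .schema(schema)
        .single_segment_index_writer(ram_directory, 100_000_000)?;
    single_segment_writer.add_document(doc!(body => "a log line"))?;
    single_segment_writer.commit()?; // the diff's benches call commit() here

    Ok(())
}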
@@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::io::{Read, Write};
use std::{fmt, io};

@@ -210,6 +211,23 @@ impl BinarySerializable for String {
    }
}

impl<'a> BinarySerializable for Cow<'a, str> {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        let data: &[u8] = self.as_bytes();
        VInt(data.len() as u64).serialize(writer)?;
        writer.write_all(data)
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        let string_length = VInt::deserialize(reader)?.val() as usize;
        let mut result = String::with_capacity(string_length);
        reader
            .take(string_length as u64)
            .read_to_string(&mut result)?;
        Ok(Cow::Owned(result))
    }
}

#[cfg(test)]
pub mod test {
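A self-contained sketch of the length-prefixed scheme the new `Cow<str>` impl uses, with a plain `u64` standing in for the `VInt` length prefix (tantivy's actual `BinarySerializable` trait lives in its common crate and is not reproduced here):

use std::borrow::Cow;
use std::io::{self, Read, Write};

fn serialize_cow_str<W: Write>(value: &Cow<str>, writer: &mut W) -> io::Result<()> {
    let data = value.as_bytes();
    // Stand-in for the VInt length prefix used in the diff.
    writer.write_all(&(data.len() as u64).to_le_bytes())?;
    writer.write_all(data)
}

fn deserialize_cow_str<R: Read>(reader: &mut R) -> io::Result<Cow<'static, str>> {
    let mut len_bytes = [0u8; 8];
    reader.read_exact(&mut len_bytes)?;
    let string_length = u64::from_le_bytes(len_bytes) as usize;
    let mut result = String::with_capacity(string_length);
    // Bound the read to the declared length, exactly as the impl above does.
    reader.take(string_length as u64).read_to_string(&mut result)?;
    Ok(Cow::Owned(result)) // deserialization always yields an owned Cow
}

fn main() -> io::Result<()> {
    let original: Cow<str> = Cow::Borrowed("hello");
    let mut buffer = Vec::new();
    serialize_cow_str(&original, &mut buffer)?;
    let decoded = deserialize_cow_str(&mut &buffer[..])?;
    assert_eq!(decoded, "hello");
    Ok(())
}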
@@ -157,7 +157,7 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start with a valid
/// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
    let (result, vlen) = read_u32_vint_no_advance(data);
    let (result, vlen) = read_u32_vint_no_advance(*data);
    *data = &data[vlen..];
    result
}
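The changed line passes the reborrowed slice `*data` instead of the `&mut &[u8]` itself. A minimal sketch of the advancing-cursor pattern this function relies on (a toy one-byte reader, not tantivy's actual vint wire format):

// Toy reader: consumes one byte from the front of the slice and advances it.
fn read_byte(data: &mut &[u8]) -> u8 {
    let value = data[0];
    *data = &data[1..]; // advance the caller's slice in place
    value
}

fn main() {
    let buffer = [1u8, 2, 3];
    let mut cursor: &[u8] = &buffer;
    assert_eq!(read_byte(&mut cursor), 1);
    assert_eq!(cursor, &[2, 3]); // the cursor moved past the consumed byte
}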
@@ -113,7 +113,7 @@ mod tests {

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(
            column.get_positions_for_value_range(
                major_item..=major_item,
                0..data.len() as u32,
                &mut positions,

@@ -129,7 +129,7 @@ mod tests {

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(
            column.get_positions_for_value_range(
                minor_item..=minor_item,
                0..data.len() as u32,
                &mut positions,

@@ -145,7 +145,11 @@ mod tests {

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
            column.get_positions_for_value_range(
                0..=u128::MAX,
                0..data.len() as u32,
                &mut positions,
            );
            positions
        });
    }
@@ -35,7 +35,7 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
    ///
    /// Note that position == docid for single value fast fields
    #[inline]
    fn get_docids_for_value_range(
    fn get_positions_for_value_range(
        &self,
        value_range: RangeInclusive<T>,
        doc_id_range: Range<u32>,

@@ -70,11 +70,6 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
    /// The number of values in the column.
    fn num_vals(&self) -> u32;

    /// The number of docs in the column. For single value columns this equals num_vals.
    fn num_docs(&self) -> u32 {
        self.num_vals()
    }

    /// Returns an iterator over the data
    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))

@@ -227,13 +222,13 @@ where
        )
    }

    fn get_docids_for_value_range(
    fn get_positions_for_value_range(
        &self,
        range: RangeInclusive<Output>,
        doc_id_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        self.from_column.get_docids_for_value_range(
        self.from_column.get_positions_for_value_range(
            self.monotonic_mapping.inverse(range.start().clone())
                ..=self.monotonic_mapping.inverse(range.end().clone()),
            doc_id_range,

@@ -245,7 +240,6 @@ where
    // and we do not have any specialized implementation anyway.
}

/// Wraps an iterator into a `Column`.
pub struct IterColumn<T>(T);

impl<T> From<T> for IterColumn<T>
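The hunks above show `get_docids_for_value_range` and `get_positions_for_value_range` as the two names of the same range-scan hook on `Column`. A standalone sketch of the idea, with the trait simplified from the diff rather than copied from tantivy's full definition:

use std::ops::{Range, RangeInclusive};

// Simplified stand-in for the fastfield Column trait in the diff.
trait Column<T: PartialOrd = u64> {
    fn get_val(&self, idx: u32) -> T;
    fn num_vals(&self) -> u32;

    // Collects the positions (== doc ids for single-value columns)
    // whose value falls inside `value_range`.
    fn get_positions_for_value_range(
        &self,
        value_range: RangeInclusive<T>,
        position_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        let end = position_range.end.min(self.num_vals());
        for idx in position_range.start..end {
            if value_range.contains(&self.get_val(idx)) {
                positions.push(idx);
            }
        }
    }
}

struct VecColumn(Vec<u64>);

impl Column for VecColumn {
    fn get_val(&self, idx: u32) -> u64 {
        self.0[idx as usize]
    }
    fn num_vals(&self) -> u32 {
        self.0.len() as u32
    }
}

fn main() {
    let column = VecColumn(vec![3, 7, 7, 1]);
    let mut positions = Vec::new();
    column.get_positions_for_value_range(7..=7, 0..column.num_vals(), &mut positions);
    assert_eq!(positions, vec![1, 2]);
}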
@@ -306,13 +306,13 @@ impl Column<u128> for CompactSpaceDecompressor {
    }

    #[inline]
    fn get_docids_for_value_range(
    fn get_positions_for_value_range(
        &self,
        value_range: RangeInclusive<u128>,
        positions_range: Range<u32>,
        doc_id_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        self.get_positions_for_value_range(value_range, positions_range, positions)
        self.get_positions_for_value_range(value_range, doc_id_range, positions)
    }
}

@@ -351,13 +351,13 @@ impl CompactSpaceDecompressor {
    pub fn get_positions_for_value_range(
        &self,
        value_range: RangeInclusive<u128>,
        position_range: Range<u32>,
        doc_id_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        if value_range.start() > value_range.end() {
            return;
        }
        let position_range = position_range.start..position_range.end.min(self.num_vals());
        let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
        let from_value = *value_range.start();
        let to_value = *value_range.end();
        assert!(to_value >= from_value);

@@ -390,10 +390,10 @@ impl CompactSpaceDecompressor {

        let range = compact_from..=compact_to;

        let scan_num_docs = position_range.end - position_range.start;
        let scan_num_docs = doc_id_range.end - doc_id_range.start;

        let step_size = 4;
        let cutoff = position_range.start + scan_num_docs - scan_num_docs % step_size;
        let cutoff = doc_id_range.start + scan_num_docs - scan_num_docs % step_size;

        let mut push_if_in_range = |idx, val| {
            if range.contains(&val) {

@@ -402,7 +402,7 @@ impl CompactSpaceDecompressor {
        };
        let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
        // unrolled loop
        for idx in (position_range.start..cutoff).step_by(step_size as usize) {
        for idx in (doc_id_range.start..cutoff).step_by(step_size as usize) {
            let idx1 = idx;
            let idx2 = idx + 1;
            let idx3 = idx + 2;

@@ -418,7 +418,7 @@ impl CompactSpaceDecompressor {
        }

        // handle rest
        for idx in cutoff..position_range.end {
        for idx in cutoff..doc_id_range.end {
            push_if_in_range(idx, get_val(idx as u32));
        }
    }

@@ -704,7 +704,7 @@ mod tests {
        doc_id_range: Range<u32>,
    ) -> Vec<u32> {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
        column.get_positions_for_value_range(value_range, doc_id_range, &mut positions);
        positions
    }
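The scan above manually unrolls its hot loop by a step of 4 and mops up the remainder afterwards. A self-contained sketch of that cutoff arithmetic, with an illustrative equality predicate in place of the compact-space range check:

fn scan_unrolled(values: &[u32], target: u32) -> Vec<u32> {
    let step_size: u32 = 4;
    let len = values.len() as u32;
    // Largest multiple of step_size that fits; the tail is handled separately.
    let cutoff = len - len % step_size;

    let mut hits = Vec::new();
    let mut push_if_match = |idx: u32| {
        if values[idx as usize] == target {
            hits.push(idx);
        }
    };
    // unrolled loop: four independent lookups per iteration
    for idx in (0..cutoff).step_by(step_size as usize) {
        push_if_match(idx);
        push_if_match(idx + 1);
        push_if_match(idx + 2);
        push_if_match(idx + 3);
    }
    // handle rest
    for idx in cutoff..len {
        push_if_match(idx);
    }
    hits
}

fn main() {
    let data = [5, 9, 5, 5, 1, 5];
    assert_eq!(scan_unrolled(&data, 5), vec![0, 2, 3, 5]);
}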
@@ -41,7 +41,7 @@ mod serialize;

use self::bitpacked::BitpackedCodec;
use self::blockwise_linear::BlockwiseLinearCodec;
pub use self::column::{monotonic_map_column, Column, IterColumn, VecColumn};
pub use self::column::{monotonic_map_column, Column, VecColumn};
use self::linear::LinearCodec;
pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;

@@ -218,7 +218,7 @@ mod tests {
            .map(|(pos, _)| pos as u32)
            .collect();
        let mut positions = Vec::new();
        reader.get_docids_for_value_range(
        reader.get_positions_for_value_range(
            data[test_rand_idx]..=data[test_rand_idx],
            0..data.len() as u32,
            &mut positions,
@@ -119,7 +119,7 @@ fn bench_ip() {
    for value in dataset.iter().take(1110).skip(1100).cloned() {
        doc_values.clear();
        print_time!("get range");
        decompressor.get_docids_for_value_range(
        decompressor.get_positions_for_value_range(
            value..=value,
            0..decompressor.num_vals(),
            &mut doc_values,
@@ -4,6 +4,8 @@
//! intermediate average results, which is the sum and the number of values. The actual average is
//! calculated on the step from intermediate to final aggregation result tree.

use std::collections::HashMap;

use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

@@ -16,7 +18,7 @@ use crate::TantivyError;

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// The final aggregation result.
pub struct AggregationResults(pub FxHashMap<String, AggregationResult>);
pub struct AggregationResults(pub HashMap<String, AggregationResult>);

impl AggregationResults {
    pub(crate) fn get_value_from_aggregation(
@@ -3,6 +3,7 @@
//! indices.

use std::cmp::Ordering;
use std::collections::HashMap;

use itertools::Itertools;
use rustc_hash::FxHashMap;

@@ -50,7 +51,7 @@ impl IntermediateAggregationResults {
        // Important assumption:
        // When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
        // request
        let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();
        let mut results: HashMap<String, AggregationResult> = HashMap::new();

        if let Some(buckets) = self.buckets {
            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?

@@ -131,7 +132,7 @@ impl IntermediateAggregationResults {
}

fn convert_and_add_final_metrics_to_result(
    results: &mut FxHashMap<String, AggregationResult>,
    results: &mut HashMap<String, AggregationResult>,
    metrics: VecWithNames<IntermediateMetricResult>,
) {
    results.extend(

@@ -142,7 +143,7 @@ fn convert_and_add_final_metrics_to_result(
}

fn add_empty_final_metrics_to_result(
    results: &mut FxHashMap<String, AggregationResult>,
    results: &mut HashMap<String, AggregationResult>,
    req_metrics: &VecWithNames<MetricAggregation>,
) -> crate::Result<()> {
    results.extend(req_metrics.iter().map(|(key, req)| {

@@ -156,7 +157,7 @@ fn add_empty_final_metrics_to_result(
}

fn add_empty_final_buckets_to_result(
    results: &mut FxHashMap<String, AggregationResult>,
    results: &mut HashMap<String, AggregationResult>,
    req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
    let requested_buckets = req_buckets.iter();

@@ -168,7 +169,7 @@ fn add_empty_final_buckets_to_result(
}

fn convert_and_add_final_buckets_to_result(
    results: &mut FxHashMap<String, AggregationResult>,
    results: &mut HashMap<String, AggregationResult>,
    buckets: VecWithNames<IntermediateBucketResult>,
    req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
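These aggregation hunks swap between `std::collections::HashMap` and `rustc_hash::FxHashMap` for the result maps. The two are drop-in compatible and only the hasher differs, which is why every change above is a type-name substitution plus `FxHashMap::default()` in place of `HashMap::new()`. A minimal sketch; the `rustc-hash` crate version in the comment is an assumption:

use std::collections::HashMap;

use rustc_hash::FxHashMap; // rustc-hash = "1"

fn main() {
    // FxHashMap is a HashMap with a faster, non-DoS-resistant hasher,
    // so construction goes through Default rather than HashMap::new().
    let mut fast: FxHashMap<String, u64> = FxHashMap::default();
    fast.insert("count".to_string(), 1);

    let mut std_map: HashMap<String, u64> = HashMap::new();
    std_map.insert("count".to_string(), 1);

    assert_eq!(fast["count"], std_map["count"]);
}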
@@ -616,7 +616,7 @@ mod tests {
        .map(|mut doc| {
            doc.add_facet(
                facet_field,
                &format!("/facet/{}", thread_rng().sample(uniform)),
                &format!("/facet/{}", thread_rng().sample(&uniform)),
            );
            doc
        })
@@ -172,33 +172,17 @@ pub trait Collector: Sync + Send {
    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
        let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;

        match (reader.alive_bitset(), self.requires_scoring()) {
            (Some(alive_bitset), true) => {
                weight.for_each(reader, &mut |doc, score| {
                    if alive_bitset.is_alive(doc) {
                        segment_collector.collect(doc, score);
                    }
                })?;
            }
            (Some(alive_bitset), false) => {
                weight.for_each_no_score(reader, &mut |doc| {
                    if alive_bitset.is_alive(doc) {
                        segment_collector.collect(doc, 0.0);
                    }
                })?;
            }
            (None, true) => {
                weight.for_each(reader, &mut |doc, score| {
        if let Some(alive_bitset) = reader.alive_bitset() {
            weight.for_each(reader, &mut |doc, score| {
                if alive_bitset.is_alive(doc) {
                    segment_collector.collect(doc, score);
                })?;
            }
            (None, false) => {
                weight.for_each_no_score(reader, &mut |doc| {
                    segment_collector.collect(doc, 0.0);
                })?;
            }
        }
            })?;
        } else {
            weight.for_each(reader, &mut |doc, score| {
                segment_collector.collect(doc, score);
            })?;
        }

        Ok(segment_collector.harvest())
    }
}
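The hunk above replaces a single `if let` on the alive bitset with a four-way dispatch on `(alive_bitset, requires_scoring)`, so scoring work can be skipped entirely when no collector needs it; this is the collector side of the changelog's "Skip score calculation" entry. A standalone sketch of that dispatch shape, with simplified stand-ins rather than tantivy's `Weight`/`Collector` API:

// Simplified stand-in for the segment's delete tracking.
struct AliveBitset;
impl AliveBitset {
    fn is_alive(&self, _doc: u32) -> bool {
        true
    }
}

fn collect_segment(
    alive_bitset: Option<&AliveBitset>,
    requires_scoring: bool,
    docs: &[(u32, f32)], // (doc, score) pairs a Weight would yield
    collect: &mut dyn FnMut(u32, f32),
) {
    match (alive_bitset, requires_scoring) {
        // Scoring needed: pass real scores through, filtering deleted docs.
        (Some(bitset), true) => {
            for &(doc, score) in docs {
                if bitset.is_alive(doc) {
                    collect(doc, score);
                }
            }
        }
        // No scoring: the "for_each_no_score" path hands out a constant 0.0.
        (Some(bitset), false) => {
            for &(doc, _) in docs {
                if bitset.is_alive(doc) {
                    collect(doc, 0.0);
                }
            }
        }
        (None, true) => {
            for &(doc, score) in docs {
                collect(doc, score);
            }
        }
        (None, false) => {
            for &(doc, _) in docs {
                collect(doc, 0.0);
            }
        }
    }
}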
@@ -133,7 +133,7 @@ impl SegmentMeta {
    /// associated with a segment component.
    pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
        let mut path = self.id().uuid_string();
        path.push_str(&match component {
        path.push_str(&*match component {
            SegmentComponent::Postings => ".idx".to_string(),
            SegmentComponent::Positions => ".pos".to_string(),
            SegmentComponent::Terms => ".term".to_string(),
@@ -55,7 +55,7 @@ impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {

impl Drop for DirectoryLockGuard {
    fn drop(&mut self) {
        if let Err(e) = self.directory.delete(&self.path) {
        if let Err(e) = self.directory.delete(&*self.path) {
            error!("Failed to remove the lock file. {:?}", e);
        }
    }
@@ -1,9 +1,10 @@
use std::ops::Range;
use std::sync::Arc;

use fastfield_codecs::Column;

use crate::directory::{FileSlice, OwnedBytes};
use crate::fastfield::MultiValueIndex;
use crate::fastfield::MultiValueLength;
use crate::DocId;

/// Reader for byte array fast fields

@@ -18,7 +19,7 @@ use crate::DocId;
/// and the start index for the next document, and keeping the bytes in between.
#[derive(Clone)]
pub struct BytesFastFieldReader {
    idx_reader: MultiValueIndex,
    idx_reader: Arc<dyn Column<u64>>,
    values: OwnedBytes,
}

@@ -28,26 +29,24 @@ impl BytesFastFieldReader {
        values_file: FileSlice,
    ) -> crate::Result<BytesFastFieldReader> {
        let values = values_file.read_bytes()?;
        Ok(BytesFastFieldReader {
            idx_reader: MultiValueIndex::new(idx_reader),
            values,
        })
        Ok(BytesFastFieldReader { idx_reader, values })
    }

    /// returns the multivalue index
    pub fn get_index_reader(&self) -> &MultiValueIndex {
        &self.idx_reader
    fn range(&self, doc: DocId) -> Range<u32> {
        let start = self.idx_reader.get_val(doc) as u32;
        let end = self.idx_reader.get_val(doc + 1) as u32;
        start..end
    }

    /// Returns the bytes associated with the given `doc`
    pub fn get_bytes(&self, doc: DocId) -> &[u8] {
        let range = self.idx_reader.range(doc);
        let range = self.range(doc);
        &self.values.as_slice()[range.start as usize..range.end as usize]
    }

    /// Returns the length of the bytes associated with the given `doc`
    pub fn num_bytes(&self, doc: DocId) -> u64 {
        let range = self.idx_reader.range(doc);
        let range = self.range(doc);
        (range.end - range.start) as u64
    }

@@ -56,3 +55,15 @@ impl BytesFastFieldReader {
        self.values.len() as u64
    }
}

impl MultiValueLength for BytesFastFieldReader {
    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32> {
        self.range(doc_id)
    }
    fn get_len(&self, doc_id: DocId) -> u64 {
        self.num_bytes(doc_id)
    }
    fn get_total_len(&self) -> u64 {
        self.total_num_bytes()
    }
}
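Both variants above resolve a document's bytes through an offsets column: entry `doc` holds the start of the doc's values and entry `doc + 1` the end. A self-contained sketch of that lookup, with a plain `Vec<u64>` standing in for the `Column<u64>` index reader:

use std::ops::Range;

// Offsets column: one entry per document plus a trailing sentinel.
struct ByteOffsets {
    offsets: Vec<u64>, // e.g. [0, 3, 3, 7]: doc 0 -> 0..3, doc 1 -> empty, doc 2 -> 3..7
    values: Vec<u8>,
}

impl ByteOffsets {
    fn range(&self, doc: u32) -> Range<u32> {
        let start = self.offsets[doc as usize] as u32;
        let end = self.offsets[doc as usize + 1] as u32;
        start..end
    }

    fn get_bytes(&self, doc: u32) -> &[u8] {
        let range = self.range(doc);
        &self.values[range.start as usize..range.end as usize]
    }
}

fn main() {
    let reader = ByteOffsets {
        offsets: vec![0, 3, 3, 7],
        values: b"abcdefg".to_vec(),
    };
    assert_eq!(reader.get_bytes(0), b"abc");
    assert_eq!(reader.get_bytes(1), b"");
    assert_eq!(reader.get_bytes(2), b"defg");
}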
@@ -27,8 +27,8 @@ pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub(crate) use self::multivalued::{get_fastfield_codecs_for_multivalue, MultivalueStartIndex};
pub use self::multivalued::{
    MultiValueIndex, MultiValueU128FastFieldWriter, MultiValuedFastFieldReader,
    MultiValuedFastFieldWriter, MultiValuedU128FastFieldReader,
    MultiValueU128FastFieldWriter, MultiValuedFastFieldReader, MultiValuedFastFieldWriter,
    MultiValuedU128FastFieldReader,
};
pub use self::readers::FastFieldReaders;
pub(crate) use self::readers::{type_and_cardinality, FastType};

@@ -36,7 +36,7 @@ pub use self::serializer::{Column, CompositeFastFieldSerializer};
use self::writer::unexpected_value;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::schema::{Type, Value};
use crate::DateTime;
use crate::{DateTime, DocId};

mod alive_bitset;
mod bytes;

@@ -47,6 +47,17 @@ mod readers;
mod serializer;
mod writer;

/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
/// for a doc_id
pub trait MultiValueLength {
    /// returns the positions for a docid
    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32>;
    /// returns the num of values associated with a doc_id
    fn get_len(&self, doc_id: DocId) -> u64;
    /// returns the sum of num values for all doc_ids
    fn get_total_len(&self) -> u64;
}

/// Trait for types that are allowed for fast fields:
/// (u64, i64 and f64, bool, DateTime).
pub trait FastValue:
@@ -1,140 +0,0 @@
use std::ops::Range;
use std::sync::Arc;

use fastfield_codecs::Column;

use crate::DocId;

#[derive(Clone)]
/// Index to resolve value range for given doc_id.
/// Starts at 0.
pub struct MultiValueIndex {
    idx: Arc<dyn Column<u64>>,
}

impl MultiValueIndex {
    pub(crate) fn new(idx: Arc<dyn Column<u64>>) -> Self {
        Self { idx }
    }

    /// Returns `[start, end)`, such that the values associated with
    /// the given document are `start..end`.
    #[inline]
    pub(crate) fn range(&self, doc: DocId) -> Range<u32> {
        let start = self.idx.get_val(doc) as u32;
        let end = self.idx.get_val(doc + 1) as u32;
        start..end
    }

    /// Returns `[start, end)`, such that the values associated with
    /// the given documents are `start..end`.
    ///
    /// The passed end range is allowed to be out of bounds.
    #[inline]
    pub(crate) fn docid_range_to_position_range(&self, range: Range<DocId>) -> Range<u32> {
        let end_docid = range.end.min(self.num_docs() - 1) + 1;
        let start_docid = range.start.min(end_docid);

        let start = self.idx.get_val(start_docid) as u32;
        let end = self.idx.get_val(end_docid) as u32;
        assert!(start <= end);

        start..end
    }

    /// returns the num of values associated with a doc_id
    pub(crate) fn num_vals_for_doc(&self, doc: DocId) -> u32 {
        let range = self.range(doc);
        range.end - range.start
    }

    /// Returns the overall number of values in this field.
    #[inline]
    pub fn total_num_vals(&self) -> u64 {
        self.idx.max_value()
    }

    /// Returns the number of documents in the index.
    #[inline]
    pub fn num_docs(&self) -> u32 {
        self.idx.num_vals() - 1
    }

    /// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
    /// Positions are converted inplace to docids.
    ///
    /// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the
    /// index.
    ///
    /// Correctness: positions needs to be sorted. idx_reader needs to contain monotonically
    /// increasing positions.
    ///
    /// TODO: Instead of a linear scan we can employ an exponential search into binary search to
    /// match a docid to its value position.
    pub(crate) fn positions_to_docids(&self, doc_id_range: Range<u32>, positions: &mut Vec<u32>) {
        if positions.is_empty() {
            return;
        }
        let mut cur_doc = doc_id_range.start;
        let mut last_doc = None;

        assert!(self.idx.get_val(doc_id_range.start) as u32 <= positions[0]);

        let mut write_doc_pos = 0;
        for i in 0..positions.len() {
            let pos = positions[i];
            loop {
                let end = self.idx.get_val(cur_doc + 1) as u32;
                if end > pos {
                    positions[write_doc_pos] = cur_doc;
                    write_doc_pos += if last_doc == Some(cur_doc) { 0 } else { 1 };
                    last_doc = Some(cur_doc);
                    break;
                }
                cur_doc += 1;
            }
        }
        positions.truncate(write_doc_pos);
    }
}

#[cfg(test)]
mod tests {
    use std::ops::Range;
    use std::sync::Arc;

    use fastfield_codecs::IterColumn;

    use crate::fastfield::MultiValueIndex;

    fn index_to_pos_helper(
        index: &MultiValueIndex,
        doc_id_range: Range<u32>,
        positions: &[u32],
    ) -> Vec<u32> {
        let mut positions = positions.to_vec();
        index.positions_to_docids(doc_id_range, &mut positions);
        positions
    }

    #[test]
    fn test_positions_to_docid() {
        let offsets = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
        let column = IterColumn::from(offsets.into_iter());
        let index = MultiValueIndex::new(Arc::new(column));
        assert_eq!(index.num_docs(), 5);
        {
            let positions = vec![10u32, 11, 15, 20, 21, 22];

            assert_eq!(index_to_pos_helper(&index, 0..5, &positions), vec![1, 3, 4]);
            assert_eq!(index_to_pos_helper(&index, 1..5, &positions), vec![1, 3, 4]);
            assert_eq!(index_to_pos_helper(&index, 0..5, &[9]), vec![0]);
            assert_eq!(index_to_pos_helper(&index, 1..5, &[10]), vec![1]);
            assert_eq!(index_to_pos_helper(&index, 1..5, &[11]), vec![1]);
            assert_eq!(index_to_pos_helper(&index, 2..5, &[12]), vec![2]);
            assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14]), vec![2]);
            assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14, 15]), vec![2, 3]);
        }
    }
}
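`positions_to_docids` in the removed file above maps sorted value positions back to doc ids by walking the offsets column once. A standalone sketch of the same scan over a plain offsets vector, reproducing the test data from that file:

// offsets[doc]..offsets[doc + 1] is the value range of `doc`.
fn positions_to_docids(offsets: &[u32], positions: &[u32]) -> Vec<u32> {
    let mut docs = Vec::new();
    let mut cur_doc = 0u32;
    let mut last_doc = None;

    for &pos in positions {
        // Advance until the current doc's value range ends past `pos`.
        loop {
            let end = offsets[cur_doc as usize + 1];
            if end > pos {
                if last_doc != Some(cur_doc) {
                    docs.push(cur_doc); // avoid duplicates
                }
                last_doc = Some(cur_doc);
                break;
            }
            cur_doc += 1;
        }
    }
    docs
}

fn main() {
    // Same offsets as the test above: doc 0 -> 0..10, doc 1 -> 10..12, ...
    let offsets = [0, 10, 12, 15, 22, 23];
    let positions = [10, 11, 15, 20, 21, 22];
    assert_eq!(positions_to_docids(&offsets, &positions), vec![1, 3, 4]);
}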
@@ -1,9 +1,7 @@
mod index;
mod reader;
mod writer;

use fastfield_codecs::FastFieldCodecType;
pub use index::MultiValueIndex;

pub use self::reader::{MultiValuedFastFieldReader, MultiValuedU128FastFieldReader};
pub(crate) use self::writer::MultivalueStartIndex;
@@ -3,8 +3,7 @@ use std::sync::Arc;

use fastfield_codecs::{Column, MonotonicallyMappableToU128};

use super::MultiValueIndex;
use crate::fastfield::FastValue;
use crate::fastfield::{FastValue, MultiValueLength};
use crate::DocId;

/// Reader for a multivalued `u64` fast field.

@@ -14,10 +13,9 @@ use crate::DocId;
/// The `vals_reader` will access the concatenated list of all
/// values for all readers.
/// The `idx_reader` associates, for each document, the index of its first value.
/// Stores the start position for each document.
#[derive(Clone)]
pub struct MultiValuedFastFieldReader<Item: FastValue> {
    idx_reader: MultiValueIndex,
    idx_reader: Arc<dyn Column<u64>>,
    vals_reader: Arc<dyn Column<Item>>,
}

@@ -27,11 +25,20 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
        vals_reader: Arc<dyn Column<Item>>,
    ) -> MultiValuedFastFieldReader<Item> {
        MultiValuedFastFieldReader {
            idx_reader: MultiValueIndex::new(idx_reader),
            idx_reader,
            vals_reader,
        }
    }

    /// Returns `[start, end)`, such that the values associated with
    /// the given document are `start..end`.
    #[inline]
    fn range(&self, doc: DocId) -> Range<u32> {
        let start = self.idx_reader.get_val(doc) as u32;
        let end = self.idx_reader.get_val(doc + 1) as u32;
        start..end
    }

    /// Returns the array of values associated with the given `doc`.
    #[inline]
    fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<Item>) {

@@ -44,15 +51,10 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
    /// Returns the array of values associated with the given `doc`.
    #[inline]
    pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
        let range = self.idx_reader.range(doc);
        let range = self.range(doc);
        self.get_vals_for_range(range, vals);
    }

    /// returns the multivalue index
    pub fn get_index_reader(&self) -> &MultiValueIndex {
        &self.idx_reader
    }

    /// Returns the minimum value for this fast field.
    ///
    /// The min value does not take in account of possible

@@ -73,14 +75,28 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {

    /// Returns the number of values associated with the document `DocId`.
    #[inline]
    pub fn num_vals(&self, doc: DocId) -> u32 {
        self.idx_reader.num_vals_for_doc(doc)
    pub fn num_vals(&self, doc: DocId) -> usize {
        let range = self.range(doc);
        (range.end - range.start) as usize
    }

    /// Returns the overall number of values in this field.
    /// Returns the overall number of values in this field .
    #[inline]
    pub fn total_num_vals(&self) -> u64 {
        self.idx_reader.total_num_vals()
        self.idx_reader.max_value()
    }
}

impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
    fn get_range(&self, doc_id: DocId) -> Range<u32> {
        self.range(doc_id)
    }
    fn get_len(&self, doc_id: DocId) -> u64 {
        self.num_vals(doc_id) as u64
    }

    fn get_total_len(&self) -> u64 {
        self.total_num_vals() as u64
    }
}

@@ -93,7 +109,7 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
/// The `idx_reader` associates, for each document, the index of its first value.
#[derive(Clone)]
pub struct MultiValuedU128FastFieldReader<T: MonotonicallyMappableToU128> {
    idx_reader: MultiValueIndex,
    idx_reader: Arc<dyn Column<u64>>,
    vals_reader: Arc<dyn Column<T>>,
}

@@ -103,31 +119,24 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
        vals_reader: Arc<dyn Column<T>>,
    ) -> MultiValuedU128FastFieldReader<T> {
        Self {
            idx_reader: MultiValueIndex::new(idx_reader),
            idx_reader,
            vals_reader,
        }
    }

    /// Returns `[start, end)`, such that the values associated
    /// to the given document are `start..end`.
    #[inline]
    fn get_docids_for_value_range(
        &self,
        value_range: RangeInclusive<T>,
        doc_id_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        let position_range = self
            .get_index_reader()
            .docid_range_to_position_range(doc_id_range.clone());
        self.vals_reader
            .get_docids_for_value_range(value_range, position_range, positions);

        self.idx_reader.positions_to_docids(doc_id_range, positions);
    fn range(&self, doc: DocId) -> Range<u32> {
        let start = self.idx_reader.get_val(doc) as u32;
        let end = self.idx_reader.get_val(doc + 1) as u32;
        start..end
    }

    /// Returns the array of values associated to the given `doc`.
    #[inline]
    pub fn get_first_val(&self, doc: DocId) -> Option<T> {
        let range = self.idx_reader.range(doc);
        let range = self.range(doc);
        if range.is_empty() {
            return None;
        }

@@ -143,18 +152,26 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
            .get_range(range.start as u64, &mut vals[..]);
    }

    /// Returns the index reader
    pub fn get_index_reader(&self) -> &MultiValueIndex {
        &self.idx_reader
    }

    /// Returns the array of values associated to the given `doc`.
    #[inline]
    pub fn get_vals(&self, doc: DocId, vals: &mut Vec<T>) {
        let range = self.idx_reader.range(doc);
        let range = self.range(doc);
        self.get_vals_for_range(range, vals);
    }

    /// Returns all docids which are in the provided value range
    pub fn get_positions_for_value_range(
        &self,
        value_range: RangeInclusive<T>,
        doc_id_range: Range<u32>,
    ) -> Vec<DocId> {
        let mut positions = Vec::new(); // TODO replace
        self.vals_reader
            .get_positions_for_value_range(value_range, doc_id_range, &mut positions);

        positions_to_docids(&positions, self.idx_reader.as_ref())
    }

    /// Iterates over all elements in the fast field
    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
        self.vals_reader.iter()

@@ -180,27 +197,85 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {

    /// Returns the number of values associated with the document `DocId`.
    #[inline]
    pub fn num_vals(&self, doc: DocId) -> u32 {
        self.idx_reader.num_vals_for_doc(doc)
    pub fn num_vals(&self, doc: DocId) -> usize {
        let range = self.range(doc);
        (range.end - range.start) as usize
    }

    /// Returns the overall number of values in this field. It does not include deletes.
    /// Returns the overall number of values in this field.
    #[inline]
    pub fn total_num_vals(&self) -> u64 {
        assert_eq!(
            self.vals_reader.num_vals() as u64,
            self.get_index_reader().total_num_vals()
        );
        self.idx_reader.total_num_vals()
        self.idx_reader.max_value()
    }
}

impl<T: MonotonicallyMappableToU128> MultiValueLength for MultiValuedU128FastFieldReader<T> {
    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32> {
        self.range(doc_id)
    }
    fn get_len(&self, doc_id: DocId) -> u64 {
        self.num_vals(doc_id) as u64
    }
    fn get_total_len(&self) -> u64 {
        self.total_num_vals() as u64
    }
}

/// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
///
/// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the index.
///
/// Correctness: positions needs to be sorted. idx_reader needs to contain monotonically increasing
/// positions.
///
/// TODO: Instead of a linear scan we can employ an exponential search into binary search to match a
/// docid to its value position.
fn positions_to_docids<C: Column + ?Sized>(positions: &[u32], idx_reader: &C) -> Vec<DocId> {
    let mut docs = vec![];
    let mut cur_doc = 0u32;
    let mut last_doc = None;

    for pos in positions {
        loop {
            let end = idx_reader.get_val(cur_doc + 1) as u32;
            if end > *pos {
                // avoid duplicates
                if Some(cur_doc) == last_doc {
                    break;
                }
                docs.push(cur_doc);
                last_doc = Some(cur_doc);
                break;
            }
            cur_doc += 1;
        }
    }

    docs
}

#[cfg(test)]
mod tests {

    use fastfield_codecs::VecColumn;

    use crate::core::Index;
    use crate::fastfield::multivalued::reader::positions_to_docids;
    use crate::schema::{Cardinality, Facet, FacetOptions, NumericOptions, Schema};

    #[test]
    fn test_positions_to_docid() {
        let positions = vec![10u32, 11, 15, 20, 21, 22];

        let offsets = vec![0, 10, 12, 15, 22, 23];
        {
            let column = VecColumn::from(&offsets);

            let docids = positions_to_docids(&positions, &column);
            assert_eq!(docids, vec![1, 3, 4]);
        }
    }

    #[test]
    fn test_multifastfield_reader() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
@@ -95,7 +95,7 @@ fn compute_deleted_bitset(
        // document that were inserted before it.
        delete_op
            .target
            .for_each_no_score(segment_reader, &mut |doc_matching_delete_query| {
            .for_each(segment_reader, &mut |doc_matching_delete_query, _| {
                if doc_opstamps.is_deleted(doc_matching_delete_query, delete_op.opstamp) {
                    alive_bitset.remove(doc_matching_delete_query);
                    might_have_changed = true;

@@ -805,7 +805,7 @@ mod tests {
    use std::collections::{HashMap, HashSet};
    use std::net::Ipv6Addr;

    use fastfield_codecs::{Column, MonotonicallyMappableToU128};
    use fastfield_codecs::MonotonicallyMappableToU128;
    use proptest::prelude::*;
    use proptest::prop_oneof;
    use proptest::strategy::Strategy;

@@ -1591,25 +1591,6 @@ mod tests {
        (existing_ids, deleted_ids)
    }

    fn get_id_list(ops: &[IndexingOp]) -> Vec<u64> {
        let mut id_list = Vec::new();
        for &op in ops {
            match op {
                IndexingOp::AddDoc { id } => {
                    id_list.push(id);
                }
                IndexingOp::DeleteDoc { id } => {
                    id_list.retain(|el| *el != id);
                }
                IndexingOp::DeleteDocQuery { id } => {
                    id_list.retain(|el| *el != id);
                }
                _ => {}
            }
        }
        id_list
    }

    fn test_operation_strategy(
        ops: &[IndexingOp],
        sort_index: bool,

@@ -1619,9 +1600,7 @@ mod tests {
        let ip_field = schema_builder.add_ip_addr_field("ip", FAST | INDEXED | STORED);
        let ips_field = schema_builder.add_ip_addr_field(
            "ips",
            IpAddrOptions::default()
                .set_fast(Cardinality::MultiValues)
                .set_indexed(),
            IpAddrOptions::default().set_fast(Cardinality::MultiValues),
        );
        let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
        let i64_field = schema_builder.add_i64_field("i64", INDEXED);

@@ -1686,13 +1665,11 @@ mod tests {
        // rotate right
        let multi_text_field_text3 = "test3 test1 test2 test3 test1 test2";

        let ip_from_id = |id| Ipv6Addr::from_u128(id as u128);

        for &op in ops {
            match op {
                IndexingOp::AddDoc { id } => {
                    let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
                    let ip = ip_from_id(id);
                    let ip_from_id = Ipv6Addr::from_u128(id as u128);

                    if !ip_exists(id) {
                        // every 3rd doc has no ip field

@@ -1716,9 +1693,9 @@ mod tests {
                    } else {
                        index_writer.add_document(doc!(id_field=>id,
                            bytes_field => id.to_le_bytes().as_slice(),
                            ip_field => ip,
                            ips_field => ip,
                            ips_field => ip,
                            ip_field => ip_from_id,
                            ips_field => ip_from_id,
                            ips_field => ip_from_id,
                            multi_numbers=> id,
                            multi_numbers => id,
                            bool_field => (id % 2u64) != 0,

@@ -1761,7 +1738,6 @@ mod tests {
        index_writer.commit()?;

        let searcher = index.reader()?.searcher();
        let num_segments_before_merge = searcher.segment_readers().len();
        if force_end_merge {
            index_writer.wait_merging_threads()?;
            let mut index_writer = index.writer_for_tests()?;

@@ -1773,7 +1749,6 @@ mod tests {
                assert!(index_writer.wait_merging_threads().is_ok());
            }
        }
        let num_segments_after_merge = searcher.segment_readers().len();

        old_reader.reload()?;
        let old_searcher = old_reader.searcher();

@@ -1801,22 +1776,6 @@ mod tests {
            .collect();

        let (expected_ids_and_num_occurrences, deleted_ids) = expected_ids(ops);

        let id_list = get_id_list(ops);

        // multivalue fast field content
        let mut all_ips = Vec::new();
        let mut num_ips = 0;
        for segment_reader in searcher.segment_readers().iter() {
            let ip_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
            for doc in segment_reader.doc_ids_alive() {
                let mut vals = vec![];
                ip_reader.get_vals(doc, &mut vals);
                all_ips.extend_from_slice(&vals);
            }
            num_ips += ip_reader.total_num_vals();
        }

        let num_docs_expected = expected_ids_and_num_occurrences
            .iter()
            .map(|(_, id_occurrences)| *id_occurrences as usize)

@@ -1838,30 +1797,6 @@ mod tests {
                .collect::<HashSet<_>>()
        );

        if force_end_merge && num_segments_before_merge > 1 && num_segments_after_merge == 1 {
            let mut expected_multi_ips: Vec<_> = id_list
                .iter()
                .filter(|id| ip_exists(**id))
                .flat_map(|id| vec![ip_from_id(*id), ip_from_id(*id)])
                .collect();
            assert_eq!(num_ips, expected_multi_ips.len() as u64);

            expected_multi_ips.sort();
            all_ips.sort();
            assert_eq!(expected_multi_ips, all_ips);

            // Test fastfield num_docs
            let num_docs: usize = searcher
                .segment_readers()
                .iter()
                .map(|segment_reader| {
                    let ff_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
                    ff_reader.num_docs() as usize
                })
                .sum();
            assert_eq!(num_docs, num_docs_expected);
        }

        // Load all ips addr
        let ips: HashSet<Ipv6Addr> = searcher
            .segment_readers()

@@ -2065,51 +2000,6 @@ mod tests {
                assert_eq!(do_search_ip_field(&format!("\"{}\"", ip_addr)), count);
            }
        }

        // assert data is like expected
        //
        for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
            let (existing_id, count) = (*existing_id, *count);
            if !ip_exists(existing_id) {
                continue;
            }
            let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
                format!("{}:[{} TO {}]", field, &from.to_string(), &to.to_string())
            };
            let ip = ip_from_id(existing_id);

            let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64;
            // Range query on single value field
            // let query = gen_query_inclusive("ip", ip, ip);
            // assert_eq!(do_search_ip_field(&query), count);

            // Range query on multi value field
            let query = gen_query_inclusive("ips", ip, ip);
            assert_eq!(do_search_ip_field(&query), count);
        }

        // ip range query on fast field
        //
        for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
            let (existing_id, count) = (*existing_id, *count);
            if !ip_exists(existing_id) {
                continue;
            }
            let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
                format!("{}:[{} TO {}]", field, &from.to_string(), &to.to_string())
            };
            let ip = ip_from_id(existing_id);

            let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64;
            // Range query on single value field
            // let query = gen_query_inclusive("ip", ip, ip);
            // assert_eq!(do_search_ip_field(&query), count);

            // Range query on multi value field
            let query = gen_query_inclusive("ips", ip, ip);
            assert_eq!(do_search_ip_field(&query), count);
        }

        // test facets
        for segment_reader in searcher.segment_readers().iter() {
            let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();

@@ -2131,40 +2021,6 @@ mod tests {
        Ok(())
    }

    #[test]
    fn test_ip_range_query_multivalue_bug() {
        assert!(test_operation_strategy(
            &[
                IndexingOp::AddDoc { id: 2 },
                IndexingOp::Commit,
                IndexingOp::AddDoc { id: 1 },
                IndexingOp::AddDoc { id: 1 },
                IndexingOp::Commit,
                IndexingOp::Merge
            ],
            true,
            false
        )
        .is_ok());
    }

    #[test]
    fn test_ff_num_ips_regression() {
        assert!(test_operation_strategy(
            &[
                IndexingOp::AddDoc { id: 13 },
                IndexingOp::AddDoc { id: 1 },
                IndexingOp::Commit,
                IndexingOp::DeleteDocQuery { id: 13 },
                IndexingOp::AddDoc { id: 1 },
                IndexingOp::Commit,
            ],
            false,
            true
        )
        .is_ok());
    }

    #[test]
    fn test_minimal() {
        assert!(test_operation_strategy(

@@ -2174,7 +2030,7 @@ mod tests {
                IndexingOp::DeleteDoc { id: 13 }
            ],
            true,
            true
            false
        )
        .is_ok());
@@ -13,7 +13,7 @@ use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{
    get_fastfield_codecs_for_multivalue, AliveBitSet, Column, CompositeFastFieldSerializer,
    MultiValueIndex, MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
    MultiValueLength, MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};

@@ -348,29 +348,9 @@ impl IndexMerger {
            field,
            fast_field_serializer,
            doc_id_mapping,
            &segment_and_ff_readers
                .iter()
                .map(|(segment_reader, u64s_reader)| {
                    (*segment_reader, u64s_reader.get_index_reader())
                })
                .collect::<Vec<_>>(),
            &segment_and_ff_readers,
        )?;

        let num_vals = segment_and_ff_readers
            .iter()
            .map(|(segment_reader, reader)| {
                // TODO implement generic version, implement reverse scan, all - deletes
                if let Some(alive_bitset) = segment_reader.alive_bitset() {
                    alive_bitset
                        .iter_alive()
                        .map(|doc| reader.num_vals(doc))
                        .sum()
                } else {
                    reader.total_num_vals() as u32
                }
            })
            .sum();

        let fast_field_readers = segment_and_ff_readers
            .into_iter()
            .map(|(_, ff_reader)| ff_reader)

@@ -385,7 +365,12 @@ impl IndexMerger {
            })
        };

        fast_field_serializer.create_u128_fast_field_with_idx(field, iter_gen, num_vals, 1)?;
        fast_field_serializer.create_u128_fast_field_with_idx(
            field,
            iter_gen,
            doc_id_mapping.len() as u32,
            1,
        )?;

        Ok(())
    }

@@ -544,11 +529,11 @@ impl IndexMerger {
    // Creating the index file to point into the data, generic over `BytesFastFieldReader` and
    // `MultiValuedFastFieldReader`
    //
    fn write_1_n_fast_field_idx_generic(
    fn write_1_n_fast_field_idx_generic<T: MultiValueLength + Send + Sync>(
        field: Field,
        fast_field_serializer: &mut CompositeFastFieldSerializer,
        doc_id_mapping: &SegmentDocIdMapping,
        segment_and_ff_readers: &[(&SegmentReader, &MultiValueIndex)],
        segment_and_ff_readers: &[(&SegmentReader, T)],
    ) -> crate::Result<()> {
        let column =
            RemappedDocIdMultiValueIndexColumn::new(segment_and_ff_readers, doc_id_mapping);

@@ -582,12 +567,7 @@ impl IndexMerger {
            field,
            fast_field_serializer,
            doc_id_mapping,
            &segment_and_ff_readers
                .iter()
                .map(|(segment_reader, u64s_reader)| {
                    (*segment_reader, u64s_reader.get_index_reader())
                })
                .collect::<Vec<_>>(),
            &segment_and_ff_readers,
        )
    }

@@ -717,12 +697,7 @@ impl IndexMerger {
            field,
            fast_field_serializer,
            doc_id_mapping,
            &segment_and_ff_readers
                .iter()
                .map(|(segment_reader, u64s_reader)| {
                    (*segment_reader, u64s_reader.get_index_reader())
                })
                .collect::<Vec<_>>(),
            &segment_and_ff_readers,
        )?;

        let mut serialize_vals = fast_field_serializer.new_bytes_fast_field(field);

@@ -829,7 +804,7 @@ impl IndexMerger {
        // Let's compute the list of non-empty posting lists
        for (segment_ord, term_info) in merged_terms.current_segment_ords_and_term_infos() {
            let segment_reader = &self.readers[segment_ord];
            let inverted_index: &InvertedIndexReader = &field_readers[segment_ord];
            let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
            let segment_postings = inverted_index
                .read_postings_from_terminfo(&term_info, segment_postings_option)?;
            let alive_bitset_opt = segment_reader.alive_bitset();
@@ -158,7 +158,6 @@ impl SegmentWriter {
|
||||
let doc_id = self.max_doc;
|
||||
let vals_grouped_by_field = doc
|
||||
.field_values()
|
||||
.iter()
|
||||
.sorted_by_key(|el| el.field())
|
||||
.group_by(|el| el.field());
|
||||
for (field, field_values) in &vals_grouped_by_field {
|
||||
@@ -502,9 +501,17 @@ mod tests {
|
||||
let reader = StoreReader::open(directory.open_read(path).unwrap(), 0).unwrap();
|
||||
let doc = reader.get(0).unwrap();
|
||||
|
||||
assert_eq!(doc.field_values().len(), 2);
assert_eq!(doc.field_values()[0].value().as_text(), Some("A"));
assert_eq!(doc.field_values()[1].value().as_text(), Some("title"));
assert_eq!(doc.value_count(), 2);
let mut field_value_iter = doc.field_values();
assert_eq!(
field_value_iter.next().unwrap().value().as_text(),
Some("A")
);
assert_eq!(
field_value_iter.next().unwrap().value().as_text(),
Some("title")
);
assert!(field_value_iter.next().is_none());
}

#[test]
@@ -833,20 +840,23 @@ mod tests {
// This is a bit of a contrived example.
let tokens = PreTokenizedString {
text: "contrived-example".to_string(), //< I can't think of a use case where this corner case happens in real life.
tokens: vec![Token { // Not the last token, yet ends after the last token.
offset_from: 0,
offset_to: 14,
position: 0,
text: "long_token".to_string(),
position_length: 3,
},
Token {
offset_from: 0,
offset_to: 14,
position: 1,
text: "short".to_string(),
position_length: 1,
}],
tokens: vec![
Token {
// Not the last token, yet ends after the last token.
offset_from: 0,
offset_to: 14,
position: 0,
text: "long_token".to_string(),
position_length: 3,
},
Token {
offset_from: 0,
offset_to: 14,
position: 1,
text: "short".to_string(),
position_length: 1,
},
],
};
doc.add_pre_tokenized_text(text, tokens);
doc.add_text(text, "hello");

@@ -3,7 +3,7 @@ use std::cmp;
use fastfield_codecs::Column;

use super::flat_map_with_buffer::FlatMapWithBufferIter;
use crate::fastfield::{MultiValueIndex, MultiValuedFastFieldReader};
use crate::fastfield::{MultiValueLength, MultiValuedFastFieldReader};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::{DocAddress, SegmentReader};
@@ -94,17 +94,17 @@ impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
}
}

pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a> {
pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a, T: MultiValueLength> {
doc_id_mapping: &'a SegmentDocIdMapping,
multi_value_length_readers: Vec<&'a MultiValueIndex>,
multi_value_length_readers: Vec<&'a T>,
min_value: u64,
max_value: u64,
num_vals: u32,
}

impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
impl<'a, T: MultiValueLength> RemappedDocIdMultiValueIndexColumn<'a, T> {
pub(crate) fn new(
segment_and_ff_readers: &'a [(&'a SegmentReader, &'a MultiValueIndex)],
segment_and_ff_readers: &'a [(&'a SegmentReader, T)],
doc_id_mapping: &'a SegmentDocIdMapping,
) -> Self {
// We go through a complete first pass to compute the minimum and the
@@ -115,19 +115,17 @@ impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
for segment_and_ff_reader in segment_and_ff_readers {
let segment_reader = segment_and_ff_reader.0;
let multi_value_length_reader = segment_and_ff_reader.1;
let multi_value_length_reader = &segment_and_ff_reader.1;
if !segment_reader.has_deletes() {
max_value += multi_value_length_reader.total_num_vals();
max_value += multi_value_length_reader.get_total_len();
} else {
for doc in segment_reader.doc_ids_alive() {
max_value += multi_value_length_reader.num_vals_for_doc(doc) as u64;
max_value += multi_value_length_reader.get_len(doc);
}
}
num_vals += segment_reader.num_docs();
multi_value_length_readers.push(multi_value_length_reader);
}
// The value range is always get_val(doc)..get_val(doc + 1)
num_vals += 1;
Self {
doc_id_mapping,
multi_value_length_readers,
@@ -138,7 +136,7 @@ impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
}
}

impl<'a> Column for RemappedDocIdMultiValueIndexColumn<'a> {
impl<'a, T: MultiValueLength + Send + Sync> Column for RemappedDocIdMultiValueIndexColumn<'a, T> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}
@@ -150,8 +148,8 @@ impl<'a> Column for RemappedDocIdMultiValueIndexColumn<'a> {
move |old_doc_addr| {
let ff_reader =
&self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
offset += ff_reader.num_vals_for_doc(old_doc_addr.doc_id);
offset as u64
offset += ff_reader.get_len(old_doc_addr.doc_id);
offset
},
)),
)

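Note on the refactor above: `MultiValueLength` reduces each reader to per-document value counts, and the merged index column is just the running sum of those counts in remapped doc order (hence the extra `num_vals += 1` sentinel entry). A minimal sketch of that idea — the trait shape is inferred from the `get_len`/`get_total_len` calls in this diff, not copied from tantivy:

    // Sketch: an offsets column built from per-doc value counts.
    // The values of `doc` live at positions get_val(doc)..get_val(doc + 1),
    // which is why the column needs num_docs + 1 entries.
    trait MultiValueLength {
        fn get_len(&self, doc: u32) -> u64;
        fn get_total_len(&self) -> u64;
    }

    fn offsets<T: MultiValueLength>(reader: &T, num_docs: u32) -> Vec<u64> {
        let mut out = Vec::with_capacity(num_docs as usize + 1);
        let mut acc = 0u64;
        out.push(acc);
        for doc in 0..num_docs {
            acc += reader.get_len(doc);
            out.push(acc);
        }
        out
    }
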
@@ -33,7 +33,7 @@ where
&'a self,
term_dict: &'a TermDictionary,
) -> io::Result<TermStreamer<'a, &'a A>> {
let automaton: &A = &self.automaton;
let automaton: &A = &*self.automaton;
let term_stream_builder = term_dict.search(automaton);
term_stream_builder.into_stream()
}

@@ -5,7 +5,7 @@ use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
use crate::query::term_query::TermScorer;
use crate::query::weight::{for_each_docset, for_each_pruning_scorer, for_each_scorer};
use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
use crate::query::{
intersect_scorers, EmptyScorer, Exclude, Explanation, Occur, RequiredOptionalScorer, Scorer,
Union, Weight,
@@ -219,24 +219,6 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
Ok(())
}

fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = Union::build(term_scorers, &self.score_combiner_fn);
for_each_docset(&mut union_scorer, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_docset(scorer.as_mut(), callback);
}
}
Ok(())
}

/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///

@@ -31,7 +31,7 @@ pub struct MoreLikeThisQuery {
#[derive(Debug, PartialEq, Clone)]
enum TargetDocument {
DocumentAdress(DocAddress),
DocumentFields(Vec<(Field, Vec<Value>)>),
DocumentFields(Vec<(Field, Vec<Value<'static>>)>),
}

impl MoreLikeThisQuery {
@@ -160,7 +160,10 @@ impl MoreLikeThisQueryBuilder {
/// that will be used to compose the resulting query.
/// This interface is meant to be used when you want to provide your own set of fields
/// not necessarily from a specific document.
pub fn with_document_fields(self, doc_fields: Vec<(Field, Vec<Value>)>) -> MoreLikeThisQuery {
pub fn with_document_fields(
self,
doc_fields: Vec<(Field, Vec<Value<'static>>)>,
) -> MoreLikeThisQuery {
MoreLikeThisQuery {
mlt: self.mlt,
target: TargetDocument::DocumentFields(doc_fields),

@@ -11,7 +11,6 @@ use fastfield_codecs::{Column, MonotonicallyMappableToU128};

use super::range_query::map_bound;
use super::{ConstScorer, Explanation, Scorer, Weight};
use crate::fastfield::MultiValuedU128FastFieldReader;
use crate::schema::{Cardinality, Field};
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError, TERMINATED};

@@ -44,29 +43,16 @@ impl Weight for IPFastFieldRangeWeight {
let field_type = reader.schema().get_field_entry(self.field).field_type();
match field_type.fastfield_cardinality().unwrap() {
Cardinality::SingleValue => {
let ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>> =
reader.fast_fields().ip_addr(self.field)?;
let ip_addr_fast_field = reader.fast_fields().ip_addr(self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,
ip_addr_fast_field.min_value(),
ip_addr_fast_field.max_value(),
ip_addr_fast_field.as_ref(),
);
let docset = IpRangeDocSet::new(value_range, ip_addr_fast_field);
Ok(Box::new(ConstScorer::new(docset, boost)))
}
Cardinality::MultiValues => {
let ip_addr_fast_field: MultiValuedU128FastFieldReader<Ipv6Addr> =
reader.fast_fields().ip_addrs(self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,
ip_addr_fast_field.min_value(),
ip_addr_fast_field.max_value(),
);
let docset = IpRangeDocSet::new(value_range, Arc::new(ip_addr_fast_field));
Ok(Box::new(ConstScorer::new(docset, boost)))
}
Cardinality::MultiValues => unimplemented!(),
}
}

@@ -87,19 +73,18 @@ impl Weight for IPFastFieldRangeWeight {
fn bound_to_value_range(
left_bound: &Bound<Ipv6Addr>,
right_bound: &Bound<Ipv6Addr>,
min_value: Ipv6Addr,
max_value: Ipv6Addr,
column: &dyn Column<Ipv6Addr>,
) -> RangeInclusive<Ipv6Addr> {
let start_value = match left_bound {
Bound::Included(ip_addr) => *ip_addr,
Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() + 1),
Bound::Unbounded => min_value,
Bound::Unbounded => column.min_value(),
};

let end_value = match right_bound {
Bound::Included(ip_addr) => *ip_addr,
Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() - 1),
Bound::Unbounded => max_value,
Bound::Unbounded => column.max_value(),
};
start_value..=end_value
}
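`bound_to_value_range` folds the three `Bound` cases into one inclusive range: exclusive bounds move one step in u128 space, and unbounded sides now come from the column itself rather than from separate `min_value`/`max_value` arguments. A small hand-checked example, using std's `Ipv6Addr`/`u128` conversions in place of the crate's `to_u128`:

    #[test]
    fn excluded_bound_shifts_by_one() {
        use std::net::Ipv6Addr;
        use std::ops::Bound;

        let excluded = Bound::Excluded(Ipv6Addr::from(1u128));
        let start = match excluded {
            Bound::Included(ip) => ip,
            Bound::Excluded(ip) => Ipv6Addr::from(u128::from(ip) + 1),
            Bound::Unbounded => Ipv6Addr::from(0u128), // stand-in for column.min_value()
        };
        // An exclusive lower bound of ::1 becomes an inclusive bound of ::2.
        assert_eq!(start, Ipv6Addr::from(2u128));
    }
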
@@ -124,23 +109,22 @@ impl VecCursor {
fn current(&self) -> Option<u32> {
self.docs.get(self.current_pos).map(|el| *el as u32)
}

fn get_cleared_data(&mut self) -> &mut Vec<u32> {
self.docs.clear();
self.current_pos = 0;
&mut self.docs
}
fn last_value(&self) -> Option<u32> {
self.docs.iter().last().cloned()
}

fn is_empty(&self) -> bool {
self.current_pos >= self.docs.len()
}
}

struct IpRangeDocSet<T> {
struct IpRangeDocSet {
/// The range filter on the values.
value_range: RangeInclusive<Ipv6Addr>,
ip_addrs: T,
ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>,
/// The next docid start range to fetch (inclusive).
next_fetch_start: u32,
/// Number of docs range checked in a batch.
@@ -157,17 +141,18 @@ struct IpRangeDocSet<T> {
last_seek_pos_opt: Option<u32>,
}

const DEFAULT_FETCH_HORIZON: u32 = 128;
impl<T> IpRangeDocSet<T>
where Self: SingleOrMultivalued
{
fn new(value_range: RangeInclusive<Ipv6Addr>, ip_addrs: T) -> Self {
const DEFALT_FETCH_HORIZON: u32 = 128;
impl IpRangeDocSet {
fn new(
value_range: RangeInclusive<Ipv6Addr>,
ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>,
) -> Self {
let mut ip_range_docset = Self {
value_range,
ip_addrs,
ip_addr_fast_field,
loaded_docs: VecCursor::new(),
next_fetch_start: 0,
fetch_horizon: DEFAULT_FETCH_HORIZON,
fetch_horizon: DEFALT_FETCH_HORIZON,
last_seek_pos_opt: None,
};
ip_range_docset.reset_fetch_range();
@@ -176,7 +161,7 @@ where Self: SingleOrMultivalued
}

fn reset_fetch_range(&mut self) {
self.fetch_horizon = DEFAULT_FETCH_HORIZON;
self.fetch_horizon = DEFALT_FETCH_HORIZON;
}

/// Returns true if more data could be fetched
@@ -200,72 +185,36 @@ where Self: SingleOrMultivalued
true
}
}
}

trait SingleOrMultivalued {
fn num_docs(&self) -> u32;
/// Fetches a block for docid range [next_fetch_start .. next_fetch_start + HORIZON]
fn fetch_horizon(&mut self, horizon: u32) -> bool {
// Have different implem for single value and multivalue
todo!();
// let mut finished_to_end = false;
let mut finished_to_end = false;

// let limit = self.num_docs();
// let mut end = self.next_fetch_start + horizon;
// if end >= limit {
// end = limit;
// finished_to_end = true;
// }
let limit = self.ip_addr_fast_field.num_vals();
let mut end = self.next_fetch_start + horizon;
if end >= limit {
end = limit;
finished_to_end = true;
}

// let last_loaded_docs_val = self
// .is_multivalue
// .then(|| self.loaded_docs.last_value())
// .flatten();

// let last_loaded_docs_val =
// if self.is_multivalue {
// self.loaded_docs.last_value()
// } else {
// None
// };

// let loaded_docs_data = self.loaded_docs.get_cleared_data();
// self.ip_addr_fast_field.get_docids_for_value_range(
// self.value_range.clone(),
// self.next_fetch_start..end,
// loaded_docs_data,
// );
// // In case of multivalues, we may have an overlap of the same docid between fetching
// blocks if let Some(last_value) = last_loaded_docs_val {
// while self.loaded_docs.current() == Some(last_value) {
// self.loaded_docs.next();
// }
// }
// self.next_fetch_start = end;
// finished_to_end
let data = self.loaded_docs.get_cleared_data();
self.ip_addr_fast_field.get_positions_for_value_range(
self.value_range.clone(),
self.next_fetch_start..end,
data,
);
self.next_fetch_start = end;
finished_to_end
}
}

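The de-genericized `fetch_horizon` above keeps the same batching scheme: scan docids in `[next_fetch_start, end)` windows, buffer the hits, and let `advance` drain the buffer. Stripped to its skeleton, with a plain closure standing in for `get_positions_for_value_range`:

    struct BlockFetcher {
        loaded_docs: Vec<u32>,
        next_fetch_start: u32,
        limit: u32,
    }

    impl BlockFetcher {
        /// Returns true once the scan has reached the end of the docid space.
        fn fetch_horizon(&mut self, horizon: u32, matches: impl Fn(u32) -> bool) -> bool {
            let end = (self.next_fetch_start + horizon).min(self.limit);
            self.loaded_docs.clear();
            self.loaded_docs
                .extend((self.next_fetch_start..end).filter(|&doc| matches(doc)));
            self.next_fetch_start = end;
            end == self.limit
        }
    }
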
impl SingleOrMultivalued for IpRangeDocSet<Arc<dyn Column<Ipv6Addr>>> {
fn num_docs(&self) -> u32 {
self.ip_addrs.num_docs()
}
}

impl SingleOrMultivalued for IpRangeDocSet<Arc<MultiValuedU128FastFieldReader<Ipv6Addr>>> {
fn num_docs(&self) -> u32 {
self.ip_addrs.get_index_reader().num_docs()
}
}

impl<T: Send> DocSet for IpRangeDocSet<T>
where Self: SingleOrMultivalued
{
impl DocSet for IpRangeDocSet {
#[inline]
fn advance(&mut self) -> DocId {
if let Some(docid) = self.loaded_docs.next() {
docid as u32
} else {
if self.next_fetch_start >= self.num_docs() as u32 {
if self.next_fetch_start >= self.ip_addr_fast_field.num_vals() as u32 {
return TERMINATED;
}
self.fetch_block();
@@ -320,7 +269,7 @@ mod tests {
use super::*;
use crate::collector::Count;
use crate::query::QueryParser;
use crate::schema::{IpAddrOptions, Schema, FAST, INDEXED, STORED, STRING};
use crate::schema::{Schema, FAST, INDEXED, STORED, STRING};
use crate::Index;

#[derive(Clone, Debug)]
@@ -331,13 +280,12 @@ mod tests {

fn operation_strategy() -> impl Strategy<Value = Doc> {
prop_oneof![
(0u64..10_000u64).prop_map(doc_from_id_1),
(1u64..10_000u64).prop_map(doc_from_id_2),
(0u64..100u64).prop_map(doc_from_id_1),
(1u64..100u64).prop_map(doc_from_id_2),
]
}

pub fn doc_from_id_1(id: u64) -> Doc {
let id = id * 1000;
Doc {
// ip != id
id: id.to_string(),
@@ -345,7 +293,6 @@ mod tests {
}
}
fn doc_from_id_2(id: u64) -> Doc {
let id = id * 1000;
Doc {
// ip != id
id: (id - 1).to_string(),
@@ -363,12 +310,6 @@ mod tests {

#[test]
fn ip_range_regression1_test() {
let ops = vec![doc_from_id_1(0)];
assert!(test_ip_range_for_docs(ops).is_ok());
}

#[test]
fn ip_range_regression2_test() {
let ops = vec![
doc_from_id_1(52),
doc_from_id_1(63),
@@ -380,20 +321,14 @@ mod tests {
}

#[test]
fn ip_range_regression3_test() {
let ops = vec![doc_from_id_1(1), doc_from_id_1(2), doc_from_id_1(3)];
fn ip_range_regression2_test() {
let ops = vec![doc_from_id_1(0)];
assert!(test_ip_range_for_docs(ops).is_ok());
}

pub fn create_index_from_docs(docs: &[Doc]) -> Index {
let mut schema_builder = Schema::builder();
let ip_field = schema_builder.add_ip_addr_field("ip", INDEXED | STORED | FAST);
let ips_field = schema_builder.add_ip_addr_field(
"ips",
IpAddrOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed(),
);
let text_field = schema_builder.add_text_field("id", STRING | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -403,8 +338,6 @@ mod tests {
for doc in docs.iter() {
index_writer
.add_document(doc!(
ips_field => doc.ip,
ips_field => doc.ip,
ip_field => doc.ip,
text_field => doc.id.to_string(),
))
@@ -428,8 +361,8 @@ mod tests {
.unwrap()
};

let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
format!("{}:[{} TO {}]", field, &from.to_string(), &to.to_string())
let gen_query_inclusive = |from: Ipv6Addr, to: Ipv6Addr| {
format!("ip:[{} TO {}]", &from.to_string(), &to.to_string())
};

let test_sample = |sample_docs: Vec<Doc>| {
@@ -440,10 +373,7 @@ mod tests {
.filter(|doc| (ips[0]..=ips[1]).contains(&doc.ip))
.count();

let query = gen_query_inclusive("ip", ips[0], ips[1]);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);

let query = gen_query_inclusive("ips", ips[0], ips[1]);
let query = gen_query_inclusive(ips[0], ips[1]);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);

// Intersection search
@@ -452,20 +382,7 @@ mod tests {
.iter()
.filter(|doc| (ips[0]..=ips[1]).contains(&doc.ip) && doc.id == id_filter)
.count();
let query = format!(
"{} AND id:{}",
gen_query_inclusive("ip", ips[0], ips[1]),
&id_filter
);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);

// Intersection search on multivalue ip field
let id_filter = sample_docs[0].id.to_string();
let query = format!(
"{} AND id:{}",
gen_query_inclusive("ips", ips[0], ips[1]),
&id_filter
);
let query = format!("{} AND id:{}", query, &id_filter);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);
};

@@ -485,8 +402,7 @@ mod tests {
#[cfg(all(test, feature = "unstable"))]
mod bench {

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand::{thread_rng, Rng};
use test::Bencher;

use super::tests::*;
@@ -496,7 +412,7 @@ mod bench {
use crate::Index;

fn get_index_0_to_100() -> Index {
let mut rng = StdRng::from_seed([1u8; 32]);
let mut rng = thread_rng();
let num_vals = 100_000;
let docs: Vec<_> = (0..num_vals)
.map(|_i| {
@@ -508,10 +424,8 @@ mod bench {
"many".to_string() // 90%
};
Doc {
id,
id: id,
// Multiply by 1000, so that we create many buckets in the compact space
// The benches depend on this range to select n-percent of elements with the
// methods below.
ip: Ipv6Addr::from_u128(rng.gen_range(0..100) * 1000),
}
})
@@ -520,42 +434,22 @@ mod bench {
let index = create_index_from_docs(&docs);
index
}

fn get_90_percent() -> RangeInclusive<Ipv6Addr> {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);
start..=end
}

fn get_10_percent() -> RangeInclusive<Ipv6Addr> {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);
start..=end
}

fn get_1_percent() -> RangeInclusive<Ipv6Addr> {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);
start..=end
}

fn excute_query(
field: &str,
ip_range: RangeInclusive<Ipv6Addr>,
start_inclusive: Ipv6Addr,
end_inclusive: Ipv6Addr,
suffix: &str,
index: &Index,
) -> usize {
let gen_query_inclusive = |from: &Ipv6Addr, to: &Ipv6Addr| {
let gen_query_inclusive = |from: Ipv6Addr, to: Ipv6Addr| {
format!(
"{}:[{} TO {}] {}",
field,
"ip:[{} TO {}] {}",
&from.to_string(),
&to.to_string(),
suffix
)
};

let query = gen_query_inclusive(ip_range.start(), ip_range.end());
let query = gen_query_inclusive(start_inclusive, end_inclusive);
let query_from_text = |text: &str| {
QueryParser::for_index(&index, vec![])
.parse_query(text)
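The selectivity names in the benches below follow from the data shape: `ip` is drawn uniformly from `{0, 1000, ..., 99_000}`, so an inclusive range `[0, k * 1000]` hits roughly k percent of documents (k + 1 of the 100 buckets, to be exact). For instance:

    #[test]
    fn ten_percent_range_covers_eleven_buckets() {
        // [0, 10_000] covers buckets 0..=10, i.e. 11 of 100 values => ~10%.
        let hit_buckets = (0u128..100).filter(|k| k * 1000 <= 10_000).count();
        assert_eq!(hit_buckets, 11);
    }
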
@@ -571,153 +465,131 @@ mod bench {
fn bench_ip_range_hit_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_90_percent(), "", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);

excute_query(start, end, "", &index)
});
}

#[bench]
fn bench_ip_range_hit_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_10_percent(), "", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);

excute_query(start, end, "", &index)
});
}

#[bench]
fn bench_ip_range_hit_1_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_1_percent(), "", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);

excute_query(start, end, "", &index)
});
}

#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_10_percent(), "AND id:few", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);

excute_query(start, end, "AND id:few", &index)
});
}

#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_1_percent(), "AND id:few", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);

excute_query(start, end, "AND id:few", &index)
});
}

#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_1_percent(), "AND id:many", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);

excute_query(start, end, "AND id:many", &index)
});
}

#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_1_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_1_percent(), "AND id:veryfew", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);

excute_query(start, end, "AND id:veryfew", &index)
});
}

#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_10_percent(), "AND id:many", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);

excute_query(start, end, "AND id:many", &index)
});
}

#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_90_percent(), "AND id:many", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);

excute_query(start, end, "AND id:many", &index)
});
}

#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_90_percent(), "AND id:few", &index));
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);

excute_query(start, end, "AND id:few", &index)
});
}

#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_1_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ip", get_90_percent(), "AND id:veryfew", &index));
}
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);

#[bench]
fn bench_ip_range_hit_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_90_percent(), "", &index));
}

#[bench]
fn bench_ip_range_hit_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_10_percent(), "", &index));
}

#[bench]
fn bench_ip_range_hit_1_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_1_percent(), "", &index));
}

#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_10_percent(), "AND id:few", &index));
}

#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_1_percent(), "AND id:few", &index));
}

#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_1_percent(), "AND id:many", &index));
}

#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_1_percent(), "AND id:veryfew", &index));
}

#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_10_percent(), "AND id:many", &index));
}

#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_90_percent(), "AND id:many", &index));
}

#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_90_percent(), "AND id:few", &index));
}

#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();

bench.iter(|| excute_query("ips", get_90_percent(), "AND id:veryfew", &index));
excute_query(start, end, "AND id:veryfew", &index)
});
}
}

@@ -5,7 +5,7 @@ use crate::fieldnorm::FieldNormReader;
use crate::postings::SegmentPostings;
use crate::query::bm25::Bm25Weight;
use crate::query::explanation::does_not_match;
use crate::query::weight::{for_each_docset, for_each_scorer};
use crate::query::weight::for_each_scorer;
use crate::query::{Explanation, Scorer, Weight};
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, Term};
@@ -56,18 +56,6 @@ impl Weight for TermWeight {
Ok(())
}

/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let mut scorer = self.specialized_scorer(reader, 1.0)?;
for_each_docset(&mut scorer, callback);
Ok(())
}

/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///

@@ -94,8 +94,8 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Union<TScorer, TScoreCombin
self.doc = min_doc;
refill(
&mut self.docsets,
&mut self.bitsets,
&mut self.scores,
&mut *self.bitsets,
&mut *self.scores,
min_doc,
);
true

@@ -1,10 +1,10 @@
use super::Scorer;
use crate::core::SegmentReader;
use crate::query::Explanation;
use crate::{DocId, DocSet, Score, TERMINATED};
use crate::{DocId, Score, TERMINATED};

/// Iterates through all of the documents and scores matched by the DocSet
/// `DocSet`.
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
callback: &mut dyn FnMut(DocId, Score),
@@ -16,16 +16,6 @@ pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
}
}

/// Iterates through all of the documents matched by the DocSet
/// `DocSet`.
pub(crate) fn for_each_docset<T: DocSet + ?Sized>(docset: &mut T, callback: &mut dyn FnMut(DocId)) {
let mut doc = docset.doc();
while doc != TERMINATED {
callback(doc);
doc = docset.advance();
}
}

/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
@@ -88,18 +78,6 @@ pub trait Weight: Send + Sync + 'static {
Ok(())
}

/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let mut docset = self.scorer(reader, 1.0)?;
for_each_docset(docset.as_mut(), callback);
Ok(())
}

/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///

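For reference, the removed `for_each_no_score` machinery hinged on `for_each_docset`, which walks a docset with `advance` and never calls `score()` — that is the optimization this branch backs out. Its shape, reproduced from the deleted hunk with a toy docset in place of tantivy's `DocSet` trait:

    const TERMINATED: u32 = u32::MAX;

    struct VecDocSet {
        docs: Vec<u32>,
        cursor: usize,
    }

    impl VecDocSet {
        fn doc(&self) -> u32 {
            self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
        }
        fn advance(&mut self) -> u32 {
            self.cursor += 1;
            self.doc()
        }
    }

    // Same loop as the deleted for_each_docset: visit every doc, no scoring.
    fn for_each_docset(docset: &mut VecDocSet, callback: &mut dyn FnMut(u32)) {
        let mut doc = docset.doc();
        while doc != TERMINATED {
            callback(doc);
            doc = docset.advance();
        }
    }
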
@@ -1,35 +1,105 @@
use std::collections::{HashMap, HashSet};
use std::io::{self, Read, Write};
use std::mem;
use std::net::Ipv6Addr;
use std::sync::Arc;
use std::{fmt, mem};

use common::{BinarySerializable, VInt};
use itertools::Either;
use yoke::erased::ErasedArcCart;
use yoke::Yoke;

use super::*;
use crate::schema::value::MaybeOwnedString;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime;

/// A group of FieldValue sharing an underlying storage
///
/// Or a single owned FieldValue.
#[derive(Clone)]
enum FieldValueGroup {
Single(FieldValue<'static>),
Group(Yoke<VecFieldValue<'static>, ErasedArcCart>),
}

// this NewType is required to make it possible to yoke a vec with non 'static inner values.
#[derive(yoke::Yokeable, Clone)]
struct VecFieldValue<'a>(Vec<FieldValue<'a>>);

impl<'a> std::ops::Deref for VecFieldValue<'a> {
type Target = Vec<FieldValue<'a>>;

fn deref(&self) -> &Self::Target {
&self.0
}
}

impl<'a> From<Vec<FieldValue<'a>>> for VecFieldValue<'a> {
fn from(field_values: Vec<FieldValue>) -> VecFieldValue {
VecFieldValue(field_values)
}
}

impl FieldValueGroup {
fn iter(&self) -> impl Iterator<Item = &FieldValue> {
match self {
FieldValueGroup::Single(field_value) => Either::Left(std::iter::once(field_value)),
FieldValueGroup::Group(field_values) => Either::Right(field_values.get().iter()),
}
}

fn count(&self) -> usize {
match self {
FieldValueGroup::Single(_) => 1,
FieldValueGroup::Group(field_values) => field_values.get().len(),
}
}
}

impl From<Vec<FieldValue<'static>>> for FieldValueGroup {
fn from(field_values: Vec<FieldValue<'static>>) -> FieldValueGroup {
FieldValueGroup::Group(
Yoke::new_always_owned(field_values.into())
.wrap_cart_in_arc()
.erase_arc_cart(),
)
}
}

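`FieldValueGroup::Group` relies on `yoke` to bundle borrowed `FieldValue<'_>`s with the `Arc`-owned buffer they borrow from, type-erased behind `ErasedArcCart` so one variant fits any storage. The derive-based pattern in miniature, with a plain `String` cart rather than tantivy's types:

    use std::sync::Arc;
    use yoke::Yoke;

    #[derive(yoke::Yokeable)]
    struct Parts<'a>(Vec<&'a str>);

    // The yoke carries the Arc and the borrowed views into it as one movable value.
    fn keep_parts(storage: String) -> Yoke<Parts<'static>, Arc<String>> {
        Yoke::attach_to_cart(Arc::new(storage), |s: &String| {
            Parts(s.split(',').collect())
        })
    }
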
/// Tantivy's Document is the object that can
/// be indexed and then searched for.
///
/// Documents are fundamentally a collection of unordered couples `(field, value)`.
/// In this list, one field may appear more than once.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)]
#[derive(Clone, Default)]
// TODO bring back Ser/De and Debug
//#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)]
//#[serde(bound(deserialize = "'static: 'de, 'de: 'static"))]
pub struct Document {
field_values: Vec<FieldValue>,
field_values: Vec<FieldValueGroup>,
}

impl From<Vec<FieldValue>> for Document {
fn from(field_values: Vec<FieldValue>) -> Self {
impl fmt::Debug for Document {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
todo!()
}
}

impl From<Vec<FieldValue<'static>>> for Document {
fn from(field_values: Vec<FieldValue<'static>>) -> Self {
let field_values = vec![field_values.into()];
Document { field_values }
}
}
impl PartialEq for Document {
fn eq(&self, other: &Document) -> bool {
// super slow, but only here for tests
let convert_to_comparable_map = |field_values: &[FieldValue]| {
let convert_to_comparable_map = |field_values| {
let mut field_value_set: HashMap<Field, HashSet<String>> = Default::default();
for field_value in field_values.iter() {
for field_value in field_values {
// for some reason rustc fails to guess the type
let field_value: &FieldValue = field_value;
let json_val = serde_json::to_string(field_value.value()).unwrap();
field_value_set
.entry(field_value.field())
@@ -39,9 +109,9 @@ impl PartialEq for Document {
field_value_set
};
let self_field_values: HashMap<Field, HashSet<String>> =
convert_to_comparable_map(&self.field_values);
convert_to_comparable_map(self.field_values());
let other_field_values: HashMap<Field, HashSet<String>> =
convert_to_comparable_map(&other.field_values);
convert_to_comparable_map(other.field_values());
self_field_values.eq(&other_field_values)
}
}
@@ -49,12 +119,13 @@ impl PartialEq for Document {
impl Eq for Document {}

impl IntoIterator for Document {
type Item = FieldValue;
type Item = FieldValue<'static>;

type IntoIter = std::vec::IntoIter<FieldValue>;
type IntoIter = std::vec::IntoIter<FieldValue<'static>>;

fn into_iter(self) -> Self::IntoIter {
self.field_values.into_iter()
todo!()
// self.field_values.into_iter()
}
}

@@ -84,7 +155,7 @@ impl Document {

/// Add a text field.
pub fn add_text<S: ToString>(&mut self, field: Field, text: S) {
let value = Value::Str(text.to_string());
let value = Value::Str(MaybeOwnedString::from_string(text.to_string()));
self.add_field_value(field, value);
}

@@ -138,15 +209,35 @@ impl Document {
}

/// Add a (field, value) to the document.
pub fn add_field_value<T: Into<Value>>(&mut self, field: Field, typed_val: T) {
pub fn add_field_value<T: Into<Value<'static>>>(&mut self, field: Field, typed_val: T) {
let value = typed_val.into();
let field_value = FieldValue { field, value };
self.field_values.push(field_value);
self.field_values.push(FieldValueGroup::Single(field_value));
}

/// Add multiple borrowed values, also taking the container they're borrowing from
// TODO add a try_ variant?
pub fn add_borrowed_values<T, F>(&mut self, storage: T, f: F)
where
T: Send + Sync + 'static,
F: FnOnce(&T) -> Vec<FieldValue>,
{
let yoke =
Yoke::attach_to_cart(Arc::new(storage), |storage| f(storage).into()).erase_arc_cart();

self.field_values.push(FieldValueGroup::Group(yoke));
}

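Roughly how `add_borrowed_values` is meant to be called — the buffer moves into the document and the closure returns `FieldValue`s that borrow from it, so no per-value `String` is allocated. The field and helper below are illustrative, not from this diff:

    use tantivy::schema::{Field, FieldValue, Value};
    use tantivy::Document;

    // Hypothetical helper: store each line of `body` as one borrowed value.
    fn add_lines(doc: &mut Document, field: Field, body: String) {
        doc.add_borrowed_values(body, |body| {
            body.lines()
                .map(|line| FieldValue::new(field, Value::from(line)))
                .collect()
        });
    }
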
/// field_values accessor
pub fn field_values(&self) -> &[FieldValue] {
&self.field_values
pub fn field_values(&self) -> impl Iterator<Item = &FieldValue> {
self.field_values.iter().flat_map(|group| group.iter())
}

/// Return the total number of values
///
/// More efficient than calling `self.field_values().count()`
pub fn value_count(&self) -> usize {
self.field_values.iter().map(|group| group.count()).sum()
}

/// Sort and groups the field_values by field.
@@ -154,7 +245,7 @@ impl Document {
/// The result of this method is not cached and is
/// computed on the fly when this method is called.
pub fn get_sorted_field_values(&self) -> Vec<(Field, Vec<&Value>)> {
let mut field_values: Vec<&FieldValue> = self.field_values().iter().collect();
let mut field_values: Vec<&FieldValue> = self.field_values().collect();
field_values.sort_by_key(|field_value| field_value.field());

let mut field_values_it = field_values.into_iter();
@@ -189,6 +280,7 @@ impl Document {
pub fn get_all(&self, field: Field) -> impl Iterator<Item = &Value> {
self.field_values
.iter()
.flat_map(|group| group.iter())
.filter(move |field_value| field_value.field() == field)
.map(FieldValue::value)
}
@@ -202,7 +294,6 @@ impl Document {
pub fn serialize_stored<W: Write>(&self, schema: &Schema, writer: &mut W) -> io::Result<()> {
let stored_field_values = || {
self.field_values()
.iter()
.filter(|field_value| schema.get_field_entry(field_value.field()).is_stored())
};
let num_field_values = stored_field_values().count();
@@ -216,7 +307,9 @@ impl Document {
} => {
let field_value = FieldValue {
field: *field,
value: Value::Str(pre_tokenized_text.text.to_string()),
value: Value::Str(MaybeOwnedString::from_string(
pre_tokenized_text.text.to_string(),
)),
};
field_value.serialize(writer)?;
}
@@ -230,7 +323,7 @@ impl Document {
impl BinarySerializable for Document {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
let field_values = self.field_values();
VInt(field_values.len() as u64).serialize(writer)?;
VInt(self.value_count() as u64).serialize(writer)?;
for field_value in field_values {
field_value.serialize(writer)?;
}
@@ -259,7 +352,7 @@ mod tests {
let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = Document::default();
doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1);
assert_eq!(doc.value_count(), 1);
}

#[test]
@@ -273,7 +366,7 @@ mod tests {
.clone(),
);
doc.add_text(Field::from_field_id(1), "hello");
assert_eq!(doc.field_values().len(), 2);
assert_eq!(doc.value_count(), 2);
let mut payload: Vec<u8> = Vec::new();
doc.serialize(&mut payload).unwrap();
assert_eq!(payload.len(), 26);

@@ -9,6 +9,7 @@ use super::ip_options::IpAddrOptions;
use super::{Cardinality, IntoIpv6Addr};
use crate::schema::bytes_options::BytesOptions;
use crate::schema::facet_options::FacetOptions;
use crate::schema::value::MaybeOwnedString;
use crate::schema::{
DateOptions, Facet, IndexRecordOption, JsonObjectOptions, NumericOptions, TextFieldIndexing,
TextOptions, Value,
@@ -329,7 +330,7 @@ impl FieldType {
/// Tantivy will not try to cast values.
/// For instance, If the json value is the integer `3` and the
/// target field is a `Str`, this method will return an Error.
pub fn value_from_json(&self, json: JsonValue) -> Result<Value, ValueParsingError> {
pub fn value_from_json(&self, json: JsonValue) -> Result<Value<'static>, ValueParsingError> {
match json {
JsonValue::String(field_text) => {
match self {
@@ -341,7 +342,7 @@ impl FieldType {
})?;
Ok(DateTime::from_utc(dt_with_fixed_tz).into())
}
FieldType::Str(_) => Ok(Value::Str(field_text)),
FieldType::Str(_) => Ok(Value::Str(MaybeOwnedString::from_string(field_text))),
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) => {
Err(ValueParsingError::TypeError {
expected: "an integer",

@@ -7,12 +7,13 @@ use crate::schema::{Field, Value};
/// `FieldValue` holds together a `Field` and its `Value`.
#[allow(missing_docs)]
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct FieldValue {
#[serde(bound(deserialize = "'a: 'de, 'de: 'a"))]
pub struct FieldValue<'a> {
pub field: Field,
pub value: Value,
pub value: Value<'a>,
}

impl FieldValue {
impl<'a> FieldValue<'a> {
/// Constructor
pub fn new(field: Field, value: Value) -> FieldValue {
FieldValue { field, value }
@@ -29,13 +30,13 @@ impl FieldValue {
}
}

impl From<FieldValue> for Value {
fn from(field_value: FieldValue) -> Self {
impl<'a> From<FieldValue<'a>> for Value<'a> {
fn from(field_value: FieldValue<'a>) -> Self {
field_value.value
}
}

impl BinarySerializable for FieldValue {
impl<'a> BinarySerializable for FieldValue<'a> {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.field.serialize(writer)?;
self.value.serialize(writer)

@@ -10,4 +10,5 @@ use crate::schema::Value;
/// A `NamedFieldDocument` is a simple representation of a document
/// as a `BTreeMap<String, Vec<Value>>`.
#[derive(Debug, Deserialize, Serialize)]
pub struct NamedFieldDocument(pub BTreeMap<String, Vec<Value>>);
#[serde(bound(deserialize = "'static: 'de, 'de: 'static"))]
pub struct NamedFieldDocument(pub BTreeMap<String, Vec<Value<'static>>>);

@@ -308,7 +308,11 @@ impl Schema {
let mut field_map = BTreeMap::new();
for (field, field_values) in doc.get_sorted_field_values() {
let field_name = self.get_field_name(field);
let values: Vec<Value> = field_values.into_iter().cloned().collect();
let values: Vec<Value> = field_values
.into_iter()
.cloned()
.map(Value::into_owned)
.collect();
field_map.insert(field_name.to_string(), values);
}
NamedFieldDocument(field_map)
@@ -338,20 +342,21 @@ impl Schema {
if let Some(field) = self.get_field(&field_name) {
let field_entry = self.get_field_entry(field);
let field_type = field_entry.field_type();
// TODO rewrite this with shared allocation?
match json_value {
JsonValue::Array(json_items) => {
for json_item in json_items {
let value = field_type
.value_from_json(json_item)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add_field_value(field, value);
doc.add_field_value(field, value.into_owned());
}
}
_ => {
let value = field_type
.value_from_json(json_value)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add_field_value(field, value);
doc.add_field_value(field, value.into_owned());
}
}
}
@@ -706,7 +711,7 @@ mod tests {
let schema = schema_builder.build();
{
let doc = schema.parse_document("{}").unwrap();
assert!(doc.field_values().is_empty());
assert_eq!(doc.value_count(), 0);
}
{
let doc = schema

@@ -1,6 +1,7 @@
use std::fmt;
use std::net::Ipv6Addr;

pub use not_safe::MaybeOwnedString;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Map;
@@ -12,9 +13,9 @@ use crate::DateTime;
/// Value represents the value of a any field.
/// It is an enum over all over all of the possible field type.
#[derive(Debug, Clone, PartialEq)]
pub enum Value {
pub enum Value<'a> {
/// The str type is used for any text information.
Str(String),
Str(MaybeOwnedString<'a>),
/// Pre-tokenized str type,
PreTokStr(PreTokenizedString),
/// Unsigned 64-bits Integer `u64`
@@ -30,16 +31,38 @@ pub enum Value {
/// Facet
Facet(Facet),
/// Arbitrarily sized byte array
// TODO allow Cow<'a, [u8]>
Bytes(Vec<u8>),
/// Json object value.
// TODO allow Cow keys and borrowed values
JsonObject(serde_json::Map<String, serde_json::Value>),
/// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
IpAddr(Ipv6Addr),
}

impl Eq for Value {}
impl<'a> Value<'a> {
/// Convert a borrowing [`Value`] to an owning one.
pub fn into_owned(self) -> Value<'static> {
use Value::*;
match self {
Str(val) => Str(MaybeOwnedString::from_string(val.into_string())),
PreTokStr(val) => PreTokStr(val),
U64(val) => U64(val),
I64(val) => I64(val),
F64(val) => F64(val),
Bool(val) => Bool(val),
Date(val) => Date(val),
Facet(val) => Facet(val),
Bytes(val) => Bytes(val),
JsonObject(val) => JsonObject(val),
IpAddr(val) => IpAddr(val),
}
}
}

impl Serialize for Value {
impl<'a> Eq for Value<'a> {}

impl<'a> Serialize for Value<'a> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
match *self {
@@ -65,13 +88,13 @@ impl Serialize for Value {
}
}

impl<'de> Deserialize<'de> for Value {
impl<'de> Deserialize<'de> for Value<'de> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> {
struct ValueVisitor;

impl<'de> Visitor<'de> for ValueVisitor {
type Value = Value;
type Value = Value<'de>;

fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a string or u32")
@@ -93,12 +116,13 @@ impl<'de> Deserialize<'de> for Value {
Ok(Value::Bool(v))
}

// TODO add visit_borrowed_str
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> {
Ok(Value::Str(v.to_owned()))
Ok(Value::Str(MaybeOwnedString::from_string(v.to_owned())))
}

fn visit_string<E>(self, v: String) -> Result<Self::Value, E> {
Ok(Value::Str(v))
Ok(Value::Str(MaybeOwnedString::from_string(v)))
}
}

@@ -106,7 +130,7 @@ impl<'de> Deserialize<'de> for Value {
}
}

impl Value {
impl<'a> Value<'a> {
/// Returns the text value, provided the value is of the `Str` type.
/// (Returns `None` if the value is not of the `Str` type).
pub fn as_text(&self) -> Option<&str> {
@@ -224,86 +248,87 @@ impl Value {
}
}

impl From<String> for Value {
fn from(s: String) -> Value {
Value::Str(s)
impl From<String> for Value<'static> {
fn from(s: String) -> Value<'static> {
Value::Str(MaybeOwnedString::from_string(s))
}
}

impl From<Ipv6Addr> for Value {
fn from(v: Ipv6Addr) -> Value {
impl From<Ipv6Addr> for Value<'static> {
fn from(v: Ipv6Addr) -> Value<'static> {
Value::IpAddr(v)
}
}

impl From<u64> for Value {
fn from(v: u64) -> Value {
impl From<u64> for Value<'static> {
fn from(v: u64) -> Value<'static> {
Value::U64(v)
}
}

impl From<i64> for Value {
fn from(v: i64) -> Value {
impl From<i64> for Value<'static> {
fn from(v: i64) -> Value<'static> {
Value::I64(v)
}
}

impl From<f64> for Value {
fn from(v: f64) -> Value {
impl From<f64> for Value<'static> {
fn from(v: f64) -> Value<'static> {
Value::F64(v)
}
}

impl From<bool> for Value {
impl From<bool> for Value<'static> {
fn from(b: bool) -> Self {
Value::Bool(b)
}
}

impl From<DateTime> for Value {
fn from(dt: DateTime) -> Value {
impl From<DateTime> for Value<'static> {
fn from(dt: DateTime) -> Value<'static> {
Value::Date(dt)
}
}

impl<'a> From<&'a str> for Value {
fn from(s: &'a str) -> Value {
Value::Str(s.to_string())
impl<'a> From<&'a str> for Value<'a> {
fn from(s: &'a str) -> Value<'a> {
Value::Str(MaybeOwnedString::from_str(s))
}
}

impl<'a> From<&'a [u8]> for Value {
fn from(bytes: &'a [u8]) -> Value {
// TODO change lifetime to 'a
impl<'a> From<&'a [u8]> for Value<'static> {
fn from(bytes: &'a [u8]) -> Value<'static> {
Value::Bytes(bytes.to_vec())
}
}

impl From<Facet> for Value {
fn from(facet: Facet) -> Value {
impl From<Facet> for Value<'static> {
fn from(facet: Facet) -> Value<'static> {
Value::Facet(facet)
}
}

impl From<Vec<u8>> for Value {
fn from(bytes: Vec<u8>) -> Value {
impl From<Vec<u8>> for Value<'static> {
fn from(bytes: Vec<u8>) -> Value<'static> {
Value::Bytes(bytes)
}
}

impl From<PreTokenizedString> for Value {
fn from(pretokenized_string: PreTokenizedString) -> Value {
impl From<PreTokenizedString> for Value<'static> {
fn from(pretokenized_string: PreTokenizedString) -> Value<'static> {
Value::PreTokStr(pretokenized_string)
}
}

impl From<serde_json::Map<String, serde_json::Value>> for Value {
fn from(json_object: serde_json::Map<String, serde_json::Value>) -> Value {
impl From<serde_json::Map<String, serde_json::Value>> for Value<'static> {
fn from(json_object: serde_json::Map<String, serde_json::Value>) -> Value<'static> {
Value::JsonObject(json_object)
}
}

impl From<serde_json::Value> for Value {
fn from(json_value: serde_json::Value) -> Value {
impl From<serde_json::Value> for Value<'static> {
fn from(json_value: serde_json::Value) -> Value<'static> {
match json_value {
serde_json::Value::Object(json_object) => Value::JsonObject(json_object),
_ => {
@@ -320,7 +345,7 @@ mod binary_serialize {
use common::{f64_to_u64, u64_to_f64, BinarySerializable};
use fastfield_codecs::MonotonicallyMappableToU128;

use super::Value;
use super::{MaybeOwnedString, Value};
use crate::schema::Facet;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime;
@@ -341,12 +366,13 @@ mod binary_serialize {

const TOK_STR_CODE: u8 = 0;

impl BinarySerializable for Value {
impl<'a> BinarySerializable for Value<'a> {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
match *self {
Value::Str(ref text) => {
TEXT_CODE.serialize(writer)?;
text.serialize(writer)
// TODO impl trait for MaybeOwnedString
text.as_str().to_owned().serialize(writer)
}
Value::PreTokStr(ref tok_str) => {
EXT_CODE.serialize(writer)?;
@@ -408,7 +434,7 @@ mod binary_serialize {
match type_code {
TEXT_CODE => {
let text = String::deserialize(reader)?;
Ok(Value::Str(text))
Ok(Value::Str(MaybeOwnedString::from_string(text)))
}
U64_CODE => {
let value = u64::deserialize(reader)?;
@@ -550,3 +576,104 @@ mod tests {
assert_eq!(serialized_value_json, r#""1996-12-20T01:39:57Z""#);
}
}

mod not_safe {
    use std::ops::Deref;

    /// Either a shared or a unique reference to the same `str`; which variant
    /// is live is tracked out-of-band by `MaybeOwnedString::capacity`.
    union Ref<'a, T: ?Sized> {
        shared: &'a T,
        uniq: &'a mut T,
    }

    pub struct MaybeOwnedString<'a> {
        string: Ref<'a, str>,
        /// 0 means borrowed (or an empty owned String, which owns no allocation);
        /// otherwise, the capacity of the owned allocation.
        capacity: usize,
    }

    impl<'a> MaybeOwnedString<'a> {
        pub fn from_str(string: &'a str) -> MaybeOwnedString<'a> {
            MaybeOwnedString {
                string: Ref { shared: string },
                capacity: 0,
            }
        }

        pub fn from_string(mut string: String) -> MaybeOwnedString<'static> {
            string.shrink_to_fit(); // <= actually important for safety; TODO: use the Vec's as_ptr instead

            // Disassemble the String without running its destructor.
            let mut s = std::mem::ManuallyDrop::new(string);
            let ptr = s.as_mut_ptr();
            let len = s.len();
            let capacity = s.capacity();

            let string = unsafe {
                std::str::from_utf8_unchecked_mut(std::slice::from_raw_parts_mut(ptr, len))
            };
            MaybeOwnedString {
                string: Ref { uniq: string },
                capacity,
            }
        }

        pub fn into_string(mut self) -> String {
            if self.capacity != 0 {
                let ptr = unsafe { self.string.uniq.as_mut_ptr() };
                let string = unsafe { String::from_raw_parts(ptr, self.len(), self.capacity) };
                // `self` must not run `Drop` here: it would rebuild a second
                // String over the same allocation and double-free it.
                std::mem::forget(self);
                return string;
            }
            self.deref().to_owned()
        }

        pub fn as_str(&self) -> &str {
            self.deref()
        }
    }

    impl<'a> Deref for MaybeOwnedString<'a> {
        type Target = str;

        #[inline]
        fn deref(&self) -> &str {
            // Both union variants have the same layout, so reading `shared`
            // yields a valid `&str` in either case.
            unsafe { self.string.shared }
        }
    }

    impl<'a> Drop for MaybeOwnedString<'a> {
        fn drop(&mut self) {
            // If capacity is 0, either it's an empty String (no dealloc to do)
            // or it's borrowed.
            if self.capacity != 0 {
                // Rebuild the owning String and let it free the allocation.
                let string = unsafe { &mut self.string.uniq };
                unsafe { String::from_raw_parts(string.as_mut_ptr(), self.len(), self.capacity) };
            }
        }
    }

    impl<'a> Clone for MaybeOwnedString<'a> {
        fn clone(&self) -> Self {
            if self.capacity == 0 {
                // Borrowed: copying the shared reference is enough.
                MaybeOwnedString {
                    string: Ref {
                        shared: unsafe { self.string.shared },
                    },
                    capacity: 0,
                }
            } else {
                // Owned: clone the data into a fresh allocation.
                MaybeOwnedString::from_string(self.deref().to_owned())
            }
        }
    }

    impl<'a> std::fmt::Debug for MaybeOwnedString<'a> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.write_str(self.deref())
        }
    }

    impl<'a> PartialEq for MaybeOwnedString<'a> {
        fn eq(&self, other: &Self) -> bool {
            self.deref() == other.deref()
        }
    }
}

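A quick usage sketch of the type, illustrative only, with the semantics shown above:

let greeting = String::from("hello");
// Borrowed: no allocation, lifetime tied to `greeting`.
let borrowed = MaybeOwnedString::from_str(&greeting);
assert_eq!(borrowed.as_str(), "hello");

// Owned: takes over the String's buffer; `into_string` returns it without copying.
let owned = MaybeOwnedString::from_string(String::from("world"));
assert_eq!(owned.into_string(), "world");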
@@ -2,7 +2,7 @@ use std::io;

use common::{BinarySerializable, FixedSize, HasLen};

use super::{Decompressor, DOC_STORE_VERSION};
use super::Decompressor;
use crate::directory::FileSlice;

#[derive(Debug, Clone, PartialEq)]
@@ -17,7 +17,6 @@ pub struct DocStoreFooter {
/// - reserved for future use: 15 bytes
impl BinarySerializable for DocStoreFooter {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        BinarySerializable::serialize(&DOC_STORE_VERSION, writer)?;
        BinarySerializable::serialize(&self.offset, writer)?;
        BinarySerializable::serialize(&self.decompressor.get_id(), writer)?;
        writer.write_all(&[0; 15])?;
@@ -25,13 +24,6 @@ impl BinarySerializable for DocStoreFooter {
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let doc_store_version = u32::deserialize(reader)?;
        if doc_store_version != DOC_STORE_VERSION {
            panic!(
                "actual doc store version: {}, expected: {}",
                doc_store_version, DOC_STORE_VERSION
            );
        }
        let offset = u64::deserialize(reader)?;
        let compressor_id = u8::deserialize(reader)?;
        let mut skip_buf = [0; 15];
@@ -44,7 +36,7 @@ impl BinarySerializable for DocStoreFooter {
    }

impl FixedSize for DocStoreFooter {
    const SIZE_IN_BYTES: usize = 28;
    const SIZE_IN_BYTES: usize = 24;
}

impl DocStoreFooter {

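The constant change matches the serialized layout above: with the `u32` version field removed, the footer is an 8-byte offset, a 1-byte compressor id, and 15 reserved bytes. A sketch of that arithmetic (the constant names are illustrative, not from the diff):

// 8 (u64 offset) + 1 (u8 compressor id) + 15 (reserved) = 24; the old
// layout added a 4-byte u32 version on top, giving 28.
const FOOTER_BYTES: usize =
    std::mem::size_of::<u64>() + std::mem::size_of::<u8>() + 15;
const _: () = assert!(FOOTER_BYTES == 24);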
@@ -44,9 +44,6 @@ pub use self::reader::{CacheStats, StoreReader};
pub use self::writer::StoreWriter;
mod store_compressor;

/// Doc store version in footer to handle format changes.
pub(crate) const DOC_STORE_VERSION: u32 = 1;

#[cfg(feature = "lz4-compression")]
mod compression_lz4_block;

@@ -229,10 +229,10 @@ fn test_empty_string() -> crate::Result<()> {
    let buffer: Vec<u8> = {
        let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
        term_dictionary_builder
            .insert([], &make_term_info(1_u64))
            .insert(&[], &make_term_info(1_u64))
            .unwrap();
        term_dictionary_builder
            .insert([1u8], &make_term_info(2_u64))
            .insert(&[1u8], &make_term_info(2_u64))
            .unwrap();
        term_dictionary_builder.finish()?
    };
@@ -252,7 +252,7 @@ fn stream_range_test_dict() -> crate::Result<TermDictionary> {
        let mut term_dictionary_builder = TermDictionaryBuilder::create(Vec::new())?;
        for i in 0u8..10u8 {
            let number_arr = [i; 1];
            term_dictionary_builder.insert(number_arr, &make_term_info(i as u64))?;
            term_dictionary_builder.insert(&number_arr, &make_term_info(i as u64))?;
        }
        term_dictionary_builder.finish()?
    };

@@ -10,8 +10,6 @@
//! assert_eq!(stream.next().unwrap().text, "crafty");
//! assert!(stream.next().is_none());
//! ```
use std::sync::Arc;

use rustc_hash::FxHashSet;

use super::{Token, TokenFilter, TokenStream};
@@ -20,15 +18,19 @@ use crate::tokenizer::BoxTokenStream;
/// `TokenFilter` that removes stop words from a token stream
#[derive(Clone)]
pub struct StopWordFilter {
    words: Arc<FxHashSet<String>>,
    words: FxHashSet<String>,
}

impl StopWordFilter {
    /// Creates a `StopWordFilter` given a list of words to remove
    pub fn remove<W: IntoIterator<Item = String>>(words: W) -> StopWordFilter {
        StopWordFilter {
            words: Arc::new(words.into_iter().collect()),
    pub fn remove(words: Vec<String>) -> StopWordFilter {
        let mut set = FxHashSet::default();

        for word in words {
            set.insert(word);
        }

        StopWordFilter { words: set }
    }

    fn english() -> StopWordFilter {
@@ -38,12 +40,12 @@ impl StopWordFilter {
            "there", "these", "they", "this", "to", "was", "will", "with",
        ];

        StopWordFilter::remove(words.iter().map(|&s| s.to_string()))
        StopWordFilter::remove(words.iter().map(|&s| s.to_string()).collect())
    }
}

pub struct StopWordFilterStream<'a> {
    words: Arc<FxHashSet<String>>,
    words: FxHashSet<String>,
    tail: BoxTokenStream<'a>,
}

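With the narrowed signature, callers now pass an owned `Vec<String>` explicitly. A minimal usage sketch against the public tokenizer API (the analyzer chain shown is illustrative, not taken from this diff):

use tantivy::tokenizer::{SimpleTokenizer, StopWordFilter, TextAnalyzer};

let stop_words: Vec<String> = vec!["the".to_string(), "and".to_string()];
let analyzer = TextAnalyzer::from(SimpleTokenizer)
    .filter(StopWordFilter::remove(stop_words));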