mirror of https://github.com/quickwit-oss/tantivy.git
synced 2026-01-06 17:22:54 +00:00

Compare commits: paul.masur ... stuhood.la
10 Commits
| Author | SHA1 | Date |
|---|---|---|
| | db9e35e7ee | |
| | 7f39d5eab9 | |
| | af53ffe5df | |
| | 041c6f01a3 | |
| | 9615eb73b8 | |
| | 77505c3d03 | |
| | 735c588f4f | |
| | 242a1531bf | |
| | 6443b63177 | |
| | 4987495ee4 | |
.github/workflows/test.yml (vendored): 30 changed lines
@@ -39,11 +39,11 @@ jobs:
       - name: Check Formatting
         run: cargo +nightly fmt --all -- --check

       - name: Check Stable Compilation
         run: cargo build --all-features

       - name: Check Bench Compilation
         run: cargo +nightly bench --no-run --profile=dev --all-features
@@ -59,10 +59,10 @@ jobs:
     strategy:
       matrix:
-        features: [
-          { label: "all", flags: "mmap,stopwords,lz4-compression,zstd-compression,failpoints" },
-          { label: "quickwit", flags: "mmap,quickwit,failpoints" }
-        ]
+        features:
+          - { label: "all", flags: "mmap,stopwords,lz4-compression,zstd-compression,failpoints,stemmer" }
+          - { label: "quickwit", flags: "mmap,quickwit,failpoints" }
+          - { label: "none", flags: "" }
     name: test-${{ matrix.features.label}}
@@ -80,7 +80,21 @@ jobs:
       - uses: Swatinem/rust-cache@v2

       - name: Run tests
-        run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace
+        run: |
+          # if matrix.feature.flags is empty then run on --lib to avoid compiling examples
+          # (as most of them rely on mmap) otherwise run all
+          if [ -z "${{ matrix.features.flags }}" ]; then
+            cargo +stable nextest run --lib --no-default-features --verbose --workspace
+          else
+            cargo +stable nextest run --features ${{ matrix.features.flags }} --no-default-features --verbose --workspace
+          fi

       - name: Run doctests
-        run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace
+        run: |
+          # if matrix.feature.flags is empty then run on --lib to avoid compiling examples
+          # (as most of them rely on mmap) otherwise run all
+          if [ -z "${{ matrix.features.flags }}" ]; then
+            echo "no doctest for no feature flag"
+          else
+            cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace
+          fi
@@ -37,7 +37,7 @@ fs4 = { version = "0.13.1", optional = true }
 levenshtein_automata = "0.2.1"
 uuid = { version = "1.0.0", features = ["v4", "serde"] }
 crossbeam-channel = "0.5.4"
-rust-stemmers = "1.2.0"
+rust-stemmers = { version = "1.2.0", optional = true }
 downcast-rs = "2.0.1"
 bitpacking = { version = "0.9.2", default-features = false, features = [
     "bitpacker4x",
@@ -113,7 +113,8 @@ debug-assertions = true
 overflow-checks = true

 [features]
-default = ["mmap", "stopwords", "lz4-compression", "columnar-zstd-compression"]
+default = ["mmap", "stopwords", "lz4-compression", "columnar-zstd-compression", "stemmer"]
+stemmer = ["rust-stemmers"]
 mmap = ["fs4", "tempfile", "memmap2"]
 stopwords = []
@@ -16,7 +16,7 @@ stacker = { version= "0.6", path = "../stacker", package="tantivy-stacker"}
 sstable = { version= "0.6", path = "../sstable", package = "tantivy-sstable" }
 common = { version= "0.10", path = "../common", package = "tantivy-common" }
 tantivy-bitpacker = { version= "0.9", path = "../bitpacker/" }
-serde = "1.0.152"
+serde = { version = "1.0.152", features = ["derive"] }
 downcast-rs = "2.0.1"

 [dev-dependencies]
@@ -1,6 +1,6 @@
 use binggan::{InputGroup, black_box};
 use common::*;
-use tantivy_columnar::Column;
+use tantivy_columnar::{Column, ValueRange};

 pub mod common;
@@ -46,16 +46,16 @@ fn bench_group(mut runner: InputGroup<Column>) {
     runner.register("access_first_vals", |column| {
         let mut sum = 0;
         const BLOCK_SIZE: usize = 32;
-        let mut docs = vec![0; BLOCK_SIZE];
-        let mut buffer = vec![None; BLOCK_SIZE];
+        let mut docs = Vec::with_capacity(BLOCK_SIZE);
+        let mut buffer = Vec::with_capacity(BLOCK_SIZE);
         for i in (0..NUM_DOCS).step_by(BLOCK_SIZE) {
             // fill docs
-            #[allow(clippy::needless_range_loop)]
+            docs.clear();
             for idx in 0..BLOCK_SIZE {
-                docs[idx] = idx as u32 + i;
+                docs.push(idx as u32 + i);
             }

-            column.first_vals(&docs, &mut buffer);
+            buffer.clear();
+            column.first_vals_in_value_range(&mut docs, &mut buffer, ValueRange::All);
             for val in buffer.iter() {
                 let Some(val) = val else { continue };
                 sum += *val;
@@ -1,6 +1,7 @@
 mod dictionary_encoded;
 mod serialize;

+use std::cell::RefCell;
 use std::fmt::{self, Debug};
 use std::io::Write;
 use std::ops::{Range, RangeInclusive};
@@ -19,6 +20,11 @@ use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
 use crate::column_values::{ColumnValues, monotonic_map_column};
 use crate::{Cardinality, DocId, EmptyColumnValues, MonotonicallyMappableToU64, RowId};

+thread_local! {
+    static ROWS: RefCell<Vec<RowId>> = const { RefCell::new(Vec::new()) };
+    static DOCS: RefCell<Vec<DocId>> = const { RefCell::new(Vec::new()) };
+}
+
 #[derive(Clone)]
 pub struct Column<T = u64> {
     pub index: ColumnIndex,
@@ -89,31 +95,6 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
         self.values_for_doc(row_id).next()
     }

-    /// Load the first value for each docid in the provided slice.
-    #[inline]
-    pub fn first_vals(&self, docids: &[DocId], output: &mut [Option<T>]) {
-        match &self.index {
-            ColumnIndex::Empty { .. } => {}
-            ColumnIndex::Full => self.values.get_vals_opt(docids, output),
-            ColumnIndex::Optional(optional_index) => {
-                for (i, docid) in docids.iter().enumerate() {
-                    output[i] = optional_index
-                        .rank_if_exists(*docid)
-                        .map(|rowid| self.values.get_val(rowid));
-                }
-            }
-            ColumnIndex::Multivalued(multivalued_index) => {
-                for (i, docid) in docids.iter().enumerate() {
-                    let range = multivalued_index.range(*docid);
-                    let is_empty = range.start == range.end;
-                    if !is_empty {
-                        output[i] = Some(self.values.get_val(range.start));
-                    }
-                }
-            }
-        }
-    }
-
     /// Translates a block of docids to row_ids.
     ///
     /// returns the row_ids and the matching docids on the same index
@@ -143,7 +124,7 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
     #[inline]
     pub fn get_docids_for_value_range(
         &self,
-        value_range: RangeInclusive<T>,
+        value_range: ValueRange<T>,
         selected_docid_range: Range<u32>,
         doc_ids: &mut Vec<u32>,
     ) {
@@ -168,6 +149,194 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
     }
 }

+// Separate impl block for methods requiring `Default` for `T`.
+impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static + Default> Column<T> {
+    /// Load the first value for each docid in the provided slice.
+    ///
+    /// The `docids` vector is mutated: documents that do not match the `value_range` are removed.
+    /// The `values` vector is populated with the values of the remaining documents.
+    #[inline]
+    pub fn first_vals_in_value_range(
+        &self,
+        input_docs: &[DocId],
+        output: &mut Vec<crate::ComparableDoc<Option<T>, DocId>>,
+        value_range: ValueRange<T>,
+    ) {
+        match (&self.index, value_range) {
+            (ColumnIndex::Empty { .. }, value_range) => {
+                let nulls_match = match &value_range {
+                    ValueRange::All => true,
+                    ValueRange::Inclusive(_) => false,
+                    ValueRange::GreaterThan(_, nulls_match) => *nulls_match,
+                    ValueRange::GreaterThanOrEqual(_, nulls_match) => *nulls_match,
+                    ValueRange::LessThan(_, nulls_match) => *nulls_match,
+                    ValueRange::LessThanOrEqual(_, nulls_match) => *nulls_match,
+                };
+                if nulls_match {
+                    for &doc in input_docs {
+                        output.push(crate::ComparableDoc {
+                            doc,
+                            sort_key: None,
+                        });
+                    }
+                }
+            }
+            (ColumnIndex::Full, value_range) => {
+                self.values
+                    .get_vals_in_value_range(input_docs, input_docs, output, value_range);
+            }
+            (ColumnIndex::Optional(optional_index), value_range) => {
+                let nulls_match = match &value_range {
+                    ValueRange::All => true,
+                    ValueRange::Inclusive(_) => false,
+                    ValueRange::GreaterThan(_, nulls_match) => *nulls_match,
+                    ValueRange::GreaterThanOrEqual(_, nulls_match) => *nulls_match,
+                    ValueRange::LessThan(_, nulls_match) => *nulls_match,
+                    ValueRange::LessThanOrEqual(_, nulls_match) => *nulls_match,
+                };
+
+                let fallback_needed = ROWS.with(|rows_cell| {
+                    DOCS.with(|docs_cell| {
+                        let mut rows = rows_cell.borrow_mut();
+                        let mut docs = docs_cell.borrow_mut();
+                        rows.clear();
+                        docs.clear();
+
+                        let mut has_nulls = false;
+
+                        for &doc_id in input_docs {
+                            if let Some(row_id) = optional_index.rank_if_exists(doc_id) {
+                                rows.push(row_id);
+                                docs.push(doc_id);
+                            } else {
+                                has_nulls = true;
+                                if nulls_match {
+                                    break;
+                                }
+                            }
+                        }
+
+                        if !has_nulls || !nulls_match {
+                            self.values.get_vals_in_value_range(
+                                &rows,
+                                &docs,
+                                output,
+                                value_range.clone(),
+                            );
+                            return false;
+                        }
+                        true
+                    })
+                });
+                if fallback_needed {
+                    for &doc_id in input_docs {
+                        if let Some(row_id) = optional_index.rank_if_exists(doc_id) {
+                            let val = self.values.get_val(row_id);
+                            let value_matches = match &value_range {
+                                ValueRange::All => true,
+                                ValueRange::Inclusive(r) => r.contains(&val),
+                                ValueRange::GreaterThan(t, _) => val > *t,
+                                ValueRange::GreaterThanOrEqual(t, _) => val >= *t,
+                                ValueRange::LessThan(t, _) => val < *t,
+                                ValueRange::LessThanOrEqual(t, _) => val <= *t,
+                            };
+
+                            if value_matches {
+                                output.push(crate::ComparableDoc {
+                                    doc: doc_id,
+                                    sort_key: Some(val),
+                                });
+                            }
+                        } else if nulls_match {
+                            output.push(crate::ComparableDoc {
+                                doc: doc_id,
+                                sort_key: None,
+                            });
+                        }
+                    }
+                }
+            }
+            (ColumnIndex::Multivalued(multivalued_index), value_range) => {
+                let nulls_match = match &value_range {
+                    ValueRange::All => true,
+                    ValueRange::Inclusive(_) => false,
+                    ValueRange::GreaterThan(_, nulls_match) => *nulls_match,
+                    ValueRange::GreaterThanOrEqual(_, nulls_match) => *nulls_match,
+                    ValueRange::LessThan(_, nulls_match) => *nulls_match,
+                    ValueRange::LessThanOrEqual(_, nulls_match) => *nulls_match,
+                };
+                for i in 0..input_docs.len() {
+                    let docid = input_docs[i];
+                    let row_range = multivalued_index.range(docid);
+                    let is_empty = row_range.start == row_range.end;
+                    if !is_empty {
+                        let val = self.values.get_val(row_range.start);
+                        let matches = match &value_range {
+                            ValueRange::All => true,
+                            ValueRange::Inclusive(r) => r.contains(&val),
+                            ValueRange::GreaterThan(t, _) => val > *t,
+                            ValueRange::GreaterThanOrEqual(t, _) => val >= *t,
+                            ValueRange::LessThan(t, _) => val < *t,
+                            ValueRange::LessThanOrEqual(t, _) => val <= *t,
+                        };
+                        if matches {
+                            output.push(crate::ComparableDoc {
+                                doc: docid,
+                                sort_key: Some(val),
+                            });
+                        }
+                    } else if nulls_match {
+                        output.push(crate::ComparableDoc {
+                            doc: docid,
+                            sort_key: None,
+                        });
+                    }
+                }
+            }
+        }
+    }
+}
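As a behavioral illustration of what `first_vals_in_value_range` computes for an optional column, here is a hedged, self-contained sketch that uses a plain `Vec<Option<u64>>` as a stand-in for a real column and a tuple as a stand-in for `ComparableDoc`; the `threshold` parameter models `ValueRange::LessThan(threshold, nulls_match)`. This is not the crate's code, only a model of the semantics read off the diff.

```rust
// Sketch: column[doc] = first value for that doc, None = null.
// A doc is kept if its first value matches the range, or if it is null
// and the range's nulls flag is set.
fn first_vals_in_value_range(
    column: &[Option<u64>],
    input_docs: &[u32],
    threshold: u64,   // models ValueRange::LessThan(threshold, nulls_match)
    nulls_match: bool,
) -> Vec<(u32, Option<u64>)> {
    let mut output = Vec::new();
    for &doc in input_docs {
        match column[doc as usize] {
            Some(val) if val < threshold => output.push((doc, Some(val))),
            None if nulls_match => output.push((doc, None)),
            _ => {} // value out of range, or null while nulls do not match
        }
    }
    output
}

fn main() {
    let column = [Some(5), None, Some(42), Some(7)];
    // LessThan(10, nulls_match = true): docs 0 and 3 match by value, doc 1 as null.
    assert_eq!(
        first_vals_in_value_range(&column, &[0, 1, 2, 3], 10, true),
        vec![(0, Some(5)), (1, None), (3, Some(7))]
    );
}
```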
+/// A range of values.
+///
+/// This type is intended to be used in batch APIs, where the cost of unpacking the enum
+/// is outweighed by the time spent processing a batch.
+///
+/// Implementers should pattern match on the variants to use optimized loops for each case.
+#[derive(Clone, Debug)]
+pub enum ValueRange<T> {
+    /// A range that includes both start and end.
+    Inclusive(RangeInclusive<T>),
+    /// A range that matches all values.
+    All,
+    /// A range that matches all values greater than the threshold.
+    /// The boolean flag indicates if null values should be included.
+    GreaterThan(T, bool),
+    /// A range that matches all values greater than or equal to the threshold.
+    /// The boolean flag indicates if null values should be included.
+    GreaterThanOrEqual(T, bool),
+    /// A range that matches all values less than the threshold.
+    /// The boolean flag indicates if null values should be included.
+    LessThan(T, bool),
+    /// A range that matches all values less than or equal to the threshold.
+    /// The boolean flag indicates if null values should be included.
+    LessThanOrEqual(T, bool),
+}
+impl<T: PartialOrd> ValueRange<T> {
+    pub fn intersects(&self, min: T, max: T) -> bool {
+        match self {
+            ValueRange::Inclusive(range) => *range.start() <= max && *range.end() >= min,
+            ValueRange::All => true,
+            ValueRange::GreaterThan(val, _) => max > *val,
+            ValueRange::GreaterThanOrEqual(val, _) => max >= *val,
+            ValueRange::LessThan(val, _) => min < *val,
+            ValueRange::LessThanOrEqual(val, _) => min <= *val,
+        }
+    }
+}
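To make the `intersects` semantics concrete, the following self-contained sketch mirrors the enum locally (rather than importing the crate) and checks a few ranges against a column whose min/max stats are 10 and 20. The enum and method body are copied from the diff; the `main` assertions are our own.

```rust
use std::ops::RangeInclusive;

// Local mirror of the `ValueRange` enum above, for a runnable illustration.
#[allow(dead_code)]
#[derive(Clone, Debug)]
enum ValueRange<T> {
    Inclusive(RangeInclusive<T>),
    All,
    GreaterThan(T, bool),
    GreaterThanOrEqual(T, bool),
    LessThan(T, bool),
    LessThanOrEqual(T, bool),
}

impl<T: PartialOrd> ValueRange<T> {
    fn intersects(&self, min: T, max: T) -> bool {
        match self {
            ValueRange::Inclusive(range) => *range.start() <= max && *range.end() >= min,
            ValueRange::All => true,
            ValueRange::GreaterThan(val, _) => max > *val,
            ValueRange::GreaterThanOrEqual(val, _) => max >= *val,
            ValueRange::LessThan(val, _) => min < *val,
            ValueRange::LessThanOrEqual(val, _) => min <= *val,
        }
    }
}

fn main() {
    // A column whose values span [10, 20].
    assert!(ValueRange::Inclusive(15..=30).intersects(10, 20));
    assert!(!ValueRange::Inclusive(21..=30).intersects(10, 20));
    assert!(!ValueRange::GreaterThan(20, false).intersects(10, 20)); // nothing strictly above max
    assert!(ValueRange::GreaterThanOrEqual(20, false).intersects(10, 20)); // max itself matches
    assert!(!ValueRange::LessThan(10, false).intersects(10, 20)); // nothing strictly below min
}
```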
 impl BinarySerializable for Cardinality {
     fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
         self.to_code().serialize(writer)
@@ -333,7 +333,7 @@ mod tests {
     use std::ops::Range;

     use super::MultiValueIndex;
-    use crate::{ColumnarReader, DynamicColumn};
+    use crate::{ColumnarReader, DynamicColumn, ValueRange};

     fn index_to_pos_helper(
         index: &MultiValueIndex,
@@ -413,7 +413,7 @@ mod tests {
     assert_eq!(row_id_range, 0..4);

     let check = |range, expected| {
-        let full_range = 0..=u64::MAX;
+        let full_range = ValueRange::All;
         let mut docids = Vec::new();
         column.get_docids_for_value_range(full_range, range, &mut docids);
         assert_eq!(docids, expected);
@@ -7,13 +7,15 @@
 //! - Monotonically map values to u64/u128

 use std::fmt::Debug;
-use std::ops::{Range, RangeInclusive};
+use std::ops::Range;
 use std::sync::Arc;

 use downcast_rs::DowncastSync;
 pub use monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
 pub use monotonic_mapping_u128::MonotonicallyMappableToU128;

+use crate::column::ValueRange;
+
 mod merge;
 pub(crate) mod monotonic_mapping;
 pub(crate) mod monotonic_mapping_u128;
@@ -109,6 +111,307 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
         }
     }

+    /// Load the values for the provided docids.
+    ///
+    /// The values are filtered by the provided value range.
+    fn get_vals_in_value_range(
+        &self,
+        input_indexes: &[u32],
+        input_doc_ids: &[u32],
+        output: &mut Vec<crate::ComparableDoc<Option<T>, crate::DocId>>,
+        value_range: ValueRange<T>,
+    ) {
+        let len = input_indexes.len();
+        let mut read_head = 0;
+
+        match value_range {
+            ValueRange::All => {
+                while read_head + 3 < len {
+                    let idx0 = input_indexes[read_head];
+                    let idx1 = input_indexes[read_head + 1];
+                    let idx2 = input_indexes[read_head + 2];
+                    let idx3 = input_indexes[read_head + 3];
+
+                    let doc0 = input_doc_ids[read_head];
+                    let doc1 = input_doc_ids[read_head + 1];
+                    let doc2 = input_doc_ids[read_head + 2];
+                    let doc3 = input_doc_ids[read_head + 3];
+
+                    let val0 = self.get_val(idx0);
+                    let val1 = self.get_val(idx1);
+                    let val2 = self.get_val(idx2);
+                    let val3 = self.get_val(idx3);
+
+                    output.push(crate::ComparableDoc {
+                        doc: doc0,
+                        sort_key: Some(val0),
+                    });
+                    output.push(crate::ComparableDoc {
+                        doc: doc1,
+                        sort_key: Some(val1),
+                    });
+                    output.push(crate::ComparableDoc {
+                        doc: doc2,
+                        sort_key: Some(val2),
+                    });
+                    output.push(crate::ComparableDoc {
+                        doc: doc3,
+                        sort_key: Some(val3),
+                    });
+
+                    read_head += 4;
+                }
+            }
+            ValueRange::Inclusive(ref range) => {
+                while read_head + 3 < len {
+                    let idx0 = input_indexes[read_head];
+                    let idx1 = input_indexes[read_head + 1];
+                    let idx2 = input_indexes[read_head + 2];
+                    let idx3 = input_indexes[read_head + 3];
+
+                    let doc0 = input_doc_ids[read_head];
+                    let doc1 = input_doc_ids[read_head + 1];
+                    let doc2 = input_doc_ids[read_head + 2];
+                    let doc3 = input_doc_ids[read_head + 3];
+
+                    let val0 = self.get_val(idx0);
+                    let val1 = self.get_val(idx1);
+                    let val2 = self.get_val(idx2);
+                    let val3 = self.get_val(idx3);
+
+                    if range.contains(&val0) {
+                        output.push(crate::ComparableDoc {
+                            doc: doc0,
+                            sort_key: Some(val0),
+                        });
+                    }
+                    if range.contains(&val1) {
+                        output.push(crate::ComparableDoc {
+                            doc: doc1,
+                            sort_key: Some(val1),
+                        });
+                    }
+                    if range.contains(&val2) {
+                        output.push(crate::ComparableDoc {
+                            doc: doc2,
+                            sort_key: Some(val2),
+                        });
+                    }
+                    if range.contains(&val3) {
+                        output.push(crate::ComparableDoc {
+                            doc: doc3,
+                            sort_key: Some(val3),
+                        });
+                    }
+
+                    read_head += 4;
+                }
+            }
+            ValueRange::GreaterThan(ref threshold, _) => {
+                while read_head + 3 < len {
+                    let idx0 = input_indexes[read_head];
+                    let idx1 = input_indexes[read_head + 1];
+                    let idx2 = input_indexes[read_head + 2];
+                    let idx3 = input_indexes[read_head + 3];
+
+                    let doc0 = input_doc_ids[read_head];
+                    let doc1 = input_doc_ids[read_head + 1];
+                    let doc2 = input_doc_ids[read_head + 2];
+                    let doc3 = input_doc_ids[read_head + 3];
+
+                    let val0 = self.get_val(idx0);
+                    let val1 = self.get_val(idx1);
+                    let val2 = self.get_val(idx2);
+                    let val3 = self.get_val(idx3);
+
+                    if val0 > *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc0,
+                            sort_key: Some(val0),
+                        });
+                    }
+                    if val1 > *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc1,
+                            sort_key: Some(val1),
+                        });
+                    }
+                    if val2 > *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc2,
+                            sort_key: Some(val2),
+                        });
+                    }
+                    if val3 > *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc3,
+                            sort_key: Some(val3),
+                        });
+                    }
+
+                    read_head += 4;
+                }
+            }
+            ValueRange::GreaterThanOrEqual(ref threshold, _) => {
+                while read_head + 3 < len {
+                    let idx0 = input_indexes[read_head];
+                    let idx1 = input_indexes[read_head + 1];
+                    let idx2 = input_indexes[read_head + 2];
+                    let idx3 = input_indexes[read_head + 3];
+
+                    let doc0 = input_doc_ids[read_head];
+                    let doc1 = input_doc_ids[read_head + 1];
+                    let doc2 = input_doc_ids[read_head + 2];
+                    let doc3 = input_doc_ids[read_head + 3];
+
+                    let val0 = self.get_val(idx0);
+                    let val1 = self.get_val(idx1);
+                    let val2 = self.get_val(idx2);
+                    let val3 = self.get_val(idx3);
+
+                    if val0 >= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc0,
+                            sort_key: Some(val0),
+                        });
+                    }
+                    if val1 >= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc1,
+                            sort_key: Some(val1),
+                        });
+                    }
+                    if val2 >= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc2,
+                            sort_key: Some(val2),
+                        });
+                    }
+                    if val3 >= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc3,
+                            sort_key: Some(val3),
+                        });
+                    }
+
+                    read_head += 4;
+                }
+            }
+            ValueRange::LessThan(ref threshold, _) => {
+                while read_head + 3 < len {
+                    let idx0 = input_indexes[read_head];
+                    let idx1 = input_indexes[read_head + 1];
+                    let idx2 = input_indexes[read_head + 2];
+                    let idx3 = input_indexes[read_head + 3];
+
+                    let doc0 = input_doc_ids[read_head];
+                    let doc1 = input_doc_ids[read_head + 1];
+                    let doc2 = input_doc_ids[read_head + 2];
+                    let doc3 = input_doc_ids[read_head + 3];
+
+                    let val0 = self.get_val(idx0);
+                    let val1 = self.get_val(idx1);
+                    let val2 = self.get_val(idx2);
+                    let val3 = self.get_val(idx3);
+
+                    if val0 < *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc0,
+                            sort_key: Some(val0),
+                        });
+                    }
+                    if val1 < *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc1,
+                            sort_key: Some(val1),
+                        });
+                    }
+                    if val2 < *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc2,
+                            sort_key: Some(val2),
+                        });
+                    }
+                    if val3 < *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc3,
+                            sort_key: Some(val3),
+                        });
+                    }
+
+                    read_head += 4;
+                }
+            }
+            ValueRange::LessThanOrEqual(ref threshold, _) => {
+                while read_head + 3 < len {
+                    let idx0 = input_indexes[read_head];
+                    let idx1 = input_indexes[read_head + 1];
+                    let idx2 = input_indexes[read_head + 2];
+                    let idx3 = input_indexes[read_head + 3];
+
+                    let doc0 = input_doc_ids[read_head];
+                    let doc1 = input_doc_ids[read_head + 1];
+                    let doc2 = input_doc_ids[read_head + 2];
+                    let doc3 = input_doc_ids[read_head + 3];
+
+                    let val0 = self.get_val(idx0);
+                    let val1 = self.get_val(idx1);
+                    let val2 = self.get_val(idx2);
+                    let val3 = self.get_val(idx3);
+
+                    if val0 <= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc0,
+                            sort_key: Some(val0),
+                        });
+                    }
+                    if val1 <= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc1,
+                            sort_key: Some(val1),
+                        });
+                    }
+                    if val2 <= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc2,
+                            sort_key: Some(val2),
+                        });
+                    }
+                    if val3 <= *threshold {
+                        output.push(crate::ComparableDoc {
+                            doc: doc3,
+                            sort_key: Some(val3),
+                        });
+                    }
+
+                    read_head += 4;
+                }
+            }
+        }
+        // Process remaining elements (0 to 3)
+        while read_head < len {
+            let idx = input_indexes[read_head];
+            let doc = input_doc_ids[read_head];
+            let val = self.get_val(idx);
+            let matches = match value_range {
+                // 'value_range' is still moved here. This is the outer `value_range`
+                ValueRange::All => true,
+                ValueRange::Inclusive(ref r) => r.contains(&val),
+                ValueRange::GreaterThan(ref t, _) => val > *t,
+                ValueRange::GreaterThanOrEqual(ref t, _) => val >= *t,
+                ValueRange::LessThan(ref t, _) => val < *t,
+                ValueRange::LessThanOrEqual(ref t, _) => val <= *t,
+            };
+            if matches {
+                output.push(crate::ComparableDoc {
+                    doc,
+                    sort_key: Some(val),
+                });
+            }
+            read_head += 1;
+        }
+    }
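The six arms above all follow the same 4-way manual unrolling pattern: issue four independent loads and comparisons per iteration, then drain the 0-to-3 element tail in a scalar loop. Here is a hedged, self-contained sketch of that skeleton for a single `GreaterThan`-style predicate; the function name and data are ours, not the crate's.

```rust
// Sketch of the unrolling pattern used above: four independent lanes per
// iteration keep loads and compares in flight, then a scalar tail loop
// handles the remaining 0..=3 elements.
fn filter_gt(vals: &[u64], threshold: u64, out: &mut Vec<u64>) {
    let mut read_head = 0;
    while read_head + 3 < vals.len() {
        let (v0, v1, v2, v3) = (
            vals[read_head],
            vals[read_head + 1],
            vals[read_head + 2],
            vals[read_head + 3],
        );
        if v0 > threshold { out.push(v0); }
        if v1 > threshold { out.push(v1); }
        if v2 > threshold { out.push(v2); }
        if v3 > threshold { out.push(v3); }
        read_head += 4;
    }
    // Tail: remaining elements.
    while read_head < vals.len() {
        let v = vals[read_head];
        if v > threshold { out.push(v); }
        read_head += 1;
    }
}

fn main() {
    let mut out = Vec::new();
    filter_gt(&[1, 9, 3, 12, 7, 20], 5, &mut out);
    assert_eq!(out, vec![9, 12, 7, 20]);
}
```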
     /// Fills an output buffer with the fast field values
     /// associated with the `DocId` going from
     /// `start` to `start + output.len()`.
@@ -129,15 +432,54 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
     /// Note that position == docid for single value fast fields
     fn get_row_ids_for_value_range(
         &self,
-        value_range: RangeInclusive<T>,
+        value_range: ValueRange<T>,
         row_id_range: Range<RowId>,
         row_id_hits: &mut Vec<RowId>,
     ) {
         let row_id_range = row_id_range.start..row_id_range.end.min(self.num_vals());
-        for idx in row_id_range {
-            let val = self.get_val(idx);
-            if value_range.contains(&val) {
-                row_id_hits.push(idx);
+        match value_range {
+            ValueRange::Inclusive(range) => {
+                for idx in row_id_range {
+                    let val = self.get_val(idx);
+                    if range.contains(&val) {
+                        row_id_hits.push(idx);
+                    }
+                }
+            }
+            ValueRange::GreaterThan(threshold, _) => {
+                for idx in row_id_range {
+                    let val = self.get_val(idx);
+                    if val > threshold {
+                        row_id_hits.push(idx);
+                    }
+                }
+            }
+            ValueRange::GreaterThanOrEqual(threshold, _) => {
+                for idx in row_id_range {
+                    let val = self.get_val(idx);
+                    if val >= threshold {
+                        row_id_hits.push(idx);
+                    }
+                }
+            }
+            ValueRange::LessThan(threshold, _) => {
+                for idx in row_id_range {
+                    let val = self.get_val(idx);
+                    if val < threshold {
+                        row_id_hits.push(idx);
+                    }
+                }
+            }
+            ValueRange::LessThanOrEqual(threshold, _) => {
+                for idx in row_id_range {
+                    let val = self.get_val(idx);
+                    if val <= threshold {
+                        row_id_hits.push(idx);
+                    }
+                }
+            }
+            ValueRange::All => {
+                row_id_hits.extend(row_id_range);
+            }
+        }
     }
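A quick behavioral sketch of the default implementation above: clamp the row range to the number of values, then keep the row ids whose value matches the variant, with `All` short-circuiting to extending the hits with the whole id range without touching the values at all. The data below is ours.

```rust
fn main() {
    let vals = [3u64, 9, 4, 12, 7];
    let row_id_range = 0..10u32; // clamped to 0..5 by num_vals
    let clamped = row_id_range.start..row_id_range.end.min(vals.len() as u32);

    // Models ValueRange::GreaterThanOrEqual(7, _).
    let mut row_id_hits = Vec::new();
    for idx in clamped {
        if vals[idx as usize] >= 7 {
            row_id_hits.push(idx);
        }
    }
    assert_eq!(row_id_hits, vec![1, 3, 4]);
}
```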
@@ -193,6 +535,17 @@ impl<T: PartialOrd + Default> ColumnValues<T> for EmptyColumnValues {
     fn num_vals(&self) -> u32 {
         0
     }

+    fn get_vals_in_value_range(
+        &self,
+        input_indexes: &[u32],
+        input_doc_ids: &[u32],
+        output: &mut Vec<crate::ComparableDoc<Option<T>, crate::DocId>>,
+        value_range: ValueRange<T>,
+    ) {
+        let _ = (input_indexes, input_doc_ids, output, value_range);
+        panic!("Internal Error: Called get_vals_in_value_range of empty column.")
+    }
 }
impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
@@ -206,6 +559,18 @@ impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnV
         self.as_ref().get_vals_opt(indexes, output)
     }

+    #[inline(always)]
+    fn get_vals_in_value_range(
+        &self,
+        input_indexes: &[u32],
+        input_doc_ids: &[u32],
+        output: &mut Vec<crate::ComparableDoc<Option<T>, crate::DocId>>,
+        value_range: ValueRange<T>,
+    ) {
+        self.as_ref()
+            .get_vals_in_value_range(input_indexes, input_doc_ids, output, value_range)
+    }

     #[inline(always)]
     fn min_value(&self) -> T {
         self.as_ref().min_value()
@@ -234,7 +599,7 @@ impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnV
     #[inline(always)]
     fn get_row_ids_for_value_range(
         &self,
-        range: RangeInclusive<T>,
+        range: ValueRange<T>,
         doc_id_range: Range<u32>,
         positions: &mut Vec<u32>,
     ) {
@@ -1,8 +1,9 @@
 use std::fmt::Debug;
 use std::marker::PhantomData;
-use std::ops::{Range, RangeInclusive};
+use std::ops::Range;

 use crate::ColumnValues;
+use crate::column::ValueRange;
 use crate::column_values::monotonic_mapping::StrictlyMonotonicFn;

 struct MonotonicMappingColumn<C, T, Input> {
@@ -80,16 +81,52 @@ where

     fn get_row_ids_for_value_range(
         &self,
-        range: RangeInclusive<Output>,
+        range: ValueRange<Output>,
         doc_id_range: Range<u32>,
         positions: &mut Vec<u32>,
     ) {
-        self.from_column.get_row_ids_for_value_range(
-            self.monotonic_mapping.inverse(range.start().clone())
-                ..=self.monotonic_mapping.inverse(range.end().clone()),
-            doc_id_range,
-            positions,
-        )
+        match range {
+            ValueRange::Inclusive(range) => self.from_column.get_row_ids_for_value_range(
+                ValueRange::Inclusive(
+                    self.monotonic_mapping.inverse(range.start().clone())
+                        ..=self.monotonic_mapping.inverse(range.end().clone()),
+                ),
+                doc_id_range,
+                positions,
+            ),
+            ValueRange::All => self.from_column.get_row_ids_for_value_range(
+                ValueRange::All,
+                doc_id_range,
+                positions,
+            ),
+            ValueRange::GreaterThan(threshold, _) => self.from_column.get_row_ids_for_value_range(
+                ValueRange::GreaterThan(self.monotonic_mapping.inverse(threshold), false),
+                doc_id_range,
+                positions,
+            ),
+            ValueRange::GreaterThanOrEqual(threshold, _) => {
+                self.from_column.get_row_ids_for_value_range(
+                    ValueRange::GreaterThanOrEqual(
+                        self.monotonic_mapping.inverse(threshold),
+                        false,
+                    ),
+                    doc_id_range,
+                    positions,
+                )
+            }
+            ValueRange::LessThan(threshold, _) => self.from_column.get_row_ids_for_value_range(
+                ValueRange::LessThan(self.monotonic_mapping.inverse(threshold), false),
+                doc_id_range,
+                positions,
+            ),
+            ValueRange::LessThanOrEqual(threshold, _) => {
+                self.from_column.get_row_ids_for_value_range(
+                    ValueRange::LessThanOrEqual(self.monotonic_mapping.inverse(threshold), false),
+                    doc_id_range,
+                    positions,
+                )
+            }
+        }
     }

 // We voluntarily do not implement get_range as it yields a regression,
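The idea behind this translation: because the mapping is strictly monotonic, a threshold can be pushed through by applying the inverse to the bound while keeping the comparison direction. The nulls flag is passed as `false` here, presumably because null handling is resolved at the `ColumnIndex` level rather than in `ColumnValues`; that is our reading of the diff. A toy sketch (our own mapping, not the crate's):

```rust
fn main() {
    // Toy strictly monotonic map: internal -> external is v * 10,
    // so the inverse of an external bound is division by 10.
    let inverse = |external: u64| external / 10;

    // external > 50  <=>  internal > inverse(50) = 5,
    // exact here because the bound (50) lies in the mapped domain.
    let external_threshold = 50u64;
    let internal_threshold = inverse(external_threshold);
    for internal in 0..10u64 {
        let external = internal * 10;
        assert_eq!(external > external_threshold, internal > internal_threshold);
    }
}
```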
@@ -25,6 +25,7 @@ use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
 use tantivy_bitpacker::{BitPacker, BitUnpacker};

 use crate::RowId;
+use crate::column::ValueRange;
 use crate::column_values::ColumnValues;

 /// The cost per blank is quite hard actually, since blanks are delta encoded, the actual cost of
@@ -338,14 +339,48 @@ impl ColumnValues<u64> for CompactSpaceU64Accessor {
     #[inline]
     fn get_row_ids_for_value_range(
         &self,
-        value_range: RangeInclusive<u64>,
+        value_range: ValueRange<u64>,
         position_range: Range<u32>,
         positions: &mut Vec<u32>,
     ) {
-        let value_range = self.0.compact_to_u128(*value_range.start() as u32)
-            ..=self.0.compact_to_u128(*value_range.end() as u32);
-        self.0
-            .get_row_ids_for_value_range(value_range, position_range, positions)
+        match value_range {
+            ValueRange::Inclusive(value_range) => {
+                let value_range = ValueRange::Inclusive(
+                    self.0.compact_to_u128(*value_range.start() as u32)
+                        ..=self.0.compact_to_u128(*value_range.end() as u32),
+                );
+                self.0
+                    .get_row_ids_for_value_range(value_range, position_range, positions)
+            }
+            ValueRange::All => {
+                let position_range = position_range.start..position_range.end.min(self.num_vals());
+                positions.extend(position_range);
+            }
+            ValueRange::GreaterThan(threshold, _) => {
+                let value_range =
+                    ValueRange::GreaterThan(self.0.compact_to_u128(threshold as u32), false);
+                self.0
+                    .get_row_ids_for_value_range(value_range, position_range, positions)
+            }
+            ValueRange::GreaterThanOrEqual(threshold, _) => {
+                let value_range =
+                    ValueRange::GreaterThanOrEqual(self.0.compact_to_u128(threshold as u32), false);
+                self.0
+                    .get_row_ids_for_value_range(value_range, position_range, positions)
+            }
+            ValueRange::LessThan(threshold, _) => {
+                let value_range =
+                    ValueRange::LessThan(self.0.compact_to_u128(threshold as u32), false);
+                self.0
+                    .get_row_ids_for_value_range(value_range, position_range, positions)
+            }
+            ValueRange::LessThanOrEqual(threshold, _) => {
+                let value_range =
+                    ValueRange::LessThanOrEqual(self.0.compact_to_u128(threshold as u32), false);
+                self.0
+                    .get_row_ids_for_value_range(value_range, position_range, positions)
+            }
+        }
     }
@@ -375,10 +410,47 @@ impl ColumnValues<u128> for CompactSpaceDecompressor {
     #[inline]
     fn get_row_ids_for_value_range(
         &self,
-        value_range: RangeInclusive<u128>,
+        value_range: ValueRange<u128>,
         position_range: Range<u32>,
         positions: &mut Vec<u32>,
     ) {
+        let value_range = match value_range {
+            ValueRange::Inclusive(value_range) => value_range,
+            ValueRange::All => {
+                let position_range = position_range.start..position_range.end.min(self.num_vals());
+                positions.extend(position_range);
+                return;
+            }
+            ValueRange::GreaterThan(threshold, _) => {
+                let max = self.max_value();
+                if threshold >= max {
+                    return;
+                }
+                (threshold + 1)..=max
+            }
+            ValueRange::GreaterThanOrEqual(threshold, _) => {
+                let max = self.max_value();
+                if threshold > max {
+                    return;
+                }
+                threshold..=max
+            }
+            ValueRange::LessThan(threshold, _) => {
+                let min = self.min_value();
+                if threshold <= min {
+                    return;
+                }
+                min..=(threshold - 1)
+            }
+            ValueRange::LessThanOrEqual(threshold, _) => {
+                let min = self.min_value();
+                if threshold < min {
+                    return;
+                }
+                min..=threshold
+            }
+        };

         if value_range.start() > value_range.end() {
             return;
         }
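The conversion above relies on the values being integers, so open thresholds translate exactly into inclusive bounds: `> t` becomes `(t + 1)..=max` and `< t` becomes `min..=(t - 1)`, with the early returns guarding the edge cases where the increment or decrement would be pointless (or overflow). A small sketch of that rule, with our own min/max:

```rust
fn main() {
    let (min, max) = (10u128, 20u128);
    let gt = |t: u128| if t >= max { None } else { Some((t + 1)..=max) };
    let lt = |t: u128| if t <= min { None } else { Some(min..=(t - 1)) };

    assert_eq!(gt(15), Some(16..=20));
    assert_eq!(gt(20), None); // nothing strictly above the max
    assert_eq!(lt(10), None); // nothing strictly below the min
}
```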
@@ -560,7 +632,7 @@ mod tests {
         .collect::<Vec<_>>();
     let mut positions = Vec::new();
     decompressor.get_row_ids_for_value_range(
-        range,
+        ValueRange::Inclusive(range),
         0..decompressor.num_vals(),
         &mut positions,
     );
@@ -604,7 +676,11 @@ mod tests {
     let val = *val;
     let pos = pos as u32;
     let mut positions = Vec::new();
-    decomp.get_row_ids_for_value_range(val..=val, pos..pos + 1, &mut positions);
+    decomp.get_row_ids_for_value_range(
+        ValueRange::Inclusive(val..=val),
+        pos..pos + 1,
+        &mut positions,
+    );
     assert_eq!(positions, vec![pos]);
 }
@@ -746,7 +822,11 @@ mod tests {
     doc_id_range: Range<u32>,
 ) -> Vec<u32> {
     let mut positions = Vec::new();
-    column.get_row_ids_for_value_range(value_range, doc_id_range, &mut positions);
+    column.get_row_ids_for_value_range(
+        ValueRange::Inclusive(value_range),
+        doc_id_range,
+        &mut positions,
+    );
     positions
 }
@@ -6,6 +6,7 @@ use common::{BinarySerializable, OwnedBytes};
 use fastdivide::DividerU64;
 use tantivy_bitpacker::{BitPacker, BitUnpacker, compute_num_bits};

+use crate::column::ValueRange;
 use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, ColumnStats};
 use crate::{ColumnValues, RowId};
@@ -66,24 +67,273 @@ impl ColumnValues for BitpackedReader {
         self.stats.num_rows
     }

+    fn get_vals_in_value_range(
+        &self,
+        input_indexes: &[u32],
+        input_doc_ids: &[u32],
+        output: &mut Vec<crate::ComparableDoc<Option<u64>, crate::DocId>>,
+        value_range: ValueRange<u64>,
+    ) {
+        match value_range {
+            ValueRange::All => {
+                for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                    output.push(crate::ComparableDoc {
+                        doc,
+                        sort_key: Some(self.get_val(idx)),
+                    });
+                }
+            }
+            ValueRange::Inclusive(range) => {
+                if let Some(transformed_range) =
+                    transform_range_before_linear_transformation(&self.stats, range)
+                {
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        let raw_val = self.get_val(idx);
+                        if transformed_range.contains(&raw_val) {
+                            output.push(crate::ComparableDoc {
+                                doc,
+                                sort_key: Some(
+                                    self.stats.min_value + self.stats.gcd.get() * raw_val,
+                                ),
+                            });
+                        }
+                    }
+                }
+            }
+            ValueRange::GreaterThan(threshold, _) => {
+                if threshold < self.stats.min_value {
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        output.push(crate::ComparableDoc {
+                            doc,
+                            sort_key: Some(self.get_val(idx)),
+                        });
+                    }
+                } else if threshold >= self.stats.max_value {
+                    // All filtered out
+                } else {
+                    let raw_threshold = (threshold - self.stats.min_value) / self.stats.gcd.get();
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        let raw_val = self.get_val(idx);
+                        if raw_val > raw_threshold {
+                            output.push(crate::ComparableDoc {
+                                doc,
+                                sort_key: Some(
+                                    self.stats.min_value + self.stats.gcd.get() * raw_val,
+                                ),
+                            });
+                        }
+                    }
+                }
+            }
+            ValueRange::GreaterThanOrEqual(threshold, _) => {
+                if threshold <= self.stats.min_value {
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        output.push(crate::ComparableDoc {
+                            doc,
+                            sort_key: Some(self.get_val(idx)),
+                        });
+                    }
+                } else if threshold > self.stats.max_value {
+                    // All filtered out
+                } else {
+                    let diff = threshold - self.stats.min_value;
+                    let gcd = self.stats.gcd.get();
+                    let raw_threshold = (diff + gcd - 1) / gcd;
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        let raw_val = self.get_val(idx);
+                        if raw_val >= raw_threshold {
+                            output.push(crate::ComparableDoc {
+                                doc,
+                                sort_key: Some(
+                                    self.stats.min_value + self.stats.gcd.get() * raw_val,
+                                ),
+                            });
+                        }
+                    }
+                }
+            }
+            ValueRange::LessThan(threshold, _) => {
+                if threshold > self.stats.max_value {
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        output.push(crate::ComparableDoc {
+                            doc,
+                            sort_key: Some(self.get_val(idx)),
+                        });
+                    }
+                } else if threshold <= self.stats.min_value {
+                    // All filtered out
+                } else {
+                    let diff = threshold - self.stats.min_value;
+                    let gcd = self.stats.gcd.get();
+                    let raw_threshold = if diff % gcd == 0 {
+                        diff / gcd
+                    } else {
+                        diff / gcd + 1
+                    };
+
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        let raw_val = self.get_val(idx);
+                        if raw_val < raw_threshold {
+                            output.push(crate::ComparableDoc {
+                                doc,
+                                sort_key: Some(
+                                    self.stats.min_value + self.stats.gcd.get() * raw_val,
+                                ),
+                            });
+                        }
+                    }
+                }
+            }
+            ValueRange::LessThanOrEqual(threshold, _) => {
+                if threshold >= self.stats.max_value {
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        output.push(crate::ComparableDoc {
+                            doc,
+                            sort_key: Some(self.get_val(idx)),
+                        });
+                    }
+                } else if threshold < self.stats.min_value {
+                    // All filtered out
+                } else {
+                    let diff = threshold - self.stats.min_value;
+                    let gcd = self.stats.gcd.get();
+                    let raw_threshold = diff / gcd;
+
+                    for (&idx, &doc) in input_indexes.iter().zip(input_doc_ids.iter()) {
+                        let raw_val = self.get_val(idx);
+                        if raw_val <= raw_threshold {
+                            output.push(crate::ComparableDoc {
+                                doc,
+                                sort_key: Some(
+                                    self.stats.min_value + self.stats.gcd.get() * raw_val,
+                                ),
+                            });
+                        }
+                    }
+                }
+            }
+        }
+    }
     fn get_row_ids_for_value_range(
         &self,
-        range: RangeInclusive<u64>,
+        range: ValueRange<u64>,
         doc_id_range: Range<u32>,
         positions: &mut Vec<u32>,
     ) {
-        let Some(transformed_range) =
-            transform_range_before_linear_transformation(&self.stats, range)
-        else {
-            positions.clear();
-            return;
-        };
-        self.bit_unpacker.get_ids_for_value_range(
-            transformed_range,
-            doc_id_range,
-            &self.data,
-            positions,
-        );
+        match range {
+            ValueRange::All => {
+                positions.extend(doc_id_range);
+                return;
+            }
+            ValueRange::Inclusive(range) => {
+                let Some(transformed_range) =
+                    transform_range_before_linear_transformation(&self.stats, range)
+                else {
+                    positions.clear();
+                    return;
+                };

+                self.bit_unpacker.get_ids_for_value_range(
+                    transformed_range,
+                    doc_id_range,
+                    &self.data,
+                    positions,
+                );
+            }
+            ValueRange::GreaterThan(threshold, _) => {
+                if threshold < self.stats.min_value {
+                    positions.extend(doc_id_range);
+                    return;
+                }
+                if threshold >= self.stats.max_value {
+                    return;
+                }
+                let raw_threshold = (threshold - self.stats.min_value) / self.stats.gcd.get();
+                let max_raw = (self.stats.max_value - self.stats.min_value) / self.stats.gcd.get();
+                let transformed_range = (raw_threshold + 1)..=max_raw;

+                self.bit_unpacker.get_ids_for_value_range(
+                    transformed_range,
+                    doc_id_range,
+                    &self.data,
+                    positions,
+                );
+            }
+            ValueRange::GreaterThanOrEqual(threshold, _) => {
+                if threshold <= self.stats.min_value {
+                    positions.extend(doc_id_range);
+                    return;
+                }
+                if threshold > self.stats.max_value {
+                    return;
+                }
+                let diff = threshold - self.stats.min_value;
+                let gcd = self.stats.gcd.get();
+                let raw_threshold = (diff + gcd - 1) / gcd;
+                // We want raw >= raw_threshold.
+                let max_raw = (self.stats.max_value - self.stats.min_value) / self.stats.gcd.get();
+                let transformed_range = raw_threshold..=max_raw;

+                self.bit_unpacker.get_ids_for_value_range(
+                    transformed_range,
+                    doc_id_range,
+                    &self.data,
+                    positions,
+                );
+            }
+            ValueRange::LessThan(threshold, _) => {
+                if threshold > self.stats.max_value {
+                    positions.extend(doc_id_range);
+                    return;
+                }
+                if threshold <= self.stats.min_value {
+                    return;
+                }

+                let diff = threshold - self.stats.min_value;
+                let gcd = self.stats.gcd.get();
+                // We want raw < raw_threshold_limit
+                // raw <= raw_threshold_limit - 1
+                let raw_threshold_limit = if diff % gcd == 0 {
+                    diff / gcd
+                } else {
+                    diff / gcd + 1
+                };

+                if raw_threshold_limit == 0 {
+                    return;
+                }
+                let transformed_range = 0..=(raw_threshold_limit - 1);

+                self.bit_unpacker.get_ids_for_value_range(
+                    transformed_range,
+                    doc_id_range,
+                    &self.data,
+                    positions,
+                );
+            }
+            ValueRange::LessThanOrEqual(threshold, _) => {
+                if threshold >= self.stats.max_value {
+                    positions.extend(doc_id_range);
+                    return;
+                }
+                if threshold < self.stats.min_value {
+                    return;
+                }
+                let diff = threshold - self.stats.min_value;
+                let gcd = self.stats.gcd.get();
+                // We want raw <= raw_threshold.
+                let raw_threshold = diff / gcd;
+                let transformed_range = 0..=raw_threshold;

+                self.bit_unpacker.get_ids_for_value_range(
+                    transformed_range,
+                    doc_id_range,
+                    &self.data,
+                    positions,
+                );
+            }
+        }
+    }
+}
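The threshold arithmetic above follows from the bitpacked encoding: stored "raw" values are `(value - min_value) / gcd`, so `value > t` translates to `raw > floor((t - min) / gcd)` while `value >= t` needs the ceiling, `raw >= ceil((t - min) / gcd)`, computed as `(diff + gcd - 1) / gcd`. A worked sketch with our own stats:

```rust
fn main() {
    let (min_value, gcd) = (100u64, 5u64);
    let raw = |v: u64| (v - min_value) / gcd; // encoding used by the bitpacked codec

    // GreaterThanOrEqual(112): diff = 12, ceil(12 / 5) = 3, so keep raw >= 3.
    let threshold = 112u64;
    let raw_threshold = (threshold - min_value + gcd - 1) / gcd;
    assert_eq!(raw_threshold, 3);

    // 110 (raw 2) is filtered out, 115 (raw 3) passes: exactly the docs with v >= 112.
    assert!(raw(110) < raw_threshold);
    assert!(raw(115) >= raw_threshold);
}
```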
@@ -131,7 +131,7 @@ pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
     .collect();
 let mut positions = Vec::new();
 reader.get_row_ids_for_value_range(
-    vals[test_rand_idx]..=vals[test_rand_idx],
+    crate::column::ValueRange::Inclusive(vals[test_rand_idx]..=vals[test_rand_idx]),
     0..vals.len() as u32,
     &mut positions,
 );
@@ -29,6 +29,7 @@ mod column;
 pub mod column_index;
 pub mod column_values;
 mod columnar;
+mod comparable_doc;
 mod dictionary;
 mod dynamic_column;
 mod iterable;
@@ -36,7 +37,7 @@ pub(crate) mod utils;
 mod value;

 pub use block_accessor::ColumnBlockAccessor;
-pub use column::{BytesColumn, Column, StrColumn};
+pub use column::{BytesColumn, Column, StrColumn, ValueRange};
 pub use column_index::ColumnIndex;
 pub use column_values::{
     ColumnValues, EmptyColumnValues, MonotonicallyMappableToU64, MonotonicallyMappableToU128,
@@ -45,6 +46,7 @@ pub use columnar::{
     CURRENT_VERSION, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
     MergeRowOrder, ShuffleMergeOrder, StackMergeOrder, Version, merge_columnar,
 };
+pub use comparable_doc::ComparableDoc;
 use sstable::VoidSSTable;
 pub use value::{NumericalType, NumericalValue};
@@ -446,7 +446,7 @@ impl DocumentQueryEvaluator {
         let weight = query.weight(EnableScoring::disabled_from_schema(&schema))?;

         // Get a scorer that iterates over matching documents
-        let mut scorer = weight.scorer(segment_reader, 1.0, 0)?;
+        let mut scorer = weight.scorer(segment_reader, 1.0)?;

         // Create a BitSet to hold all matching documents
         let mut bitset = BitSet::with_max_value(max_doc);
@@ -1,7 +1,8 @@
 use std::cmp::Ordering;
 use std::collections::HashMap;
 use std::net::Ipv6Addr;

-use columnar::{Column, ColumnType, ColumnarReader, DynamicColumn};
+use columnar::{Column, ColumnType, ColumnarReader, DynamicColumn, ValueRange};
 use common::json_path_writer::JSON_PATH_SEGMENT_SEP_STR;
 use common::DateTime;
 use regex::Regex;
@@ -16,7 +17,7 @@ use crate::aggregation::intermediate_agg_result::{
 };
 use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
 use crate::aggregation::AggregationError;
-use crate::collector::sort_key::ReverseComparator;
+use crate::collector::sort_key::{Comparator, ReverseComparator};
 use crate::collector::TopNComputer;
 use crate::schema::OwnedValue;
 use crate::{DocAddress, DocId, SegmentOrdinal};
@@ -383,7 +384,7 @@ impl From<FastFieldValue> for OwnedValue {

 /// Holds a fast field value in its u64 representation, and the order in which it should be sorted.
 #[derive(Clone, Serialize, Deserialize, Debug)]
-struct DocValueAndOrder {
+pub(crate) struct DocValueAndOrder {
     /// A fast field value in its u64 representation.
     value: Option<u64>,
     /// Sort order for the value
@@ -455,6 +456,37 @@ impl PartialEq for DocSortValuesAndFields {

 impl Eq for DocSortValuesAndFields {}

+impl Comparator<DocSortValuesAndFields> for ReverseComparator {
+    #[inline(always)]
+    fn compare(&self, lhs: &DocSortValuesAndFields, rhs: &DocSortValuesAndFields) -> Ordering {
+        rhs.cmp(lhs)
+    }
+
+    fn threshold_to_valuerange(
+        &self,
+        threshold: DocSortValuesAndFields,
+    ) -> ValueRange<DocSortValuesAndFields> {
+        ValueRange::LessThan(threshold, true)
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub(crate) struct TopHitsSegmentSortKey(pub Vec<DocValueAndOrder>);
+
+impl Comparator<TopHitsSegmentSortKey> for ReverseComparator {
+    #[inline(always)]
+    fn compare(&self, lhs: &TopHitsSegmentSortKey, rhs: &TopHitsSegmentSortKey) -> Ordering {
+        rhs.cmp(lhs)
+    }
+
+    fn threshold_to_valuerange(
+        &self,
+        threshold: TopHitsSegmentSortKey,
+    ) -> ValueRange<TopHitsSegmentSortKey> {
+        ValueRange::LessThan(threshold, true)
+    }
+}
+
 /// The TopHitsCollector used for collecting over segments and merging results.
 #[derive(Clone, Serialize, Deserialize, Debug)]
 pub struct TopHitsTopNComputer {
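A hedged sketch of the idea behind `threshold_to_valuerange`: under the reversed comparison used for top-n collection, the current worst retained sort key becomes a threshold, and only strictly smaller keys can still enter the result, with nulls kept eligible (`LessThan(threshold, true)`). The trait shape below is a local stand-in inferred from the two impls in the diff, not the crate's actual definition.

```rust
use std::cmp::Ordering;

// Local stand-ins mirroring the diff's Comparator / ValueRange usage.
enum ValueRange<T> {
    LessThan(T, bool),
}

trait Comparator<T> {
    fn compare(&self, lhs: &T, rhs: &T) -> Ordering;
    fn threshold_to_valuerange(&self, threshold: T) -> ValueRange<T>;
}

struct ReverseComparator;

impl Comparator<u64> for ReverseComparator {
    fn compare(&self, lhs: &u64, rhs: &u64) -> Ordering {
        rhs.cmp(lhs) // reversed: smaller keys rank higher
    }
    fn threshold_to_valuerange(&self, threshold: u64) -> ValueRange<u64> {
        // Only values strictly below the current worst retained key can
        // still enter the top-n; null values stay eligible.
        ValueRange::LessThan(threshold, true)
    }
}

fn main() {
    let cmp = ReverseComparator;
    assert_eq!(cmp.compare(&1, &2), Ordering::Greater);
    let ValueRange::LessThan(t, nulls) = cmp.threshold_to_valuerange(10);
    assert_eq!((t, nulls), (10, true));
}
```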
@@ -518,7 +550,7 @@ impl TopHitsTopNComputer {
 pub(crate) struct TopHitsSegmentCollector {
     segment_ordinal: SegmentOrdinal,
     accessor_idx: usize,
-    top_n: TopNComputer<Vec<DocValueAndOrder>, DocAddress, ReverseComparator>,
+    top_n: TopNComputer<TopHitsSegmentSortKey, DocAddress, ReverseComparator>,
 }

 impl TopHitsSegmentCollector {
@@ -539,13 +571,15 @@ impl TopHitsSegmentCollector {
         req: &TopHitsAggregationReq,
     ) -> TopHitsTopNComputer {
         let mut top_hits_computer = TopHitsTopNComputer::new(req);
+        // Map TopHitsSegmentSortKey back to Vec<DocValueAndOrder> if needed or use directly
+        // The TopNComputer here stores TopHitsSegmentSortKey.
         let top_results = self.top_n.into_vec();

         for res in top_results {
             let doc_value_fields = req.get_document_field_data(value_accessors, res.doc.doc_id);
             top_hits_computer.collect(
                 DocSortValuesAndFields {
-                    sorts: res.sort_key,
+                    sorts: res.sort_key.0,
                     doc_value_fields,
                 },
                 res.doc,
@@ -579,7 +613,7 @@ impl TopHitsSegmentCollector {
             .collect();

         self.top_n.push(
-            sorts,
+            TopHitsSegmentSortKey(sorts),
             DocAddress {
                 segment_ord: self.segment_ordinal,
                 doc_id,
@@ -96,10 +96,9 @@ mod histogram_collector;
 pub use histogram_collector::HistogramCollector;

 mod multi_collector;
-pub use self::multi_collector::{FruitHandle, MultiCollector, MultiFruit};
+pub use columnar::ComparableDoc;

 mod top_collector;
-pub use self::top_collector::ComparableDoc;
+pub use self::multi_collector::{FruitHandle, MultiCollector, MultiFruit};

 mod top_score_collector;
 pub use self::top_score_collector::{TopDocs, TopNComputer};
@@ -1,10 +1,12 @@
 mod order;
+mod sort_by_erased_type;
 mod sort_by_score;
 mod sort_by_static_fast_value;
 mod sort_by_string;
 mod sort_key_computer;

 pub use order::*;
+pub use sort_by_erased_type::SortByErasedType;
 pub use sort_by_score::SortBySimilarityScore;
 pub use sort_by_static_fast_value::SortByStaticFastValue;
 pub use sort_by_string::SortByString;
@@ -34,11 +36,14 @@ pub(crate) mod tests {
     use std::collections::HashMap;
     use std::ops::Range;

-    use crate::collector::sort_key::{SortBySimilarityScore, SortByStaticFastValue, SortByString};
+    use crate::collector::sort_key::{
+        SortByErasedType, SortBySimilarityScore, SortByStaticFastValue, SortByString,
+    };
+    use crate::collector::top_score_collector::compare_for_top_k;
     use crate::collector::{ComparableDoc, DocSetCollector, TopDocs};
     use crate::indexer::NoMergePolicy;
     use crate::query::{AllQuery, QueryParser};
-    use crate::schema::{Schema, FAST, TEXT};
+    use crate::schema::{OwnedValue, Schema, FAST, TEXT};
     use crate::{DocAddress, Document, Index, Order, Score, Searcher};

     fn make_index() -> crate::Result<Index> {
@@ -313,11 +318,9 @@ pub(crate) mod tests {
             (SortBySimilarityScore, score_order),
             (SortByString::for_field("city"), city_order),
         ));
-        Ok(searcher
-            .search(&AllQuery, &top_collector)?
-            .into_iter()
-            .map(|(f, doc)| (f, ids[&doc]))
-            .collect())
+        let results: Vec<((Score, Option<String>), DocAddress)> =
+            searcher.search(&AllQuery, &top_collector)?;
+        Ok(results.into_iter().map(|(f, doc)| (f, ids[&doc])).collect())
     }

     assert_eq!(
@@ -342,6 +345,97 @@ pub(crate) mod tests {
         Ok(())
     }

+    #[test]
+    fn test_order_by_score_then_owned_value() -> crate::Result<()> {
+        let index = make_index()?;
+
+        type SortKey = (Score, OwnedValue);
+
+        fn query(
+            index: &Index,
+            score_order: Order,
+            city_order: Order,
+        ) -> crate::Result<Vec<(SortKey, u64)>> {
+            let searcher = index.reader()?.searcher();
+            let ids = id_mapping(&searcher);
+
+            let top_collector = TopDocs::with_limit(4).order_by::<(Score, OwnedValue)>((
+                (SortBySimilarityScore, score_order),
+                (SortByErasedType::for_field("city"), city_order),
+            ));
+            let results: Vec<((Score, OwnedValue), DocAddress)> =
+                searcher.search(&AllQuery, &top_collector)?;
+            Ok(results.into_iter().map(|(f, doc)| (f, ids[&doc])).collect())
+        }
+
+        assert_eq!(
+            &query(&index, Order::Asc, Order::Asc)?,
+            &[
+                ((1.0, OwnedValue::Str("austin".to_owned())), 0),
+                ((1.0, OwnedValue::Str("greenville".to_owned())), 1),
+                ((1.0, OwnedValue::Str("tokyo".to_owned())), 2),
+                ((1.0, OwnedValue::Null), 3),
+            ]
+        );
+
+        assert_eq!(
+            &query(&index, Order::Asc, Order::Desc)?,
+            &[
+                ((1.0, OwnedValue::Str("tokyo".to_owned())), 2),
+                ((1.0, OwnedValue::Str("greenville".to_owned())), 1),
+                ((1.0, OwnedValue::Str("austin".to_owned())), 0),
+                ((1.0, OwnedValue::Null), 3),
+            ]
+        );
+        Ok(())
+    }
+    #[test]
+    fn test_order_by_compound_fast_fields() -> crate::Result<()> {
+        let index = make_index()?;
+
+        type CompoundSortKey = (Option<String>, Option<f64>);
+
+        fn assert_query(
+            index: &Index,
+            city_order: Order,
+            altitude_order: Order,
+            expected: Vec<(CompoundSortKey, u64)>,
+        ) -> crate::Result<()> {
+            let searcher = index.reader()?.searcher();
+            let ids = id_mapping(&searcher);
+
+            let top_collector = TopDocs::with_limit(4).order_by((
+                (SortByString::for_field("city"), city_order),
+                (
+                    SortByStaticFastValue::<f64>::for_field("altitude"),
+                    altitude_order,
+                ),
+            ));
+            let actual = searcher
+                .search(&AllQuery, &top_collector)?
+                .into_iter()
+                .map(|(key, doc)| (key, ids[&doc]))
+                .collect::<Vec<_>>();
+            assert_eq!(actual, expected);
+            Ok(())
+        }
+
+        assert_query(
+            &index,
+            Order::Asc,
+            Order::Desc,
+            vec![
+                ((Some("austin".to_owned()), Some(149.0)), 0),
+                ((Some("greenville".to_owned()), Some(27.0)), 1),
+                ((Some("tokyo".to_owned()), Some(40.0)), 2),
+                ((None, Some(0.0)), 3),
+            ],
+        )?;
+
+        Ok(())
+    }

     use proptest::prelude::*;

     proptest! {
@@ -404,4 +498,197 @@ pub(crate) mod tests {
             );
         }
     }

+    proptest! {
+        #[test]
+        fn test_order_by_compound_prop(
+            city_order in prop_oneof!(Just(Order::Desc), Just(Order::Asc)),
+            altitude_order in prop_oneof!(Just(Order::Desc), Just(Order::Asc)),
+            limit in 1..20_usize,
+            offset in 0..20_usize,
+            segments_data in proptest::collection::vec(
+                proptest::collection::vec(
+                    (proptest::option::of("[a-c]"), proptest::option::of(0..50u64)),
+                    1..10_usize // segment size
+                ),
+                1..4_usize // num segments
+            )
+        ) {
+            use crate::collector::sort_key::ComparatorEnum;
+            use crate::TantivyDocument;
+
+            let mut schema_builder = Schema::builder();
+            let city = schema_builder.add_text_field("city", TEXT | FAST);
+            let altitude = schema_builder.add_u64_field("altitude", FAST);
+            let schema = schema_builder.build();
+            let index = Index::create_in_ram(schema);
+            let mut index_writer = index.writer_for_tests().unwrap();
+
+            for segment_data in segments_data.into_iter() {
+                for (city_val, altitude_val) in segment_data.into_iter() {
+                    let mut doc = TantivyDocument::default();
+                    if let Some(c) = city_val {
+                        doc.add_text(city, c);
+                    }
+                    if let Some(a) = altitude_val {
+                        doc.add_u64(altitude, a);
+                    }
+                    index_writer.add_document(doc).unwrap();
+                }
+                index_writer.commit().unwrap();
+            }
+
+            let searcher = index.reader().unwrap().searcher();
+
+            let top_collector = TopDocs::with_limit(limit)
+                .and_offset(offset)
+                .order_by((
+                    (SortByString::for_field("city"), city_order),
+                    (
+                        SortByStaticFastValue::<u64>::for_field("altitude"),
+                        altitude_order,
+                    ),
+                ));
+
+            let actual_results = searcher.search(&AllQuery, &top_collector).unwrap();
+            let actual_doc_ids: Vec<DocAddress> =
+                actual_results.into_iter().map(|(_, doc)| doc).collect();
+
+            // Verification logic
+            let all_docs_collector = DocSetCollector;
+            let all_docs = searcher.search(&AllQuery, &all_docs_collector).unwrap();
+
+            let docs_with_keys: Vec<((Option<String>, Option<u64>), DocAddress)> = all_docs
+                .into_iter()
+                .map(|doc_addr| {
+                    let reader = searcher.segment_reader(doc_addr.segment_ord);
+
+                    let city_val = if let Some(col) = reader.fast_fields().str("city").unwrap() {
+                        let ord = col.ords().first(doc_addr.doc_id);
+                        if let Some(ord) = ord {
+                            let mut out = Vec::new();
+                            col.dictionary().ord_to_term(ord, &mut out).unwrap();
+                            String::from_utf8(out).ok()
+                        } else {
+                            None
+                        }
+                    } else {
+                        None
+                    };
+
+                    let alt_val = if let Some((col, _)) = reader.fast_fields().u64_lenient("altitude").unwrap() {
+                        col.first(doc_addr.doc_id)
+                    } else {
+                        None
+                    };
+
+                    ((city_val, alt_val), doc_addr)
+                })
+                .collect();
+
+            let city_comparator = ComparatorEnum::from(city_order);
+            let alt_comparator = ComparatorEnum::from(altitude_order);
+            let comparator = (city_comparator, alt_comparator);
+
+            let mut comparable_docs: Vec<ComparableDoc<_, _>> = docs_with_keys
+                .into_iter()
+                .map(|(sort_key, doc)| ComparableDoc { sort_key, doc })
+                .collect();
+
+            comparable_docs.sort_by(|l, r| compare_for_top_k(&comparator, l, r));
+
+            let expected_results = comparable_docs
+                .into_iter()
+                .skip(offset)
+                .take(limit)
+                .collect::<Vec<_>>();
+
+            let expected_doc_ids: Vec<DocAddress> =
+                expected_results.into_iter().map(|cd| cd.doc).collect();
+
+            prop_assert_eq!(actual_doc_ids, expected_doc_ids);
+        }
+    }
proptest! {
|
||||
#[test]
|
||||
fn test_order_by_u64_prop(
|
||||
order in prop_oneof!(Just(Order::Desc), Just(Order::Asc)),
|
||||
limit in 1..20_usize,
|
||||
offset in 0..20_usize,
|
||||
segments_data in proptest::collection::vec(
|
||||
proptest::collection::vec(
|
||||
proptest::option::of(0..100u64),
|
||||
1..1000_usize // segment size
|
||||
),
|
||||
1..4_usize // num segments
|
||||
)
|
||||
) {
|
||||
use crate::collector::sort_key::ComparatorEnum;
|
||||
use crate::TantivyDocument;
|
||||
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_u64_field("field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
for segment_data in segments_data.into_iter() {
|
||||
for val in segment_data.into_iter() {
|
||||
let mut doc = TantivyDocument::default();
|
||||
if let Some(v) = val {
|
||||
doc.add_u64(field, v);
|
||||
}
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
}
|
||||
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
|
||||
let top_collector = TopDocs::with_limit(limit)
|
||||
.and_offset(offset)
|
||||
.order_by((SortByStaticFastValue::<u64>::for_field("field"), order));
|
||||
|
||||
let actual_results = searcher.search(&AllQuery, &top_collector).unwrap();
|
||||
let actual_doc_ids: Vec<DocAddress> =
|
||||
actual_results.into_iter().map(|(_, doc)| doc).collect();
|
||||
|
||||
// Verification logic
|
||||
let all_docs_collector = DocSetCollector;
|
||||
let all_docs = searcher.search(&AllQuery, &all_docs_collector).unwrap();
|
||||
|
||||
let docs_with_keys: Vec<(Option<u64>, DocAddress)> = all_docs
|
||||
.into_iter()
|
||||
.map(|doc_addr| {
|
||||
let reader = searcher.segment_reader(doc_addr.segment_ord);
|
||||
let val = if let Some((col, _)) = reader.fast_fields().u64_lenient("field").unwrap() {
|
||||
col.first(doc_addr.doc_id)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(val, doc_addr)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let comparator = ComparatorEnum::from(order);
|
||||
let mut comparable_docs: Vec<ComparableDoc<_, _>> = docs_with_keys
|
||||
.into_iter()
|
||||
.map(|(sort_key, doc)| ComparableDoc { sort_key, doc })
|
||||
.collect();
|
||||
|
||||
comparable_docs.sort_by(|l, r| compare_for_top_k(&comparator, l, r));
|
||||
|
||||
let expected_results = comparable_docs
|
||||
.into_iter()
|
||||
.skip(offset)
|
||||
.take(limit)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let expected_doc_ids: Vec<DocAddress> =
|
||||
expected_results.into_iter().map(|cd| cd.doc).collect();
|
||||
|
||||
prop_assert_eq!(actual_doc_ids, expected_doc_ids);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +1,77 @@
use std::cmp::Ordering;

use columnar::{ComparableDoc, MonotonicallyMappableToU64, ValueRange};
use serde::{Deserialize, Serialize};

use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
use crate::schema::Schema;
use crate::schema::{OwnedValue, Schema};
use crate::{DocId, Order, Score};

fn compare_owned_value<const NULLS_FIRST: bool>(lhs: &OwnedValue, rhs: &OwnedValue) -> Ordering {
    match (lhs, rhs) {
        (OwnedValue::Null, OwnedValue::Null) => Ordering::Equal,
        (OwnedValue::Null, _) => {
            if NULLS_FIRST {
                Ordering::Less
            } else {
                Ordering::Greater
            }
        }
        (_, OwnedValue::Null) => {
            if NULLS_FIRST {
                Ordering::Greater
            } else {
                Ordering::Less
            }
        }
        (OwnedValue::Str(a), OwnedValue::Str(b)) => a.cmp(b),
        (OwnedValue::PreTokStr(a), OwnedValue::PreTokStr(b)) => a.cmp(b),
        (OwnedValue::U64(a), OwnedValue::U64(b)) => a.cmp(b),
        (OwnedValue::I64(a), OwnedValue::I64(b)) => a.cmp(b),
        (OwnedValue::F64(a), OwnedValue::F64(b)) => a.to_u64().cmp(&b.to_u64()),
        (OwnedValue::Bool(a), OwnedValue::Bool(b)) => a.cmp(b),
        (OwnedValue::Date(a), OwnedValue::Date(b)) => a.cmp(b),
        (OwnedValue::Facet(a), OwnedValue::Facet(b)) => a.cmp(b),
        (OwnedValue::Bytes(a), OwnedValue::Bytes(b)) => a.cmp(b),
        (OwnedValue::IpAddr(a), OwnedValue::IpAddr(b)) => a.cmp(b),
        (OwnedValue::U64(a), OwnedValue::I64(b)) => {
            if *b < 0 {
                Ordering::Greater
            } else {
                a.cmp(&(*b as u64))
            }
        }
        (OwnedValue::I64(a), OwnedValue::U64(b)) => {
            if *a < 0 {
                Ordering::Less
            } else {
                (*a as u64).cmp(b)
            }
        }
        (OwnedValue::U64(a), OwnedValue::F64(b)) => (*a as f64).to_u64().cmp(&b.to_u64()),
        (OwnedValue::F64(a), OwnedValue::U64(b)) => a.to_u64().cmp(&(*b as f64).to_u64()),
        (OwnedValue::I64(a), OwnedValue::F64(b)) => (*a as f64).to_u64().cmp(&b.to_u64()),
        (OwnedValue::F64(a), OwnedValue::I64(b)) => a.to_u64().cmp(&(*b as f64).to_u64()),
        (a, b) => {
            let ord = a.discriminant_value().cmp(&b.discriminant_value());
            // If the discriminant is equal, it's because a new type was added, but hasn't been
            // included in this `match` statement.
            assert!(
                ord != Ordering::Equal,
                "Unimplemented comparison for type of {a:?}, {b:?}"
            );
            ord
        }
    }
}

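// Illustrative sketch, not from the commit: the F64 arms above compare floats
// via `MonotonicallyMappableToU64`, i.e. a u64 encoding whose natural order
// matches the total order on f64. The classic encoding looks like this (an
// assumption for illustration, not quoted from the columnar crate):
#[allow(dead_code)]
fn f64_to_monotonic_u64_sketch(val: f64) -> u64 {
    let bits = val.to_bits();
    if bits >> 63 == 0 {
        // Positive floats: set the sign bit so they sort above all negatives.
        bits | (1u64 << 63)
    } else {
        // Negative floats: flip every bit to reverse their bit-pattern order.
        !bits
    }
}
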
/// Comparator trait defining the order in which documents should be ordered.
pub trait Comparator<T>: Send + Sync + std::fmt::Debug + Default {
    /// Return the order between two values.
    fn compare(&self, lhs: &T, rhs: &T) -> Ordering;

    /// Return a `ValueRange` matching the values that can still rank above the
    /// provided threshold under this comparator's order.
    fn threshold_to_valuerange(&self, threshold: T) -> ValueRange<T>;
}

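// Illustrative sketch, not from the commit: a minimal custom Comparator under
// the trait above. `AbsComparator` is hypothetical; it orders i64 values by
// absolute value. Returning `ValueRange::All` from `threshold_to_valuerange`
// is always sound -- it merely disables the column-scan pushdown.
#[derive(Debug, Default)]
struct AbsComparator;

impl Comparator<i64> for AbsComparator {
    fn compare(&self, lhs: &i64, rhs: &i64) -> Ordering {
        lhs.abs().cmp(&rhs.abs())
    }

    fn threshold_to_valuerange(&self, _threshold: i64) -> ValueRange<i64> {
        ValueRange::All
    }
}
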
/// Compare values naturally (e.g. 1 < 2).
@@ -25,7 +87,26 @@ pub struct NaturalComparator;
impl<T: PartialOrd> Comparator<T> for NaturalComparator {
    #[inline(always)]
    fn compare(&self, lhs: &T, rhs: &T) -> Ordering {
        lhs.partial_cmp(rhs).unwrap()
        lhs.partial_cmp(rhs).unwrap_or(Ordering::Equal)
    }

    fn threshold_to_valuerange(&self, threshold: T) -> ValueRange<T> {
        ValueRange::GreaterThan(threshold, false)
    }
}

/// A (partial) implementation of comparison for OwnedValue.
///
/// Intended for use within columns of homogeneous types. Null and the numeric
/// cross-type cases are handled explicitly; other mismatched types are ordered
/// by their type discriminant, and comparing two values of a type not handled
/// in `compare_owned_value` will panic.
impl Comparator<OwnedValue> for NaturalComparator {
    #[inline(always)]
    fn compare(&self, lhs: &OwnedValue, rhs: &OwnedValue) -> Ordering {
        compare_owned_value::</* NULLS_FIRST= */ true>(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: OwnedValue) -> ValueRange<OwnedValue> {
        ValueRange::GreaterThan(threshold, false)
    }
}

@@ -44,13 +125,69 @@ impl<T: PartialOrd> Comparator<T> for NaturalComparator {
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct ReverseComparator;

impl<T> Comparator<T> for ReverseComparator
where NaturalComparator: Comparator<T>
macro_rules! impl_reverse_comparator_primitive {
    ($($t:ty),*) => {
        $(
            impl Comparator<$t> for ReverseComparator {
                #[inline(always)]
                fn compare(&self, lhs: &$t, rhs: &$t) -> Ordering {
                    NaturalComparator.compare(rhs, lhs)
                }

                fn threshold_to_valuerange(&self, threshold: $t) -> ValueRange<$t> {
                    ValueRange::LessThan(threshold, true)
                }
            }
        )*
    }
}

impl_reverse_comparator_primitive!(
    bool,
    u8,
    u16,
    u32,
    u64,
    u128,
    usize,
    i8,
    i16,
    i32,
    i64,
    i128,
    isize,
    f32,
    f64,
    String,
    crate::DateTime,
    Vec<u8>,
    crate::schema::Facet
);

impl<T: PartialOrd + Send + Sync + std::fmt::Debug + Clone + 'static> Comparator<Option<T>>
    for ReverseComparator
{
    #[inline(always)]
    fn compare(&self, lhs: &T, rhs: &T) -> Ordering {
    fn compare(&self, lhs: &Option<T>, rhs: &Option<T>) -> Ordering {
        NaturalComparator.compare(rhs, lhs)
    }

    fn threshold_to_valuerange(&self, threshold: Option<T>) -> ValueRange<Option<T>> {
        let is_some = threshold.is_some();
        ValueRange::LessThan(threshold, is_some)
    }
}

impl Comparator<OwnedValue> for ReverseComparator {
    #[inline(always)]
    fn compare(&self, lhs: &OwnedValue, rhs: &OwnedValue) -> Ordering {
        NaturalComparator.compare(rhs, lhs)
    }

    fn threshold_to_valuerange(&self, threshold: OwnedValue) -> ValueRange<OwnedValue> {
        let is_not_null = !matches!(threshold, OwnedValue::Null);
        ValueRange::LessThan(threshold, is_not_null)
    }
}

/// Compare values in reverse, but treating `None` as lower than `Some`.
@@ -77,6 +214,14 @@ where ReverseComparator: Comparator<T>
            (Some(lhs), Some(rhs)) => ReverseComparator.compare(lhs, rhs),
        }
    }

    fn threshold_to_valuerange(&self, threshold: Option<T>) -> ValueRange<Option<T>> {
        if threshold.is_some() {
            ValueRange::LessThan(threshold, false)
        } else {
            ValueRange::GreaterThan(threshold, false)
        }
    }
}

impl Comparator<u32> for ReverseNoneIsLowerComparator {
@@ -84,6 +229,10 @@ impl Comparator<u32> for ReverseNoneIsLowerComparator {
    fn compare(&self, lhs: &u32, rhs: &u32) -> Ordering {
        ReverseComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: u32) -> ValueRange<u32> {
        ValueRange::LessThan(threshold, false)
    }
}

impl Comparator<u64> for ReverseNoneIsLowerComparator {
@@ -91,6 +240,10 @@ impl Comparator<u64> for ReverseNoneIsLowerComparator {
    fn compare(&self, lhs: &u64, rhs: &u64) -> Ordering {
        ReverseComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: u64) -> ValueRange<u64> {
        ValueRange::LessThan(threshold, false)
    }
}

impl Comparator<f64> for ReverseNoneIsLowerComparator {
@@ -98,6 +251,10 @@ impl Comparator<f64> for ReverseNoneIsLowerComparator {
    fn compare(&self, lhs: &f64, rhs: &f64) -> Ordering {
        ReverseComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: f64) -> ValueRange<f64> {
        ValueRange::LessThan(threshold, false)
    }
}

impl Comparator<f32> for ReverseNoneIsLowerComparator {
@@ -105,6 +262,10 @@ impl Comparator<f32> for ReverseNoneIsLowerComparator {
    fn compare(&self, lhs: &f32, rhs: &f32) -> Ordering {
        ReverseComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: f32) -> ValueRange<f32> {
        ValueRange::LessThan(threshold, false)
    }
}

impl Comparator<i64> for ReverseNoneIsLowerComparator {
@@ -112,6 +273,10 @@ impl Comparator<i64> for ReverseNoneIsLowerComparator {
    fn compare(&self, lhs: &i64, rhs: &i64) -> Ordering {
        ReverseComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: i64) -> ValueRange<i64> {
        ValueRange::LessThan(threshold, false)
    }
}

impl Comparator<String> for ReverseNoneIsLowerComparator {
@@ -119,6 +284,21 @@ impl Comparator<String> for ReverseNoneIsLowerComparator {
    fn compare(&self, lhs: &String, rhs: &String) -> Ordering {
        ReverseComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: String) -> ValueRange<String> {
        ValueRange::LessThan(threshold, false)
    }
}

impl Comparator<OwnedValue> for ReverseNoneIsLowerComparator {
    #[inline(always)]
    fn compare(&self, lhs: &OwnedValue, rhs: &OwnedValue) -> Ordering {
        compare_owned_value::</* NULLS_FIRST= */ false>(rhs, lhs)
    }

    fn threshold_to_valuerange(&self, threshold: OwnedValue) -> ValueRange<OwnedValue> {
        ValueRange::LessThan(threshold, false)
    }
}

/// Compare values naturally, but treating `None` as higher than `Some`.
@@ -141,6 +321,15 @@ where NaturalComparator: Comparator<T>
            (Some(lhs), Some(rhs)) => NaturalComparator.compare(lhs, rhs),
        }
    }

    fn threshold_to_valuerange(&self, threshold: Option<T>) -> ValueRange<Option<T>> {
        if threshold.is_some() {
            let is_some = threshold.is_some();
            ValueRange::GreaterThan(threshold, is_some)
        } else {
            ValueRange::LessThan(threshold, false)
        }
    }
}

impl Comparator<u32> for NaturalNoneIsHigherComparator {
@@ -148,6 +337,10 @@ impl Comparator<u32> for NaturalNoneIsHigherComparator {
    fn compare(&self, lhs: &u32, rhs: &u32) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: u32) -> ValueRange<u32> {
        ValueRange::GreaterThan(threshold, true)
    }
}

impl Comparator<u64> for NaturalNoneIsHigherComparator {
@@ -155,6 +348,10 @@ impl Comparator<u64> for NaturalNoneIsHigherComparator {
    fn compare(&self, lhs: &u64, rhs: &u64) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: u64) -> ValueRange<u64> {
        ValueRange::GreaterThan(threshold, true)
    }
}

impl Comparator<f64> for NaturalNoneIsHigherComparator {
@@ -162,6 +359,10 @@ impl Comparator<f64> for NaturalNoneIsHigherComparator {
    fn compare(&self, lhs: &f64, rhs: &f64) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: f64) -> ValueRange<f64> {
        ValueRange::GreaterThan(threshold, true)
    }
}

impl Comparator<f32> for NaturalNoneIsHigherComparator {
@@ -169,6 +370,10 @@ impl Comparator<f32> for NaturalNoneIsHigherComparator {
    fn compare(&self, lhs: &f32, rhs: &f32) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: f32) -> ValueRange<f32> {
        ValueRange::GreaterThan(threshold, true)
    }
}

impl Comparator<i64> for NaturalNoneIsHigherComparator {
@@ -176,6 +381,10 @@ impl Comparator<i64> for NaturalNoneIsHigherComparator {
    fn compare(&self, lhs: &i64, rhs: &i64) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: i64) -> ValueRange<i64> {
        ValueRange::GreaterThan(threshold, true)
    }
}

impl Comparator<String> for NaturalNoneIsHigherComparator {
@@ -183,6 +392,21 @@ impl Comparator<String> for NaturalNoneIsHigherComparator {
    fn compare(&self, lhs: &String, rhs: &String) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: String) -> ValueRange<String> {
        ValueRange::GreaterThan(threshold, true)
    }
}

impl Comparator<OwnedValue> for NaturalNoneIsHigherComparator {
    #[inline(always)]
    fn compare(&self, lhs: &OwnedValue, rhs: &OwnedValue) -> Ordering {
        compare_owned_value::</* NULLS_FIRST= */ false>(lhs, rhs)
    }

    fn threshold_to_valuerange(&self, threshold: OwnedValue) -> ValueRange<OwnedValue> {
        ValueRange::GreaterThan(threshold, true)
    }
}

/// An enum representing the different sort orders.
@@ -224,6 +448,19 @@ where
            ComparatorEnum::NaturalNoneHigher => NaturalNoneIsHigherComparator.compare(lhs, rhs),
        }
    }

    fn threshold_to_valuerange(&self, threshold: T) -> ValueRange<T> {
        match self {
            ComparatorEnum::Natural => NaturalComparator.threshold_to_valuerange(threshold),
            ComparatorEnum::Reverse => ReverseComparator.threshold_to_valuerange(threshold),
            ComparatorEnum::ReverseNoneLower => {
                ReverseNoneIsLowerComparator.threshold_to_valuerange(threshold)
            }
            ComparatorEnum::NaturalNoneHigher => {
                NaturalNoneIsHigherComparator.threshold_to_valuerange(threshold)
            }
        }
    }
}

impl<Head, Tail, LeftComparator, RightComparator> Comparator<(Head, Tail)>
@@ -238,6 +475,10 @@ where
            .compare(&lhs.0, &rhs.0)
            .then_with(|| self.1.compare(&lhs.1, &rhs.1))
    }

    fn threshold_to_valuerange(&self, threshold: (Head, Tail)) -> ValueRange<(Head, Tail)> {
        ValueRange::GreaterThan(threshold, false)
    }
}

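// Illustrative sketch, not from the commit: lexicographic comparison with
// per-component comparators, assuming the `(Head, Tail)` impl above. The
// first components tie ("a" == "a"); under `ReverseComparator`, 1 compares
// as Greater than 2, so ("a", 2) ranks ahead of ("a", 1).
#[allow(dead_code)]
fn tuple_comparator_example_sketch() {
    let cmp = (NaturalComparator, ReverseComparator);
    assert_eq!(
        cmp.compare(&("a".to_string(), 1u64), &("a".to_string(), 2u64)),
        Ordering::Greater
    );
}
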
impl<Type1, Type2, Type3, Comparator1, Comparator2, Comparator3> Comparator<(Type1, (Type2, Type3))>
@@ -254,6 +495,13 @@ where
            .then_with(|| self.1.compare(&lhs.1 .0, &rhs.1 .0))
            .then_with(|| self.2.compare(&lhs.1 .1, &rhs.1 .1))
    }

    fn threshold_to_valuerange(
        &self,
        threshold: (Type1, (Type2, Type3)),
    ) -> ValueRange<(Type1, (Type2, Type3))> {
        ValueRange::GreaterThan(threshold, false)
    }
}

impl<Type1, Type2, Type3, Comparator1, Comparator2, Comparator3> Comparator<(Type1, Type2, Type3)>
@@ -270,6 +518,13 @@ where
            .then_with(|| self.1.compare(&lhs.1, &rhs.1))
            .then_with(|| self.2.compare(&lhs.2, &rhs.2))
    }

    fn threshold_to_valuerange(
        &self,
        threshold: (Type1, Type2, Type3),
    ) -> ValueRange<(Type1, Type2, Type3)> {
        ValueRange::GreaterThan(threshold, false)
    }
}

impl<Type1, Type2, Type3, Type4, Comparator1, Comparator2, Comparator3, Comparator4>
@@ -293,6 +548,13 @@ where
            .then_with(|| self.2.compare(&lhs.1 .1 .0, &rhs.1 .1 .0))
            .then_with(|| self.3.compare(&lhs.1 .1 .1, &rhs.1 .1 .1))
    }

    fn threshold_to_valuerange(
        &self,
        threshold: (Type1, (Type2, (Type3, Type4))),
    ) -> ValueRange<(Type1, (Type2, (Type3, Type4)))> {
        ValueRange::GreaterThan(threshold, false)
    }
}

impl<Type1, Type2, Type3, Type4, Comparator1, Comparator2, Comparator3, Comparator4>
@@ -316,6 +578,13 @@ where
            .then_with(|| self.2.compare(&lhs.2, &rhs.2))
            .then_with(|| self.3.compare(&lhs.3, &rhs.3))
    }

    fn threshold_to_valuerange(
        &self,
        threshold: (Type1, Type2, Type3, Type4),
    ) -> ValueRange<(Type1, Type2, Type3, Type4)> {
        ValueRange::GreaterThan(threshold, false)
    }
}

impl<TSortKeyComputer> SortKeyComputer for (TSortKeyComputer, ComparatorEnum)
@@ -404,16 +673,33 @@ impl<TSegmentSortKeyComputer, TSegmentSortKey, TComparator> SegmentSortKeyComput
    for SegmentSortKeyComputerWithComparator<TSegmentSortKeyComputer, TComparator>
where
    TSegmentSortKeyComputer: SegmentSortKeyComputer<SegmentSortKey = TSegmentSortKey>,
    TSegmentSortKey: PartialOrd + Clone + 'static + Sync + Send,
    TComparator: Comparator<TSegmentSortKey> + 'static + Sync + Send,
    TSegmentSortKey: Clone + 'static + Sync + Send,
    TComparator: Comparator<TSegmentSortKey> + Clone + 'static + Sync + Send,
{
    type SortKey = TSegmentSortKeyComputer::SortKey;
    type SegmentSortKey = TSegmentSortKey;
    type SegmentComparator = TComparator;
    type Buffer = TSegmentSortKeyComputer::Buffer;

    fn segment_comparator(&self) -> Self::SegmentComparator {
        self.comparator.clone()
    }

    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Self::SegmentSortKey {
        self.segment_sort_key_computer.segment_sort_key(doc, score)
    }

    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        buffer: &mut Self::Buffer,
        filter: ValueRange<Self::SegmentSortKey>,
    ) {
        self.segment_sort_key_computer
            .segment_sort_keys(input_docs, output, buffer, filter)
    }

    #[inline(always)]
    fn compare_segment_sort_key(
        &self,
@@ -432,6 +718,7 @@ where
#[cfg(test)]
mod tests {
    use super::*;
    use crate::schema::OwnedValue;

    #[test]
    fn test_natural_none_is_higher() {
@@ -455,4 +742,27 @@ mod tests {
        // compare(None, None) should be Equal.
        assert_eq!(comp.compare(&null, &null), Ordering::Equal);
    }

    #[test]
    fn test_mixed_ownedvalue_compare() {
        let u = OwnedValue::U64(10);
        let i = OwnedValue::I64(10);
        let f = OwnedValue::F64(10.0);

        let nc = NaturalComparator;
        assert_eq!(nc.compare(&u, &i), Ordering::Equal);
        assert_eq!(nc.compare(&u, &f), Ordering::Equal);
        assert_eq!(nc.compare(&i, &f), Ordering::Equal);

        let u2 = OwnedValue::U64(11);
        assert_eq!(nc.compare(&u2, &f), Ordering::Greater);

        let s = OwnedValue::Str("a".to_string());
        // Str < U64
        assert_eq!(nc.compare(&s, &u), Ordering::Less);
        // Str < I64
        assert_eq!(nc.compare(&s, &i), Ordering::Less);
        // Str < F64
        assert_eq!(nc.compare(&s, &f), Ordering::Less);
    }
}

410 src/collector/sort_key/sort_by_erased_type.rs Normal file
@@ -0,0 +1,410 @@
use columnar::{ColumnType, MonotonicallyMappableToU64, ValueRange};

use crate::collector::sort_key::sort_by_score::SortBySimilarityScoreSegmentComputer;
use crate::collector::sort_key::{
    NaturalComparator, SortBySimilarityScore, SortByStaticFastValue, SortByString,
};
use crate::collector::{ComparableDoc, SegmentSortKeyComputer, SortKeyComputer};
use crate::fastfield::FastFieldNotAvailableError;
use crate::schema::OwnedValue;
use crate::{DateTime, DocId, Score};

/// Sort by the boxed / OwnedValue representation of either a fast field, or of the score.
///
/// Using the OwnedValue representation allows for type erasure, and can be useful when sort orders
/// are not known until runtime. But it comes with a performance cost: wherever possible, prefer to
/// use a SortKeyComputer implementation with a known type at compile time.
#[derive(Debug, Clone)]
pub enum SortByErasedType {
    /// Sort by a fast field
    Field(String),
    /// Sort by score
    Score,
}

impl SortByErasedType {
    /// Creates a new sort key computer which will sort by the given fast field column, with type
    /// erasure.
    pub fn for_field(column_name: impl ToString) -> Self {
        Self::Field(column_name.to_string())
    }

    /// Creates a new sort key computer which will sort by score, with type erasure.
    pub fn for_score() -> Self {
        Self::Score
    }
}

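// Illustrative sketch, not from the commit: intended call-site usage of the
// type-erased computer, mirroring the tests at the end of this file. The
// field name "price" is hypothetical; the concrete column type is only
// discovered per segment at search time, and each hit's key comes back as an
// `OwnedValue`.
//
//     let collector = TopDocs::with_limit(10)
//         .order_by((SortByErasedType::for_field("price"), ComparatorEnum::Natural));
//     let hits: Vec<(OwnedValue, DocAddress)> = searcher.search(&AllQuery, &collector)?;
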
trait ErasedSegmentSortKeyComputer: Send + Sync {
    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Option<u64>;
    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Option<u64>, DocId>>,
        filter: ValueRange<Option<u64>>,
    );
    fn convert_segment_sort_key(&self, sort_key: Option<u64>) -> OwnedValue;
}

struct ErasedSegmentSortKeyComputerWrapper<C, F>
where
    C: SegmentSortKeyComputer<SegmentSortKey = Option<u64>> + Send + Sync,
    F: Fn(C::SortKey) -> OwnedValue + Send + Sync + 'static,
{
    inner: C,
    converter: F,
    buffer: C::Buffer,
}

impl<C, F> ErasedSegmentSortKeyComputer for ErasedSegmentSortKeyComputerWrapper<C, F>
where
    C: SegmentSortKeyComputer<SegmentSortKey = Option<u64>> + Send + Sync,
    F: Fn(C::SortKey) -> OwnedValue + Send + Sync + 'static,
{
    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Option<u64> {
        self.inner.segment_sort_key(doc, score)
    }

    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Option<u64>, DocId>>,
        filter: ValueRange<Option<u64>>,
    ) {
        self.inner
            .segment_sort_keys(input_docs, output, &mut self.buffer, filter)
    }

    fn convert_segment_sort_key(&self, sort_key: Option<u64>) -> OwnedValue {
        let val = self.inner.convert_segment_sort_key(sort_key);
        (self.converter)(val)
    }
}

struct ScoreSegmentSortKeyComputer {
    segment_computer: SortBySimilarityScoreSegmentComputer,
}

impl ErasedSegmentSortKeyComputer for ScoreSegmentSortKeyComputer {
    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Option<u64> {
        let score_value: f64 = self.segment_computer.segment_sort_key(doc, score).into();
        Some(score_value.to_u64())
    }

    fn segment_sort_keys(
        &mut self,
        _input_docs: &[DocId],
        _output: &mut Vec<ComparableDoc<Option<u64>, DocId>>,
        _filter: ValueRange<Option<u64>>,
    ) {
        unimplemented!("Batch computation not supported for score sorting")
    }

    fn convert_segment_sort_key(&self, sort_key: Option<u64>) -> OwnedValue {
        let score_value: u64 = sort_key.expect("This implementation always produces a score.");
        OwnedValue::F64(f64::from_u64(score_value))
    }
}

impl SortKeyComputer for SortByErasedType {
    type SortKey = OwnedValue;
    type Child = ErasedColumnSegmentSortKeyComputer;
    type Comparator = NaturalComparator;

    fn requires_scoring(&self) -> bool {
        matches!(self, Self::Score)
    }

    fn segment_sort_key_computer(
        &self,
        segment_reader: &crate::SegmentReader,
    ) -> crate::Result<Self::Child> {
        let inner: Box<dyn ErasedSegmentSortKeyComputer> = match self {
            Self::Field(column_name) => {
                let fast_fields = segment_reader.fast_fields();
                // TODO: We currently double-open the column to avoid relying on the implementation
                // details of `SortByString` or `SortByStaticFastValue`. Once
                // https://github.com/quickwit-oss/tantivy/issues/2776 is resolved, we should
                // consider directly constructing the appropriate `SegmentSortKeyComputer` type for
                // the column that we open here.
                let (_column, column_type) =
                    fast_fields.u64_lenient(column_name)?.ok_or_else(|| {
                        FastFieldNotAvailableError {
                            field_name: column_name.to_owned(),
                        }
                    })?;

                match column_type {
                    ColumnType::Str => {
                        let computer = SortByString::for_field(column_name);
                        let inner = computer.segment_sort_key_computer(segment_reader)?;
                        Box::new(ErasedSegmentSortKeyComputerWrapper {
                            inner,
                            converter: |val: Option<String>| {
                                val.map(OwnedValue::Str).unwrap_or(OwnedValue::Null)
                            },
                            buffer: Default::default(),
                        })
                    }
                    ColumnType::U64 => {
                        let computer = SortByStaticFastValue::<u64>::for_field(column_name);
                        let inner = computer.segment_sort_key_computer(segment_reader)?;
                        Box::new(ErasedSegmentSortKeyComputerWrapper {
                            inner,
                            converter: |val: Option<u64>| {
                                val.map(OwnedValue::U64).unwrap_or(OwnedValue::Null)
                            },
                            buffer: Default::default(),
                        })
                    }
                    ColumnType::I64 => {
                        let computer = SortByStaticFastValue::<i64>::for_field(column_name);
                        let inner = computer.segment_sort_key_computer(segment_reader)?;
                        Box::new(ErasedSegmentSortKeyComputerWrapper {
                            inner,
                            converter: |val: Option<i64>| {
                                val.map(OwnedValue::I64).unwrap_or(OwnedValue::Null)
                            },
                            buffer: Default::default(),
                        })
                    }
                    ColumnType::F64 => {
                        let computer = SortByStaticFastValue::<f64>::for_field(column_name);
                        let inner = computer.segment_sort_key_computer(segment_reader)?;
                        Box::new(ErasedSegmentSortKeyComputerWrapper {
                            inner,
                            converter: |val: Option<f64>| {
                                val.map(OwnedValue::F64).unwrap_or(OwnedValue::Null)
                            },
                            buffer: Default::default(),
                        })
                    }
                    ColumnType::Bool => {
                        let computer = SortByStaticFastValue::<bool>::for_field(column_name);
                        let inner = computer.segment_sort_key_computer(segment_reader)?;
                        Box::new(ErasedSegmentSortKeyComputerWrapper {
                            inner,
                            converter: |val: Option<bool>| {
                                val.map(OwnedValue::Bool).unwrap_or(OwnedValue::Null)
                            },
                            buffer: Default::default(),
                        })
                    }
                    ColumnType::DateTime => {
                        let computer = SortByStaticFastValue::<DateTime>::for_field(column_name);
                        let inner = computer.segment_sort_key_computer(segment_reader)?;
                        Box::new(ErasedSegmentSortKeyComputerWrapper {
                            inner,
                            converter: |val: Option<DateTime>| {
                                val.map(OwnedValue::Date).unwrap_or(OwnedValue::Null)
                            },
                            buffer: Default::default(),
                        })
                    }
                    column_type => {
                        return Err(crate::TantivyError::SchemaError(format!(
                            "Field `{}` is of type {column_type:?}, which is not supported for \
                             sorting by owned value yet.",
                            column_name
                        )))
                    }
                }
            }
            Self::Score => Box::new(ScoreSegmentSortKeyComputer {
                segment_computer: SortBySimilarityScore
                    .segment_sort_key_computer(segment_reader)?,
            }),
        };
        Ok(ErasedColumnSegmentSortKeyComputer { inner })
    }
}

pub struct ErasedColumnSegmentSortKeyComputer {
    inner: Box<dyn ErasedSegmentSortKeyComputer>,
}

impl SegmentSortKeyComputer for ErasedColumnSegmentSortKeyComputer {
    type SortKey = OwnedValue;
    type SegmentSortKey = Option<u64>;
    type SegmentComparator = NaturalComparator;
    type Buffer = ();

    #[inline(always)]
    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Option<u64> {
        self.inner.segment_sort_key(doc, score)
    }

    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        _buffer: &mut Self::Buffer,
        filter: ValueRange<Self::SegmentSortKey>,
    ) {
        self.inner.segment_sort_keys(input_docs, output, filter)
    }

    fn convert_segment_sort_key(&self, segment_sort_key: Self::SegmentSortKey) -> OwnedValue {
        self.inner.convert_segment_sort_key(segment_sort_key)
    }
}

#[cfg(test)]
mod tests {
    use crate::collector::sort_key::{ComparatorEnum, SortByErasedType};
    use crate::collector::TopDocs;
    use crate::query::AllQuery;
    use crate::schema::{OwnedValue, Schema, FAST, TEXT};
    use crate::Index;

    #[test]
    fn test_sort_by_owned_u64() {
        let mut schema_builder = Schema::builder();
        let id_field = schema_builder.add_u64_field("id", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut writer = index.writer_for_tests().unwrap();
        writer.add_document(doc!(id_field => 10u64)).unwrap();
        writer.add_document(doc!(id_field => 2u64)).unwrap();
        writer.add_document(doc!()).unwrap();
        writer.commit().unwrap();

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();

        let collector = TopDocs::with_limit(10)
            .order_by((SortByErasedType::for_field("id"), ComparatorEnum::Natural));
        let top_docs = searcher.search(&AllQuery, &collector).unwrap();

        let values: Vec<OwnedValue> = top_docs.into_iter().map(|(key, _)| key).collect();

        assert_eq!(
            values,
            vec![OwnedValue::U64(10), OwnedValue::U64(2), OwnedValue::Null]
        );

        let collector = TopDocs::with_limit(10).order_by((
            SortByErasedType::for_field("id"),
            ComparatorEnum::ReverseNoneLower,
        ));
        let top_docs = searcher.search(&AllQuery, &collector).unwrap();

        let values: Vec<OwnedValue> = top_docs.into_iter().map(|(key, _)| key).collect();

        assert_eq!(
            values,
            vec![OwnedValue::U64(2), OwnedValue::U64(10), OwnedValue::Null]
        );
    }

    #[test]
    fn test_sort_by_owned_string() {
        let mut schema_builder = Schema::builder();
        let city_field = schema_builder.add_text_field("city", FAST | TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut writer = index.writer_for_tests().unwrap();
        writer.add_document(doc!(city_field => "tokyo")).unwrap();
        writer.add_document(doc!(city_field => "austin")).unwrap();
        writer.add_document(doc!()).unwrap();
        writer.commit().unwrap();

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();

        let collector = TopDocs::with_limit(10).order_by((
            SortByErasedType::for_field("city"),
            ComparatorEnum::ReverseNoneLower,
        ));
        let top_docs = searcher.search(&AllQuery, &collector).unwrap();

        let values: Vec<OwnedValue> = top_docs.into_iter().map(|(key, _)| key).collect();

        assert_eq!(
            values,
            vec![
                OwnedValue::Str("austin".to_string()),
                OwnedValue::Str("tokyo".to_string()),
                OwnedValue::Null
            ]
        );
    }

    #[test]
    fn test_sort_by_owned_reverse() {
        let mut schema_builder = Schema::builder();
        let id_field = schema_builder.add_u64_field("id", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut writer = index.writer_for_tests().unwrap();
        writer.add_document(doc!(id_field => 10u64)).unwrap();
        writer.add_document(doc!(id_field => 2u64)).unwrap();
        writer.add_document(doc!()).unwrap();
        writer.commit().unwrap();

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();

        let collector = TopDocs::with_limit(10)
            .order_by((SortByErasedType::for_field("id"), ComparatorEnum::Reverse));
        let top_docs = searcher.search(&AllQuery, &collector).unwrap();

        let values: Vec<OwnedValue> = top_docs.into_iter().map(|(key, _)| key).collect();

        assert_eq!(
            values,
            vec![OwnedValue::Null, OwnedValue::U64(2), OwnedValue::U64(10)]
        );
    }

    #[test]
    fn test_sort_by_owned_score() {
        let mut schema_builder = Schema::builder();
        let body_field = schema_builder.add_text_field("body", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut writer = index.writer_for_tests().unwrap();
        writer.add_document(doc!(body_field => "a a")).unwrap();
        writer.add_document(doc!(body_field => "a")).unwrap();
        writer.commit().unwrap();

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let query_parser = crate::query::QueryParser::for_index(&index, vec![body_field]);
        let query = query_parser.parse_query("a").unwrap();

        // Sort by score descending (Natural)
        let collector = TopDocs::with_limit(10)
            .order_by((SortByErasedType::for_score(), ComparatorEnum::Natural));
        let top_docs = searcher.search(&query, &collector).unwrap();

        let values: Vec<f64> = top_docs
            .into_iter()
            .map(|(key, _)| match key {
                OwnedValue::F64(val) => val,
                _ => panic!("Wrong type {key:?}"),
            })
            .collect();

        assert_eq!(values.len(), 2);
        assert!(values[0] > values[1]);

        // Sort by score ascending (ReverseNoneLower)
        let collector = TopDocs::with_limit(10).order_by((
            SortByErasedType::for_score(),
            ComparatorEnum::ReverseNoneLower,
        ));
        let top_docs = searcher.search(&query, &collector).unwrap();

        let values: Vec<f64> = top_docs
            .into_iter()
            .map(|(key, _)| match key {
                OwnedValue::F64(val) => val,
                _ => panic!("Wrong type {key:?}"),
            })
            .collect();

        assert_eq!(values.len(), 2);
        assert!(values[0] < values[1]);
    }
}

@@ -1,5 +1,7 @@
use columnar::ValueRange;

use crate::collector::sort_key::NaturalComparator;
use crate::collector::{SegmentSortKeyComputer, SortKeyComputer, TopNComputer};
use crate::collector::{ComparableDoc, SegmentSortKeyComputer, SortKeyComputer, TopNComputer};
use crate::{DocAddress, DocId, Score};

/// Sort by similarity score.
@@ -9,7 +11,7 @@ pub struct SortBySimilarityScore;
impl SortKeyComputer for SortBySimilarityScore {
    type SortKey = Score;

    type Child = SortBySimilarityScore;
    type Child = SortBySimilarityScoreSegmentComputer;

    type Comparator = NaturalComparator;

@@ -21,7 +23,7 @@ impl SortKeyComputer for SortBySimilarityScore {
        &self,
        _segment_reader: &crate::SegmentReader,
    ) -> crate::Result<Self::Child> {
        Ok(SortBySimilarityScore)
        Ok(SortBySimilarityScoreSegmentComputer)
    }

    // Sorting by score is special in that it allows for the Block-Wand optimization.
@@ -61,16 +63,29 @@ impl SortKeyComputer for SortBySimilarityScore {
    }
}

impl SegmentSortKeyComputer for SortBySimilarityScore {
    type SortKey = Score;
pub struct SortBySimilarityScoreSegmentComputer;

impl SegmentSortKeyComputer for SortBySimilarityScoreSegmentComputer {
    type SortKey = Score;
    type SegmentSortKey = Score;
    type SegmentComparator = NaturalComparator;
    type Buffer = ();

    #[inline(always)]
    fn segment_sort_key(&mut self, _doc: DocId, score: Score) -> Score {
        score
    }

    fn segment_sort_keys(
        &mut self,
        _input_docs: &[DocId],
        _output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        _buffer: &mut Self::Buffer,
        _filter: ValueRange<Self::SegmentSortKey>,
    ) {
        unimplemented!("Batch computation not supported for score sorting")
    }

    fn convert_segment_sort_key(&self, score: Score) -> Score {
        score
    }

|
||||
use std::marker::PhantomData;
|
||||
|
||||
use columnar::Column;
|
||||
use columnar::{Column, ValueRange};
|
||||
|
||||
use crate::collector::sort_key::sort_key_computer::convert_optional_u64_range_to_u64_range;
|
||||
use crate::collector::sort_key::NaturalComparator;
|
||||
use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
|
||||
use crate::collector::{ComparableDoc, SegmentSortKeyComputer, SortKeyComputer};
|
||||
use crate::fastfield::{FastFieldNotAvailableError, FastValue};
|
||||
use crate::{DocId, Score, SegmentReader};
|
||||
|
||||
@@ -34,9 +35,7 @@ impl<T: FastValue> SortByStaticFastValue<T> {
|
||||
|
||||
impl<T: FastValue> SortKeyComputer for SortByStaticFastValue<T> {
|
||||
type Child = SortByFastValueSegmentSortKeyComputer<T>;
|
||||
|
||||
type SortKey = Option<T>;
|
||||
|
||||
type Comparator = NaturalComparator;
|
||||
|
||||
fn check_schema(&self, schema: &crate::schema::Schema) -> crate::Result<()> {
|
||||
@@ -84,15 +83,112 @@ pub struct SortByFastValueSegmentSortKeyComputer<T> {
|
||||
|
||||
impl<T: FastValue> SegmentSortKeyComputer for SortByFastValueSegmentSortKeyComputer<T> {
|
||||
type SortKey = Option<T>;
|
||||
|
||||
type SegmentSortKey = Option<u64>;
|
||||
type SegmentComparator = NaturalComparator;
|
||||
type Buffer = ();
|
||||
|
||||
#[inline(always)]
|
||||
fn segment_sort_key(&mut self, doc: DocId, _score: Score) -> Self::SegmentSortKey {
|
||||
self.sort_column.first(doc)
|
||||
}
|
||||
|
||||
fn segment_sort_keys(
|
||||
&mut self,
|
||||
input_docs: &[DocId],
|
||||
output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
|
||||
_buffer: &mut Self::Buffer,
|
||||
filter: ValueRange<Self::SegmentSortKey>,
|
||||
) {
|
||||
let u64_filter = convert_optional_u64_range_to_u64_range(filter);
|
||||
self.sort_column
|
||||
.first_vals_in_value_range(input_docs, output, u64_filter);
|
||||
}
|
||||
|
||||
fn convert_segment_sort_key(&self, sort_key: Self::SegmentSortKey) -> Self::SortKey {
|
||||
sort_key.map(T::from_u64)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::Index;
|
||||
|
||||
#[test]
|
||||
fn test_sort_by_fast_value_batch() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field_col = schema_builder.add_u64_field("field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
index_writer
|
||||
.add_document(crate::doc!(field_col => 10u64))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(crate::doc!(field_col => 20u64))
|
||||
.unwrap();
|
||||
index_writer.add_document(crate::doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
let sorter = SortByStaticFastValue::<u64>::for_field("field");
|
||||
let mut computer = sorter.segment_sort_key_computer(segment_reader).unwrap();
|
||||
|
||||
let mut docs = vec![0, 1, 2];
|
||||
let mut output = Vec::new();
|
||||
let mut buffer = ();
|
||||
computer.segment_sort_keys(&mut docs, &mut output, &mut buffer, ValueRange::All);
|
||||
|
||||
assert_eq!(
|
||||
output.iter().map(|c| c.sort_key).collect::<Vec<_>>(),
|
||||
&[Some(10), Some(20), None]
|
||||
);
|
||||
assert_eq!(output.iter().map(|c| c.doc).collect::<Vec<_>>(), &[0, 1, 2]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sort_by_fast_value_batch_with_filter() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field_col = schema_builder.add_u64_field("field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
index_writer
|
||||
.add_document(crate::doc!(field_col => 10u64))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(crate::doc!(field_col => 20u64))
|
||||
.unwrap();
|
||||
index_writer.add_document(crate::doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
let sorter = SortByStaticFastValue::<u64>::for_field("field");
|
||||
let mut computer = sorter.segment_sort_key_computer(segment_reader).unwrap();
|
||||
|
||||
let mut docs = vec![0, 1, 2];
|
||||
let mut output = Vec::new();
|
||||
let mut buffer = ();
|
||||
computer.segment_sort_keys(
|
||||
&mut docs,
|
||||
&mut output,
|
||||
&mut buffer,
|
||||
ValueRange::GreaterThan(Some(15u64), false /* inclusive */),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
output.iter().map(|c| c.sort_key).collect::<Vec<_>>(),
|
||||
&[Some(20)]
|
||||
);
|
||||
assert_eq!(output.iter().map(|c| c.doc).collect::<Vec<_>>(), &[1]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,10 @@
use columnar::StrColumn;
use columnar::{StrColumn, ValueRange};

use crate::collector::sort_key::sort_key_computer::{
    convert_optional_u64_range_to_u64_range, range_contains_none,
};
use crate::collector::sort_key::NaturalComparator;
use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
use crate::collector::{ComparableDoc, SegmentSortKeyComputer, SortKeyComputer};
use crate::termdict::TermOrdinal;
use crate::{DocId, Score};

@@ -30,9 +33,7 @@ impl SortByString {

impl SortKeyComputer for SortByString {
    type SortKey = Option<String>;

    type Child = ByStringColumnSegmentSortKeyComputer;

    type Comparator = NaturalComparator;

    fn segment_sort_key_computer(
@@ -50,8 +51,9 @@ pub struct ByStringColumnSegmentSortKeyComputer {

impl SegmentSortKeyComputer for ByStringColumnSegmentSortKeyComputer {
    type SortKey = Option<String>;

    type SegmentSortKey = Option<TermOrdinal>;
    type SegmentComparator = NaturalComparator;
    type Buffer = ();

    #[inline(always)]
    fn segment_sort_key(&mut self, doc: DocId, _score: Score) -> Option<TermOrdinal> {
@@ -59,7 +61,31 @@ impl SegmentSortKeyComputer for ByStringColumnSegmentSortKeyComputer {
        str_column.ords().first(doc)
    }

    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        _buffer: &mut Self::Buffer,
        filter: ValueRange<Self::SegmentSortKey>,
    ) {
        if let Some(str_column) = &self.str_column_opt {
            let u64_filter = convert_optional_u64_range_to_u64_range(filter);
            str_column
                .ords()
                .first_vals_in_value_range(input_docs, output, u64_filter);
        } else if range_contains_none(&filter) {
            for &doc in input_docs {
                output.push(ComparableDoc {
                    doc,
                    sort_key: None,
                });
            }
        }
    }

    fn convert_segment_sort_key(&self, term_ord_opt: Option<TermOrdinal>) -> Option<String> {
        // TODO: Individual lookups to the dictionary like this are very likely to repeatedly
        // decompress the same blocks. See https://github.com/quickwit-oss/tantivy/issues/2776
        let term_ord = term_ord_opt?;
        let str_column = self.str_column_opt.as_ref()?;
        let mut bytes = Vec::new();
@@ -70,3 +96,90 @@ impl SegmentSortKeyComputer for ByStringColumnSegmentSortKeyComputer {
        String::try_from(bytes).ok()
    }
}

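// Illustrative note, not from the commit: term ordinals are segment-local,
// which is why `SegmentSortKey` is `Option<TermOrdinal>` while the global
// `SortKey` is `Option<String>`. As a hypothetical two-segment example,
// "austin" may be ord 0 in one segment and ord 3 in another, so ordinals are
// only compared within a segment and `convert_segment_sort_key` resolves them
// to strings before results are merged across segments.
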
#[cfg(test)]
mod tests {
    use super::*;
    use crate::schema::{Schema, FAST, TEXT};
    use crate::Index;

    #[test]
    fn test_sort_by_string_batch() {
        let mut schema_builder = Schema::builder();
        let field_col = schema_builder.add_text_field("field", FAST | TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();

        index_writer
            .add_document(crate::doc!(field_col => "a"))
            .unwrap();
        index_writer
            .add_document(crate::doc!(field_col => "c"))
            .unwrap();
        index_writer.add_document(crate::doc!()).unwrap();
        index_writer.commit().unwrap();

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let segment_reader = searcher.segment_reader(0);

        let sorter = SortByString::for_field("field");
        let mut computer = sorter.segment_sort_key_computer(segment_reader).unwrap();

        let mut docs = vec![0, 1, 2];
        let mut output = Vec::new();
        let mut buffer = ();
        computer.segment_sort_keys(&mut docs, &mut output, &mut buffer, ValueRange::All);

        assert_eq!(
            output.iter().map(|c| c.sort_key).collect::<Vec<_>>(),
            &[Some(0), Some(1), None]
        );
        assert_eq!(output.iter().map(|c| c.doc).collect::<Vec<_>>(), &[0, 1, 2]);
    }

    #[test]
    fn test_sort_by_string_batch_with_filter() {
        let mut schema_builder = Schema::builder();
        let field_col = schema_builder.add_text_field("field", FAST | TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();

        index_writer
            .add_document(crate::doc!(field_col => "a"))
            .unwrap();
        index_writer
            .add_document(crate::doc!(field_col => "c"))
            .unwrap();
        index_writer.add_document(crate::doc!()).unwrap();
        index_writer.commit().unwrap();

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let segment_reader = searcher.segment_reader(0);

        let sorter = SortByString::for_field("field");
        let mut computer = sorter.segment_sort_key_computer(segment_reader).unwrap();

        let mut docs = vec![0, 1, 2];
        let mut output = Vec::new();
        // Term ordinals in this segment: "a" is ord 0, "c" is ord 1. Filtering
        // with > ord 0 keeps only the docs whose value sorts after "a".
        let mut buffer = ();
        computer.segment_sort_keys(
            &mut docs,
            &mut output,
            &mut buffer,
            ValueRange::GreaterThan(Some(0), false /* inclusive */),
        );

        assert_eq!(
            output.iter().map(|c| c.sort_key).collect::<Vec<_>>(),
            &[Some(1)]
        );
        assert_eq!(output.iter().map(|c| c.doc).collect::<Vec<_>>(), &[1]);
    }
}

@@ -1,8 +1,12 @@
use std::cmp::Ordering;

use columnar::ValueRange;

use crate::collector::sort_key::{Comparator, NaturalComparator};
use crate::collector::sort_key_top_collector::TopBySortKeySegmentCollector;
use crate::collector::{default_collect_segment_impl, SegmentCollector as _, TopNComputer};
use crate::collector::{
    default_collect_segment_impl, ComparableDoc, SegmentCollector as _, TopNComputer,
};
use crate::schema::Schema;
use crate::{DocAddress, DocId, Result, Score, SegmentReader};

@@ -12,17 +16,40 @@ use crate::{DocAddress, DocId, Result, Score, SegmentReader};
/// It is the segment local version of the [`SortKeyComputer`].
pub trait SegmentSortKeyComputer: 'static {
    /// The final score being emitted.
    type SortKey: 'static + PartialOrd + Send + Sync + Clone;
    type SortKey: 'static + Send + Sync + Clone;

    /// Sort key used at the segment level by the `SegmentSortKeyComputer`.
    ///
    /// It is typically small like a `u64`, and is meant to be converted
    /// to the final score at the end of the collection of the segment.
    type SegmentSortKey: 'static + PartialOrd + Clone + Send + Sync + Clone;
    type SegmentSortKey: 'static + Clone + Send + Sync + Clone;

    /// Comparator type.
    type SegmentComparator: Comparator<Self::SegmentSortKey> + Clone + 'static;

    /// Buffer type used for scratch space.
    type Buffer: Default + Send + Sync + 'static;

    /// Returns the segment sort key comparator.
    fn segment_comparator(&self) -> Self::SegmentComparator {
        Self::SegmentComparator::default()
    }

    /// Computes the sort key for the given document and score.
    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Self::SegmentSortKey;

    /// Computes the sort keys for a batch of documents.
    ///
    /// The computed sort keys and document IDs are pushed into the `output` vector.
    /// The `buffer` is used for scratch space.
    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        buffer: &mut Self::Buffer,
        filter: ValueRange<Self::SegmentSortKey>,
    );

    /// Computes the sort key and pushes the document into a TopN computer.
    ///
    /// When using a tuple as the sorting key, the sort key is evaluated in a lazy manner.
@@ -31,12 +58,32 @@ pub trait SegmentSortKeyComputer: 'static {
        &mut self,
        doc: DocId,
        score: Score,
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C>,
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C, Self::Buffer>,
    ) {
        let sort_key = self.segment_sort_key(doc, score);
        top_n_computer.push(sort_key, doc);
    }

    fn compute_sort_keys_and_collect<C: Comparator<Self::SegmentSortKey>>(
        &mut self,
        docs: &[DocId],
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C, Self::Buffer>,
    ) {
        // The capacity of a TopNComputer is larger than 2*n + COLLECT_BLOCK_BUFFER_LEN, so we
        // should always be able to `reserve` space for the entire block.
        top_n_computer.reserve(docs.len());

        let comparator = self.segment_comparator();
        let value_range = if let Some(threshold) = &top_n_computer.threshold {
            comparator.threshold_to_valuerange(threshold.clone())
        } else {
            ValueRange::All
        };

        let (buffer, scratch) = top_n_computer.buffer_and_scratch();
        self.segment_sort_keys(docs, buffer, scratch, value_range);
    }

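    // Illustrative sketch, not from the commit: how a top-n threshold turns
    // into a scan filter above, with values assumed for the example. If the
    // current k-th best key under `NaturalComparator` is 42u64, then
    //
    //     NaturalComparator.threshold_to_valuerange(42u64)
    //         == ValueRange::GreaterThan(42, false)
    //
    // and the batched `segment_sort_keys` call can skip every doc whose
    // column value is <= 42 without materializing a `ComparableDoc` for it.
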
    /// A SegmentSortKeyComputer maps to a SegmentSortKey, but it can also decide on
    /// its ordering.
    ///
@@ -47,27 +94,7 @@ pub trait SegmentSortKeyComputer: 'static {
        left: &Self::SegmentSortKey,
        right: &Self::SegmentSortKey,
    ) -> Ordering {
        NaturalComparator.compare(left, right)
    }

    /// Implementing this method makes it possible to avoid computing
    /// a sort_key entirely if we can assess that it won't pass a threshold
    /// with a partial computation.
    ///
    /// This is currently used for lexicographic sorting.
    fn accept_sort_key_lazy(
        &mut self,
        doc_id: DocId,
        score: Score,
        threshold: &Self::SegmentSortKey,
    ) -> Option<(Ordering, Self::SegmentSortKey)> {
        let sort_key = self.segment_sort_key(doc_id, score);
        let cmp = self.compare_segment_sort_key(&sort_key, threshold);
        if cmp == Ordering::Less {
            None
        } else {
            Some((cmp, sort_key))
        }
        self.segment_comparator().compare(left, right)
    }

    /// Convert a segment level sort key into the global sort key.
@@ -81,7 +108,7 @@ pub trait SegmentSortKeyComputer: 'static {
/// the sort key at a segment scale.
pub trait SortKeyComputer: Sync {
    /// The sort key type.
    type SortKey: 'static + Send + Sync + PartialOrd + Clone + std::fmt::Debug;
    type SortKey: 'static + Send + Sync + Clone + std::fmt::Debug;
    /// Type of the associated [`SegmentSortKeyComputer`].
    type Child: SegmentSortKeyComputer<SortKey = Self::SortKey>;
    /// Comparator type.
@@ -136,11 +163,9 @@ where
    HeadSortKeyComputer: SortKeyComputer,
    TailSortKeyComputer: SortKeyComputer,
{
    type SortKey = (
        <HeadSortKeyComputer::Child as SegmentSortKeyComputer>::SortKey,
        <TailSortKeyComputer::Child as SegmentSortKeyComputer>::SortKey,
    );
    type Child = (HeadSortKeyComputer::Child, TailSortKeyComputer::Child);
    type SortKey = (HeadSortKeyComputer::SortKey, TailSortKeyComputer::SortKey);
    type Child =
        ChainSegmentSortKeyComputer<HeadSortKeyComputer::Child, TailSortKeyComputer::Child>;

    type Comparator = (
        HeadSortKeyComputer::Comparator,
@@ -152,10 +177,10 @@ where
    }

    fn segment_sort_key_computer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
        Ok((
            self.0.segment_sort_key_computer(segment_reader)?,
            self.1.segment_sort_key_computer(segment_reader)?,
        ))
        Ok(ChainSegmentSortKeyComputer {
            head: self.0.segment_sort_key_computer(segment_reader)?,
            tail: self.1.segment_sort_key_computer(segment_reader)?,
        })
    }

    /// Checks whether the schema is compatible with the sort key computer.
@@ -173,20 +198,91 @@ where
    }
}

impl<HeadSegmentSortKeyComputer, TailSegmentSortKeyComputer> SegmentSortKeyComputer
    for (HeadSegmentSortKeyComputer, TailSegmentSortKeyComputer)
pub struct ChainSegmentSortKeyComputer<Head, Tail>
where
    HeadSegmentSortKeyComputer: SegmentSortKeyComputer,
    TailSegmentSortKeyComputer: SegmentSortKeyComputer,
    Head: SegmentSortKeyComputer,
    Tail: SegmentSortKeyComputer,
{
    type SortKey = (
        HeadSegmentSortKeyComputer::SortKey,
        TailSegmentSortKeyComputer::SortKey,
    );
    type SegmentSortKey = (
        HeadSegmentSortKeyComputer::SegmentSortKey,
        TailSegmentSortKeyComputer::SegmentSortKey,
    );
    head: Head,
    tail: Tail,
}

pub struct ChainBuffer<HeadBuffer, TailBuffer, HeadKey, TailKey> {
    pub head: HeadBuffer,
    pub tail: TailBuffer,
    pub head_output: Vec<ComparableDoc<HeadKey, DocId>>,
    pub tail_output: Vec<ComparableDoc<TailKey, DocId>>,
    pub tail_input_docs: Vec<DocId>,
}

impl<HeadBuffer: Default, TailBuffer: Default, HeadKey, TailKey> Default
    for ChainBuffer<HeadBuffer, TailBuffer, HeadKey, TailKey>
{
    fn default() -> Self {
        ChainBuffer {
            head: HeadBuffer::default(),
            tail: TailBuffer::default(),
            head_output: Vec::new(),
            tail_output: Vec::new(),
            tail_input_docs: Vec::new(),
        }
    }
}

impl<Head, Tail> ChainSegmentSortKeyComputer<Head, Tail>
|
||||
where
|
||||
Head: SegmentSortKeyComputer,
|
||||
Tail: SegmentSortKeyComputer,
|
||||
{
|
||||
fn accept_sort_key_lazy(
|
||||
&mut self,
|
||||
doc_id: DocId,
|
||||
score: Score,
|
||||
threshold: &<Self as SegmentSortKeyComputer>::SegmentSortKey,
|
||||
) -> Option<(Ordering, <Self as SegmentSortKeyComputer>::SegmentSortKey)> {
|
||||
let (head_threshold, tail_threshold) = threshold;
|
||||
let head_sort_key = self.head.segment_sort_key(doc_id, score);
|
||||
let head_cmp = self
|
||||
.head
|
||||
.compare_segment_sort_key(&head_sort_key, head_threshold);
|
||||
if head_cmp == Ordering::Less {
|
||||
None
|
||||
} else if head_cmp == Ordering::Equal {
|
||||
let tail_sort_key = self.tail.segment_sort_key(doc_id, score);
|
||||
let tail_cmp = self
|
||||
.tail
|
||||
.compare_segment_sort_key(&tail_sort_key, tail_threshold);
|
||||
if tail_cmp == Ordering::Less {
|
||||
None
|
||||
} else {
|
||||
Some((tail_cmp, (head_sort_key, tail_sort_key)))
|
||||
}
|
||||
} else {
|
||||
let tail_sort_key = self.tail.segment_sort_key(doc_id, score);
|
||||
Some((head_cmp, (head_sort_key, tail_sort_key)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
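The lazy check above only pays for the tail key when the head does not already settle the comparison against the threshold. The same short-circuit, sketched outside the trait machinery (the closures and `u64` keys here are illustrative assumptions):

use std::cmp::Ordering;

// Lexicographic accept-check: the tail key is only computed when the head
// ties the threshold (tail decides) or when the document is already accepted.
fn accept_lazy(
    head_key: impl Fn() -> u64,
    tail_key: impl Fn() -> u64, // assumed to be the expensive part
    threshold: (u64, u64),
) -> Option<(u64, u64)> {
    let head = head_key();
    match head.cmp(&threshold.0) {
        Ordering::Less => None, // rejected without ever touching the tail
        Ordering::Equal => {
            let tail = tail_key();
            (tail >= threshold.1).then_some((head, tail))
        }
        Ordering::Greater => Some((head, tail_key())),
    }
}

fn main() {
    // Head ties the threshold, so the tail decides.
    assert_eq!(accept_lazy(|| 200, || 7, (200, 5)), Some((200, 7)));
    // Head is below the threshold: rejected, the tail closure never runs.
    assert_eq!(accept_lazy(|| 100, || unreachable!(), (200, 5)), None);
}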
impl<Head, Tail> SegmentSortKeyComputer for ChainSegmentSortKeyComputer<Head, Tail>
where
    Head: SegmentSortKeyComputer,
    Tail: SegmentSortKeyComputer,
{
    type SortKey = (Head::SortKey, Tail::SortKey);
    type SegmentSortKey = (Head::SegmentSortKey, Tail::SegmentSortKey);

    type SegmentComparator = (Head::SegmentComparator, Tail::SegmentComparator);

    type Buffer =
        ChainBuffer<Head::Buffer, Tail::Buffer, Head::SegmentSortKey, Tail::SegmentSortKey>;

    fn segment_comparator(&self) -> Self::SegmentComparator {
        (
            self.head.segment_comparator(),
            self.tail.segment_comparator(),
        )
    }

    /// A SegmentSortKeyComputer maps to a SegmentSortKey, but it can also decide on
    /// its ordering.
@@ -198,9 +294,90 @@ where
        left: &Self::SegmentSortKey,
        right: &Self::SegmentSortKey,
    ) -> Ordering {
        self.0
        self.head
            .compare_segment_sort_key(&left.0, &right.0)
            .then_with(|| self.1.compare_segment_sort_key(&left.1, &right.1))
            .then_with(|| self.tail.compare_segment_sort_key(&left.1, &right.1))
    }

    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        buffer: &mut Self::Buffer,
        filter: ValueRange<Self::SegmentSortKey>,
    ) {
        let (head_filter, threshold) = match filter {
            ValueRange::GreaterThan((head_threshold, tail_threshold), _)
            | ValueRange::LessThan((head_threshold, tail_threshold), _) => {
                let head_cmp = self.head.segment_comparator();
                let strict_head_filter = head_cmp.threshold_to_valuerange(head_threshold.clone());
                let head_filter = match strict_head_filter {
                    ValueRange::GreaterThan(t, m) => ValueRange::GreaterThanOrEqual(t, m),
                    ValueRange::LessThan(t, m) => ValueRange::LessThanOrEqual(t, m),
                    other => other,
                };
                (head_filter, Some((head_threshold, tail_threshold)))
            }
            _ => (ValueRange::All, None),
        };

        buffer.head_output.clear();
        self.head.segment_sort_keys(
            input_docs,
            &mut buffer.head_output,
            &mut buffer.head,
            head_filter,
        );

        if buffer.head_output.is_empty() {
            return;
        }

        buffer.tail_output.clear();
        buffer.tail_input_docs.clear();
        for cd in &buffer.head_output {
            buffer.tail_input_docs.push(cd.doc);
        }

        self.tail.segment_sort_keys(
            &buffer.tail_input_docs,
            &mut buffer.tail_output,
            &mut buffer.tail,
            ValueRange::All,
        );

        let head_cmp = self.head.segment_comparator();
        let tail_cmp = self.tail.segment_comparator();

        for (head_doc, tail_doc) in buffer
            .head_output
            .drain(..)
            .zip(buffer.tail_output.drain(..))
        {
            debug_assert_eq!(head_doc.doc, tail_doc.doc);
            let doc = head_doc.doc;
            let head_key = head_doc.sort_key;
            let tail_key = tail_doc.sort_key;

            let accept = if let Some((head_threshold, tail_threshold)) = &threshold {
                let head_ord = head_cmp.compare(&head_key, head_threshold);
                let ord = if head_ord == Ordering::Equal {
                    tail_cmp.compare(&tail_key, tail_threshold)
                } else {
                    head_ord
                };
                ord == Ordering::Greater
            } else {
                true
            };

            if accept {
                output.push(ComparableDoc {
                    sort_key: (head_key, tail_key),
                    doc,
                });
            }
        }
    }
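The batch version above is a two-pass scheme: head keys are computed for the whole block under a relaxed filter (`>=` rather than `>`, since a head tie can still win on the tail), and tail keys are computed only for the survivors. A self-contained sketch of that shape, with closures standing in for the head and tail computers:

// Two-pass chained key computation: head first (with a relaxed inclusive
// filter), tail only for the documents that survived the head pass.
fn chained_keys(
    docs: &[u32],
    head: impl Fn(u32) -> u64,
    tail: impl Fn(u32) -> u64,
    head_min: u64, // inclusive bound, mirroring the GreaterThanOrEqual relaxation
) -> Vec<((u64, u64), u32)> {
    let survivors: Vec<(u64, u32)> = docs
        .iter()
        .map(|&doc| (head(doc), doc))
        .filter(|(head_key, _)| *head_key >= head_min)
        .collect();
    survivors
        .into_iter()
        .map(|(head_key, doc)| ((head_key, tail(doc)), doc))
        .collect()
}

fn main() {
    let keys = chained_keys(&[0, 1, 2], |d| u64::from(d) * 10, |d| u64::from(d) + 100, 10);
    // Doc 0 has head key 0 < 10, so it is filtered out after the head pass.
    assert_eq!(keys, vec![((10, 101), 1), ((20, 102), 2)]);
}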
    #[inline(always)]
@@ -208,7 +385,7 @@ where
        &mut self,
        doc: DocId,
        score: Score,
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C>,
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C, Self::Buffer>,
    ) {
        let sort_key: Self::SegmentSortKey;
        if let Some(threshold) = &top_n_computer.threshold {
@@ -225,68 +402,56 @@ where

    #[inline(always)]
    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Self::SegmentSortKey {
        let head_sort_key = self.0.segment_sort_key(doc, score);
        let tail_sort_key = self.1.segment_sort_key(doc, score);
        let head_sort_key = self.head.segment_sort_key(doc, score);
        let tail_sort_key = self.tail.segment_sort_key(doc, score);
        (head_sort_key, tail_sort_key)
    }

    fn accept_sort_key_lazy(
        &mut self,
        doc_id: DocId,
        score: Score,
        threshold: &Self::SegmentSortKey,
    ) -> Option<(Ordering, Self::SegmentSortKey)> {
        let (head_threshold, tail_threshold) = threshold;
        let (head_cmp, head_sort_key) =
            self.0.accept_sort_key_lazy(doc_id, score, head_threshold)?;
        if head_cmp == Ordering::Equal {
            let (tail_cmp, tail_sort_key) =
                self.1.accept_sort_key_lazy(doc_id, score, tail_threshold)?;
            Some((tail_cmp, (head_sort_key, tail_sort_key)))
        } else {
            let tail_sort_key = self.1.segment_sort_key(doc_id, score);
            Some((head_cmp, (head_sort_key, tail_sort_key)))
        }
    }

    fn convert_segment_sort_key(&self, sort_key: Self::SegmentSortKey) -> Self::SortKey {
        let (head_sort_key, tail_sort_key) = sort_key;
        (
            self.0.convert_segment_sort_key(head_sort_key),
            self.1.convert_segment_sort_key(tail_sort_key),
            self.head.convert_segment_sort_key(head_sort_key),
            self.tail.convert_segment_sort_key(tail_sort_key),
        )
    }
}

/// This struct is used as an adapter to take a sort key computer and map its score to another
/// new sort key.
pub struct MappedSegmentSortKeyComputer<T, PreviousSortKey, NewSortKey> {
pub struct MappedSegmentSortKeyComputer<T: SegmentSortKeyComputer, NewSortKey> {
    sort_key_computer: T,
    map: fn(PreviousSortKey) -> NewSortKey,
    map: fn(T::SortKey) -> NewSortKey,
}

impl<T, PreviousScore, NewScore> SegmentSortKeyComputer
    for MappedSegmentSortKeyComputer<T, PreviousScore, NewScore>
    for MappedSegmentSortKeyComputer<T, NewScore>
where
    T: SegmentSortKeyComputer<SortKey = PreviousScore>,
    PreviousScore: 'static + Clone + Send + Sync + PartialOrd,
    NewScore: 'static + Clone + Send + Sync + PartialOrd,
    PreviousScore: 'static + Clone + Send + Sync,
    NewScore: 'static + Clone + Send + Sync,
{
    type SortKey = NewScore;
    type SegmentSortKey = T::SegmentSortKey;
    type SegmentComparator = T::SegmentComparator;
    type Buffer = T::Buffer;

    fn segment_comparator(&self) -> Self::SegmentComparator {
        self.sort_key_computer.segment_comparator()
    }

    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> Self::SegmentSortKey {
        self.sort_key_computer.segment_sort_key(doc, score)
    }

    fn accept_sort_key_lazy(
    fn segment_sort_keys(
        &mut self,
        doc_id: DocId,
        score: Score,
        threshold: &Self::SegmentSortKey,
    ) -> Option<(Ordering, Self::SegmentSortKey)> {
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        buffer: &mut Self::Buffer,
        filter: ValueRange<Self::SegmentSortKey>,
    ) {
        self.sort_key_computer
            .accept_sort_key_lazy(doc_id, score, threshold)
            .segment_sort_keys(input_docs, output, buffer, filter)
    }

    #[inline(always)]
@@ -294,12 +459,21 @@ where
        &mut self,
        doc: DocId,
        score: Score,
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C>,
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C, Self::Buffer>,
    ) {
        self.sort_key_computer
            .compute_sort_key_and_collect(doc, score, top_n_computer);
    }

    fn compute_sort_keys_and_collect<C: Comparator<Self::SegmentSortKey>>(
        &mut self,
        docs: &[DocId],
        top_n_computer: &mut TopNComputer<Self::SegmentSortKey, DocId, C, Self::Buffer>,
    ) {
        self.sort_key_computer
            .compute_sort_keys_and_collect(docs, top_n_computer);
    }

    fn convert_segment_sort_key(&self, segment_sort_key: Self::SegmentSortKey) -> Self::SortKey {
        (self.map)(
            self.sort_key_computer
@@ -325,10 +499,6 @@ where
    );
    type Child = MappedSegmentSortKeyComputer<
        <(SortKeyComputer1, (SortKeyComputer2, SortKeyComputer3)) as SortKeyComputer>::Child,
        (
            SortKeyComputer1::SortKey,
            (SortKeyComputer2::SortKey, SortKeyComputer3::SortKey),
        ),
        Self::SortKey,
    >;

@@ -352,7 +522,13 @@ where
        let sort_key_computer3 = self.2.segment_sort_key_computer(segment_reader)?;
        let map = |(sort_key1, (sort_key2, sort_key3))| (sort_key1, sort_key2, sort_key3);
        Ok(MappedSegmentSortKeyComputer {
            sort_key_computer: (sort_key_computer1, (sort_key_computer2, sort_key_computer3)),
            sort_key_computer: ChainSegmentSortKeyComputer {
                head: sort_key_computer1,
                tail: ChainSegmentSortKeyComputer {
                    head: sort_key_computer2,
                    tail: sort_key_computer3,
                },
            },
            map,
        })
    }
@@ -387,13 +563,6 @@ where
        SortKeyComputer1,
        (SortKeyComputer2, (SortKeyComputer3, SortKeyComputer4)),
    ) as SortKeyComputer>::Child,
    (
        SortKeyComputer1::SortKey,
        (
            SortKeyComputer2::SortKey,
            (SortKeyComputer3::SortKey, SortKeyComputer4::SortKey),
        ),
    ),
    Self::SortKey,
>;
    type SortKey = (
@@ -415,10 +584,16 @@ where
        let sort_key_computer3 = self.2.segment_sort_key_computer(segment_reader)?;
        let sort_key_computer4 = self.3.segment_sort_key_computer(segment_reader)?;
        Ok(MappedSegmentSortKeyComputer {
            sort_key_computer: (
                sort_key_computer1,
                (sort_key_computer2, (sort_key_computer3, sort_key_computer4)),
            ),
            sort_key_computer: ChainSegmentSortKeyComputer {
                head: sort_key_computer1,
                tail: ChainSegmentSortKeyComputer {
                    head: sort_key_computer2,
                    tail: ChainSegmentSortKeyComputer {
                        head: sort_key_computer3,
                        tail: sort_key_computer4,
                    },
                },
            },
            map: |(sort_key1, (sort_key2, (sort_key3, sort_key4)))| {
                (sort_key1, sort_key2, sort_key3, sort_key4)
            },
@@ -441,6 +616,13 @@ where
    }
}

use std::marker::PhantomData;

pub struct FuncSegmentSortKeyComputer<F, TSortKey> {
    func: F,
    _phantom: PhantomData<TSortKey>,
}

impl<F, SegmentF, TSortKey> SortKeyComputer for F
where
    F: 'static + Send + Sync + Fn(&SegmentReader) -> SegmentF,
@@ -448,24 +630,44 @@ where
    TSortKey: 'static + PartialOrd + Clone + Send + Sync + std::fmt::Debug,
{
    type SortKey = TSortKey;
    type Child = SegmentF;
    type Child = FuncSegmentSortKeyComputer<SegmentF, TSortKey>;
    type Comparator = NaturalComparator;

    fn segment_sort_key_computer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
        Ok((self)(segment_reader))
        Ok(FuncSegmentSortKeyComputer {
            func: (self)(segment_reader),
            _phantom: PhantomData,
        })
    }
}

impl<F, TSortKey> SegmentSortKeyComputer for F
impl<F, TSortKey> SegmentSortKeyComputer for FuncSegmentSortKeyComputer<F, TSortKey>
where
    F: 'static + FnMut(DocId) -> TSortKey,
    TSortKey: 'static + PartialOrd + Clone + Send + Sync,
{
    type SortKey = TSortKey;
    type SegmentSortKey = TSortKey;
    type SegmentComparator = NaturalComparator;
    type Buffer = ();

    fn segment_sort_key(&mut self, doc: DocId, _score: Score) -> TSortKey {
        (self)(doc)
        (self.func)(doc)
    }

    fn segment_sort_keys(
        &mut self,
        input_docs: &[DocId],
        output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        _buffer: &mut Self::Buffer,
        _filter: ValueRange<Self::SegmentSortKey>,
    ) {
        for &doc in input_docs {
            output.push(ComparableDoc {
                sort_key: (self.func)(doc),
                doc,
            });
        }
    }

    /// Convert a segment level score into the global level score.
@@ -474,13 +676,75 @@ where
    }
}
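A note on the `FuncSegmentSortKeyComputer` wrapper introduced above: implementing `SegmentSortKeyComputer` directly for every closure `F` would leave no room for the tuple and chain impls without overlap, so the closure is wrapped in a newtype instead. A minimal sketch of that newtype pattern outside tantivy's traits (the `KeyComputer` trait here is hypothetical, for illustration only):

use std::marker::PhantomData;

// A blanket impl for all `F: FnMut(u32) -> K` would overlap with impls for
// other concrete types; wrapping the closure in a struct keeps them disjoint.
struct FuncComputer<F, K> {
    func: F,
    _phantom: PhantomData<K>,
}

trait KeyComputer {
    type Key;
    fn key(&mut self, doc: u32) -> Self::Key;
}

impl<F, K> KeyComputer for FuncComputer<F, K>
where F: FnMut(u32) -> K
{
    type Key = K;
    fn key(&mut self, doc: u32) -> K {
        (self.func)(doc)
    }
}

fn main() {
    let mut computer = FuncComputer { func: |doc: u32| doc * 2, _phantom: PhantomData };
    assert_eq!(computer.key(21), 42);
}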
pub(crate) fn range_contains_none(range: &ValueRange<Option<u64>>) -> bool {
    match range {
        ValueRange::All => true,
        ValueRange::Inclusive(r) => r.contains(&None),
        ValueRange::GreaterThan(_threshold, match_nulls) => *match_nulls,
        ValueRange::GreaterThanOrEqual(_threshold, match_nulls) => *match_nulls,
        ValueRange::LessThan(_threshold, match_nulls) => *match_nulls,
        ValueRange::LessThanOrEqual(_threshold, match_nulls) => *match_nulls,
    }
}

pub(crate) fn convert_optional_u64_range_to_u64_range(
    range: ValueRange<Option<u64>>,
) -> ValueRange<u64> {
    match range {
        ValueRange::Inclusive(r) => {
            let start = r.start().unwrap_or(0);
            let end = r.end().unwrap_or(u64::MAX);
            ValueRange::Inclusive(start..=end)
        }
        ValueRange::GreaterThan(Some(val), match_nulls) => {
            ValueRange::GreaterThan(val, match_nulls)
        }
        ValueRange::GreaterThan(None, match_nulls) => {
            if match_nulls {
                ValueRange::All
            } else {
                ValueRange::Inclusive(u64::MIN..=u64::MAX)
            }
        }
        ValueRange::GreaterThanOrEqual(Some(val), match_nulls) => {
            ValueRange::GreaterThanOrEqual(val, match_nulls)
        }
        ValueRange::GreaterThanOrEqual(None, match_nulls) => {
            if match_nulls {
                ValueRange::All
            } else {
                ValueRange::Inclusive(u64::MIN..=u64::MAX)
            }
        }
        ValueRange::LessThan(None, match_nulls) => {
            if match_nulls {
                ValueRange::LessThan(u64::MIN, true)
            } else {
                ValueRange::Inclusive(1..=0)
            }
        }
        ValueRange::LessThan(Some(val), match_nulls) => ValueRange::LessThan(val, match_nulls),
        ValueRange::LessThanOrEqual(None, match_nulls) => {
            if match_nulls {
                ValueRange::LessThan(u64::MIN, true)
            } else {
                ValueRange::Inclusive(1..=0)
            }
        }
        ValueRange::LessThanOrEqual(Some(val), match_nulls) => {
            ValueRange::LessThanOrEqual(val, match_nulls)
        }
        ValueRange::All => ValueRange::All,
    }
}
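Two branches above deserve a second look: `GreaterThan(None, …)` matches every concrete value because any `Some(v)` sorts above `None`, while `LessThan(None, false)` maps to the deliberately empty range `1..=0`, since no `u64` sorts below a null. A quick standalone check of the `Option` ordering assumption behind both:

fn main() {
    // Rust's derived ordering on Option: None sorts below any Some value.
    assert!(Some(0u64) > None);
    assert!(None < Some(u64::MIN));
    // An empty inclusive range is a natural "matches nothing" encoding.
    let empty = 1u64..=0u64;
    assert!(empty.is_empty());
    assert!(!empty.contains(&0));
}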
#[cfg(test)]
mod tests {
    use std::cmp::Ordering;
    use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
    use std::sync::Arc;

    use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
    use crate::collector::{SegmentSortKeyComputer, SortKeyComputer, TopNComputer};
    use crate::schema::Schema;
    use crate::{DocId, Index, Order, SegmentReader};

@@ -628,4 +892,178 @@ mod tests {
            (200u32, 2u32)
        );
    }

    #[test]
    fn test_batch_score_computer_edge_case() {
        let score_computer_primary = |_segment_reader: &SegmentReader| |_doc: DocId| 200u32;
        let score_computer_secondary = |_segment_reader: &SegmentReader| |_doc: DocId| "b";
        let lazy_score_computer = (score_computer_primary, score_computer_secondary);
        let index = build_test_index();
        let searcher = index.reader().unwrap().searcher();
        let mut segment_sort_key_computer = lazy_score_computer
            .segment_sort_key_computer(searcher.segment_reader(0))
            .unwrap();

        let mut top_n_computer =
            TopNComputer::new_with_comparator(10, lazy_score_computer.comparator());
        // Threshold (200, "a"). Doc is (200, "b"). 200 == 200, "b" > "a". Should be accepted.
        top_n_computer.threshold = Some((200, "a"));

        let docs = vec![0];
        segment_sort_key_computer.compute_sort_keys_and_collect(&docs, &mut top_n_computer);

        let results = top_n_computer.into_sorted_vec();
        assert_eq!(results.len(), 1);
        let result = &results[0];
        assert_eq!(result.doc, 0);
        assert_eq!(result.sort_key, (200, "b"));
    }
}

#[cfg(test)]
mod proptest_tests {
    use proptest::prelude::*;

    use super::*;
    use crate::collector::sort_key::order::*;

    // Re-implement logic to interpret ValueRange<Option<u64>> manually to verify expectations
    fn range_contains_opt(range: &ValueRange<Option<u64>>, val: &Option<u64>) -> bool {
        match range {
            ValueRange::All => true,
            ValueRange::Inclusive(r) => r.contains(val),
            ValueRange::GreaterThan(t, match_nulls) => {
                if val.is_none() {
                    *match_nulls
                } else {
                    val > t
                }
            }
            ValueRange::GreaterThanOrEqual(t, match_nulls) => {
                if val.is_none() {
                    *match_nulls
                } else {
                    val >= t
                }
            }
            ValueRange::LessThan(t, match_nulls) => {
                if val.is_none() {
                    *match_nulls
                } else {
                    val < t
                }
            }
            ValueRange::LessThanOrEqual(t, match_nulls) => {
                if val.is_none() {
                    *match_nulls
                } else {
                    val <= t
                }
            }
        }
    }

    fn range_contains_u64(range: &ValueRange<u64>, val: &u64) -> bool {
        match range {
            ValueRange::All => true,
            ValueRange::Inclusive(r) => r.contains(val),
            ValueRange::GreaterThan(t, _) => val > t,
            ValueRange::GreaterThanOrEqual(t, _) => val >= t,
            ValueRange::LessThan(t, _) => val < t,
            ValueRange::LessThanOrEqual(t, _) => val <= t,
        }
    }

    proptest! {
        #[test]
        fn test_comparator_consistency_natural_none_is_lower(
            threshold in any::<Option<u64>>(),
            val in any::<Option<u64>>()
        ) {
            check_comparator::<NaturalComparator>(threshold, val)?;
        }

        #[test]
        fn test_comparator_consistency_reverse(
            threshold in any::<Option<u64>>(),
            val in any::<Option<u64>>()
        ) {
            check_comparator::<ReverseComparator>(threshold, val)?;
        }

        #[test]
        fn test_comparator_consistency_reverse_none_is_lower(
            threshold in any::<Option<u64>>(),
            val in any::<Option<u64>>()
        ) {
            check_comparator::<ReverseNoneIsLowerComparator>(threshold, val)?;
        }

        #[test]
        fn test_comparator_consistency_natural_none_is_higher(
            threshold in any::<Option<u64>>(),
            val in any::<Option<u64>>()
        ) {
            check_comparator::<NaturalNoneIsHigherComparator>(threshold, val)?;
        }
    }

    fn check_comparator<C: Comparator<Option<u64>>>(
        threshold: Option<u64>,
        val: Option<u64>,
    ) -> std::result::Result<(), proptest::test_runner::TestCaseError> {
        let comparator = C::default();
        let range = comparator.threshold_to_valuerange(threshold);
        let ordering = comparator.compare(&val, &threshold);
        let should_be_in_range = ordering == Ordering::Greater;

        let in_range_opt = range_contains_opt(&range, &val);

        prop_assert_eq!(
            in_range_opt,
            should_be_in_range,
            "Comparator consistency failed for {:?}. Threshold: {:?}, Val: {:?}, Range: {:?}, \
             Ordering: {:?}. range_contains_opt says {}, but compare says {}",
            std::any::type_name::<C>(),
            threshold,
            val,
            range,
            ordering,
            in_range_opt,
            should_be_in_range
        );

        // Check range_contains_none
        let expected_none_in_range = range_contains_opt(&range, &None);
        let actual_none_in_range = range_contains_none(&range);
        prop_assert_eq!(
            actual_none_in_range,
            expected_none_in_range,
            "range_contains_none failed for {:?}. Range: {:?}. Expected (from \
             range_contains_opt): {}, Actual: {}",
            std::any::type_name::<C>(),
            range,
            expected_none_in_range,
            actual_none_in_range
        );

        // Check convert_optional_u64_range_to_u64_range
        let u64_range = convert_optional_u64_range_to_u64_range(range.clone());
        if let Some(v) = val {
            let in_u64_range = range_contains_u64(&u64_range, &v);
            let in_opt_range = range_contains_opt(&range, &Some(v));
            prop_assert_eq!(
                in_u64_range,
                in_opt_range,
                "convert_optional_u64_range_to_u64_range failed for {:?}. Val: {:?}, OptRange: \
                 {:?}, U64Range: {:?}. Opt says {}, U64 says {}",
                std::any::type_name::<C>(),
                v,
                range,
                u64_range,
                in_opt_range,
                in_u64_range
            );
        }
        Ok(())
    }
}
@@ -99,7 +99,12 @@ where
    TSegmentSortKeyComputer: SegmentSortKeyComputer,
    C: Comparator<TSegmentSortKeyComputer::SegmentSortKey>,
{
    pub(crate) topn_computer: TopNComputer<TSegmentSortKeyComputer::SegmentSortKey, DocId, C>,
    pub(crate) topn_computer: TopNComputer<
        TSegmentSortKeyComputer::SegmentSortKey,
        DocId,
        C,
        TSegmentSortKeyComputer::Buffer,
    >,
    pub(crate) segment_ord: u32,
    pub(crate) segment_sort_key_computer: TSegmentSortKeyComputer,
}
@@ -120,6 +125,11 @@ where
        );
    }

    fn collect_block(&mut self, docs: &[DocId]) {
        self.segment_sort_key_computer
            .compute_sort_keys_and_collect(docs, &mut self.topn_computer);
    }

    fn harvest(self) -> Self::Fruit {
        let segment_ord = self.segment_ord;
        let segment_hits: Vec<(TSegmentSortKeyComputer::SortKey, DocAddress)> = self

@@ -2,6 +2,7 @@ use std::cmp::Ordering;
use std::fmt;
use std::ops::Range;

use columnar::ValueRange;
use serde::{Deserialize, Serialize};

use super::Collector;
@@ -10,8 +11,7 @@ use crate::collector::sort_key::{
    SortByStaticFastValue, SortByString,
};
use crate::collector::sort_key_top_collector::TopBySortKeyCollector;
use crate::collector::top_collector::ComparableDoc;
use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
use crate::collector::{ComparableDoc, SegmentSortKeyComputer, SortKeyComputer};
use crate::fastfield::FastValue;
use crate::{DocAddress, DocId, Order, Score, SegmentReader};

@@ -324,7 +324,7 @@ impl TopDocs {
        sort_key_computer: impl SortKeyComputer<SortKey = TSortKey> + Send + 'static,
    ) -> impl Collector<Fruit = Vec<(TSortKey, DocAddress)>>
    where
        TSortKey: 'static + Clone + Send + Sync + PartialOrd + std::fmt::Debug,
        TSortKey: 'static + Clone + Send + Sync + std::fmt::Debug,
    {
        TopBySortKeyCollector::new(sort_key_computer, self.doc_range())
    }
@@ -445,7 +445,7 @@ where
    F: 'static + Send + Sync + Fn(&SegmentReader) -> TTweakScoreSortKeyFn,
    TTweakScoreSortKeyFn: 'static + Fn(DocId, Score) -> TSortKey,
    TweakScoreSegmentSortKeyComputer<TTweakScoreSortKeyFn>:
        SegmentSortKeyComputer<SortKey = TSortKey>,
        SegmentSortKeyComputer<SortKey = TSortKey, SegmentSortKey = TSortKey>,
    TSortKey: 'static + PartialOrd + Clone + Send + Sync + std::fmt::Debug,
{
    type SortKey = TSortKey;
@@ -480,11 +480,23 @@ where
{
    type SortKey = TSortKey;
    type SegmentSortKey = TSortKey;
    type SegmentComparator = NaturalComparator;
    type Buffer = ();

    fn segment_sort_key(&mut self, doc: DocId, score: Score) -> TSortKey {
        (self.sort_key_fn)(doc, score)
    }

    fn segment_sort_keys(
        &mut self,
        _input_docs: &[DocId],
        _output: &mut Vec<ComparableDoc<Self::SegmentSortKey, DocId>>,
        _buffer: &mut Self::Buffer,
        _filter: ValueRange<Self::SegmentSortKey>,
    ) {
        unimplemented!("Batch computation is not supported for tweak score.")
    }

    /// Convert a segment level score into the global level score.
    fn convert_segment_sort_key(&self, sort_key: Self::SegmentSortKey) -> Self::SortKey {
        sort_key
@@ -508,12 +520,14 @@ where
/// the ascending `DocId|DocAddress` tie-breaking behavior without additional comparisons.
#[derive(Serialize, Deserialize)]
#[serde(from = "TopNComputerDeser<Score, D, C>")]
pub struct TopNComputer<Score, D, C> {
pub struct TopNComputer<Score, D, C, Buffer = ()> {
    /// The buffer reverses sort order to get top-semantics instead of bottom-semantics
    buffer: Vec<ComparableDoc<Score, D>>,
    top_n: usize,
    pub(crate) threshold: Option<Score>,
    comparator: C,
    #[serde(skip)]
    scratch: Buffer,
}

// Intermediate struct for TopNComputer for deserialization, to keep vec capacity
@@ -525,7 +539,9 @@ struct TopNComputerDeser<Score, D, C> {
    comparator: C,
}

impl<Score, D, C> From<TopNComputerDeser<Score, D, C>> for TopNComputer<Score, D, C> {
impl<Score, D, C, Buffer> From<TopNComputerDeser<Score, D, C>> for TopNComputer<Score, D, C, Buffer>
where Buffer: Default
{
    fn from(mut value: TopNComputerDeser<Score, D, C>) -> Self {
        let expected_cap = value.top_n.max(1) * 2;
        let current_cap = value.buffer.capacity();
@@ -540,12 +556,15 @@ impl<Score, D, C> From<TopNComputerDeser<Score, D, C>> for TopNComputer<Score, D
            top_n: value.top_n,
            threshold: value.threshold,
            comparator: value.comparator,
            scratch: Buffer::default(),
        }
    }
}

impl<Score: std::fmt::Debug, D, C> std::fmt::Debug for TopNComputer<Score, D, C>
where C: Comparator<Score>
impl<Score: std::fmt::Debug, D, C, Buffer> std::fmt::Debug for TopNComputer<Score, D, C, Buffer>
where
    C: Comparator<Score>,
    Buffer: std::fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("TopNComputer")
@@ -553,12 +572,13 @@ where C: Comparator<Score>
            .field("top_n", &self.top_n)
            .field("current_threshold", &self.threshold)
            .field("comparator", &self.comparator)
            .field("scratch", &self.scratch)
            .finish()
    }
}

// Custom clone to keep capacity
impl<Score: Clone, D: Clone, C: Clone> Clone for TopNComputer<Score, D, C> {
impl<Score: Clone, D: Clone, C: Clone, Buffer: Clone> Clone for TopNComputer<Score, D, C, Buffer> {
    fn clone(&self) -> Self {
        let mut buffer_clone = Vec::with_capacity(self.buffer.capacity());
        buffer_clone.extend(self.buffer.iter().cloned());
@@ -567,15 +587,17 @@ impl<Score: Clone, D: Clone, C: Clone> Clone for TopNComputer<Score, D, C> {
            top_n: self.top_n,
            threshold: self.threshold.clone(),
            comparator: self.comparator.clone(),
            scratch: self.scratch.clone(),
        }
    }
}

impl<TSortKey, D> TopNComputer<TSortKey, D, ReverseComparator>
impl<TSortKey, D> TopNComputer<TSortKey, D, ReverseComparator, ()>
where
    D: Ord,
    TSortKey: Clone,
    NaturalComparator: Comparator<TSortKey>,
    ReverseComparator: Comparator<TSortKey>,
{
    /// Create a new `TopNComputer`.
    /// Internally it will allocate a buffer of size `2 * top_n`.
@@ -585,7 +607,7 @@ where
}

#[inline(always)]
fn compare_for_top_k<TSortKey, D: Ord, C: Comparator<TSortKey>>(
pub fn compare_for_top_k<TSortKey, D: Ord, C: Comparator<TSortKey>>(
    c: &C,
    lhs: &ComparableDoc<TSortKey, D>,
    rhs: &ComparableDoc<TSortKey, D>,
@@ -596,21 +618,26 @@ fn compare_for_top_k<TSortKey, D: Ord, C: Comparator<TSortKey>>(
    // sort by doc id
}

impl<TSortKey, D, C> TopNComputer<TSortKey, D, C>
impl<TSortKey, D, C, Buffer> TopNComputer<TSortKey, D, C, Buffer>
where
    D: Ord,
    TSortKey: Clone,
    C: Comparator<TSortKey>,
    Buffer: Default,
{
    /// Create a new `TopNComputer`.
    /// Internally it will allocate a buffer of size `2 * top_n`.
    /// Internally it will allocate a buffer of size `(top_n.max(1) * 2) +
    /// COLLECT_BLOCK_BUFFER_LEN`.
    pub fn new_with_comparator(top_n: usize, comparator: C) -> Self {
        let vec_cap = top_n.max(1) * 2;
        // We ensure that there is always enough space to include an entire block in the buffer if
        // need be, so that `push_block_lazy` can avoid checking capacity inside its loop.
        let vec_cap = (top_n.max(1) * 2) + crate::COLLECT_BLOCK_BUFFER_LEN;
        TopNComputer {
            buffer: Vec::with_capacity(vec_cap),
            top_n,
            threshold: None,
            comparator,
            scratch: Buffer::default(),
        }
    }
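As the tests further down in this diff illustrate, the new `Buffer` parameter defaults to `()`, but has to be written out when nothing else pins it down for type inference. A usage sketch in the style of those tests (crate-internal API, so this only compiles inside tantivy):

let mut top: TopNComputer<f32, u32, _, ()> =
    TopNComputer::new_with_comparator(2, NaturalComparator);
top.append_doc(1, 0.8);
top.append_doc(2, 0.3);
top.append_doc(3, 0.9);
// Only the two best (sort_key, doc) pairs survive.
assert_eq!(top.into_sorted_vec().len(), 2);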
@@ -634,16 +661,28 @@ where
    // At this point, we need to have established that the doc is above the threshold.
    #[inline(always)]
    pub(crate) fn append_doc(&mut self, doc: D, sort_key: TSortKey) {
        if self.buffer.len() == self.buffer.capacity() {
            let median = self.truncate_top_n();
            self.threshold = Some(median);
        }
        // This cannot panic, because we truncate_median will at least remove one element, since
        // the min capacity is 2.
        self.reserve(1);
        // This cannot panic, because we've reserved room for one element.
        let comparable_doc = ComparableDoc { doc, sort_key };
        push_assuming_capacity(comparable_doc, &mut self.buffer);
    }

    // Ensure that there is capacity to push `additional` more elements without resizing.
    #[inline(always)]
    pub(crate) fn reserve(&mut self, additional: usize) {
        if self.buffer.len() + additional > self.buffer.capacity() {
            let median = self.truncate_top_n();
            debug_assert!(self.buffer.len() + additional <= self.buffer.capacity());
            self.threshold = Some(median);
        }
    }

    pub(crate) fn buffer_and_scratch(
        &mut self,
    ) -> (&mut Vec<ComparableDoc<TSortKey, D>>, &mut Buffer) {
        (&mut self.buffer, &mut self.scratch)
    }

    #[inline(never)]
    fn truncate_top_n(&mut self) -> TSortKey {
        // Use select_nth_unstable to find the top nth score
@@ -683,7 +722,7 @@ where
    //
    // Panics if there is not enough capacity to add an element.
    #[inline(always)]
    fn push_assuming_capacity<T>(el: T, buf: &mut Vec<T>) {
    pub fn push_assuming_capacity<T>(el: T, buf: &mut Vec<T>) {
        let prev_len = buf.len();
        assert!(prev_len < buf.capacity());
        // This is mimicking the current (non-stabilized) implementation in std.
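The `push_assuming_capacity` helper being made `pub` here mirrors the pattern behind the unstable `Vec::push_within_capacity` idea: assert spare capacity once, then write without the grow-check branch. A standalone sketch of that pattern (my reconstruction, since the body is truncated in the hunk above):

// Push without triggering Vec's reallocation path; the caller guarantees
// that spare capacity exists, which the assert enforces.
fn push_assuming_capacity<T>(el: T, buf: &mut Vec<T>) {
    let prev_len = buf.len();
    assert!(prev_len < buf.capacity());
    unsafe {
        // Sound: we asserted spare capacity, and we write the slot before
        // extending the length.
        std::ptr::write(buf.as_mut_ptr().add(prev_len), el);
        buf.set_len(prev_len + 1);
    }
}

fn main() {
    let mut v: Vec<u32> = Vec::with_capacity(4);
    push_assuming_capacity(7, &mut v);
    assert_eq!(v, vec![7]);
}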
@@ -701,8 +740,7 @@ mod tests {

    use super::{TopDocs, TopNComputer};
    use crate::collector::sort_key::{ComparatorEnum, NaturalComparator, ReverseComparator};
    use crate::collector::top_collector::ComparableDoc;
    use crate::collector::{Collector, DocSetCollector};
    use crate::collector::{Collector, ComparableDoc, DocSetCollector};
    use crate::query::{AllQuery, Query, QueryParser};
    use crate::schema::{Field, Schema, FAST, STORED, TEXT};
    use crate::time::format_description::well_known::Rfc3339;
@@ -1407,11 +1445,11 @@ mod tests {
    #[test]
    fn test_top_field_collect_string_prop(
        order in prop_oneof!(Just(Order::Desc), Just(Order::Asc)),
        limit in 1..256_usize,
        offset in 0..256_usize,
        limit in 1..32_usize,
        offset in 0..32_usize,
        segments_terms in
            proptest::collection::vec(
                proptest::collection::vec(0..32_u8, 1..32_usize),
                proptest::collection::vec(0..64_u8, 1..256_usize),
                0..8_usize,
            )
    ) {
@@ -1732,7 +1770,8 @@ mod tests {

    #[test]
    fn test_top_n_computer_not_at_capacity() {
        let mut top_n_computer = TopNComputer::new_with_comparator(4, NaturalComparator);
        let mut top_n_computer: TopNComputer<f32, u32, _, ()> =
            TopNComputer::new_with_comparator(4, NaturalComparator);
        top_n_computer.append_doc(1, 0.8);
        top_n_computer.append_doc(3, 0.2);
        top_n_computer.append_doc(5, 0.3);
@@ -1757,7 +1796,8 @@ mod tests {

    #[test]
    fn test_top_n_computer_at_capacity() {
        let mut top_collector = TopNComputer::new_with_comparator(4, NaturalComparator);
        let mut top_collector: TopNComputer<f32, u32, _, ()> =
            TopNComputer::new_with_comparator(4, NaturalComparator);
        top_collector.append_doc(1, 0.8);
        top_collector.append_doc(3, 0.2);
        top_collector.append_doc(5, 0.3);
@@ -1794,12 +1834,14 @@ mod tests {
        let doc_ids_collection = [4, 5, 6];
        let score = 3.3f32;

        let mut top_collector_limit_2 = TopNComputer::new_with_comparator(2, NaturalComparator);
        let mut top_collector_limit_2: TopNComputer<f32, u32, _, ()> =
            TopNComputer::new_with_comparator(2, NaturalComparator);
        for id in &doc_ids_collection {
            top_collector_limit_2.append_doc(*id, score);
        }

        let mut top_collector_limit_3 = TopNComputer::new_with_comparator(3, NaturalComparator);
        let mut top_collector_limit_3: TopNComputer<f32, u32, _, ()> =
            TopNComputer::new_with_comparator(3, NaturalComparator);
        for id in &doc_ids_collection {
            top_collector_limit_3.append_doc(*id, score);
        }
@@ -1820,15 +1862,16 @@ mod bench {

    #[bench]
    fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
        let mut top_collector = TopNComputer::new_with_comparator(100, NaturalComparator);
        let mut top_collector: TopNComputer<f32, u32, _, ()> =
            TopNComputer::new_with_comparator(100, NaturalComparator);

        for i in 0..100 {
            top_collector.append_doc(i, 0.8);
            top_collector.append_doc(i as u32, 0.8);
        }

        b.iter(|| {
            for i in 0..100 {
                top_collector.append_doc(i, 0.8);
                top_collector.append_doc(i as u32, 0.8);
            }
        });
    }

@@ -1,3 +1,5 @@
mod file_watcher;

use std::collections::HashMap;
use std::fmt;
use std::fs::{self, File, OpenOptions};
@@ -7,6 +9,7 @@ use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock, Weak};

use common::StableDeref;
use file_watcher::FileWatcher;
use fs4::fs_std::FileExt;
#[cfg(all(feature = "mmap", unix))]
pub use memmap2::Advice;
@@ -18,7 +21,6 @@ use crate::core::META_FILEPATH;
use crate::directory::error::{
    DeleteError, LockError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::file_watcher::FileWatcher;
use crate::directory::{
    AntiCallToken, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes, TerminatingWrite,
    WatchCallback, WatchHandle, WritePtr,
@@ -5,7 +5,6 @@ mod mmap_directory;

mod directory;
mod directory_lock;
mod file_watcher;
pub mod footer;
mod managed_directory;
mod ram_directory;

@@ -42,7 +42,6 @@ pub trait DocSet: Send {
    /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a `DocSet`.
    ///
    /// `target` has to be larger than or equal to `.doc()` when calling `seek`.
    /// If `target` is equal to `.doc()` then the DocSet should not advance.
    fn seek(&mut self, target: DocId) -> DocId {
        let mut doc = self.doc();
        debug_assert!(doc <= target);
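The `seek` contract above (repeating `seek` with `target == .doc()` is a no-op, and `seek(TERMINATED)` drains the set) admits a default implementation built purely on `advance`; a simplified standalone model over a sorted slice:

const TERMINATED: u32 = u32::MAX;

// Minimal stand-in for a DocSet: a cursor over a sorted list of doc ids.
struct SliceDocSet<'a> {
    docs: &'a [u32],
    pos: usize,
}

impl<'a> SliceDocSet<'a> {
    fn doc(&self) -> u32 {
        self.docs.get(self.pos).copied().unwrap_or(TERMINATED)
    }
    fn advance(&mut self) -> u32 {
        self.pos += 1;
        self.doc()
    }
    // Default-style seek: advance until doc() >= target.
    fn seek(&mut self, target: u32) -> u32 {
        debug_assert!(self.doc() <= target);
        while self.doc() < target {
            self.advance();
        }
        self.doc()
    }
}

fn main() {
    let mut set = SliceDocSet { docs: &[1, 5, 9], pos: 0 };
    assert_eq!(set.seek(5), 5);
    assert_eq!(set.seek(5), 5); // target == doc(): the cursor does not move
    assert_eq!(set.seek(TERMINATED), TERMINATED); // consumes the rest of the set
}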
@@ -167,19 +166,6 @@ pub trait DocSet: Send {
    }
}

/// Consumes the `DocSet` and returns a Vec with all of the docs in the DocSet,
/// including the current doc.
#[cfg(test)]
pub fn docset_to_doc_vec(mut doc_set: Box<dyn DocSet>) -> Vec<DocId> {
    let mut output = Vec::new();
    let mut doc = doc_set.doc();
    while doc != TERMINATED {
        output.push(doc);
        doc = doc_set.advance();
    }
    output
}

impl DocSet for &mut dyn DocSet {
    fn advance(&mut self) -> u32 {
        (**self).advance()

@@ -79,7 +79,7 @@ mod tests {
    use std::ops::{Range, RangeInclusive};
    use std::path::Path;

    use columnar::StrColumn;
    use columnar::{StrColumn, ValueRange};
    use common::{ByteCount, DateTimePrecision, HasLen, TerminatingWrite};
    use once_cell::sync::Lazy;
    use rand::prelude::SliceRandom;
@@ -944,7 +944,7 @@ mod tests {
        let test_range = |range: RangeInclusive<u64>| {
            let expected_count = numbers.iter().filter(|num| range.contains(*num)).count();
            let mut vec = vec![];
            field.get_row_ids_for_value_range(range, 0..u32::MAX, &mut vec);
            field.get_row_ids_for_value_range(ValueRange::Inclusive(range), 0..u32::MAX, &mut vec);
            assert_eq!(vec.len(), expected_count);
        };
        test_range(50..=50);
@@ -1022,7 +1022,7 @@ mod tests {
        let test_range = |range: RangeInclusive<u64>| {
            let expected_count = numbers.iter().filter(|num| range.contains(*num)).count();
            let mut vec = vec![];
            field.get_row_ids_for_value_range(range, 0..u32::MAX, &mut vec);
            field.get_row_ids_for_value_range(ValueRange::Inclusive(range), 0..u32::MAX, &mut vec);
            assert_eq!(vec.len(), expected_count);
        };
        let test_range_variant = |start, stop| {

@@ -113,7 +113,7 @@ mod tests {
            IndexRecordOption::WithFreqs,
        );
        let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
        let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32, 0)?;
        let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32)?;
        assert_eq!(scorer.doc(), 0);
        assert!((scorer.score() - 0.22920431).abs() < 0.001f32);
        assert_eq!(scorer.advance(), 1);
@@ -142,7 +142,7 @@ mod tests {
            IndexRecordOption::WithFreqs,
        );
        let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
        let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32, 0)?;
        let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32)?;
        assert_eq!(scorer.doc(), 0);
        assert!((scorer.score() - 0.22920431).abs() < 0.001f32);
        assert_eq!(scorer.advance(), 1);

@@ -404,7 +404,10 @@ mod tests {
        schema_builder.build()
    };
    let index_metas = IndexMeta {
        index_settings: IndexSettings::default(),
        index_settings: IndexSettings {
            docstore_compression: Compressor::None,
            ..Default::default()
        },
        segments: Vec::new(),
        schema,
        opstamp: 0u64,
@@ -413,7 +416,7 @@ mod tests {
    let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
    assert_eq!(
        json,
        r#"{"index_settings":{"docstore_compression":"lz4","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
        r#"{"index_settings":{"docstore_compression":"none","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
    );

    let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
@@ -494,6 +497,8 @@ mod tests {
    #[test]
    #[cfg(feature = "lz4-compression")]
    fn test_index_settings_default() {
        use crate::store::Compressor;

        let mut index_settings = IndexSettings::default();
        assert_eq!(
            index_settings,

@@ -14,7 +14,6 @@ use crate::positions::PositionReader;
use crate::postings::{BlockSegmentPostings, SegmentPostings, TermInfo};
use crate::schema::{IndexRecordOption, Term, Type};
use crate::termdict::TermDictionary;
use crate::DocId;

/// The inverted index reader is in charge of accessing
/// the inverted index associated with a specific field.
@@ -193,34 +192,9 @@ impl InvertedIndexReader {
        term: &Term,
        option: IndexRecordOption,
    ) -> io::Result<Option<BlockSegmentPostings>> {
        let Some(term_info) = self.get_term_info(term)? else {
            return Ok(None);
        };
        let block_postings_not_loaded =
            self.read_block_postings_from_terminfo(&term_info, option)?;
        Ok(Some(block_postings_not_loaded))
    }

    /// Returns a block postings given a `term_info`.
    /// This method is for advanced usage only.
    ///
    /// Most users should prefer using [`Self::read_postings()`] instead.
    pub(crate) fn read_block_postings_from_terminfo_with_seek(
        &self,
        term_info: &TermInfo,
        requested_option: IndexRecordOption,
        seek_doc: DocId,
    ) -> io::Result<(BlockSegmentPostings, usize)> {
        let postings_data = self
            .postings_file_slice
            .slice(term_info.postings_range.clone());
        BlockSegmentPostings::open(
            term_info.doc_freq,
            postings_data,
            self.record_option,
            requested_option,
            seek_doc,
        )
        self.get_term_info(term)?
            .map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
            .transpose()
    }

    /// Returns a block postings given a `term_info`.
@@ -232,9 +206,15 @@ impl InvertedIndexReader {
        term_info: &TermInfo,
        requested_option: IndexRecordOption,
    ) -> io::Result<BlockSegmentPostings> {
        let (block_segment_postings, _) =
            self.read_block_postings_from_terminfo_with_seek(term_info, requested_option, 0)?;
        Ok(block_segment_postings)
        let postings_data = self
            .postings_file_slice
            .slice(term_info.postings_range.clone());
        BlockSegmentPostings::open(
            term_info.doc_freq,
            postings_data,
            self.record_option,
            requested_option,
        )
    }

    /// Returns a posting object given a `term_info`.
@@ -244,13 +224,13 @@ impl InvertedIndexReader {
    pub fn read_postings_from_terminfo(
        &self,
        term_info: &TermInfo,
        record_option: IndexRecordOption,
        seek_doc: DocId,
        option: IndexRecordOption,
    ) -> io::Result<SegmentPostings> {
        let (block_segment_postings, position_within_block) =
            self.read_block_postings_from_terminfo_with_seek(term_info, record_option, seek_doc)?;
        let option = option.downgrade(self.record_option);

        let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
        let position_reader = {
            if record_option.has_positions() {
            if option.has_positions() {
                let positions_data = self
                    .positions_file_slice
                    .read_bytes_slice(term_info.positions_range.clone())?;
@@ -261,9 +241,8 @@ impl InvertedIndexReader {
            }
        };
        Ok(SegmentPostings::from_block_postings(
            block_segment_postings,
            block_postings,
            position_reader,
            position_within_block,
        ))
    }

@@ -289,7 +268,7 @@ impl InvertedIndexReader {
        option: IndexRecordOption,
    ) -> io::Result<Option<SegmentPostings>> {
        self.get_term_info(term)?
            .map(move |term_info| self.read_postings_from_terminfo(&term_info, option, 0u32))
            .map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
            .transpose()
    }

@@ -4,19 +4,20 @@ use std::sync::{Arc, RwLock, Weak};
use super::operation::DeleteOperation;
use crate::Opstamp;

// The DeleteQueue is conceptually similar to a multiple-
// consumer, single-producer broadcast channel.
//
// All consumers will receive all messages.
//
// Consumers of the delete queue hold a `DeleteCursor`,
// which points to a specific place in the `DeleteQueue`.
//
// New consumers can be created in two ways:
// - calling `delete_queue.cursor()` returns a cursor that will include all future delete operations
//   (and some or none of the past operations... The client is in charge of checking the opstamps.).
// - cloning an existing cursor returns a new cursor that is at the exact same position, and can
//   now advance independently from the original cursor.
/// The DeleteQueue is conceptually similar to a multiple-
/// consumer, single-producer broadcast channel.
///
/// All consumers will receive all messages.
///
/// Consumers of the delete queue hold a `DeleteCursor`,
/// which points to a specific place in the `DeleteQueue`.
///
/// New consumers can be created in two ways:
/// - calling `delete_queue.cursor()` returns a cursor that will include all future delete
///   operations (and some or none of the past operations... The client is in charge of checking
///   the opstamps.).
/// - cloning an existing cursor returns a new cursor that is at the exact same position, and can
///   now advance independently from the original cursor.
#[derive(Default)]
struct InnerDeleteQueue {
    writer: Vec<DeleteOperation>,
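The cursor semantics spelled out in the rewritten comment — every consumer replays all operations from its own position, and cloning a cursor forks that position — can be modeled with a shared log and a per-cursor index. A simplified sketch (a toy model, not the actual `DeleteQueue` internals):

use std::sync::{Arc, RwLock};

// Toy broadcast-log: one writer appends, many cursors read independently.
#[derive(Default)]
struct Log {
    ops: RwLock<Vec<u64>>, // stand-in for DeleteOperation
}

#[derive(Clone)]
struct Cursor {
    log: Arc<Log>,
    pos: usize, // cloning a Cursor forks this position
}

impl Cursor {
    fn next(&mut self) -> Option<u64> {
        let ops = self.log.ops.read().unwrap();
        let op = ops.get(self.pos).copied();
        if op.is_some() {
            self.pos += 1;
        }
        op
    }
}

fn main() {
    let log = Arc::new(Log::default());
    log.ops.write().unwrap().extend([10, 20]);
    let mut a = Cursor { log: Arc::clone(&log), pos: 0 };
    let mut b = a.clone(); // same position, advances independently afterwards
    assert_eq!(a.next(), Some(10));
    assert_eq!(a.next(), Some(20));
    assert_eq!(b.next(), Some(10)); // b still replays from its fork point
}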
@@ -249,12 +250,7 @@ mod tests {

    struct DummyWeight;
    impl Weight for DummyWeight {
        fn scorer(
            &self,
            _reader: &SegmentReader,
            _boost: Score,
            _seek_doc: DocId,
        ) -> crate::Result<Box<dyn Scorer>> {
        fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
            Err(crate::TantivyError::InternalError("dummy impl".to_owned()))
        }

@@ -367,11 +367,8 @@ impl IndexMerger {
        for (segment_ord, term_info) in merged_terms.current_segment_ords_and_term_infos() {
            let segment_reader = &self.readers[segment_ord];
            let inverted_index: &InvertedIndexReader = &field_readers[segment_ord];
            let segment_postings = inverted_index.read_postings_from_terminfo(
                &term_info,
                segment_postings_option,
                0u32,
            )?;
            let segment_postings = inverted_index
                .read_postings_from_terminfo(&term_info, segment_postings_option)?;
            let alive_bitset_opt = segment_reader.alive_bitset();
            let doc_freq = if let Some(alive_bitset) = alive_bitset_opt {
                segment_postings.doc_freq_given_deletes(alive_bitset)

@@ -4,6 +4,7 @@
//! `IndexWriter` is the main entry point for that, and is created from
//! [`Index::writer`](crate::Index::writer).

/// Delete queue implementation for broadcasting delete operations to consumers.
pub(crate) mod delete_queue;
pub(crate) mod path_to_unordered_id;

@@ -421,10 +421,9 @@ fn remap_and_write(
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;
    use std::path::{Path, PathBuf};
    use std::path::Path;

    use columnar::ColumnType;
    use tempfile::TempDir;

    use crate::collector::{Count, TopDocs};
    use crate::directory::RamDirectory;
@@ -1067,10 +1066,7 @@ mod tests {
        let mut schema_builder = Schema::builder();
        schema_builder.add_text_field("title", text_options);
        let schema = schema_builder.build();
        let tempdir = TempDir::new().unwrap();
        let tempdir_path = PathBuf::from(tempdir.path());
        Index::create_in_dir(&tempdir_path, schema).unwrap();
        let index = Index::open_in_dir(tempdir_path).unwrap();
        let index = Index::create_in_ram(schema);
        let schema = index.schema();
        let mut index_writer = index.writer(50_000_000).unwrap();
        let title = schema.get_field("title").unwrap();
14
src/lib.rs
@@ -17,6 +17,7 @@
|
||||
//!
|
||||
//! ```rust
|
||||
//! # use std::path::Path;
|
||||
//! # use std::fs;
|
||||
//! # use tempfile::TempDir;
|
||||
//! # use tantivy::collector::TopDocs;
|
||||
//! # use tantivy::query::QueryParser;
|
||||
@@ -27,8 +28,11 @@
|
||||
//! # // Let's create a temporary directory for the
|
||||
//! # // sake of this example
|
||||
//! # if let Ok(dir) = TempDir::new() {
|
||||
//! # run_example(dir.path()).unwrap();
|
||||
//! # dir.close().unwrap();
|
||||
//! # let index_path = dir.path().join("index");
|
||||
//! # // In case the directory already exists, we remove it
|
||||
//! # let _ = fs::remove_dir_all(&index_path);
|
||||
//! # fs::create_dir_all(&index_path).unwrap();
|
||||
//! # run_example(&index_path).unwrap();
|
||||
//! # }
|
||||
//! # }
|
||||
//! #
|
||||
@@ -203,6 +207,7 @@ mod docset;
|
||||
mod reader;
|
||||
|
||||
#[cfg(test)]
|
||||
#[cfg(feature = "mmap")]
|
||||
mod compat_tests;
|
||||
|
||||
pub use self::reader::{IndexReader, IndexReaderBuilder, ReloadPolicy, Warmer};
|
||||
@@ -1170,12 +1175,11 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_validate_checksum() -> crate::Result<()> {
|
||||
let index_path = tempfile::tempdir().expect("dir");
|
||||
let mut builder = Schema::builder();
|
||||
let body = builder.add_text_field("body", TEXT | STORED);
|
||||
let schema = builder.build();
|
||||
let index = Index::create_in_dir(&index_path, schema)?;
|
||||
let mut writer: IndexWriter = index.writer(50_000_000)?;
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut writer: IndexWriter = index.writer_for_tests()?;
|
||||
writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
for _ in 0..5000 {
|
||||
writer.add_document(doc!(body => "foo"))?;
|
||||
|
||||
@@ -99,8 +99,7 @@ impl BlockSegmentPostings {
|
||||
data: FileSlice,
|
||||
mut record_option: IndexRecordOption,
|
||||
requested_option: IndexRecordOption,
|
||||
seek_doc: DocId,
|
||||
) -> io::Result<(BlockSegmentPostings, usize)> {
|
||||
) -> io::Result<BlockSegmentPostings> {
|
||||
let bytes = data.read_bytes()?;
|
||||
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, bytes)?;
|
||||
let skip_reader = match skip_data_opt {
|
||||
@@ -126,7 +125,7 @@ impl BlockSegmentPostings {
|
||||
(_, _) => FreqReadingOption::ReadFreq,
|
||||
};
|
||||
|
||||
let mut block_segment_postings: BlockSegmentPostings = BlockSegmentPostings {
|
||||
let mut block_segment_postings = BlockSegmentPostings {
|
||||
doc_decoder: BlockDecoder::with_val(TERMINATED),
|
||||
block_loaded: false,
|
||||
freq_decoder: BlockDecoder::with_val(1),
|
||||
@@ -136,13 +135,8 @@ impl BlockSegmentPostings {
|
||||
data: postings_data,
|
||||
skip_reader,
|
||||
};
|
||||
let inner_pos = if seek_doc == 0 {
|
||||
block_segment_postings.load_block();
|
||||
0
|
||||
} else {
|
||||
block_segment_postings.seek(seek_doc)
|
||||
};
|
||||
Ok((block_segment_postings, inner_pos))
|
||||
block_segment_postings.load_block();
|
||||
Ok(block_segment_postings)
|
||||
}
|
||||
|
||||
/// Returns the block_max_score for the current block.
|
||||
@@ -264,9 +258,7 @@ impl BlockSegmentPostings {
|
||||
self.doc_decoder.output_len
|
||||
}
|
||||
|
||||
/// Position on a block that may contains `target_doc`, and returns the
|
||||
/// position of the first document greater than or equal to `target_doc`
|
||||
/// within that block.
|
||||
/// Position on a block that may contains `target_doc`.
|
||||
///
|
||||
/// If all docs are smaller than target, the block loaded may be empty,
|
||||
/// or be the last an incomplete VInt block.
|
@@ -461,7 +453,7 @@ mod tests {
doc_ids.push(130);
{
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None, 0);
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
assert_eq!(docset.seek(128), 129);
assert_eq!(docset.doc(), 129);
assert_eq!(docset.advance(), 130);
@@ -469,8 +461,8 @@ mod tests {
assert_eq!(docset.advance(), TERMINATED);
}
{
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None, 0);
let block_segments = build_block_postings(&doc_ids).unwrap();
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
assert_eq!(docset.seek(129), 129);
assert_eq!(docset.doc(), 129);
assert_eq!(docset.advance(), 130);
@@ -479,7 +471,7 @@ mod tests {
}
{
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None, 0);
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
assert_eq!(docset.doc(), 0);
assert_eq!(docset.seek(131), TERMINATED);
assert_eq!(docset.doc(), TERMINATED);

@@ -527,6 +527,7 @@ pub(crate) mod tests {
}

impl<TScorer: Scorer> Scorer for UnoptimizedDocSet<TScorer> {
#[inline]
fn score(&mut self) -> Score {
self.0.score()
}

@@ -79,15 +79,14 @@ impl SegmentPostings {
.close_term(docs.len() as u32)
.expect("In memory Serialization should never fail.");
}
let (block_segment_postings, position_within_block) = BlockSegmentPostings::open(
let block_segment_postings = BlockSegmentPostings::open(
docs.len() as u32,
FileSlice::from(buffer),
IndexRecordOption::Basic,
IndexRecordOption::Basic,
0u32,
)
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None, position_within_block)
SegmentPostings::from_block_postings(block_segment_postings, None)
}

/// Helper functions to create `SegmentPostings` for tests.
@@ -128,29 +127,28 @@ impl SegmentPostings {
postings_serializer
.close_term(doc_and_tfs.len() as u32)
.unwrap();
let (block_segment_postings, position_within_block) = BlockSegmentPostings::open(
let block_segment_postings = BlockSegmentPostings::open(
doc_and_tfs.len() as u32,
FileSlice::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
0u32,
)
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None, position_within_block)
SegmentPostings::from_block_postings(block_segment_postings, None)
}

/// Creates a Segment Postings from a
/// - `BlockSegmentPostings`,
/// - a position reader
/// - a target document to seek to
/// Reads a Segment postings from an &[u8]
///
/// * `len` - number of documents in the posting list.
/// * `data` - data array. The complete data is not necessarily used.
/// * `freq_handler` - the freq handler is in charge of decoding frequencies and/or positions
pub(crate) fn from_block_postings(
segment_block_postings: BlockSegmentPostings,
position_reader: Option<PositionReader>,
position_within_block: usize,
) -> SegmentPostings {
SegmentPostings {
block_cursor: segment_block_postings,
cur: position_within_block,
cur: 0, // cursor within the block
position_reader,
}
}
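Since both `open` and `from_block_postings` lose their seek parameters in this diff, starting mid-posting-list is now expressed through the ordinary seek call. A sketch mirroring the test helper above; `target_doc` is a hypothetical `DocId` and error handling is elided:

// Construct first (the first block is always loaded, cursor at 0)...
let block_segment_postings = BlockSegmentPostings::open(
    docs.len() as u32,
    FileSlice::from(buffer),
    IndexRecordOption::Basic,
    IndexRecordOption::Basic,
)
.unwrap();
let mut postings = SegmentPostings::from_block_postings(block_segment_postings, None);
// ...then seek explicitly instead of passing a seek target at construction.
postings.seek(target_doc);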

@@ -6,17 +6,21 @@ use crate::{DocId, Score, TERMINATED};

// doc num bits uses the following encoding:
// given 0b a b cdefgh
// |1|2| 3 |
// |1|2|3| 4 |
// - 1: unused
// - 2: is delta-1 encoded. 0 if not, 1 if yes
// - 3: a 6 bit number in 0..=32, the actual bitwidth
// - 3: unused
// - 4: a 5 bit number in 0..32, the actual bitwidth. Bitpacking could in theory say this is 32
// (requiring a 6th bit), but the biggest doc_id we can want to encode is TERMINATED-1, which can
// be represented on 31b without delta encoding.
fn encode_bitwidth(bitwidth: u8, delta_1: bool) -> u8 {
assert!(bitwidth < 32);
bitwidth | ((delta_1 as u8) << 6)
}

fn decode_bitwidth(raw_bitwidth: u8) -> (u8, bool) {
let delta_1 = ((raw_bitwidth >> 6) & 1) != 0;
let bitwidth = raw_bitwidth & 0x3f;
let bitwidth = raw_bitwidth & 0x1f;
(bitwidth, delta_1)
}
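A standalone round-trip of the scheme described above: the low five bits carry the bitwidth (always < 32) and bit 6 carries the delta-1 flag. This only re-states the two functions from the hunk to make the bit layout concrete:

fn encode_bitwidth(bitwidth: u8, delta_1: bool) -> u8 {
    assert!(bitwidth < 32);
    bitwidth | ((delta_1 as u8) << 6)
}

fn decode_bitwidth(raw_bitwidth: u8) -> (u8, bool) {
    let delta_1 = ((raw_bitwidth >> 6) & 1) != 0;
    let bitwidth = raw_bitwidth & 0x1f;
    (bitwidth, delta_1)
}

fn main() {
    // 29 = 0b11101 fits in the low five bits; the delta flag lands on bit 6.
    assert_eq!(encode_bitwidth(29, true), 0b0100_0000 | 29);
    assert_eq!(decode_bitwidth(encode_bitwidth(29, true)), (29, true));
}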

@@ -430,7 +434,7 @@ mod tests {

#[test]
fn test_encode_decode_bitwidth() {
for bitwidth in 0..=32 {
for bitwidth in 0..32 {
for delta_1 in [false, true] {
assert_eq!(
(bitwidth, delta_1),

@@ -21,12 +21,7 @@ impl Query for AllQuery {
pub struct AllWeight;

impl Weight for AllWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer::new(reader.max_doc());
if boost != 1.0 {
Ok(Box::new(BoostScorer::new(all_scorer, boost)))
@@ -110,6 +105,7 @@ impl DocSet for AllScorer {
}

impl Scorer for AllScorer {
#[inline]
fn score(&mut self) -> Score {
1.0
}
@@ -145,7 +141,7 @@ mod tests {
let weight = AllQuery.weight(EnableScoring::disabled_from_schema(&index.schema()))?;
{
let reader = searcher.segment_reader(0);
let mut scorer = weight.scorer(reader, 1.0, 0)?;
let mut scorer = weight.scorer(reader, 1.0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), 1u32);
assert_eq!(scorer.doc(), 1u32);
@@ -153,7 +149,7 @@ mod tests {
}
{
let reader = searcher.segment_reader(1);
let mut scorer = weight.scorer(reader, 1.0, 0)?;
let mut scorer = weight.scorer(reader, 1.0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), TERMINATED);
}
@@ -168,12 +164,12 @@ mod tests {
let weight = AllQuery.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
let reader = searcher.segment_reader(0);
{
let mut scorer = weight.scorer(reader, 2.0, 0)?;
let mut scorer = weight.scorer(reader, 2.0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 2.0);
}
{
let mut scorer = weight.scorer(reader, 1.5, 0)?;
let mut scorer = weight.scorer(reader, 1.5)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.5);
}

@@ -84,12 +84,7 @@ where
A: Automaton + Send + Sync + 'static,
A::State: Clone,
{
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field)?;
@@ -97,12 +92,8 @@ where
let mut term_stream = self.automaton_stream(term_dict)?;
while term_stream.advance() {
let term_info = term_stream.value();
let (mut block_segment_postings, _) = inverted_index
.read_block_postings_from_terminfo_with_seek(
term_info,
IndexRecordOption::Basic,
seek_doc,
)?;
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
@@ -120,7 +111,7 @@ where
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0, 0)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) == doc {
Ok(Explanation::new("AutomatonScorer", 1.0))
} else {
@@ -195,7 +186,7 @@ mod tests {
let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
let reader = index.reader()?;
let searcher = reader.searcher();
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.0);
assert_eq!(scorer.advance(), 2u32);
@@ -212,7 +203,7 @@ mod tests {
let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
let reader = index.reader()?;
let searcher = reader.searcher();
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.32, 0)?;
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.32)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.32);
Ok(())

@@ -12,7 +12,7 @@ use crate::query::{
intersect_scorers, AllScorer, BufferedUnionScorer, EmptyScorer, Exclude, Explanation, Occur,
RequiredOptionalScorer, Scorer, Weight,
};
use crate::{DocId, Score, TERMINATED};
use crate::{DocId, Score};

enum SpecializedScorer {
TermUnion(Vec<TermScorer>),
@@ -156,19 +156,6 @@ fn effective_should_scorer_for_union<TScoreCombiner: ScoreCombiner>(
}
}

fn create_scorer(
weight: &dyn Weight,
reader: &SegmentReader,
boost: Score,
target_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
if target_doc >= reader.max_doc() {
Ok(Box::new(EmptyScorer))
} else {
weight.scorer(reader, boost, target_doc)
}
}
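The removed helper above, together with the `per_occur_scorers` change below, undoes the seek plumbing: MUST clauses were built first, each one tightening a shared lower bound so later scorers could start past documents already ruled out. A sketch of just that propagation, reusing names from the deleted body (`must_weights`, `reader`, `boost` are context, not standalone code):

// Each MUST scorer starts from the best lower bound found so far;
// create_scorer short-circuited to EmptyScorer once the bound reached max_doc.
let mut seek_first_doc: DocId = 0;
for (_, must_sub_weight) in must_weights {
    let sub_scorer = create_scorer(must_sub_weight.as_ref(), reader, boost, seek_first_doc)?;
    seek_first_doc = seek_first_doc.max(sub_scorer.doc());
}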

enum ShouldScorersCombinationMethod {
// Should scorers are irrelevant.
Ignored,
@@ -220,29 +207,10 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
&self,
reader: &SegmentReader,
boost: Score,
mut seek_first_doc: DocId,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
let (mut must_weights, other_weights): (Vec<(Occur, _)>, Vec<(Occur, _)>) = self
.weights
.iter()
.map(|(occur, weight)| (*occur, weight))
.partition(|(occur, _weight)| *occur == Occur::Must);
// We start by must weights in order to get the best "seek_first_doc" so that we
// can skip the first few documents of the other scorers.
must_weights.sort_by_key(|weight| weight.1.intersection_priority());
for (_, must_sub_weight) in must_weights {
let sub_scorer: Box<dyn Scorer> =
create_scorer(must_sub_weight.as_ref(), reader, boost, seek_first_doc)?;
seek_first_doc = seek_first_doc.max(sub_scorer.doc());
per_occur_scorers
.entry(Occur::Must)
.or_default()
.push(sub_scorer);
}
for (occur, sub_weight) in &other_weights {
let sub_scorer: Box<dyn Scorer> =
create_scorer(sub_weight.as_ref(), reader, boost, seek_first_doc)?;
for (occur, subweight) in &self.weights {
let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader, boost)?;
per_occur_scorers
.entry(*occur)
.or_default()
@@ -256,10 +224,9 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
reader: &SegmentReader,
boost: Score,
score_combiner_fn: impl Fn() -> TComplexScoreCombiner,
seek_doc: u32,
) -> crate::Result<SpecializedScorer> {
let num_docs = reader.num_docs();
let mut per_occur_scorers = self.per_occur_scorers(reader, boost, seek_doc)?;
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;

// Indicate how should clauses are combined with must clauses.
let mut must_scorers: Vec<Box<dyn Scorer>> =
@@ -440,7 +407,7 @@ fn remove_and_count_all_and_empty_scorers(
if scorer.is::<AllScorer>() {
counts.num_all_scorers += 1;
false
} else if scorer.doc() == TERMINATED {
} else if scorer.is::<EmptyScorer>() {
counts.num_empty_scorers += 1;
false
} else {
@@ -451,12 +418,7 @@ fn remove_and_count_all_and_empty_scorers(
}

impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombiner> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let num_docs = reader.num_docs();
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
@@ -465,15 +427,15 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
if occur == Occur::MustNot {
Ok(Box::new(EmptyScorer))
} else {
weight.scorer(reader, boost, seek_doc)
weight.scorer(reader, boost)
}
} else if self.scoring_enabled {
self.complex_scorer(reader, boost, &self.score_combiner_fn, seek_doc)
self.complex_scorer(reader, boost, &self.score_combiner_fn)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, &self.score_combiner_fn, num_docs)
})
} else {
self.complex_scorer(reader, boost, DoNothingCombiner::default, seek_doc)
self.complex_scorer(reader, boost, DoNothingCombiner::default)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, DoNothingCombiner::default, num_docs)
})
@@ -481,7 +443,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0, 0)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
@@ -505,7 +467,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn, 0)?;
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = BufferedUnionScorer::build(
@@ -527,7 +489,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner, 0u32)?;
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
let mut buffer = [0u32; COLLECT_BLOCK_BUFFER_LEN];

match scorer {
@@ -562,7 +524,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn, 0u32)?;
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
super::block_wand(term_scorers, threshold, callback);

@@ -57,7 +57,7 @@ mod tests {
let query = query_parser.parse_query("+a")?;
let searcher = index.reader()?.searcher();
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert!(scorer.is::<TermScorer>());
Ok(())
}
@@ -70,13 +70,13 @@ mod tests {
{
let query = query_parser.parse_query("+a +b +c")?;
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert!(scorer.is::<Intersection<TermScorer>>());
}
{
let query = query_parser.parse_query("+a +(b c)")?;
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
}
Ok(())
@@ -90,14 +90,14 @@ mod tests {
{
let query = query_parser.parse_query("+a b")?;
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert!(scorer
.is::<RequiredOptionalScorer<Box<dyn Scorer>, Box<dyn Scorer>, SumCombiner>>());
}
{
let query = query_parser.parse_query("+a b")?;
let weight = query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert!(scorer.is::<TermScorer>());
}
Ok(())
@@ -244,14 +244,12 @@ mod tests {
.weight(EnableScoring::enabled_from_searcher(&searcher))
.unwrap();
{
let mut boolean_scorer =
boolean_weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let mut boolean_scorer = boolean_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 0.84163445);
}
{
let mut boolean_scorer =
boolean_weight.scorer(searcher.segment_reader(0u32), 2.0, 0)?;
let mut boolean_scorer = boolean_weight.scorer(searcher.segment_reader(0u32), 2.0)?;
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 1.6832689);
}
@@ -345,7 +343,7 @@ mod tests {
(Occur::Must, term_match_some.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
assert!(scorer.is::<TermScorer>());
}
{
@@ -355,7 +353,7 @@ mod tests {
(Occur::Must, term_match_none.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
assert!(scorer.is::<EmptyScorer>());
}
{
@@ -364,7 +362,7 @@ mod tests {
(Occur::Should, term_match_none.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
assert!(scorer.is::<AllScorer>());
}
{
@@ -373,7 +371,7 @@ mod tests {
(Occur::Should, term_match_none.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
assert!(scorer.is::<TermScorer>());
}
Ok(())
@@ -613,134 +611,6 @@ mod tests {
Ok(())
}

/// Test that the seek_doc parameter correctly skips documents in BooleanWeight::scorer.
///
/// When seek_doc is provided, the scorer should start from that document (or the first
/// matching document >= seek_doc), skipping earlier documents.
#[test]
pub fn test_boolean_weight_seek_doc() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let value_field = schema_builder.add_u64_field("value", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer: IndexWriter = index.writer_for_tests()?;

// Create 11 documents:
// doc 0: value=0
// doc 1: value=10
// doc 2: value=20
// ...
// doc 9: value=90
// doc 10: value=50 (matches range 30-70)
for i in 0..10 {
index_writer.add_document(doc!(
text_field => "hello",
value_field => (i * 10) as u64
))?;
}
index_writer.add_document(doc!(
text_field => "hello",
value_field => 50u64
))?;
index_writer.commit()?;

let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);

// Create a Boolean query: MUST(term "hello") AND MUST(range 30..=70)
// This should match docs with value in [30, 70]: docs 3, 4, 5, 6, 7, 10
let term_query: Box<dyn Query> = Box::new(TermQuery::new(
Term::from_field_text(text_field, "hello"),
IndexRecordOption::Basic,
));
let range_query: Box<dyn Query> = Box::new(RangeQuery::new(
Bound::Included(Term::from_field_u64(value_field, 30)),
Bound::Included(Term::from_field_u64(value_field, 70)),
));

let boolean_query =
BooleanQuery::new(vec![(Occur::Must, term_query), (Occur::Must, range_query)]);

let weight =
boolean_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;

let doc_when_seeking_from = |seek_from: DocId| {
let scorer = weight.scorer(segment_reader, 1.0f32, seek_from).unwrap();
crate::docset::docset_to_doc_vec(scorer)
};

// Expected matching docs: 3, 4, 5, 6, 7, 10 (values 30, 40, 50, 60, 70, 50)
assert_eq!(doc_when_seeking_from(0), vec![3, 4, 5, 6, 7, 10]);
assert_eq!(doc_when_seeking_from(1), vec![3, 4, 5, 6, 7, 10]);
assert_eq!(doc_when_seeking_from(3), vec![3, 4, 5, 6, 7, 10]);
assert_eq!(doc_when_seeking_from(4), vec![4, 5, 6, 7, 10]);
assert_eq!(doc_when_seeking_from(7), vec![7, 10]);
assert_eq!(doc_when_seeking_from(8), vec![10]);
assert_eq!(doc_when_seeking_from(10), vec![10]);
assert_eq!(doc_when_seeking_from(11), Vec::<DocId>::new());

Ok(())
}

/// Test that the seek_doc parameter works correctly with SHOULD clauses.
#[test]
pub fn test_boolean_weight_seek_doc_with_should() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer: IndexWriter = index.writer_for_tests()?;

// Create documents:
// doc 0: "a b"
// doc 1: "a"
// doc 2: "b"
// doc 3: "c"
// doc 4: "a b c"
index_writer.add_document(doc!(text_field => "a b"))?;
index_writer.add_document(doc!(text_field => "a"))?;
index_writer.add_document(doc!(text_field => "b"))?;
index_writer.add_document(doc!(text_field => "c"))?;
index_writer.add_document(doc!(text_field => "a b c"))?;
index_writer.commit()?;

let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);

// Create a Boolean query: SHOULD(term "a") OR SHOULD(term "b")
// This should match docs 0, 1, 2, 4
let term_a: Box<dyn Query> = Box::new(TermQuery::new(
Term::from_field_text(text_field, "a"),
IndexRecordOption::Basic,
));
let term_b: Box<dyn Query> = Box::new(TermQuery::new(
Term::from_field_text(text_field, "b"),
IndexRecordOption::Basic,
));

let boolean_query =
BooleanQuery::new(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);

let weight =
boolean_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;

let doc_when_seeking_from = |seek_from: DocId| {
let scorer = weight.scorer(segment_reader, 1.0f32, seek_from).unwrap();
crate::docset::docset_to_doc_vec(scorer)
};

// Expected matching docs: 0, 1, 2, 4
assert_eq!(doc_when_seeking_from(0), vec![0, 1, 2, 4]);
assert_eq!(doc_when_seeking_from(1), vec![1, 2, 4]);
assert_eq!(doc_when_seeking_from(2), vec![2, 4]);
assert_eq!(doc_when_seeking_from(3), vec![4]);
assert_eq!(doc_when_seeking_from(4), vec![4]);
assert_eq!(doc_when_seeking_from(5), Vec::<DocId>::new());

Ok(())
}

/// Test multiple AllScorer instances in different clause types.
///
/// Verifies correct behavior when AllScorers appear in multiple positions.

@@ -67,13 +67,8 @@ impl BoostWeight {
}

impl Weight for BoostWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost, seek_doc)
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}

fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
@@ -88,10 +83,6 @@ impl Weight for BoostWeight {
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}

fn intersection_priority(&self) -> u32 {
self.weight.intersection_priority()
}
}

pub(crate) struct BoostScorer<S: Scorer> {
@@ -143,6 +134,7 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
}

impl<S: Scorer> Scorer for BoostScorer<S> {
#[inline]
fn score(&mut self) -> Score {
self.underlying.score() * self.boost
}

@@ -63,18 +63,13 @@ impl ConstWeight {
}

impl Weight for ConstWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let inner_scorer = self.weight.scorer(reader, boost, seek_doc)?;
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let inner_scorer = self.weight.scorer(reader, boost)?;
Ok(Box::new(ConstScorer::new(inner_scorer, boost * self.score)))
}

fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0, 0)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
"Document #({doc}) does not match"
@@ -89,10 +84,6 @@ impl Weight for ConstWeight {
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}

fn intersection_priority(&self) -> u32 {
self.weight.intersection_priority()
}
}

/// Wraps a `DocSet` and simply returns a constant `Scorer`.
@@ -146,6 +137,7 @@ impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
}

impl<TDocSet: DocSet + 'static> Scorer for ConstScorer<TDocSet> {
#[inline]
fn score(&mut self) -> Score {
self.score
}

@@ -173,6 +173,7 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> DocSet
impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Scorer
for Disjunction<TScorer, TScoreCombiner>
{
#[inline]
fn score(&mut self) -> Score {
self.current_score
}
@@ -307,6 +308,7 @@ mod tests {
}

impl Scorer for DummyScorer {
#[inline]
fn score(&mut self) -> Score {
self.foo.get(self.cursor).map(|x| x.1).unwrap_or(0.0)
}

@@ -26,24 +26,13 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases.
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(
&self,
_reader: &SegmentReader,
_boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer))
}

fn explain(&self, _reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
Err(does_not_match(doc))
}

/// Returns a priority number used to sort weights when running an
/// intersection.
fn intersection_priority(&self) -> u32 {
0u32
}
}
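The concrete priorities visible elsewhere in this diff make the removed ordering tangible: `EmptyWeight` returns 0, `FastFieldRangeWeight` 30, `PhraseWeight` 40, `PhrasePrefixWeight` 50, and wrappers such as `BoostWeight`/`ConstWeight` forward their inner weight's value. A sketch of how an intersection would order clause weights by that number (ascending, as the removed `sort_by_key` call did); the trait here is a stand-in for illustration, not tantivy's `Weight`:

trait HasIntersectionPriority {
    fn intersection_priority(&self) -> u32;
}

fn order_for_intersection<W: HasIntersectionPriority>(weights: &mut [W]) {
    // Ascending: priority 0 (e.g. an empty clause) is considered first.
    weights.sort_by_key(|w| w.intersection_priority());
}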

/// `EmptyScorer` is a dummy `Scorer` in which no document matches.
@@ -66,6 +55,7 @@ impl DocSet for EmptyScorer {
}

impl Scorer for EmptyScorer {
#[inline]
fn score(&mut self) -> Score {
0.0
}

@@ -84,6 +84,7 @@ where
TScorer: Scorer,
TDocSetExclude: DocSet + 'static,
{
#[inline]
fn score(&mut self) -> Score {
self.underlying_docset.score()
}

@@ -98,12 +98,7 @@ pub struct ExistsWeight {
}

impl Weight for ExistsWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let fast_field_reader = reader.fast_fields();
let mut column_handles = fast_field_reader.dynamic_column_handles(&self.field_name)?;
if self.field_type == Type::Json && self.json_subpaths {
@@ -171,7 +166,7 @@ impl Weight for ExistsWeight {
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0, 0)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}

@@ -105,6 +105,7 @@ impl<TDocSet: DocSet> Intersection<TDocSet, TDocSet> {
}

impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOtherDocSet> {
#[inline]
fn advance(&mut self) -> DocId {
let (left, right) = (&mut self.left, &mut self.right);
let mut candidate = left.advance();
@@ -174,6 +175,7 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
.all(|docset| docset.seek_into_the_danger_zone(target))
}

#[inline]
fn doc(&self) -> DocId {
self.left.doc()
}
@@ -200,6 +202,7 @@ where
TScorer: Scorer,
TOtherScorer: Scorer,
{
#[inline]
fn score(&mut self) -> Score {
self.left.score()
+ self.right.score()

@@ -81,6 +81,7 @@ impl<TPostings: Postings> DocSet for PhraseKind<TPostings> {
}

impl<TPostings: Postings> Scorer for PhraseKind<TPostings> {
#[inline]
fn score(&mut self) -> Score {
match self {
PhraseKind::SinglePrefix { positions, .. } => {
@@ -215,6 +216,7 @@ impl<TPostings: Postings> DocSet for PhrasePrefixScorer<TPostings> {
}

impl<TPostings: Postings> Scorer for PhrasePrefixScorer<TPostings> {
#[inline]
fn score(&mut self) -> Score {
// TODO modify score??
self.phrase_scorer.score()

@@ -42,11 +42,10 @@ impl PhrasePrefixWeight {
Ok(FieldNormReader::constant(reader.max_doc(), 1))
}

pub(crate) fn prefix_phrase_scorer(
pub(crate) fn phrase_scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Option<PhrasePrefixScorer<SegmentPostings>>> {
let similarity_weight_opt = self
.similarity_weight_opt
@@ -55,16 +54,14 @@ impl PhrasePrefixWeight {
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
let inverted_index = reader.inverted_index(term.field())?;
let Some(term_info) = inverted_index.get_term_info(term)? else {
if let Some(postings) = reader
.inverted_index(term.field())?
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)?
{
term_postings_list.push((offset, postings));
} else {
return Ok(None);
};
let postings = inverted_index.read_postings_from_terminfo(
&term_info,
IndexRecordOption::WithFreqsAndPositions,
seek_doc,
)?;
term_postings_list.push((offset, postings));
}
}

let inv_index = reader.inverted_index(self.prefix.1.field())?;
@@ -117,13 +114,8 @@ impl PhrasePrefixWeight {
}

impl Weight for PhrasePrefixWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.prefix_phrase_scorer(reader, boost, seek_doc)? {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
Ok(Box::new(EmptyScorer))
@@ -131,7 +123,7 @@ impl Weight for PhrasePrefixWeight {
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.prefix_phrase_scorer(reader, 1.0, doc)?;
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
@@ -148,10 +140,6 @@ impl Weight for PhrasePrefixWeight {
}
Ok(explanation)
}

fn intersection_priority(&self) -> u32 {
50u32
}
}

#[cfg(test)]
@@ -199,7 +187,7 @@ mod tests {
.unwrap()
.unwrap();
let mut phrase_scorer = phrase_weight
.prefix_phrase_scorer(searcher.segment_reader(0u32), 1.0, 0u32)?
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
@@ -226,7 +214,7 @@ mod tests {
.unwrap()
.unwrap();
let mut phrase_scorer = phrase_weight
.prefix_phrase_scorer(searcher.segment_reader(0u32), 1.0, 0u32)?
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
@@ -250,7 +238,7 @@ mod tests {
.unwrap()
.is_none());
let weight = phrase_query.weight(enable_scoring).unwrap();
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.advance(), 2);
assert_eq!(phrase_scorer.doc(), 2);
@@ -271,7 +259,7 @@ mod tests {
]);
let enable_scoring = EnableScoring::enabled_from_searcher(&searcher);
let weight = phrase_query.weight(enable_scoring).unwrap();
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())
}

@@ -84,7 +84,7 @@ pub(crate) mod tests {
let phrase_query = PhraseQuery::new(terms);
let phrase_weight =
phrase_query.phrase_weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0, 0)?;
let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0)?;
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())
@@ -343,43 +343,6 @@ pub(crate) mod tests {
Ok(())
}

#[test]
pub fn test_phrase_weight_seek_doc() -> crate::Result<()> {
// Create an index with documents where the phrase "a b" appears in some of them.
// Documents: 0: "c d", 1: "a b", 2: "e f", 3: "a b c", 4: "g h", 5: "a b", 6: "i j"
let index = create_index(&["c d", "a b", "e f", "a b c", "g h", "a b", "i j"])?;
let text_field = index.schema().get_field("text").unwrap();
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);

let phrase_query = PhraseQuery::new(vec![
Term::from_field_text(text_field, "a"),
Term::from_field_text(text_field, "b"),
]);
let phrase_weight =
phrase_query.phrase_weight(EnableScoring::disabled_from_schema(searcher.schema()))?;

// Helper function to collect all docs from a scorer created with a given seek_doc
let docs_when_seeking_from = |seek_from: DocId| {
let scorer = phrase_weight
.scorer(segment_reader, 1.0f32, seek_from)
.unwrap();
crate::docset::docset_to_doc_vec(scorer)
};

// Documents with "a b": 1, 3, 5
assert_eq!(docs_when_seeking_from(0), vec![1, 3, 5]);
assert_eq!(docs_when_seeking_from(1), vec![1, 3, 5]);
assert_eq!(docs_when_seeking_from(2), vec![3, 5]);
assert_eq!(docs_when_seeking_from(3), vec![3, 5]);
assert_eq!(docs_when_seeking_from(4), vec![5]);
assert_eq!(docs_when_seeking_from(5), vec![5]);
assert_eq!(docs_when_seeking_from(6), Vec::<DocId>::new());
assert_eq!(docs_when_seeking_from(7), Vec::<DocId>::new());

Ok(())
}

#[test]
pub fn test_phrase_query_on_json() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
@@ -410,7 +373,7 @@ pub(crate) mod tests {
.weight(EnableScoring::disabled_from_schema(searcher.schema()))
.unwrap();
let mut phrase_scorer = phrase_weight
.scorer(searcher.segment_reader(0), 1.0f32, 0)
.scorer(searcher.segment_reader(0), 1.0f32)
.unwrap();
let mut docs = Vec::new();
loop {

@@ -563,6 +563,7 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
}

impl<TPostings: Postings> Scorer for PhraseScorer<TPostings> {
#[inline]
fn score(&mut self) -> Score {
let doc = self.doc();
let fieldnorm_id = self.fieldnorm_reader.fieldnorm_id(doc);

@@ -43,7 +43,6 @@ impl PhraseWeight {
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight_opt = self
.similarity_weight_opt
@@ -52,16 +51,14 @@ impl PhraseWeight {
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
let inverted_index = reader.inverted_index(term.field())?;
let Some(term_info) = inverted_index.get_term_info(term)? else {
if let Some(postings) = reader
.inverted_index(term.field())?
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)?
{
term_postings_list.push((offset, postings));
} else {
return Ok(None);
};
let postings = inverted_index.read_postings_from_terminfo(
&term_info,
IndexRecordOption::WithFreqsAndPositions,
seek_doc,
)?;
term_postings_list.push((offset, postings));
}
}
Ok(Some(PhraseScorer::new(
term_postings_list,
@@ -77,13 +74,8 @@ impl PhraseWeight {
}

impl Weight for PhraseWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost, seek_doc)? {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
Ok(Box::new(EmptyScorer))
@@ -91,12 +83,12 @@ impl Weight for PhraseWeight {
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0, doc)?;
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
let mut scorer = scorer_opt.unwrap();
if scorer.doc() != doc {
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
@@ -108,10 +100,6 @@ impl Weight for PhraseWeight {
}
Ok(explanation)
}

fn intersection_priority(&self) -> u32 {
40u32
}
}

#[cfg(test)]
@@ -134,7 +122,7 @@ mod tests {
let enable_scoring = EnableScoring::enabled_from_searcher(&searcher);
let phrase_weight = phrase_query.phrase_weight(enable_scoring).unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0, 0)?
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);

@@ -195,11 +195,8 @@ impl RegexPhraseWeight {
const SPARSE_TERM_DOC_THRESHOLD: u32 = 100;

for term_info in term_infos {
let mut term_posting = inverted_index.read_postings_from_terminfo(
term_info,
IndexRecordOption::WithFreqsAndPositions,
0u32,
)?;
let mut term_posting = inverted_index
.read_postings_from_terminfo(term_info, IndexRecordOption::WithFreqsAndPositions)?;
let num_docs = term_posting.doc_freq();

if num_docs < SPARSE_TERM_DOC_THRESHOLD {
@@ -272,12 +269,7 @@ impl RegexPhraseWeight {
}

impl Weight for RegexPhraseWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {

@@ -1,7 +1,6 @@
use core::fmt::Debug;
use std::ops::RangeInclusive;

use columnar::Column;
use columnar::{Column, ValueRange};

use crate::{DocId, DocSet, TERMINATED};

@@ -41,7 +40,7 @@ impl VecCursor {

pub(crate) struct RangeDocSet<T> {
/// The range filter on the values.
value_range: RangeInclusive<T>,
value_range: ValueRange<T>,
column: Column<T>,
/// The next docid start range to fetch (inclusive).
next_fetch_start: u32,
@@ -61,12 +60,8 @@ pub(crate) struct RangeDocSet<T> {

const DEFAULT_FETCH_HORIZON: u32 = 128;
impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
pub(crate) fn new(
value_range: RangeInclusive<T>,
column: Column<T>,
seek_first_doc: DocId,
) -> Self {
if *value_range.start() > column.max_value() || *value_range.end() < column.min_value() {
pub(crate) fn new(value_range: ValueRange<T>, column: Column<T>) -> Self {
if !value_range.intersects(column.min_value(), column.max_value()) {
return Self {
value_range,
column,
@@ -81,7 +76,7 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
value_range,
column,
loaded_docs: VecCursor::new(),
next_fetch_start: seek_first_doc,
next_fetch_start: 0,
fetch_horizon: DEFAULT_FETCH_HORIZON,
last_seek_pos_opt: None,
};
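`ValueRange` comes from the `columnar` crate; this hunk swaps the hand-rolled bound comparison for its `intersects(min, max)` check and wraps existing inclusive ranges in `ValueRange::Inclusive`. A minimal sketch of what `intersects` must mean for the inclusive case, matching the comparison it replaces (the real type presumably carries more variants than this):

use std::ops::RangeInclusive;

// The old check bailed out when start > column max or end < column min;
// intersecting is simply the negation of that.
fn inclusive_intersects<T: PartialOrd>(range: &RangeInclusive<T>, min: T, max: T) -> bool {
    !(*range.start() > max || *range.end() < min)
}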

@@ -212,12 +212,7 @@ impl InvertedIndexRangeWeight {
}

impl Weight for InvertedIndexRangeWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);

@@ -234,12 +229,7 @@ impl Weight for InvertedIndexRangeWeight {
processed_count += 1;
let term_info = term_range.value();
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo_with_seek(
term_info,
IndexRecordOption::Basic,
seek_doc,
)?
.0;
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
@@ -256,7 +246,7 @@ impl Weight for InvertedIndexRangeWeight {
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0, 0)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
@@ -696,7 +686,7 @@ mod tests {
.weight(EnableScoring::disabled_from_schema(&schema))
.unwrap();
let range_scorer = range_weight
.scorer(&searcher.segment_readers()[0], 1.0f32, 0)
.scorer(&searcher.segment_readers()[0], 1.0f32)
.unwrap();
range_scorer
};

@@ -7,7 +7,7 @@ use std::ops::{Bound, RangeInclusive};

use columnar::{
Cardinality, Column, ColumnType, MonotonicallyMappableToU128, MonotonicallyMappableToU64,
NumericalType, StrColumn,
NumericalType, StrColumn, ValueRange,
};
use common::bounds::{BoundsRange, TransformBound};

@@ -52,12 +52,7 @@ impl FastFieldRangeWeight {
}

impl Weight for FastFieldRangeWeight {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
// Check if both bounds are Bound::Unbounded
if self.bounds.is_unbounded() {
return Ok(Box::new(AllScorer::new(reader.max_doc())));
@@ -114,21 +109,11 @@ impl Weight for FastFieldRangeWeight {
else {
return Ok(Box::new(EmptyScorer));
};
search_on_u64_ff(
column,
boost,
BoundsRange::new(lower_bound, upper_bound),
seek_doc,
)
search_on_u64_ff(column, boost, BoundsRange::new(lower_bound, upper_bound))
}
Type::U64 | Type::I64 | Type::F64 => {
search_on_json_numerical_field(reader, &field_name, typ, bounds, boost)
}
Type::U64 | Type::I64 | Type::F64 => search_on_json_numerical_field(
reader,
&field_name,
typ,
bounds,
boost,
seek_doc,
),
Type::Date => {
let fast_field_reader = reader.fast_fields();
let Some((column, _col_type)) = fast_field_reader
@@ -141,7 +126,6 @@ impl Weight for FastFieldRangeWeight {
column,
boost,
BoundsRange::new(bounds.lower_bound, bounds.upper_bound),
seek_doc,
)
}
Type::Bool | Type::Facet | Type::Bytes | Type::Json | Type::IpAddr => {
@@ -170,7 +154,7 @@ impl Weight for FastFieldRangeWeight {
ip_addr_column.min_value(),
ip_addr_column.max_value(),
);
let docset = RangeDocSet::new(value_range, ip_addr_column, seek_doc);
let docset = RangeDocSet::new(ValueRange::Inclusive(value_range), ip_addr_column);
Ok(Box::new(ConstScorer::new(docset, boost)))
} else if field_type.is_str() {
let Some(str_dict_column): Option<StrColumn> = reader.fast_fields().str(&field_name)?
@@ -189,12 +173,7 @@ impl Weight for FastFieldRangeWeight {
else {
return Ok(Box::new(EmptyScorer));
};
search_on_u64_ff(
column,
boost,
BoundsRange::new(lower_bound, upper_bound),
seek_doc,
)
search_on_u64_ff(column, boost, BoundsRange::new(lower_bound, upper_bound))
} else {
assert!(
maps_to_u64_fastfield(field_type.value_type()),
@@ -236,13 +215,12 @@ impl Weight for FastFieldRangeWeight {
column,
boost,
BoundsRange::new(bounds.lower_bound, bounds.upper_bound),
seek_doc,
)
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0, 0)?;
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
"Document #({doc}) does not match"
@@ -252,10 +230,6 @@ impl Weight for FastFieldRangeWeight {

Ok(explanation)
}

fn intersection_priority(&self) -> u32 {
30u32
}
}

/// On numerical fields the column type may not match the user provided one.
@@ -267,7 +241,6 @@ fn search_on_json_numerical_field(
typ: Type,
bounds: BoundsRange<ValueBytes<Vec<u8>>>,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
// Since we don't know which type was interpolated for the internal column we
// have to check for all numeric types (only one exists)
@@ -345,7 +318,6 @@ fn search_on_json_numerical_field(
column,
boost,
BoundsRange::new(bounds.lower_bound, bounds.upper_bound),
seek_doc,
)
}

@@ -424,7 +396,6 @@ fn search_on_u64_ff(
column: Column<u64>,
boost: Score,
bounds: BoundsRange<u64>,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let col_min_value = column.min_value();
let col_max_value = column.max_value();
@@ -455,8 +426,8 @@ fn search_on_u64_ff(
}
}

let doc_set = RangeDocSet::new(value_range, column, seek_doc);
Ok(Box::new(ConstScorer::new(doc_set, boost)))
let docset = RangeDocSet::new(ValueRange::Inclusive(value_range), column);
Ok(Box::new(ConstScorer::new(docset, boost)))
}

/// Returns true if the type maps to a u64 fast field
@@ -533,7 +504,7 @@ mod tests {
DateOptions, Field, NumericOptions, Schema, SchemaBuilder, FAST, INDEXED, STORED, STRING,
TEXT,
};
use crate::{DocId, Index, IndexWriter, TantivyDocument, Term, TERMINATED};
use crate::{Index, IndexWriter, TantivyDocument, Term, TERMINATED};

#[test]
fn test_text_field_ff_range_query() -> crate::Result<()> {
@@ -1171,52 +1142,11 @@ mod tests {
Bound::Included(Term::from_field_u64(field, 50_002)),
));
let scorer = range_query
.scorer(searcher.segment_reader(0), 1.0f32, 0)
.scorer(searcher.segment_reader(0), 1.0f32)
.unwrap();
assert_eq!(scorer.doc(), TERMINATED);
}

#[test]
fn test_fastfield_range_weight_seek_doc() {
let mut schema_builder = SchemaBuilder::new();
let field = schema_builder.add_u64_field("value", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer: IndexWriter = index.writer_for_tests().unwrap();

// Create 20 documents with values
// 0, 10, 20, ..., 90
// and then 50 again.
for i in 0..10 {
writer.add_document(doc!(field => (i * 10) as u64)).unwrap();
}
writer.add_document(doc!(field => 50u64)).unwrap();
writer.commit().unwrap();

let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0);

let range_weight = FastFieldRangeWeight::new(BoundsRange::new(
Bound::Included(Term::from_field_u64(field, 30)),
Bound::Included(Term::from_field_u64(field, 70)),
));

let doc_when_seeking_from = |seek_from: DocId| {
let doc_set = range_weight
.scorer(segment_reader, 1.0f32, seek_from)
.unwrap();
crate::docset::docset_to_doc_vec(doc_set)
};

assert_eq!(doc_when_seeking_from(0), vec![3, 4, 5, 6, 7, 10]);
assert_eq!(doc_when_seeking_from(1), vec![3, 4, 5, 6, 7, 10]);
assert_eq!(doc_when_seeking_from(3), vec![3, 4, 5, 6, 7, 10]);
assert_eq!(doc_when_seeking_from(7), vec![7, 10]);
assert_eq!(doc_when_seeking_from(8), vec![10]);
assert_eq!(doc_when_seeking_from(10), vec![10]);
assert_eq!(doc_when_seeking_from(11), Vec::<DocId>::new());
}

#[test]
fn range_regression3_test() {
let ops = vec![doc_from_id_1(1), doc_from_id_1(2), doc_from_id_1(3)];

@@ -81,6 +81,7 @@ where
TOptScorer: Scorer,
TScoreCombiner: ScoreCombiner,
{
#[inline]
fn score(&mut self) -> Score {
if let Some(score) = self.score_cache {
return score;

@@ -29,6 +29,7 @@ impl ScoreCombiner for DoNothingCombiner {

fn clear(&mut self) {}

#[inline]
fn score(&self) -> Score {
1.0
}
@@ -49,6 +50,7 @@ impl ScoreCombiner for SumCombiner {
self.score = 0.0;
}

#[inline]
fn score(&self) -> Score {
self.score
}
@@ -86,6 +88,7 @@ impl ScoreCombiner for DisjunctionMaxCombiner {
self.sum = 0.0;
}

#[inline]
fn score(&self) -> Score {
self.max + (self.sum - self.max) * self.tie_breaker
}
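A quick numeric check of the combiner formula `max + (sum - max) * tie_breaker`: clause scores 2.0 and 1.0 with a tie breaker of 0.5 combine to 2.0 + (3.0 - 2.0) * 0.5 = 2.5. A freestanding sketch of the same arithmetic:

fn dismax(scores: &[f32], tie_breaker: f32) -> f32 {
    let max = scores.iter().copied().fold(f32::MIN, f32::max);
    let sum: f32 = scores.iter().sum();
    max + (sum - max) * tie_breaker
}

fn main() {
    assert_eq!(dismax(&[2.0, 1.0], 0.5), 2.5);
}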
|
||||
|
||||
@@ -18,6 +18,7 @@ pub trait Scorer: downcast_rs::Downcast + DocSet + 'static {
|
||||
impl_downcast!(Scorer);
|
||||
|
||||
impl Scorer for Box<dyn Scorer> {
|
||||
#[inline]
|
||||
fn score(&mut self) -> Score {
|
||||
self.deref_mut().score()
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ mod tests {
        );
        let term_weight = term_query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
        let segment_reader = searcher.segment_reader(0);
        let mut term_scorer = term_weight.scorer(segment_reader, 1.0, 0)?;
        let mut term_scorer = term_weight.scorer(segment_reader, 1.0)?;
        assert_eq!(term_scorer.doc(), 0);
        assert_nearly_equals!(term_scorer.score(), 0.28768212);
        Ok(())
@@ -65,7 +65,7 @@ mod tests {
        );
        let term_weight = term_query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
        let segment_reader = searcher.segment_reader(0);
        let mut term_scorer = term_weight.scorer(segment_reader, 1.0, 0)?;
        let mut term_scorer = term_weight.scorer(segment_reader, 1.0)?;
        for i in 0u32..COMPRESSION_BLOCK_SIZE as u32 {
            assert_eq!(term_scorer.doc(), i);
            if i == COMPRESSION_BLOCK_SIZE as u32 - 1u32 {
@@ -162,7 +162,7 @@ mod tests {
        let searcher = index.reader()?.searcher();
        let term_weight =
            term_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
        let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
        let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
        assert_eq!(term_scorer.doc(), 0u32);
        term_scorer.seek(1u32);
        assert_eq!(term_scorer.doc(), 1u32);
@@ -470,7 +470,7 @@ mod tests {
                .weight(EnableScoring::disabled_from_schema(&schema))
                .unwrap();
            term_weight
                .scorer(searcher.segment_reader(0u32), 1.0f32, 0)
                .scorer(searcher.segment_reader(0u32), 1.0f32)
                .unwrap()
        };
        // Should be an `AllScorer`
@@ -484,53 +484,6 @@ mod tests {
        assert!(empty_scorer.is::<EmptyScorer>());
    }

    #[test]
    fn test_term_weight_seek_doc() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer: IndexWriter = index.writer_for_tests()?;

        // Create 11 documents where docs 3, 4, 5, 6, 7, and 10 contain "target"
        // (similar pattern to test_fastfield_range_weight_seek_doc)
        for i in 0..11 {
            if i == 3 || i == 4 || i == 5 || i == 6 || i == 7 || i == 10 {
                index_writer.add_document(doc!(text_field => "target"))?;
            } else {
                index_writer.add_document(doc!(text_field => "other"))?;
            }
        }
        index_writer.commit()?;

        let searcher = index.reader()?.searcher();
        let segment_reader = searcher.segment_reader(0);

        let term_query = TermQuery::new(
            Term::from_field_text(text_field, "target"),
            IndexRecordOption::Basic,
        );
        let term_weight =
            term_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;

        let doc_when_seeking_from = |seek_from: crate::DocId| {
            let scorer = term_weight
                .scorer(segment_reader, 1.0f32, seek_from)
                .unwrap();
            crate::docset::docset_to_doc_vec(scorer)
        };

        assert_eq!(doc_when_seeking_from(0), vec![3, 4, 5, 6, 7, 10]);
        assert_eq!(doc_when_seeking_from(1), vec![3, 4, 5, 6, 7, 10]);
        assert_eq!(doc_when_seeking_from(3), vec![3, 4, 5, 6, 7, 10]);
        assert_eq!(doc_when_seeking_from(7), vec![7, 10]);
        assert_eq!(doc_when_seeking_from(8), vec![10]);
        assert_eq!(doc_when_seeking_from(10), vec![10]);
        assert_eq!(doc_when_seeking_from(11), Vec::<crate::DocId>::new());

        Ok(())
    }

    #[test]
    fn test_term_weight_all_query_optimization_disable_when_scoring_enabled() {
        let mut schema_builder = Schema::builder();
@@ -556,7 +509,7 @@ mod tests {
                .weight(EnableScoring::enabled_from_searcher(&searcher))
                .unwrap();
            term_weight
                .scorer(searcher.segment_reader(0u32), 1.0f32, 0)
                .scorer(searcher.segment_reader(0u32), 1.0f32)
                .unwrap()
        };
        // Should be an `AllScorer`

@@ -119,6 +119,7 @@ impl DocSet for TermScorer {
}

impl Scorer for TermScorer {
    #[inline]
    fn score(&mut self) -> Score {
        let fieldnorm_id = self.fieldnorm_id();
        let term_freq = self.term_freq();

@@ -34,19 +34,12 @@ impl TermOrEmptyOrAllScorer {
}

impl Weight for TermWeight {
    fn scorer(
        &self,
        reader: &SegmentReader,
        boost: Score,
        seek_doc: DocId,
    ) -> crate::Result<Box<dyn Scorer>> {
        Ok(self
            .specialized_scorer(reader, boost, seek_doc)?
            .into_boxed_scorer())
    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
        Ok(self.specialized_scorer(reader, boost)?.into_boxed_scorer())
    }

    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
        match self.specialized_scorer(reader, 1.0, doc)? {
        match self.specialized_scorer(reader, 1.0)? {
            TermOrEmptyOrAllScorer::TermScorer(mut term_scorer) => {
                if term_scorer.doc() > doc || term_scorer.seek(doc) != doc {
                    return Err(does_not_match(doc));
@@ -62,7 +55,7 @@ impl Weight for TermWeight {

    fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
        if let Some(alive_bitset) = reader.alive_bitset() {
            Ok(self.scorer(reader, 1.0, 0)?.count(alive_bitset))
            Ok(self.scorer(reader, 1.0)?.count(alive_bitset))
        } else {
            let field = self.term.field();
            let inv_index = reader.inverted_index(field)?;
@@ -78,7 +71,7 @@ impl Weight for TermWeight {
        reader: &SegmentReader,
        callback: &mut dyn FnMut(DocId, Score),
    ) -> crate::Result<()> {
        match self.specialized_scorer(reader, 1.0, 0u32)? {
        match self.specialized_scorer(reader, 1.0)? {
            TermOrEmptyOrAllScorer::TermScorer(mut term_scorer) => {
                for_each_scorer(&mut *term_scorer, callback);
            }
@@ -97,7 +90,7 @@ impl Weight for TermWeight {
        reader: &SegmentReader,
        callback: &mut dyn FnMut(&[DocId]),
    ) -> crate::Result<()> {
        match self.specialized_scorer(reader, 1.0, 0u32)? {
        match self.specialized_scorer(reader, 1.0)? {
            TermOrEmptyOrAllScorer::TermScorer(mut term_scorer) => {
                let mut buffer = [0u32; COLLECT_BLOCK_BUFFER_LEN];
                for_each_docset_buffered(&mut term_scorer, &mut buffer, callback);
@@ -128,7 +121,7 @@ impl Weight for TermWeight {
        reader: &SegmentReader,
        callback: &mut dyn FnMut(DocId, Score) -> Score,
    ) -> crate::Result<()> {
        let specialized_scorer = self.specialized_scorer(reader, 1.0, 0u32)?;
        let specialized_scorer = self.specialized_scorer(reader, 1.0)?;
        match specialized_scorer {
            TermOrEmptyOrAllScorer::TermScorer(term_scorer) => {
                crate::query::boolean_query::block_wand_single_scorer(
@@ -146,12 +139,6 @@ impl Weight for TermWeight {
        }
        Ok(())
    }

    /// Returns a priority number used to sort weights when running an
    /// intersection.
    fn intersection_priority(&self) -> u32 {
        10u32
    }
}

impl TermWeight {
@@ -182,7 +169,7 @@ impl TermWeight {
        reader: &SegmentReader,
        boost: Score,
    ) -> crate::Result<Option<TermScorer>> {
        let scorer = self.specialized_scorer(reader, boost, 0u32)?;
        let scorer = self.specialized_scorer(reader, boost)?;
        Ok(match scorer {
            TermOrEmptyOrAllScorer::TermScorer(scorer) => Some(*scorer),
            _ => None,
@@ -193,7 +180,6 @@ impl TermWeight {
        &self,
        reader: &SegmentReader,
        boost: Score,
        seek_doc: DocId,
    ) -> crate::Result<TermOrEmptyOrAllScorer> {
        let field = self.term.field();
        let inverted_index = reader.inverted_index(field)?;
@@ -210,11 +196,8 @@ impl TermWeight {
            )));
        }

        let segment_postings: SegmentPostings = inverted_index.read_postings_from_terminfo(
            &term_info,
            self.index_record_option,
            seek_doc,
        )?;
        let segment_postings: SegmentPostings =
            inverted_index.read_postings_from_terminfo(&term_info, self.index_record_option)?;

        let fieldnorm_reader = self.fieldnorm_reader(reader)?;
        let similarity_weight = self.similarity_weight.boost_by(boost);

@@ -128,6 +128,7 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> BufferedUnionScorer<TScorer
        }
    }

    #[inline]
    fn advance_buffered(&mut self) -> bool {
        while self.bucket_idx < HORIZON_NUM_TINYBITSETS {
            if let Some(val) = self.bitsets[self.bucket_idx].pop_lowest() {
@@ -156,6 +157,7 @@ where
    TScorer: Scorer,
    TScoreCombiner: ScoreCombiner,
{
    #[inline]
    fn advance(&mut self) -> DocId {
        if self.advance_buffered() {
            return self.doc;
@@ -245,6 +247,7 @@ where
        }
    }

    #[inline]
    fn doc(&self) -> DocId {
        self.doc
    }
@@ -286,6 +289,7 @@ where
    TScoreCombiner: ScoreCombiner,
    TScorer: Scorer,
{
    #[inline]
    fn score(&mut self) -> Score {
        self.score
    }

@@ -68,28 +68,15 @@ pub trait Weight: Send + Sync + 'static {
    ///
    /// `boost` is a multiplier to apply to the score.
    ///
    /// As an optimization, the scorer can be positioned on any document below `seek_doc`
    /// matching the request.
    /// If there is no such document, it should be positioned on the first document matching
    /// the request (or TERMINATED if no document matches).
    ///
    /// Entirely ignoring that parameter and positioning the Scorer on the first document
    /// is always correct.
    ///
    /// See [`Query`](crate::query::Query).
    fn scorer(
        &self,
        reader: &SegmentReader,
        boost: Score,
        seek_doc: DocId,
    ) -> crate::Result<Box<dyn Scorer>>;
    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
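
The contract being removed above is cheap to satisfy: an implementation may ignore `seek_doc` entirely, since positioning the scorer on the first matching document is always correct. A minimal sketch against the pre-removal signature (`MyWeight` and `MyScorer` are hypothetical names, not part of the diff):

    impl Weight for MyWeight {
        fn scorer(
            &self,
            reader: &SegmentReader,
            boost: Score,
            _seek_doc: DocId, // ignored: starting from the first match is always correct
        ) -> crate::Result<Box<dyn Scorer>> {
            // Hypothetical constructor standing in for whatever builds the DocSet.
            Ok(Box::new(MyScorer::for_segment(reader, boost)?))
        }

        // explain() elided; count() and the for_each_* methods fall back to
        // the trait defaults shown below.
    }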

    /// Returns an [`Explanation`] for the given document.
    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;

    /// Returns the number of documents within the given [`SegmentReader`].
    fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
        let mut scorer = self.scorer(reader, 1.0, 0)?;
        let mut scorer = self.scorer(reader, 1.0)?;
        if let Some(alive_bitset) = reader.alive_bitset() {
            Ok(scorer.count(alive_bitset))
        } else {
@@ -104,7 +91,7 @@ pub trait Weight: Send + Sync + 'static {
        reader: &SegmentReader,
        callback: &mut dyn FnMut(DocId, Score),
    ) -> crate::Result<()> {
        let mut scorer = self.scorer(reader, 1.0, 0)?;
        let mut scorer = self.scorer(reader, 1.0)?;
        for_each_scorer(scorer.as_mut(), callback);
        Ok(())
    }
@@ -116,7 +103,7 @@ pub trait Weight: Send + Sync + 'static {
        reader: &SegmentReader,
        callback: &mut dyn FnMut(&[DocId]),
    ) -> crate::Result<()> {
        let mut docset = self.scorer(reader, 1.0, 0)?;
        let mut docset = self.scorer(reader, 1.0)?;

        let mut buffer = [0u32; COLLECT_BLOCK_BUFFER_LEN];
        for_each_docset_buffered(&mut docset, &mut buffer, callback);
@@ -139,18 +126,8 @@ pub trait Weight: Send + Sync + 'static {
        reader: &SegmentReader,
        callback: &mut dyn FnMut(DocId, Score) -> Score,
    ) -> crate::Result<()> {
        let mut scorer = self.scorer(reader, 1.0, 0)?;
        let mut scorer = self.scorer(reader, 1.0)?;
        for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
        Ok(())
    }

    /// Returns a priority number used to sort weights when running an
    /// intersection.
    ///
    /// Tweaking this value only impacts performance.
    /// A higher priority means that the `.scorer()` will be more likely to be evaluated
    /// after the sibling weights, and be passed a higher `seek_doc` value as a result.
    fn intersection_priority(&self) -> u32 {
        20u32
    }
}
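
Under the mechanism being removed here, a weight wrapping an expensive per-document check could have raised its priority so that cheaper siblings drive the intersection and it is asked to seek less often. A hedged sketch (`ExpensiveFilterWeight` is a hypothetical type, not part of this diff):

    impl Weight for ExpensiveFilterWeight {
        // scorer() and explain() elided.

        fn intersection_priority(&self) -> u32 {
            // The trait default is 20 and TermWeight lowers it to 10, so any
            // value above 20 pushes this weight to the back of the intersection.
            30u32
        }
    }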

@@ -58,6 +58,31 @@ impl AsRef<OwnedValue> for OwnedValue {
    }
}

impl OwnedValue {
    /// Returns a u8 discriminant value for the `OwnedValue` variant.
    ///
    /// This can be used to sort `OwnedValue` instances by their type.
    pub fn discriminant_value(&self) -> u8 {
        match self {
            OwnedValue::Null => 0,
            OwnedValue::Str(_) => 1,
            OwnedValue::PreTokStr(_) => 2,
            // It is key to make sure U64, I64, F64 are grouped together in there, otherwise we
            // might be breaking transitivity.
            OwnedValue::U64(_) => 3,
            OwnedValue::I64(_) => 4,
            OwnedValue::F64(_) => 5,
            OwnedValue::Bool(_) => 6,
            OwnedValue::Date(_) => 7,
            OwnedValue::Facet(_) => 8,
            OwnedValue::Bytes(_) => 9,
            OwnedValue::Array(_) => 10,
            OwnedValue::Object(_) => 11,
            OwnedValue::IpAddr(_) => 12,
        }
    }
}
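
A minimal usage sketch of the new helper: sorting mixed values by type before comparing within a type. Keeping U64, I64, and F64 on adjacent discriminants matters because a comparator that compares numeric values across those three variants must not contradict the type-level ordering, or the overall ordering would lose transitivity.

    // Sketch: group values by variant; ties can then be broken by a
    // per-variant comparison.
    fn sort_by_type(values: &mut [OwnedValue]) {
        values.sort_by_key(|value| value.discriminant_value());
    }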

impl<'a> Value<'a> for &'a OwnedValue {
    type ArrayIter = std::slice::Iter<'a, OwnedValue>;
    type ObjectIter = ObjectMapIter<'a>;

@@ -98,6 +98,10 @@
//! make it possible to access the value given the doc id rapidly. This is useful if the value
//! of the field is required during scoring or collection for instance.
//!
//! Some queries may leverage fast fields when run on a field that is not indexed. This can be
//! handy if that kind of request is infrequent; note, however, that searching on a fast field is
//! generally much slower than searching in an index.
//!
//! ```
//! use tantivy::schema::*;
//! let mut schema_builder = Schema::builder();
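
The doc example is cut short by the hunk window above. A sketch of the kind of declaration it introduces, assuming nothing beyond the `FAST` flag already used in the tests earlier in this diff ("popularity" is an illustrative field name, not the doc's actual text):

    use tantivy::schema::{Schema, FAST};

    let mut schema_builder = Schema::builder();
    let popularity = schema_builder.add_u64_field("popularity", FAST);
    let schema = schema_builder.build();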

@@ -483,7 +483,7 @@ mod tests {

    use super::{collapse_overlapped_ranges, search_fragments, select_best_fragment_combination};
    use crate::query::QueryParser;
    use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions, TEXT};
    use crate::schema::{Schema, TEXT};
    use crate::snippet::SnippetGenerator;
    use crate::tokenizer::{NgramTokenizer, SimpleTokenizer};
    use crate::Index;
@@ -727,8 +727,10 @@ Survey in 2016, 2017, and 2018."#;
        Ok(())
    }

    #[cfg(feature = "stemmer")]
    #[test]
    fn test_snippet_generator() -> crate::Result<()> {
        use crate::schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
        let mut schema_builder = Schema::builder();
        let text_options = TextOptions::default().set_indexing_options(
            TextFieldIndexing::default()

@@ -102,6 +102,7 @@ pub(crate) mod tests {
    }

    const NUM_DOCS: usize = 1_000;

    #[test]
    fn test_doc_store_iter_with_delete_bug_1077() -> crate::Result<()> {
        // this will cover deletion of the first element in a checkpoint
@@ -113,7 +114,7 @@ pub(crate) mod tests {
        let directory = RamDirectory::create();
        let store_wrt = directory.open_write(path)?;
        let schema =
            write_lorem_ipsum_store(store_wrt, NUM_DOCS, Compressor::Lz4, BLOCK_SIZE, true);
            write_lorem_ipsum_store(store_wrt, NUM_DOCS, Compressor::default(), BLOCK_SIZE, true);
        let field_title = schema.get_field("title").unwrap();
        let store_file = directory.open_read(path)?;
        let store = StoreReader::open(store_file, 10)?;

@@ -465,7 +465,7 @@ mod tests {
        let directory = RamDirectory::create();
        let path = Path::new("store");
        let writer = directory.open_write(path)?;
        let schema = write_lorem_ipsum_store(writer, 500, Compressor::default(), BLOCK_SIZE, true);
        let schema = write_lorem_ipsum_store(writer, 500, Compressor::None, BLOCK_SIZE, true);
        let title = schema.get_field("title").unwrap();
        let store_file = directory.open_read(path)?;
        let store = StoreReader::open(store_file, DOCSTORE_CACHE_CAPACITY)?;
@@ -499,7 +499,7 @@ mod tests {
        assert_eq!(store.cache_stats().cache_hits, 1);
        assert_eq!(store.cache_stats().cache_misses, 2);

        assert_eq!(store.cache.peek_lru(), Some(11207));
        assert_eq!(store.cache.peek_lru(), Some(232206));

        Ok(())
    }

@@ -132,13 +132,14 @@ mod regex_tokenizer;
mod remove_long;
mod simple_tokenizer;
mod split_compound_words;
mod stemmer;
mod stop_word_filter;
mod tokenized_string;
mod tokenizer;
mod tokenizer_manager;
mod whitespace_tokenizer;

#[cfg(feature = "stemmer")]
mod stemmer;
pub use tokenizer_api::{BoxTokenStream, Token, TokenFilter, TokenStream, Tokenizer};

pub use self::alphanum_only::AlphaNumOnlyFilter;
@@ -151,6 +152,7 @@ pub use self::regex_tokenizer::RegexTokenizer;
pub use self::remove_long::RemoveLongFilter;
pub use self::simple_tokenizer::{SimpleTokenStream, SimpleTokenizer};
pub use self::split_compound_words::SplitCompoundWords;
#[cfg(feature = "stemmer")]
pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter;
pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
@@ -167,10 +169,7 @@ pub const MAX_TOKEN_LEN: usize = u16::MAX as usize - 5;

#[cfg(test)]
pub(crate) mod tests {
    use super::{
        Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, TokenizerManager,
    };
    use crate::tokenizer::TextAnalyzer;
    use super::{Token, TokenizerManager};

    /// This is a function that can be used in tests and doc tests
    /// to assert a token's correctness.
@@ -205,59 +204,15 @@ pub(crate) mod tests {
    }

    #[test]
    fn test_en_tokenizer() {
    fn test_tokenizer_does_not_exist() {
        let tokenizer_manager = TokenizerManager::default();
        assert!(tokenizer_manager.get("en_doesnotexist").is_none());
        let mut en_tokenizer = tokenizer_manager.get("en_stem").unwrap();
        let mut tokens: Vec<Token> = vec![];
        {
            let mut add_token = |token: &Token| {
                tokens.push(token.clone());
            };
            en_tokenizer
                .token_stream("Hello, happy tax payer!")
                .process(&mut add_token);
        }

        assert_eq!(tokens.len(), 4);
        assert_token(&tokens[0], 0, "hello", 0, 5);
        assert_token(&tokens[1], 1, "happi", 7, 12);
        assert_token(&tokens[2], 2, "tax", 13, 16);
        assert_token(&tokens[3], 3, "payer", 17, 22);
    }

    #[test]
    fn test_non_en_tokenizer() {
        let tokenizer_manager = TokenizerManager::default();
        tokenizer_manager.register(
            "el_stem",
            TextAnalyzer::builder(SimpleTokenizer::default())
                .filter(RemoveLongFilter::limit(40))
                .filter(LowerCaser)
                .filter(Stemmer::new(Language::Greek))
                .build(),
        );
        let mut en_tokenizer = tokenizer_manager.get("el_stem").unwrap();
        let mut tokens: Vec<Token> = vec![];
        {
            let mut add_token = |token: &Token| {
                tokens.push(token.clone());
            };
            en_tokenizer
                .token_stream("Καλημέρα, χαρούμενε φορολογούμενε!")
                .process(&mut add_token);
        }

        assert_eq!(tokens.len(), 3);
        assert_token(&tokens[0], 0, "καλημερ", 0, 16);
        assert_token(&tokens[1], 1, "χαρουμεν", 18, 36);
        assert_token(&tokens[2], 2, "φορολογουμεν", 37, 63);
    }

    #[test]
    fn test_tokenizer_empty() {
        let tokenizer_manager = TokenizerManager::default();
        let mut en_tokenizer = tokenizer_manager.get("en_stem").unwrap();
        let mut en_tokenizer = tokenizer_manager.get("default").unwrap();
        {
            let mut tokens: Vec<Token> = vec![];
            {

@@ -142,3 +142,60 @@ impl<T: TokenStream> TokenStream for StemmerTokenStream<T> {
        self.tail.token_mut()
    }
}

#[cfg(test)]
mod tests {
    use tokenizer_api::Token;

    use super::*;
    use crate::tokenizer::tests::assert_token;
    use crate::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer, TokenizerManager};

    #[test]
    fn test_en_stem() {
        let tokenizer_manager = TokenizerManager::default();
        let mut en_tokenizer = tokenizer_manager.get("en_stem").unwrap();
        let mut tokens: Vec<Token> = vec![];
        {
            let mut add_token = |token: &Token| {
                tokens.push(token.clone());
            };
            en_tokenizer
                .token_stream("Dogs are the bests!")
                .process(&mut add_token);
        }

        assert_eq!(tokens.len(), 4);
        assert_token(&tokens[0], 0, "dog", 0, 4);
        assert_token(&tokens[1], 1, "are", 5, 8);
        assert_token(&tokens[2], 2, "the", 9, 12);
        assert_token(&tokens[3], 3, "best", 13, 18);
    }

    #[test]
    fn test_non_en_stem() {
        let tokenizer_manager = TokenizerManager::default();
        tokenizer_manager.register(
            "el_stem",
            TextAnalyzer::builder(SimpleTokenizer::default())
                .filter(LowerCaser)
                .filter(Stemmer::new(Language::Greek))
                .build(),
        );
        let mut el_tokenizer = tokenizer_manager.get("el_stem").unwrap();
        let mut tokens: Vec<Token> = vec![];
        {
            let mut add_token = |token: &Token| {
                tokens.push(token.clone());
            };
            el_tokenizer
                .token_stream("Καλημέρα, χαρούμενε φορολογούμενε!")
                .process(&mut add_token);
        }

        assert_eq!(tokens.len(), 3);
        assert_token(&tokens[0], 0, "καλημερ", 0, 16);
        assert_token(&tokens[1], 1, "χαρουμεν", 18, 36);
        assert_token(&tokens[2], 2, "φορολογουμεν", 37, 63);
    }
}

@@ -1,10 +1,9 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use crate::tokenizer::stemmer::Language;
use crate::tokenizer::tokenizer::TextAnalyzer;
use crate::tokenizer::{
    LowerCaser, RawTokenizer, RemoveLongFilter, SimpleTokenizer, Stemmer, WhitespaceTokenizer,
    LowerCaser, RawTokenizer, RemoveLongFilter, SimpleTokenizer, WhitespaceTokenizer,
};

/// The tokenizer manager serves as a store for
@@ -64,14 +63,18 @@ impl Default for TokenizerManager {
                .filter(LowerCaser)
                .build(),
        );
        manager.register(
            "en_stem",
            TextAnalyzer::builder(SimpleTokenizer::default())
                .filter(RemoveLongFilter::limit(40))
                .filter(LowerCaser)
                .filter(Stemmer::new(Language::English))
                .build(),
        );
        #[cfg(feature = "stemmer")]
        {
            use crate::tokenizer::stemmer::{Language, Stemmer};
            manager.register(
                "en_stem",
                TextAnalyzer::builder(SimpleTokenizer::default())
                    .filter(RemoveLongFilter::limit(40))
                    .filter(LowerCaser) // The stemmer does not lowercase
                    .filter(Stemmer::new(Language::English))
                    .build(),
            );
        }
        manager.register("whitespace", WhitespaceTokenizer::default());
        manager
    }
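
With this registration now gated on the "stemmer" feature, "en_stem" may be absent at runtime, so lookups should not assume it exists. A hedged usage sketch, relying only on the `get` behavior exercised by the tests above:

    let manager = TokenizerManager::default();
    let analyzer = manager
        .get("en_stem")
        .or_else(|| manager.get("default"))
        .expect("the default tokenizer is always registered");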