mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-06 01:02:55 +00:00
add range query via ip fast field
@@ -111,7 +111,9 @@ mod tests {
let (major_item, _minor_item, data) = get_data_50percent_item();
let column = get_u128_column_from_data(&data);

b.iter(|| column.get_between_vals(major_item..=major_item));
b.iter(|| {
column.get_positions_for_value_range(major_item..=major_item, 0..data.len() as u32)
});
}

#[bench]
@@ -119,7 +121,9 @@ mod tests {
let (_major_item, minor_item, data) = get_data_50percent_item();
let column = get_u128_column_from_data(&data);

b.iter(|| column.get_between_vals(minor_item..=minor_item));
b.iter(|| {
column.get_positions_for_value_range(minor_item..=minor_item, 0..data.len() as u32)
});
}

#[bench]
@@ -127,7 +131,7 @@ mod tests {
let (_major_item, _minor_item, data) = get_data_50percent_item();
let column = get_u128_column_from_data(&data);

b.iter(|| column.get_between_vals(0..=u128::MAX));
b.iter(|| column.get_positions_for_value_range(0..=u128::MAX, 0..data.len() as u32));
}

#[bench]

@@ -1,5 +1,5 @@
use std::marker::PhantomData;
use std::ops::RangeInclusive;
use std::ops::{Range, RangeInclusive};

use tantivy_bitpacker::minmax;

@@ -31,13 +31,21 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
}
}

/// Return the positions of values which are in the provided range.
/// Get the positions of values which are in the provided value range.
///
/// Note that position == docid for single value fast fields
#[inline]
fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64> {
fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
) -> Vec<u32> {
let mut vals = Vec::new();
for idx in 0..self.num_vals() as u64 {
let val = self.get_val(idx);
if range.contains(&val) {
let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());

for idx in doc_id_range.start..doc_id_range.end {
let val = self.get_val(idx as u64);
if value_range.contains(&val) {
vals.push(idx);
}
}
@@ -156,7 +164,7 @@ struct MonotonicMappingColumn<C, T, Input> {
/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
///
/// The inverse of the mapping is required for:
/// `fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64> `
/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u64> `
/// The user provides the original value range and we need to monotonic map them in the same way the
/// serialization does before calling the underlying column.
///
@@ -215,10 +223,15 @@ where
)
}

fn get_between_vals(&self, range: RangeInclusive<Output>) -> Vec<u64> {
self.from_column.get_between_vals(
fn get_positions_for_value_range(
&self,
range: RangeInclusive<Output>,
doc_id_range: Range<u32>,
) -> Vec<u32> {
self.from_column.get_positions_for_value_range(
self.monotonic_mapping.inverse(range.start().clone())
..=self.monotonic_mapping.inverse(range.end().clone()),
doc_id_range,
)
}

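For orientation, here is a standalone sketch of the semantics of the new default method: the scan is restricted to `doc_id_range` (clamped to the number of stored values) and returns the positions whose value lies in `value_range`. It operates on a plain slice for illustration and is not the crate's `Column` trait itself.

use std::ops::{Range, RangeInclusive};

// Simplified stand-in for `Column::get_positions_for_value_range`.
fn positions_for_value_range(
    values: &[u64],
    value_range: RangeInclusive<u64>,
    doc_id_range: Range<u32>,
) -> Vec<u32> {
    // Clamp the requested docid range to the number of stored values.
    let end = doc_id_range.end.min(values.len() as u32);
    (doc_id_range.start..end)
        .filter(|&idx| value_range.contains(&values[idx as usize]))
        .collect()
}

fn main() {
    let values = vec![10u64, 50, 20, 50, 30];
    // Over the whole docid range, positions 1 and 3 hold the value 50.
    assert_eq!(positions_for_value_range(&values, 50..=50, 0..5), vec![1, 3]);
    // Restricting the docid range drops position 1.
    assert_eq!(positions_for_value_range(&values, 50..=50, 2..5), vec![3]);
}
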
@@ -14,7 +14,7 @@ use std::{
cmp::Ordering,
collections::BTreeSet,
io::{self, Write},
ops::RangeInclusive,
ops::{Range, RangeInclusive},
};

use common::{BinarySerializable, CountingWriter, VInt, VIntU128};
@@ -304,8 +304,14 @@ impl Column<u128> for CompactSpaceDecompressor {
fn iter(&self) -> Box<dyn Iterator<Item = u128> + '_> {
Box::new(self.iter())
}
fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
self.get_between_vals(range)

#[inline]
fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<u128>,
doc_id_range: Range<u32>,
) -> Vec<u32> {
self.get_positions_for_value_range(value_range, doc_id_range)
}
}

@@ -340,12 +346,18 @@ impl CompactSpaceDecompressor {
/// Comparing on compact space: Real dataset 1.08 GElements/s
///
/// Comparing on original space: Real dataset .06 GElements/s (not completely optimized)
pub fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
if range.start() > range.end() {
#[inline]
pub fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<u128>,
doc_id_range: Range<u32>,
) -> Vec<u32> {
if value_range.start() > value_range.end() {
return Vec::new();
}
let from_value = *range.start();
let to_value = *range.end();
let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
let from_value = *value_range.start();
let to_value = *value_range.end();
assert!(to_value >= from_value);
let compact_from = self.u128_to_compact(from_value);
let compact_to = self.u128_to_compact(to_value);
@@ -377,8 +389,10 @@ impl CompactSpaceDecompressor {
let range = compact_from..=compact_to;
let mut positions = Vec::new();

let scan_num_docs = doc_id_range.end - doc_id_range.start;

let step_size = 4;
let cutoff = self.params.num_vals as u64 - self.params.num_vals as u64 % step_size;
let cutoff = doc_id_range.start + scan_num_docs - scan_num_docs % step_size;

let mut push_if_in_range = |idx, val| {
if range.contains(&val) {
@@ -387,7 +401,7 @@ impl CompactSpaceDecompressor {
};
let get_val = |idx| self.params.bit_unpacker.get(idx as u64, &self.data);
// unrolled loop
for idx in (0..cutoff).step_by(step_size as usize) {
for idx in (doc_id_range.start..cutoff).step_by(step_size as usize) {
let idx1 = idx;
let idx2 = idx + 1;
let idx3 = idx + 2;
@@ -403,7 +417,7 @@ impl CompactSpaceDecompressor {
}

// handle rest
for idx in cutoff..self.params.num_vals as u64 {
for idx in cutoff..doc_id_range.end {
push_if_in_range(idx, get_val(idx));
}

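As a side note, the scan keeps its unrolled structure but is now bounded by the docid range: `cutoff` is the last index below which whole groups of `step_size` can be processed, and the tail loop handles the remainder. A hedged standalone sketch of that partitioning, over a plain slice rather than the bit-packed data:

use std::ops::Range;

// Standalone sketch of the range-bounded, step-wise scan (simplified: values
// come from a slice instead of a bit unpacker).
fn scan_range(values: &[u64], lo: u64, hi: u64, doc_id_range: Range<u32>) -> Vec<u32> {
    let mut positions = Vec::new();
    let mut push_if_in_range = |idx: u32, val: u64| {
        if (lo..=hi).contains(&val) {
            positions.push(idx);
        }
    };
    let scan_num_docs = doc_id_range.end - doc_id_range.start;
    let step_size: u32 = 4;
    // Largest index up to which the loop can take full steps of `step_size`.
    let cutoff = doc_id_range.start + scan_num_docs - scan_num_docs % step_size;
    for idx in (doc_id_range.start..cutoff).step_by(step_size as usize) {
        for offset in 0..step_size {
            let i = idx + offset;
            push_if_in_range(i, values[i as usize]);
        }
    }
    // Handle the remainder that did not fill a full step.
    for idx in cutoff..doc_id_range.end {
        push_if_in_range(idx, values[idx as usize]);
    }
    positions
}
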
@@ -498,9 +512,10 @@ mod tests {
let expected_positions = expected
.iter()
.positions(|val| range.contains(val))
.map(|pos| pos as u64)
.map(|pos| pos as u32)
.collect::<Vec<_>>();
let positions = decompressor.get_between_vals(range);
let positions =
decompressor.get_positions_for_value_range(range, 0..decompressor.num_vals());
assert_eq!(positions, expected_positions);
};

@@ -540,24 +555,66 @@ mod tests {
];
let data = test_aux_vals(vals);
let decomp = CompactSpaceDecompressor::open(data).unwrap();
let positions = decomp.get_between_vals(0..=1);
let complete_range = 0..vals.len() as u32;
for (pos, val) in vals.iter().enumerate() {
let val = *val as u128;
let pos = pos as u32;
let positions = decomp.get_positions_for_value_range(val..=val, pos..pos + 1);
assert_eq!(positions, vec![pos]);
}

// handle docid range out of bounds
let positions = decomp.get_positions_for_value_range(0..=1, 1..u32::MAX);
assert_eq!(positions, vec![]);

let positions = decomp.get_positions_for_value_range(0..=1, complete_range.clone());
assert_eq!(positions, vec![0]);
let positions = decomp.get_between_vals(0..=2);
let positions = decomp.get_positions_for_value_range(0..=2, complete_range.clone());
assert_eq!(positions, vec![0]);
let positions = decomp.get_between_vals(0..=3);
let positions = decomp.get_positions_for_value_range(0..=3, complete_range.clone());
assert_eq!(positions, vec![0, 2]);
assert_eq!(decomp.get_between_vals(99999u128..=99999u128), vec![3]);
assert_eq!(decomp.get_between_vals(99999u128..=100000u128), vec![3, 4]);
assert_eq!(decomp.get_between_vals(99998u128..=100000u128), vec![3, 4]);
assert_eq!(decomp.get_between_vals(99998u128..=99999u128), vec![3]);
assert_eq!(decomp.get_between_vals(99998u128..=99998u128), vec![]);
assert_eq!(decomp.get_between_vals(333u128..=333u128), vec![8]);
assert_eq!(decomp.get_between_vals(332u128..=333u128), vec![8]);
assert_eq!(decomp.get_between_vals(332u128..=334u128), vec![8]);
assert_eq!(decomp.get_between_vals(333u128..=334u128), vec![8]);
assert_eq!(
decomp.get_positions_for_value_range(99999u128..=99999u128, complete_range.clone()),
vec![3]
);
assert_eq!(
decomp.get_positions_for_value_range(99999u128..=100000u128, complete_range.clone()),
vec![3, 4]
);
assert_eq!(
decomp.get_positions_for_value_range(99998u128..=100000u128, complete_range.clone()),
vec![3, 4]
);
assert_eq!(
decomp.get_positions_for_value_range(99998u128..=99999u128, complete_range.clone()),
vec![3]
);
assert_eq!(
decomp.get_positions_for_value_range(99998u128..=99998u128, complete_range.clone()),
vec![]
);
assert_eq!(
decomp.get_positions_for_value_range(333u128..=333u128, complete_range.clone()),
vec![8]
);
assert_eq!(
decomp.get_positions_for_value_range(332u128..=333u128, complete_range.clone()),
vec![8]
);
assert_eq!(
decomp.get_positions_for_value_range(332u128..=334u128, complete_range.clone()),
vec![8]
);
assert_eq!(
decomp.get_positions_for_value_range(333u128..=334u128, complete_range.clone()),
vec![8]
);

assert_eq!(
decomp.get_between_vals(4_000_211_221u128..=5_000_000_000u128),
decomp.get_positions_for_value_range(
4_000_211_221u128..=5_000_000_000u128,
complete_range.clone()
),
vec![6, 7]
);
}
@@ -582,11 +639,12 @@ mod tests {
];
let data = test_aux_vals(vals);
let decomp = CompactSpaceDecompressor::open(data).unwrap();
let positions = decomp.get_between_vals(0..=5);
let complete_range = 0..vals.len() as u32;
let positions = decomp.get_positions_for_value_range(0..=5, complete_range.clone());
assert_eq!(positions, vec![]);
let positions = decomp.get_between_vals(0..=100);
let positions = decomp.get_positions_for_value_range(0..=100, complete_range.clone());
assert_eq!(positions, vec![0]);
let positions = decomp.get_between_vals(0..=105);
let positions = decomp.get_positions_for_value_range(0..=105, complete_range.clone());
assert_eq!(positions, vec![0]);
}

@@ -610,11 +668,24 @@ mod tests {
let mut out = Vec::new();
serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
let complete_range = 0..vals.len() as u32;

assert_eq!(decomp.get_between_vals(199..=200), vec![0]);
assert_eq!(decomp.get_between_vals(199..=201), vec![0, 1]);
assert_eq!(decomp.get_between_vals(200..=200), vec![0]);
assert_eq!(decomp.get_between_vals(1_000_000..=1_000_000), vec![11]);
assert_eq!(
decomp.get_positions_for_value_range(199..=200, complete_range.clone()),
vec![0]
);
assert_eq!(
decomp.get_positions_for_value_range(199..=201, complete_range.clone()),
vec![0, 1]
);
assert_eq!(
decomp.get_positions_for_value_range(200..=200, complete_range.clone()),
vec![0]
);
assert_eq!(
decomp.get_positions_for_value_range(1_000_000..=1_000_000, complete_range.clone()),
vec![11]
);
}

#[test]

@@ -211,13 +211,16 @@ mod tests {

if !data.is_empty() {
let test_rand_idx = rand::thread_rng().gen_range(0..=data.len() - 1);
let expected_positions: Vec<u64> = data
let expected_positions: Vec<u32> = data
.iter()
.enumerate()
.filter(|(_, el)| **el == data[test_rand_idx])
.map(|(pos, _)| pos as u64)
.map(|(pos, _)| pos as u32)
.collect();
let positions = reader.get_between_vals(data[test_rand_idx]..=data[test_rand_idx]);
let positions = reader.get_positions_for_value_range(
data[test_rand_idx]..=data[test_rand_idx],
0..data.len() as u32,
);
assert_eq!(expected_positions, positions);
}
Some((estimation, actual_compression))

@@ -117,7 +117,8 @@ fn bench_ip() {
// Sample some ranges
for value in dataset.iter().take(1110).skip(1100).cloned() {
print_time!("get range");
let doc_values = decompressor.get_between_vals(value..=value);
let doc_values =
decompressor.get_positions_for_value_range(value..=value, 0..decompressor.num_vals());
println!("{:?}", doc_values.len());
}
}

@@ -159,8 +159,14 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
}

/// Returns all docids which are in the provided value range
pub fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<DocId> {
let positions = self.vals_reader.get_between_vals(range);
pub fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
) -> Vec<DocId> {
let positions = self
.vals_reader
.get_positions_for_value_range(value_range, doc_id_range);

positions_to_docids(&positions, self.idx_reader.as_ref())
}
@@ -223,14 +229,14 @@ impl<T: MonotonicallyMappableToU128> MultiValueLength for MultiValuedU128FastFie
///
/// TODO: Instead of a linear scan we can employ an exponential search into binary search to match a
/// docid to its value position.
fn positions_to_docids<C: Column + ?Sized>(positions: &[u64], idx_reader: &C) -> Vec<DocId> {
fn positions_to_docids<C: Column + ?Sized>(positions: &[u32], idx_reader: &C) -> Vec<DocId> {
let mut docs = vec![];
let mut cur_doc = 0u32;
let mut last_doc = None;

for pos in positions {
loop {
let end = idx_reader.get_val(cur_doc as u64 + 1);
let end = idx_reader.get_val(cur_doc as u64 + 1) as u32;
if end > *pos {
// avoid duplicates
if Some(cur_doc) == last_doc {
@@ -258,7 +264,7 @@ mod tests {

#[test]
fn test_positions_to_docid() {
let positions = vec![10u64, 11, 15, 20, 21, 22];
let positions = vec![10u32, 11, 15, 20, 21, 22];

let offsets = vec![0, 10, 12, 15, 22, 23];
{

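For intuition on the position-to-docid mapping, here is a hedged standalone sketch: given the offsets column of a multi-valued field, where doc `i` owns value positions `offsets[i]..offsets[i + 1]`, each matching value position is mapped back to its owning doc, skipping duplicates. It mirrors the shape of the test data above but is not the crate's implementation.

// Simplified stand-in: map value positions back to docids using an offsets array.
fn positions_to_docids(positions: &[u32], offsets: &[u32]) -> Vec<u32> {
    let mut docs = Vec::new();
    let mut cur_doc = 0usize;
    for &pos in positions {
        // Advance until the current doc's end offset lies beyond `pos`.
        while offsets[cur_doc + 1] <= pos {
            cur_doc += 1;
        }
        // Avoid pushing the same doc twice for consecutive positions.
        if docs.last() != Some(&(cur_doc as u32)) {
            docs.push(cur_doc as u32);
        }
    }
    docs
}

fn main() {
    // Same shape as the test data in this diff.
    let positions = vec![10u32, 11, 15, 20, 21, 22];
    let offsets = vec![0u32, 10, 12, 15, 22, 23];
    assert_eq!(positions_to_docids(&positions, &offsets), vec![1, 3, 4]);
}
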
@@ -86,10 +86,7 @@ impl DocSet for BitSetDocSet {
self.doc
}

/// Returns half of the `max_doc`
/// This is quite a terrible heuristic,
/// but we don't have access to any better
/// value.
/// Returns the number of values set in the underlying bitset.
fn size_hint(&self) -> u32 {
self.docs.len() as u32
}

@@ -18,6 +18,7 @@ mod phrase_query;
mod query;
mod query_parser;
mod range_query;
mod range_query_ip_fastfield;
mod regex_query;
mod reqopt_scorer;
mod scorer;

@@ -6,12 +6,13 @@ use common::BitSet;
use crate::core::{Searcher, SegmentReader};
use crate::error::TantivyError;
use crate::query::explanation::does_not_match;
use crate::query::range_query_ip_fastfield::IPFastFieldRangeWeight;
use crate::query::{BitSetDocSet, ConstScorer, Explanation, Query, Scorer, Weight};
use crate::schema::{Field, IndexRecordOption, Term, Type};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::{DocId, Score};

fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
pub(crate) fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
bound: &Bound<TFrom>,
transform: &Transform,
) -> Bound<TTo> {
@@ -29,8 +30,17 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
///
/// # Implementation
///
/// The current implement will iterate over the terms within the range
/// and append all of the document cross into a `BitSet`.
/// ## Default
/// The default implementation collects all documents _upfront_ into a `BitSet`.
/// This is done by iterating over the terms within the range and loading all docs for each
/// `TermInfo` from the inverted index (posting list) and put them into a `BitSet`.
/// Depending on the number of terms matched, this is a potentially expensive operation.
///
/// ## IP fast field
/// For IP fast fields a custom variant is used, by scanning the fast field. Unlike the default
/// variant we can walk in a lazy fashion over it, since the fastfield is implicitly ordered by
/// DocId.
///
///
/// # Example
///
@@ -249,7 +259,8 @@ impl Query for RangeQuery {
_scoring_enabled: bool,
) -> crate::Result<Box<dyn Weight>> {
let schema = searcher.schema();
let value_type = schema.get_field_entry(self.field).field_type().value_type();
let field_type = schema.get_field_entry(self.field).field_type();
let value_type = field_type.value_type();
if value_type != self.value_type {
let err_msg = format!(
"Create a range query of the type {:?}, when the field given was of type {:?}",
@@ -257,11 +268,20 @@ impl Query for RangeQuery {
);
return Err(TantivyError::SchemaError(err_msg));
}
Ok(Box::new(RangeWeight {
field: self.field,
left_bound: self.left_bound.clone(),
right_bound: self.right_bound.clone(),
}))

if field_type.is_ip_addr() && field_type.is_fast() {
Ok(Box::new(IPFastFieldRangeWeight::new(
self.field,
&self.left_bound,
&self.right_bound,
)))
} else {
Ok(Box::new(RangeWeight {
field: self.field,
left_bound: self.left_bound.clone(),
right_bound: self.right_bound.clone(),
}))
}
}
}

@@ -335,7 +355,7 @@ mod tests {
use super::RangeQuery;
use crate::collector::{Count, TopDocs};
use crate::query::QueryParser;
use crate::schema::{Document, Field, IntoIpv6Addr, Schema, INDEXED, STORED, TEXT};
use crate::schema::{Document, Field, IntoIpv6Addr, Schema, FAST, INDEXED, STORED, TEXT};
use crate::{doc, Index};

#[test]
@@ -509,10 +529,24 @@ mod tests {
Ok(())
}

#[test]
fn search_ip_range_test_posting_list() {
search_ip_range_test_opt(false);
}

#[test]
fn search_ip_range_test() {
search_ip_range_test_opt(true);
}

fn search_ip_range_test_opt(with_fast_field: bool) {
let mut schema_builder = Schema::builder();
let ip_field = schema_builder.add_ip_addr_field("ip", INDEXED | STORED);
let ip_field = if with_fast_field {
schema_builder.add_ip_addr_field("ip", INDEXED | STORED | FAST)
} else {
schema_builder.add_ip_addr_field("ip", INDEXED | STORED)
};
let text_field = schema_builder.add_text_field("text", TEXT | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let ip_addr_1 = IpAddr::from_str("127.0.0.10").unwrap().into_ipv6_addr();
@@ -520,16 +554,22 @@ mod tests {

{
let mut index_writer = index.writer(3_000_000).unwrap();
index_writer
.add_document(doc!(
ip_field => ip_addr_1
))
.unwrap();
index_writer
.add_document(doc!(
ip_field => ip_addr_2
))
.unwrap();
for _ in 0..1_000 {
index_writer
.add_document(doc!(
ip_field => ip_addr_1,
text_field => "BLUBBER"
))
.unwrap();
}
for _ in 0..1_000 {
index_writer
.add_document(doc!(
ip_field => ip_addr_2,
text_field => "BLOBBER"
))
.unwrap();
}

index_writer.commit().unwrap();
}
@@ -543,24 +583,25 @@ mod tests {
count
};
let query_from_text = |text: &str| {
QueryParser::for_index(&index, vec![ip_field])
QueryParser::for_index(&index, vec![])
.parse_query(text)
.unwrap()
};

// Inclusive range
assert_eq!(
get_num_hits(query_from_text("ip:[127.0.0.1 TO 127.0.0.20]")),
2
2000
);

assert_eq!(
get_num_hits(query_from_text("ip:[127.0.0.10 TO 127.0.0.20]")),
2
2000
);

assert_eq!(
get_num_hits(query_from_text("ip:[127.0.0.11 TO 127.0.0.20]")),
1
1000
);

assert_eq!(
@@ -568,9 +609,84 @@ mod tests {
0
);

assert_eq!(get_num_hits(query_from_text("ip:[127.0.0.11 TO *]")), 1);
assert_eq!(get_num_hits(query_from_text("ip:[127.0.0.11 TO *]")), 1000);
assert_eq!(get_num_hits(query_from_text("ip:[127.0.0.21 TO *]")), 0);
assert_eq!(get_num_hits(query_from_text("ip:[* TO 127.0.0.9]")), 0);
assert_eq!(get_num_hits(query_from_text("ip:[* TO 127.0.0.10]")), 1);
assert_eq!(get_num_hits(query_from_text("ip:[* TO 127.0.0.10]")), 1000);

// Exclusive range
assert_eq!(
get_num_hits(query_from_text("ip:{127.0.0.1 TO 127.0.0.20}")),
1000
);

assert_eq!(
get_num_hits(query_from_text("ip:{127.0.0.1 TO 127.0.0.21}")),
2000
);

assert_eq!(
get_num_hits(query_from_text("ip:{127.0.0.10 TO 127.0.0.20}")),
0
);

assert_eq!(
get_num_hits(query_from_text("ip:{127.0.0.11 TO 127.0.0.20}")),
0
);

assert_eq!(
get_num_hits(query_from_text("ip:{127.0.0.11 TO 127.0.0.19}")),
0
);

assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.11 TO *}")), 1000);
assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.10 TO *}")), 1000);
assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.21 TO *}")), 0);
assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.20 TO *}")), 0);
assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.19 TO *}")), 1000);
assert_eq!(get_num_hits(query_from_text("ip:{* TO 127.0.0.9}")), 0);
assert_eq!(get_num_hits(query_from_text("ip:{* TO 127.0.0.10}")), 0);
assert_eq!(get_num_hits(query_from_text("ip:{* TO 127.0.0.11}")), 1000);

// Inclusive/Exclusive range
assert_eq!(
get_num_hits(query_from_text("ip:[127.0.0.1 TO 127.0.0.20}")),
1000
);

assert_eq!(
get_num_hits(query_from_text("ip:{127.0.0.1 TO 127.0.0.20]")),
2000
);

// Intersection
assert_eq!(
get_num_hits(query_from_text(
"text:BLUBBER AND ip:[127.0.0.10 TO 127.0.0.10]"
)),
1000
);

assert_eq!(
get_num_hits(query_from_text(
"text:BLOBBER AND ip:[127.0.0.10 TO 127.0.0.10]"
)),
0
);

assert_eq!(
get_num_hits(query_from_text(
"text:BLOBBER AND ip:[127.0.0.20 TO 127.0.0.20]"
)),
1000
);

assert_eq!(
get_num_hits(query_from_text(
"text:BLUBBER AND ip:[127.0.0.20 TO 127.0.0.20]"
)),
0
);
}
}

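Putting the pieces together, here is a condensed usage sketch modeled on the test above: the ip field is declared FAST so the range query goes through the fast-field scan rather than the posting lists. Field names and addresses are taken from the test; `index.reader()` and the unwrapped error handling are assumptions for brevity, not part of this diff.

use std::net::IpAddr;
use std::str::FromStr;

use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{IntoIpv6Addr, Schema, FAST, INDEXED, STORED};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // FAST enables the ip fast field, so range queries can scan it directly.
    let ip_field = schema_builder.add_ip_addr_field("ip", INDEXED | STORED | FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer(3_000_000)?;
    index_writer.add_document(doc!(ip_field => IpAddr::from_str("127.0.0.10").unwrap().into_ipv6_addr()))?;
    index_writer.add_document(doc!(ip_field => IpAddr::from_str("127.0.0.20").unwrap().into_ipv6_addr()))?;
    index_writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![]).parse_query("ip:[127.0.0.1 TO 127.0.0.20]")?;
    // Both documents fall inside the inclusive range.
    assert_eq!(searcher.search(&query, &Count)?, 2);
    Ok(())
}
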
src/query/range_query_ip_fastfield.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
//! IP Fastfields support efficient scanning for range queries.
//! We use this variant only if the fastfield exists, otherwise the default in `range_query` is
//! used, which uses the term dictionary + postings.

use std::net::Ipv6Addr;
use std::ops::{Bound, RangeInclusive};
use std::sync::Arc;

use common::BinarySerializable;
use fastfield_codecs::{Column, MonotonicallyMappableToU128};

use super::range_query::map_bound;
use super::{ConstScorer, Explanation, Scorer, Weight};
use crate::schema::{Cardinality, Field};
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError, TERMINATED};

/// `IPFastFieldRangeWeight` uses the ip address fast field to execute range queries.
pub struct IPFastFieldRangeWeight {
field: Field,
left_bound: Bound<Ipv6Addr>,
right_bound: Bound<Ipv6Addr>,
}

impl IPFastFieldRangeWeight {
pub fn new(field: Field, left_bound: &Bound<Vec<u8>>, right_bound: &Bound<Vec<u8>>) -> Self {
let ip_from_bound_raw_data = |data: &Vec<u8>| {
let left_ip_u128: u128 =
u128::from_be(BinarySerializable::deserialize(&mut &data[..]).unwrap());
Ipv6Addr::from_u128(left_ip_u128)
};
let left_bound = map_bound(left_bound, &ip_from_bound_raw_data);
let right_bound = map_bound(right_bound, &ip_from_bound_raw_data);
Self {
field,
left_bound,
right_bound,
}
}
}

impl Weight for IPFastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let field_type = reader.schema().get_field_entry(self.field).field_type();
match field_type.fastfield_cardinality().unwrap() {
Cardinality::SingleValue => {
let ip_addr_fast_field = reader.fast_fields().ip_addr(self.field)?;
let value_range =
bound_to_value_range(&self.left_bound, &self.right_bound, &ip_addr_fast_field);
let docset = IpRangeDocSet::new(value_range, ip_addr_fast_field);
Ok(Box::new(ConstScorer::new(docset, boost)))
}
Cardinality::MultiValues => unimplemented!(),
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
"Document #({}) does not match",
doc
)));
}
let explanation = Explanation::new("Const", scorer.score());

Ok(explanation)
}
}

fn bound_to_value_range(
left_bound: &Bound<Ipv6Addr>,
right_bound: &Bound<Ipv6Addr>,
column: &Arc<dyn Column<Ipv6Addr>>,
) -> RangeInclusive<Ipv6Addr> {
let start_value = match left_bound {
Bound::Included(ip_addr) => *ip_addr,
Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() + 1),
Bound::Unbounded => column.min_value(),
};

let end_value = match right_bound {
Bound::Included(ip_addr) => *ip_addr,
Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() - 1),
Bound::Unbounded => column.max_value(),
};
start_value..=end_value
}

/// Helper to have a cursor over a vec
struct VecCursor {
data: Vec<u32>,
pos_in_data: usize,
}
impl VecCursor {
fn new() -> Self {
Self {
data: Vec::with_capacity(32),
pos_in_data: 0,
}
}
fn next(&mut self) -> Option<u32> {
self.pos_in_data += 1;
self.current()
}
fn current(&self) -> Option<u32> {
self.data.get(self.pos_in_data).map(|el| *el as u32)
}

fn set_data(&mut self, data: Vec<u32>) {
self.data = data;
self.pos_in_data = 0;
}
fn is_empty(&self) -> bool {
self.pos_in_data >= self.data.len()
}
}

struct IpRangeDocSet {
value_range: RangeInclusive<Ipv6Addr>,
ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>,
fetched_until_doc: u32,
loaded_docs: VecCursor,
}
impl IpRangeDocSet {
fn new(
value_range: RangeInclusive<Ipv6Addr>,
ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>,
) -> Self {
let mut ip_range_docset = Self {
value_range,
ip_addr_fast_field,
loaded_docs: VecCursor::new(),
fetched_until_doc: 0,
};
ip_range_docset.fetch_block();
ip_range_docset
}

/// Returns true if more data could be fetched
fn fetch_block(&mut self) {
let mut horizon: u32 = 1;
const MAX_HORIZON: u32 = 100_000;
while self.loaded_docs.is_empty() {
let finished_to_end = self.fetch_horizon(horizon);
if finished_to_end {
break;
}
// Fetch more data, increase horizon
horizon = (horizon * 2).min(MAX_HORIZON);
}
}

/// Fetches a block for docid range [fetched_until_doc .. fetched_until_doc + HORIZON]
fn fetch_horizon(&mut self, horizon: u32) -> bool {
let mut end = self.fetched_until_doc + horizon;
let mut finished_to_end = false;

let limit = self.ip_addr_fast_field.num_vals();
if end >= limit {
end = limit;
finished_to_end = true;
}

let data = self
.ip_addr_fast_field
.get_positions_for_value_range(self.value_range.clone(), self.fetched_until_doc..end);
self.loaded_docs.set_data(data);
self.fetched_until_doc += horizon as u32;
finished_to_end
}
}

impl DocSet for IpRangeDocSet {
fn advance(&mut self) -> DocId {
if let Some(docid) = self.loaded_docs.next() {
docid as u32
} else {
if self.fetched_until_doc >= self.ip_addr_fast_field.num_vals() as u32 {
return TERMINATED;
}
self.fetch_block();
self.loaded_docs.current().unwrap_or(TERMINATED)
}
}

fn doc(&self) -> DocId {
self.loaded_docs
.current()
.map(|el| el as u32)
.unwrap_or(TERMINATED)
}

fn size_hint(&self) -> u32 {
0 // heuristic possible by checking number of hits when fetching a block
}
}

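The docset above fetches matching positions in blocks: it scans a small docid window first and doubles the window (up to a cap) until it finds at least one hit or reaches the end of the column. A hedged standalone sketch of that control flow, decoupled from the fast-field types:

// Standalone sketch of the "growing horizon" block fetch used by IpRangeDocSet:
// scan ever larger docid windows until a hit is found or the column is exhausted.
fn first_hits(values: &[u64], target: u64, mut fetched_until: u32) -> Vec<u32> {
    const MAX_HORIZON: u32 = 100_000;
    let mut horizon: u32 = 1;
    loop {
        let end = (fetched_until + horizon).min(values.len() as u32);
        let hits: Vec<u32> = (fetched_until..end)
            .filter(|&idx| values[idx as usize] == target)
            .collect();
        let finished_to_end = end == values.len() as u32;
        fetched_until = end;
        if !hits.is_empty() || finished_to_end {
            return hits;
        }
        // No hits in this window: double the horizon and keep scanning.
        horizon = (horizon * 2).min(MAX_HORIZON);
    }
}

fn main() {
    let values = vec![1u64, 1, 1, 1, 1, 7, 1, 7];
    // Starting at doc 0, the window grows 1, 2, 4 ... and the block covering docs 3..7 yields doc 5.
    assert_eq!(first_hits(&values, 7, 0), vec![5]);
    // Resuming after doc 6, the next block yields doc 7.
    assert_eq!(first_hits(&values, 7, 6), vec![7]);
}
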
@@ -176,6 +176,11 @@ impl FieldType {
}
}

/// returns true if this is an ip address field
pub fn is_ip_addr(&self) -> bool {
matches!(self, FieldType::IpAddr(_))
}

/// returns true if the field is indexed.
pub fn is_indexed(&self) -> bool {
match *self {
@@ -232,11 +237,11 @@ impl FieldType {
/// returns true if the field is fast.
pub fn fastfield_cardinality(&self) -> Option<Cardinality> {
match *self {
FieldType::Bytes(ref bytes_options) if bytes_options.is_fast() => {
Some(Cardinality::SingleValue)
FieldType::Bytes(ref bytes_options) => {
bytes_options.is_fast().then_some(Cardinality::SingleValue)
}
FieldType::Str(ref text_options) if text_options.is_fast() => {
Some(Cardinality::MultiValues)
FieldType::Str(ref text_options) => {
text_options.is_fast().then_some(Cardinality::MultiValues)
}
FieldType::U64(ref int_options)
| FieldType::I64(ref int_options)
@@ -245,7 +250,7 @@ impl FieldType {
FieldType::Date(ref date_options) => date_options.get_fastfield_cardinality(),
FieldType::Facet(_) => Some(Cardinality::MultiValues),
FieldType::JsonObject(_) => None,
_ => None,
FieldType::IpAddr(ref ip_addr_options) => ip_addr_options.get_fastfield_cardinality(),
}
}