diff --git a/fastfield_codecs/benches/bench.rs b/fastfield_codecs/benches/bench.rs index f2fd6bdde..b41bae0d9 100644 --- a/fastfield_codecs/benches/bench.rs +++ b/fastfield_codecs/benches/bench.rs @@ -111,7 +111,9 @@ mod tests { let (major_item, _minor_item, data) = get_data_50percent_item(); let column = get_u128_column_from_data(&data); - b.iter(|| column.get_between_vals(major_item..=major_item)); + b.iter(|| { + column.get_positions_for_value_range(major_item..=major_item, 0..data.len() as u32) + }); } #[bench] @@ -119,7 +121,9 @@ mod tests { let (_major_item, minor_item, data) = get_data_50percent_item(); let column = get_u128_column_from_data(&data); - b.iter(|| column.get_between_vals(minor_item..=minor_item)); + b.iter(|| { + column.get_positions_for_value_range(minor_item..=minor_item, 0..data.len() as u32) + }); } #[bench] @@ -127,7 +131,7 @@ mod tests { let (_major_item, _minor_item, data) = get_data_50percent_item(); let column = get_u128_column_from_data(&data); - b.iter(|| column.get_between_vals(0..=u128::MAX)); + b.iter(|| column.get_positions_for_value_range(0..=u128::MAX, 0..data.len() as u32)); } #[bench] diff --git a/fastfield_codecs/src/column.rs b/fastfield_codecs/src/column.rs index 864601a91..e4d4fcbb5 100644 --- a/fastfield_codecs/src/column.rs +++ b/fastfield_codecs/src/column.rs @@ -1,5 +1,5 @@ use std::marker::PhantomData; -use std::ops::RangeInclusive; +use std::ops::{Range, RangeInclusive}; use tantivy_bitpacker::minmax; @@ -31,13 +31,21 @@ pub trait Column: Send + Sync { } } - /// Return the positions of values which are in the provided range. + /// Get the positions of values which are in the provided value range. + /// + /// Note that position == docid for single value fast fields #[inline] - fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64> { + fn get_positions_for_value_range( + &self, + value_range: RangeInclusive<T>, + doc_id_range: Range<u32>, + ) -> Vec<u32> { let mut vals = Vec::new(); - for idx in 0..self.num_vals() as u64 { - let val = self.get_val(idx); - if range.contains(&val) { + let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals()); + + for idx in doc_id_range.start..doc_id_range.end { + let val = self.get_val(idx as u64); + if value_range.contains(&val) { vals.push(idx); } } @@ -156,7 +164,7 @@ struct MonotonicMappingColumn { /// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el /// /// The inverse of the mapping is required for: -/// `fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64>` +/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u32>` /// The user provides the original value range and we need to monotonic map them in the same way the /// serialization does before calling the underlying column.
/// @@ -215,10 +223,15 @@ where ) } - fn get_between_vals(&self, range: RangeInclusive) -> Vec { - self.from_column.get_between_vals( + fn get_positions_for_value_range( + &self, + range: RangeInclusive, + doc_id_range: Range, + ) -> Vec { + self.from_column.get_positions_for_value_range( self.monotonic_mapping.inverse(range.start().clone()) ..=self.monotonic_mapping.inverse(range.end().clone()), + doc_id_range, ) } diff --git a/fastfield_codecs/src/compact_space/mod.rs b/fastfield_codecs/src/compact_space/mod.rs index b47bb5744..a183d2cd5 100644 --- a/fastfield_codecs/src/compact_space/mod.rs +++ b/fastfield_codecs/src/compact_space/mod.rs @@ -14,7 +14,7 @@ use std::{ cmp::Ordering, collections::BTreeSet, io::{self, Write}, - ops::RangeInclusive, + ops::{Range, RangeInclusive}, }; use common::{BinarySerializable, CountingWriter, VInt, VIntU128}; @@ -304,8 +304,14 @@ impl Column for CompactSpaceDecompressor { fn iter(&self) -> Box + '_> { Box::new(self.iter()) } - fn get_between_vals(&self, range: RangeInclusive) -> Vec { - self.get_between_vals(range) + + #[inline] + fn get_positions_for_value_range( + &self, + value_range: RangeInclusive, + doc_id_range: Range, + ) -> Vec { + self.get_positions_for_value_range(value_range, doc_id_range) } } @@ -340,12 +346,18 @@ impl CompactSpaceDecompressor { /// Comparing on compact space: Real dataset 1.08 GElements/s /// /// Comparing on original space: Real dataset .06 GElements/s (not completely optimized) - pub fn get_between_vals(&self, range: RangeInclusive) -> Vec { - if range.start() > range.end() { + #[inline] + pub fn get_positions_for_value_range( + &self, + value_range: RangeInclusive, + doc_id_range: Range, + ) -> Vec { + if value_range.start() > value_range.end() { return Vec::new(); } - let from_value = *range.start(); - let to_value = *range.end(); + let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals()); + let from_value = *value_range.start(); + let to_value = *value_range.end(); assert!(to_value >= from_value); let compact_from = self.u128_to_compact(from_value); let compact_to = self.u128_to_compact(to_value); @@ -377,8 +389,10 @@ impl CompactSpaceDecompressor { let range = compact_from..=compact_to; let mut positions = Vec::new(); + let scan_num_docs = doc_id_range.end - doc_id_range.start; + let step_size = 4; - let cutoff = self.params.num_vals as u64 - self.params.num_vals as u64 % step_size; + let cutoff = doc_id_range.start + scan_num_docs - scan_num_docs % step_size; let mut push_if_in_range = |idx, val| { if range.contains(&val) { @@ -387,7 +401,7 @@ impl CompactSpaceDecompressor { }; let get_val = |idx| self.params.bit_unpacker.get(idx as u64, &self.data); // unrolled loop - for idx in (0..cutoff).step_by(step_size as usize) { + for idx in (doc_id_range.start..cutoff).step_by(step_size as usize) { let idx1 = idx; let idx2 = idx + 1; let idx3 = idx + 2; @@ -403,7 +417,7 @@ impl CompactSpaceDecompressor { } // handle rest - for idx in cutoff..self.params.num_vals as u64 { + for idx in cutoff..doc_id_range.end { push_if_in_range(idx, get_val(idx)); } @@ -498,9 +512,10 @@ mod tests { let expected_positions = expected .iter() .positions(|val| range.contains(val)) - .map(|pos| pos as u64) + .map(|pos| pos as u32) .collect::>(); - let positions = decompressor.get_between_vals(range); + let positions = + decompressor.get_positions_for_value_range(range, 0..decompressor.num_vals()); assert_eq!(positions, expected_positions); }; @@ -540,24 +555,66 @@ mod tests { ]; let data = test_aux_vals(vals); let decomp = 
CompactSpaceDecompressor::open(data).unwrap(); - let positions = decomp.get_between_vals(0..=1); + let complete_range = 0..vals.len() as u32; + for (pos, val) in vals.iter().enumerate() { + let val = *val as u128; + let pos = pos as u32; + let positions = decomp.get_positions_for_value_range(val..=val, pos..pos + 1); + assert_eq!(positions, vec![pos]); + } + + // handle docid range out of bounds + let positions = decomp.get_positions_for_value_range(0..=1, 1..u32::MAX); + assert_eq!(positions, vec![]); + + let positions = decomp.get_positions_for_value_range(0..=1, complete_range.clone()); assert_eq!(positions, vec![0]); - let positions = decomp.get_between_vals(0..=2); + let positions = decomp.get_positions_for_value_range(0..=2, complete_range.clone()); assert_eq!(positions, vec![0]); - let positions = decomp.get_between_vals(0..=3); + let positions = decomp.get_positions_for_value_range(0..=3, complete_range.clone()); assert_eq!(positions, vec![0, 2]); - assert_eq!(decomp.get_between_vals(99999u128..=99999u128), vec![3]); - assert_eq!(decomp.get_between_vals(99999u128..=100000u128), vec![3, 4]); - assert_eq!(decomp.get_between_vals(99998u128..=100000u128), vec![3, 4]); - assert_eq!(decomp.get_between_vals(99998u128..=99999u128), vec![3]); - assert_eq!(decomp.get_between_vals(99998u128..=99998u128), vec![]); - assert_eq!(decomp.get_between_vals(333u128..=333u128), vec![8]); - assert_eq!(decomp.get_between_vals(332u128..=333u128), vec![8]); - assert_eq!(decomp.get_between_vals(332u128..=334u128), vec![8]); - assert_eq!(decomp.get_between_vals(333u128..=334u128), vec![8]); + assert_eq!( + decomp.get_positions_for_value_range(99999u128..=99999u128, complete_range.clone()), + vec![3] + ); + assert_eq!( + decomp.get_positions_for_value_range(99999u128..=100000u128, complete_range.clone()), + vec![3, 4] + ); + assert_eq!( + decomp.get_positions_for_value_range(99998u128..=100000u128, complete_range.clone()), + vec![3, 4] + ); + assert_eq!( + decomp.get_positions_for_value_range(99998u128..=99999u128, complete_range.clone()), + vec![3] + ); + assert_eq!( + decomp.get_positions_for_value_range(99998u128..=99998u128, complete_range.clone()), + vec![] + ); + assert_eq!( + decomp.get_positions_for_value_range(333u128..=333u128, complete_range.clone()), + vec![8] + ); + assert_eq!( + decomp.get_positions_for_value_range(332u128..=333u128, complete_range.clone()), + vec![8] + ); + assert_eq!( + decomp.get_positions_for_value_range(332u128..=334u128, complete_range.clone()), + vec![8] + ); + assert_eq!( + decomp.get_positions_for_value_range(333u128..=334u128, complete_range.clone()), + vec![8] + ); assert_eq!( - decomp.get_between_vals(4_000_211_221u128..=5_000_000_000u128), + decomp.get_positions_for_value_range( + 4_000_211_221u128..=5_000_000_000u128, + complete_range.clone() + ), vec![6, 7] ); } @@ -582,11 +639,12 @@ mod tests { ]; let data = test_aux_vals(vals); let decomp = CompactSpaceDecompressor::open(data).unwrap(); - let positions = decomp.get_between_vals(0..=5); + let complete_range = 0..vals.len() as u32; + let positions = decomp.get_positions_for_value_range(0..=5, complete_range.clone()); assert_eq!(positions, vec![]); - let positions = decomp.get_between_vals(0..=100); + let positions = decomp.get_positions_for_value_range(0..=100, complete_range.clone()); assert_eq!(positions, vec![0]); - let positions = decomp.get_between_vals(0..=105); + let positions = decomp.get_positions_for_value_range(0..=105, complete_range.clone()); assert_eq!(positions, vec![0]); } @@ -610,11 +668,24 @@ 
mod tests { let mut out = Vec::new(); serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap(); let decomp = open_u128::(OwnedBytes::new(out)).unwrap(); + let complete_range = 0..vals.len() as u32; - assert_eq!(decomp.get_between_vals(199..=200), vec![0]); - assert_eq!(decomp.get_between_vals(199..=201), vec![0, 1]); - assert_eq!(decomp.get_between_vals(200..=200), vec![0]); - assert_eq!(decomp.get_between_vals(1_000_000..=1_000_000), vec![11]); + assert_eq!( + decomp.get_positions_for_value_range(199..=200, complete_range.clone()), + vec![0] + ); + assert_eq!( + decomp.get_positions_for_value_range(199..=201, complete_range.clone()), + vec![0, 1] + ); + assert_eq!( + decomp.get_positions_for_value_range(200..=200, complete_range.clone()), + vec![0] + ); + assert_eq!( + decomp.get_positions_for_value_range(1_000_000..=1_000_000, complete_range.clone()), + vec![11] + ); } #[test] diff --git a/fastfield_codecs/src/lib.rs b/fastfield_codecs/src/lib.rs index 25ca123e2..6e5ea06ee 100644 --- a/fastfield_codecs/src/lib.rs +++ b/fastfield_codecs/src/lib.rs @@ -211,13 +211,16 @@ mod tests { if !data.is_empty() { let test_rand_idx = rand::thread_rng().gen_range(0..=data.len() - 1); - let expected_positions: Vec = data + let expected_positions: Vec = data .iter() .enumerate() .filter(|(_, el)| **el == data[test_rand_idx]) - .map(|(pos, _)| pos as u64) + .map(|(pos, _)| pos as u32) .collect(); - let positions = reader.get_between_vals(data[test_rand_idx]..=data[test_rand_idx]); + let positions = reader.get_positions_for_value_range( + data[test_rand_idx]..=data[test_rand_idx], + 0..data.len() as u32, + ); assert_eq!(expected_positions, positions); } Some((estimation, actual_compression)) diff --git a/fastfield_codecs/src/main.rs b/fastfield_codecs/src/main.rs index 988f41b1f..d951f2732 100644 --- a/fastfield_codecs/src/main.rs +++ b/fastfield_codecs/src/main.rs @@ -117,7 +117,8 @@ fn bench_ip() { // Sample some ranges for value in dataset.iter().take(1110).skip(1100).cloned() { print_time!("get range"); - let doc_values = decompressor.get_between_vals(value..=value); + let doc_values = + decompressor.get_positions_for_value_range(value..=value, 0..decompressor.num_vals()); println!("{:?}", doc_values.len()); } } diff --git a/src/fastfield/multivalued/reader.rs b/src/fastfield/multivalued/reader.rs index 054bb01e4..e1f81b28c 100644 --- a/src/fastfield/multivalued/reader.rs +++ b/src/fastfield/multivalued/reader.rs @@ -159,8 +159,14 @@ impl MultiValuedU128FastFieldReader { } /// Returns all docids which are in the provided value range - pub fn get_between_vals(&self, range: RangeInclusive) -> Vec { - let positions = self.vals_reader.get_between_vals(range); + pub fn get_positions_for_value_range( + &self, + value_range: RangeInclusive, + doc_id_range: Range, + ) -> Vec { + let positions = self + .vals_reader + .get_positions_for_value_range(value_range, doc_id_range); positions_to_docids(&positions, self.idx_reader.as_ref()) } @@ -223,14 +229,14 @@ impl MultiValueLength for MultiValuedU128FastFie /// /// TODO: Instead of a linear scan we can employ a expotential search into binary search to match a /// docid to its value position. 
-fn positions_to_docids<C: Column>(positions: &[u64], idx_reader: &C) -> Vec<DocId> { +fn positions_to_docids<C: Column>(positions: &[u32], idx_reader: &C) -> Vec<DocId> { let mut docs = vec![]; let mut cur_doc = 0u32; let mut last_doc = None; for pos in positions { loop { - let end = idx_reader.get_val(cur_doc as u64 + 1); + let end = idx_reader.get_val(cur_doc as u64 + 1) as u32; if end > *pos { // avoid duplicates if Some(cur_doc) == last_doc { @@ -258,7 +264,7 @@ mod tests { #[test] fn test_positions_to_docid() { - let positions = vec![10u64, 11, 15, 20, 21, 22]; + let positions = vec![10u32, 11, 15, 20, 21, 22]; let offsets = vec![0, 10, 12, 15, 22, 23]; { diff --git a/src/query/bitset/mod.rs b/src/query/bitset/mod.rs index 09b9d8360..6448e3e62 100644 --- a/src/query/bitset/mod.rs +++ b/src/query/bitset/mod.rs @@ -86,10 +86,7 @@ impl DocSet for BitSetDocSet { self.doc } - /// Returns half of the `max_doc` - /// This is quite a terrible heuristic, - /// but we don't have access to any better - /// value. + /// Returns the number of values set in the underlying bitset. fn size_hint(&self) -> u32 { self.docs.len() as u32 } diff --git a/src/query/mod.rs b/src/query/mod.rs index e22b2cae4..c46aed50f 100644 --- a/src/query/mod.rs +++ b/src/query/mod.rs @@ -18,6 +18,7 @@ mod phrase_query; mod query; mod query_parser; mod range_query; +mod range_query_ip_fastfield; mod regex_query; mod reqopt_scorer; mod scorer; diff --git a/src/query/range_query.rs b/src/query/range_query.rs index e198618c8..390a452d3 100644 --- a/src/query/range_query.rs +++ b/src/query/range_query.rs @@ -6,12 +6,13 @@ use common::BitSet; use crate::core::{Searcher, SegmentReader}; use crate::error::TantivyError; use crate::query::explanation::does_not_match; +use crate::query::range_query_ip_fastfield::IPFastFieldRangeWeight; use crate::query::{BitSetDocSet, ConstScorer, Explanation, Query, Scorer, Weight}; use crate::schema::{Field, IndexRecordOption, Term, Type}; use crate::termdict::{TermDictionary, TermStreamer}; use crate::{DocId, Score}; -fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>( +pub(crate) fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>( bound: &Bound<TFrom>, transform: &Transform, ) -> Bound<TTo> { @@ -29,8 +30,17 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>( /// /// # Implementation /// -/// The current implement will iterate over the terms within the range -/// and append all of the document cross into a `BitSet`. +/// ## Default +/// The default implementation collects all documents _upfront_ into a `BitSet`. +/// This is done by iterating over the terms within the range, loading all docs for each +/// `TermInfo` from the inverted index (posting list), and putting them into a `BitSet`. +/// Depending on the number of terms matched, this is a potentially expensive operation. +/// +/// ## IP fast field +/// For IP fast fields, a custom variant is used that scans the fast field. Unlike the default +/// variant, it can walk over the fast field lazily, since the fast field is implicitly ordered by +/// DocId.
+/// /// /// # Example /// @@ -249,7 +259,8 @@ impl Query for RangeQuery { _scoring_enabled: bool, ) -> crate::Result> { let schema = searcher.schema(); - let value_type = schema.get_field_entry(self.field).field_type().value_type(); + let field_type = schema.get_field_entry(self.field).field_type(); + let value_type = field_type.value_type(); if value_type != self.value_type { let err_msg = format!( "Create a range query of the type {:?}, when the field given was of type {:?}", @@ -257,11 +268,20 @@ impl Query for RangeQuery { ); return Err(TantivyError::SchemaError(err_msg)); } - Ok(Box::new(RangeWeight { - field: self.field, - left_bound: self.left_bound.clone(), - right_bound: self.right_bound.clone(), - })) + + if field_type.is_ip_addr() && field_type.is_fast() { + Ok(Box::new(IPFastFieldRangeWeight::new( + self.field, + &self.left_bound, + &self.right_bound, + ))) + } else { + Ok(Box::new(RangeWeight { + field: self.field, + left_bound: self.left_bound.clone(), + right_bound: self.right_bound.clone(), + })) + } } } @@ -335,7 +355,7 @@ mod tests { use super::RangeQuery; use crate::collector::{Count, TopDocs}; use crate::query::QueryParser; - use crate::schema::{Document, Field, IntoIpv6Addr, Schema, INDEXED, STORED, TEXT}; + use crate::schema::{Document, Field, IntoIpv6Addr, Schema, FAST, INDEXED, STORED, TEXT}; use crate::{doc, Index}; #[test] @@ -509,10 +529,24 @@ mod tests { Ok(()) } + #[test] + fn search_ip_range_test_posting_list() { + search_ip_range_test_opt(false); + } + #[test] fn search_ip_range_test() { + search_ip_range_test_opt(true); + } + + fn search_ip_range_test_opt(with_fast_field: bool) { let mut schema_builder = Schema::builder(); - let ip_field = schema_builder.add_ip_addr_field("ip", INDEXED | STORED); + let ip_field = if with_fast_field { + schema_builder.add_ip_addr_field("ip", INDEXED | STORED | FAST) + } else { + schema_builder.add_ip_addr_field("ip", INDEXED | STORED) + }; + let text_field = schema_builder.add_text_field("text", TEXT | STORED); let schema = schema_builder.build(); let index = Index::create_in_ram(schema); let ip_addr_1 = IpAddr::from_str("127.0.0.10").unwrap().into_ipv6_addr(); @@ -520,16 +554,22 @@ mod tests { { let mut index_writer = index.writer(3_000_000).unwrap(); - index_writer - .add_document(doc!( - ip_field => ip_addr_1 - )) - .unwrap(); - index_writer - .add_document(doc!( - ip_field => ip_addr_2 - )) - .unwrap(); + for _ in 0..1_000 { + index_writer + .add_document(doc!( + ip_field => ip_addr_1, + text_field => "BLUBBER" + )) + .unwrap(); + } + for _ in 0..1_000 { + index_writer + .add_document(doc!( + ip_field => ip_addr_2, + text_field => "BLOBBER" + )) + .unwrap(); + } index_writer.commit().unwrap(); } @@ -543,24 +583,25 @@ mod tests { count }; let query_from_text = |text: &str| { - QueryParser::for_index(&index, vec![ip_field]) + QueryParser::for_index(&index, vec![]) .parse_query(text) .unwrap() }; + // Inclusive range assert_eq!( get_num_hits(query_from_text("ip:[127.0.0.1 TO 127.0.0.20]")), - 2 + 2000 ); assert_eq!( get_num_hits(query_from_text("ip:[127.0.0.10 TO 127.0.0.20]")), - 2 + 2000 ); assert_eq!( get_num_hits(query_from_text("ip:[127.0.0.11 TO 127.0.0.20]")), - 1 + 1000 ); assert_eq!( @@ -568,9 +609,84 @@ mod tests { 0 ); - assert_eq!(get_num_hits(query_from_text("ip:[127.0.0.11 TO *]")), 1); + assert_eq!(get_num_hits(query_from_text("ip:[127.0.0.11 TO *]")), 1000); assert_eq!(get_num_hits(query_from_text("ip:[127.0.0.21 TO *]")), 0); assert_eq!(get_num_hits(query_from_text("ip:[* TO 127.0.0.9]")), 0); - 
assert_eq!(get_num_hits(query_from_text("ip:[* TO 127.0.0.10]")), 1); + assert_eq!(get_num_hits(query_from_text("ip:[* TO 127.0.0.10]")), 1000); + + // Exclusive range + assert_eq!( + get_num_hits(query_from_text("ip:{127.0.0.1 TO 127.0.0.20}")), + 1000 + ); + + assert_eq!( + get_num_hits(query_from_text("ip:{127.0.0.1 TO 127.0.0.21}")), + 2000 + ); + + assert_eq!( + get_num_hits(query_from_text("ip:{127.0.0.10 TO 127.0.0.20}")), + 0 + ); + + assert_eq!( + get_num_hits(query_from_text("ip:{127.0.0.11 TO 127.0.0.20}")), + 0 + ); + + assert_eq!( + get_num_hits(query_from_text("ip:{127.0.0.11 TO 127.0.0.19}")), + 0 + ); + + assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.11 TO *}")), 1000); + assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.10 TO *}")), 1000); + assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.21 TO *}")), 0); + assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.20 TO *}")), 0); + assert_eq!(get_num_hits(query_from_text("ip:{127.0.0.19 TO *}")), 1000); + assert_eq!(get_num_hits(query_from_text("ip:{* TO 127.0.0.9}")), 0); + assert_eq!(get_num_hits(query_from_text("ip:{* TO 127.0.0.10}")), 0); + assert_eq!(get_num_hits(query_from_text("ip:{* TO 127.0.0.11}")), 1000); + + // Inclusive/Exclusive range + assert_eq!( + get_num_hits(query_from_text("ip:[127.0.0.1 TO 127.0.0.20}")), + 1000 + ); + + assert_eq!( + get_num_hits(query_from_text("ip:{127.0.0.1 TO 127.0.0.20]")), + 2000 + ); + + // Intersection + assert_eq!( + get_num_hits(query_from_text( + "text:BLUBBER AND ip:[127.0.0.10 TO 127.0.0.10]" + )), + 1000 + ); + + assert_eq!( + get_num_hits(query_from_text( + "text:BLOBBER AND ip:[127.0.0.10 TO 127.0.0.10]" + )), + 0 + ); + + assert_eq!( + get_num_hits(query_from_text( + "text:BLOBBER AND ip:[127.0.0.20 TO 127.0.0.20]" + )), + 1000 + ); + + assert_eq!( + get_num_hits(query_from_text( + "text:BLUBBER AND ip:[127.0.0.20 TO 127.0.0.20]" + )), + 0 + ); } } diff --git a/src/query/range_query_ip_fastfield.rs b/src/query/range_query_ip_fastfield.rs new file mode 100644 index 000000000..3393ee3b9 --- /dev/null +++ b/src/query/range_query_ip_fastfield.rs @@ -0,0 +1,196 @@ +//! IP Fastfields support efficient scanning for range queries. +//! We use this variant only if the fastfield exists, otherwise the default in `range_query` is +//! used, which uses the term dictionary + postings. + +use std::net::Ipv6Addr; +use std::ops::{Bound, RangeInclusive}; +use std::sync::Arc; + +use common::BinarySerializable; +use fastfield_codecs::{Column, MonotonicallyMappableToU128}; + +use super::range_query::map_bound; +use super::{ConstScorer, Explanation, Scorer, Weight}; +use crate::schema::{Cardinality, Field}; +use crate::{DocId, DocSet, Score, SegmentReader, TantivyError, TERMINATED}; + +/// `IPFastFieldRangeWeight` uses the ip address fast field to execute range queries. 
+pub struct IPFastFieldRangeWeight { + field: Field, + left_bound: Bound<Ipv6Addr>, + right_bound: Bound<Ipv6Addr>, +} + +impl IPFastFieldRangeWeight { + pub fn new(field: Field, left_bound: &Bound<Vec<u8>>, right_bound: &Bound<Vec<u8>>) -> Self { + let ip_from_bound_raw_data = |data: &Vec<u8>| { + let left_ip_u128: u128 = + u128::from_be(BinarySerializable::deserialize(&mut &data[..]).unwrap()); + Ipv6Addr::from_u128(left_ip_u128) + }; + let left_bound = map_bound(left_bound, &ip_from_bound_raw_data); + let right_bound = map_bound(right_bound, &ip_from_bound_raw_data); + Self { + field, + left_bound, + right_bound, + } + } +} + +impl Weight for IPFastFieldRangeWeight { + fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { + let field_type = reader.schema().get_field_entry(self.field).field_type(); + match field_type.fastfield_cardinality().unwrap() { + Cardinality::SingleValue => { + let ip_addr_fast_field = reader.fast_fields().ip_addr(self.field)?; + let value_range = + bound_to_value_range(&self.left_bound, &self.right_bound, &ip_addr_fast_field); + let docset = IpRangeDocSet::new(value_range, ip_addr_fast_field); + Ok(Box::new(ConstScorer::new(docset, boost))) + } + Cardinality::MultiValues => unimplemented!(), + } + } + + fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { + let mut scorer = self.scorer(reader, 1.0)?; + if scorer.seek(doc) != doc { + return Err(TantivyError::InvalidArgument(format!( + "Document #({}) does not match", + doc + ))); + } + let explanation = Explanation::new("Const", scorer.score()); + + Ok(explanation) + } +} + +fn bound_to_value_range( + left_bound: &Bound<Ipv6Addr>, + right_bound: &Bound<Ipv6Addr>, + column: &Arc<dyn Column<Ipv6Addr>>, +) -> RangeInclusive<Ipv6Addr> { + let start_value = match left_bound { + Bound::Included(ip_addr) => *ip_addr, + Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() + 1), + Bound::Unbounded => column.min_value(), + }; + + let end_value = match right_bound { + Bound::Included(ip_addr) => *ip_addr, + Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() - 1), + Bound::Unbounded => column.max_value(), + }; + start_value..=end_value +} + +/// Helper to have a cursor over a vec +struct VecCursor { + data: Vec<u32>, + pos_in_data: usize, +} +impl VecCursor { + fn new() -> Self { + Self { + data: Vec::with_capacity(32), + pos_in_data: 0, + } + } + fn next(&mut self) -> Option<u32> { + self.pos_in_data += 1; + self.current() + } + fn current(&self) -> Option<u32> { + self.data.get(self.pos_in_data).map(|el| *el as u32) + } + + fn set_data(&mut self, data: Vec<u32>) { + self.data = data; + self.pos_in_data = 0; + } + fn is_empty(&self) -> bool { + self.pos_in_data >= self.data.len() + } +} + +struct IpRangeDocSet { + value_range: RangeInclusive<Ipv6Addr>, + ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>, + fetched_until_doc: u32, + loaded_docs: VecCursor, +} +impl IpRangeDocSet { + fn new( + value_range: RangeInclusive<Ipv6Addr>, + ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>, + ) -> Self { + let mut ip_range_docset = Self { + value_range, + ip_addr_fast_field, + loaded_docs: VecCursor::new(), + fetched_until_doc: 0, + }; + ip_range_docset.fetch_block(); + ip_range_docset + } + + /// Fetches blocks (doubling the horizon each time) until at least one position is loaded or + /// the end of the column is reached. + fn fetch_block(&mut self) { + let mut horizon: u32 = 1; + const MAX_HORIZON: u32 = 100_000; + while self.loaded_docs.is_empty() { + let finished_to_end = self.fetch_horizon(horizon); + if finished_to_end { + break; + } + // Fetch more data, increase horizon + horizon = (horizon * 2).min(MAX_HORIZON); + } + } + + /// Fetches a block for docid range [fetched_until_doc .. 
fetched_until_doc + HORIZON] + fn fetch_horizon(&mut self, horizon: u32) -> bool { + let mut end = self.fetched_until_doc + horizon; + let mut finished_to_end = false; + + let limit = self.ip_addr_fast_field.num_vals(); + if end >= limit { + end = limit; + finished_to_end = true; + } + + let data = self + .ip_addr_fast_field + .get_positions_for_value_range(self.value_range.clone(), self.fetched_until_doc..end); + self.loaded_docs.set_data(data); + self.fetched_until_doc += horizon as u32; + finished_to_end + } +} + +impl DocSet for IpRangeDocSet { + fn advance(&mut self) -> DocId { + if let Some(docid) = self.loaded_docs.next() { + docid as u32 + } else { + if self.fetched_until_doc >= self.ip_addr_fast_field.num_vals() as u32 { + return TERMINATED; + } + self.fetch_block(); + self.loaded_docs.current().unwrap_or(TERMINATED) + } + } + + fn doc(&self) -> DocId { + self.loaded_docs + .current() + .map(|el| el as u32) + .unwrap_or(TERMINATED) + } + + fn size_hint(&self) -> u32 { + 0 // heuristic possible by checking number of hits when fetching a block + } +} diff --git a/src/schema/field_type.rs b/src/schema/field_type.rs index aa7bd2c6a..c2cae2f1e 100644 --- a/src/schema/field_type.rs +++ b/src/schema/field_type.rs @@ -176,6 +176,11 @@ impl FieldType { } } + /// returns true if this is an ip address field + pub fn is_ip_addr(&self) -> bool { + matches!(self, FieldType::IpAddr(_)) + } + /// returns true if the field is indexed. pub fn is_indexed(&self) -> bool { match *self { @@ -232,11 +237,11 @@ impl FieldType { /// returns true if the field is fast. pub fn fastfield_cardinality(&self) -> Option { match *self { - FieldType::Bytes(ref bytes_options) if bytes_options.is_fast() => { - Some(Cardinality::SingleValue) + FieldType::Bytes(ref bytes_options) => { + bytes_options.is_fast().then_some(Cardinality::SingleValue) } - FieldType::Str(ref text_options) if text_options.is_fast() => { - Some(Cardinality::MultiValues) + FieldType::Str(ref text_options) => { + text_options.is_fast().then_some(Cardinality::MultiValues) } FieldType::U64(ref int_options) | FieldType::I64(ref int_options) @@ -245,7 +250,7 @@ impl FieldType { FieldType::Date(ref date_options) => date_options.get_fastfield_cardinality(), FieldType::Facet(_) => Some(Cardinality::MultiValues), FieldType::JsonObject(_) => None, - _ => None, + FieldType::IpAddr(ref ip_addr_options) => ip_addr_options.get_fastfield_cardinality(), } }
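As a side note for readers of this patch, the sketch below illustrates the position/docid-range contract that the new `get_positions_for_value_range` default implementation follows. `DummyColumn`, its values, and `main` are illustrative assumptions only, not part of this patch or of tantivy's API:

```rust
use std::ops::{Range, RangeInclusive};

/// Toy stand-in for a single-value fast field column (illustrative only).
struct DummyColumn {
    vals: Vec<u64>,
}

impl DummyColumn {
    /// Scan only `doc_id_range` (clamped to the number of values) and return
    /// the positions whose value falls inside `value_range`, mirroring the
    /// default trait implementation added in fastfield_codecs/src/column.rs.
    fn get_positions_for_value_range(
        &self,
        value_range: RangeInclusive<u64>,
        doc_id_range: Range<u32>,
    ) -> Vec<u32> {
        let end = doc_id_range.end.min(self.vals.len() as u32);
        (doc_id_range.start..end)
            .filter(|&idx| value_range.contains(&self.vals[idx as usize]))
            .collect()
    }
}

fn main() {
    let column = DummyColumn { vals: vec![10, 20, 20, 30, 20] };
    // Full scan: positions (== docids for single-value fields) holding the value 20.
    assert_eq!(column.get_positions_for_value_range(20..=20, 0..5), vec![1, 2, 4]);
    // Restricting the docid range is what lets a DocSet such as `IpRangeDocSet`
    // fetch matches block by block instead of scanning the whole column upfront.
    assert_eq!(column.get_positions_for_value_range(20..=20, 2..4), vec![2]);
    // Out-of-bounds docid ranges are clamped, as in the out-of-bounds test above.
    assert_eq!(column.get_positions_for_value_range(20..=20, 3..u32::MAX), vec![4]);
}
```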