Compare commits

...

23 Commits

Author SHA1 Message Date
Paul Masurel
c8c80d21cb Removing the need for column on Multivalued blabla 2022-11-14 11:46:15 +09:00
Pascal Seitz
a1c1f6a764 Improve position_to_docid, refactor, add tests 2022-11-10 13:56:01 +08:00
Pascal Seitz
605456027d add support for ip range query on multivalue fastfields 2022-11-08 17:45:15 +08:00
PSeitz
3e9c806890 Merge pull request #1665 from quickwit-oss/fix_num_vals
fix num_vals on u128 value index after merge
2022-11-07 21:46:02 +08:00
Pascal Seitz
c69a873dd3 fix num_vals on value index after merge 2022-11-07 21:05:21 +08:00
PSeitz
666afcf641 Merge pull request #1663 from PSeitz/fix_clippy
fix clippy
2022-11-07 18:11:20 +08:00
Pascal Seitz
38ad46e580 fix clippy 2022-11-07 16:09:55 +08:00
PSeitz
e948889f4c Merge pull request #1662 from quickwit-oss/fix_num_vals
fix num_vals in multivalue index after merge
2022-11-07 15:57:32 +08:00
Pascal Seitz
6e636c9cea fix num_vals in multivalue index after merge 2022-11-07 15:00:52 +08:00
PSeitz
5a610efbc1 Merge pull request #1661 from quickwit-oss/upgrade_criterion
update criterion to 0.4
2022-11-04 14:45:34 +08:00
Pascal Seitz
500a0d5e48 update criterion to 0.4 2022-11-04 13:26:29 +08:00
PSeitz
509a265659 add docstore version (#1652)
* add docstore version

closes #1589

* assert for docstore version
2022-11-04 10:19:16 +09:00
PSeitz
5b2cea1b97 Merge pull request #1656 from quickwit-oss/multival_offset_index
move multivalue index to own file
2022-11-02 14:03:06 +08:00
PSeitz
a5a80ffaea Update fastfield_codecs/src/column.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-11-02 06:37:27 +01:00
PSeitz
0f98d91a39 Merge pull request #1646 from quickwit-oss/no_score_calls
No score calls if score is not requested
2022-11-01 20:09:32 +08:00
PSeitz
2af6b01c17 Update src/query/boolean_query/boolean_weight.rs
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-11-01 16:13:00 +08:00
Adam Reichold
c32ab66bbd Small improvements to StopWordFilter (#1657)
* Do not copy the whole set of stop words for each stream

* Make construction of StopWordFilter more flexible.
2022-11-01 16:47:34 +09:00
PSeitz
3f3a6f9990 Merge pull request #1653 from quickwit-oss/faster_hash
switch to fx hashmap
2022-11-01 14:53:18 +08:00
Pascal Seitz
83325d8f3f move multivalue index to own file
start_doc parameter in positions to docids
2022-11-01 10:36:13 +08:00
Pascal Seitz
43df356010 rename to docset 2022-10-27 16:53:38 +08:00
Pascal Seitz
279b1b28d3 switch to fx hashmap 2022-10-27 16:19:59 +08:00
Pascal Seitz
dfab201191 for_each_docset to iterate without score 2022-10-26 17:25:05 +08:00
Pascal Seitz
af839753e0 No score calls if score is not requested 2022-10-26 12:18:35 +08:00
32 changed files with 798 additions and 377 deletions

View File

@@ -1,6 +1,7 @@
Tantivy 0.19
================================
- Skip score calculation when no scoring is required [#1646](https://github.com/quickwit-oss/tantivy/pull/1646) (@PSeitz) (see the usage sketch below)
- Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
- Major bugfix: Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
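The no-score path from #1646 above is reached through collectors whose `requires_scoring()` is false, such as `Count`. A hedged usage sketch, following the published tantivy 0.19-era examples; treat exact API details as illustrative:

```rust
// Counting hits does not need BM25 scores, so with #1646 the search is
// driven through the no-score iteration path. Sketch only.
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(body => "hello fast field world"))?;
    index_writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![body]).parse_query("hello")?;
    // Count::requires_scoring() is false, so no scores are computed.
    let count = searcher.search(&query, &Count)?;
    assert_eq!(count, 1);
    Ok(())
}
```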

View File

@@ -71,10 +71,10 @@ maplit = "1.0.2"
matches = "0.1.9"
pretty_assertions = "1.2.1"
proptest = "1.0.0"
criterion = "0.3.5"
criterion = "0.4"
test-log = "0.2.10"
env_logger = "0.9.0"
pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
futures = "0.3.21"
[dev-dependencies.fail]

View File

@@ -157,7 +157,7 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start by a valid
/// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
let (result, vlen) = read_u32_vint_no_advance(*data);
let (result, vlen) = read_u32_vint_no_advance(data);
*data = &data[vlen..];
result
}
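For context on the `&mut &[u8]` pattern this hunk touches: the function reads a varint and then advances the caller's slice past it, so the extra `*data` deref in the old line was redundant. A minimal sketch of the same pattern, using a generic LEB128-style encoding rather than tantivy's exact wire format (assumes well-formed input):

```rust
// Hypothetical varint reader illustrating the `&mut &[u8]` idiom:
// decode a u32, then advance the caller's slice past the payload.
fn read_u32_varint_sketch(data: &mut &[u8]) -> u32 {
    let mut result: u32 = 0;
    let mut shift = 0;
    let mut consumed = 0;
    for &byte in data.iter() {
        consumed += 1;
        result |= ((byte & 0x7f) as u32) << shift;
        if byte & 0x80 == 0 {
            break;
        }
        shift += 7;
    }
    // Advance the caller's slice, exactly like `*data = &data[vlen..];` above.
    *data = &data[consumed..];
    result
}

fn main() {
    let bytes = vec![0x96, 0x01, 0xff]; // 150 as LEB128, then a trailing byte
    let mut slice: &[u8] = &bytes;
    assert_eq!(read_u32_varint_sketch(&mut slice), 150);
    assert_eq!(slice, &[0xff][..]); // slice advanced past the varint
}
```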

View File

@@ -113,7 +113,7 @@ mod tests {
b.iter(|| {
let mut positions = Vec::new();
column.get_positions_for_value_range(
column.get_docids_for_value_range(
major_item..=major_item,
0..data.len() as u32,
&mut positions,
@@ -129,7 +129,7 @@ mod tests {
b.iter(|| {
let mut positions = Vec::new();
column.get_positions_for_value_range(
column.get_docids_for_value_range(
minor_item..=minor_item,
0..data.len() as u32,
&mut positions,
@@ -145,11 +145,7 @@ mod tests {
b.iter(|| {
let mut positions = Vec::new();
column.get_positions_for_value_range(
0..=u128::MAX,
0..data.len() as u32,
&mut positions,
);
column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
positions
});
}

View File

@@ -35,7 +35,7 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
///
/// Note that position == docid for single value fast fields
#[inline]
fn get_positions_for_value_range(
fn get_docids_for_value_range(
&self,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
@@ -70,6 +70,11 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
/// The number of values in the column.
fn num_vals(&self) -> u32;
/// The number of docs in the column. For single value columns this equals num_vals.
fn num_docs(&self) -> u32 {
self.num_vals()
}
/// Returns an iterator over the data
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
@@ -222,13 +227,13 @@ where
)
}
fn get_positions_for_value_range(
fn get_docids_for_value_range(
&self,
range: RangeInclusive<Output>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.from_column.get_positions_for_value_range(
self.from_column.get_docids_for_value_range(
self.monotonic_mapping.inverse(range.start().clone())
..=self.monotonic_mapping.inverse(range.end().clone()),
doc_id_range,
@@ -240,6 +245,7 @@ where
// and we do not have any specialized implementation anyway.
}
/// Wraps an iterator into a `Column`.
pub struct IterColumn<T>(T);
impl<T> From<T> for IterColumn<T>
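The hunk above adds a defaulted `num_docs` to the `Column` trait and re-exports `IterColumn`. A simplified re-sketch of the trait shape (the real trait also carries `min_value`/`max_value` and `Send + Sync` bounds), showing why the default `num_docs() == num_vals()` holds for single-value columns while multivalue offset columns motivate keeping the two notions separate:

```rust
// Simplified stand-in for the Column trait from this hunk.
trait ColumnSketch<T> {
    fn get_val(&self, idx: u32) -> T;
    fn num_vals(&self) -> u32;
    /// Defaults to num_vals, as in the diff above: one value per doc.
    fn num_docs(&self) -> u32 {
        self.num_vals()
    }
}

struct VecColumnSketch(Vec<u64>);

impl ColumnSketch<u64> for VecColumnSketch {
    fn get_val(&self, idx: u32) -> u64 {
        self.0[idx as usize]
    }
    fn num_vals(&self) -> u32 {
        self.0.len() as u32
    }
}

fn main() {
    let col = VecColumnSketch(vec![3, 1, 4, 1, 5]);
    assert_eq!(col.num_vals(), 5);
    assert_eq!(col.num_docs(), 5); // default holds for single-value columns
}
```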

View File

@@ -306,13 +306,13 @@ impl Column<u128> for CompactSpaceDecompressor {
}
#[inline]
fn get_positions_for_value_range(
fn get_docids_for_value_range(
&self,
value_range: RangeInclusive<u128>,
doc_id_range: Range<u32>,
positions_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.get_positions_for_value_range(value_range, doc_id_range, positions)
self.get_positions_for_value_range(value_range, positions_range, positions)
}
}
@@ -351,13 +351,13 @@ impl CompactSpaceDecompressor {
pub fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<u128>,
doc_id_range: Range<u32>,
position_range: Range<u32>,
positions: &mut Vec<u32>,
) {
if value_range.start() > value_range.end() {
return;
}
let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
let position_range = position_range.start..position_range.end.min(self.num_vals());
let from_value = *value_range.start();
let to_value = *value_range.end();
assert!(to_value >= from_value);
@@ -390,10 +390,10 @@ impl CompactSpaceDecompressor {
let range = compact_from..=compact_to;
let scan_num_docs = doc_id_range.end - doc_id_range.start;
let scan_num_docs = position_range.end - position_range.start;
let step_size = 4;
let cutoff = doc_id_range.start + scan_num_docs - scan_num_docs % step_size;
let cutoff = position_range.start + scan_num_docs - scan_num_docs % step_size;
let mut push_if_in_range = |idx, val| {
if range.contains(&val) {
@@ -402,7 +402,7 @@ impl CompactSpaceDecompressor {
};
let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
// unrolled loop
for idx in (doc_id_range.start..cutoff).step_by(step_size as usize) {
for idx in (position_range.start..cutoff).step_by(step_size as usize) {
let idx1 = idx;
let idx2 = idx + 1;
let idx3 = idx + 2;
@@ -418,7 +418,7 @@ impl CompactSpaceDecompressor {
}
// handle rest
for idx in cutoff..doc_id_range.end {
for idx in cutoff..position_range.end {
push_if_in_range(idx, get_val(idx as u32));
}
}
@@ -704,7 +704,7 @@ mod tests {
doc_id_range: Range<u32>,
) -> Vec<u32> {
let mut positions = Vec::new();
column.get_positions_for_value_range(value_range, doc_id_range, &mut positions);
column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
positions
}
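The scan above is manually unrolled: a step size of 4 with a cutoff aligned to it, then a scalar tail loop for the remainder. A standalone sketch of the same pattern over a plain slice (illustrative only, not the bit-packed implementation):

```rust
// Unrolled range scan: four independent lookups per iteration help the
// compiler keep multiple loads in flight; the tail loop handles the rest.
fn scan_positions_in_range(
    values: &[u64],
    range: std::ops::RangeInclusive<u64>,
    positions: &mut Vec<u32>,
) {
    let step_size: u32 = 4;
    let num = values.len() as u32;
    let cutoff = num - num % step_size;
    let mut push_if_in_range = |idx: u32, val: u64| {
        if range.contains(&val) {
            positions.push(idx);
        }
    };
    // unrolled loop
    for idx in (0..cutoff).step_by(step_size as usize) {
        push_if_in_range(idx, values[idx as usize]);
        push_if_in_range(idx + 1, values[(idx + 1) as usize]);
        push_if_in_range(idx + 2, values[(idx + 2) as usize]);
        push_if_in_range(idx + 3, values[(idx + 3) as usize]);
    }
    // handle rest
    for idx in cutoff..num {
        push_if_in_range(idx, values[idx as usize]);
    }
}

fn main() {
    let vals = vec![5u64, 9, 2, 7, 8, 1];
    let mut hits = Vec::new();
    scan_positions_in_range(&vals, 5..=9, &mut hits);
    assert_eq!(hits, vec![0, 1, 3, 4]);
}
```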

View File

@@ -41,7 +41,7 @@ mod serialize;
use self::bitpacked::BitpackedCodec;
use self::blockwise_linear::BlockwiseLinearCodec;
pub use self::column::{monotonic_map_column, Column, VecColumn};
pub use self::column::{monotonic_map_column, Column, IterColumn, VecColumn};
use self::linear::LinearCodec;
pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
@@ -218,7 +218,7 @@ mod tests {
.map(|(pos, _)| pos as u32)
.collect();
let mut positions = Vec::new();
reader.get_positions_for_value_range(
reader.get_docids_for_value_range(
data[test_rand_idx]..=data[test_rand_idx],
0..data.len() as u32,
&mut positions,

View File

@@ -119,7 +119,7 @@ fn bench_ip() {
for value in dataset.iter().take(1110).skip(1100).cloned() {
doc_values.clear();
print_time!("get range");
decompressor.get_positions_for_value_range(
decompressor.get_docids_for_value_range(
value..=value,
0..decompressor.num_vals(),
&mut doc_values,

View File

@@ -4,8 +4,6 @@
//! intermediate average results, which is the sum and the number of values. The actual average is
//! calculated on the step from intermediate to final aggregation result tree.
use std::collections::HashMap;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
@@ -18,7 +16,7 @@ use crate::TantivyError;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// The final aggregation result.
pub struct AggregationResults(pub HashMap<String, AggregationResult>);
pub struct AggregationResults(pub FxHashMap<String, AggregationResult>);
impl AggregationResults {
pub(crate) fn get_value_from_aggregation(
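`FxHashMap` is std's `HashMap` parameterized with the fast, non-DoS-resistant Fx hasher, which is why the "switch to fx hashmap" commit can swap the type and leave call sites alone: only construction changes. A minimal sketch (requires the `rustc-hash` crate):

```rust
// FxHashMap::default() replaces HashMap::new(); insert/get keep the std API.
use rustc_hash::FxHashMap;

fn main() {
    let mut counts: FxHashMap<String, u64> = FxHashMap::default();
    *counts.entry("bucket_a".to_string()).or_insert(0) += 1;
    assert_eq!(counts.get("bucket_a"), Some(&1));
}
```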

View File

@@ -3,7 +3,6 @@
//! indices.
use std::cmp::Ordering;
use std::collections::HashMap;
use itertools::Itertools;
use rustc_hash::FxHashMap;
@@ -51,7 +50,7 @@ impl IntermediateAggregationResults {
// Important assumption:
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
// request
let mut results: HashMap<String, AggregationResult> = HashMap::new();
let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();
if let Some(buckets) = self.buckets {
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
@@ -132,7 +131,7 @@ impl IntermediateAggregationResults {
}
fn convert_and_add_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
results: &mut FxHashMap<String, AggregationResult>,
metrics: VecWithNames<IntermediateMetricResult>,
) {
results.extend(
@@ -143,7 +142,7 @@ fn convert_and_add_final_metrics_to_result(
}
fn add_empty_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
results: &mut FxHashMap<String, AggregationResult>,
req_metrics: &VecWithNames<MetricAggregation>,
) -> crate::Result<()> {
results.extend(req_metrics.iter().map(|(key, req)| {
@@ -157,7 +156,7 @@ fn add_empty_final_metrics_to_result(
}
fn add_empty_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
results: &mut FxHashMap<String, AggregationResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
let requested_buckets = req_buckets.iter();
@@ -169,7 +168,7 @@ fn add_empty_final_buckets_to_result(
}
fn convert_and_add_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
results: &mut FxHashMap<String, AggregationResult>,
buckets: VecWithNames<IntermediateBucketResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {

View File

@@ -616,7 +616,7 @@ mod tests {
.map(|mut doc| {
doc.add_facet(
facet_field,
&format!("/facet/{}", thread_rng().sample(&uniform)),
&format!("/facet/{}", thread_rng().sample(uniform)),
);
doc
})

View File

@@ -172,17 +172,33 @@ pub trait Collector: Sync + Send {
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
if let Some(alive_bitset) = reader.alive_bitset() {
weight.for_each(reader, &mut |doc, score| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, score);
}
})?;
} else {
weight.for_each(reader, &mut |doc, score| {
segment_collector.collect(doc, score);
})?;
}
match (reader.alive_bitset(), self.requires_scoring()) {
(Some(alive_bitset), true) => {
weight.for_each(reader, &mut |doc, score| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, score);
}
})?;
}
(Some(alive_bitset), false) => {
weight.for_each_no_score(reader, &mut |doc| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, 0.0);
}
})?;
}
(None, true) => {
weight.for_each(reader, &mut |doc, score| {
segment_collector.collect(doc, score);
})?;
}
(None, false) => {
weight.for_each_no_score(reader, &mut |doc| {
segment_collector.collect(doc, 0.0);
})?;
}
}
Ok(segment_collector.harvest())
}
}
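The rewrite above dispatches on `(alive_bitset, requires_scoring)` so that collectors that do not need scores never pay for score computation. A hedged sketch of the dispatch idea with stand-in types (not tantivy's actual traits):

```rust
// A collector advertises whether it needs scores; the driver picks the
// scored or unscored iteration path, mirroring the match in collect_segment.
trait CollectorSketch {
    fn requires_scoring(&self) -> bool;
    fn collect(&mut self, doc: u32, score: f32);
}

struct CountCollector(u64);

impl CollectorSketch for CountCollector {
    fn requires_scoring(&self) -> bool {
        false // counting never needs BM25 scores
    }
    fn collect(&mut self, _doc: u32, _score: f32) {
        self.0 += 1;
    }
}

// Driver: skip score computation entirely when the collector opts out.
fn drive(docs: &[u32], collector: &mut dyn CollectorSketch) {
    if collector.requires_scoring() {
        for &doc in docs {
            let score = (doc as f32).sqrt(); // stand-in for a real scorer
            collector.collect(doc, score);
        }
    } else {
        for &doc in docs {
            collector.collect(doc, 0.0); // matches the `collect(doc, 0.0)` calls above
        }
    }
}

fn main() {
    let mut c = CountCollector(0);
    drive(&[1, 2, 3], &mut c);
    assert_eq!(c.0, 3);
}
```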

View File

@@ -133,7 +133,7 @@ impl SegmentMeta {
/// associated with a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(&*match component {
path.push_str(&match component {
SegmentComponent::Postings => ".idx".to_string(),
SegmentComponent::Positions => ".pos".to_string(),
SegmentComponent::Terms => ".term".to_string(),

View File

@@ -55,7 +55,7 @@ impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {
impl Drop for DirectoryLockGuard {
fn drop(&mut self) {
if let Err(e) = self.directory.delete(&*self.path) {
if let Err(e) = self.directory.delete(&self.path) {
error!("Failed to remove the lock file. {:?}", e);
}
}

View File

@@ -1,10 +1,9 @@
use std::ops::Range;
use std::sync::Arc;
use fastfield_codecs::Column;
use crate::directory::{FileSlice, OwnedBytes};
use crate::fastfield::MultiValueLength;
use crate::fastfield::MultiValueIndex;
use crate::DocId;
/// Reader for byte array fast fields
@@ -19,7 +18,7 @@ use crate::DocId;
/// and the start index for the next document, and keeping the bytes in between.
#[derive(Clone)]
pub struct BytesFastFieldReader {
idx_reader: Arc<dyn Column<u64>>,
idx_reader: MultiValueIndex,
values: OwnedBytes,
}
@@ -29,24 +28,26 @@ impl BytesFastFieldReader {
values_file: FileSlice,
) -> crate::Result<BytesFastFieldReader> {
let values = values_file.read_bytes()?;
Ok(BytesFastFieldReader { idx_reader, values })
Ok(BytesFastFieldReader {
idx_reader: MultiValueIndex::new(idx_reader),
values,
})
}
fn range(&self, doc: DocId) -> Range<u32> {
let start = self.idx_reader.get_val(doc) as u32;
let end = self.idx_reader.get_val(doc + 1) as u32;
start..end
/// returns the multivalue index
pub fn get_index_reader(&self) -> &MultiValueIndex {
&self.idx_reader
}
/// Returns the bytes associated with the given `doc`
pub fn get_bytes(&self, doc: DocId) -> &[u8] {
let range = self.range(doc);
let range = self.idx_reader.range(doc);
&self.values.as_slice()[range.start as usize..range.end as usize]
}
/// Returns the length of the bytes associated with the given `doc`
pub fn num_bytes(&self, doc: DocId) -> u64 {
let range = self.range(doc);
let range = self.idx_reader.range(doc);
(range.end - range.start) as u64
}
@@ -55,15 +56,3 @@ impl BytesFastFieldReader {
self.values.len() as u64
}
}
impl MultiValueLength for BytesFastFieldReader {
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32> {
self.range(doc_id)
}
fn get_len(&self, doc_id: DocId) -> u64 {
self.num_bytes(doc_id)
}
fn get_total_len(&self) -> u64 {
self.total_num_bytes()
}
}

View File

@@ -27,8 +27,8 @@ pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub(crate) use self::multivalued::{get_fastfield_codecs_for_multivalue, MultivalueStartIndex};
pub use self::multivalued::{
MultiValueU128FastFieldWriter, MultiValuedFastFieldReader, MultiValuedFastFieldWriter,
MultiValuedU128FastFieldReader,
MultiValueIndex, MultiValueU128FastFieldWriter, MultiValuedFastFieldReader,
MultiValuedFastFieldWriter, MultiValuedU128FastFieldReader,
};
pub use self::readers::FastFieldReaders;
pub(crate) use self::readers::{type_and_cardinality, FastType};
@@ -36,7 +36,7 @@ pub use self::serializer::{Column, CompositeFastFieldSerializer};
use self::writer::unexpected_value;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::schema::{Type, Value};
use crate::{DateTime, DocId};
use crate::DateTime;
mod alive_bitset;
mod bytes;
@@ -47,17 +47,6 @@ mod readers;
mod serializer;
mod writer;
/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
/// for a doc_id
pub trait MultiValueLength {
/// returns the positions for a docid
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32>;
/// returns the num of values associated with a doc_id
fn get_len(&self, doc_id: DocId) -> u64;
/// returns the sum of num values for all doc_ids
fn get_total_len(&self) -> u64;
}
/// Trait for types that are allowed for fast fields:
/// (u64, i64 and f64, bool, DateTime).
pub trait FastValue:

View File

@@ -0,0 +1,140 @@
use std::ops::Range;
use std::sync::Arc;
use fastfield_codecs::Column;
use crate::DocId;
#[derive(Clone)]
/// Index to resolve the value range for a given doc_id.
/// Offsets start at 0.
pub struct MultiValueIndex {
idx: Arc<dyn Column<u64>>,
}
impl MultiValueIndex {
pub(crate) fn new(idx: Arc<dyn Column<u64>>) -> Self {
Self { idx }
}
/// Returns `[start, end)`, such that the values associated with
/// the given document are `start..end`.
#[inline]
pub(crate) fn range(&self, doc: DocId) -> Range<u32> {
let start = self.idx.get_val(doc) as u32;
let end = self.idx.get_val(doc + 1) as u32;
start..end
}
/// Returns `[start, end)`, such that the values associated with
/// the given documents are `start..end`.
///
/// The passed end range is allowed to be out of bounds.
#[inline]
pub(crate) fn docid_range_to_position_range(&self, range: Range<DocId>) -> Range<u32> {
let end_docid = range.end.min(self.num_docs() - 1) + 1;
let start_docid = range.start.min(end_docid);
let start = self.idx.get_val(start_docid) as u32;
let end = self.idx.get_val(end_docid) as u32;
assert!(start <= end);
start..end
}
/// returns the num of values associated with a doc_id
pub(crate) fn num_vals_for_doc(&self, doc: DocId) -> u32 {
let range = self.range(doc);
range.end - range.start
}
/// Returns the overall number of values in this field.
#[inline]
pub fn total_num_vals(&self) -> u64 {
self.idx.max_value()
}
/// Returns the number of documents in the index.
#[inline]
pub fn num_docs(&self) -> u32 {
self.idx.num_vals() - 1
}
/// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
/// Positions are converted in place to docids.
///
/// Since the index only maps docid -> value position range (there is no inverse index for
/// value position -> docid), we scan the index.
///
/// Correctness: positions need to be sorted. idx_reader needs to contain monotonically
/// increasing positions.
///
/// TODO: Instead of a linear scan we could employ an exponential search followed by a binary
/// search to match a docid to its value position.
pub(crate) fn positions_to_docids(&self, doc_id_range: Range<u32>, positions: &mut Vec<u32>) {
if positions.is_empty() {
return;
}
let mut cur_doc = doc_id_range.start;
let mut last_doc = None;
assert!(self.idx.get_val(doc_id_range.start) as u32 <= positions[0]);
let mut write_doc_pos = 0;
for i in 0..positions.len() {
let pos = positions[i];
loop {
let end = self.idx.get_val(cur_doc + 1) as u32;
if end > pos {
positions[write_doc_pos] = cur_doc;
write_doc_pos += if last_doc == Some(cur_doc) { 0 } else { 1 };
last_doc = Some(cur_doc);
break;
}
cur_doc += 1;
}
}
positions.truncate(write_doc_pos);
}
}
#[cfg(test)]
mod tests {
use std::ops::Range;
use std::sync::Arc;
use fastfield_codecs::IterColumn;
use crate::fastfield::MultiValueIndex;
fn index_to_pos_helper(
index: &MultiValueIndex,
doc_id_range: Range<u32>,
positions: &[u32],
) -> Vec<u32> {
let mut positions = positions.to_vec();
index.positions_to_docids(doc_id_range, &mut positions);
positions
}
#[test]
fn test_positions_to_docid() {
let offsets = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
let column = IterColumn::from(offsets.into_iter());
let index = MultiValueIndex::new(Arc::new(column));
assert_eq!(index.num_docs(), 5);
{
let positions = vec![10u32, 11, 15, 20, 21, 22];
assert_eq!(index_to_pos_helper(&index, 0..5, &positions), vec![1, 3, 4]);
assert_eq!(index_to_pos_helper(&index, 1..5, &positions), vec![1, 3, 4]);
assert_eq!(index_to_pos_helper(&index, 0..5, &[9]), vec![0]);
assert_eq!(index_to_pos_helper(&index, 1..5, &[10]), vec![1]);
assert_eq!(index_to_pos_helper(&index, 1..5, &[11]), vec![1]);
assert_eq!(index_to_pos_helper(&index, 2..5, &[12]), vec![2]);
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14]), vec![2]);
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14, 15]), vec![2, 3]);
}
}
}
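A standalone walk-through of `positions_to_docids` under simplified types: with offsets `[0, 10, 12, 15, 22, 23]`, doc 0 owns value positions `0..10`, doc 1 owns `10..12`, and so on, matching the test above. Sorted value positions are rewritten in place to deduplicated doc ids:

```rust
// Sketch of the in-place conversion from index.rs, over a plain offset slice.
fn positions_to_docids(offsets: &[u64], positions: &mut Vec<u32>) {
    if positions.is_empty() {
        return;
    }
    let mut cur_doc = 0u32;
    let mut last_doc = None;
    let mut write_pos = 0;
    for i in 0..positions.len() {
        let pos = positions[i];
        // Advance cur_doc until its range offsets[d]..offsets[d + 1] covers pos.
        loop {
            let end = offsets[(cur_doc + 1) as usize] as u32;
            if end > pos {
                positions[write_pos] = cur_doc;
                write_pos += if last_doc == Some(cur_doc) { 0 } else { 1 };
                last_doc = Some(cur_doc);
                break;
            }
            cur_doc += 1;
        }
    }
    positions.truncate(write_pos);
}

fn main() {
    let offsets = [0u64, 10, 12, 15, 22, 23];
    // Positions 10 and 11 fall in doc 1, 15/20/21 in doc 3, 22 in doc 4.
    let mut positions = vec![10u32, 11, 15, 20, 21, 22];
    positions_to_docids(&offsets, &mut positions);
    assert_eq!(positions, vec![1, 3, 4]);
}
```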

View File

@@ -1,7 +1,9 @@
mod index;
mod reader;
mod writer;
use fastfield_codecs::FastFieldCodecType;
pub use index::MultiValueIndex;
pub use self::reader::{MultiValuedFastFieldReader, MultiValuedU128FastFieldReader};
pub(crate) use self::writer::MultivalueStartIndex;

View File

@@ -3,7 +3,8 @@ use std::sync::Arc;
use fastfield_codecs::{Column, MonotonicallyMappableToU128};
use crate::fastfield::{FastValue, MultiValueLength};
use super::MultiValueIndex;
use crate::fastfield::FastValue;
use crate::DocId;
/// Reader for a multivalued `u64` fast field.
@@ -13,9 +14,10 @@ use crate::DocId;
/// The `vals_reader` will access the concatenated list of all
/// values for all reader.
/// The `idx_reader` associated, for each document, the index of its first value.
/// Stores the start position for each document.
#[derive(Clone)]
pub struct MultiValuedFastFieldReader<Item: FastValue> {
idx_reader: Arc<dyn Column<u64>>,
idx_reader: MultiValueIndex,
vals_reader: Arc<dyn Column<Item>>,
}
@@ -25,20 +27,11 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
vals_reader: Arc<dyn Column<Item>>,
) -> MultiValuedFastFieldReader<Item> {
MultiValuedFastFieldReader {
idx_reader,
idx_reader: MultiValueIndex::new(idx_reader),
vals_reader,
}
}
/// Returns `[start, end)`, such that the values associated with
/// the given document are `start..end`.
#[inline]
fn range(&self, doc: DocId) -> Range<u32> {
let start = self.idx_reader.get_val(doc) as u32;
let end = self.idx_reader.get_val(doc + 1) as u32;
start..end
}
/// Returns the array of values associated with the given `doc`.
#[inline]
fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<Item>) {
@@ -51,10 +44,15 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
/// Returns the array of values associated with the given `doc`.
#[inline]
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
let range = self.range(doc);
let range = self.idx_reader.range(doc);
self.get_vals_for_range(range, vals);
}
/// returns the multivalue index
pub fn get_index_reader(&self) -> &MultiValueIndex {
&self.idx_reader
}
/// Returns the minimum value for this fast field.
///
/// The min value does not take in account of possible
@@ -75,28 +73,14 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
/// Returns the number of values associated with the document `DocId`.
#[inline]
pub fn num_vals(&self, doc: DocId) -> usize {
let range = self.range(doc);
(range.end - range.start) as usize
pub fn num_vals(&self, doc: DocId) -> u32 {
self.idx_reader.num_vals_for_doc(doc)
}
/// Returns the overall number of values in this field .
/// Returns the overall number of values in this field.
#[inline]
pub fn total_num_vals(&self) -> u64 {
self.idx_reader.max_value()
}
}
impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
fn get_range(&self, doc_id: DocId) -> Range<u32> {
self.range(doc_id)
}
fn get_len(&self, doc_id: DocId) -> u64 {
self.num_vals(doc_id) as u64
}
fn get_total_len(&self) -> u64 {
self.total_num_vals() as u64
self.idx_reader.total_num_vals()
}
}
@@ -109,7 +93,7 @@ impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
/// The `idx_reader` associates, for each document, the index of its first value.
#[derive(Clone)]
pub struct MultiValuedU128FastFieldReader<T: MonotonicallyMappableToU128> {
idx_reader: Arc<dyn Column<u64>>,
idx_reader: MultiValueIndex,
vals_reader: Arc<dyn Column<T>>,
}
@@ -119,24 +103,31 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
vals_reader: Arc<dyn Column<T>>,
) -> MultiValuedU128FastFieldReader<T> {
Self {
idx_reader,
idx_reader: MultiValueIndex::new(idx_reader),
vals_reader,
}
}
/// Returns `[start, end)`, such that the values associated
/// to the given document are `start..end`.
#[inline]
fn range(&self, doc: DocId) -> Range<u32> {
let start = self.idx_reader.get_val(doc) as u32;
let end = self.idx_reader.get_val(doc + 1) as u32;
start..end
fn get_docids_for_value_range(
&self,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
let position_range = self
.get_index_reader()
.docid_range_to_position_range(doc_id_range.clone());
self.vals_reader
.get_docids_for_value_range(value_range, position_range, positions);
self.idx_reader.positions_to_docids(doc_id_range, positions);
}
/// Returns the array of values associated to the given `doc`.
#[inline]
pub fn get_first_val(&self, doc: DocId) -> Option<T> {
let range = self.range(doc);
let range = self.idx_reader.range(doc);
if range.is_empty() {
return None;
}
@@ -152,26 +143,18 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
.get_range(range.start as u64, &mut vals[..]);
}
/// Returns the index reader
pub fn get_index_reader(&self) -> &MultiValueIndex {
&self.idx_reader
}
/// Returns the array of values associated to the given `doc`.
#[inline]
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<T>) {
let range = self.range(doc);
let range = self.idx_reader.range(doc);
self.get_vals_for_range(range, vals);
}
/// Returns all docids which are in the provided value range
pub fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
) -> Vec<DocId> {
let mut positions = Vec::new(); // TODO replace
self.vals_reader
.get_positions_for_value_range(value_range, doc_id_range, &mut positions);
positions_to_docids(&positions, self.idx_reader.as_ref())
}
/// Iterates over all elements in the fast field
pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
self.vals_reader.iter()
@@ -197,85 +180,27 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
/// Returns the number of values associated with the document `DocId`.
#[inline]
pub fn num_vals(&self, doc: DocId) -> usize {
let range = self.range(doc);
(range.end - range.start) as usize
pub fn num_vals(&self, doc: DocId) -> u32 {
self.idx_reader.num_vals_for_doc(doc)
}
/// Returns the overall number of values in this field.
/// Returns the overall number of values in this field. It does not include deletes.
#[inline]
pub fn total_num_vals(&self) -> u64 {
self.idx_reader.max_value()
assert_eq!(
self.vals_reader.num_vals() as u64,
self.get_index_reader().total_num_vals()
);
self.idx_reader.total_num_vals()
}
}
impl<T: MonotonicallyMappableToU128> MultiValueLength for MultiValuedU128FastFieldReader<T> {
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u32> {
self.range(doc_id)
}
fn get_len(&self, doc_id: DocId) -> u64 {
self.num_vals(doc_id) as u64
}
fn get_total_len(&self) -> u64 {
self.total_num_vals() as u64
}
}
/// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
///
/// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the index.
///
/// Correctness: positions needs to be sorted. idx_reader needs to contain monotonically increasing
/// positions.
///
/// TODO: Instead of a linear scan we can employ a expotential search into binary search to match a
/// docid to its value position.
fn positions_to_docids<C: Column + ?Sized>(positions: &[u32], idx_reader: &C) -> Vec<DocId> {
let mut docs = vec![];
let mut cur_doc = 0u32;
let mut last_doc = None;
for pos in positions {
loop {
let end = idx_reader.get_val(cur_doc + 1) as u32;
if end > *pos {
// avoid duplicates
if Some(cur_doc) == last_doc {
break;
}
docs.push(cur_doc);
last_doc = Some(cur_doc);
break;
}
cur_doc += 1;
}
}
docs
}
#[cfg(test)]
mod tests {
use fastfield_codecs::VecColumn;
use crate::core::Index;
use crate::fastfield::multivalued::reader::positions_to_docids;
use crate::schema::{Cardinality, Facet, FacetOptions, NumericOptions, Schema};
#[test]
fn test_positions_to_docid() {
let positions = vec![10u32, 11, 15, 20, 21, 22];
let offsets = vec![0, 10, 12, 15, 22, 23];
{
let column = VecColumn::from(&offsets);
let docids = positions_to_docids(&positions, &column);
assert_eq!(docids, vec![1, 3, 4]);
}
}
#[test]
fn test_multifastfield_reader() -> crate::Result<()> {
let mut schema_builder = Schema::builder();

View File

@@ -95,7 +95,7 @@ fn compute_deleted_bitset(
// document that were inserted before it.
delete_op
.target
.for_each(segment_reader, &mut |doc_matching_delete_query, _| {
.for_each_no_score(segment_reader, &mut |doc_matching_delete_query| {
if doc_opstamps.is_deleted(doc_matching_delete_query, delete_op.opstamp) {
alive_bitset.remove(doc_matching_delete_query);
might_have_changed = true;
@@ -805,7 +805,7 @@ mod tests {
use std::collections::{HashMap, HashSet};
use std::net::Ipv6Addr;
use fastfield_codecs::MonotonicallyMappableToU128;
use fastfield_codecs::{Column, MonotonicallyMappableToU128};
use proptest::prelude::*;
use proptest::prop_oneof;
use proptest::strategy::Strategy;
@@ -1591,6 +1591,25 @@ mod tests {
(existing_ids, deleted_ids)
}
fn get_id_list(ops: &[IndexingOp]) -> Vec<u64> {
let mut id_list = Vec::new();
for &op in ops {
match op {
IndexingOp::AddDoc { id } => {
id_list.push(id);
}
IndexingOp::DeleteDoc { id } => {
id_list.retain(|el| *el != id);
}
IndexingOp::DeleteDocQuery { id } => {
id_list.retain(|el| *el != id);
}
_ => {}
}
}
id_list
}
fn test_operation_strategy(
ops: &[IndexingOp],
sort_index: bool,
@@ -1600,7 +1619,9 @@ mod tests {
let ip_field = schema_builder.add_ip_addr_field("ip", FAST | INDEXED | STORED);
let ips_field = schema_builder.add_ip_addr_field(
"ips",
IpAddrOptions::default().set_fast(Cardinality::MultiValues),
IpAddrOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed(),
);
let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
let i64_field = schema_builder.add_i64_field("i64", INDEXED);
@@ -1665,11 +1686,13 @@ mod tests {
// rotate right
let multi_text_field_text3 = "test3 test1 test2 test3 test1 test2";
let ip_from_id = |id| Ipv6Addr::from_u128(id as u128);
for &op in ops {
match op {
IndexingOp::AddDoc { id } => {
let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
let ip_from_id = Ipv6Addr::from_u128(id as u128);
let ip = ip_from_id(id);
if !ip_exists(id) {
// every 3rd doc has no ip field
@@ -1693,9 +1716,9 @@ mod tests {
} else {
index_writer.add_document(doc!(id_field=>id,
bytes_field => id.to_le_bytes().as_slice(),
ip_field => ip_from_id,
ips_field => ip_from_id,
ips_field => ip_from_id,
ip_field => ip,
ips_field => ip,
ips_field => ip,
multi_numbers=> id,
multi_numbers => id,
bool_field => (id % 2u64) != 0,
@@ -1738,6 +1761,7 @@ mod tests {
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let num_segments_before_merge = searcher.segment_readers().len();
if force_end_merge {
index_writer.wait_merging_threads()?;
let mut index_writer = index.writer_for_tests()?;
@@ -1749,6 +1773,7 @@ mod tests {
assert!(index_writer.wait_merging_threads().is_ok());
}
}
let num_segments_after_merge = searcher.segment_readers().len();
old_reader.reload()?;
let old_searcher = old_reader.searcher();
@@ -1776,6 +1801,22 @@ mod tests {
.collect();
let (expected_ids_and_num_occurrences, deleted_ids) = expected_ids(ops);
let id_list = get_id_list(ops);
// multivalue fast field content
let mut all_ips = Vec::new();
let mut num_ips = 0;
for segment_reader in searcher.segment_readers().iter() {
let ip_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
for doc in segment_reader.doc_ids_alive() {
let mut vals = vec![];
ip_reader.get_vals(doc, &mut vals);
all_ips.extend_from_slice(&vals);
}
num_ips += ip_reader.total_num_vals();
}
let num_docs_expected = expected_ids_and_num_occurrences
.iter()
.map(|(_, id_occurrences)| *id_occurrences as usize)
@@ -1797,6 +1838,30 @@ mod tests {
.collect::<HashSet<_>>()
);
if force_end_merge && num_segments_before_merge > 1 && num_segments_after_merge == 1 {
let mut expected_multi_ips: Vec<_> = id_list
.iter()
.filter(|id| ip_exists(**id))
.flat_map(|id| vec![ip_from_id(*id), ip_from_id(*id)])
.collect();
assert_eq!(num_ips, expected_multi_ips.len() as u64);
expected_multi_ips.sort();
all_ips.sort();
assert_eq!(expected_multi_ips, all_ips);
// Test fastfield num_docs
let num_docs: usize = searcher
.segment_readers()
.iter()
.map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
ff_reader.num_docs() as usize
})
.sum();
assert_eq!(num_docs, num_docs_expected);
}
// Load all ips addr
let ips: HashSet<Ipv6Addr> = searcher
.segment_readers()
@@ -2000,6 +2065,51 @@ mod tests {
assert_eq!(do_search_ip_field(&format!("\"{}\"", ip_addr)), count);
}
}
// assert data is as expected
//
for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
let (existing_id, count) = (*existing_id, *count);
if !ip_exists(existing_id) {
continue;
}
let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
format!("{}:[{} TO {}]", field, &from.to_string(), &to.to_string())
};
let ip = ip_from_id(existing_id);
let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64;
// Range query on single value field
// let query = gen_query_inclusive("ip", ip, ip);
// assert_eq!(do_search_ip_field(&query), count);
// Range query on multi value field
let query = gen_query_inclusive("ips", ip, ip);
assert_eq!(do_search_ip_field(&query), count);
}
// ip range query on fast field
//
for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
let (existing_id, count) = (*existing_id, *count);
if !ip_exists(existing_id) {
continue;
}
let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
format!("{}:[{} TO {}]", field, &from.to_string(), &to.to_string())
};
let ip = ip_from_id(existing_id);
let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64;
// Range query on single value field
// let query = gen_query_inclusive("ip", ip, ip);
// assert_eq!(do_search_ip_field(&query), count);
// Range query on multi value field
let query = gen_query_inclusive("ips", ip, ip);
assert_eq!(do_search_ip_field(&query), count);
}
// test facets
for segment_reader in searcher.segment_readers().iter() {
let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();
@@ -2021,6 +2131,40 @@ mod tests {
Ok(())
}
#[test]
fn test_ip_range_query_multivalue_bug() {
assert!(test_operation_strategy(
&[
IndexingOp::AddDoc { id: 2 },
IndexingOp::Commit,
IndexingOp::AddDoc { id: 1 },
IndexingOp::AddDoc { id: 1 },
IndexingOp::Commit,
IndexingOp::Merge
],
true,
false
)
.is_ok());
}
#[test]
fn test_ff_num_ips_regression() {
assert!(test_operation_strategy(
&[
IndexingOp::AddDoc { id: 13 },
IndexingOp::AddDoc { id: 1 },
IndexingOp::Commit,
IndexingOp::DeleteDocQuery { id: 13 },
IndexingOp::AddDoc { id: 1 },
IndexingOp::Commit,
],
false,
true
)
.is_ok());
}
#[test]
fn test_minimal() {
assert!(test_operation_strategy(
@@ -2030,7 +2174,7 @@ mod tests {
IndexingOp::DeleteDoc { id: 13 }
],
true,
false
true
)
.is_ok());

View File

@@ -13,7 +13,7 @@ use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{
get_fastfield_codecs_for_multivalue, AliveBitSet, Column, CompositeFastFieldSerializer,
MultiValueLength, MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
MultiValueIndex, MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
@@ -348,9 +348,29 @@ impl IndexMerger {
field,
fast_field_serializer,
doc_id_mapping,
&segment_and_ff_readers,
&segment_and_ff_readers
.iter()
.map(|(segment_reader, u64s_reader)| {
(*segment_reader, u64s_reader.get_index_reader())
})
.collect::<Vec<_>>(),
)?;
let num_vals = segment_and_ff_readers
.iter()
.map(|(segment_reader, reader)| {
// TODO implement generic version, implement reverse scan, all - deletes
if let Some(alive_bitset) = segment_reader.alive_bitset() {
alive_bitset
.iter_alive()
.map(|doc| reader.num_vals(doc))
.sum()
} else {
reader.total_num_vals() as u32
}
})
.sum();
let fast_field_readers = segment_and_ff_readers
.into_iter()
.map(|(_, ff_reader)| ff_reader)
@@ -365,12 +385,7 @@ impl IndexMerger {
})
};
fast_field_serializer.create_u128_fast_field_with_idx(
field,
iter_gen,
doc_id_mapping.len() as u32,
1,
)?;
fast_field_serializer.create_u128_fast_field_with_idx(field, iter_gen, num_vals, 1)?;
Ok(())
}
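The `num_vals` computation above is the heart of the "fix num_vals on u128 value index after merge" commit: with deletes present, the merged value count must sum per-alive-doc value counts rather than take each source column's total. A toy sketch of that arithmetic (types are illustrative, not tantivy's):

```rust
// Each segment: an optional alive mask plus per-doc value counts.
fn merged_num_vals(segments: &[(Option<&[bool]>, Vec<u32>)]) -> u32 {
    segments
        .iter()
        .map(|(alive, num_vals_per_doc)| {
            if let Some(alive) = alive {
                // Only alive docs contribute their values to the merged column.
                num_vals_per_doc
                    .iter()
                    .zip(alive.iter())
                    .filter(|(_, &alive)| alive)
                    .map(|(n, _)| *n)
                    .sum::<u32>()
            } else {
                num_vals_per_doc.iter().sum()
            }
        })
        .sum()
}

fn main() {
    // Segment 0: doc 1 deleted; segment 1: no deletes.
    let segments = vec![
        (Some(&[true, false, true][..]), vec![2u32, 5, 1]),
        (None, vec![3u32]),
    ];
    // 2 + 1 from segment 0 (doc 1's 5 values are dropped) + 3 from segment 1.
    assert_eq!(merged_num_vals(&segments), 6);
}
```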
@@ -529,11 +544,11 @@ impl IndexMerger {
// Creating the index file to point into the data, generic over `BytesFastFieldReader` and
// `MultiValuedFastFieldReader`
//
fn write_1_n_fast_field_idx_generic<T: MultiValueLength + Send + Sync>(
fn write_1_n_fast_field_idx_generic(
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
segment_and_ff_readers: &[(&SegmentReader, T)],
segment_and_ff_readers: &[(&SegmentReader, &MultiValueIndex)],
) -> crate::Result<()> {
let column =
RemappedDocIdMultiValueIndexColumn::new(segment_and_ff_readers, doc_id_mapping);
@@ -567,7 +582,12 @@ impl IndexMerger {
field,
fast_field_serializer,
doc_id_mapping,
&segment_and_ff_readers,
&segment_and_ff_readers
.iter()
.map(|(segment_reader, u64s_reader)| {
(*segment_reader, u64s_reader.get_index_reader())
})
.collect::<Vec<_>>(),
)
}
@@ -697,7 +717,12 @@ impl IndexMerger {
field,
fast_field_serializer,
doc_id_mapping,
&segment_and_ff_readers,
&segment_and_ff_readers
.iter()
.map(|(segment_reader, u64s_reader)| {
(*segment_reader, u64s_reader.get_index_reader())
})
.collect::<Vec<_>>(),
)?;
let mut serialize_vals = fast_field_serializer.new_bytes_fast_field(field);
@@ -804,7 +829,7 @@ impl IndexMerger {
// Let's compute the list of non-empty posting lists
for (segment_ord, term_info) in merged_terms.current_segment_ords_and_term_infos() {
let segment_reader = &self.readers[segment_ord];
let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
let inverted_index: &InvertedIndexReader = &field_readers[segment_ord];
let segment_postings = inverted_index
.read_postings_from_terminfo(&term_info, segment_postings_option)?;
let alive_bitset_opt = segment_reader.alive_bitset();

View File

@@ -3,7 +3,7 @@ use std::cmp;
use fastfield_codecs::Column;
use super::flat_map_with_buffer::FlatMapWithBufferIter;
use crate::fastfield::{MultiValueLength, MultiValuedFastFieldReader};
use crate::fastfield::{MultiValueIndex, MultiValuedFastFieldReader};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::{DocAddress, SegmentReader};
@@ -94,17 +94,17 @@ impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
}
}
pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a, T: MultiValueLength> {
pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
multi_value_length_readers: Vec<&'a T>,
multi_value_length_readers: Vec<&'a MultiValueIndex>,
min_value: u64,
max_value: u64,
num_vals: u32,
}
impl<'a, T: MultiValueLength> RemappedDocIdMultiValueIndexColumn<'a, T> {
impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
pub(crate) fn new(
segment_and_ff_readers: &'a [(&'a SegmentReader, T)],
segment_and_ff_readers: &'a [(&'a SegmentReader, &'a MultiValueIndex)],
doc_id_mapping: &'a SegmentDocIdMapping,
) -> Self {
// We go through a complete first pass to compute the minimum and the
@@ -115,17 +115,19 @@ impl<'a, T: MultiValueLength> RemappedDocIdMultiValueIndexColumn<'a, T> {
let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
for segment_and_ff_reader in segment_and_ff_readers {
let segment_reader = segment_and_ff_reader.0;
let multi_value_length_reader = &segment_and_ff_reader.1;
let multi_value_length_reader = segment_and_ff_reader.1;
if !segment_reader.has_deletes() {
max_value += multi_value_length_reader.get_total_len();
max_value += multi_value_length_reader.total_num_vals();
} else {
for doc in segment_reader.doc_ids_alive() {
max_value += multi_value_length_reader.get_len(doc);
max_value += multi_value_length_reader.num_vals_for_doc(doc) as u64;
}
}
num_vals += segment_reader.num_docs();
multi_value_length_readers.push(multi_value_length_reader);
}
// The value range is always get_val(doc)..get_val(doc + 1)
num_vals += 1;
Self {
doc_id_mapping,
multi_value_length_readers,
@@ -136,7 +138,7 @@ impl<'a, T: MultiValueLength> RemappedDocIdMultiValueIndexColumn<'a, T> {
}
}
impl<'a, T: MultiValueLength + Send + Sync> Column for RemappedDocIdMultiValueIndexColumn<'a, T> {
impl<'a> Column for RemappedDocIdMultiValueIndexColumn<'a> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}
@@ -148,8 +150,8 @@ impl<'a, T: MultiValueLength + Send + Sync> Column for RemappedDocIdMultiValueIn
move |old_doc_addr| {
let ff_reader =
&self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
offset += ff_reader.get_len(old_doc_addr.doc_id);
offset
offset += ff_reader.num_vals_for_doc(old_doc_addr.doc_id);
offset as u64
},
)),
)

View File

@@ -33,7 +33,7 @@ where
&'a self,
term_dict: &'a TermDictionary,
) -> io::Result<TermStreamer<'a, &'a A>> {
let automaton: &A = &*self.automaton;
let automaton: &A = &self.automaton;
let term_stream_builder = term_dict.search(automaton);
term_stream_builder.into_stream()
}

View File

@@ -5,7 +5,7 @@ use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
use crate::query::term_query::TermScorer;
use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
use crate::query::weight::{for_each_docset, for_each_pruning_scorer, for_each_scorer};
use crate::query::{
intersect_scorers, EmptyScorer, Exclude, Explanation, Occur, RequiredOptionalScorer, Scorer,
Union, Weight,
@@ -219,6 +219,24 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
Ok(())
}
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = Union::build(term_scorers, &self.score_combiner_fn);
for_each_docset(&mut union_scorer, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_docset(scorer.as_mut(), callback);
}
}
Ok(())
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
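The `for_each_no_score` added above relies on `for_each_docset` (imported at the top of the hunk), which presumably walks the scorer as a plain `DocSet` and never asks for scores. A simplified stand-in sketch of that driving loop (not the real tantivy `DocSet` trait):

```rust
// Walk a DocSet-like cursor to exhaustion, handing each doc id to the
// callback without touching a scorer.
const TERMINATED: u32 = u32::MAX;

struct SliceDocSet<'a> {
    docs: &'a [u32],
    cursor: usize,
}

impl<'a> SliceDocSet<'a> {
    fn doc(&self) -> u32 {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }
    fn advance(&mut self) -> u32 {
        self.cursor += 1;
        self.doc()
    }
}

fn for_each_docset(docset: &mut SliceDocSet, callback: &mut dyn FnMut(u32)) {
    let mut doc = docset.doc();
    while doc != TERMINATED {
        callback(doc);
        doc = docset.advance();
    }
}

fn main() {
    let mut docset = SliceDocSet { docs: &[2, 5, 7], cursor: 0 };
    let mut collected = Vec::new();
    for_each_docset(&mut docset, &mut |doc| collected.push(doc));
    assert_eq!(collected, vec![2, 5, 7]);
}
```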

View File

@@ -11,6 +11,7 @@ use fastfield_codecs::{Column, MonotonicallyMappableToU128};
use super::range_query::map_bound;
use super::{ConstScorer, Explanation, Scorer, Weight};
use crate::fastfield::MultiValuedU128FastFieldReader;
use crate::schema::{Cardinality, Field};
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError, TERMINATED};
@@ -43,16 +44,29 @@ impl Weight for IPFastFieldRangeWeight {
let field_type = reader.schema().get_field_entry(self.field).field_type();
match field_type.fastfield_cardinality().unwrap() {
Cardinality::SingleValue => {
let ip_addr_fast_field = reader.fast_fields().ip_addr(self.field)?;
let ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>> =
reader.fast_fields().ip_addr(self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,
ip_addr_fast_field.as_ref(),
ip_addr_fast_field.min_value(),
ip_addr_fast_field.max_value(),
);
let docset = IpRangeDocSet::new(value_range, ip_addr_fast_field);
Ok(Box::new(ConstScorer::new(docset, boost)))
}
Cardinality::MultiValues => unimplemented!(),
Cardinality::MultiValues => {
let ip_addr_fast_field: MultiValuedU128FastFieldReader<Ipv6Addr> =
reader.fast_fields().ip_addrs(self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,
ip_addr_fast_field.min_value(),
ip_addr_fast_field.max_value(),
);
let docset = IpRangeDocSet::new(value_range, Arc::new(ip_addr_fast_field));
Ok(Box::new(ConstScorer::new(docset, boost)))
}
}
}
@@ -73,18 +87,19 @@ impl Weight for IPFastFieldRangeWeight {
fn bound_to_value_range(
left_bound: &Bound<Ipv6Addr>,
right_bound: &Bound<Ipv6Addr>,
column: &dyn Column<Ipv6Addr>,
min_value: Ipv6Addr,
max_value: Ipv6Addr,
) -> RangeInclusive<Ipv6Addr> {
let start_value = match left_bound {
Bound::Included(ip_addr) => *ip_addr,
Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() + 1),
Bound::Unbounded => column.min_value(),
Bound::Unbounded => min_value,
};
let end_value = match right_bound {
Bound::Included(ip_addr) => *ip_addr,
Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() - 1),
Bound::Unbounded => column.max_value(),
Bound::Unbounded => max_value,
};
start_value..=end_value
}
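A quick check of the `bound_to_value_range` logic above with plain `u32` instead of `Ipv6Addr`: exclusive bounds move one step toward the interior of the range, and unbounded ends fall back to the column's min/max. Hypothetical helper, for illustration only:

```rust
use std::ops::{Bound, RangeInclusive};

// Mirrors bound_to_value_range with u32 (assumes bounds away from the
// numeric extremes, like the Ipv6Addr version).
fn bound_to_range_u32(
    left: &Bound<u32>,
    right: &Bound<u32>,
    min_value: u32,
    max_value: u32,
) -> RangeInclusive<u32> {
    let start = match left {
        Bound::Included(v) => *v,
        Bound::Excluded(v) => *v + 1,
        Bound::Unbounded => min_value,
    };
    let end = match right {
        Bound::Included(v) => *v,
        Bound::Excluded(v) => *v - 1,
        Bound::Unbounded => max_value,
    };
    start..=end
}

fn main() {
    // (10, 20] with column min/max 0 and 100 becomes 11..=20.
    let range = bound_to_range_u32(&Bound::Excluded(10), &Bound::Included(20), 0, 100);
    assert_eq!(range, 11..=20);
}
```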
@@ -109,22 +124,23 @@ impl VecCursor {
fn current(&self) -> Option<u32> {
self.docs.get(self.current_pos).map(|el| *el as u32)
}
fn get_cleared_data(&mut self) -> &mut Vec<u32> {
self.docs.clear();
self.current_pos = 0;
&mut self.docs
}
fn last_value(&self) -> Option<u32> {
self.docs.iter().last().cloned()
}
fn is_empty(&self) -> bool {
self.current_pos >= self.docs.len()
}
}
struct IpRangeDocSet {
struct IpRangeDocSet<T> {
/// The range filter on the values.
value_range: RangeInclusive<Ipv6Addr>,
ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>,
ip_addrs: T,
/// The next docid start range to fetch (inclusive).
next_fetch_start: u32,
/// Number of docs range checked in a batch.
@@ -141,18 +157,17 @@ struct IpRangeDocSet {
last_seek_pos_opt: Option<u32>,
}
const DEFALT_FETCH_HORIZON: u32 = 128;
impl IpRangeDocSet {
fn new(
value_range: RangeInclusive<Ipv6Addr>,
ip_addr_fast_field: Arc<dyn Column<Ipv6Addr>>,
) -> Self {
const DEFAULT_FETCH_HORIZON: u32 = 128;
impl<T> IpRangeDocSet<T>
where Self: SingleOrMultivalued
{
fn new(value_range: RangeInclusive<Ipv6Addr>, ip_addrs: T) -> Self {
let mut ip_range_docset = Self {
value_range,
ip_addr_fast_field,
ip_addrs,
loaded_docs: VecCursor::new(),
next_fetch_start: 0,
fetch_horizon: DEFALT_FETCH_HORIZON,
fetch_horizon: DEFAULT_FETCH_HORIZON,
last_seek_pos_opt: None,
};
ip_range_docset.reset_fetch_range();
@@ -161,7 +176,7 @@ impl IpRangeDocSet {
}
fn reset_fetch_range(&mut self) {
self.fetch_horizon = DEFALT_FETCH_HORIZON;
self.fetch_horizon = DEFAULT_FETCH_HORIZON;
}
/// Returns true if more data could be fetched
@@ -185,36 +200,72 @@ impl IpRangeDocSet {
true
}
}
}
/// Fetches a block for docid range [next_fetch_start .. next_fetch_start + HORIZON]
trait SingleOrMultivalued {
fn num_docs(&self) -> u32;
fn fetch_horizon(&mut self, horizon: u32) -> bool {
let mut finished_to_end = false;
// Have different implem for single value and multivalue
todo!();
// let mut finished_to_end = false;
let limit = self.ip_addr_fast_field.num_vals();
let mut end = self.next_fetch_start + horizon;
if end >= limit {
end = limit;
finished_to_end = true;
}
// let limit = self.num_docs();
// let mut end = self.next_fetch_start + horizon;
// if end >= limit {
// end = limit;
// finished_to_end = true;
// }
let data = self.loaded_docs.get_cleared_data();
self.ip_addr_fast_field.get_positions_for_value_range(
self.value_range.clone(),
self.next_fetch_start..end,
data,
);
self.next_fetch_start = end;
finished_to_end
// let last_loaded_docs_val = self
// .is_multivalue
// .then(|| self.loaded_docs.last_value())
// .flatten();
// let last_loaded_docs_val =
// if self.is_multivalue {
// self.loaded_docs.last_value()
// } else {
// None
// };
// let loaded_docs_data = self.loaded_docs.get_cleared_data();
// self.ip_addr_fast_field.get_docids_for_value_range(
// self.value_range.clone(),
// self.next_fetch_start..end,
// loaded_docs_data,
// );
// // In case of multivalues, we may have an overlap of the same docid between fetching
// blocks if let Some(last_value) = last_loaded_docs_val {
// while self.loaded_docs.current() == Some(last_value) {
// self.loaded_docs.next();
// }
// }
// self.next_fetch_start = end;
// finished_to_end
}
}
impl DocSet for IpRangeDocSet {
impl SingleOrMultivalued for IpRangeDocSet<Arc<dyn Column<Ipv6Addr>>> {
fn num_docs(&self) -> u32 {
self.ip_addrs.num_docs()
}
}
impl SingleOrMultivalued for IpRangeDocSet<Arc<MultiValuedU128FastFieldReader<Ipv6Addr>>> {
fn num_docs(&self) -> u32 {
self.ip_addrs.get_index_reader().num_docs()
}
}
impl<T: Send> DocSet for IpRangeDocSet<T>
where Self: SingleOrMultivalued
{
#[inline]
fn advance(&mut self) -> DocId {
if let Some(docid) = self.loaded_docs.next() {
docid as u32
} else {
if self.next_fetch_start >= self.ip_addr_fast_field.num_vals() as u32 {
if self.next_fetch_start >= self.num_docs() as u32 {
return TERMINATED;
}
self.fetch_block();
@@ -269,7 +320,7 @@ mod tests {
use super::*;
use crate::collector::Count;
use crate::query::QueryParser;
use crate::schema::{Schema, FAST, INDEXED, STORED, STRING};
use crate::schema::{IpAddrOptions, Schema, FAST, INDEXED, STORED, STRING};
use crate::Index;
#[derive(Clone, Debug)]
@@ -280,12 +331,13 @@ mod tests {
fn operation_strategy() -> impl Strategy<Value = Doc> {
prop_oneof![
(0u64..100u64).prop_map(doc_from_id_1),
(1u64..100u64).prop_map(doc_from_id_2),
(0u64..10_000u64).prop_map(doc_from_id_1),
(1u64..10_000u64).prop_map(doc_from_id_2),
]
}
pub fn doc_from_id_1(id: u64) -> Doc {
let id = id * 1000;
Doc {
// ip != id
id: id.to_string(),
@@ -293,6 +345,7 @@ mod tests {
}
}
fn doc_from_id_2(id: u64) -> Doc {
let id = id * 1000;
Doc {
// ip != id
id: (id - 1).to_string(),
@@ -310,6 +363,12 @@ mod tests {
#[test]
fn ip_range_regression1_test() {
let ops = vec![doc_from_id_1(0)];
assert!(test_ip_range_for_docs(ops).is_ok());
}
#[test]
fn ip_range_regression2_test() {
let ops = vec![
doc_from_id_1(52),
doc_from_id_1(63),
@@ -321,14 +380,20 @@ mod tests {
}
#[test]
fn ip_range_regression2_test() {
let ops = vec![doc_from_id_1(0)];
fn ip_range_regression3_test() {
let ops = vec![doc_from_id_1(1), doc_from_id_1(2), doc_from_id_1(3)];
assert!(test_ip_range_for_docs(ops).is_ok());
}
pub fn create_index_from_docs(docs: &[Doc]) -> Index {
let mut schema_builder = Schema::builder();
let ip_field = schema_builder.add_ip_addr_field("ip", INDEXED | STORED | FAST);
let ips_field = schema_builder.add_ip_addr_field(
"ips",
IpAddrOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed(),
);
let text_field = schema_builder.add_text_field("id", STRING | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -338,6 +403,8 @@ mod tests {
for doc in docs.iter() {
index_writer
.add_document(doc!(
ips_field => doc.ip,
ips_field => doc.ip,
ip_field => doc.ip,
text_field => doc.id.to_string(),
))
@@ -361,8 +428,8 @@ mod tests {
.unwrap()
};
let gen_query_inclusive = |from: Ipv6Addr, to: Ipv6Addr| {
format!("ip:[{} TO {}]", &from.to_string(), &to.to_string())
let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
format!("{}:[{} TO {}]", field, &from.to_string(), &to.to_string())
};
let test_sample = |sample_docs: Vec<Doc>| {
@@ -373,7 +440,10 @@ mod tests {
.filter(|doc| (ips[0]..=ips[1]).contains(&doc.ip))
.count();
let query = gen_query_inclusive(ips[0], ips[1]);
let query = gen_query_inclusive("ip", ips[0], ips[1]);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);
let query = gen_query_inclusive("ips", ips[0], ips[1]);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);
// Intersection search
@@ -382,7 +452,20 @@ mod tests {
.iter()
.filter(|doc| (ips[0]..=ips[1]).contains(&doc.ip) && doc.id == id_filter)
.count();
let query = format!("{} AND id:{}", query, &id_filter);
let query = format!(
"{} AND id:{}",
gen_query_inclusive("ip", ips[0], ips[1]),
&id_filter
);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);
// Intersection search on multivalue ip field
let id_filter = sample_docs[0].id.to_string();
let query = format!(
"{} AND id:{}",
gen_query_inclusive("ips", ips[0], ips[1]),
&id_filter
);
assert_eq!(get_num_hits(query_from_text(&query)), expected_num_hits);
};
@@ -402,7 +485,8 @@ mod tests {
#[cfg(all(test, feature = "unstable"))]
mod bench {
use rand::{thread_rng, Rng};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use test::Bencher;
use super::tests::*;
@@ -412,7 +496,7 @@ mod bench {
use crate::Index;
fn get_index_0_to_100() -> Index {
let mut rng = thread_rng();
let mut rng = StdRng::from_seed([1u8; 32]);
let num_vals = 100_000;
let docs: Vec<_> = (0..num_vals)
.map(|_i| {
@@ -424,8 +508,10 @@ mod bench {
"many".to_string() // 90%
};
Doc {
id: id,
id,
// Multiply by 1000, so that we create many buckets in the compact space
// The benches depend on this range to select n-percent of elements with the
// methods below.
ip: Ipv6Addr::from_u128(rng.gen_range(0..100) * 1000),
}
})
@@ -434,22 +520,42 @@ mod bench {
let index = create_index_from_docs(&docs);
index
}
fn get_90_percent() -> RangeInclusive<Ipv6Addr> {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);
start..=end
}
fn get_10_percent() -> RangeInclusive<Ipv6Addr> {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);
start..=end
}
fn get_1_percent() -> RangeInclusive<Ipv6Addr> {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);
start..=end
}
fn excute_query(
start_inclusive: Ipv6Addr,
end_inclusive: Ipv6Addr,
field: &str,
ip_range: RangeInclusive<Ipv6Addr>,
suffix: &str,
index: &Index,
) -> usize {
let gen_query_inclusive = |from: Ipv6Addr, to: Ipv6Addr| {
let gen_query_inclusive = |from: &Ipv6Addr, to: &Ipv6Addr| {
format!(
"ip:[{} TO {}] {}",
"{}:[{} TO {}] {}",
field,
&from.to_string(),
&to.to_string(),
suffix
)
};
let query = gen_query_inclusive(start_inclusive, end_inclusive);
let query = gen_query_inclusive(ip_range.start(), ip_range.end());
let query_from_text = |text: &str| {
QueryParser::for_index(&index, vec![])
.parse_query(text)
@@ -465,131 +571,153 @@ mod bench {
fn bench_ip_range_hit_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);
excute_query(start, end, "", &index)
});
bench.iter(|| excute_query("ip", get_90_percent(), "", &index));
}
#[bench]
fn bench_ip_range_hit_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);
excute_query(start, end, "", &index)
});
bench.iter(|| excute_query("ip", get_10_percent(), "", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);
excute_query(start, end, "", &index)
});
bench.iter(|| excute_query("ip", get_1_percent(), "", &index));
}
#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);
excute_query(start, end, "AND id:few", &index)
});
bench.iter(|| excute_query("ip", get_10_percent(), "AND id:few", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);
excute_query(start, end, "AND id:few", &index)
});
bench.iter(|| excute_query("ip", get_1_percent(), "AND id:few", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);
excute_query(start, end, "AND id:many", &index)
});
bench.iter(|| excute_query("ip", get_1_percent(), "AND id:many", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_1_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(10 * 1000);
let end = Ipv6Addr::from_u128(10 * 1000);
excute_query(start, end, "AND id:veryfew", &index)
});
bench.iter(|| excute_query("ip", get_1_percent(), "AND id:veryfew", &index));
}
#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(10 * 1000);
excute_query(start, end, "AND id:many", &index)
});
bench.iter(|| excute_query("ip", get_10_percent(), "AND id:many", &index));
}
#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_90_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);
excute_query(start, end, "AND id:many", &index)
});
bench.iter(|| excute_query("ip", get_90_percent(), "AND id:many", &index));
}
#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_10_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);
excute_query(start, end, "AND id:few", &index)
});
bench.iter(|| excute_query("ip", get_90_percent(), "AND id:few", &index));
}
#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_1_percent(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| {
let start = Ipv6Addr::from_u128(0);
let end = Ipv6Addr::from_u128(90 * 1000);
excute_query(start, end, "AND id:veryfew", &index)
});
bench.iter(|| excute_query("ip", get_90_percent(), "AND id:veryfew", &index));
}
#[bench]
fn bench_ip_range_hit_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_90_percent(), "", &index));
}
#[bench]
fn bench_ip_range_hit_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_10_percent(), "", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_1_percent(), "", &index));
}
#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_10_percent(), "AND id:few", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_1_percent(), "AND id:few", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_1_percent(), "AND id:many", &index));
}
#[bench]
fn bench_ip_range_hit_1_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_1_percent(), "AND id:veryfew", &index));
}
#[bench]
fn bench_ip_range_hit_10_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_10_percent(), "AND id:many", &index));
}
#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_90_percent(), "AND id:many", &index));
}
#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_90_percent(), "AND id:few", &index));
}
#[bench]
fn bench_ip_range_hit_90_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
let index = get_index_0_to_100();
bench.iter(|| excute_query("ips", get_90_percent(), "AND id:veryfew", &index));
}
}
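A self-contained sketch of the query construction the refactored benches share: `gen_query_inclusive` formats an inclusive range in query-parser syntax, and `get_1_percent` works because the indexed values are multiples of 1000 drawn from `0..100`, so the degenerate range `10_000..=10_000` matches exactly one of the 100 distinct values. The helper name and the test below are illustrative, not part of the diff:

```rust
use std::net::Ipv6Addr;
use std::ops::RangeInclusive;

// Mirrors `gen_query_inclusive` above: an inclusive range in
// query-parser syntax, optionally intersected with another clause.
fn range_query(field: &str, range: &RangeInclusive<Ipv6Addr>, suffix: &str) -> String {
    format!("{}:[{} TO {}] {}", field, range.start(), range.end(), suffix)
}

#[test]
fn builds_inclusive_range_query() {
    // 90 * 1000 = 90_000 = 0x1_5f90, which `Ipv6Addr` renders as `::1:5f90`.
    let range = Ipv6Addr::from_u128(0)..=Ipv6Addr::from_u128(90 * 1000);
    assert_eq!(
        range_query("ip", &range, "AND id:few"),
        "ip:[:: TO ::1:5f90] AND id:few"
    );
}
```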

View File

@@ -5,7 +5,7 @@ use crate::fieldnorm::FieldNormReader;
use crate::postings::SegmentPostings;
use crate::query::bm25::Bm25Weight;
use crate::query::explanation::does_not_match;
use crate::query::weight::for_each_scorer;
use crate::query::weight::{for_each_docset, for_each_scorer};
use crate::query::{Explanation, Scorer, Weight};
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, Term};
@@ -56,6 +56,18 @@ impl Weight for TermWeight {
Ok(())
}
/// Iterates through all of the documents matched by the `DocSet`,
/// passing each doc id to the callback without computing scores.
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let mut scorer = self.specialized_scorer(reader, 1.0)?;
for_each_docset(&mut scorer, callback);
Ok(())
}
/// Calls `callback` with all of the `(doc, score)` for which the
/// score exceeds a given threshold.
///

View File

@@ -94,8 +94,8 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Union<TScorer, TScoreCombin
self.doc = min_doc;
refill(
&mut self.docsets,
&mut *self.bitsets,
&mut *self.scores,
&mut self.bitsets,
&mut self.scores,
min_doc,
);
true
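The `&mut *` to `&mut` change is a clippy cleanup (presumably the `explicit_auto_deref` lint): a `&mut Box<_>` already deref-coerces to the reference type `refill` expects, so the explicit reborrow is redundant. A minimal illustration with invented types:

```rust
fn takes_array(buf: &mut [u64; 4]) {
    buf[0] = 1;
}

fn main() {
    let mut boxed: Box<[u64; 4]> = Box::new([0; 4]);
    takes_array(&mut *boxed); // explicit reborrow through the Box
    takes_array(&mut boxed);  // deref coercion does the same implicitly
}
```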

View File

@@ -1,10 +1,10 @@
use super::Scorer;
use crate::core::SegmentReader;
use crate::query::Explanation;
use crate::{DocId, Score, TERMINATED};
use crate::{DocId, DocSet, Score, TERMINATED};
/// Iterates through all of the document matched by the DocSet
/// `DocSet` and push the scored documents to the collector.
/// Iterates through all of the documents and scores matched by the
/// `DocSet`.
pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
callback: &mut dyn FnMut(DocId, Score),
@@ -16,6 +16,16 @@ pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
}
}
/// Iterates through all of the documents matched by the
/// `DocSet`.
pub(crate) fn for_each_docset<T: DocSet + ?Sized>(docset: &mut T, callback: &mut dyn FnMut(DocId)) {
let mut doc = docset.doc();
while doc != TERMINATED {
callback(doc);
doc = docset.advance();
}
}
/// Calls `callback` with all of the `(doc, score)` for which the
/// score exceeds a given threshold.
///
@@ -78,6 +88,18 @@ pub trait Weight: Send + Sync + 'static {
Ok(())
}
/// Iterates through all of the documents matched by the `DocSet`,
/// passing each doc id to the callback without computing scores.
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let mut docset = self.scorer(reader, 1.0)?;
for_each_docset(docset.as_mut(), callback);
Ok(())
}
/// Calls `callback` with all of the `(doc, score)` for which the
/// score exceeds a given threshold.
///
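For callers, the new default method means a match-only traversal can skip score computation entirely. A hedged usage sketch; the `count_matches` helper is illustrative, only `for_each_no_score` comes from this diff:

```rust
use tantivy::query::Weight;
use tantivy::{DocId, SegmentReader};

// Counts matching documents without evaluating scores, via the
// score-free traversal added to the `Weight` trait above.
fn count_matches(weight: &dyn Weight, reader: &SegmentReader) -> tantivy::Result<u32> {
    let mut count: u32 = 0;
    weight.for_each_no_score(reader, &mut |_doc: DocId| count += 1)?;
    Ok(count)
}
```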

View File

@@ -2,7 +2,7 @@ use std::io;
use common::{BinarySerializable, FixedSize, HasLen};
use super::Decompressor;
use super::{Decompressor, DOC_STORE_VERSION};
use crate::directory::FileSlice;
#[derive(Debug, Clone, PartialEq)]
@@ -17,6 +17,7 @@ pub struct DocStoreFooter {
/// - reserved for future use: 15 bytes
impl BinarySerializable for DocStoreFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
BinarySerializable::serialize(&DOC_STORE_VERSION, writer)?;
BinarySerializable::serialize(&self.offset, writer)?;
BinarySerializable::serialize(&self.decompressor.get_id(), writer)?;
writer.write_all(&[0; 15])?;
@@ -24,6 +25,13 @@ impl BinarySerializable for DocStoreFooter {
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let doc_store_version = u32::deserialize(reader)?;
if doc_store_version != DOC_STORE_VERSION {
panic!(
"actual doc store version: {}, expected: {}",
doc_store_version, DOC_STORE_VERSION
);
}
let offset = u64::deserialize(reader)?;
let compressor_id = u8::deserialize(reader)?;
let mut skip_buf = [0; 15];
@@ -36,7 +44,7 @@ impl BinarySerializable for DocStoreFooter {
}
impl FixedSize for DocStoreFooter {
const SIZE_IN_BYTES: usize = 24;
const SIZE_IN_BYTES: usize = 28;
}
impl DocStoreFooter {
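The bump of `SIZE_IN_BYTES` from 24 to 28 is exactly the 4-byte version field now written first; the accounting, using only the layout visible in this hunk (the constant name below is illustrative):

```rust
// Footer layout written by `serialize` above, in order:
//   doc store version (u32)   -> 4 bytes
//   offset (u64)              -> 8 bytes
//   decompressor id (u8)      -> 1 byte
//   reserved for future use   -> 15 bytes
const DOC_STORE_FOOTER_SIZE: usize = 4 + 8 + 1 + 15; // = 28
```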

View File

@@ -44,6 +44,9 @@ pub use self::reader::{CacheStats, StoreReader};
pub use self::writer::StoreWriter;
mod store_compressor;
/// Doc store version in footer to handle format changes.
pub(crate) const DOC_STORE_VERSION: u32 = 1;
#[cfg(feature = "lz4-compression")]
mod compression_lz4_block;

View File

@@ -229,10 +229,10 @@ fn test_empty_string() -> crate::Result<()> {
let buffer: Vec<u8> = {
let mut term_dictionary_builder = TermDictionaryBuilder::create(vec![]).unwrap();
term_dictionary_builder
.insert(&[], &make_term_info(1_u64))
.insert([], &make_term_info(1_u64))
.unwrap();
term_dictionary_builder
.insert(&[1u8], &make_term_info(2_u64))
.insert([1u8], &make_term_info(2_u64))
.unwrap();
term_dictionary_builder.finish()?
};
@@ -252,7 +252,7 @@ fn stream_range_test_dict() -> crate::Result<TermDictionary> {
let mut term_dictionary_builder = TermDictionaryBuilder::create(Vec::new())?;
for i in 0u8..10u8 {
let number_arr = [i; 1];
term_dictionary_builder.insert(&number_arr, &make_term_info(i as u64))?;
term_dictionary_builder.insert(number_arr, &make_term_info(i as u64))?;
}
term_dictionary_builder.finish()?
};
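Both call styles compile only if `insert` is generic over its key, which these clippy fixes suggest; a hedged sketch of a signature consistent with both (the real signature is not shown in this diff):

```rust
// Hypothetical stand-in for TermDictionaryBuilder::insert's key parameter.
fn insert<K: AsRef<[u8]>>(key: K) {
    let _bytes: &[u8] = key.as_ref();
}

// insert([1u8]) and insert(&[1u8]) both compile: arrays implement
// AsRef<[u8]>, and so do references to them.
```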

View File

@@ -10,6 +10,8 @@
//! assert_eq!(stream.next().unwrap().text, "crafty");
//! assert!(stream.next().is_none());
//! ```
use std::sync::Arc;
use rustc_hash::FxHashSet;
use super::{Token, TokenFilter, TokenStream};
@@ -18,19 +20,15 @@ use crate::tokenizer::BoxTokenStream;
/// `TokenFilter` that removes stop words from a token stream
#[derive(Clone)]
pub struct StopWordFilter {
words: FxHashSet<String>,
words: Arc<FxHashSet<String>>,
}
impl StopWordFilter {
/// Creates a `StopWordFilter` given a list of words to remove
pub fn remove(words: Vec<String>) -> StopWordFilter {
let mut set = FxHashSet::default();
for word in words {
set.insert(word);
pub fn remove<W: IntoIterator<Item = String>>(words: W) -> StopWordFilter {
StopWordFilter {
words: Arc::new(words.into_iter().collect()),
}
StopWordFilter { words: set }
}
fn english() -> StopWordFilter {
@@ -40,12 +38,12 @@ impl StopWordFilter {
"there", "these", "they", "this", "to", "was", "will", "with",
];
StopWordFilter::remove(words.iter().map(|&s| s.to_string()).collect())
StopWordFilter::remove(words.iter().map(|&s| s.to_string()))
}
}
pub struct StopWordFilterStream<'a> {
words: FxHashSet<String>,
words: Arc<FxHashSet<String>>,
tail: BoxTokenStream<'a>,
}
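With `remove` generic over `IntoIterator<Item = String>` and the word set behind an `Arc`, construction needs no intermediate `Vec`, and each `StopWordFilterStream` clones a pointer rather than the whole set. A usage sketch, assuming the usual `tantivy::tokenizer` re-exports:

```rust
use tantivy::tokenizer::{SimpleTokenizer, StopWordFilter, TextAnalyzer};

fn build_analyzer() -> TextAnalyzer {
    // Any IntoIterator<Item = String> is accepted; no intermediate Vec needed.
    let stop_words = ["the", "a", "an"].iter().map(|s| s.to_string());
    TextAnalyzer::from(SimpleTokenizer).filter(StopWordFilter::remove(stop_words))
}
```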