Mirror of https://github.com/quickwit-oss/tantivy.git
Synced 2026-01-03 07:42:54 +00:00

Compare commits: 0.25.0 ... clippy-and (1 commit, dc0aa3d734)
@@ -2,17 +2,13 @@ Tantivy 0.25
================================

## Bugfixes
- fix union performance regression in tantivy 0.24 [#2663](https://github.com/quickwit-oss/tantivy/pull/2663)(@PSeitz)
- fix union performance regression in tantivy 0.24 [#2663](https://github.com/quickwit-oss/tantivy/pull/2663)(@PSeitz-dd)
- make zstd optional in sstable [#2633](https://github.com/quickwit-oss/tantivy/pull/2633)(@Parth)
- Fix TopDocs::order_by_string_fast_field for asc order [#2672](https://github.com/quickwit-oss/tantivy/pull/2672)(@stuhood @PSeitz)

## Features/Improvements
- add docs/example and Vec<u32> values to sstable [#2660](https://github.com/quickwit-oss/tantivy/pull/2660)(@PSeitz)
- Add string fast field support to `TopDocs`. [#2642](https://github.com/quickwit-oss/tantivy/pull/2642)(@stuhood)
- update edition to 2024 [#2620](https://github.com/quickwit-oss/tantivy/pull/2620)(@PSeitz)
- Allow optional spaces between the field name and the value in the query parser [#2678](https://github.com/quickwit-oss/tantivy/pull/2678)(@Darkheir)
- Support mixed field types in query parser [#2676](https://github.com/quickwit-oss/tantivy/pull/2676)(@trinity-1686a)
- Add per-field size details [#2679](https://github.com/quickwit-oss/tantivy/pull/2679)(@fulmicoton)

Tantivy 0.24
================================
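For context on the query-parser entry ([#2678](https://github.com/quickwit-oss/tantivy/pull/2678)): whitespace is now accepted around the `:` that separates a field name from its value, and the new `test_space_before_value` test later in this diff exercises exactly that. Below is a minimal usage sketch, not taken from the diff; it assumes the standard `QueryParser` API, and the schema, field name, and query string are illustrative:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "happy tax payer"))?;
    writer.commit()?;

    // "title : happy" (with spaces around ':') now parses as a field-scoped term.
    let parser = QueryParser::for_index(&index, vec![title]);
    let query = parser.parse_query("title : happy")?;

    let searcher = index.reader()?.searcher();
    let hits = searcher.search(&query, &TopDocs::with_limit(10))?;
    assert_eq!(hits.len(), 1);
    Ok(())
}
```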
Cargo.toml (16 changed lines)
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.25.0"
version = "0.24.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -57,13 +57,13 @@ measure_time = "0.9.0"
arc-swap = "1.5.0"
bon = "3.3.1"

columnar = { version = "0.6", path = "./columnar", package = "tantivy-columnar" }
sstable = { version = "0.6", path = "./sstable", package = "tantivy-sstable", optional = true }
stacker = { version = "0.6", path = "./stacker", package = "tantivy-stacker" }
query-grammar = { version = "0.25.0", path = "./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version = "0.9", path = "./bitpacker" }
common = { version = "0.10", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version = "0.6", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
columnar = { version = "0.5", path = "./columnar", package = "tantivy-columnar" }
sstable = { version = "0.5", path = "./sstable", package = "tantivy-sstable", optional = true }
stacker = { version = "0.5", path = "./stacker", package = "tantivy-stacker" }
query-grammar = { version = "0.24.0", path = "./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version = "0.8", path = "./bitpacker" }
common = { version = "0.9", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version = "0.5", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
sketches-ddsketch = { version = "0.3.0", features = ["use_serde"] }
hyperloglogplus = { version = "0.4.1", features = ["const-loop"] }
futures-util = { version = "0.3.28", optional = true }
@@ -1,6 +1,6 @@
[package]
name = "tantivy-bitpacker"
version = "0.9.0"
version = "0.8.0"
edition = "2024"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"

@@ -1,3 +1,7 @@
// manual divceil actually generates code that is not optimal (to accept the full range of u32) and
// perf matters here.
#![allow(clippy::manual_div_ceil)]

use std::io;
use std::ops::{Range, RangeInclusive};

@@ -140,10 +140,9 @@ impl BlockedBitpacker {
    pub fn iter(&self) -> impl Iterator<Item = u64> + '_ {
        // todo performance: we could decompress a whole block and cache it instead
        let bitpacked_elems = self.offset_and_bits.len() * BLOCK_SIZE;
        let iter = (0..bitpacked_elems)
        (0..bitpacked_elems)
            .map(move |idx| self.get(idx))
            .chain(self.buffer.iter().cloned());
        iter
            .chain(self.buffer.iter().cloned())
    }
}

@@ -1,3 +1,5 @@
// #[allow(clippy::manual_div_ceil)]

mod bitpacker;
mod blocked_bitpacker;
mod filter_vec;
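For context on the `#![allow(clippy::manual_div_ceil)]` additions above: the lint asks for `u32::div_ceil` in place of the open-coded `(x + 63) / 64` rounding-up division (the same pattern appears in `num_buckets` further down in this diff). A small illustrative sketch, not taken from the diff, contrasting the two forms:

```rust
/// Open-coded rounding-up division, the form kept by this diff.
/// It assumes `max_val + 63` does not overflow u32, which lets the
/// compiler emit a simple add + shift on this hot path.
fn num_buckets_manual(max_val: u32) -> u32 {
    (max_val + 63) / 64
}

/// The form `clippy::manual_div_ceil` would suggest. `div_ceil` is defined
/// for the full u32 range, which (per the comment in the diff) generates
/// slightly less favorable code here.
fn num_buckets_div_ceil(max_val: u32) -> u32 {
    max_val.div_ceil(64)
}

fn main() {
    // Both forms agree wherever the manual form does not overflow.
    for v in [0u32, 1, 63, 64, 65, 1_000_000] {
        assert_eq!(num_buckets_manual(v), num_buckets_div_ceil(v));
    }
}
```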
@@ -1,6 +1,6 @@
[package]
name = "tantivy-columnar"
version = "0.6.0"
version = "0.5.0"
edition = "2024"
license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy"
@@ -12,10 +12,10 @@ categories = ["database-implementations", "data-structures", "compression"]
itertools = "0.14.0"
fastdivide = "0.4.0"

stacker = { version= "0.6", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.6", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.10", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.9", path = "../bitpacker/" }
stacker = { version= "0.5", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.5", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.9", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.8", path = "../bitpacker/" }
serde = "1.0.152"
downcast-rs = "2.0.1"
@@ -33,29 +33,6 @@ harness = false
name = "bench_access"
harness = false

[[bench]]
name = "bench_first_vals"
harness = false

[[bench]]
name = "bench_values_u64"
harness = false

[[bench]]
name = "bench_values_u128"
harness = false

[[bench]]
name = "bench_create_column_values"
harness = false

[[bench]]
name = "bench_column_values_get"
harness = false

[[bench]]
name = "bench_optional_index"
harness = false

[features]
unstable = []
zstd-compression = ["sstable/zstd-compression"]
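The `[[bench]]` targets listed above are declared with `harness = false`: each bench file supplies its own `main` and drives binggan's `InputGroup`, the pattern used by the bench files in this diff. Below is a minimal sketch of that shape, using only the binggan calls that already appear in this diff (`new_with_inputs`, `register`, `run`, `black_box`); the input names and workload are illustrative:

```rust
use binggan::{black_box, InputGroup};

fn main() {
    // Inputs are (name, data) pairs; binggan runs each registered bench per input.
    let inputs: Vec<(String, Vec<u64>)> = vec![
        ("small".to_string(), (0..1_000u64).collect()),
        ("large".to_string(), (0..100_000u64).collect()),
    ];
    let mut group: InputGroup<Vec<u64>> = InputGroup::new_with_inputs(inputs);

    group.register("sum", |data: &Vec<u64>| {
        let mut sum = 0u64;
        for v in data {
            sum = sum.wrapping_add(*v);
        }
        // Prevent the compiler from optimizing the loop away.
        black_box(sum);
    });

    group.run();
}
```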
@@ -19,7 +19,7 @@ fn main() {
|
||||
|
||||
let mut add_card = |card1: Card| {
|
||||
inputs.push((
|
||||
card1.to_string(),
|
||||
format!("{card1}"),
|
||||
generate_columnar_and_open(card1, NUM_DOCS),
|
||||
));
|
||||
};
|
||||
@@ -50,7 +50,6 @@ fn bench_group(mut runner: InputGroup<Column>) {
|
||||
let mut buffer = vec![None; BLOCK_SIZE];
|
||||
for i in (0..NUM_DOCS).step_by(BLOCK_SIZE) {
|
||||
// fill docs
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for idx in 0..BLOCK_SIZE {
|
||||
docs[idx] = idx as u32 + i;
|
||||
}
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use tantivy_columnar::ColumnValues;
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_column_values};
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55_000_u64)
|
||||
.map(|num| num + rng.r#gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
|
||||
type Col = Arc<dyn ColumnValues<u64>>;
|
||||
|
||||
fn main() {
|
||||
let data = get_data();
|
||||
let inputs: Vec<(String, Col)> = vec![
|
||||
(
|
||||
"bitpacked".to_string(),
|
||||
serialize_and_load_u64_based_column_values(&data.as_slice(), &[CodecType::Bitpacked]),
|
||||
),
|
||||
(
|
||||
"linear".to_string(),
|
||||
serialize_and_load_u64_based_column_values(&data.as_slice(), &[CodecType::Linear]),
|
||||
),
|
||||
(
|
||||
"blockwise_linear".to_string(),
|
||||
serialize_and_load_u64_based_column_values(
|
||||
&data.as_slice(),
|
||||
&[CodecType::BlockwiseLinear],
|
||||
),
|
||||
),
|
||||
];
|
||||
|
||||
let mut group: InputGroup<Col> = InputGroup::new_with_inputs(inputs);
|
||||
|
||||
group.register("fastfield_get", |col: &Col| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
sum = sum.wrapping_add(col.get_val(pos as u32));
|
||||
}
|
||||
black_box(sum);
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_u64_based_column_values};
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55_000_u64)
|
||||
.map(|num| num + rng.r#gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let data = get_data();
|
||||
let mut group: InputGroup<(CodecType, Vec<u64>)> = InputGroup::new_with_inputs(vec![
|
||||
(
|
||||
"bitpacked codec".to_string(),
|
||||
(CodecType::Bitpacked, data.clone()),
|
||||
),
|
||||
(
|
||||
"linear codec".to_string(),
|
||||
(CodecType::Linear, data.clone()),
|
||||
),
|
||||
(
|
||||
"blockwise linear codec".to_string(),
|
||||
(CodecType::BlockwiseLinear, data.clone()),
|
||||
),
|
||||
]);
|
||||
|
||||
group.register("serialize column_values", |data| {
|
||||
let mut buffer = Vec::new();
|
||||
serialize_u64_based_column_values(&data.1.as_slice(), &[data.0], &mut buffer).unwrap();
|
||||
black_box(buffer.len());
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
@@ -1,9 +1,12 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_column_values};
|
||||
use tantivy_columnar::*;
|
||||
use test::{Bencher, black_box};
|
||||
|
||||
struct Columns {
|
||||
pub optional: Column,
|
||||
@@ -65,45 +68,88 @@ pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn Colu
|
||||
serialize_and_load_u64_based_column_values(&column, &[codec_type])
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let Columns {
|
||||
optional,
|
||||
full,
|
||||
multi,
|
||||
} = get_test_columns();
|
||||
|
||||
let inputs = vec![
|
||||
("full".to_string(), full),
|
||||
("optional".to_string(), optional),
|
||||
("multi".to_string(), multi),
|
||||
];
|
||||
|
||||
let mut group = InputGroup::new_with_inputs(inputs);
|
||||
|
||||
group.register("first_full_scan", |column| {
|
||||
fn run_bench_on_column_full_scan(b: &mut Bencher, column: Column) {
|
||||
let num_iter = black_box(NUM_VALUES);
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for i in 0..NUM_VALUES as u32 {
|
||||
for i in 0..num_iter as u32 {
|
||||
let val = column.first(i);
|
||||
sum += val.unwrap_or(0);
|
||||
}
|
||||
black_box(sum);
|
||||
sum
|
||||
});
|
||||
|
||||
group.register("first_block_fetch", |column| {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
}
|
||||
fn run_bench_on_column_block_fetch(b: &mut Bencher, column: Column) {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
b.iter(move || {
|
||||
column.first_vals(&fetch_docids, &mut block);
|
||||
black_box(block[0]);
|
||||
block[0]
|
||||
});
|
||||
|
||||
group.register("first_block_single_calls", |column| {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
}
|
||||
fn run_bench_on_column_block_single_calls(b: &mut Bencher, column: Column) {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
b.iter(move || {
|
||||
for i in 0..fetch_docids.len() {
|
||||
block[i] = column.first(fetch_docids[i]);
|
||||
}
|
||||
black_box(block[0]);
|
||||
block[0]
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
/// Column first method
|
||||
#[bench]
|
||||
fn bench_get_first_on_full_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_first_on_optional_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_first_on_multi_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
/// Block fetch column accessor
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_optional_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_multi_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_full_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_optional_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_multi_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_full_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use tantivy_columnar::column_index::{OptionalIndex, Set};
|
||||
|
||||
const TOTAL_NUM_VALUES: u32 = 1_000_000;
|
||||
|
||||
fn gen_optional_index(fill_ratio: f64) -> OptionalIndex {
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let vals: Vec<u32> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| *val)
|
||||
.map(|(pos, _)| pos as u32)
|
||||
.collect();
|
||||
OptionalIndex::for_test(TOTAL_NUM_VALUES, &vals)
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
start: u32,
|
||||
end: u32,
|
||||
avg_step_size: u32,
|
||||
avg_deviation: u32,
|
||||
) -> impl Iterator<Item = u32> {
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let mut current = start;
|
||||
std::iter::from_fn(move || {
|
||||
current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
|
||||
if current >= end { None } else { Some(current) }
|
||||
})
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
}
|
||||
|
||||
fn walk_over_data(codec: &OptionalIndex, avg_step_size: u32) -> Option<u32> {
|
||||
walk_over_data_from_positions(
|
||||
codec,
|
||||
random_range_iterator(0, TOTAL_NUM_VALUES, avg_step_size, 0),
|
||||
)
|
||||
}
|
||||
|
||||
fn walk_over_data_from_positions(
|
||||
codec: &OptionalIndex,
|
||||
positions: impl Iterator<Item = u32>,
|
||||
) -> Option<u32> {
|
||||
let mut dense_idx: Option<u32> = None;
|
||||
for idx in positions {
|
||||
dense_idx = dense_idx.or(codec.rank_if_exists(idx));
|
||||
}
|
||||
dense_idx
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Build separate inputs for each fill ratio.
|
||||
let inputs: Vec<(String, OptionalIndex)> = vec![
|
||||
("fill=1%".to_string(), gen_optional_index(0.01)),
|
||||
("fill=5%".to_string(), gen_optional_index(0.05)),
|
||||
("fill=10%".to_string(), gen_optional_index(0.10)),
|
||||
("fill=50%".to_string(), gen_optional_index(0.50)),
|
||||
("fill=90%".to_string(), gen_optional_index(0.90)),
|
||||
];
|
||||
|
||||
let mut group: InputGroup<OptionalIndex> = InputGroup::new_with_inputs(inputs);
|
||||
|
||||
// Translate orig->codec (rank_if_exists) with sampling
|
||||
group.register("orig_to_codec_10pct_hit", |codec: &OptionalIndex| {
|
||||
black_box(walk_over_data(codec, 100));
|
||||
});
|
||||
group.register("orig_to_codec_1pct_hit", |codec: &OptionalIndex| {
|
||||
black_box(walk_over_data(codec, 1000));
|
||||
});
|
||||
group.register("orig_to_codec_full_scan", |codec: &OptionalIndex| {
|
||||
black_box(walk_over_data_from_positions(codec, 0..TOTAL_NUM_VALUES));
|
||||
});
|
||||
|
||||
// Translate codec->orig (select/select_batch) on sampled ranks
|
||||
fn bench_translate_codec_to_orig_util(codec: &OptionalIndex, percent_hit: f32) {
|
||||
let num_non_nulls = codec.num_non_nulls();
|
||||
let idxs: Vec<u32> = if percent_hit == 100.0f32 {
|
||||
(0..num_non_nulls).collect()
|
||||
} else {
|
||||
n_percent_step_iterator(percent_hit, num_non_nulls).collect()
|
||||
};
|
||||
let mut output = vec![0u32; idxs.len()];
|
||||
output.copy_from_slice(&idxs[..]);
|
||||
codec.select_batch(&mut output);
|
||||
black_box(output);
|
||||
}
|
||||
|
||||
group.register("codec_to_orig_0.005pct_hit", |codec: &OptionalIndex| {
|
||||
bench_translate_codec_to_orig_util(codec, 0.005);
|
||||
});
|
||||
group.register("codec_to_orig_10pct_hit", |codec: &OptionalIndex| {
|
||||
bench_translate_codec_to_orig_util(codec, 10.0);
|
||||
});
|
||||
group.register("codec_to_orig_full_scan", |codec: &OptionalIndex| {
|
||||
bench_translate_codec_to_orig_util(codec, 100.0);
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
@@ -1,12 +1,15 @@
|
||||
#![feature(test)]
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use common::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::{Rng, SeedableRng, random};
|
||||
use tantivy_columnar::ColumnValues;
|
||||
use test::Bencher;
|
||||
extern crate test;
|
||||
|
||||
// TODO does this make sense for IPv6 ?
|
||||
fn generate_random() -> Vec<u64> {
|
||||
@@ -44,77 +47,78 @@ fn get_data_50percent_item() -> Vec<u128> {
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
data.shuffle(&mut rng);
|
||||
data.iter().map(|el| *el as u128).collect::<Vec<_>>()
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
|
||||
fn main() {
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column_range = get_u128_column_from_data(&data);
|
||||
let column_random = get_u128_column_random();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
struct Inputs {
|
||||
data: Vec<u128>,
|
||||
column_range: Arc<dyn ColumnValues<u128>>,
|
||||
column_random: Arc<dyn ColumnValues<u128>>,
|
||||
}
|
||||
|
||||
let inputs = Inputs {
|
||||
data,
|
||||
column_range,
|
||||
column_random,
|
||||
};
|
||||
let mut group: InputGroup<Inputs> =
|
||||
InputGroup::new_with_inputs(vec![("u128 benches".to_string(), inputs)]);
|
||||
|
||||
group.register(
|
||||
"intfastfield_getrange_u128_50percent_hit",
|
||||
|inp: &Inputs| {
|
||||
let mut positions = Vec::new();
|
||||
inp.column_range.get_row_ids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..inp.data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register("intfastfield_getrange_u128_single_hit", |inp: &Inputs| {
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
inp.column_range.get_row_ids_for_value_range(
|
||||
column.get_row_ids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(
|
||||
*SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
|
||||
0..inp.data.len() as u32,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
black_box(positions.len());
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
group.register("intfastfield_getrange_u128_hit_all", |inp: &Inputs| {
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
inp.column_range.get_row_ids_for_value_range(
|
||||
0..=u128::MAX,
|
||||
0..inp.data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
black_box(positions.len());
|
||||
column.get_row_ids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U128 RANGE END
|
||||
|
||||
group.register("intfastfield_scan_all_fflookup_u128", |inp: &Inputs| {
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u128;
|
||||
for i in 0u64..inp.column_random.num_vals() as u64 {
|
||||
a += inp.column_random.get_val(i as u32);
|
||||
for i in 0u64..column.num_vals() as u64 {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
black_box(a);
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
group.register("intfastfield_jumpy_stride5_u128", |inp: &Inputs| {
|
||||
let n = inp.column_random.num_vals();
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let n = column.num_vals();
|
||||
let mut a = 0u128;
|
||||
for i in (0..n / 5).map(|val| val * 5) {
|
||||
a += inp.column_random.get_val(i);
|
||||
a += column.get_val(i);
|
||||
}
|
||||
black_box(a);
|
||||
a
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_column_values};
|
||||
use tantivy_columnar::*;
|
||||
use test::Bencher;
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
@@ -24,11 +27,37 @@ pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn Colu
|
||||
serialize_and_load_u64_based_column_values(&column, &[codec_type])
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_fflookup_bitpacked(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
|
||||
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
@@ -40,122 +69,135 @@ fn get_data_50percent_item() -> Vec<u128> {
|
||||
data.push(SINGLE_ITEM);
|
||||
|
||||
data.shuffle(&mut rng);
|
||||
data.iter().map(|el| *el as u128).collect::<Vec<_>>()
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
|
||||
type VecCol = (Vec<u64>, Arc<dyn ColumnValues<u64>>);
|
||||
// U64 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(
|
||||
FIFTY_PERCENT_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_access() {
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U64 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let column_perm: Arc<dyn ColumnValues<u64>> =
|
||||
serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
|
||||
let permutation_gcd = generate_permutation_gcd();
|
||||
let column_perm_gcd: Arc<dyn ColumnValues<u64>> =
|
||||
serialize_and_load(&permutation_gcd, CodecType::Bitpacked);
|
||||
|
||||
let mut group: InputGroup<VecCol> = InputGroup::new_with_inputs(vec![
|
||||
(
|
||||
"access".to_string(),
|
||||
(permutation.clone(), column_perm.clone()),
|
||||
),
|
||||
(
|
||||
"access_gcd".to_string(),
|
||||
(permutation_gcd.clone(), column_perm_gcd.clone()),
|
||||
),
|
||||
]);
|
||||
|
||||
group.register("stride7_vec", |inp: &VecCol| {
|
||||
let n = inp.0.len();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += inp.0[i];
|
||||
a += permutation[i as usize];
|
||||
}
|
||||
black_box(a);
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
group.register("fullscan_vec", |inp: &VecCol| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..inp.0.len() {
|
||||
a += inp.0[i];
|
||||
}
|
||||
black_box(a);
|
||||
});
|
||||
|
||||
group.register("stride7_column_values", |inp: &VecCol| {
|
||||
let n = inp.1.num_vals() as usize;
|
||||
let mut a = 0u64;
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += inp.1.get_val(i as u32);
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
black_box(a);
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
group.register("fullscan_column_values", |inp: &VecCol| {
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
let column_ref = column.as_ref();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u32..n as u32 {
|
||||
a += column_ref.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
let n = inp.1.num_vals() as usize;
|
||||
for i in 0..n {
|
||||
a += inp.1.get_val(i as u32);
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
black_box(a);
|
||||
a
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
fn bench_range() {
|
||||
let data_50 = get_data_50percent_item();
|
||||
let data_u64 = data_50.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column_data: Arc<dyn ColumnValues<u64>> =
|
||||
serialize_and_load(&data_u64, CodecType::Bitpacked);
|
||||
|
||||
let mut group: InputGroup<Arc<dyn ColumnValues<u64>>> =
|
||||
InputGroup::new_with_inputs(vec![("dist_50pct_item".to_string(), column_data.clone())]);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_50percent_hit",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(FIFTY_PERCENT_RANGE, 0..col.num_vals(), &mut positions);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_1percent_hit",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..col.num_vals(),
|
||||
&mut positions,
|
||||
);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_single_hit",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(SINGLE_ITEM_RANGE, 0..col.num_vals(), &mut positions);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_hit_all",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(0..=u64::MAX, 0..col.num_vals(), &mut positions);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
fn main() {
|
||||
bench_access();
|
||||
bench_range();
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
@@ -219,3 +219,170 @@ fn test_optional_index_for_tests() {
|
||||
assert!(!optional_index.contains(3));
|
||||
assert_eq!(optional_index.num_docs(), 4);
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use test::Bencher;
|
||||
|
||||
use super::*;
|
||||
|
||||
const TOTAL_NUM_VALUES: u32 = 1_000_000;
|
||||
fn gen_bools(fill_ratio: f64) -> OptionalIndex {
|
||||
let mut out = Vec::new();
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| *val)
|
||||
.map(|(pos, _)| pos as RowId)
|
||||
.collect();
|
||||
serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
|
||||
|
||||
open_optional_index(OwnedBytes::new(out)).unwrap()
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
start: u32,
|
||||
end: u32,
|
||||
avg_step_size: u32,
|
||||
avg_deviation: u32,
|
||||
) -> impl Iterator<Item = u32> {
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let mut current = start;
|
||||
std::iter::from_fn(move || {
|
||||
current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
|
||||
if current >= end { None } else { Some(current) }
|
||||
})
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
}
|
||||
|
||||
fn walk_over_data(codec: &OptionalIndex, avg_step_size: u32) -> Option<u32> {
|
||||
walk_over_data_from_positions(
|
||||
codec,
|
||||
random_range_iterator(0, TOTAL_NUM_VALUES, avg_step_size, 0),
|
||||
)
|
||||
}
|
||||
|
||||
fn walk_over_data_from_positions(
|
||||
codec: &OptionalIndex,
|
||||
positions: impl Iterator<Item = u32>,
|
||||
) -> Option<u32> {
|
||||
let mut dense_idx: Option<u32> = None;
|
||||
for idx in positions {
|
||||
dense_idx = dense_idx.or(codec.rank_if_exists(idx));
|
||||
}
|
||||
dense_idx
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_1percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.01f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_5percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.05f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_5percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.05f64);
|
||||
bench.iter(|| walk_over_data(&codec, 1000));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_1percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.01f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_10percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.1f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_90percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.9f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_10percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.1f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_50percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.5f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_90percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.9f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 0.005f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_10percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.1f64, 0.005f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 10f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_full_scan(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 100f32, bench);
|
||||
}
|
||||
|
||||
fn bench_translate_codec_to_orig_util(
|
||||
percent_filled: f64,
|
||||
percent_hit: f32,
|
||||
bench: &mut Bencher,
|
||||
) {
|
||||
let codec = gen_bools(percent_filled);
|
||||
let num_non_nulls = codec.num_non_nulls();
|
||||
let idxs: Vec<u32> = if percent_hit == 100.0f32 {
|
||||
(0..num_non_nulls).collect()
|
||||
} else {
|
||||
n_percent_step_iterator(percent_hit, num_non_nulls).collect()
|
||||
};
|
||||
let mut output = vec![0u32; idxs.len()];
|
||||
bench.iter(|| {
|
||||
output.copy_from_slice(&idxs[..]);
|
||||
codec.select_batch(&mut output);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_90percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.9f64, 0.005, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.9f64, 100.0f32, bench);
|
||||
}
|
||||
}
|
||||
|
||||
139
columnar/src/column_values/bench.rs
Normal file
139
columnar/src/column_values/bench.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use test::{self, Bencher};
|
||||
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::*;
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55000_u64)
|
||||
.map(|num| num + rng.r#gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
fn compute_stats(vals: impl Iterator<Item = u64>) -> ColumnStats {
|
||||
let mut stats_collector = StatsCollector::default();
|
||||
for val in vals {
|
||||
stats_collector.collect(val);
|
||||
}
|
||||
stats_collector.stats()
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
|
||||
fn get_reader_for_bench<Codec: ColumnCodec>(data: &[u64]) -> Codec::ColumnValues {
|
||||
let mut bytes = Vec::new();
|
||||
let stats = compute_stats(data.iter().cloned());
|
||||
let mut codec_serializer = Codec::estimator();
|
||||
for val in data {
|
||||
codec_serializer.collect(*val);
|
||||
}
|
||||
codec_serializer
|
||||
.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes)
|
||||
.unwrap();
|
||||
|
||||
Codec::load(OwnedBytes::new(bytes)).unwrap()
|
||||
}
|
||||
|
||||
fn bench_get<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = get_reader_for_bench::<Codec>(data);
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u32);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn bench_get_dynamic_helper(b: &mut Bencher, col: Arc<dyn ColumnValues>) {
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u32);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_get_dynamic<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = Arc::new(get_reader_for_bench::<Codec>(data));
|
||||
bench_get_dynamic_helper(b, col);
|
||||
}
|
||||
fn bench_create<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let stats = compute_stats(data.iter().cloned());
|
||||
|
||||
let mut bytes = Vec::new();
|
||||
b.iter(|| {
|
||||
bytes.clear();
|
||||
let mut codec_serializer = Codec::estimator();
|
||||
for val in data.iter().take(1024) {
|
||||
codec_serializer.collect(*val);
|
||||
}
|
||||
|
||||
codec_serializer.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes)
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
@@ -242,3 +242,6 @@ impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnV
            .get_row_ids_for_value_range(range, doc_id_range, positions)
    }
}

#[cfg(all(test, feature = "unstable"))]
mod bench;

@@ -1,3 +1,5 @@
#![allow(clippy::manual_div_ceil)]

mod column_type;
mod format_version;
mod merge;

@@ -17,10 +17,15 @@
//! column.
//! - [column_values]: Stores the values of a column in a dense format.

// #![cfg_attr(all(feature = "unstable", test), feature(test))]

#[cfg(test)]
#[macro_use]
extern crate more_asserts;

#[cfg(all(test, feature = "unstable"))]
extern crate test;

use std::fmt::Display;
use std::io;
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-common"
|
||||
version = "0.10.0"
|
||||
version = "0.9.0"
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2024"
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::ByteCount;
|
||||
pub struct TinySet(u64);
|
||||
|
||||
impl fmt::Debug for TinySet {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.into_iter().collect::<Vec<u32>>().fmt(f)
|
||||
}
|
||||
}
|
||||
@@ -182,6 +182,7 @@ pub struct BitSet {
|
||||
max_value: u32,
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn num_buckets(max_val: u32) -> u32 {
|
||||
(max_val + 63u32) / 64u32
|
||||
}
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
#![allow(clippy::len_without_is_empty)]
|
||||
// manual divceil actually generates code that is not optimal (to accept the full range of u32) and
|
||||
// perf matters here.
|
||||
#![allow(clippy::len_without_is_empty, clippy::manual_div_ceil)]
|
||||
|
||||
use std::ops::Deref;
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
version = "0.25.0"
version = "0.24.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
|
||||
|
||||
@@ -36,7 +36,7 @@ fn field_name(inp: &str) -> IResult<&str, String> {
            alt((first_char, escape_sequence())),
            many0(alt((simple_char, escape_sequence(), char('\\')))),
        )),
        tuple((multispace0, char(':'), multispace0)),
        char(':'),
        ),
        |(first_char, next)| once(first_char).chain(next).collect(),
    )(inp)
@@ -305,15 +305,14 @@ fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
    let (inp, (field_name, _, _, _)) =
        tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");

    let res = delimited_infallible(
    delimited_infallible(
        nothing,
        map(ast_infallible, |(mut ast, errors)| {
            ast.set_default_field(field_name.to_string());
            (ast, errors)
        }),
        opt_i_err(char(')'), "expected ')'"),
    )(inp);
    res
    )(inp)
}

fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
@@ -1283,10 +1282,6 @@ mod test {
            super::field_name("~my~field:a"),
            Ok(("a", "~my~field".to_string()))
        );
        assert_eq!(
            super::field_name(".my.field.name : a"),
            Ok(("a", ".my.field.name".to_string()))
        );
        for special_char in SPECIAL_CHARS.iter() {
            let query = &format!("\\{special_char}my\\{special_char}field:a");
            assert_eq!(
@@ -1693,15 +1688,4 @@ mod test {
    fn test_invalid_field() {
        test_is_parse_err(r#"!bc:def"#, "!bc:def");
    }

    #[test]
    fn test_space_before_value() {
        test_parse_query_to_ast_helper("field : a", r#""field":a"#);
        test_parse_query_to_ast_helper("field: a", r#""field":a"#);
        test_parse_query_to_ast_helper("field :a", r#""field":a"#);
        test_parse_query_to_ast_helper(
            "field : 'happy tax payer' AND other_field : 1",
            r#"(+"field":'happy tax payer' +"other_field":1)"#,
        );
    }
}
|
||||
|
||||
@@ -484,7 +484,6 @@ impl FacetCounts {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::BTreeSet;
|
||||
use std::iter;
|
||||
|
||||
use columnar::Dictionary;
|
||||
use rand::distributions::Uniform;
|
||||
|
||||
@@ -1293,220 +1293,6 @@ mod tests {
|
||||
assert_eq!(page_0, &page_2[..page_0.len()]);
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(20))]
|
||||
/// Build multiple segments with equal-scoring docs and verify stable ordering
|
||||
/// across pages when increasing limit or offset.
|
||||
#[test]
|
||||
fn proptest_stable_ordering_across_segments_with_pagination(
|
||||
docs_per_segment in proptest::collection::vec(1usize..50, 2..5)
|
||||
) {
|
||||
use crate::indexer::NoMergePolicy;
|
||||
|
||||
// Build an index with multiple segments; all docs will have the same score using AllQuery.
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut writer = index.writer_for_tests().unwrap();
|
||||
writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
|
||||
for num_docs in &docs_per_segment {
|
||||
for _ in 0..*num_docs {
|
||||
writer.add_document(doc!(text => "x")).unwrap();
|
||||
}
|
||||
writer.commit().unwrap();
|
||||
}
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
|
||||
let total_docs: usize = docs_per_segment.iter().sum();
|
||||
// Full result set, first assert all scores are identical.
|
||||
let full_with_scores: Vec<(Score, DocAddress)> = searcher
|
||||
.search(&AllQuery, &TopDocs::with_limit(total_docs))
|
||||
.unwrap();
|
||||
// Sanity: at least one document was returned.
|
||||
prop_assert!(!full_with_scores.is_empty());
|
||||
let first_score = full_with_scores[0].0;
|
||||
prop_assert!(full_with_scores.iter().all(|(score, _)| *score == first_score));
|
||||
|
||||
// Keep only the addresses for the remaining checks.
|
||||
let full: Vec<DocAddress> = full_with_scores
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
|
||||
// Sanity: we actually created multiple segments and have documents.
|
||||
prop_assert!(docs_per_segment.len() >= 2);
|
||||
prop_assert!(total_docs >= 2);
|
||||
|
||||
// 1) Increasing limit should preserve prefix ordering.
|
||||
for k in 1..=total_docs {
|
||||
let page: Vec<DocAddress> = searcher
|
||||
.search(&AllQuery, &TopDocs::with_limit(k))
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
prop_assert_eq!(page, full[..k].to_vec());
|
||||
}
|
||||
|
||||
// 2) Offset + limit pages should always match the corresponding slice.
|
||||
// For each offset, check three representative page sizes:
|
||||
// - first page (size 1)
|
||||
// - a middle page (roughly half of remaining)
|
||||
// - the last page (size = remaining)
|
||||
for offset in 0..total_docs {
|
||||
let remaining = total_docs - offset;
|
||||
|
||||
let assert_page_eq = |limit: usize| -> proptest::test_runner::TestCaseResult {
|
||||
let page: Vec<DocAddress> = searcher
|
||||
.search(&AllQuery, &TopDocs::with_limit(limit).and_offset(offset))
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
prop_assert_eq!(page, full[offset..offset + limit].to_vec());
|
||||
Ok(())
|
||||
};
|
||||
|
||||
// Smallest page.
|
||||
assert_page_eq(1)?;
|
||||
// A middle-sized page (dedupes to 1 if remaining == 1).
|
||||
assert_page_eq((remaining / 2).max(1))?;
|
||||
// Largest page for this offset.
|
||||
assert_page_eq(remaining)?;
|
||||
}
|
||||
|
||||
// 3) Concatenating fixed-size pages by offset reproduces the full order.
|
||||
for page_size in 1..=total_docs.min(5) {
|
||||
let mut concat: Vec<DocAddress> = Vec::new();
|
||||
let mut offset = 0;
|
||||
while offset < total_docs {
|
||||
let size = page_size.min(total_docs - offset);
|
||||
let page: Vec<DocAddress> = searcher
|
||||
.search(&AllQuery, &TopDocs::with_limit(size).and_offset(offset))
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
concat.extend(page);
|
||||
offset += size;
|
||||
}
|
||||
// Avoid moving `full` across loop iterations.
|
||||
prop_assert_eq!(concat, full.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(20))]
|
||||
/// Build multiple segments with same-scoring term matches and verify stable ordering
|
||||
/// across pages for a real scoring query (TermQuery with identical TF and fieldnorm).
|
||||
#[test]
|
||||
fn proptest_stable_ordering_across_segments_with_term_query_and_pagination(
|
||||
docs_per_segment in proptest::collection::vec(1usize..50, 2..5)
|
||||
) {
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::query::TermQuery;
|
||||
use crate::Term;
|
||||
|
||||
// Build an index with multiple segments; each doc has exactly one token "x",
|
||||
// ensuring equal BM25 scores across all matching docs (same TF=1 and fieldnorm=1).
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut writer = index.writer_for_tests().unwrap();
|
||||
writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
|
||||
for num_docs in &docs_per_segment {
|
||||
for _ in 0..*num_docs {
|
||||
writer.add_document(doc!(text => "x")).unwrap();
|
||||
}
|
||||
writer.commit().unwrap();
|
||||
}
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
|
||||
let total_docs: usize = docs_per_segment.iter().sum();
|
||||
let term = Term::from_field_text(text, "x");
|
||||
let tq = TermQuery::new(term, IndexRecordOption::WithFreqs);
|
||||
|
||||
// Full result set, first assert all scores are identical across docs.
|
||||
let full_with_scores: Vec<(Score, DocAddress)> = searcher
|
||||
.search(&tq, &TopDocs::with_limit(total_docs))
|
||||
.unwrap();
|
||||
// Sanity: at least one document was returned.
|
||||
prop_assert!(!full_with_scores.is_empty());
|
||||
let first_score = full_with_scores[0].0;
|
||||
prop_assert!(full_with_scores.iter().all(|(score, _)| *score == first_score));
|
||||
|
||||
// Keep only the addresses for the remaining checks.
|
||||
let full: Vec<DocAddress> = full_with_scores
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
|
||||
// Sanity: we actually created multiple segments and have documents.
|
||||
prop_assert!(docs_per_segment.len() >= 2);
|
||||
prop_assert!(total_docs >= 2);
|
||||
|
||||
// 1) Increasing limit should preserve prefix ordering.
|
||||
for k in 1..=total_docs {
|
||||
let page: Vec<DocAddress> = searcher
|
||||
.search(&tq, &TopDocs::with_limit(k))
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
prop_assert_eq!(page, full[..k].to_vec());
|
||||
}
|
||||
|
||||
// 2) Offset + limit pages should always match the corresponding slice.
|
||||
// Check three representative page sizes for each offset: 1, ~half, and remaining.
|
||||
for offset in 0..total_docs {
|
||||
let remaining = total_docs - offset;
|
||||
|
||||
let assert_page_eq = |limit: usize| -> proptest::test_runner::TestCaseResult {
|
||||
let page: Vec<DocAddress> = searcher
|
||||
.search(&tq, &TopDocs::with_limit(limit).and_offset(offset))
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
prop_assert_eq!(page, full[offset..offset + limit].to_vec());
|
||||
Ok(())
|
||||
};
|
||||
|
||||
assert_page_eq(1)?;
|
||||
assert_page_eq((remaining / 2).max(1))?;
|
||||
assert_page_eq(remaining)?;
|
||||
}
|
||||
|
||||
// 3) Concatenating fixed-size pages by offset reproduces the full order.
|
||||
for page_size in 1..=total_docs.min(5) {
|
||||
let mut concat: Vec<DocAddress> = Vec::new();
|
||||
let mut offset = 0;
|
||||
while offset < total_docs {
|
||||
let size = page_size.min(total_docs - offset);
|
||||
let page: Vec<DocAddress> = searcher
|
||||
.search(&tq, &TopDocs::with_limit(size).and_offset(offset))
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|(_score, addr)| addr)
|
||||
.collect();
|
||||
concat.extend(page);
|
||||
offset += size;
|
||||
}
|
||||
prop_assert_eq!(concat, full.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_top_0() {
|
||||
|
||||
@@ -216,7 +216,7 @@ impl IndexBuilder {

    /// Opens or creates a new index in the provided directory
    pub fn open_or_create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
        let dir: Box<dyn Directory> = dir.into();
        let dir = dir.into();
        if !Index::exists(&*dir)? {
            return self.create(dir);
        }
@@ -494,7 +494,7 @@ impl Index {
            .into_iter()
            .map(|segment| SegmentReader::open(&segment)?.fields_metadata())
            .collect::<Result<_, _>>()?;
        Ok(merge_field_meta_data(fields_metadata))
        Ok(merge_field_meta_data(fields_metadata, &self.schema()))
    }

    /// Creates a new segment_meta (Advanced user only).
@@ -1,7 +1,8 @@
|
||||
use std::io;
|
||||
|
||||
use common::json_path_writer::JSON_END_OF_PATH;
|
||||
use common::{BinarySerializable, ByteCount};
|
||||
use common::BinarySerializable;
|
||||
use fnv::FnvHashSet;
|
||||
#[cfg(feature = "quickwit")]
|
||||
use futures_util::{FutureExt, StreamExt, TryStreamExt};
|
||||
#[cfg(feature = "quickwit")]
|
||||
@@ -35,33 +36,6 @@ pub struct InvertedIndexReader {
|
||||
total_num_tokens: u64,
|
||||
}
|
||||
|
||||
/// Object that records the amount of space used by a field in an inverted index.
|
||||
pub(crate) struct InvertedIndexFieldSpace {
|
||||
pub field_name: String,
|
||||
pub field_type: Type,
|
||||
pub postings_size: ByteCount,
|
||||
pub positions_size: ByteCount,
|
||||
pub num_terms: u64,
|
||||
}
|
||||
|
||||
/// Returns None if the term is not a valid JSON path.
|
||||
fn extract_field_name_and_field_type_from_json_path(term: &[u8]) -> Option<(String, Type)> {
|
||||
let index = term.iter().position(|&byte| byte == JSON_END_OF_PATH)?;
|
||||
let field_type_code = term.get(index + 1).copied()?;
|
||||
let field_type = Type::from_code(field_type_code)?;
|
||||
// Let's flush the current field.
|
||||
let field_name = String::from_utf8_lossy(&term[..index]).to_string();
|
||||
Some((field_name, field_type))
|
||||
}
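// Illustrative sketch (not part of the upstream diff): terms of a JSON field are laid out as
// `<json path bytes><JSON_END_OF_PATH><type code byte><encoded value>`, which is the layout
// the helper above parses. Path segments are joined with a `1u8` separator (the
// `"sub\u{1}a"` entries in the tests further down show this). Assuming `Type::to_code` is
// the inverse of `Type::from_code`, a prefix identifying an i64 field `sub.a` could be built
// roughly like this:
//
//     let mut term_prefix: Vec<u8> = Vec::new();
//     term_prefix.extend_from_slice(b"sub\x01a"); // path segments joined by the 1u8 separator
//     term_prefix.push(JSON_END_OF_PATH);         // marks the end of the json path
//     term_prefix.push(Type::I64.to_code());      // one-byte type code
//     assert_eq!(
//         extract_field_name_and_field_type_from_json_path(&term_prefix),
//         Some(("sub\u{1}a".to_string(), Type::I64))
//     );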

impl InvertedIndexFieldSpace {
fn record(&mut self, term_info: &TermInfo) {
self.postings_size += ByteCount::from(term_info.posting_num_bytes() as u64);
self.positions_size += ByteCount::from(term_info.positions_num_bytes() as u64);
self.num_terms += 1;
}
}

impl InvertedIndexReader {
pub(crate) fn new(
termdict: TermDictionary,
@@ -107,56 +81,20 @@ impl InvertedIndexReader {
///
/// Notice: This requires a full scan and is therefore **very expensive**.
/// TODO: Move to sstable to use the index.
pub(crate) fn list_encoded_json_fields(&self) -> io::Result<Vec<InvertedIndexFieldSpace>> {
pub fn list_encoded_fields(&self) -> io::Result<Vec<(String, Type)>> {
let mut stream = self.termdict.stream()?;
let mut fields: Vec<InvertedIndexFieldSpace> = Vec::new();

let mut current_field_opt: Option<InvertedIndexFieldSpace> = None;
// Current field bytes, including the JSON_END_OF_PATH.
let mut current_field_bytes: Vec<u8> = Vec::new();

while let Some((term, term_info)) = stream.next() {
if let Some(current_field) = &mut current_field_opt {
if term.starts_with(&current_field_bytes) {
// We are still in the same field.
current_field.record(term_info);
continue;
let mut fields = Vec::new();
let mut fields_set = FnvHashSet::default();
while let Some((term, _term_info)) = stream.next() {
if let Some(index) = term.iter().position(|&byte| byte == JSON_END_OF_PATH) {
if !fields_set.contains(&term[..index + 2]) {
fields_set.insert(term[..index + 2].to_vec());
let typ = Type::from_code(term[index + 1]).unwrap();
fields.push((String::from_utf8_lossy(&term[..index]).to_string(), typ));
}
}

// This is a new field!
// Let's flush the current field.
fields.extend(current_field_opt.take());
current_field_bytes.clear();

// And create a new one.
let Some((field_name, field_type)) =
extract_field_name_and_field_type_from_json_path(term)
else {
error!(
"invalid term bytes encountered {term:?}. this only happens if the term \
dictionary is corrupted. please report"
);
continue;
};
let mut field_space = InvertedIndexFieldSpace {
field_name,
field_type,
postings_size: ByteCount::default(),
positions_size: ByteCount::default(),
num_terms: 0u64,
};
field_space.record(&term_info);

// We include the json type and the json end of path to make sure the prefix check
// is meaningful.
current_field_bytes.extend_from_slice(&term[..field_space.field_name.len() + 2]);
current_field_opt = Some(field_space);
}

// We need to flush the last field as well.
fields.extend(current_field_opt.take());

Ok(fields)
}


@@ -1,8 +1,8 @@
use std::collections::HashMap;
use std::ops::BitOrAssign;
use std::sync::{Arc, RwLock};
use std::{fmt, io};

use common::{ByteCount, HasLen};
use fnv::FnvHashMap;
use itertools::Itertools;

@@ -304,16 +304,12 @@ impl SegmentReader {
for (field, field_entry) in self.schema().fields() {
let field_name = field_entry.name().to_string();
let is_indexed = field_entry.is_indexed();

if is_indexed {
let is_json = field_entry.field_type().value_type() == Type::Json;
if is_json {
let term_dictionary_json_field_num_bytes: u64 = self
.termdict_composite
.open_read(field)
.map(|file_slice| file_slice.len() as u64)
.unwrap_or(0u64);
let inv_index = self.inverted_index(field)?;
let encoded_fields_in_index = inv_index.list_encoded_json_fields()?;
let encoded_fields_in_index = inv_index.list_encoded_fields()?;
let mut build_path = |field_name: &str, mut json_path: String| {
// In this case we need to map the potential fast field to the field name
// accepted by the query parser.
@@ -332,65 +328,30 @@ impl SegmentReader {
format!("{field_name}.{json_path}")
}
};
let total_num_terms = encoded_fields_in_index
.iter()
.map(|field_space| field_space.num_terms)
.sum();
indexed_fields.extend(encoded_fields_in_index.into_iter().map(|field_space| {
let field_name = build_path(&field_name, field_space.field_name);
// It is complex to attribute the exact amount of bytes required by specific
// field in the json field. Instead, as a proxy, we
// attribute the total amount of bytes for the entire json field,
// proportionally to the number of terms in each
// fields.
let term_dictionary_size = (term_dictionary_json_field_num_bytes
* field_space.num_terms)
.checked_div(total_num_terms)
.unwrap_or(0);
FieldMetadata {
postings_size: Some(field_space.postings_size),
positions_size: Some(field_space.positions_size),
term_dictionary_size: Some(ByteCount::from(term_dictionary_size)),
fast_size: None,
// The stored flag will be set at the end of this function!
stored: field_entry.is_stored(),
field_name,
typ: field_space.field_type,
}
}));
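// Illustrative sketch (not part of the upstream diff): the proportional attribution above
// amounts to `whole_json_term_dict_bytes * terms_in_subfield / total_terms`. For example,
// if the json field's term dictionary occupies 10_000 bytes and one subfield owns 25 of
// its 100 terms, that subfield is credited with 10_000 * 25 / 100 = 2_500 bytes:
//
//     let term_dictionary_json_field_num_bytes: u64 = 10_000;
//     let (num_terms, total_num_terms): (u64, u64) = (25, 100);
//     let attributed = (term_dictionary_json_field_num_bytes * num_terms)
//         .checked_div(total_num_terms)
//         .unwrap_or(0);
//     assert_eq!(attributed, 2_500);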
indexed_fields.extend(
encoded_fields_in_index
.into_iter()
.map(|(name, typ)| (build_path(&field_name, name), typ))
.map(|(field_name, typ)| FieldMetadata {
indexed: true,
stored: false,
field_name,
fast: false,
typ,
}),
);
} else {
let postings_size: ByteCount = self
.postings_composite
.open_read(field)
.map(|posting_fileslice| posting_fileslice.len())
.unwrap_or(0)
.into();
let positions_size: ByteCount = self
.positions_composite
.open_read(field)
.map(|positions_fileslice| positions_fileslice.len())
.unwrap_or(0)
.into();
let term_dictionary_size: ByteCount = self
.termdict_composite
.open_read(field)
.map(|term_dictionary_fileslice| term_dictionary_fileslice.len())
.unwrap_or(0)
.into();
indexed_fields.push(FieldMetadata {
indexed: true,
stored: false,
field_name: field_name.to_string(),
fast: false,
typ: field_entry.field_type().value_type(),
// The stored flag will be set at the end of this function!
stored: field_entry.is_stored(),
fast_size: None,
term_dictionary_size: Some(term_dictionary_size),
postings_size: Some(postings_size),
positions_size: Some(positions_size),
});
}
}
}
let fast_fields: Vec<FieldMetadata> = self
let mut fast_fields: Vec<FieldMetadata> = self
.fast_fields()
.columnar()
.iter_columns()?
@@ -402,21 +363,23 @@ impl SegmentReader {
.get(&field_name)
.unwrap_or(&field_name)
.to_string();
let stored = is_field_stored(&field_name, &self.schema);
FieldMetadata {
indexed: false,
stored: false,
field_name,
fast: true,
typ: Type::from(handle.column_type()),
stored,
fast_size: Some(handle.num_bytes()),
term_dictionary_size: None,
postings_size: None,
positions_size: None,
}
})
.collect();
let merged_field_metadatas: Vec<FieldMetadata> =
merge_field_meta_data(vec![indexed_fields, fast_fields]);
Ok(merged_field_metadatas)
// Since the type is encoded differently in the fast field and in the inverted index,
// the order of the fields is not guaranteed to be the same. Therefore, we sort the fields.
// If we are sure that the order is the same, we can remove this sort.
indexed_fields.sort_unstable();
fast_fields.sort_unstable();
let merged = merge_field_meta_data(vec![indexed_fields, fast_fields], &self.schema);

Ok(merged)
}

/// Returns the segment id
@@ -480,47 +443,20 @@ pub struct FieldMetadata {
// Notice: Don't reorder the declaration of 1.field_name 2.typ, as it is used for ordering by
// field_name then typ.
pub typ: Type,
/// Is the field indexed for search
pub indexed: bool,
/// Is the field stored in the doc store
pub stored: bool,
/// Size occupied in the columnar storage (None if not fast)
pub fast_size: Option<ByteCount>,
/// term_dictionary
pub term_dictionary_size: Option<ByteCount>,
/// Size occupied in the index postings storage (None if not indexed)
pub postings_size: Option<ByteCount>,
/// Size occupied in the index positions storage (None if positions are not recorded)
pub positions_size: Option<ByteCount>,
/// Is the field stored in the columnar storage
pub fast: bool,
}

fn merge_options(left: Option<ByteCount>, right: Option<ByteCount>) -> Option<ByteCount> {
match (left, right) {
(Some(l), Some(r)) => Some(l + r),
(None, right) => right,
(left, None) => left,
}
}
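// Illustrative sketch (not part of the upstream diff): `merge_options` adds the sizes when
// both sides are present and otherwise keeps whichever side is `Some`:
//
//     assert_eq!(
//         merge_options(Some(ByteCount::from(100u64)), Some(ByteCount::from(50u64))),
//         Some(ByteCount::from(150u64))
//     );
//     assert_eq!(
//         merge_options(None, Some(ByteCount::from(50u64))),
//         Some(ByteCount::from(50u64))
//     );
//     assert_eq!(merge_options(Some(ByteCount::from(100u64)), None), Some(ByteCount::from(100u64)));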

impl FieldMetadata {
/// Returns true if and only if the field is indexed.
pub fn is_indexed(&self) -> bool {
self.postings_size.is_some()
}

/// Returns true if and only if the field is a fast field (i.e.: recorded in columnar format).
pub fn is_fast(&self) -> bool {
self.fast_size.is_some()
}

/// Merges two field metadata.
pub fn merge(&mut self, rhs: Self) {
assert_eq!(self.field_name, rhs.field_name);
assert_eq!(self.typ, rhs.typ);
impl BitOrAssign for FieldMetadata {
fn bitor_assign(&mut self, rhs: Self) {
assert!(self.field_name == rhs.field_name);
assert!(self.typ == rhs.typ);
self.indexed |= rhs.indexed;
self.stored |= rhs.stored;
self.fast_size = merge_options(self.fast_size, rhs.fast_size);
self.term_dictionary_size =
merge_options(self.term_dictionary_size, rhs.term_dictionary_size);
self.postings_size = merge_options(self.postings_size, rhs.postings_size);
self.positions_size = merge_options(self.positions_size, rhs.positions_size);
self.fast |= rhs.fast;
}
}

@@ -533,29 +469,23 @@ fn is_field_stored(field_name: &str, schema: &Schema) -> bool {
}

/// Helper to merge the field metadata from multiple segments.
pub fn merge_field_meta_data(mut field_metadatas: Vec<Vec<FieldMetadata>>) -> Vec<FieldMetadata> {
// READ BEFORE REMOVING THIS!
//
// Because we replace field sep by `.`, fields are not always sorted.
// Also, to enforce such an implicit contract, we would have to add
// assert here.
//
// Sorting is linear time on pre-sorted data, so we are simply better off sorting data here.
for field_metadatas in &mut field_metadatas {
field_metadatas.sort_unstable();
}
pub fn merge_field_meta_data(
field_metadatas: Vec<Vec<FieldMetadata>>,
schema: &Schema,
) -> Vec<FieldMetadata> {
let mut merged_field_metadata = Vec::new();
for (_key, mut group) in &field_metadatas
.into_iter()
.kmerge()
.kmerge_by(|left, right| left < right)
// TODO: Remove allocation
.chunk_by(|el| (el.field_name.to_string(), el.typ))
{
let mut merged: FieldMetadata = group.next().unwrap();
for el in group {
merged.merge(el);
merged |= el;
}
// Currently is_field_stored is maybe too slow for the high cardinality case
merged.stored = is_field_stored(&merged.field_name, schema);
merged_field_metadata.push(merged);
}
merged_field_metadata
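// Illustrative sketch (not part of the upstream diff): after the per-segment vectors are
// sorted, the k-way merge plus `chunk_by` groups entries that share the same
// `(field_name, typ)` key, and each group collapses into one `FieldMetadata`:
//
//     segment 1: [a/Str, b/U64]      segment 2: [a/Str]
//     kmerge        -> [a/Str, a/Str, b/U64]
//     chunk_by(key) -> group a/Str (2 entries merged), group b/U64 (1 entry)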
@@ -577,7 +507,7 @@ fn intersect_alive_bitset(
}

impl fmt::Debug for SegmentReader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SegmentReader({:?})", self.segment_id)
}
}
@@ -586,168 +516,122 @@ impl fmt::Debug for SegmentReader {
mod test {
use super::*;
use crate::index::Index;
use crate::schema::{Term, STORED, TEXT};
use crate::schema::{SchemaBuilder, Term, STORED, TEXT};
use crate::IndexWriter;

#[track_caller]
fn assert_merge(fields_metadatas: &[Vec<FieldMetadata>], expected: &[FieldMetadata]) {
use itertools::Itertools;
let num_els = fields_metadatas.len();
for permutation in fields_metadatas.iter().cloned().permutations(num_els) {
let res = merge_field_meta_data(permutation);
assert_eq!(&res, &expected);
}
}

#[test]
fn test_merge_field_meta_data_same_field() {
fn test_merge_field_meta_data_same() {
let schema = SchemaBuilder::new().build();
let field_metadata1 = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
indexed: true,
stored: false,
term_dictionary_size: Some(ByteCount::from(100u64)),
postings_size: Some(ByteCount::from(1_000u64)),
positions_size: Some(ByteCount::from(2_000u64)),
fast_size: Some(ByteCount::from(1_000u64).into()),
fast: true,
};
let field_metadata2 = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
indexed: true,
stored: false,
term_dictionary_size: Some(ByteCount::from(80u64)),
postings_size: Some(ByteCount::from(1_500u64)),
positions_size: Some(ByteCount::from(2_500u64)),
fast_size: Some(ByteCount::from(3_000u64).into()),
fast: true,
};
let expected = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
stored: false,
term_dictionary_size: Some(ByteCount::from(180u64)),
postings_size: Some(ByteCount::from(2_500u64)),
positions_size: Some(ByteCount::from(4_500u64)),
fast_size: Some(ByteCount::from(4_000u64).into()),
};
assert_merge(
&[vec![field_metadata1.clone()], vec![field_metadata2]],
&[expected],
let res = merge_field_meta_data(
vec![vec![field_metadata1.clone()], vec![field_metadata2]],
&schema,
);
assert_eq!(res, vec![field_metadata1]);
}

#[track_caller]
#[test]
fn test_merge_field_meta_data_different() {
let schema = SchemaBuilder::new().build();
let field_metadata1 = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
indexed: false,
stored: false,
fast_size: Some(1_000u64.into()),
term_dictionary_size: Some(100u64.into()),
postings_size: Some(2_000u64.into()),
positions_size: Some(4_000u64.into()),
fast: true,
};
let field_metadata2 = FieldMetadata {
field_name: "b".to_string(),
typ: crate::schema::Type::Str,
indexed: false,
stored: false,
fast_size: Some(1_002u64.into()),
term_dictionary_size: None,
postings_size: None,
positions_size: None,
fast: true,
};
let field_metadata3 = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
term_dictionary_size: Some(101u64.into()),
postings_size: Some(2_001u64.into()),
positions_size: Some(4_001u64.into()),
indexed: true,
stored: false,
fast_size: None,
fast: false,
};
let expected = vec![
FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
stored: false,
term_dictionary_size: Some(201u64.into()),
postings_size: Some(4_001u64.into()),
positions_size: Some(8_001u64.into()),
fast_size: Some(1_000u64.into()),
},
FieldMetadata {
field_name: "b".to_string(),
typ: crate::schema::Type::Str,
stored: false,
term_dictionary_size: None,
postings_size: None,
positions_size: None,
fast_size: Some(1_002u64.into()),
},
];
assert_merge(
&[
let res = merge_field_meta_data(
vec![
vec![field_metadata1.clone(), field_metadata2.clone()],
vec![field_metadata3],
],
&expected,
&schema,
);
let field_metadata_expected1 = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
indexed: true,
stored: false,
fast: true,
};
assert_eq!(res, vec![field_metadata_expected1, field_metadata2.clone()]);
}

#[test]
fn test_merge_field_meta_data_merge() {
use pretty_assertions::assert_eq;
let get_meta_data = |name: &str, typ: Type| FieldMetadata {
field_name: name.to_string(),
typ,
term_dictionary_size: None,
postings_size: None,
positions_size: None,
indexed: false,
stored: false,
fast_size: Some(1u64.into()),
fast: true,
};
let metas = vec![get_meta_data("d", Type::Str), get_meta_data("e", Type::U64)];
assert_merge(
&[vec![get_meta_data("e", Type::Str)], metas],
&[
let schema = SchemaBuilder::new().build();
let mut metas = vec![get_meta_data("d", Type::Str), get_meta_data("e", Type::U64)];
metas.sort();
let res = merge_field_meta_data(vec![vec![get_meta_data("e", Type::Str)], metas], &schema);
assert_eq!(
res,
vec![
get_meta_data("d", Type::Str),
get_meta_data("e", Type::Str),
get_meta_data("e", Type::U64),
],
]
);
}

#[test]
fn test_merge_field_meta_data_bitxor() {
let field_metadata1 = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
term_dictionary_size: None,
postings_size: None,
positions_size: None,
indexed: false,
stored: false,
fast_size: Some(10u64.into()),
fast: true,
};
let field_metadata2 = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
term_dictionary_size: Some(10u64.into()),
postings_size: Some(11u64.into()),
positions_size: Some(12u64.into()),
indexed: true,
stored: false,
fast_size: None,
fast: false,
};
let field_metadata_expected = FieldMetadata {
field_name: "a".to_string(),
typ: crate::schema::Type::Str,
term_dictionary_size: Some(10u64.into()),
postings_size: Some(11u64.into()),
positions_size: Some(12u64.into()),
indexed: true,
stored: false,
fast_size: Some(10u64.into()),
fast: true,
};
let mut res1 = field_metadata1.clone();
res1.merge(field_metadata2.clone());
res1 |= field_metadata2.clone();
let mut res2 = field_metadata2.clone();
res2.merge(field_metadata1);
res2 |= field_metadata1;
assert_eq!(res1, field_metadata_expected);
assert_eq!(res2, field_metadata_expected);
}
@@ -778,7 +662,6 @@ mod test {
assert_eq!(4, searcher.segment_reader(0).max_doc());
Ok(())
}

#[test]
fn test_alive_docs_iterator() -> crate::Result<()> {
let mut schema_builder = Schema::builder();

@@ -61,8 +61,6 @@ type AddBatchReceiver<D> = channel::Receiver<AddBatch<D>>;
#[cfg(test)]
mod tests_mmap {

use common::ByteCount;

use crate::aggregation::agg_req::Aggregations;
use crate::aggregation::agg_result::AggregationResults;
use crate::aggregation::AggregationCollector;
@@ -282,14 +280,11 @@ mod tests_mmap {
field_name_out
};

let mut fields: Vec<(String, Type)> = reader.searcher().segment_readers()[0]
let mut fields = reader.searcher().segment_readers()[0]
.inverted_index(field)
.unwrap()
.list_encoded_json_fields()
.unwrap()
.into_iter()
.map(|field_space| (field_space.field_name, field_space.field_type))
.collect();
.list_encoded_fields()
.unwrap();
assert_eq!(fields.len(), 8);
fields.sort();
let mut expected_fields = vec![
@@ -390,12 +385,7 @@ mod tests_mmap {
let reader = &searcher.segment_readers()[0];
let inverted_index = reader.inverted_index(json_field).unwrap();
assert_eq!(
inverted_index
.list_encoded_json_fields()
.unwrap()
.into_iter()
.map(|field_space| (field_space.field_name, field_space.field_type))
.collect::<Vec<_>>(),
inverted_index.list_encoded_fields().unwrap(),
[
("k8s.container.name".to_string(), Type::Str),
("sub\u{1}a".to_string(), Type::I64),
@@ -412,41 +402,19 @@ mod tests_mmap {
fn test_json_fields_metadata_expanded_dots_one_segment() {
test_json_fields_metadata(true, true);
}

#[test]
fn test_json_fields_metadata_expanded_dots_multi_segment() {
test_json_fields_metadata(true, false);
}

#[test]
fn test_json_fields_metadata_no_expanded_dots_one_segment() {
test_json_fields_metadata(false, true);
}

#[test]
fn test_json_fields_metadata_no_expanded_dots_multi_segment() {
test_json_fields_metadata(false, false);
}

#[track_caller]
fn assert_size_eq(lhs: Option<ByteCount>, rhs: Option<ByteCount>) {
let ignore_actual_values = |size_opt: Option<ByteCount>| size_opt.map(|val| val > 0);
assert_eq!(ignore_actual_values(lhs), ignore_actual_values(rhs));
}

#[track_caller]
fn assert_field_metadata_eq_but_ignore_field_size(
expected: &FieldMetadata,
actual: &FieldMetadata,
) {
assert_eq!(&expected.field_name, &actual.field_name);
assert_eq!(&expected.typ, &actual.typ);
assert_eq!(&expected.stored, &actual.stored);
assert_size_eq(expected.postings_size, actual.postings_size);
assert_size_eq(expected.positions_size, actual.positions_size);
assert_size_eq(expected.fast_size, actual.fast_size);
}

fn test_json_fields_metadata(expanded_dots: bool, one_segment: bool) {
use pretty_assertions::assert_eq;
let mut schema_builder = Schema::builder();
@@ -485,101 +453,81 @@ mod tests_mmap {
assert_eq!(searcher.num_docs(), 3);

let fields_metadata = index.fields_metadata().unwrap();

let expected_fields = &[
FieldMetadata {
field_name: "empty".to_string(),
stored: true,
typ: Type::U64,
term_dictionary_size: Some(0u64.into()),
fast_size: Some(1u64.into()),
postings_size: Some(0u64.into()),
positions_size: Some(0u64.into()),
},
FieldMetadata {
field_name: if expanded_dots {
"json.shadow.k8s.container.name".to_string()
} else {
"json.shadow.k8s\\.container\\.name".to_string()
assert_eq!(
fields_metadata,
[
FieldMetadata {
field_name: "empty".to_string(),
indexed: true,
stored: true,
fast: true,
typ: Type::U64
},
stored: true,
typ: Type::Str,
term_dictionary_size: Some(1u64.into()),
fast_size: Some(1u64.into()),
postings_size: Some(1u64.into()),
positions_size: Some(1u64.into()),
},
FieldMetadata {
field_name: "json.shadow.sub.a".to_string(),
typ: Type::I64,
stored: true,
fast_size: Some(1u64.into()),
term_dictionary_size: Some(1u64.into()),
postings_size: Some(1u64.into()),
positions_size: Some(1u64.into()),
},
FieldMetadata {
field_name: "json.shadow.sub.b".to_string(),
typ: Type::I64,
stored: true,
fast_size: Some(1u64.into()),
term_dictionary_size: Some(1u64.into()),
postings_size: Some(1u64.into()),
positions_size: Some(1u64.into()),
},
FieldMetadata {
field_name: "json.shadow.suber.a".to_string(),
stored: true,
typ: Type::I64,
fast_size: Some(1u64.into()),
term_dictionary_size: Some(1u64.into()),
postings_size: Some(1u64.into()),
positions_size: Some(1u64.into()),
},
FieldMetadata {
field_name: "json.shadow.suber.a".to_string(),
typ: Type::Str,
stored: true,
fast_size: Some(1u64.into()),
term_dictionary_size: Some(1u64.into()),
postings_size: Some(1u64.into()),
positions_size: Some(1u64.into()),
},
FieldMetadata {
field_name: "json.shadow.suber.b".to_string(),
typ: Type::I64,
stored: true,
fast_size: Some(1u64.into()),
term_dictionary_size: Some(1u64.into()),
postings_size: Some(1u64.into()),
positions_size: Some(1u64.into()),
},
FieldMetadata {
field_name: "json.shadow.val".to_string(),
typ: Type::Str,
stored: true,
fast_size: Some(1u64.into()),
term_dictionary_size: Some(1u64.into()),
postings_size: Some(1u64.into()),
positions_size: Some(1u64.into()),
},
FieldMetadata {
field_name: "numbers".to_string(),
stored: false,
typ: Type::U64,
fast_size: Some(1u64.into()),
term_dictionary_size: None,
postings_size: None,
positions_size: None,
},
];
assert_eq!(fields_metadata.len(), expected_fields.len());
for (expected, value) in expected_fields.iter().zip(fields_metadata.iter()) {
assert_field_metadata_eq_but_ignore_field_size(expected, value);
}
FieldMetadata {
field_name: if expanded_dots {
"json.shadow.k8s.container.name".to_string()
} else {
"json.shadow.k8s\\.container\\.name".to_string()
},
indexed: true,
stored: true,
fast: true,
typ: Type::Str
},
FieldMetadata {
field_name: "json.shadow.sub.a".to_string(),
indexed: true,
stored: true,
fast: true,
typ: Type::I64
},
FieldMetadata {
field_name: "json.shadow.sub.b".to_string(),
indexed: true,
stored: true,
fast: true,
typ: Type::I64
},
FieldMetadata {
field_name: "json.shadow.suber.a".to_string(),
indexed: true,
stored: true,
fast: true,
typ: Type::I64
},
FieldMetadata {
field_name: "json.shadow.suber.a".to_string(),
indexed: true,
stored: true,
fast: true,
typ: Type::Str
},
FieldMetadata {
field_name: "json.shadow.suber.b".to_string(),
indexed: true,
stored: true,
fast: true,
typ: Type::I64
},
FieldMetadata {
field_name: "json.shadow.val".to_string(),
indexed: true,
stored: true,
fast: true,
typ: Type::Str
},
FieldMetadata {
field_name: "numbers".to_string(),
indexed: false,
stored: false,
fast: true,
typ: Type::U64
}
]
);
let query_parser = QueryParser::for_index(&index, vec![]);
// Test if returned field name can be queried
for indexed_field in fields_metadata.iter().filter(|meta| meta.is_indexed()) {
for indexed_field in fields_metadata.iter().filter(|meta| meta.indexed) {
let val = if indexed_field.typ == Type::Str {
"a"
} else {
@@ -595,10 +543,7 @@ mod tests_mmap {
}
}
// Test if returned field name can be used for aggregation
for fast_field in fields_metadata
.iter()
.filter(|field_metadata| field_metadata.is_fast())
{
for fast_field in fields_metadata.iter().filter(|meta| meta.fast) {
let agg_req_str = json!(
{
"termagg": {

@@ -165,7 +165,7 @@ mod macros;
mod future_result;

// Re-exports
pub use common::{ByteCount, DateTime};
pub use common::DateTime;
pub use {columnar, query_grammar, time};

pub use crate::error::TantivyError;

@@ -40,9 +40,6 @@ const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;

#[cfg(test)]
pub(crate) mod tests {

use std::iter;

use proptest::prelude::*;
use proptest::sample::select;


@@ -302,7 +302,6 @@ fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
mod tests {
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::iter;

use proptest::prelude::*;


@@ -1790,15 +1790,6 @@ mod test {
}
}

#[test]
fn test_space_before_value() {
test_parse_query_to_logical_ast_helper(
"title: a",
r#"Term(field=0, type=Str, "a")"#,
false,
);
}

#[test]
fn test_escaped_field() {
let mut schema_builder = Schema::builder();

@@ -41,7 +41,6 @@
//! use tantivy::schema::document::{DeserializeError, DocumentDeserialize, DocumentDeserializer};
//!
//! /// Our custom document to let us use a map of `serde_json::Values`.
//! #[allow(dead_code)]
//! pub struct MyCustomDocument {
//! // Tantivy provides trait implementations for common `serde_json` types.
//! fields: BTreeMap<Field, serde_json::Value>

@@ -1561,8 +1561,6 @@ fn to_ascii(text: &str, output: &mut String) {

#[cfg(test)]
mod tests {
use std::iter;

use super::to_ascii;
use crate::tokenizer::{AsciiFoldingFilter, RawTokenizer, SimpleTokenizer, TextAnalyzer};


@@ -1,6 +1,6 @@
[package]
name = "tantivy-sstable"
version = "0.6.0"
version = "0.5.0"
edition = "2024"
license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy"
@@ -10,10 +10,10 @@ categories = ["database-implementations", "data-structures", "compression"]
description = "sstables for tantivy"

[dependencies]
common = {version= "0.10", path="../common", package="tantivy-common"}
common = {version= "0.9", path="../common", package="tantivy-common"}
futures-util = "0.3.30"
itertools = "0.14.0"
tantivy-bitpacker = { version= "0.9", path="../bitpacker" }
tantivy-bitpacker = { version= "0.8", path="../bitpacker" }
tantivy-fst = "0.5"
# experimental gives us access to Decompressor::upper_bound
zstd = { version = "0.13", optional = true, features = ["experimental"] }

@@ -308,10 +308,9 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
}
}
_ => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Unsupported sstable version, expected one of [2, 3], found {version}"),
));
return Err(io::Error::other(format!(
"Unsupported sstable version, expected one of [2, 3], found {version}"
)));
}
};

@@ -697,10 +696,9 @@ mod tests {
fn read_bytes(&self, range: Range<usize>) -> std::io::Result<OwnedBytes> {
let allowed_range = self.allowed_range.lock().unwrap();
if !allowed_range.contains(&range.start) || !allowed_range.contains(&(range.end - 1)) {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("invalid range, allowed {allowed_range:?}, requested {range:?}"),
));
return Err(std::io::Error::other(format!(
"invalid range, allowed {allowed_range:?}, requested {range:?}"
)));
}

Ok(self.bytes.slice(range))

@@ -1,3 +1,5 @@
#![allow(clippy::manual_div_ceil)]

//! `tantivy_sstable` is a crate that provides a sorted string table data structure.
//!
//! It is used in `tantivy` to store the term dictionary.

@@ -394,7 +394,7 @@ impl SSTableIndexBuilder {

fn fst_error_to_io_error(error: tantivy_fst::Error) -> io::Error {
match error {
tantivy_fst::Error::Fst(fst_error) => io::Error::new(io::ErrorKind::Other, fst_error),
tantivy_fst::Error::Fst(fst_error) => io::Error::other(fst_error),
tantivy_fst::Error::Io(ioerror) => ioerror,
}
}

@@ -1,6 +1,6 @@
[package]
name = "tantivy-stacker"
version = "0.6.0"
version = "0.5.0"
edition = "2024"
license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy"
@@ -9,7 +9,7 @@ description = "term hashmap used for indexing"

[dependencies]
murmurhash32 = "0.3"
common = { version = "0.10", path = "../common/", package = "tantivy-common" }
common = { version = "0.9", path = "../common/", package = "tantivy-common" }
ahash = { version = "0.8.11", default-features = false, optional = true }
rand_distr = "0.4.3"


@@ -10,8 +10,7 @@ pub fn fast_short_slice_copy(src: &[u8], dst: &mut [u8]) {
#[track_caller]
fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
panic!(
"source slice length ({}) does not match destination slice length ({})",
src_len, dst_len,
"source slice length ({src_len}) does not match destination slice length ({dst_len})",
);
}


@@ -1,5 +1,3 @@
#![cfg_attr(all(feature = "unstable", test), feature(test))]

#[cfg(all(test, feature = "unstable"))]
extern crate test;


@@ -1,6 +1,6 @@
[package]
name = "tantivy-tokenizer-api"
version = "0.6.0"
version = "0.5.0"
license = "MIT"
edition = "2021"
description = "Tokenizer API of tantivy"