Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2026-01-06 01:02:55 +00:00)

Compare commits: trinity.po ... low_card_o (48 commits)
Commits (48, by SHA1): b2573a3b16, 70e591e230, 5277367cb0, 8b02bff9b8, 60225bdd45, 938bfec8b7, dabcaa5809, d410a3b0c0, fc93391d0e, f8e79271ab, 33835b6a01, 270ca5123c, 714366d3b9, 40659d4d07, e1e131a804, 70da310b2d, 85010b589a, 2340dca628, 71a26d5b24, 203751f2fe, 7963b0b4aa, d5eefca11d, 5d6c8de23e, a06365f39f, f4b374110f, c37af9c1ff, 33794a114c, 8676a1f57b, 021ff2ad63, 39e027667b, a1d65c3df3, 2e4615c2d3, 610091e2c4, c301e7b1c4, d9eb093368, d4b090124c, 811c68cdb2, bc1c789897, e7c8c331bd, 2f01152a3c, 4e84c70387, f2c77f06c5, 74334f9c9a, cc4beb61ba, 6742e5981b, b128299976, 945af922d1, 295d07e55c
CHANGELOG.md (36 changed lines)
```diff
@@ -1,3 +1,31 @@
+Tantivy 0.25
+================================
+
+## Bugfixes
+- fix union performance regression in tantivy 0.24 [#2663](https://github.com/quickwit-oss/tantivy/pull/2663)(@PSeitz)
+- make zstd optional in sstable [#2633](https://github.com/quickwit-oss/tantivy/pull/2633)(@Parth)
+- Fix TopDocs::order_by_string_fast_field for asc order [#2672](https://github.com/quickwit-oss/tantivy/pull/2672)(@stuhood @PSeitz)
+
+## Features/Improvements
+- add docs/example and Vec<u32> values to sstable [#2660](https://github.com/quickwit-oss/tantivy/pull/2660)(@PSeitz)
+- Add string fast field support to `TopDocs`. [#2642](https://github.com/quickwit-oss/tantivy/pull/2642)(@stuhood)
+- update edition to 2024 [#2620](https://github.com/quickwit-oss/tantivy/pull/2620)(@PSeitz)
+- Allow optional spaces between the field name and the value in the query parser [#2678](https://github.com/quickwit-oss/tantivy/pull/2678)(@Darkheir)
+- Support mixed field types in query parser [#2676](https://github.com/quickwit-oss/tantivy/pull/2676)(@trinity-1686a)
+- Add per-field size details [#2679](https://github.com/quickwit-oss/tantivy/pull/2679)(@fulmicoton)
+
+Tantivy 0.24.2
+================================
+- Fix TopNComputer for reverse order. [#2672](https://github.com/quickwit-oss/tantivy/pull/2672)(@stuhood @PSeitz)
+
+Affected queries are [order_by_fast_field](https://docs.rs/tantivy/latest/tantivy/collector/struct.TopDocs.html#method.order_by_fast_field) and
+[order_by_u64_field](https://docs.rs/tantivy/latest/tantivy/collector/struct.TopDocs.html#method.order_by_u64_field)
+for `Order::Asc`
+
+Tantivy 0.24.1
+================================
+- Fix: bump required rust version to 1.81
+
 Tantivy 0.24
 ================================
 Tantivy 0.24 will be backwards compatible with indices created with v0.22 and v0.21. The new minimum rust version will be 1.75. Tantivy 0.23 will be skipped.
@@ -80,6 +108,14 @@ This will slightly increase space and access time. [#2439](https://github.com/qu
 - Fix trait bound of StoreReader::iter [#2360](https://github.com/quickwit-oss/tantivy/pull/2360)(@adamreichold)
 - remove read_postings_no_deletes [#2526](https://github.com/quickwit-oss/tantivy/pull/2526)(@PSeitz)
 
+Tantivy 0.22.1
+================================
+- Fix TopNComputer for reverse order. [#2672](https://github.com/quickwit-oss/tantivy/pull/2672)(@stuhood @PSeitz)
+
+Affected queries are [order_by_fast_field](https://docs.rs/tantivy/latest/tantivy/collector/struct.TopDocs.html#method.order_by_fast_field) and
+[order_by_u64_field](https://docs.rs/tantivy/latest/tantivy/collector/struct.TopDocs.html#method.order_by_u64_field)
+for `Order::Asc`
+
 Tantivy 0.22
 ================================
 
```
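Several of the entries above (the TopNComputer fix, `order_by_string_fast_field`, and string fast field support in `TopDocs`) concern collecting results ordered by a fast field with `Order::Asc`. Below is a minimal sketch of that code path, assuming a u64 fast field and the `order_by_fast_field` collector linked from the changelog; the schema, field names, and exact method signature are assumptions, not taken from this diff:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, STORED, TEXT};
use tantivy::{doc, DocAddress, Index, Order};

fn main() -> tantivy::Result<()> {
    // Hypothetical schema: a text field to search and a u64 fast field to sort on.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let price = schema_builder.add_u64_field("price", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "cheap widget", price => 5u64))?;
    writer.add_document(doc!(title => "pricey widget", price => 50u64))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("widget")?;

    // Ascending order over the `price` fast field: the Order::Asc path that the
    // TopNComputer / order_by_fast_field fixes above refer to.
    let collector = TopDocs::with_limit(10).order_by_fast_field("price", Order::Asc);
    let hits: Vec<(u64, DocAddress)> = searcher.search(&query, &collector)?;
    for (price_value, addr) in hits {
        println!("{price_value} {addr:?}");
    }
    Ok(())
}
```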
Cargo.toml (29 changed lines)
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.24.0"
+version = "0.25.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -33,7 +33,7 @@ tempfile = { version = "3.12.0", optional = true }
 log = "0.4.16"
 serde = { version = "1.0.219", features = ["derive"] }
 serde_json = "1.0.140"
-fs4 = { version = "0.8.0", optional = true }
+fs4 = { version = "0.13.1", optional = true }
 levenshtein_automata = "0.2.1"
 uuid = { version = "1.0.0", features = ["v4", "serde"] }
 crossbeam-channel = "0.5.4"
@@ -57,18 +57,19 @@ measure_time = "0.9.0"
 arc-swap = "1.5.0"
 bon = "3.3.1"
 
-columnar = { version = "0.5", path = "./columnar", package = "tantivy-columnar" }
-sstable = { version = "0.5", path = "./sstable", package = "tantivy-sstable", optional = true }
-stacker = { version = "0.5", path = "./stacker", package = "tantivy-stacker" }
-query-grammar = { version = "0.24.0", path = "./query-grammar", package = "tantivy-query-grammar" }
-tantivy-bitpacker = { version = "0.8", path = "./bitpacker" }
-common = { version = "0.9", path = "./common/", package = "tantivy-common" }
-tokenizer-api = { version = "0.5", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
+columnar = { version = "0.6", path = "./columnar", package = "tantivy-columnar" }
+sstable = { version = "0.6", path = "./sstable", package = "tantivy-sstable", optional = true }
+stacker = { version = "0.6", path = "./stacker", package = "tantivy-stacker" }
+query-grammar = { version = "0.25.0", path = "./query-grammar", package = "tantivy-query-grammar" }
+tantivy-bitpacker = { version = "0.9", path = "./bitpacker" }
+common = { version = "0.10", path = "./common/", package = "tantivy-common" }
+tokenizer-api = { version = "0.6", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
 sketches-ddsketch = { version = "0.3.0", features = ["use_serde"] }
 hyperloglogplus = { version = "0.4.1", features = ["const-loop"] }
 futures-util = { version = "0.3.28", optional = true }
 futures-channel = { version = "0.3.28", optional = true }
 fnv = "1.0.7"
+typetag = "0.2.21"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"
@@ -87,7 +88,7 @@ more-asserts = "0.3.1"
 rand_distr = "0.4.3"
 time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
 postcard = { version = "1.0.4", features = [
-    "use-std",
+    "use-std",
 ], default-features = false }
 
 [target.'cfg(not(windows))'.dev-dependencies]
@@ -167,3 +168,11 @@ harness = false
 [[bench]]
 name = "agg_bench"
 harness = false
+
+[[bench]]
+name = "exists_json"
+harness = false
+
+[[bench]]
+name = "and_or_queries"
+harness = false
```

```diff
@@ -23,8 +23,6 @@ performance for different types of queries/collections.
 
 Your mileage WILL vary depending on the nature of queries and their load.
 
 <img src="doc/assets/images/searchbenchmark.png">
 
 Details about the benchmark can be found at this [repository](https://github.com/quickwit-oss/search-benchmark-game).
 
 ## Features
```
RELEASE.md (27 changed lines)
````diff
@@ -1,4 +1,4 @@
-# Release a new Tantivy Version
+# Releasing a new Tantivy Version
 
 ## Steps
 
@@ -10,12 +10,29 @@
 6. Set git tag with new version
 
 
-In conjucation with `cargo-release` Steps 1-4 (I'm not sure if the change detection works):
-Set new packages to version 0.0.0
+[`cargo-release`](https://github.com/crate-ci/cargo-release) will help us with steps 1-5:
 
 Replace prev-tag-name
 ```bash
-cargo release --workspace --no-publish -v --prev-tag-name 0.19 --push-remote origin minor --no-tag --execute
+cargo release --workspace --no-publish -v --prev-tag-name 0.24 --push-remote origin minor --no-tag
 ```
 
-no-tag or it will create tags for all the subpackages
+`no-tag` or it will create tags for all the subpackages
+
+cargo release will _not_ ignore unchanged packages, but it will print warnings for them.
+e.g. "warning: updating ownedbytes to 0.10.0 despite no changes made since tag 0.24"
+
+We need to manually ignore these unchanged packages
+```bash
+cargo release --workspace --no-publish -v --prev-tag-name 0.24 --push-remote origin minor --no-tag --exclude tokenizer-api
+```
+
+Add `--execute` to actually publish the packages, otherwise it will only print the commands that would be run.
+
+### Tag Version
+```bash
+git tag 0.25.0
+git push upstream tag 0.25.0
+```
+
+
````
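Taken together, the new RELEASE.md text above amounts to roughly the following sketch; the version numbers and the excluded crate are simply the examples used in the text:

```bash
# Dry run: cargo-release only prints the release plan, nothing is published or pushed.
cargo release --workspace --no-publish -v --prev-tag-name 0.24 --push-remote origin minor --no-tag --exclude tokenizer-api

# Re-run with --execute to actually perform the version bumps and push the release commit.
cargo release --workspace --no-publish -v --prev-tag-name 0.24 --push-remote origin minor --no-tag --exclude tokenizer-api --execute

# Tagging stays manual, because --no-tag suppresses per-subpackage tags.
git tag 0.25.0
git push upstream tag 0.25.0
```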
```diff
@@ -71,8 +71,15 @@ fn bench_agg(mut group: InputGroup<Index>) {
     register!(group, histogram);
     register!(group, histogram_hard_bounds);
     register!(group, histogram_with_avg_sub_agg);
+    register!(group, histogram_with_term_agg_few);
     register!(group, avg_and_range_with_avg_sub_agg);
+
+    // Filter aggregation benchmarks
+    register!(group, filter_agg_all_query_count_agg);
+    register!(group, filter_agg_term_query_count_agg);
+    register!(group, filter_agg_all_query_with_sub_aggs);
+    register!(group, filter_agg_term_query_with_sub_aggs);
 
     group.run();
 }
 
@@ -339,6 +346,17 @@ fn histogram_with_avg_sub_agg(index: &Index) {
     });
     execute_agg(index, agg_req);
 }
+fn histogram_with_term_agg_few(index: &Index) {
+    let agg_req = json!({
+        "rangef64": {
+            "histogram": { "field": "score_f64", "interval": 10 },
+            "aggs": {
+                "my_texts": { "terms": { "field": "text_few_terms" } }
+            }
+        }
+    });
+    execute_agg(index, agg_req);
+}
 fn avg_and_range_with_avg_sub_agg(index: &Index) {
     let agg_req = json!({
         "rangef64": {
@@ -460,3 +478,61 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
 
     Ok(index)
 }
+
+// Filter aggregation benchmarks
+
+fn filter_agg_all_query_count_agg(index: &Index) {
+    let agg_req = json!({
+        "filtered": {
+            "filter": "*",
+            "aggs": {
+                "count": { "value_count": { "field": "score" } }
+            }
+        }
+    });
+    execute_agg(index, agg_req);
+}
+
+fn filter_agg_term_query_count_agg(index: &Index) {
+    let agg_req = json!({
+        "filtered": {
+            "filter": "text:cool",
+            "aggs": {
+                "count": { "value_count": { "field": "score" } }
+            }
+        }
+    });
+    execute_agg(index, agg_req);
+}
+
+fn filter_agg_all_query_with_sub_aggs(index: &Index) {
+    let agg_req = json!({
+        "filtered": {
+            "filter": "*",
+            "aggs": {
+                "avg_score": { "avg": { "field": "score" } },
+                "stats_score": { "stats": { "field": "score_f64" } },
+                "terms_text": {
+                    "terms": { "field": "text_few_terms" }
+                }
+            }
+        }
+    });
+    execute_agg(index, agg_req);
+}
+
+fn filter_agg_term_query_with_sub_aggs(index: &Index) {
+    let agg_req = json!({
+        "filtered": {
+            "filter": "text:cool",
+            "aggs": {
+                "avg_score": { "avg": { "field": "score" } },
+                "stats_score": { "stats": { "field": "score_f64" } },
+                "terms_text": {
+                    "terms": { "field": "text_few_terms" }
+                }
+            }
+        }
+    });
+    execute_agg(index, agg_req);
+}
```
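The filter-aggregation benchmark bodies above are plain JSON aggregation requests; the `execute_agg` helper they call is defined elsewhere in the bench and is not part of this hunk. Below is a plausible sketch of such a helper using tantivy's aggregation API; only the helper's name and call shape come from the diff, while the body, the `AllQuery`, and the default limits are assumptions:

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Index;

// Hypothetical stand-in for the bench's `execute_agg`: deserialize the JSON request
// into `Aggregations`, run it over the whole index, and return the result as JSON.
fn execute_agg(index: &Index, agg_req: serde_json::Value) -> tantivy::Result<serde_json::Value> {
    let aggs: Aggregations = serde_json::from_value(agg_req).expect("valid aggregation request");
    let collector = AggregationCollector::from_aggs(aggs, Default::default());
    let searcher = index.reader()?.searcher();
    let agg_res = searcher.search(&AllQuery, &collector)?;
    Ok(serde_json::to_value(agg_res).expect("serializable aggregation result"))
}
```

With a helper along those lines in scope, each benchmark above reduces to building a `json!` request and timing the search call.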
benches/and_or_queries.rs (new file, 224 lines)
@@ -0,0 +1,224 @@
// Benchmarks boolean AND/OR queries using binggan.
//
// What's measured:
// - OR and AND queries with varying selectivity (only `Term` queries on the leaves for now)
// - Nested AND/OR combinations (on multiple fields)
// - No-scoring path using the Count collector (focus on iterator/skip performance)
// - Top-K retrieval (k=10) using the TopDocs collector
//
// Corpus model:
// - Synthetic docs; each token a/b/c is independently included per doc
// - If none of a/b/c are included, emit a neutral filler token to keep doc length similar
//
// Notes:
// - After optimization, when scoring is disabled Tantivy reads doc-only postings
//   (IndexRecordOption::Basic), avoiding frequency-decoding overhead.
// - This bench isolates boolean iteration speed and intersection/union cost.
// - Use `cargo bench --bench and_or_queries` to run.
use binggan::{black_box, BenchRunner};
|
||||
use rand::prelude::*;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::SeedableRng;
|
||||
use tantivy::collector::{Count, TopDocs};
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Schema, TEXT};
|
||||
use tantivy::{doc, Index, ReloadPolicy, Searcher};
|
||||
|
||||
#[derive(Clone)]
|
||||
struct BenchIndex {
|
||||
#[allow(dead_code)]
|
||||
index: Index,
|
||||
searcher: Searcher,
|
||||
query_parser: QueryParser,
|
||||
}
|
||||
|
||||
impl BenchIndex {
|
||||
#[inline(always)]
|
||||
fn count_query(&self, query_str: &str) -> usize {
|
||||
let query = self.query_parser.parse_query(query_str).unwrap();
|
||||
self.searcher.search(&query, &Count).unwrap()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn topk_len(&self, query_str: &str, k: usize) -> usize {
|
||||
let query = self.query_parser.parse_query(query_str).unwrap();
|
||||
self.searcher
|
||||
.search(&query, &TopDocs::with_limit(k))
|
||||
.unwrap()
|
||||
.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a single index containing both fields (title, body) and
|
||||
/// return two BenchIndex views:
|
||||
/// - single_field: QueryParser defaults to only "body"
|
||||
/// - multi_field: QueryParser defaults to ["title", "body"]
|
||||
fn build_shared_indices(num_docs: usize, p_a: f32, p_b: f32, p_c: f32) -> (BenchIndex, BenchIndex) {
|
||||
// Unified schema (two text fields)
|
||||
let mut schema_builder = Schema::builder();
|
||||
let f_title = schema_builder.add_text_field("title", TEXT);
|
||||
let f_body = schema_builder.add_text_field("body", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
// Populate index with stable RNG for reproducibility.
|
||||
let mut rng = StdRng::from_seed([7u8; 32]);
|
||||
|
||||
// Populate: spread each present token 90/10 to body/title
|
||||
{
|
||||
let mut writer = index.writer(500_000_000).unwrap();
|
||||
for _ in 0..num_docs {
|
||||
let has_a = rng.gen_bool(p_a as f64);
|
||||
let has_b = rng.gen_bool(p_b as f64);
|
||||
let has_c = rng.gen_bool(p_c as f64);
|
||||
let mut title_tokens: Vec<&str> = Vec::new();
|
||||
let mut body_tokens: Vec<&str> = Vec::new();
|
||||
if has_a {
|
||||
if rng.gen_bool(0.1) {
|
||||
title_tokens.push("a");
|
||||
} else {
|
||||
body_tokens.push("a");
|
||||
}
|
||||
}
|
||||
if has_b {
|
||||
if rng.gen_bool(0.1) {
|
||||
title_tokens.push("b");
|
||||
} else {
|
||||
body_tokens.push("b");
|
||||
}
|
||||
}
|
||||
if has_c {
|
||||
if rng.gen_bool(0.1) {
|
||||
title_tokens.push("c");
|
||||
} else {
|
||||
body_tokens.push("c");
|
||||
}
|
||||
}
|
||||
if title_tokens.is_empty() && body_tokens.is_empty() {
|
||||
body_tokens.push("z");
|
||||
}
|
||||
writer
|
||||
.add_document(doc!(
|
||||
f_title=>title_tokens.join(" "),
|
||||
f_body=>body_tokens.join(" ")
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
writer.commit().unwrap();
|
||||
}
|
||||
|
||||
// Prepare reader/searcher once.
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::Manual)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// Build two query parsers with different default fields.
|
||||
let qp_single = QueryParser::for_index(&index, vec![f_body]);
|
||||
let qp_multi = QueryParser::for_index(&index, vec![f_title, f_body]);
|
||||
|
||||
let single_view = BenchIndex {
|
||||
index: index.clone(),
|
||||
searcher: searcher.clone(),
|
||||
query_parser: qp_single,
|
||||
};
|
||||
let multi_view = BenchIndex {
|
||||
index,
|
||||
searcher,
|
||||
query_parser: qp_multi,
|
||||
};
|
||||
(single_view, multi_view)
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Prepare corpora with varying selectivity. Build one index per corpus
|
||||
// and derive two views (single-field vs multi-field) from it.
|
||||
let scenarios = vec![
|
||||
(
|
||||
"N=1M, p(a)=5%, p(b)=1%, p(c)=15%".to_string(),
|
||||
1_000_000,
|
||||
0.05,
|
||||
0.01,
|
||||
0.15,
|
||||
),
|
||||
(
|
||||
"N=1M, p(a)=1%, p(b)=1%, p(c)=15%".to_string(),
|
||||
1_000_000,
|
||||
0.01,
|
||||
0.01,
|
||||
0.15,
|
||||
),
|
||||
];
|
||||
|
||||
let mut runner = BenchRunner::new();
|
||||
for (label, n, pa, pb, pc) in scenarios {
|
||||
let (single_view, multi_view) = build_shared_indices(n, pa, pb, pc);
|
||||
|
||||
// Single-field group: default field is body only
|
||||
{
|
||||
let mut group = runner.new_group();
|
||||
group.set_name(format!("single_field — {}", label));
|
||||
group.register_with_input("+a_+b_count", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("+a +b"))
|
||||
});
|
||||
group.register_with_input("+a_+b_+c_count", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("+a +b +c"))
|
||||
});
|
||||
group.register_with_input("+a_+b_top10", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("+a +b", 10))
|
||||
});
|
||||
group.register_with_input("+a_+b_+c_top10", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("+a +b +c", 10))
|
||||
});
|
||||
// OR queries
|
||||
group.register_with_input("a_OR_b_count", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("a OR b"))
|
||||
});
|
||||
group.register_with_input("a_OR_b_OR_c_count", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("a OR b OR c"))
|
||||
});
|
||||
group.register_with_input("a_OR_b_top10", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("a OR b", 10))
|
||||
});
|
||||
group.register_with_input("a_OR_b_OR_c_top10", &single_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("a OR b OR c", 10))
|
||||
});
|
||||
group.run();
|
||||
}
|
||||
|
||||
// Multi-field group: default fields are [title, body]
|
||||
{
|
||||
let mut group = runner.new_group();
|
||||
group.set_name(format!("multi_field — {}", label));
|
||||
group.register_with_input("+a_+b_count", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("+a +b"))
|
||||
});
|
||||
group.register_with_input("+a_+b_+c_count", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("+a +b +c"))
|
||||
});
|
||||
group.register_with_input("+a_+b_top10", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("+a +b", 10))
|
||||
});
|
||||
group.register_with_input("+a_+b_+c_top10", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("+a +b +c", 10))
|
||||
});
|
||||
// OR queries
|
||||
group.register_with_input("a_OR_b_count", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("a OR b"))
|
||||
});
|
||||
group.register_with_input("a_OR_b_OR_c_count", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.count_query("a OR b OR c"))
|
||||
});
|
||||
group.register_with_input("a_OR_b_top10", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("a OR b", 10))
|
||||
});
|
||||
group.register_with_input("a_OR_b_OR_c_top10", &multi_view, |benv: &BenchIndex| {
|
||||
black_box(benv.topk_len("a OR b OR c", 10))
|
||||
});
|
||||
group.run();
|
||||
}
|
||||
}
|
||||
}
|
||||
benches/exists_json.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
use binggan::plugins::PeakMemAllocPlugin;
|
||||
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
|
||||
use serde_json::json;
|
||||
use tantivy::collector::Count;
|
||||
use tantivy::query::ExistsQuery;
|
||||
use tantivy::schema::{Schema, FAST, TEXT};
|
||||
use tantivy::{doc, Index};
|
||||
|
||||
#[global_allocator]
|
||||
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;
|
||||
|
||||
fn main() {
|
||||
let doc_count: usize = 500_000;
|
||||
let subfield_counts: &[usize] = &[1, 2, 3, 4, 5, 6, 7, 8, 16, 256, 4096, 65536, 262144];
|
||||
|
||||
let indices: Vec<(String, Index)> = subfield_counts
|
||||
.iter()
|
||||
.map(|&sub_fields| {
|
||||
(
|
||||
format!("subfields={sub_fields}"),
|
||||
build_index_with_json_subfields(doc_count, sub_fields),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut group = InputGroup::new_with_inputs(indices);
|
||||
group.add_plugin(PeakMemAllocPlugin::new(GLOBAL));
|
||||
|
||||
group.config().num_iter_group = Some(1);
|
||||
group.config().num_iter_bench = Some(1);
|
||||
group.register("exists_json", exists_json_union);
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
fn exists_json_union(index: &Index) {
|
||||
let reader = index.reader().expect("reader");
|
||||
let searcher = reader.searcher();
|
||||
let query = ExistsQuery::new("json".to_string(), true);
|
||||
let count = searcher.search(&query, &Count).expect("exists search");
|
||||
// Prevents optimizer from eliding the search
|
||||
black_box(count);
|
||||
}
|
||||
|
||||
fn build_index_with_json_subfields(num_docs: usize, num_subfields: usize) -> Index {
|
||||
// Schema: single JSON field stored as FAST to support ExistsQuery.
|
||||
let mut schema_builder = Schema::builder();
|
||||
let json_field = schema_builder.add_json_field("json", TEXT | FAST);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_from_tempdir(schema).expect("create index");
|
||||
{
|
||||
let mut index_writer = index
|
||||
.writer_with_num_threads(1, 200_000_000)
|
||||
.expect("writer");
|
||||
for i in 0..num_docs {
|
||||
let sub = i % num_subfields;
|
||||
// Only one subpath set per document; rotate subpaths so that
|
||||
// no single subpath is full, but the union covers all docs.
|
||||
let v = json!({ format!("field_{sub}"): i as u64 });
|
||||
index_writer
|
||||
.add_document(doc!(json_field => v))
|
||||
.expect("add_document");
|
||||
}
|
||||
index_writer.commit().expect("commit");
|
||||
}
|
||||
|
||||
index
|
||||
}
|
||||
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-bitpacker"
-version = "0.8.0"
+version = "0.9.0"
 edition = "2024"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
@@ -48,7 +48,7 @@ impl BitPacker {
 
     pub fn flush<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
         if self.mini_buffer_written > 0 {
-            let num_bytes = (self.mini_buffer_written + 7) / 8;
+            let num_bytes = self.mini_buffer_written.div_ceil(8);
             let bytes = self.mini_buffer.to_le_bytes();
             output.write_all(&bytes[..num_bytes])?;
             self.mini_buffer_written = 0;
@@ -138,7 +138,7 @@ impl BitUnpacker {
 
         // We use `usize` here to avoid overflow issues.
        let end_bit_read = (end_idx as usize) * self.num_bits;
-        let end_byte_read = (end_bit_read + 7) / 8;
+        let end_byte_read = end_bit_read.div_ceil(8);
         assert!(
             end_byte_read <= data.len(),
             "Requested index is out of bounds."
@@ -140,10 +140,10 @@ impl BlockedBitpacker {
     pub fn iter(&self) -> impl Iterator<Item = u64> + '_ {
         // todo performance: we could decompress a whole block and cache it instead
         let bitpacked_elems = self.offset_and_bits.len() * BLOCK_SIZE;
-        let iter = (0..bitpacked_elems)
+
+        (0..bitpacked_elems)
             .map(move |idx| self.get(idx))
-            .chain(self.buffer.iter().cloned());
-        iter
+            .chain(self.buffer.iter().cloned())
     }
 }
 
```
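The two `(x + 7) / 8` to `div_ceil(8)` hunks above are equivalent for the sizes involved: `div_ceil` (stable since Rust 1.73) rounds an integer division up, which is exactly what the manual idiom did for the bits-to-bytes conversion. A tiny standalone check of the equivalence (values chosen arbitrarily):

```rust
fn main() {
    // Number of bytes needed to hold n bits: both forms round up to the next whole byte.
    for n_bits in [0usize, 1, 7, 8, 9, 63, 64, 65, 1_000_003] {
        assert_eq!((n_bits + 7) / 8, n_bits.div_ceil(8));
    }
    println!("(n + 7) / 8 and n.div_ceil(8) agree");
}
```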
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-columnar"
|
||||
version = "0.5.0"
|
||||
version = "0.6.0"
|
||||
edition = "2024"
|
||||
license = "MIT"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
@@ -12,10 +12,10 @@ categories = ["database-implementations", "data-structures", "compression"]
|
||||
itertools = "0.14.0"
|
||||
fastdivide = "0.4.0"
|
||||
|
||||
stacker = { version= "0.5", path = "../stacker", package="tantivy-stacker"}
|
||||
sstable = { version= "0.5", path = "../sstable", package = "tantivy-sstable" }
|
||||
common = { version= "0.9", path = "../common", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version= "0.8", path = "../bitpacker/" }
|
||||
stacker = { version= "0.6", path = "../stacker", package="tantivy-stacker"}
|
||||
sstable = { version= "0.6", path = "../sstable", package = "tantivy-sstable" }
|
||||
common = { version= "0.10", path = "../common", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version= "0.9", path = "../bitpacker/" }
|
||||
serde = "1.0.152"
|
||||
downcast-rs = "2.0.1"
|
||||
|
||||
@@ -33,6 +33,29 @@ harness = false
|
||||
name = "bench_access"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_first_vals"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_values_u64"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_values_u128"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_create_column_values"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_column_values_get"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_optional_index"
|
||||
harness = false
|
||||
|
||||
[features]
|
||||
unstable = []
|
||||
zstd-compression = ["sstable/zstd-compression"]
|
||||
|
||||
@@ -19,7 +19,7 @@ fn main() {
|
||||
|
||||
let mut add_card = |card1: Card| {
|
||||
inputs.push((
|
||||
format!("{card1}"),
|
||||
card1.to_string(),
|
||||
generate_columnar_and_open(card1, NUM_DOCS),
|
||||
));
|
||||
};
|
||||
@@ -50,6 +50,7 @@ fn bench_group(mut runner: InputGroup<Column>) {
|
||||
let mut buffer = vec![None; BLOCK_SIZE];
|
||||
for i in (0..NUM_DOCS).step_by(BLOCK_SIZE) {
|
||||
// fill docs
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for idx in 0..BLOCK_SIZE {
|
||||
docs[idx] = idx as u32 + i;
|
||||
}
|
||||
|
||||
columnar/benches/bench_column_values_get.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use tantivy_columnar::ColumnValues;
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_column_values};
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55_000_u64)
|
||||
.map(|num| num + rng.r#gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
|
||||
type Col = Arc<dyn ColumnValues<u64>>;
|
||||
|
||||
fn main() {
|
||||
let data = get_data();
|
||||
let inputs: Vec<(String, Col)> = vec![
|
||||
(
|
||||
"bitpacked".to_string(),
|
||||
serialize_and_load_u64_based_column_values(&data.as_slice(), &[CodecType::Bitpacked]),
|
||||
),
|
||||
(
|
||||
"linear".to_string(),
|
||||
serialize_and_load_u64_based_column_values(&data.as_slice(), &[CodecType::Linear]),
|
||||
),
|
||||
(
|
||||
"blockwise_linear".to_string(),
|
||||
serialize_and_load_u64_based_column_values(
|
||||
&data.as_slice(),
|
||||
&[CodecType::BlockwiseLinear],
|
||||
),
|
||||
),
|
||||
];
|
||||
|
||||
let mut group: InputGroup<Col> = InputGroup::new_with_inputs(inputs);
|
||||
|
||||
group.register("fastfield_get", |col: &Col| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
sum = sum.wrapping_add(col.get_val(pos as u32));
|
||||
}
|
||||
black_box(sum);
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
columnar/benches/bench_create_column_values.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_u64_based_column_values};
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55_000_u64)
|
||||
.map(|num| num + rng.r#gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let data = get_data();
|
||||
let mut group: InputGroup<(CodecType, Vec<u64>)> = InputGroup::new_with_inputs(vec![
|
||||
(
|
||||
"bitpacked codec".to_string(),
|
||||
(CodecType::Bitpacked, data.clone()),
|
||||
),
|
||||
(
|
||||
"linear codec".to_string(),
|
||||
(CodecType::Linear, data.clone()),
|
||||
),
|
||||
(
|
||||
"blockwise linear codec".to_string(),
|
||||
(CodecType::BlockwiseLinear, data.clone()),
|
||||
),
|
||||
]);
|
||||
|
||||
group.register("serialize column_values", |data| {
|
||||
let mut buffer = Vec::new();
|
||||
serialize_u64_based_column_values(&data.1.as_slice(), &[data.0], &mut buffer).unwrap();
|
||||
black_box(buffer.len());
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
@@ -1,12 +1,9 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_column_values};
|
||||
use tantivy_columnar::*;
|
||||
use test::{Bencher, black_box};
|
||||
|
||||
struct Columns {
|
||||
pub optional: Column,
|
||||
@@ -68,88 +65,45 @@ pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn Colu
|
||||
serialize_and_load_u64_based_column_values(&column, &[codec_type])
|
||||
}
|
||||
|
||||
fn run_bench_on_column_full_scan(b: &mut Bencher, column: Column) {
|
||||
let num_iter = black_box(NUM_VALUES);
|
||||
b.iter(|| {
|
||||
fn main() {
|
||||
let Columns {
|
||||
optional,
|
||||
full,
|
||||
multi,
|
||||
} = get_test_columns();
|
||||
|
||||
let inputs = vec![
|
||||
("full".to_string(), full),
|
||||
("optional".to_string(), optional),
|
||||
("multi".to_string(), multi),
|
||||
];
|
||||
|
||||
let mut group = InputGroup::new_with_inputs(inputs);
|
||||
|
||||
group.register("first_full_scan", |column| {
|
||||
let mut sum = 0u64;
|
||||
for i in 0..num_iter as u32 {
|
||||
for i in 0..NUM_VALUES as u32 {
|
||||
let val = column.first(i);
|
||||
sum += val.unwrap_or(0);
|
||||
}
|
||||
sum
|
||||
black_box(sum);
|
||||
});
|
||||
}
|
||||
fn run_bench_on_column_block_fetch(b: &mut Bencher, column: Column) {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
b.iter(move || {
|
||||
|
||||
group.register("first_block_fetch", |column| {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
column.first_vals(&fetch_docids, &mut block);
|
||||
block[0]
|
||||
black_box(block[0]);
|
||||
});
|
||||
}
|
||||
fn run_bench_on_column_block_single_calls(b: &mut Bencher, column: Column) {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
b.iter(move || {
|
||||
|
||||
group.register("first_block_single_calls", |column| {
|
||||
let mut block: Vec<Option<u64>> = vec![None; 64];
|
||||
let fetch_docids = (0..64).collect::<Vec<_>>();
|
||||
for i in 0..fetch_docids.len() {
|
||||
block[i] = column.first(fetch_docids[i]);
|
||||
}
|
||||
block[0]
|
||||
black_box(block[0]);
|
||||
});
|
||||
}
|
||||
|
||||
/// Column first method
|
||||
#[bench]
|
||||
fn bench_get_first_on_full_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_first_on_optional_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_first_on_multi_column_full_scan(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_full_scan(b, column);
|
||||
}
|
||||
|
||||
/// Block fetch column accessor
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_optional_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_multi_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_full_column(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_block_fetch(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_optional_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().optional;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_multi_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().multi;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_get_block_first_on_full_column_single_calls(b: &mut Bencher) {
|
||||
let column = get_test_columns().full;
|
||||
run_bench_on_column_block_single_calls(b, column);
|
||||
group.run();
|
||||
}
|
||||
|
||||
columnar/benches/bench_optional_index.rs (new file, 106 lines)
@@ -0,0 +1,106 @@
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use tantivy_columnar::column_index::{OptionalIndex, Set};
|
||||
|
||||
const TOTAL_NUM_VALUES: u32 = 1_000_000;
|
||||
|
||||
fn gen_optional_index(fill_ratio: f64) -> OptionalIndex {
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let vals: Vec<u32> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| *val)
|
||||
.map(|(pos, _)| pos as u32)
|
||||
.collect();
|
||||
OptionalIndex::for_test(TOTAL_NUM_VALUES, &vals)
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
start: u32,
|
||||
end: u32,
|
||||
avg_step_size: u32,
|
||||
avg_deviation: u32,
|
||||
) -> impl Iterator<Item = u32> {
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let mut current = start;
|
||||
std::iter::from_fn(move || {
|
||||
current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
|
||||
if current >= end { None } else { Some(current) }
|
||||
})
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
}
|
||||
|
||||
fn walk_over_data(codec: &OptionalIndex, avg_step_size: u32) -> Option<u32> {
|
||||
walk_over_data_from_positions(
|
||||
codec,
|
||||
random_range_iterator(0, TOTAL_NUM_VALUES, avg_step_size, 0),
|
||||
)
|
||||
}
|
||||
|
||||
fn walk_over_data_from_positions(
|
||||
codec: &OptionalIndex,
|
||||
positions: impl Iterator<Item = u32>,
|
||||
) -> Option<u32> {
|
||||
let mut dense_idx: Option<u32> = None;
|
||||
for idx in positions {
|
||||
dense_idx = dense_idx.or(codec.rank_if_exists(idx));
|
||||
}
|
||||
dense_idx
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Build separate inputs for each fill ratio.
|
||||
let inputs: Vec<(String, OptionalIndex)> = vec![
|
||||
("fill=1%".to_string(), gen_optional_index(0.01)),
|
||||
("fill=5%".to_string(), gen_optional_index(0.05)),
|
||||
("fill=10%".to_string(), gen_optional_index(0.10)),
|
||||
("fill=50%".to_string(), gen_optional_index(0.50)),
|
||||
("fill=90%".to_string(), gen_optional_index(0.90)),
|
||||
];
|
||||
|
||||
let mut group: InputGroup<OptionalIndex> = InputGroup::new_with_inputs(inputs);
|
||||
|
||||
// Translate orig->codec (rank_if_exists) with sampling
|
||||
group.register("orig_to_codec_10pct_hit", |codec: &OptionalIndex| {
|
||||
black_box(walk_over_data(codec, 100));
|
||||
});
|
||||
group.register("orig_to_codec_1pct_hit", |codec: &OptionalIndex| {
|
||||
black_box(walk_over_data(codec, 1000));
|
||||
});
|
||||
group.register("orig_to_codec_full_scan", |codec: &OptionalIndex| {
|
||||
black_box(walk_over_data_from_positions(codec, 0..TOTAL_NUM_VALUES));
|
||||
});
|
||||
|
||||
// Translate codec->orig (select/select_batch) on sampled ranks
|
||||
fn bench_translate_codec_to_orig_util(codec: &OptionalIndex, percent_hit: f32) {
|
||||
let num_non_nulls = codec.num_non_nulls();
|
||||
let idxs: Vec<u32> = if percent_hit == 100.0f32 {
|
||||
(0..num_non_nulls).collect()
|
||||
} else {
|
||||
n_percent_step_iterator(percent_hit, num_non_nulls).collect()
|
||||
};
|
||||
let mut output = vec![0u32; idxs.len()];
|
||||
output.copy_from_slice(&idxs[..]);
|
||||
codec.select_batch(&mut output);
|
||||
black_box(output);
|
||||
}
|
||||
|
||||
group.register("codec_to_orig_0.005pct_hit", |codec: &OptionalIndex| {
|
||||
bench_translate_codec_to_orig_util(codec, 0.005);
|
||||
});
|
||||
group.register("codec_to_orig_10pct_hit", |codec: &OptionalIndex| {
|
||||
bench_translate_codec_to_orig_util(codec, 10.0);
|
||||
});
|
||||
group.register("codec_to_orig_full_scan", |codec: &OptionalIndex| {
|
||||
bench_translate_codec_to_orig_util(codec, 100.0);
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
@@ -1,15 +1,12 @@
|
||||
#![feature(test)]
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use common::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::{Rng, SeedableRng, random};
|
||||
use tantivy_columnar::ColumnValues;
|
||||
use test::Bencher;
|
||||
extern crate test;
|
||||
|
||||
// TODO does this make sense for IPv6 ?
|
||||
fn generate_random() -> Vec<u64> {
|
||||
@@ -47,78 +44,77 @@ fn get_data_50percent_item() -> Vec<u128> {
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
data.iter().map(|el| *el as u128).collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
|
||||
fn main() {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
let column_range = get_u128_column_from_data(&data);
|
||||
let column_random = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
struct Inputs {
|
||||
data: Vec<u128>,
|
||||
column_range: Arc<dyn ColumnValues<u128>>,
|
||||
column_random: Arc<dyn ColumnValues<u128>>,
|
||||
}
|
||||
|
||||
let inputs = Inputs {
|
||||
data,
|
||||
column_range,
|
||||
column_random,
|
||||
};
|
||||
let mut group: InputGroup<Inputs> =
|
||||
InputGroup::new_with_inputs(vec![("u128 benches".to_string(), inputs)]);
|
||||
|
||||
group.register(
|
||||
"intfastfield_getrange_u128_50percent_hit",
|
||||
|inp: &Inputs| {
|
||||
let mut positions = Vec::new();
|
||||
inp.column_range.get_row_ids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..inp.data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register("intfastfield_getrange_u128_single_hit", |inp: &Inputs| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(
|
||||
inp.column_range.get_row_ids_for_value_range(
|
||||
*SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
0..inp.data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
black_box(positions.len());
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
group.register("intfastfield_getrange_u128_hit_all", |inp: &Inputs| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
inp.column_range.get_row_ids_for_value_range(
|
||||
0..=u128::MAX,
|
||||
0..inp.data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
black_box(positions.len());
|
||||
});
|
||||
}
|
||||
// U128 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
group.register("intfastfield_scan_all_fflookup_u128", |inp: &Inputs| {
|
||||
let mut a = 0u128;
|
||||
for i in 0u64..column.num_vals() as u64 {
|
||||
a += column.get_val(i as u32);
|
||||
for i in 0u64..inp.column_random.num_vals() as u64 {
|
||||
a += inp.column_random.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
black_box(a);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let n = column.num_vals();
|
||||
group.register("intfastfield_jumpy_stride5_u128", |inp: &Inputs| {
|
||||
let n = inp.column_random.num_vals();
|
||||
let mut a = 0u128;
|
||||
for i in (0..n / 5).map(|val| val * 5) {
|
||||
a += column.get_val(i);
|
||||
a += inp.column_random.get_val(i);
|
||||
}
|
||||
a
|
||||
black_box(a);
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use binggan::{InputGroup, black_box};
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_column_values};
|
||||
use tantivy_columnar::*;
|
||||
use test::Bencher;
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
@@ -27,37 +24,11 @@ pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn Colu
|
||||
serialize_and_load_u64_based_column_values(&column, &[codec_type])
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_fflookup_bitpacked(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
|
||||
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
@@ -69,135 +40,122 @@ fn get_data_50percent_item() -> Vec<u128> {
|
||||
data.push(SINGLE_ITEM);
|
||||
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
data.iter().map(|el| *el as u128).collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
// U64 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(
|
||||
FIFTY_PERCENT_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
type VecCol = (Vec<u64>, Arc<dyn ColumnValues<u64>>);
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_row_ids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U64 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
fn bench_access() {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let column_perm: Arc<dyn ColumnValues<u64>> =
|
||||
serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
|
||||
let permutation_gcd = generate_permutation_gcd();
|
||||
let column_perm_gcd: Arc<dyn ColumnValues<u64>> =
|
||||
serialize_and_load(&permutation_gcd, CodecType::Bitpacked);
|
||||
|
||||
let mut group: InputGroup<VecCol> = InputGroup::new_with_inputs(vec![
|
||||
(
|
||||
"access".to_string(),
|
||||
(permutation.clone(), column_perm.clone()),
|
||||
),
|
||||
(
|
||||
"access_gcd".to_string(),
|
||||
(permutation_gcd.clone(), column_perm_gcd.clone()),
|
||||
),
|
||||
]);
|
||||
|
||||
group.register("stride7_vec", |inp: &VecCol| {
|
||||
let n = inp.0.len();
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += permutation[i as usize];
|
||||
a += inp.0[i];
|
||||
}
|
||||
a
|
||||
black_box(a);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0;
|
||||
group.register("fullscan_vec", |inp: &VecCol| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..inp.0.len() {
|
||||
a += inp.0[i];
|
||||
}
|
||||
black_box(a);
|
||||
});
|
||||
|
||||
group.register("stride7_column_values", |inp: &VecCol| {
|
||||
let n = inp.1.num_vals() as usize;
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += column.get_val(i as u32);
|
||||
a += inp.1.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
black_box(a);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
let column_ref = column.as_ref();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u32..n as u32 {
|
||||
a += column_ref.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
group.register("fullscan_column_values", |inp: &VecCol| {
|
||||
let mut a = 0u64;
|
||||
let n = inp.1.num_vals() as usize;
|
||||
for i in 0..n {
|
||||
a += column.get_val(i as u32);
|
||||
a += inp.1.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
black_box(a);
|
||||
});
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
fn bench_range() {
|
||||
let data_50 = get_data_50percent_item();
|
||||
let data_u64 = data_50.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column_data: Arc<dyn ColumnValues<u64>> =
|
||||
serialize_and_load(&data_u64, CodecType::Bitpacked);
|
||||
|
||||
let mut group: InputGroup<Arc<dyn ColumnValues<u64>>> =
|
||||
InputGroup::new_with_inputs(vec![("dist_50pct_item".to_string(), column_data.clone())]);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_50percent_hit",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(FIFTY_PERCENT_RANGE, 0..col.num_vals(), &mut positions);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_1percent_hit",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..col.num_vals(),
|
||||
&mut positions,
|
||||
);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_single_hit",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(SINGLE_ITEM_RANGE, 0..col.num_vals(), &mut positions);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.register(
|
||||
"fastfield_getrange_u64_hit_all",
|
||||
|col: &Arc<dyn ColumnValues<u64>>| {
|
||||
let mut positions = Vec::new();
|
||||
col.get_row_ids_for_value_range(0..=u64::MAX, 0..col.num_vals(), &mut positions);
|
||||
black_box(positions.len());
|
||||
},
|
||||
);
|
||||
|
||||
group.run();
|
||||
}
|
||||
|
||||
fn main() {
|
||||
bench_access();
|
||||
bench_range();
|
||||
}
|
||||
|
||||
```diff
@@ -114,7 +114,7 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
     }
 }
 
-/// Translates a block of docis to row_ids.
+/// Translates a block of docids to row_ids.
 ///
 /// returns the row_ids and the matching docids on the same index
 /// e.g.
```
|
||||
@@ -56,7 +56,7 @@ fn get_doc_ids_with_values<'a>(
|
||||
ColumnIndex::Full => Box::new(doc_range),
|
||||
ColumnIndex::Optional(optional_index) => Box::new(
|
||||
optional_index
|
||||
.iter_docs()
|
||||
.iter_non_null_docs()
|
||||
.map(move |row| row + doc_range.start),
|
||||
),
|
||||
ColumnIndex::Multivalued(multivalued_index) => match multivalued_index {
|
||||
@@ -73,7 +73,7 @@ fn get_doc_ids_with_values<'a>(
|
||||
MultiValueIndex::MultiValueIndexV2(multivalued_index) => Box::new(
|
||||
multivalued_index
|
||||
.optional_index
|
||||
.iter_docs()
|
||||
.iter_non_null_docs()
|
||||
.map(move |row| row + doc_range.start),
|
||||
),
|
||||
},
|
||||
@@ -105,10 +105,11 @@ fn get_num_values_iterator<'a>(
|
||||
) -> Box<dyn Iterator<Item = u32> + 'a> {
|
||||
match column_index {
|
||||
ColumnIndex::Empty { .. } => Box::new(std::iter::empty()),
|
||||
ColumnIndex::Full => Box::new(std::iter::repeat(1u32).take(num_docs as usize)),
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
Box::new(std::iter::repeat(1u32).take(optional_index.num_non_nulls() as usize))
|
||||
}
|
||||
ColumnIndex::Full => Box::new(std::iter::repeat_n(1u32, num_docs as usize)),
|
||||
ColumnIndex::Optional(optional_index) => Box::new(std::iter::repeat_n(
|
||||
1u32,
|
||||
optional_index.num_non_nulls() as usize,
|
||||
)),
|
||||
ColumnIndex::Multivalued(multivalued_index) => Box::new(
|
||||
multivalued_index
|
||||
.get_start_index_column()
|
||||
@@ -177,7 +178,7 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
|
||||
ColumnIndex::Full => Box::new(columnar_row_range),
|
||||
ColumnIndex::Optional(optional_index) => Box::new(
|
||||
optional_index
|
||||
.iter_docs()
|
||||
.iter_non_null_docs()
|
||||
.map(move |row_id: RowId| columnar_row_range.start + row_id),
|
||||
),
|
||||
ColumnIndex::Multivalued(_) => {
|
||||
|
||||
@@ -215,6 +215,32 @@ impl MultiValueIndex {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an iterator over document ids that have at least one value.
|
||||
pub fn iter_non_null_docs(&self) -> Box<dyn Iterator<Item = DocId> + '_> {
|
||||
match self {
|
||||
MultiValueIndex::MultiValueIndexV1(idx) => {
|
||||
let mut doc: DocId = 0u32;
|
||||
let num_docs = idx.num_docs();
|
||||
Box::new(std::iter::from_fn(move || {
|
||||
// This is not the most efficient way to do this, but it's legacy code.
|
||||
while doc < num_docs {
|
||||
let cur = doc;
|
||||
doc += 1;
|
||||
let start = idx.start_index_column.get_val(cur);
|
||||
let end = idx.start_index_column.get_val(cur + 1);
|
||||
if end > start {
|
||||
return Some(cur);
|
||||
}
|
||||
}
|
||||
None
|
||||
}))
|
||||
}
|
||||
MultiValueIndex::MultiValueIndexV2(idx) => {
|
||||
Box::new(idx.optional_index.iter_non_null_docs())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
|
||||
/// docids. Positions are converted inplace to docids.
|
||||
///
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::io::{self, Write};
|
||||
use std::io;
|
||||
use std::sync::Arc;
|
||||
|
||||
mod set;
|
||||
@@ -11,7 +11,7 @@ use set_block::{
|
||||
};
|
||||
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{DocId, InvalidData, RowId};
|
||||
use crate::{DocId, RowId};
|
||||
|
||||
/// The threshold for for number of elements after which we switch to dense block encoding.
|
||||
///
|
||||
@@ -88,7 +88,7 @@ pub struct OptionalIndex {
|
||||
|
||||
impl Iterable<u32> for &OptionalIndex {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
|
||||
Box::new(self.iter_docs())
|
||||
Box::new(self.iter_non_null_docs())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -280,8 +280,9 @@ impl OptionalIndex {
|
||||
self.num_non_null_docs
|
||||
}
|
||||
|
||||
pub fn iter_docs(&self) -> impl Iterator<Item = RowId> + '_ {
|
||||
// TODO optimize
|
||||
pub fn iter_non_null_docs(&self) -> impl Iterator<Item = RowId> + '_ {
|
||||
// TODO optimize. We could iterate over the blocks directly.
|
||||
// We use the dense value ids and retrieve the doc ids via select.
|
||||
let mut select_batch = self.select_cursor();
|
||||
(0..self.num_non_null_docs).map(move |rank| select_batch.select(rank))
|
||||
}
|
||||
@@ -334,38 +335,6 @@ enum Block<'a> {
|
||||
Sparse(SparseBlock<'a>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
enum OptionalIndexCodec {
|
||||
Dense = 0,
|
||||
Sparse = 1,
|
||||
}
|
||||
|
||||
impl OptionalIndexCodec {
|
||||
fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
fn try_from_code(code: u8) -> Result<Self, InvalidData> {
|
||||
match code {
|
||||
0 => Ok(Self::Dense),
|
||||
1 => Ok(Self::Sparse),
|
||||
_ => Err(InvalidData),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for OptionalIndexCodec {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_all(&[self.to_code()])
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let optional_codec_code = u8::deserialize(reader)?;
|
||||
let optional_codec = Self::try_from_code(optional_codec_code)?;
|
||||
Ok(optional_codec)
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_optional_index_block(block_els: &[u16], out: &mut impl io::Write) -> io::Result<()> {
|
||||
let is_sparse = is_sparse(block_els.len() as u32);
|
||||
if is_sparse {
|
||||
|
||||
@@ -164,7 +164,11 @@ fn test_optional_index_large() {
|
||||
fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
|
||||
let optional_index = OptionalIndex::for_test(num_rows, row_ids);
|
||||
assert_eq!(optional_index.num_docs(), num_rows);
|
||||
assert!(optional_index.iter_docs().eq(row_ids.iter().copied()));
|
||||
assert!(
|
||||
optional_index
|
||||
.iter_non_null_docs()
|
||||
.eq(row_ids.iter().copied())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -219,170 +223,3 @@ fn test_optional_index_for_tests() {
|
||||
assert!(!optional_index.contains(3));
|
||||
assert_eq!(optional_index.num_docs(), 4);
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use test::Bencher;
|
||||
|
||||
use super::*;
|
||||
|
||||
const TOTAL_NUM_VALUES: u32 = 1_000_000;
|
||||
fn gen_bools(fill_ratio: f64) -> OptionalIndex {
|
||||
let mut out = Vec::new();
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| *val)
|
||||
.map(|(pos, _)| pos as RowId)
|
||||
.collect();
|
||||
serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
|
||||
|
||||
open_optional_index(OwnedBytes::new(out)).unwrap()
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
start: u32,
|
||||
end: u32,
|
||||
avg_step_size: u32,
|
||||
avg_deviation: u32,
|
||||
) -> impl Iterator<Item = u32> {
|
||||
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
|
||||
let mut current = start;
|
||||
std::iter::from_fn(move || {
|
||||
current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
|
||||
if current >= end { None } else { Some(current) }
|
||||
})
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
}
|
||||
|
||||
fn walk_over_data(codec: &OptionalIndex, avg_step_size: u32) -> Option<u32> {
|
||||
walk_over_data_from_positions(
|
||||
codec,
|
||||
random_range_iterator(0, TOTAL_NUM_VALUES, avg_step_size, 0),
|
||||
)
|
||||
}
|
||||
|
||||
fn walk_over_data_from_positions(
|
||||
codec: &OptionalIndex,
|
||||
positions: impl Iterator<Item = u32>,
|
||||
) -> Option<u32> {
|
||||
let mut dense_idx: Option<u32> = None;
|
||||
for idx in positions {
|
||||
dense_idx = dense_idx.or(codec.rank_if_exists(idx));
|
||||
}
|
||||
dense_idx
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_1percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.01f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_5percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.05f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_5percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.05f64);
|
||||
bench.iter(|| walk_over_data(&codec, 1000));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_1percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.01f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_10percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.1f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_full_scan_90percent_filled(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.9f64);
|
||||
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_10percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.1f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_50percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.5f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_orig_to_codec_90percent_filled_1percent_hit(bench: &mut Bencher) {
|
||||
let codec = gen_bools(0.9f64);
|
||||
bench.iter(|| walk_over_data(&codec, 100));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 0.005f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_10percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.1f64, 0.005f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_10percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 10f32, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_1percent_filled_full_scan(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.01f64, 100f32, bench);
|
||||
}
|
||||
|
||||
fn bench_translate_codec_to_orig_util(
|
||||
percent_filled: f64,
|
||||
percent_hit: f32,
|
||||
bench: &mut Bencher,
|
||||
) {
|
||||
let codec = gen_bools(percent_filled);
|
||||
let num_non_nulls = codec.num_non_nulls();
|
||||
let idxs: Vec<u32> = if percent_hit == 100.0f32 {
|
||||
(0..num_non_nulls).collect()
|
||||
} else {
|
||||
n_percent_step_iterator(percent_hit, num_non_nulls).collect()
|
||||
};
|
||||
let mut output = vec![0u32; idxs.len()];
|
||||
bench.iter(|| {
|
||||
output.copy_from_slice(&idxs[..]);
|
||||
codec.select_batch(&mut output);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_90percent_filled_0comma005percent_hit(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.9f64, 0.005, bench);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
|
||||
bench_translate_codec_to_orig_util(0.9f64, 100.0f32, bench);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,139 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use test::{self, Bencher};
|
||||
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::*;
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55000_u64)
|
||||
.map(|num| num + rng.r#gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
fn compute_stats(vals: impl Iterator<Item = u64>) -> ColumnStats {
|
||||
let mut stats_collector = StatsCollector::default();
|
||||
for val in vals {
|
||||
stats_collector.collect(val);
|
||||
}
|
||||
stats_collector.stats()
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
|
||||
fn get_reader_for_bench<Codec: ColumnCodec>(data: &[u64]) -> Codec::ColumnValues {
|
||||
let mut bytes = Vec::new();
|
||||
let stats = compute_stats(data.iter().cloned());
|
||||
let mut codec_serializer = Codec::estimator();
|
||||
for val in data {
|
||||
codec_serializer.collect(*val);
|
||||
}
|
||||
codec_serializer
|
||||
.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes)
|
||||
.unwrap();
|
||||
|
||||
Codec::load(OwnedBytes::new(bytes)).unwrap()
|
||||
}
|
||||
|
||||
fn bench_get<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = get_reader_for_bench::<Codec>(data);
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u32);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn bench_get_dynamic_helper(b: &mut Bencher, col: Arc<dyn ColumnValues>) {
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u32);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_get_dynamic<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = Arc::new(get_reader_for_bench::<Codec>(data));
|
||||
bench_get_dynamic_helper(b, col);
|
||||
}
|
||||
fn bench_create<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let stats = compute_stats(data.iter().cloned());
|
||||
|
||||
let mut bytes = Vec::new();
|
||||
b.iter(|| {
|
||||
bytes.clear();
|
||||
let mut codec_serializer = Codec::estimator();
|
||||
for val in data.iter().take(1024) {
|
||||
codec_serializer.collect(*val);
|
||||
}
|
||||
|
||||
codec_serializer.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes)
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
@@ -242,6 +242,3 @@ impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnV
|
||||
.get_row_ids_for_value_range(range, doc_id_range, positions)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench;
|
||||
|
||||
@@ -185,10 +185,10 @@ impl CompactSpaceBuilder {
|
||||
let mut covered_space = Vec::with_capacity(self.blanks.len());
|
||||
|
||||
// beginning of the blanks
|
||||
if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start) {
|
||||
if *first_blank_start != 0 {
|
||||
covered_space.push(0..=first_blank_start - 1);
|
||||
}
|
||||
if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start)
|
||||
&& *first_blank_start != 0
|
||||
{
|
||||
covered_space.push(0..=first_blank_start - 1);
|
||||
}
|
||||
|
||||
// Between the blanks
|
||||
@@ -202,10 +202,10 @@ impl CompactSpaceBuilder {
|
||||
covered_space.extend(between_blanks);
|
||||
|
||||
// end of the blanks
|
||||
if let Some(last_blank_end) = self.blanks.last().map(RangeInclusive::end) {
|
||||
if *last_blank_end != u128::MAX {
|
||||
covered_space.push(last_blank_end + 1..=u128::MAX);
|
||||
}
|
||||
if let Some(last_blank_end) = self.blanks.last().map(RangeInclusive::end)
|
||||
&& *last_blank_end != u128::MAX
|
||||
{
|
||||
covered_space.push(last_blank_end + 1..=u128::MAX);
|
||||
}
|
||||
|
||||
if covered_space.is_empty() {
|
||||
|
||||
@@ -105,7 +105,7 @@ impl ColumnCodecEstimator for BitpackedCodecEstimator {
|
||||
|
||||
fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
|
||||
let num_bits_per_value = num_bits(stats);
|
||||
Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64) + 7) / 8)
|
||||
Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64)).div_ceil(8))
|
||||
}
|
||||
|
||||
fn serialize(
|
||||
|
||||
@@ -117,7 +117,7 @@ impl ColumnCodecEstimator for LinearCodecEstimator {
|
||||
Some(
|
||||
stats.num_bytes()
|
||||
+ linear_params.num_bytes()
|
||||
+ (num_bits as u64 * stats.num_rows as u64 + 7) / 8,
|
||||
+ (num_bits as u64 * stats.num_rows as u64).div_ceil(8),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -367,7 +367,7 @@ fn is_empty_after_merge(
|
||||
ColumnIndex::Empty { .. } => true,
|
||||
ColumnIndex::Full => alive_bitset.len() == 0,
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
for doc in optional_index.iter_docs() {
|
||||
for doc in optional_index.iter_non_null_docs() {
|
||||
if alive_bitset.contains(doc) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -244,7 +244,7 @@ impl SymbolValue for UnorderedId {
|
||||
|
||||
fn compute_num_bytes_for_u64(val: u64) -> usize {
|
||||
let msb = (64u32 - val.leading_zeros()) as usize;
|
||||
(msb + 7) / 8
|
||||
msb.div_ceil(8)
|
||||
}
|
||||
|
||||
fn encode_zig_zag(n: i64) -> u64 {
|
||||
|
||||
@@ -17,15 +17,10 @@
|
||||
//! column.
|
||||
//! - [column_values]: Stores the values of a column in a dense format.
|
||||
|
||||
#![cfg_attr(all(feature = "unstable", test), feature(test))]
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate more_asserts;
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
extern crate test;
|
||||
|
||||
use std::fmt::Display;
|
||||
use std::io;
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use common::DateTime;
|
||||
|
||||
use crate::InvalidData;
|
||||
@@ -9,6 +11,23 @@ pub enum NumericalValue {
|
||||
F64(f64),
|
||||
}
|
||||
|
||||
impl FromStr for NumericalValue {
|
||||
type Err = ();
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, ()> {
|
||||
if let Ok(val_i64) = s.parse::<i64>() {
|
||||
return Ok(val_i64.into());
|
||||
}
|
||||
if let Ok(val_u64) = s.parse::<u64>() {
|
||||
return Ok(val_u64.into());
|
||||
}
|
||||
if let Ok(val_f64) = s.parse::<f64>() {
|
||||
return Ok(NumericalValue::from(val_f64).normalize());
|
||||
}
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
|
||||
impl NumericalValue {
|
||||
pub fn numerical_type(&self) -> NumericalType {
|
||||
match self {
|
||||
@@ -26,7 +45,7 @@ impl NumericalValue {
|
||||
if val <= i64::MAX as u64 {
|
||||
NumericalValue::I64(val as i64)
|
||||
} else {
|
||||
NumericalValue::F64(val as f64)
|
||||
NumericalValue::U64(val)
|
||||
}
|
||||
}
|
||||
NumericalValue::I64(val) => NumericalValue::I64(val),
|
||||
@@ -141,6 +160,7 @@ impl Coerce for DateTime {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::NumericalType;
|
||||
use crate::NumericalValue;
|
||||
|
||||
#[test]
|
||||
fn test_numerical_type_code() {
|
||||
@@ -153,4 +173,58 @@ mod tests {
|
||||
}
|
||||
assert_eq!(num_numerical_type, 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_numerical() {
|
||||
assert_eq!(
|
||||
"123".parse::<NumericalValue>().unwrap(),
|
||||
NumericalValue::I64(123)
|
||||
);
|
||||
assert_eq!(
|
||||
"18446744073709551615".parse::<NumericalValue>().unwrap(),
|
||||
NumericalValue::U64(18446744073709551615u64)
|
||||
);
|
||||
assert_eq!(
|
||||
"1.0".parse::<NumericalValue>().unwrap(),
|
||||
NumericalValue::I64(1i64)
|
||||
);
|
||||
assert_eq!(
|
||||
"1.1".parse::<NumericalValue>().unwrap(),
|
||||
NumericalValue::F64(1.1f64)
|
||||
);
|
||||
assert_eq!(
|
||||
"-1.0".parse::<NumericalValue>().unwrap(),
|
||||
NumericalValue::I64(-1i64)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_normalize_numerical() {
|
||||
assert_eq!(
|
||||
NumericalValue::from(1u64).normalize(),
|
||||
NumericalValue::I64(1i64),
|
||||
);
|
||||
let limit_val = i64::MAX as u64 + 1u64;
|
||||
assert_eq!(
|
||||
NumericalValue::from(limit_val).normalize(),
|
||||
NumericalValue::U64(limit_val),
|
||||
);
|
||||
assert_eq!(
|
||||
NumericalValue::from(-1i64).normalize(),
|
||||
NumericalValue::I64(-1i64),
|
||||
);
|
||||
assert_eq!(
|
||||
NumericalValue::from(-2.0f64).normalize(),
|
||||
NumericalValue::I64(-2i64),
|
||||
);
|
||||
assert_eq!(
|
||||
NumericalValue::from(-2.1f64).normalize(),
|
||||
NumericalValue::F64(-2.1f64),
|
||||
);
|
||||
let large_float = 2.0f64.powf(70.0f64);
|
||||
assert_eq!(
|
||||
NumericalValue::from(large_float).normalize(),
|
||||
NumericalValue::F64(large_float),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-common"
|
||||
version = "0.9.0"
|
||||
version = "0.10.0"
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2024"
|
||||
|
||||
@@ -183,7 +183,7 @@ pub struct BitSet {
|
||||
}
|
||||
|
||||
fn num_buckets(max_val: u32) -> u32 {
|
||||
(max_val + 63u32) / 64u32
|
||||
max_val.div_ceil(64u32)
|
||||
}
|
||||
|
||||
impl BitSet {
|
||||
|
||||
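Several hunks above replace the manual ceiling-division idiom `(n + d - 1) / d` with `div_ceil`, which was stabilized for the integer primitives in Rust 1.73. A minimal sketch, not part of the diff, checking that the two forms agree on the operand sizes used here:

```rust
// Sketch only: verify that `(n + d - 1) / d` (the old idiom) and `n.div_ceil(d)`
// (the replacement) compute the same ceiling division. The manual form can also
// overflow when `n` is near the type's maximum, which `div_ceil` avoids.
fn main() {
    for n in 0u32..10_000 {
        for d in [8u32, 64] {
            assert_eq!((n + d - 1) / d, n.div_ceil(d));
        }
    }
}
```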
Binary file not shown (image: before 30 KiB, after 7.4 KiB).
Binary file not shown (image: before 653 KiB).
@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
|
||||
|
||||
// Our second field is body.
|
||||
// We want full-text search for it, but we do not
|
||||
// need to be able to be able to retrieve it
|
||||
// need to be able to retrieve it
|
||||
// for our application.
|
||||
//
|
||||
// We can make our index lighter by omitting the `STORED` flag.
|
||||
|
||||
212
examples/filter_aggregation.rs
Normal file
@@ -0,0 +1,212 @@
|
||||
// # Filter Aggregation Example
|
||||
//
|
||||
// This example demonstrates filter aggregations - creating buckets of documents
|
||||
// matching specific queries, with nested aggregations computed on each bucket.
|
||||
//
|
||||
// Filter aggregations are useful for computing metrics on different subsets of
|
||||
// your data in a single query, like "average price overall + average price for
|
||||
// electronics + count of in-stock items".
|
||||
|
||||
use serde_json::json;
|
||||
use tantivy::aggregation::agg_req::Aggregations;
|
||||
use tantivy::aggregation::AggregationCollector;
|
||||
use tantivy::query::AllQuery;
|
||||
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// Create a simple product schema
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_text_field("category", TEXT | FAST);
|
||||
schema_builder.add_text_field("brand", TEXT | FAST);
|
||||
schema_builder.add_u64_field("price", FAST);
|
||||
schema_builder.add_f64_field("rating", FAST);
|
||||
schema_builder.add_bool_field("in_stock", FAST | INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
// Create index and add sample products
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut writer = index.writer(50_000_000)?;
|
||||
|
||||
writer.add_document(doc!(
|
||||
schema.get_field("category")? => "electronics",
|
||||
schema.get_field("brand")? => "apple",
|
||||
schema.get_field("price")? => 999u64,
|
||||
schema.get_field("rating")? => 4.5f64,
|
||||
schema.get_field("in_stock")? => true
|
||||
))?;
|
||||
writer.add_document(doc!(
|
||||
schema.get_field("category")? => "electronics",
|
||||
schema.get_field("brand")? => "samsung",
|
||||
schema.get_field("price")? => 799u64,
|
||||
schema.get_field("rating")? => 4.2f64,
|
||||
schema.get_field("in_stock")? => true
|
||||
))?;
|
||||
writer.add_document(doc!(
|
||||
schema.get_field("category")? => "clothing",
|
||||
schema.get_field("brand")? => "nike",
|
||||
schema.get_field("price")? => 120u64,
|
||||
schema.get_field("rating")? => 4.1f64,
|
||||
schema.get_field("in_stock")? => false
|
||||
))?;
|
||||
writer.add_document(doc!(
|
||||
schema.get_field("category")? => "books",
|
||||
schema.get_field("brand")? => "penguin",
|
||||
schema.get_field("price")? => 25u64,
|
||||
schema.get_field("rating")? => 4.8f64,
|
||||
schema.get_field("in_stock")? => true
|
||||
))?;
|
||||
|
||||
writer.commit()?;
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// Example 1: Basic filter with metric aggregation
|
||||
println!("=== Example 1: Electronics average price ===");
|
||||
let agg_req = json!({
|
||||
"electronics": {
|
||||
"filter": "category:electronics",
|
||||
"aggs": {
|
||||
"avg_price": { "avg": { "field": "price" } }
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let agg: Aggregations = serde_json::from_value(agg_req)?;
|
||||
let collector = AggregationCollector::from_aggs(agg, Default::default());
|
||||
let result = searcher.search(&AllQuery, &collector)?;
|
||||
|
||||
let expected = json!({
|
||||
"electronics": {
|
||||
"doc_count": 2,
|
||||
"avg_price": { "value": 899.0 }
|
||||
}
|
||||
});
|
||||
assert_eq!(serde_json::to_value(&result)?, expected);
|
||||
println!("{}\n", serde_json::to_string_pretty(&result)?);
|
||||
|
||||
// Example 2: Multiple independent filters
|
||||
println!("=== Example 2: Multiple filters in one query ===");
|
||||
let agg_req = json!({
|
||||
"electronics": {
|
||||
"filter": "category:electronics",
|
||||
"aggs": { "avg_price": { "avg": { "field": "price" } } }
|
||||
},
|
||||
"in_stock": {
|
||||
"filter": "in_stock:true",
|
||||
"aggs": { "count": { "value_count": { "field": "brand" } } }
|
||||
},
|
||||
"high_rated": {
|
||||
"filter": "rating:[4.5 TO *]",
|
||||
"aggs": { "count": { "value_count": { "field": "brand" } } }
|
||||
}
|
||||
});
|
||||
|
||||
let agg: Aggregations = serde_json::from_value(agg_req)?;
|
||||
let collector = AggregationCollector::from_aggs(agg, Default::default());
|
||||
let result = searcher.search(&AllQuery, &collector)?;
|
||||
|
||||
let expected = json!({
|
||||
"electronics": {
|
||||
"doc_count": 2,
|
||||
"avg_price": { "value": 899.0 }
|
||||
},
|
||||
"in_stock": {
|
||||
"doc_count": 3,
|
||||
"count": { "value": 3.0 }
|
||||
},
|
||||
"high_rated": {
|
||||
"doc_count": 2,
|
||||
"count": { "value": 2.0 }
|
||||
}
|
||||
});
|
||||
assert_eq!(serde_json::to_value(&result)?, expected);
|
||||
println!("{}\n", serde_json::to_string_pretty(&result)?);
|
||||
|
||||
// Example 3: Nested filters - progressive refinement
|
||||
println!("=== Example 3: Nested filters ===");
|
||||
let agg_req = json!({
|
||||
"in_stock": {
|
||||
"filter": "in_stock:true",
|
||||
"aggs": {
|
||||
"electronics": {
|
||||
"filter": "category:electronics",
|
||||
"aggs": {
|
||||
"expensive": {
|
||||
"filter": "price:[800 TO *]",
|
||||
"aggs": {
|
||||
"avg_rating": { "avg": { "field": "rating" } }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let agg: Aggregations = serde_json::from_value(agg_req)?;
|
||||
let collector = AggregationCollector::from_aggs(agg, Default::default());
|
||||
let result = searcher.search(&AllQuery, &collector)?;
|
||||
|
||||
let expected = json!({
|
||||
"in_stock": {
|
||||
"doc_count": 3, // apple, samsung, penguin
|
||||
"electronics": {
|
||||
"doc_count": 2, // apple, samsung
|
||||
"expensive": {
|
||||
"doc_count": 1, // only apple (999)
|
||||
"avg_rating": { "value": 4.5 }
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
assert_eq!(serde_json::to_value(&result)?, expected);
|
||||
println!("{}\n", serde_json::to_string_pretty(&result)?);
|
||||
|
||||
// Example 4: Filter with sub-aggregation (terms)
|
||||
println!("=== Example 4: Filter with terms sub-aggregation ===");
|
||||
let agg_req = json!({
|
||||
"electronics": {
|
||||
"filter": "category:electronics",
|
||||
"aggs": {
|
||||
"by_brand": {
|
||||
"terms": { "field": "brand" },
|
||||
"aggs": {
|
||||
"avg_price": { "avg": { "field": "price" } }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let agg: Aggregations = serde_json::from_value(agg_req)?;
|
||||
let collector = AggregationCollector::from_aggs(agg, Default::default());
|
||||
let result = searcher.search(&AllQuery, &collector)?;
|
||||
|
||||
let expected = json!({
|
||||
"electronics": {
|
||||
"doc_count": 2,
|
||||
"by_brand": {
|
||||
"buckets": [
|
||||
{
|
||||
"key": "samsung",
|
||||
"doc_count": 1,
|
||||
"avg_price": { "value": 799.0 }
|
||||
},
|
||||
{
|
||||
"key": "apple",
|
||||
"doc_count": 1,
|
||||
"avg_price": { "value": 999.0 }
|
||||
}
|
||||
],
|
||||
"sum_other_doc_count": 0,
|
||||
"doc_count_error_upper_bound": 0
|
||||
}
|
||||
}
|
||||
});
|
||||
assert_eq!(serde_json::to_value(&result)?, expected);
|
||||
println!("{}", serde_json::to_string_pretty(&result)?);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -85,7 +85,6 @@ fn main() -> tantivy::Result<()> {
|
||||
index_writer.add_document(doc!(
|
||||
title => "The Diary of a Young Girl",
|
||||
))?;
|
||||
index_writer.commit()?;
|
||||
|
||||
// ### Committing
|
||||
//
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-query-grammar"
|
||||
version = "0.24.0"
|
||||
version = "0.25.0"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
categories = ["database-implementations", "data-structures"]
|
||||
@@ -15,3 +15,5 @@ edition = "2024"
|
||||
nom = "7"
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = "1.0.140"
|
||||
ordered-float = "5.0.0"
|
||||
fnv = "1.0.7"
|
||||
|
||||
@@ -117,6 +117,22 @@ where F: nom::Parser<I, (O, ErrorList), Infallible> {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn terminated_infallible<I, O1, O2, F, G>(
|
||||
mut first: F,
|
||||
mut second: G,
|
||||
) -> impl FnMut(I) -> JResult<I, O1>
|
||||
where
|
||||
F: nom::Parser<I, (O1, ErrorList), Infallible>,
|
||||
G: nom::Parser<I, (O2, ErrorList), Infallible>,
|
||||
{
|
||||
move |input: I| {
|
||||
let (input, (o1, mut err)) = first.parse(input)?;
|
||||
let (input, (_, mut err2)) = second.parse(input)?;
|
||||
err.append(&mut err2);
|
||||
Ok((input, (o1, err)))
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn delimited_infallible<I, O1, O2, O3, F, G, H>(
|
||||
mut first: F,
|
||||
mut second: G,
|
||||
|
||||
@@ -31,7 +31,17 @@ pub fn parse_query_lenient(query: &str) -> (UserInputAst, Vec<LenientError>) {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{parse_query, parse_query_lenient};
|
||||
use crate::{UserInputAst, parse_query, parse_query_lenient};
|
||||
|
||||
#[test]
|
||||
fn test_deduplication() {
|
||||
let ast: UserInputAst = parse_query("a a").unwrap();
|
||||
let json = serde_json::to_string(&ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"bool","clauses":[[null,{"type":"literal","field_name":null,"phrase":"a","delimiter":"none","slop":0,"prefix":false}]]}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_serialization() {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::borrow::Cow;
|
||||
use std::iter::once;
|
||||
|
||||
use fnv::FnvHashSet;
|
||||
use nom::IResult;
|
||||
use nom::branch::alt;
|
||||
use nom::bytes::complete::tag;
|
||||
@@ -36,7 +37,7 @@ fn field_name(inp: &str) -> IResult<&str, String> {
|
||||
alt((first_char, escape_sequence())),
|
||||
many0(alt((simple_char, escape_sequence(), char('\\')))),
|
||||
)),
|
||||
char(':'),
|
||||
tuple((multispace0, char(':'), multispace0)),
|
||||
),
|
||||
|(first_char, next)| once(first_char).chain(next).collect(),
|
||||
)(inp)
|
||||
@@ -68,7 +69,7 @@ fn interpret_escape(source: &str) -> String {
|
||||
|
||||
/// Consume a word outside of any context.
|
||||
// TODO should support escape sequences
|
||||
fn word(inp: &str) -> IResult<&str, Cow<str>> {
|
||||
fn word(inp: &str) -> IResult<&str, Cow<'_, str>> {
|
||||
map_res(
|
||||
recognize(tuple((
|
||||
alt((
|
||||
@@ -305,15 +306,14 @@ fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
|
||||
let (inp, (field_name, _, _, _)) =
|
||||
tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");
|
||||
|
||||
let res = delimited_infallible(
|
||||
delimited_infallible(
|
||||
nothing,
|
||||
map(ast_infallible, |(mut ast, errors)| {
|
||||
ast.set_default_field(field_name.to_string());
|
||||
(ast, errors)
|
||||
}),
|
||||
opt_i_err(char(')'), "expected ')'"),
|
||||
)(inp);
|
||||
res
|
||||
)(inp)
|
||||
}
|
||||
|
||||
fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
@@ -367,7 +367,10 @@ fn literal(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
// something (a field name) got parsed before
|
||||
alt((
|
||||
map(
|
||||
tuple((opt(field_name), alt((range, set, exists, term_or_phrase)))),
|
||||
tuple((
|
||||
opt(field_name),
|
||||
alt((range, set, exists, regex, term_or_phrase)),
|
||||
)),
|
||||
|(field_name, leaf): (Option<String>, UserInputLeaf)| leaf.set_field(field_name).into(),
|
||||
),
|
||||
term_group,
|
||||
@@ -389,6 +392,10 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
|
||||
value((), peek(one_of("{[><"))),
|
||||
map(range_infallible, |(range, errs)| (Some(range), errs)),
|
||||
),
|
||||
(
|
||||
value((), peek(one_of("/"))),
|
||||
map(regex_infallible, |(regex, errs)| (Some(regex), errs)),
|
||||
),
|
||||
),
|
||||
delimited_infallible(space0_infallible, term_or_phrase_infallible, nothing),
|
||||
),
|
||||
@@ -689,6 +696,61 @@ fn set_infallible(mut inp: &str) -> JResult<&str, UserInputLeaf> {
|
||||
}
|
||||
}
|
||||
|
||||
fn regex(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
map(
|
||||
terminated(
|
||||
delimited(
|
||||
char('/'),
|
||||
many1(alt((preceded(char('\\'), char('/')), none_of("/")))),
|
||||
char('/'),
|
||||
),
|
||||
peek(alt((multispace1, eof))),
|
||||
),
|
||||
|elements| UserInputLeaf::Regex {
|
||||
field: None,
|
||||
pattern: elements.into_iter().collect::<String>(),
|
||||
},
|
||||
)(inp)
|
||||
}
|
||||
|
||||
fn regex_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
|
||||
match terminated_infallible(
|
||||
delimited_infallible(
|
||||
opt_i_err(char('/'), "missing delimiter /"),
|
||||
opt_i(many1(alt((preceded(char('\\'), char('/')), none_of("/"))))),
|
||||
opt_i_err(char('/'), "missing delimiter /"),
|
||||
),
|
||||
opt_i_err(
|
||||
peek(alt((multispace1, eof))),
|
||||
"expected whitespace or end of input",
|
||||
),
|
||||
)(inp)
|
||||
{
|
||||
Ok((rest, (elements_part, errors))) => {
|
||||
let pattern = match elements_part {
|
||||
Some(elements_part) => elements_part.into_iter().collect(),
|
||||
None => String::new(),
|
||||
};
|
||||
let res = UserInputLeaf::Regex {
|
||||
field: None,
|
||||
pattern,
|
||||
};
|
||||
Ok((rest, (res, errors)))
|
||||
}
|
||||
Err(e) => {
|
||||
let errs = vec![LenientErrorInternal {
|
||||
pos: inp.len(),
|
||||
message: e.to_string(),
|
||||
}];
|
||||
let res = UserInputLeaf::Regex {
|
||||
field: None,
|
||||
pattern: String::new(),
|
||||
};
|
||||
Ok((inp, (res, errs)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn negate(expr: UserInputAst) -> UserInputAst {
|
||||
expr.unary(Occur::MustNot)
|
||||
}
|
||||
@@ -753,7 +815,7 @@ fn boosted_leaf(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
tuple((leaf, fallible(boost))),
|
||||
|(leaf, boost_opt)| match boost_opt {
|
||||
Some(boost) if (boost - 1.0).abs() > f64::EPSILON => {
|
||||
UserInputAst::Boost(Box::new(leaf), boost)
|
||||
UserInputAst::Boost(Box::new(leaf), boost.into())
|
||||
}
|
||||
_ => leaf,
|
||||
},
|
||||
@@ -765,7 +827,7 @@ fn boosted_leaf_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
tuple_infallible((leaf_infallible, boost)),
|
||||
|((leaf, boost_opt), error)| match boost_opt {
|
||||
Some(boost) if (boost - 1.0).abs() > f64::EPSILON => (
|
||||
leaf.map(|leaf| UserInputAst::Boost(Box::new(leaf), boost)),
|
||||
leaf.map(|leaf| UserInputAst::Boost(Box::new(leaf), boost.into())),
|
||||
error,
|
||||
),
|
||||
_ => (leaf, error),
|
||||
@@ -1016,12 +1078,25 @@ pub fn parse_to_ast_lenient(query_str: &str) -> (UserInputAst, Vec<LenientError>
|
||||
(rewrite_ast(res), errors)
|
||||
}
|
||||
|
||||
/// Removes unnecessary children clauses in AST
|
||||
///
|
||||
/// Motivated by [issue #1433](https://github.com/quickwit-oss/tantivy/issues/1433)
|
||||
fn rewrite_ast(mut input: UserInputAst) -> UserInputAst {
|
||||
if let UserInputAst::Clause(terms) = &mut input {
|
||||
for term in terms {
|
||||
if let UserInputAst::Clause(sub_clauses) = &mut input {
|
||||
// call rewrite_ast recursively on children clauses if applicable
|
||||
let mut new_clauses = Vec::with_capacity(sub_clauses.len());
|
||||
for (occur, clause) in sub_clauses.drain(..) {
|
||||
let rewritten_clause = rewrite_ast(clause);
|
||||
new_clauses.push((occur, rewritten_clause));
|
||||
}
|
||||
*sub_clauses = new_clauses;
|
||||
|
||||
// remove duplicate child clauses
|
||||
// e.g. (+a +b) OR (+c +d) OR (+a +b) => (+a +b) OR (+c +d)
|
||||
let mut seen = FnvHashSet::default();
|
||||
sub_clauses.retain(|term| seen.insert(term.clone()));
|
||||
|
||||
// Removes unnecessary children clauses in AST
|
||||
//
|
||||
// Motivated by [issue #1433](https://github.com/quickwit-oss/tantivy/issues/1433)
|
||||
for term in sub_clauses {
|
||||
rewrite_ast_clause(term);
|
||||
}
|
||||
}
|
||||
@@ -1283,6 +1358,10 @@ mod test {
|
||||
super::field_name("~my~field:a"),
|
||||
Ok(("a", "~my~field".to_string()))
|
||||
);
|
||||
assert_eq!(
|
||||
super::field_name(".my.field.name : a"),
|
||||
Ok(("a", ".my.field.name".to_string()))
|
||||
);
|
||||
for special_char in SPECIAL_CHARS.iter() {
|
||||
let query = &format!("\\{special_char}my\\{special_char}field:a");
|
||||
assert_eq!(
|
||||
@@ -1689,4 +1768,72 @@ mod test {
|
||||
fn test_invalid_field() {
|
||||
test_is_parse_err(r#"!bc:def"#, "!bc:def");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_regex_parser() {
|
||||
let r = parse_to_ast(r#"a:/joh?n(ath[oa]n)/"#);
|
||||
assert!(r.is_ok(), "Failed to parse custom query: {r:?}");
|
||||
let (_, input) = r.unwrap();
|
||||
match input {
|
||||
UserInputAst::Leaf(leaf) => match leaf.as_ref() {
|
||||
UserInputLeaf::Regex { field, pattern } => {
|
||||
assert_eq!(field, &Some("a".to_string()));
|
||||
assert_eq!(pattern, "joh?n(ath[oa]n)");
|
||||
}
|
||||
_ => panic!("Expected a regex leaf, got {leaf:?}"),
|
||||
},
|
||||
_ => panic!("Expected a leaf"),
|
||||
}
|
||||
let r = parse_to_ast(r#"a:/\\/cgi-bin\\/luci.*/"#);
|
||||
assert!(r.is_ok(), "Failed to parse custom query: {r:?}");
|
||||
let (_, input) = r.unwrap();
|
||||
match input {
|
||||
UserInputAst::Leaf(leaf) => match leaf.as_ref() {
|
||||
UserInputLeaf::Regex { field, pattern } => {
|
||||
assert_eq!(field, &Some("a".to_string()));
|
||||
assert_eq!(pattern, "\\/cgi-bin\\/luci.*");
|
||||
}
|
||||
_ => panic!("Expected a regex leaf, got {leaf:?}"),
|
||||
},
|
||||
_ => panic!("Expected a leaf"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_regex_parser_lenient() {
|
||||
let literal = |query| literal_infallible(query).unwrap().1;
|
||||
|
||||
let (res, errs) = literal(r#"a:/joh?n(ath[oa]n)/"#);
|
||||
let expected = UserInputLeaf::Regex {
|
||||
field: Some("a".to_string()),
|
||||
pattern: "joh?n(ath[oa]n)".to_string(),
|
||||
}
|
||||
.into();
|
||||
assert_eq!(res.unwrap(), expected);
|
||||
assert!(errs.is_empty(), "Expected no errors, got: {errs:?}");
|
||||
|
||||
let (res, errs) = literal("title:/joh?n(ath[oa]n)");
|
||||
let expected = UserInputLeaf::Regex {
|
||||
field: Some("title".to_string()),
|
||||
pattern: "joh?n(ath[oa]n)".to_string(),
|
||||
}
|
||||
.into();
|
||||
assert_eq!(res.unwrap(), expected);
|
||||
assert_eq!(errs.len(), 1, "Expected 1 error, got: {errs:?}");
|
||||
assert_eq!(
|
||||
errs[0].message, "missing delimiter /",
|
||||
"Unexpected error message",
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_space_before_value() {
|
||||
test_parse_query_to_ast_helper("field : a", r#""field":a"#);
|
||||
test_parse_query_to_ast_helper("field: a", r#""field":a"#);
|
||||
test_parse_query_to_ast_helper("field :a", r#""field":a"#);
|
||||
test_parse_query_to_ast_helper(
|
||||
"field : 'happy tax payer' AND other_field : 1",
|
||||
r#"(+"field":'happy tax payer' +"other_field":1)"#,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ use serde::Serialize;
|
||||
|
||||
use crate::Occur;
|
||||
|
||||
#[derive(PartialEq, Clone, Serialize)]
|
||||
#[derive(PartialEq, Eq, Hash, Clone, Serialize)]
|
||||
#[serde(tag = "type")]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum UserInputLeaf {
|
||||
@@ -23,6 +23,10 @@ pub enum UserInputLeaf {
|
||||
Exists {
|
||||
field: String,
|
||||
},
|
||||
Regex {
|
||||
field: Option<String>,
|
||||
pattern: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl UserInputLeaf {
|
||||
@@ -46,6 +50,7 @@ impl UserInputLeaf {
|
||||
UserInputLeaf::Exists { field: _ } => UserInputLeaf::Exists {
|
||||
field: field.expect("Exist query without a field isn't allowed"),
|
||||
},
|
||||
UserInputLeaf::Regex { field: _, pattern } => UserInputLeaf::Regex { field, pattern },
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,11 +108,19 @@ impl Debug for UserInputLeaf {
|
||||
UserInputLeaf::Exists { field } => {
|
||||
write!(formatter, "$exists(\"{field}\")")
|
||||
}
|
||||
UserInputLeaf::Regex { field, pattern } => {
|
||||
if let Some(field) = field {
|
||||
// TODO properly escape field (in case of \")
|
||||
write!(formatter, "\"{field}\":")?;
|
||||
}
|
||||
// TODO properly escape pattern (in case of \")
|
||||
write!(formatter, "/{pattern}/")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug, Serialize)]
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum Delimiter {
|
||||
SingleQuotes,
|
||||
@@ -115,7 +128,7 @@ pub enum Delimiter {
|
||||
None,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Clone, Serialize)]
|
||||
#[derive(PartialEq, Eq, Hash, Clone, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct UserInputLiteral {
|
||||
pub field_name: Option<String>,
|
||||
@@ -154,7 +167,7 @@ impl fmt::Debug for UserInputLiteral {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Clone, Serialize)]
|
||||
#[derive(PartialEq, Eq, Hash, Debug, Clone, Serialize)]
|
||||
#[serde(tag = "type", content = "value")]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum UserInputBound {
|
||||
@@ -191,11 +204,11 @@ impl UserInputBound {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Clone, Serialize)]
|
||||
#[derive(PartialEq, Eq, Hash, Clone, Serialize)]
|
||||
#[serde(into = "UserInputAstSerde")]
|
||||
pub enum UserInputAst {
|
||||
Clause(Vec<(Option<Occur>, UserInputAst)>),
|
||||
Boost(Box<UserInputAst>, f64),
|
||||
Boost(Box<UserInputAst>, ordered_float::OrderedFloat<f64>),
|
||||
Leaf(Box<UserInputLeaf>),
|
||||
}
|
||||
|
||||
@@ -217,9 +230,10 @@ impl From<UserInputAst> for UserInputAstSerde {
|
||||
fn from(ast: UserInputAst) -> Self {
|
||||
match ast {
|
||||
UserInputAst::Clause(clause) => UserInputAstSerde::Bool { clauses: clause },
|
||||
UserInputAst::Boost(underlying, boost) => {
|
||||
UserInputAstSerde::Boost { underlying, boost }
|
||||
}
|
||||
UserInputAst::Boost(underlying, boost) => UserInputAstSerde::Boost {
|
||||
underlying,
|
||||
boost: boost.into_inner(),
|
||||
},
|
||||
UserInputAst::Leaf(leaf) => UserInputAstSerde::Leaf(leaf),
|
||||
}
|
||||
}
|
||||
@@ -378,7 +392,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_boost_serialization() {
|
||||
let inner_ast = UserInputAst::Leaf(Box::new(UserInputLeaf::All));
|
||||
let boost_ast = UserInputAst::Boost(Box::new(inner_ast), 2.5);
|
||||
let boost_ast = UserInputAst::Boost(Box::new(inner_ast), 2.5.into());
|
||||
let json = serde_json::to_string(&boost_ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
@@ -405,7 +419,7 @@ mod tests {
|
||||
}))),
|
||||
),
|
||||
])),
|
||||
2.5,
|
||||
2.5.into(),
|
||||
);
|
||||
let json = serde_json::to_string(&boost_ast).unwrap();
|
||||
assert_eq!(
|
||||
|
||||
@@ -20,17 +20,16 @@ Contains all metric aggregations, like average aggregation. Metric aggregations
|
||||
#### agg_req
|
||||
agg_req contains the users aggregation request. Deserialization from json is compatible with elasticsearch aggregation requests.
|
||||
|
||||
#### agg_req_with_accessor
|
||||
agg_req_with_accessor contains the users aggregation request enriched with fast field accessors etc, which are
|
||||
#### agg_data
|
||||
agg_data contains the users aggregation request enriched with fast field accessors etc, which are
|
||||
used during collection.
|
||||
|
||||
#### segment_agg_result
|
||||
segment_agg_result contains the aggregation result tree, which is used for collection of a segment.
|
||||
The tree from agg_req_with_accessor is passed during collection.
|
||||
agg_data is passed during collection.
|
||||
|
||||
#### intermediate_agg_result
|
||||
intermediate_agg_result contains the aggregation tree for merging with other trees.
|
||||
|
||||
#### agg_result
|
||||
agg_result contains the final aggregation tree.
|
||||
|
||||
|
||||
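Taken together, the module overview above describes one pipeline: an `agg_req` deserialized from Elasticsearch-compatible JSON is enriched with fast field accessors, collected per segment, merged as intermediate results, and returned as the final `agg_result`. A minimal sketch of that flow from the caller's side, reusing only the public API already exercised by `examples/filter_aggregation.rs` earlier in this diff (the `price` field and the `avg_price` name are illustrative):

```rust
use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Searcher;

// Deserialize the request (agg_req), run the collection (segment results are
// built and merged internally), and serialize the final aggregation tree.
fn run_avg_price(searcher: &Searcher) -> tantivy::Result<serde_json::Value> {
    let agg: Aggregations =
        serde_json::from_value(json!({ "avg_price": { "avg": { "field": "price" } } }))?;
    let collector = AggregationCollector::from_aggs(agg, Default::default());
    let result = searcher.search(&AllQuery, &collector)?;
    Ok(serde_json::to_value(&result)?)
}
```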
104
src/aggregation/accessor_helpers.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
//! This will enhance the request tree with access to the fastfield and metadata.
|
||||
|
||||
use std::io;
|
||||
|
||||
use columnar::{Column, ColumnType};
|
||||
|
||||
use crate::aggregation::{f64_to_fastfield_u64, Key};
|
||||
use crate::index::SegmentReader;
|
||||
|
||||
/// Get the missing value as internal u64 representation
|
||||
///
|
||||
/// For terms we use u64::MAX as sentinel value
|
||||
/// For numerical data we convert the value into the representation
|
||||
/// we would get from the fast field, when we open it as u64_lenient_for_type.
|
||||
///
|
||||
/// That way we can use it the same way as if it would come from the fastfield.
|
||||
pub(crate) fn get_missing_val_as_u64_lenient(
|
||||
column_type: ColumnType,
|
||||
missing: &Key,
|
||||
field_name: &str,
|
||||
) -> crate::Result<Option<u64>> {
|
||||
let missing_val = match missing {
|
||||
Key::Str(_) if column_type == ColumnType::Str => Some(u64::MAX),
|
||||
// Allow fallback to number on text fields
|
||||
Key::F64(_) if column_type == ColumnType::Str => Some(u64::MAX),
|
||||
Key::U64(_) if column_type == ColumnType::Str => Some(u64::MAX),
|
||||
Key::I64(_) if column_type == ColumnType::Str => Some(u64::MAX),
|
||||
Key::F64(val) if column_type.numerical_type().is_some() => {
|
||||
f64_to_fastfield_u64(*val, &column_type)
|
||||
}
|
||||
// NOTE: We may lose precision of the passed missing value by casting i64 and u64 to f64.
|
||||
Key::I64(val) if column_type.numerical_type().is_some() => {
|
||||
f64_to_fastfield_u64(*val as f64, &column_type)
|
||||
}
|
||||
Key::U64(val) if column_type.numerical_type().is_some() => {
|
||||
f64_to_fastfield_u64(*val as f64, &column_type)
|
||||
}
|
||||
_ => {
|
||||
return Err(crate::TantivyError::InvalidArgument(format!(
|
||||
"Missing value {missing:?} for field {field_name} is not supported for column \
|
||||
type {column_type:?}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
Ok(missing_val)
|
||||
}
|
||||
|
||||
pub(crate) fn get_numeric_or_date_column_types() -> &'static [ColumnType] {
|
||||
&[
|
||||
ColumnType::F64,
|
||||
ColumnType::U64,
|
||||
ColumnType::I64,
|
||||
ColumnType::DateTime,
|
||||
]
|
||||
}
|
||||
|
||||
/// Get fast field reader or empty as default.
|
||||
pub(crate) fn get_ff_reader(
|
||||
reader: &SegmentReader,
|
||||
field_name: &str,
|
||||
allowed_column_types: Option<&[ColumnType]>,
|
||||
) -> crate::Result<(columnar::Column<u64>, ColumnType)> {
|
||||
let ff_fields = reader.fast_fields();
|
||||
let ff_field_with_type = ff_fields
|
||||
.u64_lenient_for_type(allowed_column_types, field_name)?
|
||||
.unwrap_or_else(|| {
|
||||
(
|
||||
Column::build_empty_column(reader.num_docs()),
|
||||
ColumnType::U64,
|
||||
)
|
||||
});
|
||||
Ok(ff_field_with_type)
|
||||
}
|
||||
|
||||
pub(crate) fn get_dynamic_columns(
|
||||
reader: &SegmentReader,
|
||||
field_name: &str,
|
||||
) -> crate::Result<Vec<columnar::DynamicColumn>> {
|
||||
let ff_fields = reader.fast_fields().dynamic_column_handles(field_name)?;
|
||||
let cols = ff_fields
|
||||
.iter()
|
||||
.map(|h| h.open())
|
||||
.collect::<io::Result<_>>()?;
|
||||
assert!(!ff_fields.is_empty(), "field {field_name} not found");
|
||||
Ok(cols)
|
||||
}
|
||||
|
||||
/// Get all fast field reader or empty as default.
|
||||
///
|
||||
/// Is guaranteed to return at least one column.
|
||||
pub(crate) fn get_all_ff_reader_or_empty(
|
||||
reader: &SegmentReader,
|
||||
field_name: &str,
|
||||
allowed_column_types: Option<&[ColumnType]>,
|
||||
fallback_type: ColumnType,
|
||||
) -> crate::Result<Vec<(columnar::Column<u64>, ColumnType)>> {
|
||||
let ff_fields = reader.fast_fields();
|
||||
let mut ff_field_with_type =
|
||||
ff_fields.u64_lenient_for_type_all(allowed_column_types, field_name)?;
|
||||
if ff_field_with_type.is_empty() {
|
||||
ff_field_with_type.push((Column::build_empty_column(reader.num_docs()), fallback_type));
|
||||
}
|
||||
Ok(ff_field_with_type)
|
||||
}
|
||||
1083
src/aggregation/agg_data.rs
Normal file
File diff suppressed because it is too large
@@ -70,7 +70,7 @@ impl AggregationLimitsGuard {
|
||||
/// *memory_limit*
|
||||
/// memory_limit is defined in bytes.
|
||||
/// Aggregation fails when the estimated memory consumption of the aggregation is higher than
|
||||
/// memory_limit.
|
||||
/// memory_limit.
|
||||
/// memory_limit will default to `DEFAULT_MEMORY_LIMIT` (500MB)
|
||||
///
|
||||
/// *bucket_limit*
|
||||
|
||||
@@ -26,12 +26,14 @@
|
||||
//! let _agg_req: Aggregations = serde_json::from_str(elasticsearch_compatible_json_req).unwrap();
|
||||
//! ```
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashSet;
|
||||
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::bucket::{
|
||||
DateHistogramAggregationReq, HistogramAggregation, RangeAggregation, TermsAggregation,
|
||||
DateHistogramAggregationReq, FilterAggregation, HistogramAggregation, RangeAggregation,
|
||||
TermsAggregation,
|
||||
};
|
||||
use super::metric::{
|
||||
AverageAggregation, CardinalityAggregationReq, CountAggregation, ExtendedStatsAggregation,
|
||||
@@ -43,7 +45,7 @@ use super::metric::{
|
||||
/// defined names. It is also used in buckets aggregations to define sub-aggregations.
|
||||
///
|
||||
/// The key is the user defined name of the aggregation.
|
||||
pub type Aggregations = HashMap<String, Aggregation>;
|
||||
pub type Aggregations = FxHashMap<String, Aggregation>;
|
||||
|
||||
/// Aggregation request.
|
||||
///
|
||||
@@ -129,6 +131,9 @@ pub enum AggregationVariants {
|
||||
/// Put data into buckets of terms.
|
||||
#[serde(rename = "terms")]
|
||||
Terms(TermsAggregation),
|
||||
/// Filter documents into a single bucket.
|
||||
#[serde(rename = "filter")]
|
||||
Filter(FilterAggregation),
|
||||
|
||||
// Metric aggregation types
|
||||
/// Computes the average of the extracted values.
|
||||
@@ -174,6 +179,7 @@ impl AggregationVariants {
|
||||
AggregationVariants::Range(range) => vec![range.field.as_str()],
|
||||
AggregationVariants::Histogram(histogram) => vec![histogram.field.as_str()],
|
||||
AggregationVariants::DateHistogram(histogram) => vec![histogram.field.as_str()],
|
||||
AggregationVariants::Filter(filter) => filter.get_fast_field_names(),
|
||||
AggregationVariants::Average(avg) => vec![avg.field_name()],
|
||||
AggregationVariants::Count(count) => vec![count.field_name()],
|
||||
AggregationVariants::Max(max) => vec![max.field_name()],
|
||||
@@ -208,13 +214,6 @@ impl AggregationVariants {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub(crate) fn as_top_hits(&self) -> Option<&TopHitsAggregationReq> {
|
||||
match &self {
|
||||
AggregationVariants::TopHits(top_hits) => Some(top_hits),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn as_percentile(&self) -> Option<&PercentilesAggregationReq> {
|
||||
match &self {
|
||||
AggregationVariants::Percentiles(percentile_req) => Some(percentile_req),
|
||||
|
||||
@@ -1,471 +0,0 @@
|
||||
//! This will enhance the request tree with access to the fastfield and metadata.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType, DynamicColumn, StrColumn};
|
||||
|
||||
use super::agg_req::{Aggregation, AggregationVariants, Aggregations};
|
||||
use super::bucket::{
|
||||
DateHistogramAggregationReq, HistogramAggregation, RangeAggregation, TermsAggregation,
|
||||
};
|
||||
use super::metric::{
|
||||
AverageAggregation, CardinalityAggregationReq, CountAggregation, ExtendedStatsAggregation,
|
||||
MaxAggregation, MinAggregation, StatsAggregation, SumAggregation,
|
||||
};
|
||||
use super::segment_agg_result::AggregationLimitsGuard;
|
||||
use super::VecWithNames;
|
||||
use crate::aggregation::{f64_to_fastfield_u64, Key};
|
||||
use crate::index::SegmentReader;
|
||||
use crate::SegmentOrdinal;
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct AggregationsWithAccessor {
|
||||
pub aggs: VecWithNames<AggregationWithAccessor>,
|
||||
}
|
||||
|
||||
impl AggregationsWithAccessor {
|
||||
fn from_data(aggs: VecWithNames<AggregationWithAccessor>) -> Self {
|
||||
Self { aggs }
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.aggs.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AggregationWithAccessor {
|
||||
pub(crate) segment_ordinal: SegmentOrdinal,
|
||||
/// In general there can be buckets without fast field access, e.g. buckets that are created
|
||||
/// based on search terms. That is not the case currently, but eventually this needs to be
|
||||
/// Option or moved.
|
||||
pub(crate) accessor: Column<u64>,
|
||||
/// Load insert u64 for missing use case
|
||||
pub(crate) missing_value_for_accessor: Option<u64>,
|
||||
pub(crate) str_dict_column: Option<StrColumn>,
|
||||
pub(crate) field_type: ColumnType,
|
||||
pub(crate) sub_aggregation: AggregationsWithAccessor,
|
||||
pub(crate) limits: AggregationLimitsGuard,
|
||||
pub(crate) column_block_accessor: ColumnBlockAccessor<u64>,
|
||||
/// Used for missing term aggregation, which checks all columns for existence.
|
||||
/// And also for `top_hits` aggregation, which may sort on multiple fields.
|
||||
/// By convention the missing aggregation is chosen when this property is set
|
||||
/// (instead of being set in `agg`).
|
||||
/// If this needs to be used by other aggregations, we need to refactor this.
|
||||
    // NOTE: we can make all other aggregations use this instead of the `accessor` and `field_type`
    // (making them obsolete). But will it have a performance impact?
    pub(crate) accessors: Vec<(Column<u64>, ColumnType)>,
    /// Maps field names to all associated column accessors.
    /// This field is used for `docvalue_fields`, which is currently only supported for `top_hits`.
    pub(crate) value_accessors: HashMap<String, Vec<DynamicColumn>>,
    pub(crate) agg: Aggregation,
}

impl AggregationWithAccessor {
    /// May return multiple accessors if the aggregation is e.g. on mixed field types.
    fn try_from_agg(
        agg: &Aggregation,
        sub_aggregation: &Aggregations,
        reader: &SegmentReader,
        segment_ordinal: SegmentOrdinal,
        limits: AggregationLimitsGuard,
    ) -> crate::Result<Vec<AggregationWithAccessor>> {
        let mut agg = agg.clone();

        let add_agg_with_accessor = |agg: &Aggregation,
                                     accessor: Column<u64>,
                                     column_type: ColumnType,
                                     aggs: &mut Vec<AggregationWithAccessor>|
         -> crate::Result<()> {
            let res = AggregationWithAccessor {
                segment_ordinal,
                accessor,
                accessors: Default::default(),
                value_accessors: Default::default(),
                field_type: column_type,
                sub_aggregation: get_aggs_with_segment_accessor_and_validate(
                    sub_aggregation,
                    reader,
                    segment_ordinal,
                    &limits,
                )?,
                agg: agg.clone(),
                limits: limits.clone(),
                missing_value_for_accessor: None,
                str_dict_column: None,
                column_block_accessor: Default::default(),
            };
            aggs.push(res);
            Ok(())
        };

        let add_agg_with_accessors = |agg: &Aggregation,
                                      accessors: Vec<(Column<u64>, ColumnType)>,
                                      aggs: &mut Vec<AggregationWithAccessor>,
                                      value_accessors: HashMap<String, Vec<DynamicColumn>>|
         -> crate::Result<()> {
            let (accessor, field_type) = accessors.first().expect("at least one accessor");
            let limits = limits.clone();
            let res = AggregationWithAccessor {
                segment_ordinal,
                // TODO: We should do away with the `accessor` field altogether
                accessor: accessor.clone(),
                value_accessors,
                field_type: *field_type,
                accessors,
                sub_aggregation: get_aggs_with_segment_accessor_and_validate(
                    sub_aggregation,
                    reader,
                    segment_ordinal,
                    &limits,
                )?,
                agg: agg.clone(),
                limits,
                missing_value_for_accessor: None,
                str_dict_column: None,
                column_block_accessor: Default::default(),
            };
            aggs.push(res);
            Ok(())
        };

        let mut res: Vec<AggregationWithAccessor> = Vec::new();
        use AggregationVariants::*;

        match agg.agg {
            Range(RangeAggregation {
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            Histogram(HistogramAggregation {
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            DateHistogram(DateHistogramAggregationReq {
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    // Only DateTime is supported for DateHistogram
                    get_ff_reader(reader, field_name, Some(&[ColumnType::DateTime]))?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            Terms(TermsAggregation {
                field: ref field_name,
                ref missing,
                ..
            })
            | Cardinality(CardinalityAggregationReq {
                field: ref field_name,
                ref missing,
                ..
            }) => {
                let str_dict_column = reader.fast_fields().str(field_name)?;
                let allowed_column_types = [
                    ColumnType::I64,
                    ColumnType::U64,
                    ColumnType::F64,
                    ColumnType::Str,
                    ColumnType::DateTime,
                    ColumnType::Bool,
                    ColumnType::IpAddr,
                    // ColumnType::Bytes Unsupported
                ];

                // In case the column is empty we want the shim column to match the missing type
                let fallback_type = missing
                    .as_ref()
                    .map(|missing| match missing {
                        Key::Str(_) => ColumnType::Str,
                        Key::F64(_) => ColumnType::F64,
                        Key::I64(_) => ColumnType::I64,
                        Key::U64(_) => ColumnType::U64,
                    })
                    .unwrap_or(ColumnType::U64);
                let column_and_types = get_all_ff_reader_or_empty(
                    reader,
                    field_name,
                    Some(&allowed_column_types),
                    fallback_type,
                )?;
                let missing_and_more_than_one_col = column_and_types.len() > 1 && missing.is_some();
                let text_on_non_text_col = column_and_types.len() == 1
                    && column_and_types[0].1.numerical_type().is_some()
                    && missing
                        .as_ref()
                        .map(|m| matches!(m, Key::Str(_)))
                        .unwrap_or(false);

                // Actually we could convert the text to a number and have the fast path, if it is
                // provided in Rfc3339 format. But this use case is probably not common
                // enough to justify the effort.
                let text_on_date_col = column_and_types.len() == 1
                    && column_and_types[0].1 == ColumnType::DateTime
                    && missing
                        .as_ref()
                        .map(|m| matches!(m, Key::Str(_)))
                        .unwrap_or(false);

                let use_special_missing_agg =
                    missing_and_more_than_one_col || text_on_non_text_col || text_on_date_col;
                if use_special_missing_agg {
                    let column_and_types =
                        get_all_ff_reader_or_empty(reader, field_name, None, fallback_type)?;

                    let accessors = column_and_types
                        .iter()
                        .map(|c_t| (c_t.0.clone(), c_t.1))
                        .collect();
                    add_agg_with_accessors(&agg, accessors, &mut res, Default::default())?;
                }

                for (accessor, column_type) in column_and_types {
                    let missing_value_term_agg = if use_special_missing_agg {
                        None
                    } else {
                        missing.clone()
                    };

                    let missing_value_for_accessor =
                        if let Some(missing) = missing_value_term_agg.as_ref() {
                            get_missing_val_as_u64_lenient(
                                column_type,
                                missing,
                                agg.agg.get_fast_field_names()[0],
                            )?
                        } else {
                            None
                        };

                    let limits = limits.clone();
                    let agg = AggregationWithAccessor {
                        segment_ordinal,
                        missing_value_for_accessor,
                        accessor,
                        accessors: Default::default(),
                        value_accessors: Default::default(),
                        field_type: column_type,
                        sub_aggregation: get_aggs_with_segment_accessor_and_validate(
                            sub_aggregation,
                            reader,
                            segment_ordinal,
                            &limits,
                        )?,
                        agg: agg.clone(),
                        str_dict_column: str_dict_column.clone(),
                        limits,
                        column_block_accessor: Default::default(),
                    };
                    res.push(agg);
                }
            }
            Average(AverageAggregation {
                field: ref field_name,
                ..
            })
            | Max(MaxAggregation {
                field: ref field_name,
                ..
            })
            | Min(MinAggregation {
                field: ref field_name,
                ..
            })
            | Stats(StatsAggregation {
                field: ref field_name,
                ..
            })
            | ExtendedStats(ExtendedStatsAggregation {
                field: ref field_name,
                ..
            })
            | Sum(SumAggregation {
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            Count(CountAggregation {
                field: ref field_name,
                ..
            }) => {
                let allowed_column_types = [
                    ColumnType::I64,
                    ColumnType::U64,
                    ColumnType::F64,
                    ColumnType::Str,
                    ColumnType::DateTime,
                    ColumnType::Bool,
                    ColumnType::IpAddr,
                    // ColumnType::Bytes Unsupported
                ];
                let (accessor, column_type) =
                    get_ff_reader(reader, field_name, Some(&allowed_column_types))?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            Percentiles(ref percentiles) => {
                let (accessor, column_type) = get_ff_reader(
                    reader,
                    percentiles.field_name(),
                    Some(get_numeric_or_date_column_types()),
                )?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            TopHits(ref mut top_hits) => {
                top_hits.validate_and_resolve_field_names(reader.fast_fields().columnar())?;
                let accessors: Vec<(Column<u64>, ColumnType)> = top_hits
                    .field_names()
                    .iter()
                    .map(|field| {
                        get_ff_reader(reader, field, Some(get_numeric_or_date_column_types()))
                    })
                    .collect::<crate::Result<_>>()?;

                let value_accessors = top_hits
                    .value_field_names()
                    .iter()
                    .map(|field_name| {
                        Ok((
                            field_name.to_string(),
                            get_dynamic_columns(reader, field_name)?,
                        ))
                    })
                    .collect::<crate::Result<_>>()?;

                add_agg_with_accessors(&agg, accessors, &mut res, value_accessors)?;
            }
        };

        Ok(res)
    }
}

/// Get the missing value as internal u64 representation
///
/// For terms we use u64::MAX as sentinel value
/// For numerical data we convert the value into the representation
/// we would get from the fast field, when we open it as u64_lenient_for_type.
///
/// That way we can use it the same way as if it would come from the fastfield.
fn get_missing_val_as_u64_lenient(
    column_type: ColumnType,
    missing: &Key,
    field_name: &str,
) -> crate::Result<Option<u64>> {
    let missing_val = match missing {
        Key::Str(_) if column_type == ColumnType::Str => Some(u64::MAX),
        // Allow fallback to number on text fields
        Key::F64(_) if column_type == ColumnType::Str => Some(u64::MAX),
        Key::U64(_) if column_type == ColumnType::Str => Some(u64::MAX),
        Key::I64(_) if column_type == ColumnType::Str => Some(u64::MAX),
        Key::F64(val) if column_type.numerical_type().is_some() => {
            f64_to_fastfield_u64(*val, &column_type)
        }
        // NOTE: We may lose precision of the passed missing value by casting i64 and u64 to f64.
        Key::I64(val) if column_type.numerical_type().is_some() => {
            f64_to_fastfield_u64(*val as f64, &column_type)
        }
        Key::U64(val) if column_type.numerical_type().is_some() => {
            f64_to_fastfield_u64(*val as f64, &column_type)
        }
        _ => {
            return Err(crate::TantivyError::InvalidArgument(format!(
                "Missing value {missing:?} for field {field_name} is not supported for column \
                 type {column_type:?}"
            )));
        }
    };
    Ok(missing_val)
}

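For orientation, this is roughly how a caller triggers the missing-value handling above. The snippet is a sketch, not part of the changeset: the aggregation name, the "category" field and the "N/A" key are illustrative. On a text column the missing key resolves to the u64::MAX sentinel; on numeric columns it goes through f64_to_fastfield_u64 as shown above.

use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;

fn terms_request_with_missing() -> Aggregations {
    // Buckets documents that have no value in the "category" fast field
    // under the illustrative "N/A" key.
    serde_json::from_value(json!({
        "categories": {
            "terms": { "field": "category", "missing": "N/A" }
        }
    }))
    .expect("valid terms aggregation request")
}
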
fn get_numeric_or_date_column_types() -> &'static [ColumnType] {
    &[
        ColumnType::F64,
        ColumnType::U64,
        ColumnType::I64,
        ColumnType::DateTime,
    ]
}

pub(crate) fn get_aggs_with_segment_accessor_and_validate(
    aggs: &Aggregations,
    reader: &SegmentReader,
    segment_ordinal: SegmentOrdinal,
    limits: &AggregationLimitsGuard,
) -> crate::Result<AggregationsWithAccessor> {
    let mut aggss = Vec::new();
    for (key, agg) in aggs.iter() {
        let aggs = AggregationWithAccessor::try_from_agg(
            agg,
            agg.sub_aggregation(),
            reader,
            segment_ordinal,
            limits.clone(),
        )?;
        for agg in aggs {
            aggss.push((key.to_string(), agg));
        }
    }
    Ok(AggregationsWithAccessor::from_data(
        VecWithNames::from_entries(aggss),
    ))
}

/// Get fast field reader or empty as default.
fn get_ff_reader(
    reader: &SegmentReader,
    field_name: &str,
    allowed_column_types: Option<&[ColumnType]>,
) -> crate::Result<(columnar::Column<u64>, ColumnType)> {
    let ff_fields = reader.fast_fields();
    let ff_field_with_type = ff_fields
        .u64_lenient_for_type(allowed_column_types, field_name)?
        .unwrap_or_else(|| {
            (
                Column::build_empty_column(reader.num_docs()),
                ColumnType::U64,
            )
        });
    Ok(ff_field_with_type)
}

fn get_dynamic_columns(
    reader: &SegmentReader,
    field_name: &str,
) -> crate::Result<Vec<columnar::DynamicColumn>> {
    let ff_fields = reader.fast_fields().dynamic_column_handles(field_name)?;
    let cols = ff_fields
        .iter()
        .map(|h| h.open())
        .collect::<io::Result<_>>()?;
    assert!(!ff_fields.is_empty(), "field {field_name} not found");
    Ok(cols)
}

/// Get all fast field reader or empty as default.
///
/// Is guaranteed to return at least one column.
fn get_all_ff_reader_or_empty(
    reader: &SegmentReader,
    field_name: &str,
    allowed_column_types: Option<&[ColumnType]>,
    fallback_type: ColumnType,
) -> crate::Result<Vec<(columnar::Column<u64>, ColumnType)>> {
    let ff_fields = reader.fast_fields();
    let mut ff_field_with_type =
        ff_fields.u64_lenient_for_type_all(allowed_column_types, field_name)?;
    if ff_field_with_type.is_empty() {
        ff_field_with_type.push((Column::build_empty_column(reader.num_docs()), fallback_type));
    }
    Ok(ff_field_with_type)
}

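The helpers above are what a collector uses per segment; a rough end-to-end usage sketch follows. It is not part of the diff: the field name, document values and the 50 MB writer budget are illustrative, and Default::default() stands in for whatever limits/context argument AggregationCollector::from_aggs takes in this changeset.

use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, Index, IndexWriter};

fn avg_price_sketch() -> tantivy::Result<AggregationResults> {
    let mut schema_builder = Schema::builder();
    let price = schema_builder.add_f64_field("price", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer: IndexWriter = index.writer(50_000_000)?;
    writer.add_document(doc!(price => 10.0))?;
    writer.add_document(doc!(price => 20.0))?;
    writer.commit()?;

    // Average over the "price" fast field; the accessors above resolve
    // the column for each segment when the collector runs.
    let agg_req: Aggregations = serde_json::from_value(json!({
        "avg_price": { "avg": { "field": "price" } }
    }))
    .expect("valid aggregation request");

    let collector = AggregationCollector::from_aggs(agg_req, Default::default());
    let searcher = index.reader()?.searcher();
    searcher.search(&AllQuery, &collector)
}
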
@@ -156,6 +156,8 @@ pub enum BucketResult {
        /// The upper bound error for the doc count of each term.
        doc_count_error_upper_bound: Option<u64>,
    },
    /// This is the filter result - a single bucket with sub-aggregations
    Filter(FilterBucketResult),
}

impl BucketResult {
@@ -172,6 +174,11 @@ impl BucketResult {
                sum_other_doc_count: _,
                doc_count_error_upper_bound: _,
            } => buckets.iter().map(|bucket| bucket.get_bucket_count()).sum(),
            BucketResult::Filter(filter_result) => {
                // Filter doesn't add to bucket count - it's not a user-facing bucket
                // Only count sub-aggregation buckets
                filter_result.sub_aggregations.get_bucket_count()
            }
        }
    }
}
@@ -308,3 +315,25 @@ impl RangeBucketEntry {
        1 + self.sub_aggregation.get_bucket_count()
    }
}

/// This is the filter bucket result, which contains the document count and sub-aggregations.
///
/// # JSON Format
/// ```json
/// {
///   "electronics_only": {
///     "doc_count": 2,
///     "avg_price": {
///       "value": 150.0
///     }
///   }
/// }
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FilterBucketResult {
    /// Number of documents in the filter bucket
    pub doc_count: u64,
    /// Sub-aggregation results
    #[serde(flatten)]
    pub sub_aggregations: AggregationResults,
}

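A small sketch of how the documented JSON shape maps onto this struct, assuming FilterBucketResult is reachable under tantivy::aggregation::agg_result and that the flattened sub-aggregation map deserializes the way the doc example suggests; the values are the ones from the doc comment above.

use serde_json::json;
use tantivy::aggregation::agg_result::FilterBucketResult;

fn filter_bucket_from_json_sketch() -> FilterBucketResult {
    // "doc_count" plus the flattened sub-aggregation results
    // ("avg_price") live at the same nesting level.
    serde_json::from_value(json!({
        "doc_count": 2,
        "avg_price": { "value": 150.0 }
    }))
    .expect("filter bucket shape from the doc comment")
}
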
@@ -5,7 +5,6 @@ use crate::aggregation::agg_result::AggregationResults;
use crate::aggregation::buf_collector::DOC_BLOCK_SIZE;
use crate::aggregation::collector::AggregationCollector;
use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
use crate::aggregation::segment_agg_result::AggregationLimitsGuard;
use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values_and_terms};
use crate::aggregation::DistributedAggregationCollector;
use crate::query::{AllQuery, TermQuery};
@@ -128,10 +127,8 @@ fn test_aggregation_flushing(
    .unwrap();

    let agg_res: AggregationResults = if use_distributed_collector {
        let collector = DistributedAggregationCollector::from_aggs(
            agg_req.clone(),
            AggregationLimitsGuard::default(),
        );
        let collector =
            DistributedAggregationCollector::from_aggs(agg_req.clone(), Default::default());

        let searcher = reader.searcher();
        let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();

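For reference, the distributed path this test exercises looks roughly like the sketch below when spelled out: each node runs the distributed collector, the intermediate fruits are merged, and only the merged result is turned into the final response. This is a sketch, not part of the changeset; Default::default() again stands in for the limits/context arguments.

use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::intermediate_agg_result::IntermediateAggregationResults;
use tantivy::aggregation::DistributedAggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Searcher;

// Run the same request on two searchers (e.g. two nodes), merge the
// intermediate results, then build the final result once.
fn merge_two_nodes(
    agg_req: Aggregations,
    node_a: &Searcher,
    node_b: &Searcher,
) -> tantivy::Result<AggregationResults> {
    let collector =
        DistributedAggregationCollector::from_aggs(agg_req.clone(), Default::default());
    let mut merged: IntermediateAggregationResults = node_a.search(&AllQuery, &collector)?;
    let from_b = node_b.search(&AllQuery, &collector)?;
    merged.merge_fruits(from_b)?;
    merged.into_final_result(agg_req, Default::default())
}
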
src/aggregation/bucket/filter.rs (new file, 1755 lines): diff suppressed because it is too large.
@@ -1,25 +1,54 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType};
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::aggregation::agg_data::{
|
||||
build_segment_agg_collectors, AggRefNode, AggregationsSegmentCtx,
|
||||
};
|
||||
use crate::aggregation::agg_limits::MemoryConsumption;
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::agg_req_with_accessor::{
|
||||
AggregationWithAccessor, AggregationsWithAccessor,
|
||||
};
|
||||
use crate::aggregation::agg_result::BucketEntry;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateBucketResult,
|
||||
IntermediateHistogramBucketEntry,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::*;
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Contains all information required by the SegmentHistogramCollector to perform the
|
||||
/// histogram or date_histogram aggregation on a segment.
|
||||
pub struct HistogramAggReqData {
|
||||
/// The column accessor to access the fast field values.
|
||||
pub accessor: Column<u64>,
|
||||
/// The field type of the fast field.
|
||||
pub field_type: ColumnType,
|
||||
/// The column block accessor to access the fast field values.
|
||||
pub column_block_accessor: ColumnBlockAccessor<u64>,
|
||||
/// The name of the aggregation.
|
||||
pub name: String,
|
||||
/// The sub aggregation blueprint, used to create sub aggregations for each bucket.
|
||||
/// Will be filled during initialization of the collector.
|
||||
pub sub_aggregation_blueprint: Option<Box<dyn SegmentAggregationCollector>>,
|
||||
/// The histogram aggregation request.
|
||||
pub req: HistogramAggregation,
|
||||
/// True if this is a date_histogram aggregation.
|
||||
pub is_date_histogram: bool,
|
||||
/// The bounds to limit the buckets to.
|
||||
pub bounds: HistogramBounds,
|
||||
/// The offset used to calculate the bucket position.
|
||||
pub offset: f64,
|
||||
}
|
||||
impl HistogramAggReqData {
|
||||
/// Estimate the memory consumption of this struct in bytes.
|
||||
pub fn get_memory_consumption(&self) -> usize {
|
||||
std::mem::size_of::<Self>()
|
||||
}
|
||||
}
|
||||
|
||||
/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
|
||||
/// Each document value is rounded down to its bucket.
|
||||
///
|
||||
@@ -234,12 +263,12 @@ impl SegmentHistogramBucketEntry {
|
||||
pub(crate) fn into_intermediate_bucket_entry(
|
||||
self,
|
||||
sub_aggregation: Option<Box<dyn SegmentAggregationCollector>>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
) -> crate::Result<IntermediateHistogramBucketEntry> {
|
||||
let mut sub_aggregation_res = IntermediateAggregationResults::default();
|
||||
if let Some(sub_aggregation) = sub_aggregation {
|
||||
sub_aggregation
|
||||
.add_intermediate_aggregation_result(agg_with_accessor, &mut sub_aggregation_res)?;
|
||||
.add_intermediate_aggregation_result(agg_data, &mut sub_aggregation_res)?;
|
||||
}
|
||||
Ok(IntermediateHistogramBucketEntry {
|
||||
key: self.key,
|
||||
@@ -256,24 +285,20 @@ pub struct SegmentHistogramCollector {
|
||||
/// The buckets containing the aggregation data.
|
||||
buckets: FxHashMap<i64, SegmentHistogramBucketEntry>,
|
||||
sub_aggregations: FxHashMap<i64, Box<dyn SegmentAggregationCollector>>,
|
||||
sub_aggregation_blueprint: Option<Box<dyn SegmentAggregationCollector>>,
|
||||
column_type: ColumnType,
|
||||
interval: f64,
|
||||
offset: f64,
|
||||
bounds: HistogramBounds,
|
||||
accessor_idx: usize,
|
||||
}
|
||||
|
||||
impl SegmentAggregationCollector for SegmentHistogramCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let agg_with_accessor = &agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
|
||||
let bucket = self.into_intermediate_bucket_result(agg_with_accessor)?;
|
||||
let name = agg_data
|
||||
.get_histogram_req_data(self.accessor_idx)
|
||||
.name
|
||||
.clone();
|
||||
let bucket = self.into_intermediate_bucket_result(agg_data)?;
|
||||
results.push(name, IntermediateAggregationResult::Bucket(bucket))?;
|
||||
|
||||
Ok(())
|
||||
@@ -283,56 +308,52 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
self.collect_block(&[doc], agg_with_accessor)
|
||||
self.collect_block(&[doc], agg_data)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let bucket_agg_accessor = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
|
||||
let mut req = agg_data.take_histogram_req_data(self.accessor_idx);
|
||||
let mem_pre = self.get_memory_consumption();
|
||||
|
||||
let bounds = self.bounds;
|
||||
let interval = self.interval;
|
||||
let offset = self.offset;
|
||||
let get_bucket_pos = |val| (get_bucket_pos_f64(val, interval, offset) as i64);
|
||||
let bounds = req.bounds;
|
||||
let interval = req.req.interval;
|
||||
let offset = req.offset;
|
||||
let get_bucket_pos = |val| get_bucket_pos_f64(val, interval, offset) as i64;
|
||||
|
||||
bucket_agg_accessor
|
||||
req.column_block_accessor.fetch_block(docs, &req.accessor);
|
||||
for (doc, val) in req
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &bucket_agg_accessor.accessor);
|
||||
|
||||
for (doc, val) in bucket_agg_accessor
|
||||
.column_block_accessor
|
||||
.iter_docid_vals(docs, &bucket_agg_accessor.accessor)
|
||||
.iter_docid_vals(docs, &req.accessor)
|
||||
{
|
||||
let val = self.f64_from_fastfield_u64(val);
|
||||
|
||||
let val = f64_from_fastfield_u64(val, &req.field_type);
|
||||
let bucket_pos = get_bucket_pos(val);
|
||||
|
||||
if bounds.contains(val) {
|
||||
let bucket = self.buckets.entry(bucket_pos).or_insert_with(|| {
|
||||
let key = get_bucket_key_from_pos(bucket_pos as f64, interval, offset);
|
||||
SegmentHistogramBucketEntry { key, doc_count: 0 }
|
||||
});
|
||||
bucket.doc_count += 1;
|
||||
if let Some(sub_aggregation_blueprint) = self.sub_aggregation_blueprint.as_mut() {
|
||||
if let Some(sub_aggregation_blueprint) = req.sub_aggregation_blueprint.as_ref() {
|
||||
self.sub_aggregations
|
||||
.entry(bucket_pos)
|
||||
.or_insert_with(|| sub_aggregation_blueprint.clone())
|
||||
.collect(doc, &mut bucket_agg_accessor.sub_aggregation)?;
|
||||
.collect(doc, agg_data)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
agg_data.put_back_histogram_req_data(self.accessor_idx, req);
|
||||
|
||||
let mem_delta = self.get_memory_consumption() - mem_pre;
|
||||
if mem_delta > 0 {
|
||||
bucket_agg_accessor
|
||||
agg_data
|
||||
.context
|
||||
.limits
|
||||
.add_memory_consumed(mem_delta as u64)?;
|
||||
}
|
||||
@@ -340,12 +361,9 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
|
||||
let sub_aggregation_accessor =
|
||||
&mut agg_with_accessor.aggs.values[self.accessor_idx].sub_aggregation;
|
||||
|
||||
fn flush(&mut self, agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
|
||||
for sub_aggregation in self.sub_aggregations.values_mut() {
|
||||
sub_aggregation.flush(sub_aggregation_accessor)?;
|
||||
sub_aggregation.flush(agg_data)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -362,65 +380,58 @@ impl SegmentHistogramCollector {
|
||||
/// Converts the collector result into a intermediate bucket result.
|
||||
pub fn into_intermediate_bucket_result(
|
||||
self,
|
||||
agg_with_accessor: &AggregationWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
) -> crate::Result<IntermediateBucketResult> {
|
||||
let mut buckets = Vec::with_capacity(self.buckets.len());
|
||||
|
||||
for (bucket_pos, bucket) in self.buckets {
|
||||
let bucket_res = bucket.into_intermediate_bucket_entry(
|
||||
self.sub_aggregations.get(&bucket_pos).cloned(),
|
||||
&agg_with_accessor.sub_aggregation,
|
||||
agg_data,
|
||||
);
|
||||
|
||||
buckets.push(bucket_res?);
|
||||
}
|
||||
buckets.sort_unstable_by(|b1, b2| b1.key.total_cmp(&b2.key));
|
||||
|
||||
let is_date_agg = agg_data
|
||||
.get_histogram_req_data(self.accessor_idx)
|
||||
.field_type
|
||||
== ColumnType::DateTime;
|
||||
Ok(IntermediateBucketResult::Histogram {
|
||||
buckets,
|
||||
is_date_agg: self.column_type == ColumnType::DateTime,
|
||||
is_date_agg,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn from_req_and_validate(
|
||||
mut req: HistogramAggregation,
|
||||
sub_aggregation: &mut AggregationsWithAccessor,
|
||||
field_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
node: &AggRefNode,
|
||||
) -> crate::Result<Self> {
|
||||
req.validate()?;
|
||||
if field_type == ColumnType::DateTime {
|
||||
req.normalize_date_time();
|
||||
}
|
||||
|
||||
let sub_aggregation_blueprint = if sub_aggregation.is_empty() {
|
||||
None
|
||||
let blueprint = if !node.children.is_empty() {
|
||||
Some(build_segment_agg_collectors(agg_data, &node.children)?)
|
||||
} else {
|
||||
let sub_aggregation = build_segment_agg_collector(sub_aggregation)?;
|
||||
Some(sub_aggregation)
|
||||
None
|
||||
};
|
||||
|
||||
let bounds = req.hard_bounds.unwrap_or(HistogramBounds {
|
||||
let req_data = agg_data.get_histogram_req_data_mut(node.idx_in_req_data);
|
||||
req_data.req.validate()?;
|
||||
if req_data.field_type == ColumnType::DateTime && !req_data.is_date_histogram {
|
||||
req_data.req.normalize_date_time();
|
||||
}
|
||||
req_data.bounds = req_data.req.hard_bounds.unwrap_or(HistogramBounds {
|
||||
min: f64::MIN,
|
||||
max: f64::MAX,
|
||||
});
|
||||
req_data.offset = req_data.req.offset.unwrap_or(0.0);
|
||||
|
||||
req_data.sub_aggregation_blueprint = blueprint;
|
||||
|
||||
Ok(Self {
|
||||
buckets: Default::default(),
|
||||
column_type: field_type,
|
||||
interval: req.interval,
|
||||
offset: req.offset.unwrap_or(0.0),
|
||||
bounds,
|
||||
sub_aggregations: Default::default(),
|
||||
sub_aggregation_blueprint,
|
||||
accessor_idx,
|
||||
accessor_idx: node.idx_in_req_data,
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn f64_from_fastfield_u64(&self, val: u64) -> f64 {
|
||||
f64_from_fastfield_u64(val, &self.column_type)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
||||
@@ -22,6 +22,7 @@
//! - [Range](RangeAggregation)
//! - [Terms](TermsAggregation)

mod filter;
mod histogram;
mod range;
mod term_agg;
@@ -30,6 +31,7 @@ mod term_missing_agg;
use std::collections::HashMap;
use std::fmt;

pub use filter::*;
pub use histogram::*;
pub use range::*;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

@@ -1,20 +1,43 @@
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Range;
|
||||
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType};
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
|
||||
use crate::aggregation::agg_data::{
|
||||
build_segment_agg_collectors, AggRefNode, AggregationsSegmentCtx,
|
||||
};
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateBucketResult,
|
||||
IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::*;
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Contains all information required by the SegmentRangeCollector to perform the
|
||||
/// range aggregation on a segment.
|
||||
pub struct RangeAggReqData {
|
||||
/// The column accessor to access the fast field values.
|
||||
pub accessor: Column<u64>,
|
||||
/// The type of the fast field.
|
||||
pub field_type: ColumnType,
|
||||
/// The column block accessor to access the fast field values.
|
||||
pub column_block_accessor: ColumnBlockAccessor<u64>,
|
||||
/// The range aggregation request.
|
||||
pub req: RangeAggregation,
|
||||
/// The name of the aggregation.
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
impl RangeAggReqData {
|
||||
/// Estimate the memory consumption of this struct in bytes.
|
||||
pub fn get_memory_consumption(&self) -> usize {
|
||||
std::mem::size_of::<Self>()
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide user-defined buckets to aggregate on.
|
||||
///
|
||||
/// Two special buckets will automatically be created to cover the whole range of values.
|
||||
@@ -161,12 +184,12 @@ impl Debug for SegmentRangeBucketEntry {
|
||||
impl SegmentRangeBucketEntry {
|
||||
pub(crate) fn into_intermediate_bucket_entry(
|
||||
self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
) -> crate::Result<IntermediateRangeBucketEntry> {
|
||||
let mut sub_aggregation_res = IntermediateAggregationResults::default();
|
||||
if let Some(sub_aggregation) = self.sub_aggregation {
|
||||
sub_aggregation
|
||||
.add_intermediate_aggregation_result(agg_with_accessor, &mut sub_aggregation_res)?
|
||||
.add_intermediate_aggregation_result(agg_data, &mut sub_aggregation_res)?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
@@ -184,12 +207,14 @@ impl SegmentRangeBucketEntry {
|
||||
impl SegmentAggregationCollector for SegmentRangeCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let field_type = self.column_type;
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let sub_agg = &agg_with_accessor.aggs.values[self.accessor_idx].sub_aggregation;
|
||||
let name = agg_data
|
||||
.get_range_req_data(self.accessor_idx)
|
||||
.name
|
||||
.to_string();
|
||||
|
||||
let buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
|
||||
.buckets
|
||||
@@ -199,7 +224,7 @@ impl SegmentAggregationCollector for SegmentRangeCollector {
|
||||
range_to_string(&range_bucket.range, &field_type)?,
|
||||
range_bucket
|
||||
.bucket
|
||||
.into_intermediate_bucket_entry(sub_agg)?,
|
||||
.into_intermediate_bucket_entry(agg_data)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
@@ -218,66 +243,70 @@ impl SegmentAggregationCollector for SegmentRangeCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
self.collect_block(&[doc], agg_with_accessor)
|
||||
self.collect_block(&[doc], agg_data)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let bucket_agg_accessor = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
// Take request data to avoid borrow conflicts during sub-aggregation
|
||||
let mut req = agg_data.take_range_req_data(self.accessor_idx);
|
||||
|
||||
bucket_agg_accessor
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &bucket_agg_accessor.accessor);
|
||||
req.column_block_accessor.fetch_block(docs, &req.accessor);
|
||||
|
||||
for (doc, val) in bucket_agg_accessor
|
||||
for (doc, val) in req
|
||||
.column_block_accessor
|
||||
.iter_docid_vals(docs, &bucket_agg_accessor.accessor)
|
||||
.iter_docid_vals(docs, &req.accessor)
|
||||
{
|
||||
let bucket_pos = self.get_bucket_pos(val);
|
||||
|
||||
let bucket = &mut self.buckets[bucket_pos];
|
||||
|
||||
bucket.bucket.doc_count += 1;
|
||||
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
|
||||
sub_aggregation.collect(doc, &mut bucket_agg_accessor.sub_aggregation)?;
|
||||
if let Some(sub_agg) = bucket.bucket.sub_aggregation.as_mut() {
|
||||
sub_agg.collect(doc, agg_data)?;
|
||||
}
|
||||
}
|
||||
|
||||
agg_data.put_back_range_req_data(self.accessor_idx, req);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
|
||||
let sub_aggregation_accessor =
|
||||
&mut agg_with_accessor.aggs.values[self.accessor_idx].sub_aggregation;
|
||||
|
||||
fn flush(&mut self, agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
|
||||
for bucket in self.buckets.iter_mut() {
|
||||
if let Some(sub_agg) = bucket.bucket.sub_aggregation.as_mut() {
|
||||
sub_agg.flush(sub_aggregation_accessor)?;
|
||||
sub_agg.flush(agg_data)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentRangeCollector {
|
||||
pub(crate) fn from_req_and_validate(
|
||||
req: &RangeAggregation,
|
||||
sub_aggregation: &mut AggregationsWithAccessor,
|
||||
limits: &mut AggregationLimitsGuard,
|
||||
field_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
req_data: &mut AggregationsSegmentCtx,
|
||||
node: &AggRefNode,
|
||||
) -> crate::Result<Self> {
|
||||
let accessor_idx = node.idx_in_req_data;
|
||||
let (field_type, ranges) = {
|
||||
let req_view = req_data.get_range_req_data(node.idx_in_req_data);
|
||||
(req_view.field_type, req_view.req.ranges.clone())
|
||||
};
|
||||
|
||||
// The range input on the request is f64.
|
||||
// We need to convert to u64 ranges, because we read the values as u64.
|
||||
// The mapping from the conversion is monotonic so ordering is preserved.
|
||||
let buckets: Vec<_> = extend_validate_ranges(&req.ranges, &field_type)?
|
||||
let sub_agg_prototype = if !node.children.is_empty() {
|
||||
Some(build_segment_agg_collectors(req_data, &node.children)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let buckets: Vec<_> = extend_validate_ranges(&ranges, &field_type)?
|
||||
.iter()
|
||||
.map(|range| {
|
||||
let key = range
|
||||
@@ -295,11 +324,7 @@ impl SegmentRangeCollector {
|
||||
} else {
|
||||
Some(f64_from_fastfield_u64(range.range.start, &field_type))
|
||||
};
|
||||
let sub_aggregation = if sub_aggregation.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(build_segment_agg_collector(sub_aggregation)?)
|
||||
};
|
||||
let sub_aggregation = sub_agg_prototype.clone();
|
||||
|
||||
Ok(SegmentRangeAndBucketEntry {
|
||||
range: range.range.clone(),
|
||||
@@ -314,7 +339,7 @@ impl SegmentRangeCollector {
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
limits.add_memory_consumed(
|
||||
req_data.context.limits.add_memory_consumed(
|
||||
buckets.len() as u64 * std::mem::size_of::<SegmentRangeAndBucketEntry>() as u64,
|
||||
)?;
|
||||
|
||||
@@ -467,15 +492,45 @@ mod tests {
|
||||
ranges,
|
||||
..Default::default()
|
||||
};
|
||||
// Build buckets directly as in from_req_and_validate without AggregationsData
|
||||
let buckets: Vec<_> = extend_validate_ranges(&req.ranges, &field_type)
|
||||
.expect("unexpected error in extend_validate_ranges")
|
||||
.iter()
|
||||
.map(|range| {
|
||||
let key = range
|
||||
.key
|
||||
.clone()
|
||||
.map(|key| Ok(Key::Str(key)))
|
||||
.unwrap_or_else(|| range_to_key(&range.range, &field_type))
|
||||
.expect("unexpected error in range_to_key");
|
||||
let to = if range.range.end == u64::MAX {
|
||||
None
|
||||
} else {
|
||||
Some(f64_from_fastfield_u64(range.range.end, &field_type))
|
||||
};
|
||||
let from = if range.range.start == u64::MIN {
|
||||
None
|
||||
} else {
|
||||
Some(f64_from_fastfield_u64(range.range.start, &field_type))
|
||||
};
|
||||
SegmentRangeAndBucketEntry {
|
||||
range: range.range.clone(),
|
||||
bucket: SegmentRangeBucketEntry {
|
||||
doc_count: 0,
|
||||
sub_aggregation: None,
|
||||
key,
|
||||
from,
|
||||
to,
|
||||
},
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
SegmentRangeCollector::from_req_and_validate(
|
||||
&req,
|
||||
&mut Default::default(),
|
||||
&mut AggregationLimitsGuard::default(),
|
||||
field_type,
|
||||
0,
|
||||
)
|
||||
.expect("unexpected error")
|
||||
SegmentRangeCollector {
|
||||
buckets,
|
||||
column_type: field_type,
|
||||
accessor_idx: 0,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
src/aggregation/bucket/term_agg/default_impl.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
use std::fmt::Debug;
|
||||
|
||||
use columnar::ColumnType;
|
||||
use rustc_hash::FxHashMap;
|
||||
|
||||
use super::OrderTarget;
|
||||
use crate::aggregation::agg_data::{
|
||||
build_segment_agg_collectors, AggRefNode, AggregationsSegmentCtx,
|
||||
};
|
||||
use crate::aggregation::agg_limits::MemoryConsumption;
|
||||
use crate::aggregation::bucket::get_agg_name_and_property;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::TantivyError;
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
/// Container to store term_ids/or u64 values and their buckets.
|
||||
struct TermBuckets {
|
||||
pub(crate) entries: FxHashMap<u64, u32>,
|
||||
pub(crate) sub_aggs: FxHashMap<u64, Box<dyn SegmentAggregationCollector>>,
|
||||
}
|
||||
|
||||
impl TermBuckets {
|
||||
fn get_memory_consumption(&self) -> usize {
|
||||
let sub_aggs_mem = self.sub_aggs.memory_consumption();
|
||||
let buckets_mem = self.entries.memory_consumption();
|
||||
sub_aggs_mem + buckets_mem
|
||||
}
|
||||
|
||||
fn force_flush(&mut self, agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
|
||||
for sub_aggregations in &mut self.sub_aggs.values_mut() {
|
||||
sub_aggregations.as_mut().flush(agg_data)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// The collector puts values from the fast field into the correct buckets and does a conversion to
|
||||
/// the correct datatype.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SegmentTermCollector {
|
||||
/// The buckets containing the aggregation data.
|
||||
term_buckets: TermBuckets,
|
||||
accessor_idx: usize,
|
||||
}
|
||||
|
||||
impl SegmentAggregationCollector for SegmentTermCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_data.get_term_req_data(self.accessor_idx).name.clone();
|
||||
|
||||
let entries: Vec<(u64, u32)> = self.term_buckets.entries.into_iter().collect();
|
||||
let bucket = super::into_intermediate_bucket_result(
|
||||
self.accessor_idx,
|
||||
entries,
|
||||
self.term_buckets.sub_aggs,
|
||||
agg_data,
|
||||
)?;
|
||||
results.push(name, IntermediateAggregationResult::Bucket(bucket))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
self.collect_block(&[doc], agg_data)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let mut req_data = agg_data.take_term_req_data(self.accessor_idx);
|
||||
|
||||
let mem_pre = self.get_memory_consumption();
|
||||
|
||||
if let Some(missing) = req_data.missing_value_for_accessor {
|
||||
req_data.column_block_accessor.fetch_block_with_missing(
|
||||
docs,
|
||||
&req_data.accessor,
|
||||
missing,
|
||||
);
|
||||
} else {
|
||||
req_data
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &req_data.accessor);
|
||||
}
|
||||
|
||||
for term_id in req_data.column_block_accessor.iter_vals() {
|
||||
if let Some(allowed_bs) = req_data.allowed_term_ids.as_ref() {
|
||||
if !allowed_bs.contains(term_id as u32) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let entry = self.term_buckets.entries.entry(term_id).or_default();
|
||||
*entry += 1;
|
||||
}
|
||||
// has subagg
|
||||
if let Some(blueprint) = req_data.sub_aggregation_blueprint.as_ref() {
|
||||
for (doc, term_id) in req_data
|
||||
.column_block_accessor
|
||||
.iter_docid_vals(docs, &req_data.accessor)
|
||||
{
|
||||
if let Some(allowed_bs) = req_data.allowed_term_ids.as_ref() {
|
||||
if !allowed_bs.contains(term_id as u32) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
let sub_aggregations = self
|
||||
.term_buckets
|
||||
.sub_aggs
|
||||
.entry(term_id)
|
||||
.or_insert_with(|| blueprint.clone());
|
||||
sub_aggregations.collect(doc, agg_data)?;
|
||||
}
|
||||
}
|
||||
|
||||
let mem_delta = self.get_memory_consumption() - mem_pre;
|
||||
if mem_delta > 0 {
|
||||
agg_data
|
||||
.context
|
||||
.limits
|
||||
.add_memory_consumed(mem_delta as u64)?;
|
||||
}
|
||||
agg_data.put_back_term_req_data(self.accessor_idx, req_data);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush(&mut self, agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
|
||||
self.term_buckets.force_flush(agg_data)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentTermCollector {
|
||||
pub fn from_req_and_validate(
|
||||
req_data: &mut AggregationsSegmentCtx,
|
||||
node: &AggRefNode,
|
||||
) -> crate::Result<Self> {
|
||||
let terms_req_data = req_data.get_term_req_data(node.idx_in_req_data);
|
||||
let column_type = terms_req_data.column_type;
|
||||
let accessor_idx = node.idx_in_req_data;
|
||||
if column_type == ColumnType::Bytes {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"terms aggregation is not supported for column type {column_type:?}"
|
||||
)));
|
||||
}
|
||||
let term_buckets = TermBuckets::default();
|
||||
|
||||
// Validate sub aggregation exists
|
||||
if let OrderTarget::SubAggregation(sub_agg_name) = &terms_req_data.req.order.target {
|
||||
let (agg_name, _agg_property) = get_agg_name_and_property(sub_agg_name);
|
||||
|
||||
node.get_sub_agg(agg_name, &req_data.per_request)
|
||||
.ok_or_else(|| {
|
||||
TantivyError::InvalidArgument(format!(
|
||||
"could not find aggregation with name {agg_name} in metric \
|
||||
sub_aggregations"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
let has_sub_aggregations = !node.children.is_empty();
|
||||
let blueprint = if has_sub_aggregations {
|
||||
let sub_aggregation = build_segment_agg_collectors(req_data, &node.children)?;
|
||||
Some(sub_aggregation)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let terms_req_data = req_data.get_term_req_data_mut(node.idx_in_req_data);
|
||||
terms_req_data.sub_aggregation_blueprint = blueprint;
|
||||
|
||||
Ok(SegmentTermCollector {
|
||||
term_buckets,
|
||||
accessor_idx,
|
||||
})
|
||||
}
|
||||
|
||||
fn get_memory_consumption(&self) -> usize {
|
||||
let self_mem = std::mem::size_of::<Self>();
|
||||
let term_buckets_mem = self.term_buckets.get_memory_consumption();
|
||||
self_mem + term_buckets_mem
|
||||
}
|
||||
}
|
||||
src/aggregation/bucket/term_agg/low_cardinality_impl.rs (new file, 228 lines)
@@ -0,0 +1,228 @@
|
||||
use std::vec;
|
||||
|
||||
use rustc_hash::FxHashMap;
|
||||
|
||||
use crate::aggregation::agg_data::{
|
||||
build_segment_agg_collectors, AggRefNode, AggregationsSegmentCtx,
|
||||
};
|
||||
use crate::aggregation::bucket::{get_agg_name_and_property, OrderTarget};
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
const MAX_BATCH_SIZE: usize = 1_024;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct LowCardTermBuckets {
|
||||
entries: Box<[u32]>,
|
||||
sub_aggs: Vec<Box<dyn SegmentAggregationCollector>>,
|
||||
doc_buffers: Box<[Vec<DocId>]>,
|
||||
}
|
||||
|
||||
impl LowCardTermBuckets {
|
||||
pub fn with_num_buckets(
|
||||
num_buckets: usize,
|
||||
sub_aggs_blueprint_opt: Option<&Box<dyn SegmentAggregationCollector>>,
|
||||
) -> Self {
|
||||
let sub_aggs = sub_aggs_blueprint_opt
|
||||
.as_ref()
|
||||
.map(|blueprint| {
|
||||
std::iter::repeat_with(|| blueprint.clone_box())
|
||||
.take(num_buckets)
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
Self {
|
||||
entries: vec![0; num_buckets].into_boxed_slice(),
|
||||
sub_aggs,
|
||||
doc_buffers: std::iter::repeat_with(|| Vec::with_capacity(MAX_BATCH_SIZE))
|
||||
.take(num_buckets)
|
||||
.collect::<Vec<_>>()
|
||||
.into_boxed_slice(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_memory_consumption(&self) -> usize {
|
||||
std::mem::size_of::<Self>()
|
||||
+ self.entries.len() * std::mem::size_of::<u32>()
|
||||
+ self.doc_buffers.len()
|
||||
* (std::mem::size_of::<Vec<DocId>>()
|
||||
+ std::mem::size_of::<DocId>() * MAX_BATCH_SIZE)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LowCardSegmentTermCollector {
|
||||
term_buckets: LowCardTermBuckets,
|
||||
accessor_idx: usize,
|
||||
}
|
||||
|
||||
impl LowCardSegmentTermCollector {
|
||||
pub fn from_req_and_validate(
|
||||
req_data: &mut AggregationsSegmentCtx,
|
||||
node: &AggRefNode,
|
||||
) -> crate::Result<Self> {
|
||||
let terms_req_data = req_data.get_term_req_data(node.idx_in_req_data);
|
||||
let accessor_idx = node.idx_in_req_data;
|
||||
let cardinality = terms_req_data
|
||||
.accessor
|
||||
.max_value()
|
||||
.max(terms_req_data.missing_value_for_accessor.unwrap_or(0))
|
||||
+ 1;
|
||||
assert!(cardinality <= super::LOW_CARDINALITY_THRESHOLD);
|
||||
|
||||
// Validate sub aggregation exists
|
||||
if let OrderTarget::SubAggregation(sub_agg_name) = &terms_req_data.req.order.target {
|
||||
let (agg_name, _agg_property) = get_agg_name_and_property(sub_agg_name);
|
||||
|
||||
node.get_sub_agg(agg_name, &req_data.per_request)
|
||||
.ok_or_else(|| {
|
||||
TantivyError::InvalidArgument(format!(
|
||||
"could not find aggregation with name {agg_name} in metric \
|
||||
sub_aggregations"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
let has_sub_aggregations = !node.children.is_empty();
|
||||
let blueprint = if has_sub_aggregations {
|
||||
let sub_aggregation = build_segment_agg_collectors(req_data, &node.children)?;
|
||||
Some(sub_aggregation)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let terms_req_data = req_data.get_term_req_data_mut(node.idx_in_req_data);
|
||||
|
||||
let term_buckets =
|
||||
LowCardTermBuckets::with_num_buckets(cardinality as usize, blueprint.as_ref());
|
||||
|
||||
terms_req_data.sub_aggregation_blueprint = blueprint;
|
||||
|
||||
Ok(LowCardSegmentTermCollector {
|
||||
term_buckets,
|
||||
accessor_idx,
|
||||
})
|
||||
}
|
||||
|
||||
fn get_memory_consumption(&self) -> usize {
|
||||
let self_mem = std::mem::size_of::<Self>();
|
||||
let term_buckets_mem = self.term_buckets.get_memory_consumption();
|
||||
self_mem + term_buckets_mem
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentAggregationCollector for LowCardSegmentTermCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_data.get_term_req_data(self.accessor_idx).name.clone();
|
||||
let sub_aggs: FxHashMap<u64, Box<dyn SegmentAggregationCollector>> = self
|
||||
.term_buckets
|
||||
.sub_aggs
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.filter(|(bucket_id, _sub_agg)| self.term_buckets.entries[*bucket_id] > 0)
|
||||
.map(|(bucket_id, sub_agg)| (bucket_id as u64, sub_agg))
|
||||
.collect();
|
||||
let entries: Vec<(u64, u32)> = self
|
||||
.term_buckets
|
||||
.entries
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_, count)| **count > 0)
|
||||
.map(|(bucket_id, count)| (bucket_id as u64, *count))
|
||||
.collect();
|
||||
|
||||
let bucket =
|
||||
super::into_intermediate_bucket_result(self.accessor_idx, entries, sub_aggs, agg_data)?;
|
||||
results.push(name, IntermediateAggregationResult::Bucket(bucket))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
if docs.len() > MAX_BATCH_SIZE {
|
||||
for batch in docs.chunks(MAX_BATCH_SIZE) {
|
||||
self.collect_block(batch, agg_data)?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut req_data = agg_data.take_term_req_data(self.accessor_idx);
|
||||
|
||||
let mem_pre = self.get_memory_consumption();
|
||||
|
||||
if let Some(missing) = req_data.missing_value_for_accessor {
|
||||
req_data.column_block_accessor.fetch_block_with_missing(
|
||||
docs,
|
||||
&req_data.accessor,
|
||||
missing,
|
||||
);
|
||||
} else {
|
||||
req_data
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &req_data.accessor);
|
||||
}
|
||||
|
||||
// has subagg
|
||||
if req_data.sub_aggregation_blueprint.is_some() {
|
||||
for (doc, term_id) in req_data
|
||||
.column_block_accessor
|
||||
.iter_docid_vals(docs, &req_data.accessor)
|
||||
{
|
||||
if let Some(allowed_bs) = req_data.allowed_term_ids.as_ref() {
|
||||
if !allowed_bs.contains(term_id as u32) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
self.term_buckets.doc_buffers[term_id as usize].push(doc);
|
||||
}
|
||||
for (bucket_id, docs) in self.term_buckets.doc_buffers.iter_mut().enumerate() {
|
||||
self.term_buckets.entries[bucket_id] += docs.len() as u32;
|
||||
self.term_buckets.sub_aggs[bucket_id].collect_block(&docs[..], agg_data)?;
|
||||
docs.clear();
|
||||
}
|
||||
} else {
|
||||
for term_id in req_data.column_block_accessor.iter_vals() {
|
||||
if let Some(allowed_bs) = req_data.allowed_term_ids.as_ref() {
|
||||
if !allowed_bs.contains(term_id as u32) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
self.term_buckets.entries[term_id as usize] += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let mem_delta = self.get_memory_consumption() - mem_pre;
|
||||
if mem_delta > 0 {
|
||||
agg_data
|
||||
.context
|
||||
.limits
|
||||
.add_memory_consumed(mem_delta as u64)?;
|
||||
}
|
||||
agg_data.put_back_term_req_data(self.accessor_idx, req_data);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
self.collect_block(&[doc], agg_data)
|
||||
}
|
||||
|
||||
fn flush(&mut self, agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
|
||||
for sub_aggregations in &mut self.term_buckets.sub_aggs.iter_mut() {
|
||||
sub_aggregations.as_mut().flush(agg_data)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,13 +1,39 @@
|
||||
use columnar::{Column, ColumnType};
|
||||
use rustc_hash::FxHashMap;
|
||||
|
||||
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
|
||||
use crate::aggregation::agg_data::{
|
||||
build_segment_agg_collectors, AggRefNode, AggregationsSegmentCtx,
|
||||
};
|
||||
use crate::aggregation::bucket::term_agg::TermsAggregation;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateBucketResult,
|
||||
IntermediateKey, IntermediateTermBucketEntry, IntermediateTermBucketResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
|
||||
/// Special aggregation to handle missing values for term aggregations.
|
||||
/// This missing aggregation will check multiple columns for existence.
|
||||
///
|
||||
/// This is needed when:
|
||||
/// - The field is multi-valued and we therefore have multiple columns
|
||||
/// - The field is not text and missing is provided as string (we cannot use the numeric missing
|
||||
/// value optimization)
|
||||
#[derive(Default)]
|
||||
pub struct MissingTermAggReqData {
|
||||
/// The accessors to check for existence of a value.
|
||||
pub accessors: Vec<(Column<u64>, ColumnType)>,
|
||||
/// The name of the aggregation.
|
||||
pub name: String,
|
||||
/// The original terms aggregation request.
|
||||
pub req: TermsAggregation,
|
||||
}
|
||||
|
||||
impl MissingTermAggReqData {
|
||||
/// Estimate the memory consumption of this struct in bytes.
|
||||
pub fn get_memory_consumption(&self) -> usize {
|
||||
std::mem::size_of::<Self>()
|
||||
}
|
||||
}
|
||||
|
||||
/// The specialized missing term aggregation.
|
||||
#[derive(Default, Debug, Clone)]
|
||||
@@ -18,12 +44,13 @@ pub struct TermMissingAgg {
|
||||
}
|
||||
impl TermMissingAgg {
|
||||
pub(crate) fn new(
|
||||
accessor_idx: usize,
|
||||
sub_aggregations: &mut AggregationsWithAccessor,
|
||||
req_data: &mut AggregationsSegmentCtx,
|
||||
node: &AggRefNode,
|
||||
) -> crate::Result<Self> {
|
||||
let has_sub_aggregations = !sub_aggregations.is_empty();
|
||||
let has_sub_aggregations = !node.children.is_empty();
|
||||
let accessor_idx = node.idx_in_req_data;
|
||||
let sub_agg = if has_sub_aggregations {
|
||||
let sub_aggregation = build_segment_agg_collector(sub_aggregations)?;
|
||||
let sub_aggregation = build_segment_agg_collectors(req_data, &node.children)?;
|
||||
Some(sub_aggregation)
|
||||
} else {
|
||||
None
|
||||
@@ -40,16 +67,11 @@ impl TermMissingAgg {
|
||||
impl SegmentAggregationCollector for TermMissingAgg {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let agg_with_accessor = &agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
let term_agg = agg_with_accessor
|
||||
.agg
|
||||
.agg
|
||||
.as_term()
|
||||
.expect("TermMissingAgg collector must be term agg req");
|
||||
let req_data = agg_data.get_missing_term_req_data(self.accessor_idx);
|
||||
let term_agg = &req_data.req;
|
||||
let missing = term_agg
|
||||
.missing
|
||||
.as_ref()
|
||||
@@ -64,10 +86,7 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
};
|
||||
if let Some(sub_agg) = self.sub_agg {
|
||||
let mut res = IntermediateAggregationResults::default();
|
||||
sub_agg.add_intermediate_aggregation_result(
|
||||
&agg_with_accessor.sub_aggregation,
|
||||
&mut res,
|
||||
)?;
|
||||
sub_agg.add_intermediate_aggregation_result(agg_data, &mut res)?;
|
||||
missing_entry.sub_aggregation = res;
|
||||
}
|
||||
entries.insert(missing.into(), missing_entry);
|
||||
@@ -80,7 +99,10 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
},
|
||||
};
|
||||
|
||||
results.push(name, IntermediateAggregationResult::Bucket(bucket))?;
|
||||
results.push(
|
||||
req_data.name.to_string(),
|
||||
IntermediateAggregationResult::Bucket(bucket),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -88,17 +110,17 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let agg = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
let has_value = agg
|
||||
let req_data = agg_data.get_missing_term_req_data(self.accessor_idx);
|
||||
let has_value = req_data
|
||||
.accessors
|
||||
.iter()
|
||||
.any(|(acc, _)| acc.index.has_value(doc));
|
||||
if !has_value {
|
||||
self.missing_count += 1;
|
||||
if let Some(sub_agg) = self.sub_agg.as_mut() {
|
||||
sub_agg.collect(doc, &mut agg.sub_aggregation)?;
|
||||
sub_agg.collect(doc, agg_data)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -107,10 +129,10 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
for doc in docs {
|
||||
self.collect(*doc, agg_with_accessor)?;
|
||||
self.collect(*doc, agg_data)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationCollector;
use crate::aggregation::agg_data::AggregationsSegmentCtx;
use crate::DocId;

pub(crate) const DOC_BLOCK_SIZE: usize = 64;
@@ -37,23 +37,23 @@ impl SegmentAggregationCollector for BufAggregationCollector {
    #[inline]
    fn add_intermediate_aggregation_result(
        self: Box<Self>,
        agg_with_accessor: &AggregationsWithAccessor,
        agg_data: &AggregationsSegmentCtx,
        results: &mut IntermediateAggregationResults,
    ) -> crate::Result<()> {
        Box::new(self.collector).add_intermediate_aggregation_result(agg_with_accessor, results)
        Box::new(self.collector).add_intermediate_aggregation_result(agg_data, results)
    }

    #[inline]
    fn collect(
        &mut self,
        doc: crate::DocId,
        agg_with_accessor: &mut AggregationsWithAccessor,
        agg_data: &mut AggregationsSegmentCtx,
    ) -> crate::Result<()> {
        self.staged_docs[self.num_staged_docs] = doc;
        self.num_staged_docs += 1;
        if self.num_staged_docs == self.staged_docs.len() {
            self.collector
                .collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor)?;
                .collect_block(&self.staged_docs[..self.num_staged_docs], agg_data)?;
            self.num_staged_docs = 0;
        }
        Ok(())
@@ -63,20 +63,20 @@ impl SegmentAggregationCollector for BufAggregationCollector {
    fn collect_block(
        &mut self,
        docs: &[crate::DocId],
        agg_with_accessor: &mut AggregationsWithAccessor,
        agg_data: &mut AggregationsSegmentCtx,
    ) -> crate::Result<()> {
        self.collector.collect_block(docs, agg_with_accessor)?;
        self.collector.collect_block(docs, agg_data)?;

        Ok(())
    }

    #[inline]
    fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
    fn flush(&mut self, agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
        self.collector
            .collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor)?;
            .collect_block(&self.staged_docs[..self.num_staged_docs], agg_data)?;
        self.num_staged_docs = 0;

        self.collector.flush(agg_with_accessor)?;
        self.collector.flush(agg_data)?;

        Ok(())
    }

@@ -1,12 +1,12 @@
use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults;
use super::buf_collector::BufAggregationCollector;
use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::{
build_segment_agg_collector, AggregationLimitsGuard, SegmentAggregationCollector,
use super::segment_agg_result::SegmentAggregationCollector;
use super::AggContextParams;
use crate::aggregation::agg_data::{
build_aggregations_data_from_req, build_segment_agg_collectors_root, AggregationsSegmentCtx,
};
use crate::aggregation::agg_req_with_accessor::get_aggs_with_segment_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::index::SegmentReader;
use crate::{DocId, SegmentOrdinal, TantivyError};
@@ -22,7 +22,7 @@ pub const DEFAULT_MEMORY_LIMIT: u64 = 500_000_000;
/// The collector collects all aggregations by the underlying aggregation request.
pub struct AggregationCollector {
agg: Aggregations,
limits: AggregationLimitsGuard,
context: AggContextParams,
}

impl AggregationCollector {
@@ -30,8 +30,8 @@ impl AggregationCollector {
///
/// Aggregation fails when the limits in `AggregationLimits` is exceeded. (memory limit and
/// bucket limit)
pub fn from_aggs(agg: Aggregations, limits: AggregationLimitsGuard) -> Self {
Self { agg, limits }
pub fn from_aggs(agg: Aggregations, context: AggContextParams) -> Self {
Self { agg, context }
}
}

@@ -45,7 +45,7 @@ impl AggregationCollector {
/// into the final `AggregationResults` via the `into_final_result()` method.
pub struct DistributedAggregationCollector {
agg: Aggregations,
limits: AggregationLimitsGuard,
context: AggContextParams,
}

impl DistributedAggregationCollector {
@@ -53,8 +53,8 @@ impl DistributedAggregationCollector {
///
/// Aggregation fails when the limits in `AggregationLimits` is exceeded. (memory limit and
/// bucket limit)
pub fn from_aggs(agg: Aggregations, limits: AggregationLimitsGuard) -> Self {
Self { agg, limits }
pub fn from_aggs(agg: Aggregations, context: AggContextParams) -> Self {
Self { agg, context }
}
}

@@ -72,7 +72,7 @@ impl Collector for DistributedAggregationCollector {
&self.agg,
reader,
segment_local_id,
&self.limits,
&self.context,
)
}

@@ -102,7 +102,7 @@ impl Collector for AggregationCollector {
&self.agg,
reader,
segment_local_id,
&self.limits,
&self.context,
)
}

@@ -115,7 +115,7 @@ impl Collector for AggregationCollector {
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let res = merge_fruits(segment_fruits)?;
res.into_final_result(self.agg.clone(), self.limits.clone())
res.into_final_result(self.agg.clone(), self.context.limits.clone())
}
}

@@ -135,7 +135,7 @@ fn merge_fruits(

/// `AggregationSegmentCollector` does the aggregation collection on a segment.
pub struct AggregationSegmentCollector {
aggs_with_accessor: AggregationsWithAccessor,
aggs_with_accessor: AggregationsSegmentCtx,
agg_collector: BufAggregationCollector,
error: Option<TantivyError>,
}
@@ -147,14 +147,15 @@ impl AggregationSegmentCollector {
agg: &Aggregations,
reader: &SegmentReader,
segment_ordinal: SegmentOrdinal,
limits: &AggregationLimitsGuard,
context: &AggContextParams,
) -> crate::Result<Self> {
let mut aggs_with_accessor =
get_aggs_with_segment_accessor_and_validate(agg, reader, segment_ordinal, limits)?;
let mut agg_data =
build_aggregations_data_from_req(agg, reader, segment_ordinal, context.clone())?;
let result =
BufAggregationCollector::new(build_segment_agg_collector(&mut aggs_with_accessor)?);
BufAggregationCollector::new(build_segment_agg_collectors_root(&mut agg_data)?);

Ok(AggregationSegmentCollector {
aggs_with_accessor,
aggs_with_accessor: agg_data,
agg_collector: result,
error: None,
})

@@ -24,7 +24,9 @@ use super::metric::{
};
use super::segment_agg_result::AggregationLimitsGuard;
use super::{format_date, AggregationError, Key, SerializedKey};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::agg_result::{
AggregationResults, BucketEntries, BucketEntry, FilterBucketResult,
};
use crate::aggregation::bucket::TermsAggregationInternal;
use crate::aggregation::metric::CardinalityCollector;
use crate::TantivyError;
@@ -179,12 +181,17 @@ impl IntermediateAggregationResults {
}

/// Merge another intermediate aggregation result into this result.
///
/// The order of the values need to be the same on both results. This is ensured when the same
/// (key values) are present on the underlying `VecWithNames` struct.
pub fn merge_fruits(&mut self, other: IntermediateAggregationResults) -> crate::Result<()> {
for (left, right) in self.aggs_res.values_mut().zip(other.aggs_res.into_values()) {
left.merge_fruits(right)?;
pub fn merge_fruits(&mut self, mut other: IntermediateAggregationResults) -> crate::Result<()> {
for (key, left) in self.aggs_res.iter_mut() {
if let Some(key) = other.aggs_res.remove(key) {
left.merge_fruits(key)?;
}
}
// Move remainder of other aggs_res into self.
// Note: Currently we don't expect this to happen, as we create empty intermediate results
// via [IntermediateAggregationResults::empty_from_req].
for (key, value) in other.aggs_res {
self.aggs_res.insert(key, value);
}
Ok(())
}
@@ -241,11 +248,16 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
|
||||
Cardinality(_) => IntermediateAggregationResult::Metric(
|
||||
IntermediateMetricResult::Cardinality(CardinalityCollector::default()),
|
||||
),
|
||||
Filter(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Filter {
|
||||
doc_count: 0,
|
||||
sub_aggregations: IntermediateAggregationResults::default(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// An aggregation is either a bucket or a metric.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum IntermediateAggregationResult {
|
||||
/// Bucket variant
|
||||
Bucket(IntermediateBucketResult),
|
||||
@@ -426,6 +438,13 @@ pub enum IntermediateBucketResult {
|
||||
/// The term buckets
|
||||
buckets: IntermediateTermBucketResult,
|
||||
},
|
||||
/// Filter aggregation - a single bucket with sub-aggregations
|
||||
Filter {
|
||||
/// Document count in the filter bucket
|
||||
doc_count: u64,
|
||||
/// Sub-aggregation results
|
||||
sub_aggregations: IntermediateAggregationResults,
|
||||
},
|
||||
}
|
||||
|
||||
impl IntermediateBucketResult {
|
||||
@@ -509,6 +528,18 @@ impl IntermediateBucketResult {
|
||||
req.sub_aggregation(),
|
||||
limits,
|
||||
),
|
||||
IntermediateBucketResult::Filter {
|
||||
doc_count,
|
||||
sub_aggregations,
|
||||
} => {
|
||||
// Convert sub-aggregation results to final format
|
||||
let final_sub_aggregations = sub_aggregations
|
||||
.into_final_result(req.sub_aggregation().clone(), limits.clone())?;
|
||||
Ok(BucketResult::Filter(FilterBucketResult {
|
||||
doc_count,
|
||||
sub_aggregations: final_sub_aggregations,
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -562,6 +593,19 @@ impl IntermediateBucketResult {
|
||||
|
||||
*buckets_left = buckets?;
|
||||
}
|
||||
(
|
||||
IntermediateBucketResult::Filter {
|
||||
doc_count: doc_count_left,
|
||||
sub_aggregations: sub_aggs_left,
|
||||
},
|
||||
IntermediateBucketResult::Filter {
|
||||
doc_count: doc_count_right,
|
||||
sub_aggregations: sub_aggs_right,
|
||||
},
|
||||
) => {
|
||||
*doc_count_left += doc_count_right;
|
||||
sub_aggs_left.merge_fruits(sub_aggs_right)?;
|
||||
}
|
||||
(IntermediateBucketResult::Range(_), _) => {
|
||||
panic!("try merge on different types")
|
||||
}
|
||||
@@ -571,6 +615,9 @@ impl IntermediateBucketResult {
|
||||
(IntermediateBucketResult::Terms { .. }, _) => {
|
||||
panic!("try merge on different types")
|
||||
}
|
||||
(IntermediateBucketResult::Filter { .. }, _) => {
|
||||
panic!("try merge on different types")
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -2,15 +2,13 @@ use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{BuildHasher, Hasher};
|
||||
|
||||
use columnar::column_values::CompactSpaceU64Accessor;
|
||||
use columnar::Dictionary;
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType, Dictionary, StrColumn};
|
||||
use common::f64_to_u64;
|
||||
use hyperloglogplus::{HyperLogLog, HyperLogLogPlus};
|
||||
use rustc_hash::FxHashSet;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::aggregation::agg_req_with_accessor::{
|
||||
AggregationWithAccessor, AggregationsWithAccessor,
|
||||
};
|
||||
use crate::aggregation::agg_data::AggregationsSegmentCtx;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
@@ -97,6 +95,32 @@ pub struct CardinalityAggregationReq {
|
||||
pub missing: Option<Key>,
|
||||
}
|
||||
|
||||
/// Contains all information required by the SegmentCardinalityCollector to perform the
|
||||
/// cardinality aggregation on a segment.
|
||||
pub struct CardinalityAggReqData {
|
||||
/// The column accessor to access the fast field values.
|
||||
pub accessor: Column<u64>,
|
||||
/// The column_type of the field.
|
||||
pub column_type: ColumnType,
|
||||
/// The string dictionary column if the field is of type string.
|
||||
pub str_dict_column: Option<StrColumn>,
|
||||
/// The missing value normalized to the internal u64 representation of the field type.
|
||||
pub missing_value_for_accessor: Option<u64>,
|
||||
/// The column block accessor to access the fast field values.
|
||||
pub(crate) column_block_accessor: ColumnBlockAccessor<u64>,
|
||||
/// The name of the aggregation.
|
||||
pub name: String,
|
||||
/// The aggregation request.
|
||||
pub req: CardinalityAggregationReq,
|
||||
}
|
||||
|
||||
impl CardinalityAggReqData {
|
||||
/// Estimate the memory consumption of this struct in bytes.
|
||||
pub fn get_memory_consumption(&self) -> usize {
|
||||
std::mem::size_of::<Self>()
|
||||
}
|
||||
}
|
||||
|
||||
impl CardinalityAggregationReq {
|
||||
/// Creates a new [`CardinalityAggregationReq`] instance from a field name.
|
||||
pub fn from_field_name(field_name: String) -> Self {
|
||||
@@ -115,47 +139,44 @@ impl CardinalityAggregationReq {
|
||||
pub(crate) struct SegmentCardinalityCollector {
|
||||
cardinality: CardinalityCollector,
|
||||
entries: FxHashSet<u64>,
|
||||
column_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
missing: Option<Key>,
|
||||
}
|
||||
|
||||
impl SegmentCardinalityCollector {
|
||||
pub fn from_req(column_type: ColumnType, accessor_idx: usize, missing: &Option<Key>) -> Self {
|
||||
pub fn from_req(column_type: ColumnType, accessor_idx: usize) -> Self {
|
||||
Self {
|
||||
cardinality: CardinalityCollector::new(column_type as u8),
|
||||
entries: Default::default(),
|
||||
column_type,
|
||||
accessor_idx,
|
||||
missing: missing.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
fn fetch_block_with_field(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_accessor: &mut AggregationWithAccessor,
|
||||
agg_data: &mut CardinalityAggReqData,
|
||||
) {
|
||||
if let Some(missing) = agg_accessor.missing_value_for_accessor {
|
||||
agg_accessor.column_block_accessor.fetch_block_with_missing(
|
||||
if let Some(missing) = agg_data.missing_value_for_accessor {
|
||||
agg_data.column_block_accessor.fetch_block_with_missing(
|
||||
docs,
|
||||
&agg_accessor.accessor,
|
||||
&agg_data.accessor,
|
||||
missing,
|
||||
);
|
||||
} else {
|
||||
agg_accessor
|
||||
agg_data
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &agg_accessor.accessor);
|
||||
.fetch_block(docs, &agg_data.accessor);
|
||||
}
|
||||
}
|
||||
|
||||
fn into_intermediate_metric_result(
|
||||
mut self,
|
||||
agg_with_accessor: &AggregationWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
) -> crate::Result<IntermediateMetricResult> {
|
||||
if self.column_type == ColumnType::Str {
|
||||
let req_data = &agg_data.get_cardinality_req_data(self.accessor_idx);
|
||||
if req_data.column_type == ColumnType::Str {
|
||||
let fallback_dict = Dictionary::empty();
|
||||
let dict = agg_with_accessor
|
||||
let dict = req_data
|
||||
.str_dict_column
|
||||
.as_ref()
|
||||
.map(|el| el.dictionary())
|
||||
@@ -180,10 +201,10 @@ impl SegmentCardinalityCollector {
|
||||
})?;
|
||||
if has_missing {
|
||||
// Replace missing with the actual value provided
|
||||
let missing_key = self
|
||||
.missing
|
||||
.as_ref()
|
||||
.expect("Found sentinel value u64::MAX for term_ord but `missing` is not set");
|
||||
let missing_key =
|
||||
req_data.req.missing.as_ref().expect(
|
||||
"Found sentinel value u64::MAX for term_ord but `missing` is not set",
|
||||
);
|
||||
match missing_key {
|
||||
Key::Str(missing) => {
|
||||
self.cardinality.sketch.insert_any(&missing);
|
||||
@@ -209,13 +230,13 @@ impl SegmentCardinalityCollector {
|
||||
impl SegmentAggregationCollector for SegmentCardinalityCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let agg_with_accessor = &agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
let req_data = &agg_data.get_cardinality_req_data(self.accessor_idx);
|
||||
let name = req_data.name.to_string();
|
||||
|
||||
let intermediate_result = self.into_intermediate_metric_result(agg_with_accessor)?;
|
||||
let intermediate_result = self.into_intermediate_metric_result(agg_data)?;
|
||||
results.push(
|
||||
name,
|
||||
IntermediateAggregationResult::Metric(intermediate_result),
|
||||
@@ -227,26 +248,26 @@ impl SegmentAggregationCollector for SegmentCardinalityCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
self.collect_block(&[doc], agg_with_accessor)
|
||||
self.collect_block(&[doc], agg_data)
|
||||
}
|
||||
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let bucket_agg_accessor = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
self.fetch_block_with_field(docs, bucket_agg_accessor);
|
||||
let req_data = agg_data.get_cardinality_req_data_mut(self.accessor_idx);
|
||||
self.fetch_block_with_field(docs, req_data);
|
||||
|
||||
let col_block_accessor = &bucket_agg_accessor.column_block_accessor;
|
||||
if self.column_type == ColumnType::Str {
|
||||
let col_block_accessor = &req_data.column_block_accessor;
|
||||
if req_data.column_type == ColumnType::Str {
|
||||
for term_ord in col_block_accessor.iter_vals() {
|
||||
self.entries.insert(term_ord);
|
||||
}
|
||||
} else if self.column_type == ColumnType::IpAddr {
|
||||
let compact_space_accessor = bucket_agg_accessor
|
||||
} else if req_data.column_type == ColumnType::IpAddr {
|
||||
let compact_space_accessor = req_data
|
||||
.accessor
|
||||
.values
|
||||
.clone()
|
||||
|
||||
@@ -4,12 +4,11 @@ use std::mem;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req_with_accessor::{
|
||||
AggregationWithAccessor, AggregationsWithAccessor,
|
||||
};
|
||||
use crate::aggregation::agg_data::AggregationsSegmentCtx;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::metric::MetricAggReqData;
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::*;
|
||||
use crate::{DocId, TantivyError};
|
||||
@@ -348,20 +347,20 @@ impl SegmentExtendedStatsCollector {
|
||||
pub(crate) fn collect_block_with_field(
|
||||
&mut self,
|
||||
docs: &[DocId],
|
||||
agg_accessor: &mut AggregationWithAccessor,
|
||||
req_data: &mut MetricAggReqData,
|
||||
) {
|
||||
if let Some(missing) = self.missing.as_ref() {
|
||||
agg_accessor.column_block_accessor.fetch_block_with_missing(
|
||||
req_data.column_block_accessor.fetch_block_with_missing(
|
||||
docs,
|
||||
&agg_accessor.accessor,
|
||||
&req_data.accessor,
|
||||
*missing,
|
||||
);
|
||||
} else {
|
||||
agg_accessor
|
||||
req_data
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &agg_accessor.accessor);
|
||||
.fetch_block(docs, &req_data.accessor);
|
||||
}
|
||||
for val in agg_accessor.column_block_accessor.iter_vals() {
|
||||
for val in req_data.column_block_accessor.iter_vals() {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.extended_stats.collect(val1);
|
||||
}
|
||||
@@ -372,10 +371,10 @@ impl SegmentAggregationCollector for SegmentExtendedStatsCollector {
|
||||
#[inline]
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let name = agg_data.get_metric_req_data(self.accessor_idx).name.clone();
|
||||
results.push(
|
||||
name,
|
||||
IntermediateAggregationResult::Metric(IntermediateMetricResult::ExtendedStats(
|
||||
@@ -390,12 +389,12 @@ impl SegmentAggregationCollector for SegmentExtendedStatsCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let field = &agg_with_accessor.aggs.values[self.accessor_idx].accessor;
|
||||
let req_data = agg_data.get_metric_req_data(self.accessor_idx);
|
||||
if let Some(missing) = self.missing {
|
||||
let mut has_val = false;
|
||||
for val in field.values_for_doc(doc) {
|
||||
for val in req_data.accessor.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.extended_stats.collect(val1);
|
||||
has_val = true;
|
||||
@@ -405,7 +404,7 @@ impl SegmentAggregationCollector for SegmentExtendedStatsCollector {
|
||||
.collect(f64_from_fastfield_u64(missing, &self.field_type));
|
||||
}
|
||||
} else {
|
||||
for val in field.values_for_doc(doc) {
|
||||
for val in req_data.accessor.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.extended_stats.collect(val1);
|
||||
}
|
||||
@@ -418,10 +417,10 @@ impl SegmentAggregationCollector for SegmentExtendedStatsCollector {
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let field = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
self.collect_block_with_field(docs, field);
|
||||
let req_data = agg_data.get_metric_req_data_mut(self.accessor_idx);
|
||||
self.collect_block_with_field(docs, req_data);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ use std::collections::HashMap;

pub use average::*;
pub use cardinality::*;
use columnar::{Column, ColumnBlockAccessor, ColumnType};
pub use count::*;
pub use extended_stats::*;
pub use max::*;
@@ -44,6 +45,35 @@ pub use top_hits::*;

use crate::schema::OwnedValue;

/// Contains all information required by metric aggregations like avg, min, max, sum, stats,
/// extended_stats, count, percentiles.
#[repr(C)]
pub struct MetricAggReqData {
/// True if the field is of number or date type.
pub is_number_or_date_type: bool,
/// The type of the field.
pub field_type: ColumnType,
/// The missing value normalized to the internal u64 representation of the field type.
pub missing_u64: Option<u64>,
/// The column block accessor to access the fast field values.
pub column_block_accessor: ColumnBlockAccessor<u64>,
/// The column accessor to access the fast field values.
pub accessor: Column<u64>,
/// Used when converting to intermediate result
pub collecting_for: StatsType,
/// The missing value
pub missing: Option<f64>,
/// The name of the aggregation.
pub name: String,
}

impl MetricAggReqData {
/// Estimate the memory consumption of this struct in bytes.
pub fn get_memory_consumption(&self) -> usize {
std::mem::size_of::<Self>()
}
}

/// Single-metric aggregations use this common result structure.
///
/// Main reason to wrap it in value is to match elasticsearch output structure.

@@ -3,12 +3,11 @@ use std::fmt::Debug;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req_with_accessor::{
|
||||
AggregationWithAccessor, AggregationsWithAccessor,
|
||||
};
|
||||
use crate::aggregation::agg_data::AggregationsSegmentCtx;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::metric::MetricAggReqData;
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::*;
|
||||
use crate::{DocId, TantivyError};
|
||||
@@ -112,7 +111,8 @@ impl PercentilesAggregationReq {
|
||||
&self.field
|
||||
}
|
||||
|
||||
fn validate(&self) -> crate::Result<()> {
|
||||
/// Validates the request parameters.
|
||||
pub fn validate(&self) -> crate::Result<()> {
|
||||
if let Some(percents) = self.percents.as_ref() {
|
||||
let all_in_range = percents
|
||||
.iter()
|
||||
@@ -133,10 +133,8 @@ impl PercentilesAggregationReq {
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub(crate) struct SegmentPercentilesCollector {
|
||||
field_type: ColumnType,
|
||||
pub(crate) percentiles: PercentilesCollector,
|
||||
pub(crate) accessor_idx: usize,
|
||||
missing: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
@@ -231,43 +229,32 @@ impl PercentilesCollector {
|
||||
}
|
||||
|
||||
impl SegmentPercentilesCollector {
|
||||
pub fn from_req_and_validate(
|
||||
req: &PercentilesAggregationReq,
|
||||
field_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
) -> crate::Result<Self> {
|
||||
req.validate()?;
|
||||
let missing = req
|
||||
.missing
|
||||
.and_then(|val| f64_to_fastfield_u64(val, &field_type));
|
||||
|
||||
pub fn from_req_and_validate(accessor_idx: usize) -> crate::Result<Self> {
|
||||
Ok(Self {
|
||||
field_type,
|
||||
percentiles: PercentilesCollector::new(),
|
||||
accessor_idx,
|
||||
missing,
|
||||
})
|
||||
}
|
||||
#[inline]
|
||||
pub(crate) fn collect_block_with_field(
|
||||
&mut self,
|
||||
docs: &[DocId],
|
||||
agg_accessor: &mut AggregationWithAccessor,
|
||||
req_data: &mut MetricAggReqData,
|
||||
) {
|
||||
if let Some(missing) = self.missing.as_ref() {
|
||||
agg_accessor.column_block_accessor.fetch_block_with_missing(
|
||||
if let Some(missing) = req_data.missing_u64.as_ref() {
|
||||
req_data.column_block_accessor.fetch_block_with_missing(
|
||||
docs,
|
||||
&agg_accessor.accessor,
|
||||
&req_data.accessor,
|
||||
*missing,
|
||||
);
|
||||
} else {
|
||||
agg_accessor
|
||||
req_data
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &agg_accessor.accessor);
|
||||
.fetch_block(docs, &req_data.accessor);
|
||||
}
|
||||
|
||||
for val in agg_accessor.column_block_accessor.iter_vals() {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
for val in req_data.column_block_accessor.iter_vals() {
|
||||
let val1 = f64_from_fastfield_u64(val, &req_data.field_type);
|
||||
self.percentiles.collect(val1);
|
||||
}
|
||||
}
|
||||
@@ -277,10 +264,10 @@ impl SegmentAggregationCollector for SegmentPercentilesCollector {
|
||||
#[inline]
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let name = agg_data.get_metric_req_data(self.accessor_idx).name.clone();
|
||||
let intermediate_metric_result = IntermediateMetricResult::Percentiles(self.percentiles);
|
||||
|
||||
results.push(
|
||||
@@ -295,24 +282,24 @@ impl SegmentAggregationCollector for SegmentPercentilesCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let field = &agg_with_accessor.aggs.values[self.accessor_idx].accessor;
|
||||
let req_data = agg_data.get_metric_req_data(self.accessor_idx);
|
||||
|
||||
if let Some(missing) = self.missing {
|
||||
if let Some(missing) = req_data.missing_u64 {
|
||||
let mut has_val = false;
|
||||
for val in field.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
for val in req_data.accessor.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &req_data.field_type);
|
||||
self.percentiles.collect(val1);
|
||||
has_val = true;
|
||||
}
|
||||
if !has_val {
|
||||
self.percentiles
|
||||
.collect(f64_from_fastfield_u64(missing, &self.field_type));
|
||||
.collect(f64_from_fastfield_u64(missing, &req_data.field_type));
|
||||
}
|
||||
} else {
|
||||
for val in field.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
for val in req_data.accessor.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &req_data.field_type);
|
||||
self.percentiles.collect(val1);
|
||||
}
|
||||
}
|
||||
@@ -324,10 +311,10 @@ impl SegmentAggregationCollector for SegmentPercentilesCollector {
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let field = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
self.collect_block_with_field(docs, field);
|
||||
let req_data = agg_data.get_metric_req_data_mut(self.accessor_idx);
|
||||
self.collect_block_with_field(docs, req_data);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,12 +3,11 @@ use std::fmt::Debug;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req_with_accessor::{
|
||||
AggregationWithAccessor, AggregationsWithAccessor,
|
||||
};
|
||||
use crate::aggregation::agg_data::AggregationsSegmentCtx;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::metric::MetricAggReqData;
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::*;
|
||||
use crate::{DocId, TantivyError};
|
||||
@@ -166,74 +165,65 @@ impl IntermediateStats {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub(crate) enum SegmentStatsType {
|
||||
/// The type of stats aggregation to perform.
|
||||
/// Note that not all stats types are supported in the stats aggregation.
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub enum StatsType {
|
||||
/// The average of the values.
|
||||
Average,
|
||||
/// The count of the values.
|
||||
Count,
|
||||
/// The maximum value.
|
||||
Max,
|
||||
/// The minimum value.
|
||||
Min,
|
||||
/// The stats (count, sum, min, max, avg) of the values.
|
||||
Stats,
|
||||
/// The extended stats (count, sum, min, max, avg, sum_of_squares, variance, std_deviation,
|
||||
ExtendedStats(Option<f64>), // sigma
|
||||
/// The sum of the values.
|
||||
Sum,
|
||||
/// The percentiles of the values.
|
||||
Percentiles,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct SegmentStatsCollector {
|
||||
missing: Option<u64>,
|
||||
field_type: ColumnType,
|
||||
pub(crate) collecting_for: SegmentStatsType,
|
||||
pub(crate) stats: IntermediateStats,
|
||||
pub(crate) accessor_idx: usize,
|
||||
val_cache: Vec<u64>,
|
||||
}
|
||||
|
||||
impl SegmentStatsCollector {
|
||||
pub fn from_req(
|
||||
field_type: ColumnType,
|
||||
collecting_for: SegmentStatsType,
|
||||
accessor_idx: usize,
|
||||
missing: Option<f64>,
|
||||
) -> Self {
|
||||
let missing = missing.and_then(|val| f64_to_fastfield_u64(val, &field_type));
|
||||
pub fn from_req(accessor_idx: usize) -> Self {
|
||||
Self {
|
||||
field_type,
|
||||
collecting_for,
|
||||
stats: IntermediateStats::default(),
|
||||
accessor_idx,
|
||||
missing,
|
||||
val_cache: Default::default(),
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub(crate) fn collect_block_with_field(
|
||||
&mut self,
|
||||
docs: &[DocId],
|
||||
agg_accessor: &mut AggregationWithAccessor,
|
||||
req_data: &mut MetricAggReqData,
|
||||
) {
|
||||
if let Some(missing) = self.missing.as_ref() {
|
||||
agg_accessor.column_block_accessor.fetch_block_with_missing(
|
||||
if let Some(missing) = req_data.missing_u64.as_ref() {
|
||||
req_data.column_block_accessor.fetch_block_with_missing(
|
||||
docs,
|
||||
&agg_accessor.accessor,
|
||||
&req_data.accessor,
|
||||
*missing,
|
||||
);
|
||||
} else {
|
||||
agg_accessor
|
||||
req_data
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &agg_accessor.accessor);
|
||||
.fetch_block(docs, &req_data.accessor);
|
||||
}
|
||||
if [
|
||||
ColumnType::I64,
|
||||
ColumnType::U64,
|
||||
ColumnType::F64,
|
||||
ColumnType::DateTime,
|
||||
]
|
||||
.contains(&self.field_type)
|
||||
{
|
||||
for val in agg_accessor.column_block_accessor.iter_vals() {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
if req_data.is_number_or_date_type {
|
||||
for val in req_data.column_block_accessor.iter_vals() {
|
||||
let val1 = f64_from_fastfield_u64(val, &req_data.field_type);
|
||||
self.stats.collect(val1);
|
||||
}
|
||||
} else {
|
||||
for _val in agg_accessor.column_block_accessor.iter_vals() {
|
||||
for _val in req_data.column_block_accessor.iter_vals() {
|
||||
// we ignore the value and simply record that we got something
|
||||
self.stats.collect(0.0);
|
||||
}
|
||||
@@ -245,27 +235,28 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
|
||||
#[inline]
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let req = agg_data.get_metric_req_data(self.accessor_idx);
|
||||
let name = req.name.clone();
|
||||
|
||||
let intermediate_metric_result = match self.collecting_for {
|
||||
SegmentStatsType::Average => {
|
||||
let intermediate_metric_result = match req.collecting_for {
|
||||
StatsType::Average => {
|
||||
IntermediateMetricResult::Average(IntermediateAverage::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Count => {
|
||||
StatsType::Count => {
|
||||
IntermediateMetricResult::Count(IntermediateCount::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Max => {
|
||||
IntermediateMetricResult::Max(IntermediateMax::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Min => {
|
||||
IntermediateMetricResult::Min(IntermediateMin::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Stats => IntermediateMetricResult::Stats(self.stats),
|
||||
SegmentStatsType::Sum => {
|
||||
IntermediateMetricResult::Sum(IntermediateSum::from_collector(*self))
|
||||
StatsType::Max => IntermediateMetricResult::Max(IntermediateMax::from_collector(*self)),
|
||||
StatsType::Min => IntermediateMetricResult::Min(IntermediateMin::from_collector(*self)),
|
||||
StatsType::Stats => IntermediateMetricResult::Stats(self.stats),
|
||||
StatsType::Sum => IntermediateMetricResult::Sum(IntermediateSum::from_collector(*self)),
|
||||
_ => {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Unsupported stats type for stats aggregation: {:?}",
|
||||
req.collecting_for
|
||||
)))
|
||||
}
|
||||
};
|
||||
|
||||
@@ -281,23 +272,23 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let field = &agg_with_accessor.aggs.values[self.accessor_idx].accessor;
|
||||
if let Some(missing) = self.missing {
|
||||
let req_data = agg_data.get_metric_req_data(self.accessor_idx);
|
||||
if let Some(missing) = req_data.missing_u64 {
|
||||
let mut has_val = false;
|
||||
for val in field.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
for val in req_data.accessor.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &req_data.field_type);
|
||||
self.stats.collect(val1);
|
||||
has_val = true;
|
||||
}
|
||||
if !has_val {
|
||||
self.stats
|
||||
.collect(f64_from_fastfield_u64(missing, &self.field_type));
|
||||
.collect(f64_from_fastfield_u64(missing, &req_data.field_type));
|
||||
}
|
||||
} else {
|
||||
for val in field.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
for val in req_data.accessor.values_for_doc(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &req_data.field_type);
|
||||
self.stats.collect(val1);
|
||||
}
|
||||
}
|
||||
@@ -309,10 +300,10 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let field = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
self.collect_block_with_field(docs, field);
|
||||
let req_data = agg_data.get_metric_req_data_mut(self.accessor_idx);
|
||||
self.collect_block_with_field(docs, req_data);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ use serde::ser::SerializeMap;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use super::{TopHitsMetricResult, TopHitsVecEntry};
|
||||
use crate::aggregation::agg_data::AggregationsSegmentCtx;
|
||||
use crate::aggregation::bucket::Order;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateMetricResult,
|
||||
@@ -18,6 +19,30 @@ use crate::aggregation::AggregationError;
|
||||
use crate::collector::TopNComputer;
|
||||
use crate::schema::OwnedValue;
|
||||
use crate::{DocAddress, DocId, SegmentOrdinal};
|
||||
// duplicate import removed; already imported above
|
||||
|
||||
/// Contains all information required by the TopHitsSegmentCollector to perform the
|
||||
/// top_hits aggregation on a segment.
|
||||
#[derive(Default)]
|
||||
pub struct TopHitsAggReqData {
|
||||
/// The accessors to access the fast field values.
|
||||
pub accessors: Vec<(Column<u64>, ColumnType)>,
|
||||
/// The accessors to access the fast field values for retrieving document fields.
|
||||
pub value_accessors: HashMap<String, Vec<DynamicColumn>>,
|
||||
/// The ordinal of the segment this request data is for.
|
||||
pub segment_ordinal: SegmentOrdinal,
|
||||
/// The name of the aggregation.
|
||||
pub name: String,
|
||||
/// The top_hits aggregation request.
|
||||
pub req: TopHitsAggregationReq,
|
||||
}
|
||||
|
||||
impl TopHitsAggReqData {
|
||||
/// Estimate the memory consumption of this struct in bytes.
|
||||
pub fn get_memory_consumption(&self) -> usize {
|
||||
std::mem::size_of::<Self>()
|
||||
}
|
||||
}
|
||||
|
||||
/// # Top Hits
|
||||
///
|
||||
@@ -229,6 +254,7 @@ impl TopHitsAggregationReq {
|
||||
self.sort
|
||||
.iter()
|
||||
.map(|KeyOrder { field, .. }| field.as_str())
|
||||
.chain(self.doc_value_fields.iter().map(|s| s.as_str()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
@@ -565,23 +591,18 @@ impl TopHitsSegmentCollector {
|
||||
impl SegmentAggregationCollector for TopHitsSegmentCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut crate::aggregation::intermediate_agg_result::IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
|
||||
let req_data = agg_data.get_top_hits_req_data(self.accessor_idx);
|
||||
|
||||
let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
|
||||
let tophits_req = &agg_with_accessor.aggs.values[self.accessor_idx]
|
||||
.agg
|
||||
.agg
|
||||
.as_top_hits()
|
||||
.expect("aggregation request must be of type top hits");
|
||||
let value_accessors = &req_data.value_accessors;
|
||||
|
||||
let intermediate_result = IntermediateMetricResult::TopHits(
|
||||
self.into_top_hits_collector(value_accessors, tophits_req),
|
||||
self.into_top_hits_collector(value_accessors, &req_data.req),
|
||||
);
|
||||
results.push(
|
||||
name,
|
||||
req_data.name.to_string(),
|
||||
IntermediateAggregationResult::Metric(intermediate_result),
|
||||
)
|
||||
}
|
||||
@@ -590,32 +611,22 @@ impl SegmentAggregationCollector for TopHitsSegmentCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc_id: crate::DocId,
|
||||
agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let tophits_req = &agg_with_accessor.aggs.values[self.accessor_idx]
|
||||
.agg
|
||||
.agg
|
||||
.as_top_hits()
|
||||
.expect("aggregation request must be of type top hits");
|
||||
let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
|
||||
self.collect_with(doc_id, tophits_req, accessors)?;
|
||||
let req_data = agg_data.get_top_hits_req_data(self.accessor_idx);
|
||||
self.collect_with(doc_id, &req_data.req, &req_data.accessors)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
let tophits_req = &agg_with_accessor.aggs.values[self.accessor_idx]
|
||||
.agg
|
||||
.agg
|
||||
.as_top_hits()
|
||||
.expect("aggregation request must be of type top hits");
|
||||
let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
|
||||
let req_data = agg_data.get_top_hits_req_data(self.accessor_idx);
|
||||
// TODO: Consider getting fields with the column block accessor.
|
||||
for doc in docs {
|
||||
self.collect_with(*doc, tophits_req, accessors)?;
|
||||
self.collect_with(*doc, &req_data.req, &req_data.accessors)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -127,9 +127,10 @@
//! [`AggregationResults`](agg_result::AggregationResults) via the
//! [`into_final_result`](intermediate_agg_result::IntermediateAggregationResults::into_final_result) method.

mod accessor_helpers;
mod agg_data;
mod agg_limits;
pub mod agg_req;
mod agg_req_with_accessor;
pub mod agg_result;
pub mod bucket;
mod buf_collector;
@@ -140,7 +141,6 @@ pub mod intermediate_agg_result;
pub mod metric;

mod segment_agg_result;
use std::collections::HashMap;
use std::fmt::Display;

#[cfg(test)]
@@ -160,6 +160,28 @@ use itertools::Itertools;
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer, Serialize};

use crate::tokenizer::TokenizerManager;

/// Context parameters for aggregation execution
///
/// This struct holds shared resources needed during aggregation execution:
/// - `limits`: Memory and bucket limits for the aggregation
/// - `tokenizers`: TokenizerManager for parsing query strings in filter aggregations
#[derive(Clone, Default)]
pub struct AggContextParams {
/// Aggregation limits (memory and bucket count)
pub limits: AggregationLimitsGuard,
/// Tokenizer manager for query string parsing
pub tokenizers: TokenizerManager,
}

impl AggContextParams {
/// Create new aggregation context parameters
pub fn new(limits: AggregationLimitsGuard, tokenizers: TokenizerManager) -> Self {
Self { limits, tokenizers }
}
}
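
Editor's note, not part of the diff: with this change, `AggregationCollector::from_aggs` takes the new `AggContextParams` instead of a bare limits guard. A minimal sketch of the new construction path, mirroring the updated test helper further down in this diff; the `index`, `agg_req`, and `AggregationLimitsGuard::default()` pieces are assumptions, not code from this change set.

```rust
// Sketch only: build the context from limits plus the index's tokenizers,
// then hand it to the collector together with the aggregation request.
let context = AggContextParams::new(
    AggregationLimitsGuard::default(),
    index.tokenizers().clone(),
);
let collector = AggregationCollector::from_aggs(agg_req, context);
let searcher = index.reader()?.searcher();
let agg_results = searcher.search(&AllQuery, &collector)?;
```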
|
||||
fn parse_str_into_f64<E: de::Error>(value: &str) -> Result<f64, E> {
|
||||
let parsed = value
|
||||
.parse::<f64>()
|
||||
@@ -257,80 +279,6 @@ where D: Deserializer<'de> {
|
||||
deserializer.deserialize_any(StringOrFloatVisitor)
|
||||
}
|
||||
|
||||
/// Represents an associative array `(key => values)` in a very efficient manner.
|
||||
#[derive(PartialEq, Serialize, Deserialize)]
|
||||
pub(crate) struct VecWithNames<T> {
|
||||
pub(crate) values: Vec<T>,
|
||||
keys: Vec<String>,
|
||||
}
|
||||
|
||||
impl<T: Clone> Clone for VecWithNames<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
values: self.values.clone(),
|
||||
keys: self.keys.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for VecWithNames<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
values: Default::default(),
|
||||
keys: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: std::fmt::Debug> std::fmt::Debug for VecWithNames<T> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_map().entries(self.iter()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<HashMap<String, T>> for VecWithNames<T> {
|
||||
fn from(map: HashMap<String, T>) -> Self {
|
||||
VecWithNames::from_entries(map.into_iter().collect_vec())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> VecWithNames<T> {
|
||||
fn from_entries(mut entries: Vec<(String, T)>) -> Self {
|
||||
// Sort to ensure order of elements match across multiple instances
|
||||
entries.sort_by(|left, right| left.0.cmp(&right.0));
|
||||
let mut data = Vec::with_capacity(entries.len());
|
||||
let mut data_names = Vec::with_capacity(entries.len());
|
||||
for entry in entries {
|
||||
data_names.push(entry.0);
|
||||
data.push(entry.1);
|
||||
}
|
||||
VecWithNames {
|
||||
values: data,
|
||||
keys: data_names,
|
||||
}
|
||||
}
|
||||
fn iter(&self) -> impl Iterator<Item = (&str, &T)> + '_ {
|
||||
self.keys().zip(self.values.iter())
|
||||
}
|
||||
fn keys(&self) -> impl Iterator<Item = &str> + '_ {
|
||||
self.keys.iter().map(|key| key.as_str())
|
||||
}
|
||||
fn values_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ {
|
||||
self.values.iter_mut()
|
||||
}
|
||||
fn is_empty(&self) -> bool {
|
||||
self.keys.is_empty()
|
||||
}
|
||||
fn len(&self) -> usize {
|
||||
self.keys.len()
|
||||
}
|
||||
fn get(&self, name: &str) -> Option<&T> {
|
||||
self.keys()
|
||||
.position(|key| key == name)
|
||||
.map(|pos| &self.values[pos])
|
||||
}
|
||||
}
|
||||
|
||||
/// The serialized key is used in a `HashMap`.
|
||||
pub type SerializedKey = String;
|
||||
|
||||
@@ -464,7 +412,10 @@ mod tests {
|
||||
query: Option<(&str, &str)>,
|
||||
limits: AggregationLimitsGuard,
|
||||
) -> crate::Result<Value> {
|
||||
let collector = AggregationCollector::from_aggs(agg_req, limits);
|
||||
let collector = AggregationCollector::from_aggs(
|
||||
agg_req,
|
||||
AggContextParams::new(limits, index.tokenizers().clone()),
|
||||
);
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
@@ -6,48 +6,41 @@
use std::fmt::Debug;

pub(crate) use super::agg_limits::AggregationLimitsGuard;
use super::agg_req::AggregationVariants;
use super::agg_req_with_accessor::{AggregationWithAccessor, AggregationsWithAccessor};
use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
use super::intermediate_agg_result::IntermediateAggregationResults;
use super::metric::{
AverageAggregation, CountAggregation, ExtendedStatsAggregation, MaxAggregation, MinAggregation,
SegmentPercentilesCollector, SegmentStatsCollector, SegmentStatsType, StatsAggregation,
SumAggregation,
};
use crate::aggregation::bucket::TermMissingAgg;
use crate::aggregation::metric::{
CardinalityAggregationReq, SegmentCardinalityCollector, SegmentExtendedStatsCollector,
TopHitsSegmentCollector,
};
use crate::aggregation::agg_data::AggregationsSegmentCtx;

pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
/// A SegmentAggregationCollector is used to collect aggregation results.
pub trait SegmentAggregationCollector: CollectorClone + Debug {
fn add_intermediate_aggregation_result(
self: Box<Self>,
agg_with_accessor: &AggregationsWithAccessor,
agg_data: &AggregationsSegmentCtx,
results: &mut IntermediateAggregationResults,
) -> crate::Result<()>;

#[inline]
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()>;
agg_data: &mut AggregationsSegmentCtx,
) -> crate::Result<()> {
self.collect_block(&[doc], agg_data)
}

fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &mut AggregationsWithAccessor,
agg_data: &mut AggregationsSegmentCtx,
) -> crate::Result<()>;

/// Finalize method. Some Aggregator collect blocks of docs before calling `collect_block`.
/// This method ensures those staged docs will be collected.
fn flush(&mut self, _agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
fn flush(&mut self, _agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
Ok(())
}
}

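Editor's note, not part of the diff: the trait is now public and `collect` has a default body that forwards to `collect_block`, so an implementor only has to provide the intermediate-result conversion and `collect_block`. A hypothetical skeleton, assuming the blanket `CollectorClone` implementation for `Clone` collectors that lives in this file; the struct and its field are illustrative only.

```rust
// Sketch of a minimal implementor under the new trait shape.
#[derive(Clone, Debug, Default)]
struct DocCountingCollector {
    doc_count: u64,
}

impl SegmentAggregationCollector for DocCountingCollector {
    fn add_intermediate_aggregation_result(
        self: Box<Self>,
        _agg_data: &AggregationsSegmentCtx,
        _results: &mut IntermediateAggregationResults,
    ) -> crate::Result<()> {
        // Conversion into an intermediate result is elided in this sketch.
        Ok(())
    }

    fn collect_block(
        &mut self,
        docs: &[crate::DocId],
        _agg_data: &mut AggregationsSegmentCtx,
    ) -> crate::Result<()> {
        // `collect` and `flush` come from the default implementations above.
        self.doc_count += docs.len() as u64;
        Ok(())
    }
}
```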
pub(crate) trait CollectorClone {
/// A helper trait to enable cloning of Box<dyn SegmentAggregationCollector>
pub trait CollectorClone {
fn clone_box(&self) -> Box<dyn SegmentAggregationCollector>;
}

@@ -65,119 +58,6 @@ impl Clone for Box<dyn SegmentAggregationCollector> {
}
}

pub(crate) fn build_segment_agg_collector(
|
||||
req: &mut AggregationsWithAccessor,
|
||||
) -> crate::Result<Box<dyn SegmentAggregationCollector>> {
|
||||
// Single collector special case
|
||||
if req.aggs.len() == 1 {
|
||||
let req = &mut req.aggs.values[0];
|
||||
let accessor_idx = 0;
|
||||
return build_single_agg_segment_collector(req, accessor_idx);
|
||||
}
|
||||
|
||||
let agg = GenericSegmentAggregationResultsCollector::from_req_and_validate(req)?;
|
||||
Ok(Box::new(agg))
|
||||
}
|
||||
|
||||
pub(crate) fn build_single_agg_segment_collector(
|
||||
req: &mut AggregationWithAccessor,
|
||||
accessor_idx: usize,
|
||||
) -> crate::Result<Box<dyn SegmentAggregationCollector>> {
|
||||
use AggregationVariants::*;
|
||||
match &req.agg.agg {
|
||||
Terms(terms_req) => {
|
||||
if req.accessors.is_empty() {
|
||||
Ok(Box::new(SegmentTermCollector::from_req_and_validate(
|
||||
terms_req,
|
||||
&mut req.sub_aggregation,
|
||||
req.field_type,
|
||||
accessor_idx,
|
||||
)?))
|
||||
} else {
|
||||
Ok(Box::new(TermMissingAgg::new(
|
||||
accessor_idx,
|
||||
&mut req.sub_aggregation,
|
||||
)?))
|
||||
}
|
||||
}
|
||||
Range(range_req) => Ok(Box::new(SegmentRangeCollector::from_req_and_validate(
|
||||
range_req,
|
||||
&mut req.sub_aggregation,
|
||||
&mut req.limits,
|
||||
req.field_type,
|
||||
accessor_idx,
|
||||
)?)),
|
||||
Histogram(histogram) => Ok(Box::new(SegmentHistogramCollector::from_req_and_validate(
|
||||
histogram.clone(),
|
||||
&mut req.sub_aggregation,
|
||||
req.field_type,
|
||||
accessor_idx,
|
||||
)?)),
|
||||
DateHistogram(histogram) => Ok(Box::new(SegmentHistogramCollector::from_req_and_validate(
|
||||
histogram.to_histogram_req()?,
|
||||
&mut req.sub_aggregation,
|
||||
req.field_type,
|
||||
accessor_idx,
|
||||
)?)),
|
||||
Average(AverageAggregation { missing, .. }) => {
|
||||
Ok(Box::new(SegmentStatsCollector::from_req(
|
||||
req.field_type,
|
||||
SegmentStatsType::Average,
|
||||
accessor_idx,
|
||||
*missing,
|
||||
)))
|
||||
}
|
||||
Count(CountAggregation { missing, .. }) => Ok(Box::new(SegmentStatsCollector::from_req(
|
||||
req.field_type,
|
||||
SegmentStatsType::Count,
|
||||
accessor_idx,
|
||||
*missing,
|
||||
))),
|
||||
Max(MaxAggregation { missing, .. }) => Ok(Box::new(SegmentStatsCollector::from_req(
|
||||
req.field_type,
|
||||
SegmentStatsType::Max,
|
||||
accessor_idx,
|
||||
*missing,
|
||||
))),
|
||||
Min(MinAggregation { missing, .. }) => Ok(Box::new(SegmentStatsCollector::from_req(
|
||||
req.field_type,
|
||||
SegmentStatsType::Min,
|
||||
accessor_idx,
|
||||
*missing,
|
||||
))),
|
||||
Stats(StatsAggregation { missing, .. }) => Ok(Box::new(SegmentStatsCollector::from_req(
|
||||
req.field_type,
|
||||
SegmentStatsType::Stats,
|
||||
accessor_idx,
|
||||
*missing,
|
||||
))),
|
||||
ExtendedStats(ExtendedStatsAggregation { missing, sigma, .. }) => Ok(Box::new(
|
||||
SegmentExtendedStatsCollector::from_req(req.field_type, *sigma, accessor_idx, *missing),
|
||||
)),
|
||||
Sum(SumAggregation { missing, .. }) => Ok(Box::new(SegmentStatsCollector::from_req(
|
||||
req.field_type,
|
||||
SegmentStatsType::Sum,
|
||||
accessor_idx,
|
||||
*missing,
|
||||
))),
|
||||
Percentiles(percentiles_req) => Ok(Box::new(
|
||||
SegmentPercentilesCollector::from_req_and_validate(
|
||||
percentiles_req,
|
||||
req.field_type,
|
||||
accessor_idx,
|
||||
)?,
|
||||
)),
|
||||
TopHits(top_hits_req) => Ok(Box::new(TopHitsSegmentCollector::from_req(
|
||||
top_hits_req,
|
||||
accessor_idx,
|
||||
req.segment_ordinal,
|
||||
))),
|
||||
Cardinality(CardinalityAggregationReq { missing, .. }) => Ok(Box::new(
|
||||
SegmentCardinalityCollector::from_req(req.field_type, accessor_idx, missing),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
/// The GenericSegmentAggregationResultsCollector is the generic version of the collector, which
|
||||
/// can handle arbitrary complexity of sub-aggregations. Ideally we never have to pick this one
|
||||
@@ -197,11 +77,11 @@ impl Debug for GenericSegmentAggregationResultsCollector {
|
||||
impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
|
||||
fn add_intermediate_aggregation_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
agg_data: &AggregationsSegmentCtx,
|
||||
results: &mut IntermediateAggregationResults,
|
||||
) -> crate::Result<()> {
|
||||
for agg in self.aggs {
|
||||
agg.add_intermediate_aggregation_result(agg_with_accessor, results)?;
|
||||
agg.add_intermediate_aggregation_result(agg_data, results)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -210,9 +90,9 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
self.collect_block(&[doc], agg_with_accessor)?;
|
||||
self.collect_block(&[doc], agg_data)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -220,32 +100,19 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
|
||||
fn collect_block(
|
||||
&mut self,
|
||||
docs: &[crate::DocId],
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
agg_data: &mut AggregationsSegmentCtx,
|
||||
) -> crate::Result<()> {
|
||||
for collector in &mut self.aggs {
|
||||
collector.collect_block(docs, agg_with_accessor)?;
|
||||
collector.collect_block(docs, agg_data)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
|
||||
fn flush(&mut self, agg_data: &mut AggregationsSegmentCtx) -> crate::Result<()> {
|
||||
for collector in &mut self.aggs {
|
||||
collector.flush(agg_with_accessor)?;
|
||||
collector.flush(agg_data)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl GenericSegmentAggregationResultsCollector {
|
||||
pub(crate) fn from_req_and_validate(req: &mut AggregationsWithAccessor) -> crate::Result<Self> {
|
||||
let aggs = req
|
||||
.aggs
|
||||
.values_mut()
|
||||
.enumerate()
|
||||
.map(|(accessor_idx, req)| build_single_agg_segment_collector(req, accessor_idx))
|
||||
.collect::<crate::Result<Vec<Box<dyn SegmentAggregationCollector>>>>()?;
|
||||
|
||||
Ok(GenericSegmentAggregationResultsCollector { aggs })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -484,7 +484,6 @@ impl FacetCounts {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::BTreeSet;
|
||||
use std::iter;
|
||||
|
||||
use columnar::Dictionary;
|
||||
use rand::distributions::Uniform;
|
||||
@@ -739,7 +738,7 @@ mod tests {
|
||||
.flat_map(|(c, count)| {
|
||||
let facet = Facet::from(&format!("/facet/{c}"));
|
||||
let doc = doc!(facet_field => facet);
|
||||
iter::repeat(doc).take(count)
|
||||
std::iter::repeat_n(doc, count)
|
||||
})
|
||||
.map(|mut doc| {
|
||||
doc.add_facet(
|
||||
@@ -787,7 +786,7 @@ mod tests {
|
||||
.flat_map(|(c, count)| {
|
||||
let facet = Facet::from(&format!("/facet/{c}"));
|
||||
let doc = doc!(facet_field => facet);
|
||||
iter::repeat(doc).take(count)
|
||||
std::iter::repeat_n(doc, count)
|
||||
})
|
||||
.collect();
|
||||
|
||||
|
||||
@@ -970,7 +970,7 @@ impl<Score, D, const R: bool> From<TopNComputerDeser<Score, D, R>> for TopNCompu
}
}

impl<Score, D, const R: bool> TopNComputer<Score, D, R>
impl<Score, D, const REVERSE_ORDER: bool> TopNComputer<Score, D, REVERSE_ORDER>
where
Score: PartialOrd + Clone,
D: Ord,
@@ -991,7 +991,10 @@ where
#[inline]
pub fn push(&mut self, feature: Score, doc: D) {
if let Some(last_median) = self.threshold.clone() {
if feature < last_median {
if !REVERSE_ORDER && feature > last_median {
return;
}
if REVERSE_ORDER && feature < last_median {
return;
}
}
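
Editor's note, not part of the diff: this hunk is the heart of the ordering fix. The early exit against the current threshold now depends on `REVERSE_ORDER`: as the property tests added below spell out, `REVERSE_ORDER = false` keeps the smallest features (ascending order) and may skip anything above the threshold, while `REVERSE_ORDER = true` keeps the largest and may skip anything below it. A purely illustrative sketch of that predicate:

```rust
// Illustrative only: mirrors the two early-exit branches above.
fn skips_at_threshold<S: PartialOrd>(reverse_order: bool, feature: &S, threshold: &S) -> bool {
    if reverse_order {
        // keep the largest features: anything below the threshold can be dropped
        feature < threshold
    } else {
        // keep the smallest features: anything above the threshold can be dropped
        feature > threshold
    }
}
```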
@@ -1026,7 +1029,7 @@
}

/// Returns the top n elements in sorted order.
pub fn into_sorted_vec(mut self) -> Vec<ComparableDoc<Score, D, R>> {
pub fn into_sorted_vec(mut self) -> Vec<ComparableDoc<Score, D, REVERSE_ORDER>> {
if self.buffer.len() > self.top_n {
self.truncate_top_n();
}
@@ -1037,7 +1040,7 @@ where
/// Returns the top n elements in stored order.
/// Useful if you do not need the elements in sorted order,
/// for example when merging the results of multiple segments.
pub fn into_vec(mut self) -> Vec<ComparableDoc<Score, D, R>> {
pub fn into_vec(mut self) -> Vec<ComparableDoc<Score, D, REVERSE_ORDER>> {
if self.buffer.len() > self.top_n {
self.truncate_top_n();
}
@@ -1047,9 +1050,11 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use proptest::prelude::*;
|
||||
|
||||
use super::{TopDocs, TopNComputer};
|
||||
use crate::collector::top_collector::ComparableDoc;
|
||||
use crate::collector::Collector;
|
||||
use crate::collector::{Collector, DocSetCollector};
|
||||
use crate::query::{AllQuery, Query, QueryParser};
|
||||
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
@@ -1144,6 +1149,44 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_topn_computer_asc_prop(
|
||||
limit in 0..10_usize,
|
||||
docs in proptest::collection::vec((0..100_u64, 0..100_u64), 0..100_usize),
|
||||
) {
|
||||
let mut computer: TopNComputer<_, _, false> = TopNComputer::new(limit);
|
||||
for (feature, doc) in &docs {
|
||||
computer.push(*feature, *doc);
|
||||
}
|
||||
let mut comparable_docs = docs.into_iter().map(|(feature, doc)| ComparableDoc { feature, doc }).collect::<Vec<_>>();
|
||||
comparable_docs.sort();
|
||||
comparable_docs.truncate(limit);
|
||||
prop_assert_eq!(
|
||||
computer.into_sorted_vec(),
|
||||
comparable_docs,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_topn_computer_desc_prop(
|
||||
limit in 0..10_usize,
|
||||
docs in proptest::collection::vec((0..100_u64, 0..100_u64), 0..100_usize),
|
||||
) {
|
||||
let mut computer: TopNComputer<_, _, true> = TopNComputer::new(limit);
|
||||
for (feature, doc) in &docs {
|
||||
computer.push(*feature, *doc);
|
||||
}
|
||||
let mut comparable_docs = docs.into_iter().map(|(feature, doc)| ComparableDoc { feature, doc }).collect::<Vec<_>>();
|
||||
comparable_docs.sort();
|
||||
comparable_docs.truncate(limit);
|
||||
prop_assert_eq!(
|
||||
computer.into_sorted_vec(),
|
||||
comparable_docs,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_not_at_capacity_without_offset() -> crate::Result<()> {
|
||||
let index = make_index()?;
|
||||
@@ -1250,6 +1293,220 @@ mod tests {
|
||||
assert_eq!(page_0, &page_2[..page_0.len()]);
|
||||
}
|
||||
|
||||
proptest! {
#![proptest_config(ProptestConfig::with_cases(20))]
/// Build multiple segments with equal-scoring docs and verify stable ordering
/// across pages when increasing limit or offset.
#[test]
fn proptest_stable_ordering_across_segments_with_pagination(
docs_per_segment in proptest::collection::vec(1usize..50, 2..5)
) {
use crate::indexer::NoMergePolicy;

// Build an index with multiple segments; all docs will have the same score using AllQuery.
let mut schema_builder = Schema::builder();
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_for_tests().unwrap();
writer.set_merge_policy(Box::new(NoMergePolicy));

for num_docs in &docs_per_segment {
for _ in 0..*num_docs {
writer.add_document(doc!(text => "x")).unwrap();
}
writer.commit().unwrap();
}

let reader = index.reader().unwrap();
let searcher = reader.searcher();

let total_docs: usize = docs_per_segment.iter().sum();
// Full result set, first assert all scores are identical.
let full_with_scores: Vec<(Score, DocAddress)> = searcher
.search(&AllQuery, &TopDocs::with_limit(total_docs))
.unwrap();
// Sanity: at least one document was returned.
prop_assert!(!full_with_scores.is_empty());
let first_score = full_with_scores[0].0;
prop_assert!(full_with_scores.iter().all(|(score, _)| *score == first_score));

// Keep only the addresses for the remaining checks.
let full: Vec<DocAddress> = full_with_scores
.into_iter()
.map(|(_score, addr)| addr)
.collect();

// Sanity: we actually created multiple segments and have documents.
prop_assert!(docs_per_segment.len() >= 2);
prop_assert!(total_docs >= 2);

// 1) Increasing limit should preserve prefix ordering.
for k in 1..=total_docs {
let page: Vec<DocAddress> = searcher
.search(&AllQuery, &TopDocs::with_limit(k))
.unwrap()
.into_iter()
.map(|(_score, addr)| addr)
.collect();
prop_assert_eq!(page, full[..k].to_vec());
}

// 2) Offset + limit pages should always match the corresponding slice.
// For each offset, check three representative page sizes:
// - first page (size 1)
// - a middle page (roughly half of remaining)
// - the last page (size = remaining)
for offset in 0..total_docs {
let remaining = total_docs - offset;

let assert_page_eq = |limit: usize| -> proptest::test_runner::TestCaseResult {
let page: Vec<DocAddress> = searcher
.search(&AllQuery, &TopDocs::with_limit(limit).and_offset(offset))
.unwrap()
.into_iter()
.map(|(_score, addr)| addr)
.collect();
prop_assert_eq!(page, full[offset..offset + limit].to_vec());
Ok(())
};

// Smallest page.
assert_page_eq(1)?;
// A middle-sized page (dedupes to 1 if remaining == 1).
assert_page_eq((remaining / 2).max(1))?;
// Largest page for this offset.
assert_page_eq(remaining)?;
}

// 3) Concatenating fixed-size pages by offset reproduces the full order.
for page_size in 1..=total_docs.min(5) {
let mut concat: Vec<DocAddress> = Vec::new();
let mut offset = 0;
while offset < total_docs {
let size = page_size.min(total_docs - offset);
let page: Vec<DocAddress> = searcher
.search(&AllQuery, &TopDocs::with_limit(size).and_offset(offset))
.unwrap()
.into_iter()
.map(|(_score, addr)| addr)
.collect();
concat.extend(page);
offset += size;
}
// Avoid moving `full` across loop iterations.
prop_assert_eq!(concat, full.clone());
}
}
}

proptest! {
#![proptest_config(ProptestConfig::with_cases(20))]
/// Build multiple segments with same-scoring term matches and verify stable ordering
/// across pages for a real scoring query (TermQuery with identical TF and fieldnorm).
#[test]
fn proptest_stable_ordering_across_segments_with_term_query_and_pagination(
docs_per_segment in proptest::collection::vec(1usize..50, 2..5)
) {
use crate::indexer::NoMergePolicy;
use crate::schema::IndexRecordOption;
use crate::query::TermQuery;
use crate::Term;

// Build an index with multiple segments; each doc has exactly one token "x",
// ensuring equal BM25 scores across all matching docs (same TF=1 and fieldnorm=1).
let mut schema_builder = Schema::builder();
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_for_tests().unwrap();
writer.set_merge_policy(Box::new(NoMergePolicy));

for num_docs in &docs_per_segment {
for _ in 0..*num_docs {
writer.add_document(doc!(text => "x")).unwrap();
}
writer.commit().unwrap();
}

let reader = index.reader().unwrap();
let searcher = reader.searcher();

let total_docs: usize = docs_per_segment.iter().sum();
let term = Term::from_field_text(text, "x");
let tq = TermQuery::new(term, IndexRecordOption::WithFreqs);

// Full result set, first assert all scores are identical across docs.
let full_with_scores: Vec<(Score, DocAddress)> = searcher
.search(&tq, &TopDocs::with_limit(total_docs))
.unwrap();
// Sanity: at least one document was returned.
prop_assert!(!full_with_scores.is_empty());
let first_score = full_with_scores[0].0;
prop_assert!(full_with_scores.iter().all(|(score, _)| *score == first_score));

// Keep only the addresses for the remaining checks.
let full: Vec<DocAddress> = full_with_scores
.into_iter()
.map(|(_score, addr)| addr)
.collect();

// Sanity: we actually created multiple segments and have documents.
prop_assert!(docs_per_segment.len() >= 2);
prop_assert!(total_docs >= 2);

// 1) Increasing limit should preserve prefix ordering.
for k in 1..=total_docs {
let page: Vec<DocAddress> = searcher
.search(&tq, &TopDocs::with_limit(k))
.unwrap()
.into_iter()
.map(|(_score, addr)| addr)
.collect();
prop_assert_eq!(page, full[..k].to_vec());
}

// 2) Offset + limit pages should always match the corresponding slice.
// Check three representative page sizes for each offset: 1, ~half, and remaining.
for offset in 0..total_docs {
let remaining = total_docs - offset;

let assert_page_eq = |limit: usize| -> proptest::test_runner::TestCaseResult {
let page: Vec<DocAddress> = searcher
.search(&tq, &TopDocs::with_limit(limit).and_offset(offset))
.unwrap()
.into_iter()
.map(|(_score, addr)| addr)
.collect();
prop_assert_eq!(page, full[offset..offset + limit].to_vec());
Ok(())
};

assert_page_eq(1)?;
assert_page_eq((remaining / 2).max(1))?;
assert_page_eq(remaining)?;
}

// 3) Concatenating fixed-size pages by offset reproduces the full order.
for page_size in 1..=total_docs.min(5) {
let mut concat: Vec<DocAddress> = Vec::new();
let mut offset = 0;
while offset < total_docs {
let size = page_size.min(total_docs - offset);
let page: Vec<DocAddress> = searcher
.search(&tq, &TopDocs::with_limit(size).and_offset(offset))
.unwrap()
.into_iter()
.map(|(_score, addr)| addr)
.collect();
concat.extend(page);
offset += size;
}
prop_assert_eq!(concat, full.clone());
}
}
}

#[test]
#[should_panic]
fn test_top_0() {
@@ -1486,6 +1743,72 @@ mod tests {
Ok(())
}

proptest! {
#[test]
fn test_top_field_collect_string_prop(
order in prop_oneof!(Just(Order::Desc), Just(Order::Asc)),
limit in 1..256_usize,
offset in 0..256_usize,
segments_terms in
proptest::collection::vec(
proptest::collection::vec(0..32_u8, 1..32_usize),
0..8_usize,
)
) {
let mut schema_builder = Schema::builder();
let city = schema_builder.add_text_field("city", TEXT | FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;

// A Vec<Vec<u8>>, where the outer Vec represents segments, and the inner Vec
// represents terms.
for segment_terms in segments_terms.into_iter() {
for term in segment_terms.into_iter() {
let term = format!("{term:0>3}");
index_writer.add_document(doc!(
city => term,
))?;
}
index_writer.commit()?;
}

let searcher = index.reader()?.searcher();
let top_n_results = searcher.search(&AllQuery, &TopDocs::with_limit(limit)
.and_offset(offset)
.order_by_string_fast_field("city", order.clone()))?;
let all_results = searcher.search(&AllQuery, &DocSetCollector)?.into_iter().map(|doc_address| {
// Get the term for this address.
// NOTE: We can't determine the SegmentIds that will be generated for Segments
// ahead of time, so we can't pre-compute the expected `DocAddress`es.
let column = searcher.segment_readers()[doc_address.segment_ord as usize].fast_fields().str("city").unwrap().unwrap();
let term_ord = column.term_ords(doc_address.doc_id).next().unwrap();
let mut city = Vec::new();
column.dictionary().ord_to_term(term_ord, &mut city).unwrap();
(String::try_from(city).unwrap(), doc_address)
});

// Using the TopDocs collector should always be equivalent to sorting, skipping the
// offset, and then taking the limit.
let sorted_docs: Vec<_> = if order.is_desc() {
let mut comparable_docs: Vec<ComparableDoc<_, _, true>> =
all_results.into_iter().map(|(feature, doc)| ComparableDoc { feature, doc}).collect();
comparable_docs.sort();
comparable_docs.into_iter().map(|cd| (cd.feature, cd.doc)).collect()
} else {
let mut comparable_docs: Vec<ComparableDoc<_, _, false>> =
all_results.into_iter().map(|(feature, doc)| ComparableDoc { feature, doc}).collect();
comparable_docs.sort();
comparable_docs.into_iter().map(|cd| (cd.feature, cd.doc)).collect()
};
let expected_docs = sorted_docs.into_iter().skip(offset).take(limit).collect::<Vec<_>>();
prop_assert_eq!(
expected_docs,
top_n_results
);
}
}

#[test]
#[should_panic]
fn test_field_does_not_exist() {
@@ -1645,4 +1968,29 @@ mod tests {
);
Ok(())
}

#[test]
fn test_topn_computer_asc() {
let mut computer: TopNComputer<u32, u32, false> = TopNComputer::new(2);

computer.push(1u32, 1u32);
computer.push(2u32, 2u32);
computer.push(3u32, 3u32);
computer.push(2u32, 4u32);
computer.push(4u32, 5u32);
computer.push(1u32, 6u32);
assert_eq!(
computer.into_sorted_vec(),
&[
ComparableDoc {
feature: 1u32,
doc: 1u32,
},
ComparableDoc {
feature: 1u32,
doc: 6u32,
}
]
);
}
}

@@ -30,7 +30,7 @@ fn create_format() {
}

fn path_for_version(version: &str) -> String {
format!("./tests/compat_tests_data/index_v{}/", version)
format!("./tests/compat_tests_data/index_v{version}/")
}

/// feature flag quickwit uses a different dictionary type

@@ -65,8 +65,7 @@ impl Executor {
if let Err(err) = fruit_sender_ref.send((idx, fruit)) {
error!(
"Failed to send search task. It probably means all search \
threads have panicked. {:?}",
err
threads have panicked. {err:?}"
);
}
});

@@ -1,3 +1,4 @@
use columnar::NumericalValue;
use common::json_path_writer::{JSON_END_OF_PATH, JSON_PATH_SEGMENT_SEP};
use common::{replace_in_place, JsonPathWriter};
use rustc_hash::FxHashMap;
@@ -152,7 +153,7 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
if let Ok(i64_val) = val.try_into() {
term_buffer.append_type_and_fast_value::<i64>(i64_val);
} else {
term_buffer.append_type_and_fast_value(val);
term_buffer.append_type_and_fast_value::<u64>(val);
}
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
}
@@ -166,12 +167,30 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
}
ReferenceValueLeaf::F64(val) => {
if !val.is_finite() {
return;
};
set_path_id(
term_buffer,
ctx.path_to_unordered_id
.get_or_allocate_unordered_id(json_path_writer.as_str()),
);
term_buffer.append_type_and_fast_value(val);
// Normalize here is important.
// In the inverted index, we coerce all numerical values to their canonical
// representation.
//
// (We do the same thing on the query side)
match NumericalValue::F64(val).normalize() {
NumericalValue::I64(val_i64) => {
term_buffer.append_type_and_fast_value::<i64>(val_i64);
}
NumericalValue::U64(val_u64) => {
term_buffer.append_type_and_fast_value::<u64>(val_u64);
}
NumericalValue::F64(val_f64) => {
term_buffer.append_type_and_fast_value::<f64>(val_f64);
}
}
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
}
ReferenceValueLeaf::Bool(val) => {
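The hunk above coerces every finite f64 leaf to its canonical numerical representation before the term is written, so a JSON value such as 3.0 is indexed the same way as the integer 3. A rough, self-contained sketch of what that normalization amounts to (the Num enum below is a stand-in for columnar's NumericalValue, not the real API):

#[derive(Debug, PartialEq)]
enum Num {
    I64(i64),
    U64(u64),
    F64(f64),
}

// Canonicalize an f64: integers in i64 range become I64, huge positive integers
// become U64, everything else stays F64. This mirrors the idea, not the exact impl.
fn normalize(val: f64) -> Num {
    if val.fract() == 0.0 && val >= i64::MIN as f64 && val <= i64::MAX as f64 {
        Num::I64(val as i64)
    } else if val.fract() == 0.0 && val >= 0.0 && val <= u64::MAX as f64 {
        Num::U64(val as u64)
    } else {
        Num::F64(val)
    }
}

fn main() {
    assert_eq!(normalize(3.0), Num::I64(3));
    assert_eq!(normalize(2.5), Num::F64(2.5));
}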
@@ -241,8 +260,8 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
///
/// The term must be json + JSON path.
pub fn convert_to_fast_value_and_append_to_json_term(
mut term: Term,
phrase: &str,
term: &Term,
text: &str,
truncate_date_for_search: bool,
) -> Option<Term> {
assert_eq!(
@@ -254,31 +273,50 @@ pub fn convert_to_fast_value_and_append_to_json_term(
0,
"JSON value bytes should be empty"
);
if let Ok(dt) = OffsetDateTime::parse(phrase, &Rfc3339) {
let mut dt = DateTime::from_utc(dt.to_offset(UtcOffset::UTC));
if truncate_date_for_search {
dt = dt.truncate(DATE_TIME_PRECISION_INDEXED);
try_convert_to_datetime_and_append_to_json_term(term, text, truncate_date_for_search)
.or_else(|| try_convert_to_number_and_append_to_json_term(term, text))
.or_else(|| try_convert_to_bool_and_append_to_json_term_typed(term, text))
}

fn try_convert_to_datetime_and_append_to_json_term(
term: &Term,
text: &str,
truncate_date_for_search: bool,
) -> Option<Term> {
let dt = OffsetDateTime::parse(text, &Rfc3339).ok()?;
let mut dt = DateTime::from_utc(dt.to_offset(UtcOffset::UTC));
if truncate_date_for_search {
dt = dt.truncate(DATE_TIME_PRECISION_INDEXED);
}
let mut term_clone = term.clone();
term_clone.append_type_and_fast_value(dt);
Some(term_clone)
}

fn try_convert_to_number_and_append_to_json_term(term: &Term, text: &str) -> Option<Term> {
let numerical_value: NumericalValue = str::parse::<NumericalValue>(text).ok()?;
let mut term_clone = term.clone();
// Parse is actually returning normalized values already today, but let's not
// not rely on that hidden contract.
match numerical_value.normalize() {
NumericalValue::I64(i64_value) => {
term_clone.append_type_and_fast_value::<i64>(i64_value);
}
NumericalValue::U64(u64_value) => {
term_clone.append_type_and_fast_value::<u64>(u64_value);
}
NumericalValue::F64(f64_value) => {
term_clone.append_type_and_fast_value::<f64>(f64_value);
}
term.append_type_and_fast_value(dt);
return Some(term);
}
if let Ok(i64_val) = str::parse::<i64>(phrase) {
term.append_type_and_fast_value(i64_val);
return Some(term);
}
if let Ok(u64_val) = str::parse::<u64>(phrase) {
term.append_type_and_fast_value(u64_val);
return Some(term);
}
if let Ok(f64_val) = str::parse::<f64>(phrase) {
term.append_type_and_fast_value(f64_val);
return Some(term);
}
if let Ok(bool_val) = str::parse::<bool>(phrase) {
term.append_type_and_fast_value(bool_val);
return Some(term);
}
None
Some(term_clone)
}

fn try_convert_to_bool_and_append_to_json_term_typed(term: &Term, text: &str) -> Option<Term> {
let val = str::parse::<bool>(text).ok()?;
let mut term_clone = term.clone();
term_clone.append_type_and_fast_value(val);
Some(term_clone)
}

/// Splits a json path supplied to the query parser in such a way that

@@ -56,7 +56,7 @@ impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {
impl Drop for DirectoryLockGuard {
fn drop(&mut self) {
if let Err(e) = self.directory.delete(&self.path) {
error!("Failed to remove the lock file. {:?}", e);
error!("Failed to remove the lock file. {e:?}");
}
}
}

@@ -51,7 +51,7 @@ impl FileWatcher {
.map(|current_checksum| current_checksum != checksum)
.unwrap_or(true);
if metafile_has_changed {
info!("Meta file {:?} was modified", path);
info!("Meta file {path:?} was modified");
current_checksum_opt = Some(checksum);
// We actually ignore callbacks failing here.
// We just wait for the end of their execution.
@@ -75,7 +75,7 @@ impl FileWatcher {
let reader = match fs::File::open(path) {
Ok(f) => io::BufReader::new(f),
Err(e) => {
warn!("Failed to open meta file {:?}: {:?}", path, e);
warn!("Failed to open meta file {path:?}: {e:?}");
return Err(e);
}
};

@@ -157,7 +157,7 @@ impl ManagedDirectory {
for file_to_delete in files_to_delete {
match self.delete(&file_to_delete) {
Ok(_) => {
info!("Deleted {:?}", file_to_delete);
info!("Deleted {file_to_delete:?}");
deleted_files.push(file_to_delete);
}
Err(file_error) => {
@@ -170,7 +170,7 @@ impl ManagedDirectory {
if !cfg!(target_os = "windows") {
// On windows, delete is expected to fail if the file
// is mmapped.
error!("Failed to delete {:?}", file_to_delete);
error!("Failed to delete {file_to_delete:?}");
}
}
}

@@ -7,7 +7,7 @@ use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock, Weak};

use common::StableDeref;
use fs4::FileExt;
use fs4::fs_std::FileExt;
#[cfg(all(feature = "mmap", unix))]
pub use memmap2::Advice;
use memmap2::Mmap;
@@ -29,7 +29,7 @@ pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
io::Error::other(msg)
}

/// Returns `None` iff the file exists, can be read, but is empty (and hence
@@ -369,7 +369,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {

impl Directory for MmapDirectory {
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> {
debug!("Open Read {:?}", path);
debug!("Open Read {path:?}");
let full_path = self.resolve_path(path);

let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
@@ -414,7 +414,7 @@ impl Directory for MmapDirectory {
}

fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path);
debug!("Open Write {path:?}");
let full_path = self.resolve_path(path);

let open_res = OpenOptions::new()
@@ -467,7 +467,7 @@ impl Directory for MmapDirectory {
}

fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path);
debug!("Atomic Write {path:?}");
let full_path = self.resolve_path(path);
atomic_write(&full_path, content)?;
Ok(())
@@ -484,8 +484,8 @@ impl Directory for MmapDirectory {
.map_err(LockError::wrap_io_error)?;
if lock.is_blocking {
file.lock_exclusive().map_err(LockError::wrap_io_error)?;
} else {
file.try_lock_exclusive().map_err(|_| LockError::LockBusy)?
} else if !file.try_lock_exclusive().map_err(|_| LockError::LockBusy)? {
return Err(LockError::LockBusy);
}
// dropping the file handle will release the lock.
Ok(DirectoryLock::from(Box::new(ReleaseLockFile {

@@ -191,7 +191,7 @@ impl Directory for RamDirectory {
.fs
.read()
.map_err(|e| OpenReadError::IoError {
io_error: Arc::new(io::Error::new(io::ErrorKind::Other, e.to_string())),
io_error: Arc::new(io::Error::other(e.to_string())),
filepath: path.to_path_buf(),
})?
.exists(path))

@@ -90,10 +90,7 @@ impl WatchCallbackList {
let _ = sender.send(Ok(()));
});
if let Err(err) = spawn_res {
error!(
"Failed to spawn thread to call watch callbacks. Cause: {:?}",
err
);
error!("Failed to spawn thread to call watch callbacks. Cause: {err:?}");
}
result
}

@@ -87,6 +87,17 @@ pub trait DocSet: Send {
/// length of the docset.
fn size_hint(&self) -> u32;

/// Returns a best-effort hint of the cost to consume the entire docset.
///
/// Consuming means calling advance until [`TERMINATED`] is returned.
/// The cost should be relative to the cost of driving a Term query,
/// which would be the number of documents in the DocSet.
///
/// By default this returns `size_hint()`.
fn cost(&self) -> u64 {
self.size_hint() as u64
}

/// Returns the number documents matching.
/// Calling this method consumes the `DocSet`.
fn count(&mut self, alive_bitset: &AliveBitSet) -> u32 {
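The new cost() hint gives query combinators a way to compare how expensive it is to exhaust different clauses. A hedged, standalone illustration (FakeDocSet is invented for this example and is not tantivy's DocSet trait) of how such a hint could be used to pick the cheaper side to drive an intersection:

// Two sorted doc-id lists with a cost hint; the intersection is driven by the
// cheaper one and the other is only probed.
struct FakeDocSet {
    docs: Vec<u32>,
}

impl FakeDocSet {
    fn cost(&self) -> u64 {
        self.docs.len() as u64
    }
}

/// Intersects two sorted doc id lists, iterating the lower-cost side.
fn intersect(a: &FakeDocSet, b: &FakeDocSet) -> Vec<u32> {
    let (driver, other) = if a.cost() <= b.cost() { (a, b) } else { (b, a) };
    driver
        .docs
        .iter()
        .copied()
        .filter(|doc| other.docs.binary_search(doc).is_ok())
        .collect()
}

fn main() {
    let a = FakeDocSet { docs: vec![1, 3, 5, 7] };
    let b = FakeDocSet { docs: vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9] };
    assert_eq!(intersect(&a, &b), vec![1, 3, 5, 7]);
}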
@@ -134,6 +145,10 @@ impl DocSet for &mut dyn DocSet {
(**self).size_hint()
}

fn cost(&self) -> u64 {
(**self).cost()
}

fn count(&mut self, alive_bitset: &AliveBitSet) -> u32 {
(**self).count(alive_bitset)
}
@@ -169,6 +184,11 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
unboxed.size_hint()
}

fn cost(&self) -> u64 {
let unboxed: &TDocSet = self.borrow();
unboxed.cost()
}

fn count(&mut self, alive_bitset: &AliveBitSet) -> u32 {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.count(alive_bitset)

@@ -216,7 +216,7 @@ impl IndexBuilder {

/// Opens or creates a new index in the provided directory
pub fn open_or_create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
let dir = dir.into();
let dir: Box<dyn Directory> = dir.into();
if !Index::exists(&*dir)? {
return self.create(dir);
}
@@ -494,7 +494,7 @@ impl Index {
.into_iter()
.map(|segment| SegmentReader::open(&segment)?.fields_metadata())
.collect::<Result<_, _>>()?;
Ok(merge_field_meta_data(fields_metadata, &self.schema()))
Ok(merge_field_meta_data(fields_metadata))
}

/// Creates a new segment_meta (Advanced user only).

@@ -1,8 +1,7 @@
use std::io;

use common::json_path_writer::JSON_END_OF_PATH;
use common::BinarySerializable;
use fnv::FnvHashSet;
use common::{BinarySerializable, ByteCount};
#[cfg(feature = "quickwit")]
use futures_util::{FutureExt, StreamExt, TryStreamExt};
#[cfg(feature = "quickwit")]
@@ -36,6 +35,33 @@ pub struct InvertedIndexReader {
total_num_tokens: u64,
}

/// Object that records the amount of space used by a field in an inverted index.
pub(crate) struct InvertedIndexFieldSpace {
pub field_name: String,
pub field_type: Type,
pub postings_size: ByteCount,
pub positions_size: ByteCount,
pub num_terms: u64,
}

/// Returns None if the term is not a valid JSON path.
fn extract_field_name_and_field_type_from_json_path(term: &[u8]) -> Option<(String, Type)> {
let index = term.iter().position(|&byte| byte == JSON_END_OF_PATH)?;
let field_type_code = term.get(index + 1).copied()?;
let field_type = Type::from_code(field_type_code)?;
// Let's flush the current field.
let field_name = String::from_utf8_lossy(&term[..index]).to_string();
Some((field_name, field_type))
}

impl InvertedIndexFieldSpace {
fn record(&mut self, term_info: &TermInfo) {
self.postings_size += ByteCount::from(term_info.posting_num_bytes() as u64);
self.positions_size += ByteCount::from(term_info.positions_num_bytes() as u64);
self.num_terms += 1;
}
}

impl InvertedIndexReader {
pub(crate) fn new(
termdict: TermDictionary,
@@ -81,20 +107,56 @@ impl InvertedIndexReader {
///
/// Notice: This requires a full scan and therefore **very expensive**.
/// TODO: Move to sstable to use the index.
pub fn list_encoded_fields(&self) -> io::Result<Vec<(String, Type)>> {
pub(crate) fn list_encoded_json_fields(&self) -> io::Result<Vec<InvertedIndexFieldSpace>> {
let mut stream = self.termdict.stream()?;
let mut fields = Vec::new();
let mut fields_set = FnvHashSet::default();
while let Some((term, _term_info)) = stream.next() {
if let Some(index) = term.iter().position(|&byte| byte == JSON_END_OF_PATH) {
if !fields_set.contains(&term[..index + 2]) {
fields_set.insert(term[..index + 2].to_vec());
let typ = Type::from_code(term[index + 1]).unwrap();
fields.push((String::from_utf8_lossy(&term[..index]).to_string(), typ));
let mut fields: Vec<InvertedIndexFieldSpace> = Vec::new();

let mut current_field_opt: Option<InvertedIndexFieldSpace> = None;
// Current field bytes, including the JSON_END_OF_PATH.
let mut current_field_bytes: Vec<u8> = Vec::new();

while let Some((term, term_info)) = stream.next() {
if let Some(current_field) = &mut current_field_opt {
if term.starts_with(&current_field_bytes) {
// We are still in the same field.
current_field.record(term_info);
continue;
}
}

// This is a new field!
// Let's flush the current field.
fields.extend(current_field_opt.take());
current_field_bytes.clear();

// And create a new one.
let Some((field_name, field_type)) =
extract_field_name_and_field_type_from_json_path(term)
else {
error!(
"invalid term bytes encountered {term:?}. this only happens if the term \
dictionary is corrupted. please report"
);
continue;
};
let mut field_space = InvertedIndexFieldSpace {
field_name,
field_type,
postings_size: ByteCount::default(),
positions_size: ByteCount::default(),
num_terms: 0u64,
};
field_space.record(term_info);

// We include the json type and the json end of path to make sure the prefix check
// is meaningful.
current_field_bytes.extend_from_slice(&term[..field_space.field_name.len() + 2]);
current_field_opt = Some(field_space);
}

// We need to flush the last field as well.
fields.extend(current_field_opt.take());

Ok(fields)
}

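list_encoded_json_fields above relies on the term dictionary being sorted: all terms of one JSON path share a byte prefix and are therefore adjacent, so per-field statistics can be accumulated in a single streaming pass without a hash set. A simplified standalone sketch of that grouping idea (the fixed-length prefix is an assumption made only for the example):

// Group adjacent, byte-sorted keys by a common prefix, counting terms per group.
fn group_by_prefix(sorted_terms: &[&[u8]], prefix_len: usize) -> Vec<(Vec<u8>, u64)> {
    let mut groups: Vec<(Vec<u8>, u64)> = Vec::new();
    for term in sorted_terms {
        let prefix = &term[..prefix_len.min(term.len())];
        match groups.last_mut() {
            // Still inside the same field: just bump its term count.
            Some((current, count)) if current.as_slice() == prefix => *count += 1,
            // A new field starts here.
            _ => groups.push((prefix.to_vec(), 1)),
        }
    }
    groups
}

fn main() {
    // Prefix = path byte + separator byte + type byte, sorted as in a term dictionary.
    let terms: [&[u8]; 3] = [b"a\x01sbar", b"a\x01sfoo", b"b\x01i42"];
    let groups = group_by_prefix(&terms, 3);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups[0].1, 2);
}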
@@ -1,8 +1,8 @@
use std::collections::HashMap;
use std::ops::BitOrAssign;
use std::sync::{Arc, RwLock};
use std::{fmt, io};

use common::{ByteCount, HasLen};
use fnv::FnvHashMap;
use itertools::Itertools;

@@ -304,12 +304,16 @@ impl SegmentReader {
for (field, field_entry) in self.schema().fields() {
let field_name = field_entry.name().to_string();
let is_indexed = field_entry.is_indexed();

if is_indexed {
let is_json = field_entry.field_type().value_type() == Type::Json;
if is_json {
let term_dictionary_json_field_num_bytes: u64 = self
.termdict_composite
.open_read(field)
.map(|file_slice| file_slice.len() as u64)
.unwrap_or(0u64);
let inv_index = self.inverted_index(field)?;
let encoded_fields_in_index = inv_index.list_encoded_fields()?;
let encoded_fields_in_index = inv_index.list_encoded_json_fields()?;
let mut build_path = |field_name: &str, mut json_path: String| {
// In this case we need to map the potential fast field to the field name
// accepted by the query parser.
@@ -328,30 +332,65 @@ impl SegmentReader {
format!("{field_name}.{json_path}")
}
};
indexed_fields.extend(
encoded_fields_in_index
.into_iter()
.map(|(name, typ)| (build_path(&field_name, name), typ))
.map(|(field_name, typ)| FieldMetadata {
indexed: true,
stored: false,
field_name,
fast: false,
typ,
}),
);
let total_num_terms = encoded_fields_in_index
.iter()
.map(|field_space| field_space.num_terms)
.sum();
indexed_fields.extend(encoded_fields_in_index.into_iter().map(|field_space| {
let field_name = build_path(&field_name, field_space.field_name);
// It is complex to attribute the exact amount of bytes required by specific
// field in the json field. Instead, as a proxy, we
// attribute the total amount of bytes for the entire json field,
// proportionally to the number of terms in each
// fields.
let term_dictionary_size = (term_dictionary_json_field_num_bytes
* field_space.num_terms)
.checked_div(total_num_terms)
.unwrap_or(0);
FieldMetadata {
postings_size: Some(field_space.postings_size),
positions_size: Some(field_space.positions_size),
term_dictionary_size: Some(ByteCount::from(term_dictionary_size)),
fast_size: None,
// The stored flag will be set at the end of this function!
stored: field_entry.is_stored(),
field_name,
typ: field_space.field_type,
}
}));
} else {
let postings_size: ByteCount = self
.postings_composite
.open_read(field)
.map(|posting_fileslice| posting_fileslice.len())
.unwrap_or(0)
.into();
let positions_size: ByteCount = self
.positions_composite
.open_read(field)
.map(|positions_fileslice| positions_fileslice.len())
.unwrap_or(0)
.into();
let term_dictionary_size: ByteCount = self
.termdict_composite
.open_read(field)
.map(|term_dictionary_fileslice| term_dictionary_fileslice.len())
.unwrap_or(0)
.into();
indexed_fields.push(FieldMetadata {
indexed: true,
stored: false,
field_name: field_name.to_string(),
fast: false,
typ: field_entry.field_type().value_type(),
// The stored flag will be set at the end of this function!
stored: field_entry.is_stored(),
fast_size: None,
term_dictionary_size: Some(term_dictionary_size),
postings_size: Some(postings_size),
positions_size: Some(positions_size),
});
}
}
}
let mut fast_fields: Vec<FieldMetadata> = self
let fast_fields: Vec<FieldMetadata> = self
.fast_fields()
.columnar()
.iter_columns()?
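Since the exact term-dictionary bytes of a single JSON sub-field are not tracked, the code above attributes the JSON field's total dictionary size to each sub-field in proportion to its term count, guarding the division against a zero total. A small sketch of that arithmetic with made-up numbers:

// Split a JSON field's total dictionary bytes across sub-fields by term count.
fn attribute_dict_bytes(total_dict_bytes: u64, num_terms_per_field: &[u64]) -> Vec<u64> {
    let total_terms: u64 = num_terms_per_field.iter().sum();
    num_terms_per_field
        .iter()
        .map(|&terms| {
            (total_dict_bytes * terms)
                .checked_div(total_terms)
                // Mirrors the `.unwrap_or(0)` above: no terms means nothing attributed.
                .unwrap_or(0)
        })
        .collect()
}

fn main() {
    // 1000 bytes split over sub-fields with 30, 10 and 60 terms -> 300, 100 and 600 bytes.
    assert_eq!(attribute_dict_bytes(1000, &[30, 10, 60]), vec![300, 100, 600]);
    assert_eq!(attribute_dict_bytes(1000, &[]), Vec::<u64>::new());
}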
@@ -363,23 +402,21 @@ impl SegmentReader {
|
||||
.get(&field_name)
|
||||
.unwrap_or(&field_name)
|
||||
.to_string();
|
||||
let stored = is_field_stored(&field_name, &self.schema);
|
||||
FieldMetadata {
|
||||
indexed: false,
|
||||
stored: false,
|
||||
field_name,
|
||||
fast: true,
|
||||
typ: Type::from(handle.column_type()),
|
||||
stored,
|
||||
fast_size: Some(handle.num_bytes()),
|
||||
term_dictionary_size: None,
|
||||
postings_size: None,
|
||||
positions_size: None,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
// Since the type is encoded differently in the fast field and in the inverted index,
|
||||
// the order of the fields is not guaranteed to be the same. Therefore, we sort the fields.
|
||||
// If we are sure that the order is the same, we can remove this sort.
|
||||
indexed_fields.sort_unstable();
|
||||
fast_fields.sort_unstable();
|
||||
let merged = merge_field_meta_data(vec![indexed_fields, fast_fields], &self.schema);
|
||||
|
||||
Ok(merged)
|
||||
let merged_field_metadatas: Vec<FieldMetadata> =
|
||||
merge_field_meta_data(vec![indexed_fields, fast_fields]);
|
||||
Ok(merged_field_metadatas)
|
||||
}
|
||||
|
||||
/// Returns the segment id
|
||||
@@ -443,20 +480,47 @@ pub struct FieldMetadata {
|
||||
// Notice: Don't reorder the declaration of 1.field_name 2.typ, as it is used for ordering by
|
||||
// field_name then typ.
|
||||
pub typ: Type,
|
||||
/// Is the field indexed for search
|
||||
pub indexed: bool,
|
||||
/// Is the field stored in the doc store
|
||||
pub stored: bool,
|
||||
/// Is the field stored in the columnar storage
|
||||
pub fast: bool,
|
||||
/// Size occupied in the columnar storage (None if not fast)
|
||||
pub fast_size: Option<ByteCount>,
|
||||
/// term_dictionary
|
||||
pub term_dictionary_size: Option<ByteCount>,
|
||||
/// Size occupied in the index postings storage (None if not indexed)
|
||||
pub postings_size: Option<ByteCount>,
|
||||
/// Size occupied in the index postings storage (None if positions are not recorded)
|
||||
pub positions_size: Option<ByteCount>,
|
||||
}
|
||||
impl BitOrAssign for FieldMetadata {
|
||||
fn bitor_assign(&mut self, rhs: Self) {
|
||||
assert!(self.field_name == rhs.field_name);
|
||||
assert!(self.typ == rhs.typ);
|
||||
self.indexed |= rhs.indexed;
|
||||
|
||||
fn merge_options(left: Option<ByteCount>, right: Option<ByteCount>) -> Option<ByteCount> {
|
||||
match (left, right) {
|
||||
(Some(l), Some(r)) => Some(l + r),
|
||||
(None, right) => right,
|
||||
(left, None) => left,
|
||||
}
|
||||
}
|
||||
|
||||
impl FieldMetadata {
|
||||
/// Returns true if and only if the field is indexed.
|
||||
pub fn is_indexed(&self) -> bool {
|
||||
self.postings_size.is_some()
|
||||
}
|
||||
|
||||
/// Returns true if and only if the field is a fast field (i.e.: recorded in columnar format).
|
||||
pub fn is_fast(&self) -> bool {
|
||||
self.fast_size.is_some()
|
||||
}
|
||||
|
||||
/// Merges two field metadata.
|
||||
pub fn merge(&mut self, rhs: Self) {
|
||||
assert_eq!(self.field_name, rhs.field_name);
|
||||
assert_eq!(self.typ, rhs.typ);
|
||||
self.stored |= rhs.stored;
|
||||
self.fast |= rhs.fast;
|
||||
self.fast_size = merge_options(self.fast_size, rhs.fast_size);
|
||||
self.term_dictionary_size =
|
||||
merge_options(self.term_dictionary_size, rhs.term_dictionary_size);
|
||||
self.postings_size = merge_options(self.postings_size, rhs.postings_size);
|
||||
self.positions_size = merge_options(self.positions_size, rhs.positions_size);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -469,23 +533,29 @@ fn is_field_stored(field_name: &str, schema: &Schema) -> bool {
|
||||
}
|
||||
|
||||
/// Helper to merge the field metadata from multiple segments.
|
||||
pub fn merge_field_meta_data(
|
||||
field_metadatas: Vec<Vec<FieldMetadata>>,
|
||||
schema: &Schema,
|
||||
) -> Vec<FieldMetadata> {
|
||||
pub fn merge_field_meta_data(mut field_metadatas: Vec<Vec<FieldMetadata>>) -> Vec<FieldMetadata> {
|
||||
// READ BEFORE REMOVING THIS!
|
||||
//
|
||||
// Because we replace field sep by `.`, fields are not always sorted.
|
||||
// Also, to enforce such an implicit contract, we would have to add
|
||||
// assert here.
|
||||
//
|
||||
// Sorting is linear time on pre-sorted data, so we are simply better off sorting data here.
|
||||
for field_metadatas in &mut field_metadatas {
|
||||
field_metadatas.sort_unstable();
|
||||
}
|
||||
let mut merged_field_metadata = Vec::new();
|
||||
for (_key, mut group) in &field_metadatas
|
||||
.into_iter()
|
||||
.kmerge_by(|left, right| left < right)
|
||||
.kmerge()
|
||||
// TODO: Remove allocation
|
||||
.chunk_by(|el| (el.field_name.to_string(), el.typ))
|
||||
{
|
||||
let mut merged: FieldMetadata = group.next().unwrap();
|
||||
for el in group {
|
||||
merged |= el;
|
||||
merged.merge(el);
|
||||
}
|
||||
// Currently is_field_stored is maybe too slow for the high cardinality case
|
||||
merged.stored = is_field_stored(&merged.field_name, schema);
|
||||
merged_field_metadata.push(merged);
|
||||
}
|
||||
merged_field_metadata
|
||||
@@ -507,7 +577,7 @@ fn intersect_alive_bitset(
|
||||
}
|
||||
|
||||
impl fmt::Debug for SegmentReader {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "SegmentReader({:?})", self.segment_id)
|
||||
}
|
||||
}
|
||||
@@ -516,122 +586,168 @@ impl fmt::Debug for SegmentReader {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::index::Index;
|
||||
use crate::schema::{SchemaBuilder, Term, STORED, TEXT};
|
||||
use crate::schema::{Term, STORED, TEXT};
|
||||
use crate::IndexWriter;
|
||||
|
||||
#[track_caller]
|
||||
fn assert_merge(fields_metadatas: &[Vec<FieldMetadata>], expected: &[FieldMetadata]) {
|
||||
use itertools::Itertools;
|
||||
let num_els = fields_metadatas.len();
|
||||
for permutation in fields_metadatas.iter().cloned().permutations(num_els) {
|
||||
let res = merge_field_meta_data(permutation);
|
||||
assert_eq!(&res, &expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_same() {
|
||||
let schema = SchemaBuilder::new().build();
|
||||
fn test_merge_field_meta_data_same_field() {
|
||||
let field_metadata1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: true,
|
||||
term_dictionary_size: Some(ByteCount::from(100u64)),
|
||||
postings_size: Some(ByteCount::from(1_000u64)),
|
||||
positions_size: Some(ByteCount::from(2_000u64)),
|
||||
fast_size: Some(ByteCount::from(1_000u64).into()),
|
||||
};
|
||||
let field_metadata2 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: true,
|
||||
term_dictionary_size: Some(ByteCount::from(80u64)),
|
||||
postings_size: Some(ByteCount::from(1_500u64)),
|
||||
positions_size: Some(ByteCount::from(2_500u64)),
|
||||
fast_size: Some(ByteCount::from(3_000u64).into()),
|
||||
};
|
||||
let res = merge_field_meta_data(
|
||||
vec![vec![field_metadata1.clone()], vec![field_metadata2]],
|
||||
&schema,
|
||||
let expected = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
stored: false,
|
||||
term_dictionary_size: Some(ByteCount::from(180u64)),
|
||||
postings_size: Some(ByteCount::from(2_500u64)),
|
||||
positions_size: Some(ByteCount::from(4_500u64)),
|
||||
fast_size: Some(ByteCount::from(4_000u64).into()),
|
||||
};
|
||||
assert_merge(
|
||||
&[vec![field_metadata1.clone()], vec![field_metadata2]],
|
||||
&[expected],
|
||||
);
|
||||
assert_eq!(res, vec![field_metadata1]);
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_different() {
|
||||
let schema = SchemaBuilder::new().build();
|
||||
let field_metadata1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: false,
|
||||
stored: false,
|
||||
fast: true,
|
||||
fast_size: Some(1_000u64.into()),
|
||||
term_dictionary_size: Some(100u64.into()),
|
||||
postings_size: Some(2_000u64.into()),
|
||||
positions_size: Some(4_000u64.into()),
|
||||
};
|
||||
let field_metadata2 = FieldMetadata {
|
||||
field_name: "b".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: false,
|
||||
stored: false,
|
||||
fast: true,
|
||||
fast_size: Some(1_002u64.into()),
|
||||
term_dictionary_size: None,
|
||||
postings_size: None,
|
||||
positions_size: None,
|
||||
};
|
||||
let field_metadata3 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
term_dictionary_size: Some(101u64.into()),
|
||||
postings_size: Some(2_001u64.into()),
|
||||
positions_size: Some(4_001u64.into()),
|
||||
stored: false,
|
||||
fast: false,
|
||||
fast_size: None,
|
||||
};
|
||||
let res = merge_field_meta_data(
|
||||
vec![
|
||||
let expected = vec![
|
||||
FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
stored: false,
|
||||
term_dictionary_size: Some(201u64.into()),
|
||||
postings_size: Some(4_001u64.into()),
|
||||
positions_size: Some(8_001u64.into()),
|
||||
fast_size: Some(1_000u64.into()),
|
||||
},
|
||||
FieldMetadata {
|
||||
field_name: "b".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
stored: false,
|
||||
term_dictionary_size: None,
|
||||
postings_size: None,
|
||||
positions_size: None,
|
||||
fast_size: Some(1_002u64.into()),
|
||||
},
|
||||
];
|
||||
assert_merge(
|
||||
&[
|
||||
vec![field_metadata1.clone(), field_metadata2.clone()],
|
||||
vec![field_metadata3],
|
||||
],
|
||||
&schema,
|
||||
&expected,
|
||||
);
|
||||
let field_metadata_expected1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
stored: false,
|
||||
fast: true,
|
||||
};
|
||||
assert_eq!(res, vec![field_metadata_expected1, field_metadata2.clone()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_merge() {
|
||||
use pretty_assertions::assert_eq;
|
||||
let get_meta_data = |name: &str, typ: Type| FieldMetadata {
|
||||
field_name: name.to_string(),
|
||||
typ,
|
||||
indexed: false,
|
||||
term_dictionary_size: None,
|
||||
postings_size: None,
|
||||
positions_size: None,
|
||||
stored: false,
|
||||
fast: true,
|
||||
fast_size: Some(1u64.into()),
|
||||
};
|
||||
let schema = SchemaBuilder::new().build();
|
||||
let mut metas = vec![get_meta_data("d", Type::Str), get_meta_data("e", Type::U64)];
|
||||
metas.sort();
|
||||
let res = merge_field_meta_data(vec![vec![get_meta_data("e", Type::Str)], metas], &schema);
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![
|
||||
let metas = vec![get_meta_data("d", Type::Str), get_meta_data("e", Type::U64)];
|
||||
assert_merge(
|
||||
&[vec![get_meta_data("e", Type::Str)], metas],
|
||||
&[
|
||||
get_meta_data("d", Type::Str),
|
||||
get_meta_data("e", Type::Str),
|
||||
get_meta_data("e", Type::U64),
|
||||
]
|
||||
],
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_field_meta_data_bitxor() {
|
||||
let field_metadata1 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: false,
|
||||
term_dictionary_size: None,
|
||||
postings_size: None,
|
||||
positions_size: None,
|
||||
stored: false,
|
||||
fast: true,
|
||||
fast_size: Some(10u64.into()),
|
||||
};
|
||||
let field_metadata2 = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
term_dictionary_size: Some(10u64.into()),
|
||||
postings_size: Some(11u64.into()),
|
||||
positions_size: Some(12u64.into()),
|
||||
stored: false,
|
||||
fast: false,
|
||||
fast_size: None,
|
||||
};
|
||||
let field_metadata_expected = FieldMetadata {
|
||||
field_name: "a".to_string(),
|
||||
typ: crate::schema::Type::Str,
|
||||
indexed: true,
|
||||
term_dictionary_size: Some(10u64.into()),
|
||||
postings_size: Some(11u64.into()),
|
||||
positions_size: Some(12u64.into()),
|
||||
stored: false,
|
||||
fast: true,
|
||||
fast_size: Some(10u64.into()),
|
||||
};
|
||||
let mut res1 = field_metadata1.clone();
|
||||
res1 |= field_metadata2.clone();
|
||||
res1.merge(field_metadata2.clone());
|
||||
let mut res2 = field_metadata2.clone();
|
||||
res2 |= field_metadata1;
|
||||
res2.merge(field_metadata1);
|
||||
assert_eq!(res1, field_metadata_expected);
|
||||
assert_eq!(res2, field_metadata_expected);
|
||||
}
|
||||
@@ -662,6 +778,7 @@ mod test {
|
||||
assert_eq!(4, searcher.segment_reader(0).max_doc());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_alive_docs_iterator() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
|
||||
@@ -370,7 +370,7 @@ impl<D: Document> IndexWriter<D> {
|
||||
.map_err(|_| error_in_index_worker_thread("Failed to join merging thread."));
|
||||
|
||||
if let Err(ref e) = result {
|
||||
error!("Some merging thread failed {:?}", e);
|
||||
error!("Some merging thread failed {e:?}");
|
||||
}
|
||||
|
||||
result
|
||||
@@ -615,7 +615,7 @@ impl<D: Document> IndexWriter<D> {
|
||||
/// It is also possible to add a payload to the `commit`
|
||||
/// using this API.
|
||||
/// See [`PreparedCommit::set_payload()`].
|
||||
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit<D>> {
|
||||
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit<'_, D>> {
|
||||
// Here, because we join all of the worker threads,
|
||||
// all of the segment update for this commit have been
|
||||
// sent.
|
||||
@@ -644,7 +644,7 @@ impl<D: Document> IndexWriter<D> {
|
||||
|
||||
let commit_opstamp = self.stamper.stamp();
|
||||
let prepared_commit = PreparedCommit::new(self, commit_opstamp);
|
||||
info!("Prepared commit {}", commit_opstamp);
|
||||
info!("Prepared commit {commit_opstamp}");
|
||||
Ok(prepared_commit)
|
||||
}
|
||||
|
||||
|
||||
@@ -61,6 +61,8 @@ type AddBatchReceiver<D> = channel::Receiver<AddBatch<D>>;
|
||||
#[cfg(test)]
|
||||
mod tests_mmap {
|
||||
|
||||
use common::ByteCount;
|
||||
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::agg_result::AggregationResults;
|
||||
use crate::aggregation::AggregationCollector;
|
||||
@@ -280,11 +282,14 @@ mod tests_mmap {
|
||||
field_name_out
|
||||
};
|
||||
|
||||
let mut fields = reader.searcher().segment_readers()[0]
|
||||
let mut fields: Vec<(String, Type)> = reader.searcher().segment_readers()[0]
|
||||
.inverted_index(field)
|
||||
.unwrap()
|
||||
.list_encoded_fields()
|
||||
.unwrap();
|
||||
.list_encoded_json_fields()
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|field_space| (field_space.field_name, field_space.field_type))
|
||||
.collect();
|
||||
assert_eq!(fields.len(), 8);
|
||||
fields.sort();
|
||||
let mut expected_fields = vec![
|
||||
@@ -385,7 +390,12 @@ mod tests_mmap {
|
||||
let reader = &searcher.segment_readers()[0];
|
||||
let inverted_index = reader.inverted_index(json_field).unwrap();
|
||||
assert_eq!(
|
||||
inverted_index.list_encoded_fields().unwrap(),
|
||||
inverted_index
|
||||
.list_encoded_json_fields()
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|field_space| (field_space.field_name, field_space.field_type))
|
||||
.collect::<Vec<_>>(),
|
||||
[
|
||||
("k8s.container.name".to_string(), Type::Str),
|
||||
("sub\u{1}a".to_string(), Type::I64),
|
||||
@@ -402,19 +412,41 @@ mod tests_mmap {
|
||||
fn test_json_fields_metadata_expanded_dots_one_segment() {
|
||||
test_json_fields_metadata(true, true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_fields_metadata_expanded_dots_multi_segment() {
|
||||
test_json_fields_metadata(true, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_fields_metadata_no_expanded_dots_one_segment() {
|
||||
test_json_fields_metadata(false, true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_fields_metadata_no_expanded_dots_multi_segment() {
|
||||
test_json_fields_metadata(false, false);
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn assert_size_eq(lhs: Option<ByteCount>, rhs: Option<ByteCount>) {
|
||||
let ignore_actual_values = |size_opt: Option<ByteCount>| size_opt.map(|val| val > 0);
|
||||
assert_eq!(ignore_actual_values(lhs), ignore_actual_values(rhs));
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn assert_field_metadata_eq_but_ignore_field_size(
|
||||
expected: &FieldMetadata,
|
||||
actual: &FieldMetadata,
|
||||
) {
|
||||
assert_eq!(&expected.field_name, &actual.field_name);
|
||||
assert_eq!(&expected.typ, &actual.typ);
|
||||
assert_eq!(&expected.stored, &actual.stored);
|
||||
assert_size_eq(expected.postings_size, actual.postings_size);
|
||||
assert_size_eq(expected.positions_size, actual.positions_size);
|
||||
assert_size_eq(expected.fast_size, actual.fast_size);
|
||||
}
|
||||
|
||||
fn test_json_fields_metadata(expanded_dots: bool, one_segment: bool) {
|
||||
use pretty_assertions::assert_eq;
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -453,81 +485,101 @@ mod tests_mmap {
|
||||
assert_eq!(searcher.num_docs(), 3);
|
||||
|
||||
let fields_metadata = index.fields_metadata().unwrap();
|
||||
assert_eq!(
|
||||
fields_metadata,
|
||||
[
|
||||
FieldMetadata {
|
||||
field_name: "empty".to_string(),
|
||||
indexed: true,
|
||||
stored: true,
|
||||
fast: true,
|
||||
typ: Type::U64
|
||||
|
||||
let expected_fields = &[
|
||||
FieldMetadata {
|
||||
field_name: "empty".to_string(),
|
||||
            stored: true,
            typ: Type::U64,
            term_dictionary_size: Some(0u64.into()),
            fast_size: Some(1u64.into()),
            postings_size: Some(0u64.into()),
            positions_size: Some(0u64.into()),
        },
        FieldMetadata {
            field_name: if expanded_dots {
                "json.shadow.k8s.container.name".to_string()
            } else {
                "json.shadow.k8s\\.container\\.name".to_string()
            },
        FieldMetadata {
            field_name: if expanded_dots {
                "json.shadow.k8s.container.name".to_string()
            } else {
                "json.shadow.k8s\\.container\\.name".to_string()
            },
            indexed: true,
            stored: true,
            fast: true,
            typ: Type::Str
        },
        FieldMetadata {
            field_name: "json.shadow.sub.a".to_string(),
            indexed: true,
            stored: true,
            fast: true,
            typ: Type::I64
        },
        FieldMetadata {
            field_name: "json.shadow.sub.b".to_string(),
            indexed: true,
            stored: true,
            fast: true,
            typ: Type::I64
        },
        FieldMetadata {
            field_name: "json.shadow.suber.a".to_string(),
            indexed: true,
            stored: true,
            fast: true,
            typ: Type::I64
        },
        FieldMetadata {
            field_name: "json.shadow.suber.a".to_string(),
            indexed: true,
            stored: true,
            fast: true,
            typ: Type::Str
        },
        FieldMetadata {
            field_name: "json.shadow.suber.b".to_string(),
            indexed: true,
            stored: true,
            fast: true,
            typ: Type::I64
        },
        FieldMetadata {
            field_name: "json.shadow.val".to_string(),
            indexed: true,
            stored: true,
            fast: true,
            typ: Type::Str
        },
        FieldMetadata {
            field_name: "numbers".to_string(),
            indexed: false,
            stored: false,
            fast: true,
            typ: Type::U64
        }
    ]
);
            stored: true,
            typ: Type::Str,
            term_dictionary_size: Some(1u64.into()),
            fast_size: Some(1u64.into()),
            postings_size: Some(1u64.into()),
            positions_size: Some(1u64.into()),
        },
        FieldMetadata {
            field_name: "json.shadow.sub.a".to_string(),
            typ: Type::I64,
            stored: true,
            fast_size: Some(1u64.into()),
            term_dictionary_size: Some(1u64.into()),
            postings_size: Some(1u64.into()),
            positions_size: Some(1u64.into()),
        },
        FieldMetadata {
            field_name: "json.shadow.sub.b".to_string(),
            typ: Type::I64,
            stored: true,
            fast_size: Some(1u64.into()),
            term_dictionary_size: Some(1u64.into()),
            postings_size: Some(1u64.into()),
            positions_size: Some(1u64.into()),
        },
        FieldMetadata {
            field_name: "json.shadow.suber.a".to_string(),
            stored: true,
            typ: Type::I64,
            fast_size: Some(1u64.into()),
            term_dictionary_size: Some(1u64.into()),
            postings_size: Some(1u64.into()),
            positions_size: Some(1u64.into()),
        },
        FieldMetadata {
            field_name: "json.shadow.suber.a".to_string(),
            typ: Type::Str,
            stored: true,
            fast_size: Some(1u64.into()),
            term_dictionary_size: Some(1u64.into()),
            postings_size: Some(1u64.into()),
            positions_size: Some(1u64.into()),
        },
        FieldMetadata {
            field_name: "json.shadow.suber.b".to_string(),
            typ: Type::I64,
            stored: true,
            fast_size: Some(1u64.into()),
            term_dictionary_size: Some(1u64.into()),
            postings_size: Some(1u64.into()),
            positions_size: Some(1u64.into()),
        },
        FieldMetadata {
            field_name: "json.shadow.val".to_string(),
            typ: Type::Str,
            stored: true,
            fast_size: Some(1u64.into()),
            term_dictionary_size: Some(1u64.into()),
            postings_size: Some(1u64.into()),
            positions_size: Some(1u64.into()),
        },
        FieldMetadata {
            field_name: "numbers".to_string(),
            stored: false,
            typ: Type::U64,
            fast_size: Some(1u64.into()),
            term_dictionary_size: None,
            postings_size: None,
            positions_size: None,
        },
    ];
    assert_eq!(fields_metadata.len(), expected_fields.len());
    for (expected, value) in expected_fields.iter().zip(fields_metadata.iter()) {
        assert_field_metadata_eq_but_ignore_field_size(expected, value);
    }
    let query_parser = QueryParser::for_index(&index, vec![]);
    // Test if returned field name can be queried
    for indexed_field in fields_metadata.iter().filter(|meta| meta.indexed) {
    for indexed_field in fields_metadata.iter().filter(|meta| meta.is_indexed()) {
        let val = if indexed_field.typ == Type::Str {
            "a"
        } else {
@@ -543,7 +595,10 @@ mod tests_mmap {
        }
    }
    // Test if returned field name can be used for aggregation
    for fast_field in fields_metadata.iter().filter(|meta| meta.fast) {
    for fast_field in fields_metadata
        .iter()
        .filter(|field_metadata| field_metadata.is_fast())
    {
        let agg_req_str = json!(
        {
            "termagg": {
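The expected values above switch from the old boolean `indexed`/`fast` flags to optional per-field sizes, and the comparison goes through `assert_field_metadata_eq_but_ignore_field_size`, whose body is not part of this diff. As a rough illustration of what such a helper could look like, here is a self-contained sketch with a stand-in struct; the real `FieldMetadata` and helper live in tantivy and may differ.

```rust
// Illustrative stand-in only: the real FieldMetadata is defined in tantivy and
// this diff does not show its definition or the helper's body.
#[derive(Debug, Clone, PartialEq)]
enum Typ { Str, U64, I64 }

#[derive(Debug, Clone)]
struct FieldMetadataSketch {
    field_name: String,
    typ: Typ,
    stored: bool,
    term_dictionary_size: Option<u64>,
    fast_size: Option<u64>,
    postings_size: Option<u64>,
    positions_size: Option<u64>,
}

/// Compare everything except the concrete byte counts: the test only cares
/// whether a given component (dictionary, fast field, postings, positions)
/// exists at all, because the exact sizes vary with encoding details.
fn assert_field_metadata_eq_but_ignore_field_size(
    expected: &FieldMetadataSketch,
    actual: &FieldMetadataSketch,
) {
    assert_eq!(expected.field_name, actual.field_name);
    assert_eq!(expected.typ, actual.typ);
    assert_eq!(expected.stored, actual.stored);
    assert_eq!(expected.term_dictionary_size.is_some(), actual.term_dictionary_size.is_some());
    assert_eq!(expected.fast_size.is_some(), actual.fast_size.is_some());
    assert_eq!(expected.postings_size.is_some(), actual.postings_size.is_some());
    assert_eq!(expected.positions_size.is_some(), actual.positions_size.is_some());
}

fn main() {
    let expected = FieldMetadataSketch {
        field_name: "numbers".to_string(),
        typ: Typ::U64,
        stored: false,
        term_dictionary_size: None,
        fast_size: Some(1),
        postings_size: None,
        positions_size: None,
    };
    // The actual value reports a real byte count; the helper still accepts it.
    let actual = FieldMetadataSketch { fast_size: Some(4_096), ..expected.clone() };
    assert_field_metadata_eq_but_ignore_field_size(&expected, &actual);
    println!("ok");
}
```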
@@ -501,8 +501,7 @@ impl SegmentUpdater {
            Ok(segment_entries) => segment_entries,
            Err(err) => {
                warn!(
                    "Starting the merge failed for the following reason. This is not fatal. {}",
                    err
                    "Starting the merge failed for the following reason. This is not fatal. {err}"
                );
                return err.into();
            }

@@ -873,8 +873,8 @@ mod tests {

    fn assert_type(reader: &SegmentReader, field: &str, typ: ColumnType) {
        let cols = reader.fast_fields().dynamic_column_handles(field).unwrap();
        assert_eq!(cols.len(), 1, "{}", field);
        assert_eq!(cols[0].column_type(), typ, "{}", field);
        assert_eq!(cols.len(), 1, "{field}");
        assert_eq!(cols[0].column_type(), typ, "{field}");
    }
    assert_type(segment_reader, "json.toto", ColumnType::Str);
    assert_type(segment_reader, "json.float", ColumnType::F64);
53
src/lib.rs
@@ -55,7 +55,7 @@
//! // between indexing threads.
//! let mut index_writer: IndexWriter = index.writer(100_000_000)?;
//!
//! // Let's index one documents!
//! // Let's index a document!
//! index_writer.add_document(doc!(
//!     title => "The Old Man and the Sea",
//!     body => "He was an old man who fished alone in a skiff in \
@@ -165,7 +165,7 @@ mod macros;
mod future_result;

// Re-exports
pub use common::DateTime;
pub use common::{ByteCount, DateTime};
pub use {columnar, query_grammar, time};

pub use crate::error::TantivyError;
@@ -370,6 +370,8 @@ macro_rules! fail_point {
/// Common test utilities.
#[cfg(test)]
pub mod tests {
    use std::collections::BTreeMap;

    use common::{BinarySerializable, FixedSize};
    use query_grammar::{UserInputAst, UserInputLeaf, UserInputLiteral};
    use rand::distributions::{Bernoulli, Uniform};
@@ -382,7 +384,7 @@ pub mod tests {
    use crate::index::SegmentReader;
    use crate::merge_policy::NoMergePolicy;
    use crate::postings::Postings;
    use crate::query::BooleanQuery;
    use crate::query::{BooleanQuery, QueryParser};
    use crate::schema::*;
    use crate::{DateTime, DocAddress, Index, IndexWriter, ReloadPolicy};

@@ -1223,4 +1225,49 @@ pub mod tests {
        );
        assert_eq!(dt_from_ts_nanos.to_hms_micro(), offset_dt.to_hms_micro());
    }

    #[test]
    fn test_json_number_ambiguity() {
        let mut schema_builder = Schema::builder();
        let json_field = schema_builder.add_json_field("number", crate::schema::TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();
        {
            let mut doc = TantivyDocument::new();
            let mut obj = BTreeMap::default();
            obj.insert("key".to_string(), OwnedValue::I64(1i64));
            doc.add_object(json_field, obj);
            index_writer.add_document(doc).unwrap();
        }
        {
            let mut doc = TantivyDocument::new();
            let mut obj = BTreeMap::default();
            obj.insert("key".to_string(), OwnedValue::U64(1u64));
            doc.add_object(json_field, obj);
            index_writer.add_document(doc).unwrap();
        }
        {
            let mut doc = TantivyDocument::new();
            let mut obj = BTreeMap::default();
            obj.insert("key".to_string(), OwnedValue::F64(1.0f64));
            doc.add_object(json_field, obj);
            index_writer.add_document(doc).unwrap();
        }
        index_writer.commit().unwrap();
        let searcher = index.reader().unwrap().searcher();
        assert_eq!(searcher.num_docs(), 3);
        {
            let parser = QueryParser::for_index(&index, vec![]);
            let query = parser.parse_query("number.key:1").unwrap();
            let count = searcher.search(&query, &crate::collector::Count).unwrap();
            assert_eq!(count, 3);
        }
        {
            let parser = QueryParser::for_index(&index, vec![]);
            let query = parser.parse_query("number.key:1.0").unwrap();
            let count = searcher.search(&query, &crate::collector::Count).unwrap();
            assert_eq!(count, 3);
        }
    }
}

@@ -41,8 +41,6 @@ const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
#[cfg(test)]
pub(crate) mod tests {

    use std::iter;

    use proptest::prelude::*;
    use proptest::sample::select;
@@ -206,7 +204,7 @@ pub(crate) mod tests {
    #[test]
    fn test_position() -> crate::Result<()> {
        const CONST_VAL: u32 = 9u32;
        let positions_delta: Vec<u32> = iter::repeat(CONST_VAL).take(2_000_000).collect();
        let positions_delta: Vec<u32> = std::iter::repeat_n(CONST_VAL, 2_000_000).collect();
        let positions_data = create_positions_data(&positions_delta[..])?;
        assert_eq!(positions_data.len(), 1_015_627);
        let mut position_reader = PositionReader::open(positions_data)?;

@@ -227,19 +227,6 @@ impl BlockSegmentPostings {
        self.doc_decoder.output_array()
    }

    /// Returns a full block, regardless of whether the block is complete or incomplete (
    /// as it happens for the last block of the posting list).
    ///
    /// In the latter case, the block is guaranteed to be padded with the sentinel value:
    /// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
    ///
    /// This method is useful to run SSE2 linear search.
    #[inline]
    pub(crate) fn full_block(&self) -> &[DocId; COMPRESSION_BLOCK_SIZE] {
        debug_assert!(self.block_is_loaded());
        self.doc_decoder.full_output()
    }

    /// Return the document at index `idx` of the block.
    #[inline]
    pub fn doc(&self, idx: usize) -> u32 {
@@ -275,22 +262,36 @@ impl BlockSegmentPostings {
    ///
    /// If all docs are smaller than target, the block loaded may be empty,
    /// or be the last, incomplete VInt block.
    pub fn seek(&mut self, target_doc: DocId) {
        self.shallow_seek(target_doc);
    pub fn seek(&mut self, target_doc: DocId) -> usize {
        // Move to the block that might contain our document.
        self.seek_block(target_doc);
        self.load_block();

        // At this point we are on the block that might contain our document.
        let doc = self.doc_decoder.seek_within_block(target_doc);

        // The last block is not full and padded with TERMINATED,
        // so we are guaranteed to have at least one value (real or padding)
        // that is >= target_doc.
        debug_assert!(doc < COMPRESSION_BLOCK_SIZE);

        // `doc` is now the first element >= `target_doc`.
        // If all docs are smaller than target, the current block is incomplete and padded
        // with TERMINATED. After the search, the cursor points to the first TERMINATED.
        doc
    }

    pub(crate) fn position_offset(&self) -> u64 {
        self.skip_reader.position_offset()
    }

    /// Dangerous API! This calls seek on the skip list,
    /// Dangerous API! This seeks the next block on the skip list,
    /// but does not `.load_block()` afterwards.
    ///
    /// `.load_block()` needs to be called manually afterwards.
    /// If all docs are smaller than target, the block loaded may be empty,
    /// or be the last, incomplete VInt block.
    pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
    pub(crate) fn seek_block(&mut self, target_doc: DocId) {
        if self.skip_reader.seek(target_doc) {
            self.block_max_score_cache = None;
            self.block_loaded = false;
@@ -151,9 +151,11 @@ impl BlockDecoder {
        &self.output[..self.output_len]
    }

    /// Return in-block index of first value >= `target`.
    /// Uses the padded buffer to enable branchless search.
    #[inline]
    pub(crate) fn full_output(&self) -> &[u32; COMPRESSION_BLOCK_SIZE] {
        &self.output
    pub(crate) fn seek_within_block(&self, target: u32) -> usize {
        crate::postings::branchless_binary_search(&self.output, target)
    }

    #[inline]

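`seek_within_block` relies on the decoder buffer always being a full, sorted `COMPRESSION_BLOCK_SIZE` array padded with `TERMINATED` past the last real doc, which is what makes a fixed-trip-count, branchless search possible. The sketch below is a self-contained toy version of that idea; it is not the crate's `branchless_binary_search`, whose actual implementation is not shown in this diff.

```rust
const BLOCK_SIZE: usize = 128; // power of two, like COMPRESSION_BLOCK_SIZE
const TERMINATED: u32 = u32::MAX;

/// Returns the index of the first element >= `target`.
/// Because the block is sorted and padded with `TERMINATED`, the result is a
/// valid in-block index whenever `target < TERMINATED`.
fn branchless_binary_search(block: &[u32; BLOCK_SIZE], target: u32) -> usize {
    let mut start = 0usize;
    let mut len = BLOCK_SIZE;
    // Each iteration halves the window. The comparison result is used as an
    // offset instead of a branch, so the loop runs a fixed 7 iterations
    // (log2(128)) with no unpredictable branches.
    while len > 1 {
        len /= 2;
        start += usize::from(block[start + len - 1] < target) * len;
    }
    start
}

fn main() {
    let mut block = [TERMINATED; BLOCK_SIZE];
    for (i, slot) in block.iter_mut().take(100).enumerate() {
        *slot = (i as u32) * 3; // sorted doc ids: 0, 3, 6, ...
    }
    assert_eq!(branchless_binary_search(&block, 0), 0);
    assert_eq!(branchless_binary_search(&block, 7), 3); // first doc >= 7 is 9, at index 3
    assert_eq!(branchless_binary_search(&block, 297), 99);
    // A target beyond all real docs lands on the TERMINATED padding.
    assert_eq!(block[branchless_binary_search(&block, 300)], TERMINATED);
    println!("ok");
}
```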
@@ -667,12 +667,15 @@ mod bench {
            .read_postings(&TERM_D, IndexRecordOption::Basic)
            .unwrap()
            .unwrap();
        let mut intersection = Intersection::new(vec![
            segment_postings_a,
            segment_postings_b,
            segment_postings_c,
            segment_postings_d,
        ]);
        let mut intersection = Intersection::new(
            vec![
                segment_postings_a,
                segment_postings_b,
                segment_postings_c,
                segment_postings_d,
            ],
            reader.searcher().num_docs() as u32,
        );
        while intersection.advance() != TERMINATED {}
    });
}

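The benchmark now has to pass the searcher's `num_docs` to `Intersection::new`. For readers who want the intuition behind what this benchmark drives, here is a self-contained toy of the leapfrog intersection pattern built on `seek`-style cursors (return the first doc >= target, `TERMINATED` once exhausted); tantivy's real `DocSet` and `Intersection` types are more involved than this sketch.

```rust
const TERMINATED: u32 = u32::MAX;

/// Toy cursor over a sorted list of doc ids, mimicking the seek contract used
/// by the posting-list cursors in this diff.
struct Cursor {
    docs: Vec<u32>,
    pos: usize,
}

impl Cursor {
    fn new(docs: Vec<u32>) -> Self {
        Cursor { docs, pos: 0 }
    }
    fn doc(&self) -> u32 {
        self.docs.get(self.pos).copied().unwrap_or(TERMINATED)
    }
    /// Advance to the first doc >= `target` and return it.
    fn seek(&mut self, target: u32) -> u32 {
        while self.doc() < target {
            self.pos += 1;
        }
        self.doc()
    }
}

/// Leapfrog intersection: keep seeking every cursor to the current candidate
/// until they all agree on the same doc id.
fn intersect(cursors: &mut [Cursor]) -> Vec<u32> {
    let mut hits = Vec::new();
    let mut candidate = cursors.iter().map(|c| c.doc()).max().unwrap_or(TERMINATED);
    'outer: while candidate != TERMINATED {
        for cursor in cursors.iter_mut() {
            let doc = cursor.seek(candidate);
            if doc != candidate {
                candidate = doc; // jumped past the candidate: start a new round
                continue 'outer;
            }
        }
        hits.push(candidate);
        candidate += 1; // move past the match and look for the next one
    }
    hits
}

fn main() {
    let mut cursors = vec![
        Cursor::new(vec![1, 3, 5, 7, 9, 11]),
        Cursor::new(vec![3, 4, 5, 9, 10]),
        Cursor::new(vec![0, 3, 5, 9, 12]),
    ];
    assert_eq!(intersect(&mut cursors), vec![3, 5, 9]);
    println!("ok");
}
```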
@@ -4,7 +4,7 @@ use crate::docset::DocSet;
use crate::fastfield::AliveBitSet;
use crate::positions::PositionReader;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::postings::{branchless_binary_search, BlockSegmentPostings, Postings};
use crate::postings::{BlockSegmentPostings, Postings};
use crate::{DocId, TERMINATED};

/// `SegmentPostings` represents the inverted list or postings associated with
@@ -175,26 +175,11 @@ impl DocSet for SegmentPostings {
            return self.doc();
        }

        self.block_cursor.seek(target);

        // At this point we are on the block that might contain our document.
        let output = self.block_cursor.full_block();
        self.cur = branchless_binary_search(output, target);

        // The last block is not full and padded with the value TERMINATED,
        // so we are guaranteed to have at least one doc in the block (a real one or the padding)
        // that is greater or equal to the target.
        debug_assert!(self.cur < COMPRESSION_BLOCK_SIZE);

        // `doc` is now the first element >= `target`.

        // If all docs are smaller than target, the current block should be incomplete and padded
        // with the value `TERMINATED`.
        //
        // After the search, the cursor should point to the first value of TERMINATED.
        let doc = output[self.cur];
        // Delegate block-local search to BlockSegmentPostings::seek, which returns
        // the in-block index of the first doc >= target.
        self.cur = self.block_cursor.seek(target);
        let doc = self.doc();
        debug_assert!(doc >= target);
        debug_assert_eq!(doc, self.doc());
        doc
    }

@@ -75,7 +75,7 @@ impl InvertedIndexSerializer {
        field: Field,
        total_num_tokens: u64,
        fieldnorm_reader: Option<FieldNormReader>,
    ) -> io::Result<FieldSerializer> {
    ) -> io::Result<FieldSerializer<'_>> {
        let field_entry: &FieldEntry = self.schema.get_field_entry(field);
        let term_dictionary_write = self.terms_write.for_field(field);
        let postings_write = self.postings_write.for_field(field);
@@ -126,7 +126,7 @@ impl<'a> FieldSerializer<'a> {
        let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
        let average_fieldnorm = fieldnorm_reader
            .as_ref()
            .map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
            .map(|ff_reader| total_num_tokens as Score / ff_reader.num_docs() as Score)
            .unwrap_or(0.0);
        let postings_serializer = PostingsSerializer::new(
            postings_write,

@@ -1,5 +1,3 @@
use serde::{Deserialize, Serialize};

use crate::fieldnorm::FieldNormReader;
use crate::query::Explanation;
use crate::schema::Field;
@@ -68,12 +66,6 @@ fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] {
    cache
}

#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct Bm25Params {
    pub idf: Score,
    pub avg_fieldnorm: Score,
}

/// A struct used for computing BM25 scores.
#[derive(Clone)]
pub struct Bm25Weight {

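For context on `Bm25Params` (`idf`, `avg_fieldnorm`) and the tf cache above: the weight computes a BM25-style score from the term's inverse document frequency and a length normalization against the average fieldnorm. The snippet below sketches the textbook formula with the usual k1 = 1.2 and b = 0.75 constants; it is illustrative only and not copied from tantivy's `Bm25Weight`.

```rust
const K1: f32 = 1.2;
const B: f32 = 0.75;

/// Inverse document frequency, smoothed so it stays positive.
fn idf(doc_freq: u64, doc_count: u64) -> f32 {
    let (df, n) = (doc_freq as f32, doc_count as f32);
    (1.0 + (n - df + 0.5) / (df + 0.5)).ln()
}

/// BM25 contribution of one term in one document.
/// `fieldnorm` is the document's field length, `avg_fieldnorm` the average length.
fn bm25_term_score(idf: f32, term_freq: u32, fieldnorm: u32, avg_fieldnorm: f32) -> f32 {
    let tf = term_freq as f32;
    let norm = K1 * (1.0 - B + B * fieldnorm as f32 / avg_fieldnorm);
    idf * tf * (K1 + 1.0) / (tf + norm)
}

fn main() {
    let idf = idf(10, 1_000);
    // Longer documents (larger fieldnorm) get a smaller score for the same tf.
    let short_doc = bm25_term_score(idf, 3, 50, 100.0);
    let long_doc = bm25_term_score(idf, 3, 400, 100.0);
    assert!(short_doc > long_doc);
    println!("short={short_doc:.3} long={long_doc:.3}");
}
```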
@@ -167,7 +167,7 @@ pub fn block_wand(
    let block_max_score_upperbound: Score = scorers[..pivot_len]
        .iter_mut()
        .map(|scorer| {
            scorer.shallow_seek(pivot_doc);
            scorer.seek_block(pivot_doc);
            scorer.block_max_score()
        })
        .sum();
@@ -234,7 +234,7 @@ pub fn block_wand_single_scorer(
            return;
        }
        doc = last_doc_in_block + 1;
        scorer.shallow_seek(doc);
        scorer.seek_block(doc);
    }
    // Seek will effectively load that block.
    doc = scorer.seek(doc);
@@ -256,7 +256,7 @@ pub fn block_wand_single_scorer(
            }
        }
        doc += 1;
        scorer.shallow_seek(doc);
        scorer.seek_block(doc);
    }
}

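The `block_max_score_upperbound` computed above (each scorer's block-level maximum after `seek_block(pivot_doc)`, summed over the scorers up to the pivot) is what lets block-max WAND skip whole blocks. A minimal sketch of that pruning decision, with made-up numbers:

```rust
/// Decide whether the block containing the pivot can possibly beat the
/// current top-k threshold. `block_max_scores` are per-scorer upper bounds on
/// the contribution inside that block.
fn block_can_compete(block_max_scores: &[f32], threshold: f32) -> bool {
    let upper_bound: f32 = block_max_scores.iter().sum();
    upper_bound > threshold
}

fn main() {
    // Three query terms whose block-level maxima sum to 2.9.
    let maxima = [1.2f32, 0.9, 0.8];
    // If the weakest entry in the top-k heap already scores 3.1, this block
    // cannot displace it, so the scorers jump straight past the block boundary.
    assert!(!block_can_compete(&maxima, 3.1));
    assert!(block_can_compete(&maxima, 2.5));
    println!("ok");
}
```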
@@ -302,7 +302,6 @@ fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
mod tests {
    use std::cmp::Ordering;
    use std::collections::BinaryHeap;
    use std::iter;

    use proptest::prelude::*;

@@ -368,10 +367,14 @@ mod tests {
        checkpoints
    }

    fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
    fn compute_checkpoints_manual(
        term_scorers: Vec<TermScorer>,
        n: usize,
        max_doc: u32,
    ) -> Vec<(DocId, Score)> {
        let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
        let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
        let mut scorer = BufferedUnionScorer::build(term_scorers, SumCombiner::default);
        let mut scorer = BufferedUnionScorer::build(term_scorers, SumCombiner::default, max_doc);

        let mut limit = Score::MIN;
        loop {
@@ -436,7 +439,7 @@ mod tests {
        let fieldnorms_expanded = fieldnorms
            .iter()
            .cloned()
            .flat_map(|fieldnorm| iter::repeat(fieldnorm).take(REPEAT))
            .flat_map(|fieldnorm| std::iter::repeat_n(fieldnorm, REPEAT))
            .collect::<Vec<u32>>();

        let postings_lists_expanded: Vec<Vec<(DocId, u32)>> = posting_lists
@@ -479,7 +482,8 @@ mod tests {
        for top_k in 1..4 {
            let checkpoints_for_each_pruning =
                compute_checkpoints_for_each_pruning(term_scorers.clone(), top_k);
            let checkpoints_manual = compute_checkpoints_manual(term_scorers.clone(), top_k);
            let checkpoints_manual =
                compute_checkpoints_manual(term_scorers.clone(), top_k, 100_000);
            assert_eq!(checkpoints_for_each_pruning.len(), checkpoints_manual.len());
            for (&(left_doc, left_score), &(right_doc, right_score)) in checkpoints_for_each_pruning
                .iter()

@@ -180,7 +180,7 @@ impl BooleanQuery {
    pub fn new(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
        // If the bool query includes at least one should clause
        // and no Must or MustNot clauses, the default value is 1. Otherwise, the default value is
        // 0. Keep pace with Elasticsearch.
        // 0. Keep compatible with Elasticsearch.
        let mut minimum_required = 0;
        for (occur, _) in &subqueries {
            match occur {

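The comment above describes the Elasticsearch-compatible default for how many Should clauses must match. A tiny stand-alone illustration of that rule, using a toy `Occur` enum rather than tantivy's:

```rust
#[derive(Clone, Copy, PartialEq)]
enum Occur { Should, Must, MustNot }

/// Default minimum number of Should clauses that must match:
/// 1 if the query has at least one Should clause and no Must/MustNot clauses,
/// 0 otherwise (a required clause already constrains the result set).
fn default_minimum_should_match(clauses: &[Occur]) -> usize {
    let has_should = clauses.iter().any(|&o| o == Occur::Should);
    let has_required = clauses.iter().any(|&o| o == Occur::Must || o == Occur::MustNot);
    usize::from(has_should && !has_required)
}

fn main() {
    use Occur::*;
    assert_eq!(default_minimum_should_match(&[Should, Should]), 1);
    assert_eq!(default_minimum_should_match(&[Must, Should]), 0);
    assert_eq!(default_minimum_should_match(&[Must]), 0);
    println!("ok");
}
```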