Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2026-01-09 10:32:55 +00:00)

Compare commits: columnar-c ... remove-byt (49 commits)
| SHA1 |
|---|
| 72925c2bba |
| ed5a3b3172 |
| ca20bfa776 |
| faa706d804 |
| 850a0d7ae2 |
| 7fae4d98d7 |
| bc36458334 |
| 8a71e00da3 |
| e510f699c8 |
| d25fc155b2 |
| 8ea97e7d6b |
| 0a726a0897 |
| 66ff53b0f4 |
| d002698008 |
| c838aa808b |
| 06850719dc |
| 5f23bb7e65 |
| 533ad99cd5 |
| c7278b3258 |
| 6b403e3281 |
| 789cc8703e |
| e5098d9fe8 |
| f537334e4f |
| e2aa5af075 |
| 02bebf4ff5 |
| 0274c982d5 |
| 74bf60b4f7 |
| bf1449b22d |
| 111f25a8f7 |
| 019db10e8e |
| 7423f99719 |
| f2f38c43ce |
| 71f43ace1d |
| 347614c841 |
| 097fd6138d |
| 01e5a22759 |
| b60b7d2afe |
| dfe4e95fde |
| 60cc2644d6 |
| 10bccac61b |
| 1cfb9ce59a |
| 539ff08a79 |
| dab93df94e |
| 3120147a76 |
| cbcafae04c |
| 36c6138e7f |
| 7a9befd18d |
| 62c811df2b |
| 03345f0aa2 |
.github/workflows/coverage.yml (vendored, 6 lines changed)

@@ -2,9 +2,9 @@ name: Coverage
 
 on:
   push:
-    branches: [ main ]
+    branches: [main]
   pull_request:
-    branches: [ main ]
+    branches: [main]
 
 jobs:
   coverage:
@@ -16,7 +16,7 @@ jobs:
       - uses: Swatinem/rust-cache@v2
       - uses: taiki-e/install-action@cargo-llvm-cov
      - name: Generate code coverage
-        run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
+        run: cargo +nightly llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        continue-on-error: true
@@ -16,14 +16,13 @@ rust-version = "1.62"
 [dependencies]
 oneshot = "0.1.5"
 base64 = "0.21.0"
-byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
 regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
 aho-corasick = "0.7"
 tantivy-fst = "0.4.0"
 memmap2 = { version = "0.5.3", optional = true }
-lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
+lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true }
 brotli = { version = "3.3.4", optional = true }
 zstd = { version = "0.12", optional = true, default-features = false }
 snap = { version = "1.0.5", optional = true }
@@ -44,7 +43,7 @@ rustc-hash = "1.1.0"
 thiserror = "1.0.30"
 htmlescape = "0.3.1"
 fail = "0.5.0"
-murmurhash32 = "0.2.0"
+murmurhash32 = "0.3.0"
 time = { version = "0.3.10", features = ["serde-well-known"] }
 smallvec = "1.8.0"
 rayon = "1.5.2"
@@ -58,7 +57,7 @@ arc-swap = "1.5.0"
 columnar = { version="0.1", path="./columnar", package ="tantivy-columnar" }
 sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
 stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
-tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
+query-grammar = { version= "0.19.0", path="./query-grammar", package = "tantivy-query-grammar" }
 tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
 common = { version= "0.5", path = "./common/", package = "tantivy-common" }
 tokenizer-api = { version="0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" }
@@ -77,6 +76,7 @@ test-log = "0.2.10"
 env_logger = "0.10.0"
 pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
 futures = "0.3.21"
+paste = "1.0.11"
 
 [dev-dependencies.fail]
 version = "0.5.0"
@@ -1,23 +0,0 @@
-# This script takes care of packaging the build artifacts that will go in the
-# release zipfile
-
-$SRC_DIR = $PWD.Path
-$STAGE = [System.Guid]::NewGuid().ToString()
-
-Set-Location $ENV:Temp
-New-Item -Type Directory -Name $STAGE
-Set-Location $STAGE
-
-$ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip"
-
-# TODO Update this to package the right artifacts
-Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\hello.exe" '.\'
-
-7z a "$ZIP" *
-
-Push-AppveyorArtifact "$ZIP"
-
-Remove-Item *.* -Force
-Set-Location ..
-Remove-Item $STAGE
-Set-Location $SRC_DIR
@@ -1,33 +0,0 @@
-# This script takes care of building your crate and packaging it for release
-
-set -ex
-
-main() {
-    local src=$(pwd) \
-          stage=
-
-    case $TRAVIS_OS_NAME in
-        linux)
-            stage=$(mktemp -d)
-            ;;
-        osx)
-            stage=$(mktemp -d -t tmp)
-            ;;
-    esac
-
-    test -f Cargo.lock || cargo generate-lockfile
-
-    # TODO Update this to build the artifacts that matter to you
-    cross rustc --bin hello --target $TARGET --release -- -C lto
-
-    # TODO Update this to package the right artifacts
-    cp target/$TARGET/release/hello $stage/
-
-    cd $stage
-    tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz *
-    cd $src
-
-    rm -rf $stage
-}
-
-main
@@ -1,47 +0,0 @@
-set -ex
-
-main() {
-    local target=
-    if [ $TRAVIS_OS_NAME = linux ]; then
-        target=x86_64-unknown-linux-musl
-        sort=sort
-    else
-        target=x86_64-apple-darwin
-        sort=gsort  # for `sort --sort-version`, from brew's coreutils.
-    fi
-
-    # Builds for iOS are done on OSX, but require the specific target to be
-    # installed.
-    case $TARGET in
-        aarch64-apple-ios)
-            rustup target install aarch64-apple-ios
-            ;;
-        armv7-apple-ios)
-            rustup target install armv7-apple-ios
-            ;;
-        armv7s-apple-ios)
-            rustup target install armv7s-apple-ios
-            ;;
-        i386-apple-ios)
-            rustup target install i386-apple-ios
-            ;;
-        x86_64-apple-ios)
-            rustup target install x86_64-apple-ios
-            ;;
-    esac
-
-    # This fetches latest stable release
-    local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \
-                       | cut -d/ -f3 \
-                       | grep -E '^v[0.1.0-9.]+$' \
-                       | $sort --version-sort \
-                       | tail -n1)
-    curl -LSfs https://japaric.github.io/trust/install.sh | \
-        sh -s -- \
-           --force \
-           --git japaric/cross \
-           --tag $tag \
-           --target $target
-}
-
-main
ci/script.sh (30 lines deleted)

@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-
-# This script takes care of testing your crate
-
-set -ex
-
-main() {
-    if [ ! -z $CODECOV ]; then
-        echo "Codecov"
-        cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
-    else
-        echo "Build"
-        cross build --target $TARGET
-        if [ ! -z $DISABLE_TESTS ]; then
-            return
-        fi
-        echo "Test"
-        cross test --target $TARGET --no-default-features --features mmap
-        cross test --target $TARGET --no-default-features --features mmap query-grammar
-    fi
-    for example in $(ls examples/*.rs)
-    do
-        cargo run --example $(basename $example .rs)
-    done
-}
-
-# we don't run the "test phase" when doing deploys
-if [ -z $TRAVIS_TAG ]; then
-    main
-fi
@@ -17,6 +17,7 @@ stacker = { path = "../stacker", package="tantivy-stacker"}
 sstable = { path = "../sstable", package = "tantivy-sstable" }
 common = { path = "../common", package = "tantivy-common" }
 tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
+serde = "1.0.152"
 
 [dev-dependencies]
 proptest = "1"
@@ -58,7 +58,7 @@ fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
 
     b.iter(|| {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(
+        column.get_row_ids_for_value_range(
             *FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
             0..data.len() as u32,
             &mut positions,
@@ -74,7 +74,7 @@ fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
 
     b.iter(|| {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(
+        column.get_row_ids_for_value_range(
             *SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
             0..data.len() as u32,
             &mut positions,
@@ -90,7 +90,7 @@ fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
 
     b.iter(|| {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
+        column.get_row_ids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
         positions
     });
 }
@@ -89,7 +89,7 @@ fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
     let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
     b.iter(|| {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(
+        column.get_row_ids_for_value_range(
             FIFTY_PERCENT_RANGE,
             0..data.len() as u32,
             &mut positions,
@@ -106,7 +106,7 @@ fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
 
     b.iter(|| {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(
+        column.get_row_ids_for_value_range(
             ONE_PERCENT_ITEM_RANGE,
             0..data.len() as u32,
             &mut positions,
@@ -123,7 +123,7 @@ fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
 
     b.iter(|| {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
+        column.get_row_ids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
         positions
     });
 }
@@ -136,7 +136,7 @@ fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
 
     b.iter(|| {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
+        column.get_row_ids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
         positions
     });
 }
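Note: the benches above only pick up the rename at the value-storage layer. `ColumnValues::get_row_ids_for_value_range` keeps the old `get_docids_for_value_range` signature (inclusive value range, row range, output vec). A minimal caller sketch, assuming the trait is reachable as `tantivy_columnar::ColumnValues` (re-export path not confirmed by this diff):

```rust
use std::ops::RangeInclusive;
use std::sync::Arc;

use tantivy_columnar::ColumnValues; // assumed re-export path, not confirmed by this diff

/// Collect the row ids (value positions) whose value lies in `value_range`,
/// restricted to the first `num_rows` rows, mirroring the bench calls above.
fn matching_rows(
    column: &Arc<dyn ColumnValues<u64>>,
    value_range: RangeInclusive<u64>,
    num_rows: u32,
) -> Vec<u32> {
    let mut positions = Vec::new();
    column.get_row_ids_for_value_range(value_range, 0..num_rows, &mut positions);
    positions
}
```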
columnar/columnar-cli/Cargo.toml (new file, 17 lines)

@@ -0,0 +1,17 @@
+[package]
+name = "tantivy-columnar-cli"
+version = "0.1.0"
+edition = "2021"
+license = "MIT"
+
+[dependencies]
+columnar = {path="../", package="tantivy-columnar"}
+serde_json = "1"
+serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
+serde = "1"
+
+[workspace]
+members = []
+
+[profile.release]
+debug = true
columnar/columnar-cli/src/main.rs (new file, 134 lines)

@@ -0,0 +1,134 @@
+use columnar::ColumnarWriter;
+use columnar::NumericalValue;
+use serde_json_borrow;
+use std::fs::File;
+use std::io;
+use std::io::BufRead;
+use std::io::BufReader;
+use std::time::Instant;
+
+#[derive(Default)]
+struct JsonStack {
+    path: String,
+    stack: Vec<usize>,
+}
+
+impl JsonStack {
+    fn push(&mut self, seg: &str) {
+        let len = self.path.len();
+        self.stack.push(len);
+        self.path.push('.');
+        self.path.push_str(seg);
+    }
+
+    fn pop(&mut self) {
+        if let Some(len) = self.stack.pop() {
+            self.path.truncate(len);
+        }
+    }
+
+    fn path(&self) -> &str {
+        &self.path[1..]
+    }
+}
+
+fn append_json_to_columnar(
+    doc: u32,
+    json_value: &serde_json_borrow::Value,
+    columnar: &mut ColumnarWriter,
+    stack: &mut JsonStack,
+) -> usize {
+    let mut count = 0;
+    match json_value {
+        serde_json_borrow::Value::Null => {}
+        serde_json_borrow::Value::Bool(val) => {
+            columnar.record_numerical(
+                doc,
+                stack.path(),
+                NumericalValue::from(if *val { 1u64 } else { 0u64 }),
+            );
+            count += 1;
+        }
+        serde_json_borrow::Value::Number(num) => {
+            let numerical_value: NumericalValue = if let Some(num_i64) = num.as_i64() {
+                num_i64.into()
+            } else if let Some(num_u64) = num.as_u64() {
+                num_u64.into()
+            } else if let Some(num_f64) = num.as_f64() {
+                num_f64.into()
+            } else {
+                panic!();
+            };
+            count += 1;
+            columnar.record_numerical(
+                doc,
+                stack.path(),
+                numerical_value,
+            );
+        }
+        serde_json_borrow::Value::Str(msg) => {
+            columnar.record_str(
+                doc,
+                stack.path(),
+                msg,
+            );
+            count += 1;
+        },
+        serde_json_borrow::Value::Array(vals) => {
+            for val in vals {
+                count += append_json_to_columnar(doc, val, columnar, stack);
+            }
+        },
+        serde_json_borrow::Value::Object(json_map) => {
+            for (child_key, child_val) in json_map {
+                stack.push(child_key);
+                count += append_json_to_columnar(doc, child_val, columnar, stack);
+                stack.pop();
+            }
+        },
+    }
+    count
+}
+
+fn main() -> io::Result<()> {
+    let file = File::open("gh_small.json")?;
+    let mut reader = BufReader::new(file);
+    let mut line = String::with_capacity(100);
+    let mut columnar = columnar::ColumnarWriter::default();
+    let mut doc = 0;
+    let start = Instant::now();
+    let mut stack = JsonStack::default();
+    let mut total_count = 0;
+
+    let start_build = Instant::now();
+    loop {
+        line.clear();
+        let len = reader.read_line(&mut line)?;
+        if len == 0 {
+            break;
+        }
+        let Ok(json_value) = serde_json::from_str::<serde_json_borrow::Value>(&line) else { continue; };
+        total_count += append_json_to_columnar(doc, &json_value, &mut columnar, &mut stack);
+        doc += 1;
+    }
+    println!("Build in {:?}", start_build.elapsed());
+
+    println!("value count {total_count}");
+
+    let mut buffer = Vec::new();
+    let start_serialize = Instant::now();
+    columnar.serialize(doc, None, &mut buffer)?;
+    println!("Serialized in {:?}", start_serialize.elapsed());
+    println!("num docs: {doc}, {:?}", start.elapsed());
+    println!("buffer len {} MB", buffer.len() / 1_000_000);
+    let columnar = columnar::ColumnarReader::open(buffer)?;
+    for (column_name, dynamic_column) in columnar.list_columns()? {
+        let num_bytes = dynamic_column.num_bytes();
+        let typ = dynamic_column.column_type();
+        if num_bytes > 1_000_000 {
+            println!("{column_name} {typ:?} {} KB", num_bytes / 1_000);
+        }
+    }
+    println!("{} columns", columnar.num_columns());
+    Ok(())
+}
@@ -1,7 +1,6 @@
 # zero to one
 
 * revisit line codec
-* removal of all rows of a column in the schema due to deletes
 * add columns from schema on merge
 * Plugging JSON
 * replug examples
@@ -32,11 +32,11 @@ impl BytesColumn {
 
     /// Returns the number of rows in the column.
     pub fn num_rows(&self) -> RowId {
-        self.term_ord_column.num_rows()
+        self.term_ord_column.num_docs()
     }
 
     pub fn term_ords(&self, row_id: RowId) -> impl Iterator<Item = u64> + '_ {
-        self.term_ord_column.values(row_id)
+        self.term_ord_column.values_for_doc(row_id)
     }
 
     /// Returns the column of ordinals
@@ -3,7 +3,7 @@ mod serialize;
 
 use std::fmt::Debug;
 use std::io::Write;
-use std::ops::Deref;
+use std::ops::{Deref, Range, RangeInclusive};
 use std::sync::Arc;
 
 use common::BinarySerializable;
@@ -38,18 +38,20 @@ impl<T: MonotonicallyMappableToU64> Column<T> {
 }
 
 impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
+    #[inline]
     pub fn get_cardinality(&self) -> Cardinality {
         self.idx.get_cardinality()
     }
 
-    pub fn num_rows(&self) -> RowId {
+    pub fn num_docs(&self) -> RowId {
         match &self.idx {
-            ColumnIndex::Full => self.values.num_vals() as u32,
-            ColumnIndex::Optional(optional_index) => optional_index.num_rows(),
+            ColumnIndex::Empty { num_docs } => *num_docs,
+            ColumnIndex::Full => self.values.num_vals(),
+            ColumnIndex::Optional(optional_index) => optional_index.num_docs(),
             ColumnIndex::Multivalued(col_index) => {
                 // The multivalued index contains all value start row_id,
                 // and one extra value at the end with the overall number of rows.
-                col_index.num_rows()
+                col_index.num_docs()
             }
         }
     }
@@ -63,21 +65,40 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
     }
 
     pub fn first(&self, row_id: RowId) -> Option<T> {
-        self.values(row_id).next()
+        self.values_for_doc(row_id).next()
     }
 
-    pub fn values(&self, row_id: RowId) -> impl Iterator<Item = T> + '_ {
+    pub fn values_for_doc(&self, row_id: RowId) -> impl Iterator<Item = T> + '_ {
         self.value_row_ids(row_id)
             .map(|value_row_id: RowId| self.values.get_val(value_row_id))
     }
 
+    /// Get the docids of values which are in the provided value range.
+    #[inline]
+    pub fn get_docids_for_value_range(
+        &self,
+        value_range: RangeInclusive<T>,
+        selected_docid_range: Range<u32>,
+        doc_ids: &mut Vec<u32>,
+    ) {
+        // convert passed docid range to row id range
+        let rowid_range = self.idx.docid_range_to_rowids(selected_docid_range.clone());
+
+        // Load rows
+        self.values
+            .get_row_ids_for_value_range(value_range, rowid_range, doc_ids);
+        // Convert rows to docids
+        self.idx
+            .select_batch_in_place(selected_docid_range.start, doc_ids);
+    }
+
     /// Fils the output vector with the (possibly multiple values that are associated_with
     /// `row_id`.
     ///
     /// This method clears the `output` vector.
     pub fn fill_vals(&self, row_id: RowId, output: &mut Vec<T>) {
         output.clear();
-        output.extend(self.values(row_id));
+        output.extend(self.values_for_doc(row_id));
     }
 
     pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> {
@@ -131,9 +152,10 @@ impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
 
     fn num_vals(&self) -> u32 {
         match &self.column.idx {
+            ColumnIndex::Empty { .. } => 0u32,
             ColumnIndex::Full => self.column.values.num_vals(),
-            ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(),
-            ColumnIndex::Multivalued(multivalue_idx) => multivalue_idx.num_rows(),
+            ColumnIndex::Optional(optional_idx) => optional_idx.num_docs(),
+            ColumnIndex::Multivalued(multivalue_idx) => multivalue_idx.num_docs(),
         }
     }
 }
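The new `Column::get_docids_for_value_range` above is the doc-id-level counterpart of the value-layer method: it maps the requested doc range to row ids, filters rows by value, then maps the matching rows back to doc ids. A usage sketch, assuming `Column` is re-exported as `tantivy_columnar::Column` (path assumed, not confirmed by this diff):

```rust
use std::ops::Range;

use tantivy_columnar::Column; // assumed re-export path, not confirmed by this diff

/// Doc ids within `doc_range` whose column value falls inside `low..=high`.
/// Internally: doc range -> row range, value filter on rows, rows -> doc ids.
fn docs_with_value_in(
    column: &Column<u64>,
    low: u64,
    high: u64,
    doc_range: Range<u32>,
) -> Vec<u32> {
    let mut doc_ids = Vec::new();
    column.get_docids_for_value_range(low..=high, doc_range, &mut doc_ids);
    doc_ids
}
```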
@@ -7,9 +7,10 @@ use sstable::Dictionary;
 
 use crate::column::{BytesColumn, Column};
 use crate::column_index::{serialize_column_index, SerializableColumnIndex};
-use crate::column_values::serialize::serialize_column_values_u128;
-use crate::column_values::u64_based::{serialize_u64_based_column_values, CodecType};
-use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
+use crate::column_values::{
+    load_u64_based_column_values, serialize_column_values_u128, serialize_u64_based_column_values,
+    CodecType, MonotonicallyMappableToU128, MonotonicallyMappableToU64,
+};
 use crate::iterable::Iterable;
 use crate::StrColumn;
 
@@ -49,8 +50,7 @@ pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::
     );
     let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
     let column_index = crate::column_index::open_column_index(column_index_data)?;
-    let column_values =
-        crate::column_values::u64_based::load_u64_based_column_values(column_values_data)?;
+    let column_values = load_u64_based_column_values(column_values_data)?;
     Ok(Column {
         idx: column_index,
         values: column_values,
@@ -91,13 +91,10 @@ fn iter_num_values<'a>(
         return 0u32;
     };
     match column_index {
+        ColumnIndex::Empty { .. } => 0u32,
         ColumnIndex::Full => 1,
         ColumnIndex::Optional(optional_index) => {
-            if optional_index.contains(row_addr.row_id) {
-                1u32
-            } else {
-                0u32
-            }
+            u32::from(optional_index.contains(row_addr.row_id))
         }
         ColumnIndex::Multivalued(multivalued_index) => {
             multivalued_index.range(row_addr.row_id).len() as u32
@@ -55,7 +55,7 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
                 Some(ColumnIndex::Multivalued(_)) => {
                     panic!("No multivalued index is allowed when stacking column index");
                 }
-                None => Box::new(std::iter::empty()),
+                None | Some(ColumnIndex::Empty { .. }) => Box::new(std::iter::empty()),
             };
             rows_it
         }),
@@ -74,7 +74,9 @@ fn convert_column_opt_to_multivalued_index<'a>(
     num_rows: RowId,
 ) -> Box<dyn Iterator<Item = RowId> + 'a> {
     match column_index_opt {
-        None => Box::new(iter::repeat(0u32).take(num_rows as usize + 1)),
+        None | Some(ColumnIndex::Empty { .. }) => {
+            Box::new(iter::repeat(0u32).take(num_rows as usize + 1))
+        }
         Some(ColumnIndex::Full) => Box::new(0..num_rows + 1),
         Some(ColumnIndex::Optional(optional_index)) => {
             Box::new(
@@ -10,10 +10,13 @@ pub use optional_index::{OptionalIndex, Set};
 pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex};
 
 use crate::column_index::multivalued_index::MultiValueIndex;
-use crate::{Cardinality, RowId};
+use crate::{Cardinality, DocId, RowId};
 
 #[derive(Clone)]
 pub enum ColumnIndex {
+    Empty {
+        num_docs: u32,
+    },
     Full,
     Optional(OptionalIndex),
     /// In addition, at index num_rows, an extra value is added
@@ -34,8 +37,10 @@ impl From<MultiValueIndex> for ColumnIndex {
 }
 
 impl ColumnIndex {
+    #[inline]
     pub fn get_cardinality(&self) -> Cardinality {
         match self {
+            ColumnIndex::Empty { .. } => Cardinality::Optional,
             ColumnIndex::Full => Cardinality::Full,
             ColumnIndex::Optional(_) => Cardinality::Optional,
             ColumnIndex::Multivalued(_) => Cardinality::Multivalued,
@@ -43,32 +48,58 @@ impl ColumnIndex {
     }
 
     /// Returns true if and only if there are at least one value associated to the row.
-    pub fn has_value(&self, row_id: RowId) -> bool {
+    pub fn has_value(&self, doc_id: DocId) -> bool {
         match self {
+            ColumnIndex::Empty { .. } => false,
             ColumnIndex::Full => true,
-            ColumnIndex::Optional(optional_index) => optional_index.contains(row_id),
+            ColumnIndex::Optional(optional_index) => optional_index.contains(doc_id),
             ColumnIndex::Multivalued(multivalued_index) => {
-                multivalued_index.range(row_id).len() > 0
+                !multivalued_index.range(doc_id).is_empty()
             }
         }
     }
 
-    pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
+    pub fn value_row_ids(&self, doc_id: DocId) -> Range<RowId> {
         match self {
-            ColumnIndex::Full => row_id..row_id + 1,
+            ColumnIndex::Empty { .. } => 0..0,
+            ColumnIndex::Full => doc_id..doc_id + 1,
             ColumnIndex::Optional(optional_index) => {
-                if let Some(val) = optional_index.rank_if_exists(row_id) {
+                if let Some(val) = optional_index.rank_if_exists(doc_id) {
                     val..val + 1
                 } else {
                     0..0
                 }
             }
-            ColumnIndex::Multivalued(multivalued_index) => multivalued_index.range(row_id),
+            ColumnIndex::Multivalued(multivalued_index) => multivalued_index.range(doc_id),
         }
     }
 
-    pub fn select_batch_in_place(&self, rank_ids: &mut Vec<RowId>) {
+    pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
+        match self {
+            ColumnIndex::Empty { .. } => 0..0,
+            ColumnIndex::Full => doc_id,
+            ColumnIndex::Optional(optional_index) => {
+                let row_start = optional_index.rank(doc_id.start);
+                let row_end = optional_index.rank(doc_id.end);
+                row_start..row_end
+            }
+            ColumnIndex::Multivalued(multivalued_index) => {
+                let end_docid = doc_id.end.min(multivalued_index.num_docs() - 1) + 1;
+                let start_docid = doc_id.start.min(end_docid);
+
+                let row_start = multivalued_index.start_index_column.get_val(start_docid);
+                let row_end = multivalued_index.start_index_column.get_val(end_docid);
+
+                row_start..row_end
+            }
+        }
+    }
+
+    pub fn select_batch_in_place(&self, doc_id_start: DocId, rank_ids: &mut Vec<RowId>) {
         match self {
+            ColumnIndex::Empty { .. } => {
+                rank_ids.clear();
+            }
             ColumnIndex::Full => {
                 // No need to do anything:
                 // value_idx and row_idx are the same.
@@ -77,8 +108,7 @@ impl ColumnIndex {
                 optional_index.select_batch(&mut rank_ids[..]);
             }
             ColumnIndex::Multivalued(multivalued_index) => {
-                // TODO important: avoid using 0u32, and restart from the beginning all of the time.
-                multivalued_index.select_batch_in_place(0u32, rank_ids)
+                multivalued_index.select_batch_in_place(doc_id_start, rank_ids)
             }
         }
     }
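For the `Optional` variant, the new `docid_range_to_rowids` reduces to two rank computations: the row id of a doc is the number of non-null docs before it. A self-contained model of that arithmetic (plain slices, not the crate's types):

```rust
use std::ops::Range;

// Model of ColumnIndex::Optional(..).docid_range_to_rowids:
// `non_null_docs` plays the role of the optional index (sorted doc ids that have a value),
// and rank(d) is the number of non-null docs strictly before d.
fn docid_range_to_rowids(non_null_docs: &[u32], doc_range: Range<u32>) -> Range<u32> {
    let rank = |doc: u32| non_null_docs.partition_point(|&d| d < doc) as u32;
    rank(doc_range.start)..rank(doc_range.end)
}

fn main() {
    // Docs 2, 5 and 9 carry a value; doc range 3..10 maps to rows 1..3 (the rows of docs 5 and 9).
    assert_eq!(docid_range_to_rowids(&[2, 5, 9], 3..10), 1..3);
    println!("ok");
}
```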
@@ -5,16 +5,17 @@ use std::sync::Arc;
 
 use common::OwnedBytes;
 
-use crate::column_values::u64_based::CodecType;
-use crate::column_values::ColumnValues;
+use crate::column_values::{
+    load_u64_based_column_values, serialize_u64_based_column_values, CodecType, ColumnValues,
+};
 use crate::iterable::Iterable;
-use crate::RowId;
+use crate::{DocId, RowId};
 
 pub fn serialize_multivalued_index(
     multivalued_index: &dyn Iterable<RowId>,
     output: &mut impl Write,
 ) -> io::Result<()> {
-    crate::column_values::u64_based::serialize_u64_based_column_values(
+    serialize_u64_based_column_values(
         multivalued_index,
         &[CodecType::Bitpacked, CodecType::Linear],
         output,
@@ -23,8 +24,7 @@ pub fn serialize_multivalued_index(
 }
 
 pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<MultiValueIndex> {
-    let start_index_column: Arc<dyn ColumnValues<RowId>> =
-        crate::column_values::u64_based::load_u64_based_column_values(bytes)?;
+    let start_index_column: Arc<dyn ColumnValues<RowId>> = load_u64_based_column_values(bytes)?;
     Ok(MultiValueIndex { start_index_column })
 }
 
@@ -52,20 +52,20 @@ impl MultiValueIndex {
     /// Returns `[start, end)`, such that the values associated with
     /// the given document are `start..end`.
     #[inline]
-    pub(crate) fn range(&self, row_id: RowId) -> Range<RowId> {
-        let start = self.start_index_column.get_val(row_id);
-        let end = self.start_index_column.get_val(row_id + 1);
+    pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
+        let start = self.start_index_column.get_val(doc_id);
+        let end = self.start_index_column.get_val(doc_id + 1);
         start..end
     }
 
     /// Returns the number of documents in the index.
     #[inline]
-    pub fn num_rows(&self) -> u32 {
+    pub fn num_docs(&self) -> u32 {
         self.start_index_column.num_vals() - 1
     }
 
     /// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
-    /// row_ids. Positions are converted inplace to docids.
+    /// docids. Positions are converted inplace to docids.
     ///
     /// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the
     /// index.
@@ -76,20 +76,20 @@ impl MultiValueIndex {
     /// TODO: Instead of a linear scan we can employ a exponential search into binary search to
     /// match a docid to its value position.
     #[allow(clippy::bool_to_int_with_if)]
-    pub(crate) fn select_batch_in_place(&self, row_start: RowId, ranks: &mut Vec<u32>) {
+    pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
         if ranks.is_empty() {
             return;
         }
-        let mut cur_doc = row_start;
+        let mut cur_doc = docid_start;
         let mut last_doc = None;
 
-        assert!(self.start_index_column.get_val(row_start) as u32 <= ranks[0]);
+        assert!(self.start_index_column.get_val(docid_start) <= ranks[0]);
 
         let mut write_doc_pos = 0;
         for i in 0..ranks.len() {
             let pos = ranks[i];
             loop {
-                let end = self.start_index_column.get_val(cur_doc + 1) as u32;
+                let end = self.start_index_column.get_val(cur_doc + 1);
                 if end > pos {
                     ranks[write_doc_pos] = cur_doc;
                     write_doc_pos += if last_doc == Some(cur_doc) { 0 } else { 1 };
@@ -127,7 +127,7 @@ mod tests {
         let offsets: Vec<RowId> = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
         let column: Arc<dyn ColumnValues<RowId>> = Arc::new(IterColumn::from(offsets.into_iter()));
         let index = MultiValueIndex::from(column);
-        assert_eq!(index.num_rows(), 5);
+        assert_eq!(index.num_docs(), 5);
         let positions = &[10u32, 11, 15, 20, 21, 22];
         assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);
         assert_eq!(index_to_pos_helper(&index, 1..5, positions), vec![1, 3, 4]);
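The multivalued index is a list of per-doc start offsets plus one trailing sentinel, so `range(doc)` is `offsets[doc]..offsets[doc + 1]` and `num_docs()` is `offsets.len() - 1`; `select_batch_in_place` walks those ranges to turn value positions back into doc ids. A self-contained model on the offsets used in the test above (not the crate's types):

```rust
use std::ops::Range;

// Model of MultiValueIndex on the offsets from the diff's test:
// doc 0 owns value rows 0..10, doc 1 owns 10..12, doc 2 owns 12..15, and so on.
fn value_range(offsets: &[u32], doc: u32) -> Range<u32> {
    offsets[doc as usize]..offsets[doc as usize + 1]
}

fn main() {
    let offsets = [0u32, 10, 12, 15, 22, 23];
    assert_eq!(offsets.len() - 1, 5); // num_docs()
    assert_eq!(value_range(&offsets, 1), 10..12);
    // select_batch_in_place scans these ranges to map value positions to docs:
    // positions [10, 11, 15, 20, 21, 22] fall into docs [1, 3, 4], matching the test.
    println!("ok");
}
```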
@@ -11,7 +11,7 @@ use set_block::{
 };
 
 use crate::iterable::Iterable;
-use crate::{InvalidData, RowId};
+use crate::{DocId, InvalidData, RowId};
 
 /// The threshold for for number of elements after which we switch to dense block encoding.
 ///
@@ -177,11 +177,11 @@ impl Set<RowId> for OptionalIndex {
     }
 
     #[inline]
-    fn rank(&self, row_id: RowId) -> RowId {
+    fn rank(&self, doc_id: DocId) -> RowId {
         let RowAddr {
             block_id,
             in_block_row_id,
-        } = row_addr_from_row_id(row_id);
+        } = row_addr_from_row_id(doc_id);
         let block_meta = self.block_metas[block_id as usize];
         let block = self.block(block_meta);
         let block_offset_row_id = match block {
@@ -192,11 +192,11 @@ impl Set<RowId> for OptionalIndex {
     }
 
     #[inline]
-    fn rank_if_exists(&self, row_id: RowId) -> Option<RowId> {
+    fn rank_if_exists(&self, doc_id: DocId) -> Option<RowId> {
         let RowAddr {
             block_id,
             in_block_row_id,
-        } = row_addr_from_row_id(row_id);
+        } = row_addr_from_row_id(doc_id);
         let block_meta = self.block_metas[block_id as usize];
         let block = self.block(block_meta);
         let block_offset_row_id = match block {
@@ -220,7 +220,7 @@ impl Set<RowId> for OptionalIndex {
         block_doc_idx_start + in_block_rank as u32
     }
 
-    fn select_cursor<'b>(&'b self) -> OptionalIndexSelectCursor<'b> {
+    fn select_cursor(&self) -> OptionalIndexSelectCursor<'_> {
         OptionalIndexSelectCursor {
             current_block_cursor: BlockSelectCursor::Sparse(
                 SparseBlockCodec::open(b"").select_cursor(),
@@ -247,7 +247,7 @@ impl OptionalIndex {
         open_optional_index(bytes).unwrap()
     }
 
-    pub fn num_rows(&self) -> RowId {
+    pub fn num_docs(&self) -> RowId {
         self.num_rows
     }
 
@@ -255,7 +255,7 @@ impl OptionalIndex {
         self.num_non_null_rows
     }
 
-    pub fn iter_rows<'a>(&'a self) -> impl Iterator<Item = RowId> + 'a {
+    pub fn iter_rows(&self) -> impl Iterator<Item = RowId> + '_ {
         // TODO optimize
         let mut select_batch = self.select_cursor();
         (0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
@@ -268,7 +268,7 @@ impl OptionalIndex {
     }
 
     #[inline]
-    fn block<'a>(&'a self, block_meta: BlockMeta) -> Block<'a> {
+    fn block(&self, block_meta: BlockMeta) -> Block<'_> {
         let BlockMeta {
             start_byte_offset,
             block_variant,
@@ -351,7 +351,7 @@ fn serialize_optional_index_block(block_els: &[u16], out: &mut impl io::Write) -
     Ok(())
 }
 
-pub fn serialize_optional_index<'a, W: io::Write>(
+pub fn serialize_optional_index<W: io::Write>(
     non_null_rows: &dyn Iterable<RowId>,
     num_rows: RowId,
     output: &mut W,
@@ -427,7 +427,7 @@ impl SerializedBlockMeta {
     }
 
     #[inline]
-    fn to_bytes(&self) -> [u8; SERIALIZED_BLOCK_META_NUM_BYTES] {
+    fn to_bytes(self) -> [u8; SERIALIZED_BLOCK_META_NUM_BYTES] {
         assert!(self.num_non_null_rows > 0);
         let mut bytes = [0u8; SERIALIZED_BLOCK_META_NUM_BYTES];
         bytes[0..2].copy_from_slice(&self.block_id.to_le_bytes());
@@ -440,7 +440,7 @@ impl SerializedBlockMeta {
 
 #[inline]
 fn is_sparse(num_rows_in_block: u32) -> bool {
-    num_rows_in_block < DENSE_BLOCK_THRESHOLD as u32
+    num_rows_in_block < DENSE_BLOCK_THRESHOLD
 }
 
 fn deserialize_optional_index_block_metadatas(
@@ -448,7 +448,7 @@ fn deserialize_optional_index_block_metadatas(
     num_rows: u32,
 ) -> (Box<[BlockMeta]>, u32) {
     let num_blocks = data.len() / SERIALIZED_BLOCK_META_NUM_BYTES;
-    let mut block_metas = Vec::with_capacity(num_blocks as usize + 1);
+    let mut block_metas = Vec::with_capacity(num_blocks + 1);
     let mut start_byte_offset = 0;
     let mut non_null_rows_before_block = 0;
     for block_meta_bytes in data.chunks_exact(SERIALIZED_BLOCK_META_NUM_BYTES) {
@@ -479,7 +479,7 @@ fn deserialize_optional_index_block_metadatas(
             block_variant,
         });
         start_byte_offset += block_variant.num_bytes_in_block();
-        non_null_rows_before_block += num_non_null_rows as u32;
+        non_null_rows_before_block += num_non_null_rows;
     }
     block_metas.resize(
         ((num_rows + BLOCK_SIZE - 1) / BLOCK_SIZE) as usize,
@@ -501,7 +501,7 @@ pub fn open_optional_index(bytes: OwnedBytes) -> io::Result<OptionalIndex> {
         num_non_empty_block_bytes as usize * SERIALIZED_BLOCK_META_NUM_BYTES;
     let (block_data, block_metas) = bytes.rsplit(block_metas_num_bytes);
     let (block_metas, num_non_null_rows) =
-        deserialize_optional_index_block_metadatas(block_metas.as_slice(), num_rows).into();
+        deserialize_optional_index_block_metadatas(block_metas.as_slice(), num_rows);
     let optional_index = OptionalIndex {
         num_rows,
         num_non_null_rows,
@@ -10,7 +10,7 @@ pub trait SetCodec {
     ///
     /// May panic if the elements are not sorted.
     fn serialize(els: impl Iterator<Item = Self::Item>, wrt: impl io::Write) -> io::Result<()>;
-    fn open<'a>(data: &'a [u8]) -> Self::Reader<'a>;
+    fn open(data: &[u8]) -> Self::Reader<'_>;
 }
 
 /// Stateful object that makes it possible to compute several select in a row,
@@ -43,5 +43,5 @@ pub trait Set<T> {
     fn select(&self, rank: T) -> T;
 
     /// Creates a brand new select cursor.
-    fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b>;
+    fn select_cursor(&self) -> Self::SelectCursor<'_>;
 }
@@ -32,7 +32,7 @@ pub const MINI_BLOCK_NUM_BYTES: usize = MINI_BLOCK_BITVEC_NUM_BYTES + MINI_BLOCK
 
 /// Number of bytes in a dense block.
 pub const DENSE_BLOCK_NUM_BYTES: u32 =
-    (ELEMENTS_PER_BLOCK as u32 / ELEMENTS_PER_MINI_BLOCK as u32) * MINI_BLOCK_NUM_BYTES as u32;
+    (ELEMENTS_PER_BLOCK / ELEMENTS_PER_MINI_BLOCK as u32) * MINI_BLOCK_NUM_BYTES as u32;
 
 pub struct DenseBlockCodec;
 
@@ -45,7 +45,7 @@ impl SetCodec for DenseBlockCodec {
     }
 
     #[inline]
-    fn open<'a>(data: &'a [u8]) -> Self::Reader<'a> {
+    fn open(data: &[u8]) -> Self::Reader<'_> {
         assert_eq!(data.len(), DENSE_BLOCK_NUM_BYTES as usize);
         DenseBlock(data)
     }
@@ -94,7 +94,7 @@ impl DenseMiniBlock {
         Self { bitvec, rank }
     }
 
-    fn to_bytes(&self) -> [u8; MINI_BLOCK_NUM_BYTES] {
+    fn to_bytes(self) -> [u8; MINI_BLOCK_NUM_BYTES] {
         let mut bytes = [0u8; MINI_BLOCK_NUM_BYTES];
         bytes[..MINI_BLOCK_BITVEC_NUM_BYTES].copy_from_slice(&self.bitvec.to_le_bytes());
         bytes[MINI_BLOCK_BITVEC_NUM_BYTES..].copy_from_slice(&self.rank.to_le_bytes());
@@ -166,7 +166,7 @@ impl<'a> Set<u16> for DenseBlock<'a> {
     }
 
     #[inline(always)]
-    fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b> {
+    fn select_cursor(&self) -> Self::SelectCursor<'_> {
         DenseBlockSelectCursor {
             block_id: 0,
             dense_block: *self,
@@ -229,7 +229,7 @@ pub fn serialize_dense_codec(
         while block_id > current_block_id {
             let dense_mini_block = DenseMiniBlock {
                 bitvec: block,
-                rank: non_null_rows_before as u16,
+                rank: non_null_rows_before,
             };
             output.write_all(&dense_mini_block.to_bytes())?;
             non_null_rows_before += block.count_ones() as u16;
@@ -16,7 +16,7 @@ impl SetCodec for SparseBlockCodec {
         Ok(())
     }
 
-    fn open<'a>(data: &'a [u8]) -> Self::Reader<'a> {
+    fn open(data: &[u8]) -> Self::Reader<'_> {
         SparseBlock(data)
     }
 }
@@ -56,7 +56,7 @@ impl<'a> Set<u16> for SparseBlock<'a> {
     }
 
     #[inline(always)]
-    fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b> {
+    fn select_cursor(&self) -> Self::SelectCursor<'_> {
         *self
     }
 }
@@ -37,7 +37,7 @@ proptest! {
 fn test_with_random_sets_simple() {
     let vals = 10..BLOCK_SIZE * 2;
     let mut out: Vec<u8> = Vec::new();
-    serialize_optional_index(&vals.clone(), 100, &mut out).unwrap();
+    serialize_optional_index(&vals, 100, &mut out).unwrap();
     let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
     let ranks: Vec<u32> = (65_472u32..65_473u32).collect();
     let els: Vec<u32> = ranks.iter().copied().map(|rank| rank + 10).collect();
@@ -142,7 +142,7 @@ fn test_optional_index_large() {
 
 fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
     let optional_index = OptionalIndex::for_test(num_rows, row_ids);
-    assert_eq!(optional_index.num_rows(), num_rows);
+    assert_eq!(optional_index.num_docs(), num_rows);
     assert!(optional_index.iter_rows().eq(row_ids.iter().copied()));
 }
 
@@ -154,7 +154,7 @@ fn test_optional_index_iter_empty() {
 fn test_optional_index_rank_aux(row_ids: &[RowId]) {
     let num_rows = row_ids.last().copied().unwrap_or(0u32) + 1;
     let null_index = OptionalIndex::for_test(num_rows, row_ids);
-    assert_eq!(null_index.num_rows(), num_rows);
+    assert_eq!(null_index.num_docs(), num_rows);
     for (row_id, row_val) in row_ids.iter().copied().enumerate() {
         assert_eq!(null_index.rank(row_val), row_id as u32);
         assert_eq!(null_index.rank_if_exists(row_val), Some(row_id as u32));
@@ -196,7 +196,7 @@ fn test_optional_index_for_tests() {
     assert!(optional_index.contains(1));
     assert!(optional_index.contains(2));
    assert!(!optional_index.contains(3));
-    assert_eq!(optional_index.num_rows(), 4);
+    assert_eq!(optional_index.num_docs(), 4);
 }
 
 #[cfg(all(test, feature = "unstable"))]
@@ -212,10 +212,13 @@ mod bench {
     fn gen_bools(fill_ratio: f64) -> OptionalIndex {
         let mut out = Vec::new();
         let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
-        let vals: Vec<bool> = (0..TOTAL_NUM_VALUES)
+        let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
             .map(|_| rng.gen_bool(fill_ratio))
+            .enumerate()
+            .filter(|(pos, val)| *val)
+            .map(|(pos, _)| pos as RowId)
             .collect();
-        serialize_optional_index(&&vals[..], &mut out).unwrap();
+        serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
         let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
         codec
     }
135 columnar/src/column_values/bench.rs Normal file
@@ -0,0 +1,135 @@
+use std::sync::Arc;
+
+use common::OwnedBytes;
+use rand::rngs::StdRng;
+use rand::{Rng, SeedableRng};
+use test::{self, Bencher};
+
+use super::*;
+use crate::column_values::u64_based::*;
+
+fn get_data() -> Vec<u64> {
+    let mut rng = StdRng::seed_from_u64(2u64);
+    let mut data: Vec<_> = (100..55000_u64)
+        .map(|num| num + rng.gen::<u8>() as u64)
+        .collect();
+    data.push(99_000);
+    data.insert(1000, 2000);
+    data.insert(2000, 100);
+    data.insert(3000, 4100);
+    data.insert(4000, 100);
+    data.insert(5000, 800);
+    data
+}
+
+fn compute_stats(vals: impl Iterator<Item = u64>) -> ColumnStats {
+    let mut stats_collector = StatsCollector::default();
+    for val in vals {
+        stats_collector.collect(val);
+    }
+    stats_collector.stats()
+}
+
+#[inline(never)]
+fn value_iter() -> impl Iterator<Item = u64> {
+    0..20_000
+}
+fn get_reader_for_bench<Codec: ColumnCodec>(data: &[u64]) -> Codec::ColumnValues {
+    let mut bytes = Vec::new();
+    let stats = compute_stats(data.iter().cloned());
+    let mut codec_serializer = Codec::estimator();
+    for val in data {
+        codec_serializer.collect(*val);
+    }
+    codec_serializer.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes);
+
+    Codec::load(OwnedBytes::new(bytes)).unwrap()
+}
+fn bench_get<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
+    let col = get_reader_for_bench::<Codec>(data);
+    b.iter(|| {
+        let mut sum = 0u64;
+        for pos in value_iter() {
+            let val = col.get_val(pos as u32);
+            sum = sum.wrapping_add(val);
+        }
+        sum
+    });
+}
+
+#[inline(never)]
+fn bench_get_dynamic_helper(b: &mut Bencher, col: Arc<dyn ColumnValues>) {
+    b.iter(|| {
+        let mut sum = 0u64;
+        for pos in value_iter() {
+            let val = col.get_val(pos as u32);
+            sum = sum.wrapping_add(val);
+        }
+        sum
+    });
+}
+
+fn bench_get_dynamic<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
+    let col = Arc::new(get_reader_for_bench::<Codec>(data));
+    bench_get_dynamic_helper(b, col);
+}
+fn bench_create<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
+    let stats = compute_stats(data.iter().cloned());
+
+    let mut bytes = Vec::new();
+    b.iter(|| {
+        bytes.clear();
+        let mut codec_serializer = Codec::estimator();
+        for val in data.iter().take(1024) {
+            codec_serializer.collect(*val);
+        }
+
+        codec_serializer.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes)
+    });
+}
+
+#[bench]
+fn bench_fastfield_bitpack_create(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_create::<BitpackedCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_create::<LinearCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_create::<BlockwiseLinearCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_bitpack_get(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_get::<BitpackedCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_bitpack_get_dynamic(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_get_dynamic::<BitpackedCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_get::<LinearCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_linearinterpol_get_dynamic(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_get_dynamic::<LinearCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_get::<BlockwiseLinearCodec>(b, &data);
+}
+#[bench]
+fn bench_fastfield_multilinearinterpol_get_dynamic(b: &mut Bencher) {
+    let data: Vec<_> = get_data();
+    bench_get_dynamic::<BlockwiseLinearCodec>(b, &data);
+}
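The new bench file above exercises the u64 codec API end to end: an estimator collects values, `serialize` writes the column against precomputed `ColumnStats`, and `load` reopens it as `ColumnValues`. Below is a minimal sketch of that round trip, assuming the `ColumnCodec`/`StatsCollector` surface used in the bench (names taken from the diff above; error handling simplified, not part of the change itself):

fn roundtrip_bitpacked(vals: &[u64]) -> Vec<u64> {
    // Collect stats and feed the estimator, as get_reader_for_bench does above.
    let mut stats_collector = StatsCollector::default();
    let mut estimator = BitpackedCodec::estimator();
    for &val in vals {
        stats_collector.collect(val);
        estimator.collect(val);
    }
    let stats = stats_collector.stats();
    // Serialize the column, then reload it through the codec.
    let mut bytes = Vec::new();
    estimator
        .serialize(&stats, Box::new(vals.iter().copied()).as_mut(), &mut bytes)
        .expect("serialization failed");
    let col = BitpackedCodec::load(OwnedBytes::new(bytes)).expect("load failed");
    (0..col.num_vals()).map(|idx| col.get_val(idx)).collect()
}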
@@ -1,383 +0,0 @@
-use std::fmt::Debug;
-use std::marker::PhantomData;
-use std::ops::{Range, RangeInclusive};
-use std::sync::Arc;
-
-use tantivy_bitpacker::minmax;
-
-use crate::column_values::monotonic_mapping::StrictlyMonotonicFn;
-
-/// `ColumnValues` provides access to a dense field column.
-///
-/// `Column` are just a wrapper over `ColumnValues` and a `ColumnIndex`.
-pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
-    /// Return the value associated with the given idx.
-    ///
-    /// This accessor should return as fast as possible.
-    ///
-    /// # Panics
-    ///
-    /// May panic if `idx` is greater than the column length.
-    fn get_val(&self, idx: u32) -> T;
-
-    /// Fills an output buffer with the fast field values
-    /// associated with the `DocId` going from
-    /// `start` to `start + output.len()`.
-    ///
-    /// # Panics
-    ///
-    /// Must panic if `start + output.len()` is greater than
-    /// the segment's `maxdoc`.
-    #[inline(always)]
-    fn get_range(&self, start: u64, output: &mut [T]) {
-        for (out, idx) in output.iter_mut().zip(start..) {
-            *out = self.get_val(idx as u32);
-        }
-    }
-
-    /// Get the positions of values which are in the provided value range.
-    ///
-    /// Note that position == docid for single value fast fields
-    #[inline(always)]
-    fn get_docids_for_value_range(
-        &self,
-        value_range: RangeInclusive<T>,
-        doc_id_range: Range<u32>,
-        positions: &mut Vec<u32>,
-    ) {
-        let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
-        for idx in doc_id_range.start..doc_id_range.end {
-            let val = self.get_val(idx);
-            if value_range.contains(&val) {
-                positions.push(idx);
-            }
-        }
-    }
-
-    /// Returns the minimum value for this fast field.
-    ///
-    /// This min_value may not be exact.
-    /// For instance, the min value does not take in account of possible
-    /// deleted document. All values are however guaranteed to be higher than
-    /// `.min_value()`.
-    fn min_value(&self) -> T;
-
-    /// Returns the maximum value for this fast field.
-    ///
-    /// This max_value may not be exact.
-    /// For instance, the max value does not take in account of possible
-    /// deleted document. All values are however guaranteed to be higher than
-    /// `.max_value()`.
-    fn max_value(&self) -> T;
-
-    /// The number of values in the column.
-    fn num_vals(&self) -> u32;
-
-    /// Returns a iterator over the data
-    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
-        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
-    }
-}
-
-impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
-    #[inline(always)]
-    fn get_val(&self, idx: u32) -> T {
-        self.as_ref().get_val(idx)
-    }
-
-    #[inline(always)]
-    fn min_value(&self) -> T {
-        self.as_ref().min_value()
-    }
-
-    #[inline(always)]
-    fn max_value(&self) -> T {
-        self.as_ref().max_value()
-    }
-
-    #[inline(always)]
-    fn num_vals(&self) -> u32 {
-        self.as_ref().num_vals()
-    }
-
-    #[inline(always)]
-    fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
-        self.as_ref().iter()
-    }
-
-    #[inline(always)]
-    fn get_range(&self, start: u64, output: &mut [T]) {
-        self.as_ref().get_range(start, output)
-    }
-}
-
-impl<'a, C: ColumnValues<T> + ?Sized, T: Copy + PartialOrd + Debug> ColumnValues<T> for &'a C {
-    fn get_val(&self, idx: u32) -> T {
-        (*self).get_val(idx)
-    }
-
-    fn min_value(&self) -> T {
-        (*self).min_value()
-    }
-
-    fn max_value(&self) -> T {
-        (*self).max_value()
-    }
-
-    fn num_vals(&self) -> u32 {
-        (*self).num_vals()
-    }
-
-    fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
-        (*self).iter()
-    }
-
-    fn get_range(&self, start: u64, output: &mut [T]) {
-        (*self).get_range(start, output)
-    }
-}
-
-/// VecColumn provides `Column` over a slice.
-pub struct VecColumn<'a, T = u64> {
-    pub(crate) values: &'a [T],
-    pub(crate) min_value: T,
-    pub(crate) max_value: T,
-}
-
-impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColumn<'a, T> {
-    fn get_val(&self, position: u32) -> T {
-        self.values[position as usize]
-    }
-
-    fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
-        Box::new(self.values.iter().copied())
-    }
-
-    fn min_value(&self) -> T {
-        self.min_value
-    }
-
-    fn max_value(&self) -> T {
-        self.max_value
-    }
-
-    fn num_vals(&self) -> u32 {
-        self.values.len() as u32
-    }
-
-    fn get_range(&self, start: u64, output: &mut [T]) {
-        output.copy_from_slice(&self.values[start as usize..][..output.len()])
-    }
-}
-
-impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
-where V: AsRef<[T]> + ?Sized
-{
-    fn from(values: &'a V) -> Self {
-        let values = values.as_ref();
-        let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
-        Self {
-            values,
-            min_value,
-            max_value,
-        }
-    }
-}
-
-struct MonotonicMappingColumn<C, T, Input> {
-    from_column: C,
-    monotonic_mapping: T,
-    _phantom: PhantomData<Input>,
-}
-
-/// Creates a view of a column transformed by a strictly monotonic mapping. See
-/// [`StrictlyMonotonicFn`].
-///
-/// E.g. apply a gcd monotonic_mapping([100, 200, 300]) == [1, 2, 3]
-/// monotonic_mapping.mapping() is expected to be injective, and we should always have
-/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
-///
-/// The inverse of the mapping is required for:
-/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u64> `
-/// The user provides the original value range and we need to monotonic map them in the same way the
-/// serialization does before calling the underlying column.
-///
-/// Note that when opening a codec, the monotonic_mapping should be the inverse of the mapping
-/// during serialization. And therefore the monotonic_mapping_inv when opening is the same as
-/// monotonic_mapping during serialization.
-pub fn monotonic_map_column<C, T, Input, Output>(
-    from_column: C,
-    monotonic_mapping: T,
-) -> impl ColumnValues<Output>
-where
-    C: ColumnValues<Input>,
-    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
-    Input: PartialOrd + Debug + Send + Sync + Clone,
-    Output: PartialOrd + Debug + Send + Sync + Clone,
-{
-    MonotonicMappingColumn {
-        from_column,
-        monotonic_mapping,
-        _phantom: PhantomData,
-    }
-}
-
-impl<C, T, Input, Output> ColumnValues<Output> for MonotonicMappingColumn<C, T, Input>
-where
-    C: ColumnValues<Input>,
-    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
-    Input: PartialOrd + Send + Debug + Sync + Clone,
-    Output: PartialOrd + Send + Debug + Sync + Clone,
-{
-    #[inline]
-    fn get_val(&self, idx: u32) -> Output {
-        let from_val = self.from_column.get_val(idx);
-        self.monotonic_mapping.mapping(from_val)
-    }
-
-    fn min_value(&self) -> Output {
-        let from_min_value = self.from_column.min_value();
-        self.monotonic_mapping.mapping(from_min_value)
-    }
-
-    fn max_value(&self) -> Output {
-        let from_max_value = self.from_column.max_value();
-        self.monotonic_mapping.mapping(from_max_value)
-    }
-
-    fn num_vals(&self) -> u32 {
-        self.from_column.num_vals()
-    }
-
-    fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
-        Box::new(
-            self.from_column
-                .iter()
-                .map(|el| self.monotonic_mapping.mapping(el)),
-        )
-    }
-
-    fn get_docids_for_value_range(
-        &self,
-        range: RangeInclusive<Output>,
-        doc_id_range: Range<u32>,
-        positions: &mut Vec<u32>,
-    ) {
-        self.from_column.get_docids_for_value_range(
-            self.monotonic_mapping.inverse(range.start().clone())
-                ..=self.monotonic_mapping.inverse(range.end().clone()),
-            doc_id_range,
-            positions,
-        )
-    }
-
-    // We voluntarily do not implement get_range as it yields a regression,
-    // and we do not have any specialized implementation anyway.
-}
-
-/// Wraps an iterator into a `Column`.
-pub struct IterColumn<T>(T);
-
-impl<T> From<T> for IterColumn<T>
-where T: Iterator + Clone + ExactSizeIterator
-{
-    fn from(iter: T) -> Self {
-        IterColumn(iter)
-    }
-}
-
-impl<T> ColumnValues<T::Item> for IterColumn<T>
-where
-    T: Iterator + Clone + ExactSizeIterator + Send + Sync,
-    T::Item: PartialOrd + Debug,
-{
-    fn get_val(&self, idx: u32) -> T::Item {
-        self.0.clone().nth(idx as usize).unwrap()
-    }
-
-    fn min_value(&self) -> T::Item {
-        self.0.clone().next().unwrap()
-    }
-
-    fn max_value(&self) -> T::Item {
-        self.0.clone().last().unwrap()
-    }
-
-    fn num_vals(&self) -> u32 {
-        self.0.len() as u32
-    }
-
-    fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
-        Box::new(self.0.clone())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::column_values::monotonic_mapping::{
-        StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternalBaseval,
-        StrictlyMonotonicMappingToInternalGCDBaseval,
-    };
-
-    #[test]
-    fn test_monotonic_mapping() {
-        let vals = &[3u64, 5u64][..];
-        let col = VecColumn::from(vals);
-        let mapped = monotonic_map_column(col, StrictlyMonotonicMappingToInternalBaseval::new(2));
-        assert_eq!(mapped.min_value(), 1u64);
-        assert_eq!(mapped.max_value(), 3u64);
-        assert_eq!(mapped.num_vals(), 2);
-        assert_eq!(mapped.num_vals(), 2);
-        assert_eq!(mapped.get_val(0), 1);
-        assert_eq!(mapped.get_val(1), 3);
-    }
-
-    #[test]
-    fn test_range_as_col() {
-        let col = IterColumn::from(10..100);
-        assert_eq!(col.num_vals(), 90);
-        assert_eq!(col.max_value(), 99);
-    }
-
-    #[test]
-    fn test_monotonic_mapping_iter() {
-        let vals: Vec<u64> = (10..110u64).map(|el| el * 10).collect();
-        let col = VecColumn::from(&vals);
-        let mapped = monotonic_map_column(
-            col,
-            StrictlyMonotonicMappingInverter::from(
-                StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100),
-            ),
-        );
-        let val_i64s: Vec<u64> = mapped.iter().collect();
-        for i in 0..100 {
-            assert_eq!(val_i64s[i as usize], mapped.get_val(i));
-        }
-    }
-
-    #[test]
-    fn test_monotonic_mapping_get_range() {
-        let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
-        let col = VecColumn::from(&vals);
-        let mapped = monotonic_map_column(
-            col,
-            StrictlyMonotonicMappingInverter::from(
-                StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 0),
-            ),
-        );
-
-        assert_eq!(mapped.min_value(), 0u64);
-        assert_eq!(mapped.max_value(), 9900u64);
-        assert_eq!(mapped.num_vals(), 100);
-        let val_u64s: Vec<u64> = mapped.iter().collect();
-        assert_eq!(val_u64s.len(), 100);
-        for i in 0..100 {
-            assert_eq!(val_u64s[i as usize], mapped.get_val(i));
-            assert_eq!(val_u64s[i as usize], vals[i as usize] * 10);
-        }
-        let mut buf = [0u64; 20];
-        mapped.get_range(7, &mut buf[..]);
-        assert_eq!(&val_u64s[7..][..20], &buf);
-    }
-}
41 columnar/src/column_values/merge.rs Normal file
@@ -0,0 +1,41 @@
+use std::fmt::Debug;
+use std::sync::Arc;
+
+use crate::iterable::Iterable;
+use crate::{ColumnIndex, ColumnValues, MergeRowOrder};
+
+pub(crate) struct MergedColumnValues<'a, T> {
+    pub(crate) column_indexes: &'a [Option<ColumnIndex>],
+    pub(crate) column_values: &'a [Option<Arc<dyn ColumnValues<T>>>],
+    pub(crate) merge_row_order: &'a MergeRowOrder,
+}
+
+impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
+    fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
+        match self.merge_row_order {
+            MergeRowOrder::Stack(_) => Box::new(
+                self.column_values
+                    .iter()
+                    .flatten()
+                    .flat_map(|column_value| column_value.iter()),
+            ),
+            MergeRowOrder::Shuffled(shuffle_merge_order) => Box::new(
+                shuffle_merge_order
+                    .iter_new_to_old_row_addrs()
+                    .flat_map(|row_addr| {
+                        let column_index =
+                            self.column_indexes[row_addr.segment_ord as usize].as_ref()?;
+                        let column_values =
+                            self.column_values[row_addr.segment_ord as usize].as_ref()?;
+                        let value_range = column_index.value_row_ids(row_addr.row_id);
+                        Some((value_range, column_values))
+                    })
+                    .flat_map(|(value_range, column_values)| {
+                        value_range
+                            .into_iter()
+                            .map(|val| column_values.get_val(val))
+                    }),
+            ),
+        }
+    }
+}
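For the `Stack` merge order above, values are simply concatenated segment by segment, skipping segments that do not carry the column. A small stand-alone sketch of that flattening, with plain `Vec`s standing in for per-segment column values (hypothetical stand-ins, not the crate's types):

// Each entry is one segment's column, or None if the segment lacks the column.
let segment_columns: Vec<Option<Vec<u64>>> = vec![Some(vec![1, 2]), None, Some(vec![3])];
let stacked: Vec<u64> = segment_columns
    .iter()
    .flatten()                            // skip segments without the column
    .flat_map(|col| col.iter().copied())  // concatenate values in segment order
    .collect();
assert_eq!(stacked, vec![1, 2, 3]);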
@@ -1,5 +1,4 @@
 #![warn(missing_docs)]
-#![cfg_attr(all(feature = "unstable", test), feature(test))]

 //! # `fastfield_codecs`
 //!
@@ -8,248 +7,214 @@
 //! - Monotonically map values to u64/u128

 use std::fmt::Debug;
-use std::io;
-use std::io::Write;
+use std::ops::{Range, RangeInclusive};
 use std::sync::Arc;

-use common::{BinarySerializable, OwnedBytes};
-use compact_space::CompactSpaceDecompressor;
 pub use monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
-use monotonic_mapping::{StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal};
 pub use monotonic_mapping_u128::MonotonicallyMappableToU128;
-use serialize::U128Header;

-mod compact_space;
+mod merge;
 pub(crate) mod monotonic_mapping;
 pub(crate) mod monotonic_mapping_u128;
 mod stats;
-pub(crate) mod u64_based;
+mod u128_based;
+mod u64_based;
+mod vec_column;

-mod column;
-pub mod serialize;
+mod monotonic_column;

-pub use serialize::serialize_column_values_u128;
-pub use stats::Stats;
+pub(crate) use merge::MergedColumnValues;
+pub use stats::ColumnStats;
+pub use u128_based::{open_u128_mapped, serialize_column_values_u128};
 pub use u64_based::{
     load_u64_based_column_values, serialize_and_load_u64_based_column_values,
     serialize_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
 };
+pub use vec_column::VecColumn;

-pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn};
-use crate::iterable::Iterable;
-use crate::{ColumnIndex, MergeRowOrder};
+pub use self::monotonic_column::monotonic_map_column;
+use crate::RowId;

-pub(crate) struct MergedColumnValues<'a, T> {
-    pub(crate) column_indexes: &'a [Option<ColumnIndex>],
-    pub(crate) column_values: &'a [Option<Arc<dyn ColumnValues<T>>>],
-    pub(crate) merge_row_order: &'a MergeRowOrder,
-}
-
-impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
-    fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
-        match self.merge_row_order {
-            MergeRowOrder::Stack(_) => {
-                Box::new(self
-                    .column_values
-                    .iter()
-                    .flatten()
-                    .flat_map(|column_value| column_value.iter()))
-            },
-            MergeRowOrder::Shuffled(shuffle_merge_order) => {
-                Box::new(shuffle_merge_order
-                    .iter_new_to_old_row_addrs()
-                    .flat_map(|row_addr| {
-                        let Some(column_index) = self.column_indexes[row_addr.segment_ord as usize].as_ref() else {
-                            return None;
-                        };
-                        let Some(column_values) = self.column_values[row_addr.segment_ord as usize].as_ref() else {
-                            return None;
-                        };
-                        let value_range = column_index.value_row_ids(row_addr.row_id);
-                        Some((value_range, column_values))
-                    })
-                    .flat_map(|(value_range, column_values)| {
-                        value_range
-                            .into_iter()
-                            .map(|val| column_values.get_val(val))
-                    })
-                )
-            },
+/// `ColumnValues` provides access to a dense field column.
+///
+/// `Column` are just a wrapper over `ColumnValues` and a `ColumnIndex`.
+///
+/// Any methods with a default and specialized implementation need to be called in the
+/// wrappers that implement the trait: Arc and MonotonicMappingColumn
+pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
+    /// Return the value associated with the given idx.
+    ///
+    /// This accessor should return as fast as possible.
+    ///
+    /// # Panics
+    ///
+    /// May panic if `idx` is greater than the column length.
+    fn get_val(&self, idx: u32) -> T;
+
+    /// Allows to push down multiple fetch calls, to avoid dynamic dispatch overhead.
+    ///
+    /// idx and output should have the same length
+    ///
+    /// # Panics
+    ///
+    /// May panic if `idx` is greater than the column length.
+    fn get_vals(&self, idx: &[u32], output: &mut [T]) {
+        assert!(idx.len() == output.len());
+        for (out, idx) in output.iter_mut().zip(idx.iter()) {
+            *out = self.get_val(*idx as u32);
         }
     }
-}

-#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
-#[repr(u8)]
-/// Available codecs to use to encode the u128 (via [`MonotonicallyMappableToU128`]) converted data.
-pub enum U128FastFieldCodecType {
-    /// This codec takes a large number space (u128) and reduces it to a compact number space, by
-    /// removing the holes.
-    CompactSpace = 1,
-}
-
-impl BinarySerializable for U128FastFieldCodecType {
-    fn serialize<W: Write + ?Sized>(&self, wrt: &mut W) -> io::Result<()> {
-        self.to_code().serialize(wrt)
-    }
-
-    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-        let code = u8::deserialize(reader)?;
-        let codec_type: Self = Self::from_code(code)
-            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Unknown code `{code}.`"))?;
-        Ok(codec_type)
-    }
-}
-
-impl U128FastFieldCodecType {
-    pub(crate) fn to_code(self) -> u8 {
-        self as u8
-    }
-
-    pub(crate) fn from_code(code: u8) -> Option<Self> {
-        match code {
-            1 => Some(Self::CompactSpace),
-            _ => None,
+    /// Fills an output buffer with the fast field values
+    /// associated with the `DocId` going from
+    /// `start` to `start + output.len()`.
+    ///
+    /// # Panics
+    ///
+    /// Must panic if `start + output.len()` is greater than
+    /// the segment's `maxdoc`.
+    #[inline(always)]
+    fn get_range(&self, start: u64, output: &mut [T]) {
+        for (out, idx) in output.iter_mut().zip(start..) {
+            *out = self.get_val(idx as u32);
         }
     }
-}

+    /// Get the row ids of values which are in the provided value range.
+    ///
+    /// Note that position == docid for single value fast fields
+    #[inline(always)]
+    fn get_row_ids_for_value_range(
+        &self,
+        value_range: RangeInclusive<T>,
+        row_id_range: Range<RowId>,
+        row_id_hits: &mut Vec<RowId>,
+    ) {
+        let row_id_range = row_id_range.start..row_id_range.end.min(self.num_vals());
+        for idx in row_id_range.start..row_id_range.end {
+            let val = self.get_val(idx);
+            if value_range.contains(&val) {
+                row_id_hits.push(idx);
+            }
+        }
+    }
+
+    /// Returns the minimum value for this fast field.
+    ///
+    /// This min_value may not be exact.
+    /// For instance, the min value does not take in account of possible
+    /// deleted document. All values are however guaranteed to be higher than
+    /// `.min_value()`.
+    fn min_value(&self) -> T;
+
+    /// Returns the maximum value for this fast field.
+    ///
+    /// This max_value may not be exact.
+    /// For instance, the max value does not take in account of possible
+    /// deleted document. All values are however guaranteed to be higher than
+    /// `.max_value()`.
+    fn max_value(&self) -> T;
+
+    /// The number of values in the column.
+    fn num_vals(&self) -> u32;
+
+    /// Returns a iterator over the data
+    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
+        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
+    }
 }

-/// Returns the correct codec reader wrapped in the `Arc` for the data.
-pub fn open_u128_mapped<T: MonotonicallyMappableToU128 + Debug>(
-    mut bytes: OwnedBytes,
-) -> io::Result<Arc<dyn ColumnValues<T>>> {
-    let header = U128Header::deserialize(&mut bytes)?;
-    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
-    let reader = CompactSpaceDecompressor::open(bytes)?;
-
-    let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<T>> =
-        StrictlyMonotonicMappingToInternal::<T>::new().into();
-    Ok(Arc::new(monotonic_map_column(reader, inverted)))
+impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
+    #[inline(always)]
+    fn get_val(&self, idx: u32) -> T {
+        self.as_ref().get_val(idx)
+    }
+
+    #[inline(always)]
+    fn min_value(&self) -> T {
+        self.as_ref().min_value()
+    }
+
+    #[inline(always)]
+    fn max_value(&self) -> T {
+        self.as_ref().max_value()
+    }
+
+    #[inline(always)]
+    fn num_vals(&self) -> u32 {
+        self.as_ref().num_vals()
+    }
+
+    #[inline(always)]
+    fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
+        self.as_ref().iter()
+    }
+
+    #[inline(always)]
+    fn get_range(&self, start: u64, output: &mut [T]) {
+        self.as_ref().get_range(start, output)
+    }
+
+    #[inline(always)]
+    fn get_row_ids_for_value_range(
+        &self,
+        range: RangeInclusive<T>,
+        doc_id_range: Range<u32>,
+        positions: &mut Vec<u32>,
+    ) {
+        self.as_ref()
+            .get_row_ids_for_value_range(range, doc_id_range, positions)
+    }
+}
+
+/// Wraps an cloneable iterator into a `Column`.
+pub struct IterColumn<T>(T);
+
+impl<T> From<T> for IterColumn<T>
+where T: Iterator + Clone + ExactSizeIterator
+{
+    fn from(iter: T) -> Self {
+        IterColumn(iter)
+    }
+}
+
+impl<T> ColumnValues<T::Item> for IterColumn<T>
+where
+    T: Iterator + Clone + ExactSizeIterator + Send + Sync,
+    T::Item: PartialOrd + Debug,
+{
+    fn get_val(&self, idx: u32) -> T::Item {
+        self.0.clone().nth(idx as usize).unwrap()
+    }
+
+    fn min_value(&self) -> T::Item {
+        self.0.clone().next().unwrap()
+    }
+
+    fn max_value(&self) -> T::Item {
+        self.0.clone().last().unwrap()
+    }
+
+    fn num_vals(&self) -> u32 {
+        self.0.len() as u32
+    }
+
+    fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
+        Box::new(self.0.clone())
+    }
 }

 #[cfg(all(test, feature = "unstable"))]
-mod bench {
-    use std::sync::Arc;
-
-    use common::OwnedBytes;
-    use rand::rngs::StdRng;
-    use rand::{Rng, SeedableRng};
-    use test::{self, Bencher};
-
+mod bench;
+
+#[cfg(test)]
+mod tests {
     use super::*;

-    fn get_data() -> Vec<u64> {
-        let mut rng = StdRng::seed_from_u64(2u64);
-        let mut data: Vec<_> = (100..55000_u64)
-            .map(|num| num + rng.gen::<u8>() as u64)
-            .collect();
-        data.push(99_000);
-        data.insert(1000, 2000);
-        data.insert(2000, 100);
-        data.insert(3000, 4100);
-        data.insert(4000, 100);
-        data.insert(5000, 800);
-        data
-    }
-
-    #[inline(never)]
-    fn value_iter() -> impl Iterator<Item = u64> {
-        0..20_000
-    }
-    fn get_reader_for_bench<Codec: FastFieldCodec>(data: &[u64]) -> Codec::Reader {
-        let mut bytes = Vec::new();
-        let min_value = *data.iter().min().unwrap();
-        let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
-        let col = VecColumn::from(&data);
-        let normalized_header = NormalizedHeader {
-            num_vals: col.num_vals(),
-            max_value: col.max_value(),
-        };
-        Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
-        Codec::open_from_bytes(OwnedBytes::new(bytes), normalized_header).unwrap()
-    }
-    fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
-        let col = get_reader_for_bench::<Codec>(data);
-        b.iter(|| {
-            let mut sum = 0u64;
-            for pos in value_iter() {
-                let val = col.get_val(pos as u32);
-                sum = sum.wrapping_add(val);
-            }
-            sum
-        });
-    }
-
-    #[inline(never)]
-    fn bench_get_dynamic_helper(b: &mut Bencher, col: Arc<dyn ColumnValues>) {
-        b.iter(|| {
-            let mut sum = 0u64;
-            for pos in value_iter() {
-                let val = col.get_val(pos as u32);
-                sum = sum.wrapping_add(val);
-            }
-            sum
-        });
-    }
-
-    fn bench_get_dynamic<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
-        let col = Arc::new(get_reader_for_bench::<Codec>(data));
-        bench_get_dynamic_helper(b, col);
-    }
-    fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
-        let min_value = *data.iter().min().unwrap();
-        let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
-
-        let mut bytes = Vec::new();
-        b.iter(|| {
-            bytes.clear();
-            Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
-        });
-    }
-
-    #[bench]
-    fn bench_fastfield_bitpack_create(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_create::<BitpackedCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_create::<LinearCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_create::<BlockwiseLinearCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_bitpack_get(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get::<BitpackedCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_bitpack_get_dynamic(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get_dynamic::<BitpackedCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get::<LinearCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_linearinterpol_get_dynamic(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get_dynamic::<LinearCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get::<BlockwiseLinearCodec>(b, &data);
-    }
-    #[bench]
-    fn bench_fastfield_multilinearinterpol_get_dynamic(b: &mut Bencher) {
-        let data: Vec<_> = get_data();
-        bench_get_dynamic::<BlockwiseLinearCodec>(b, &data);
+    #[test]
+    fn test_range_as_col() {
+        let col = IterColumn::from(10..100);
+        assert_eq!(col.num_vals(), 90);
+        assert_eq!(col.max_value(), 99);
     }
 }
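The defaulted `get_row_ids_for_value_range` in the trait above scans `row_id_range` and keeps the row ids whose value falls inside `value_range`. A short sketch of how a caller might drive it through `VecColumn` (which the new `mod.rs` re-exports); this is only an illustration, assuming `VecColumn: ColumnValues<u64>` as in the tests shown in this diff:

let vals: Vec<u64> = vec![10, 3, 7, 42, 5];
let col = VecColumn::from(&vals);
let mut hits = Vec::new();
// Keep the row ids of all values in [3, 7], scanning every row.
col.get_row_ids_for_value_range(3..=7, 0..col.num_vals(), &mut hits);
assert_eq!(hits, vec![1, 2, 4]);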
120 columnar/src/column_values/monotonic_column.rs Normal file
@@ -0,0 +1,120 @@
+use std::fmt::Debug;
+use std::marker::PhantomData;
+use std::ops::{Range, RangeInclusive};
+
+use crate::column_values::monotonic_mapping::StrictlyMonotonicFn;
+use crate::ColumnValues;
+
+struct MonotonicMappingColumn<C, T, Input> {
+    from_column: C,
+    monotonic_mapping: T,
+    _phantom: PhantomData<Input>,
+}
+
+/// Creates a view of a column transformed by a strictly monotonic mapping. See
+/// [`StrictlyMonotonicFn`].
+///
+/// E.g. apply a gcd monotonic_mapping([100, 200, 300]) == [1, 2, 3]
+/// monotonic_mapping.mapping() is expected to be injective, and we should always have
+/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
+///
+/// The inverse of the mapping is required for:
+/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u64> `
+/// The user provides the original value range and we need to monotonic map them in the same way the
+/// serialization does before calling the underlying column.
+///
+/// Note that when opening a codec, the monotonic_mapping should be the inverse of the mapping
+/// during serialization. And therefore the monotonic_mapping_inv when opening is the same as
+/// monotonic_mapping during serialization.
+pub fn monotonic_map_column<C, T, Input, Output>(
+    from_column: C,
+    monotonic_mapping: T,
+) -> impl ColumnValues<Output>
+where
+    C: ColumnValues<Input>,
+    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
+    Input: PartialOrd + Debug + Send + Sync + Clone,
+    Output: PartialOrd + Debug + Send + Sync + Clone,
+{
+    MonotonicMappingColumn {
+        from_column,
+        monotonic_mapping,
+        _phantom: PhantomData,
+    }
+}
+
+impl<C, T, Input, Output> ColumnValues<Output> for MonotonicMappingColumn<C, T, Input>
+where
+    C: ColumnValues<Input>,
+    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
+    Input: PartialOrd + Send + Debug + Sync + Clone,
+    Output: PartialOrd + Send + Debug + Sync + Clone,
+{
+    #[inline]
+    fn get_val(&self, idx: u32) -> Output {
+        let from_val = self.from_column.get_val(idx);
+        self.monotonic_mapping.mapping(from_val)
+    }
+
+    fn min_value(&self) -> Output {
+        let from_min_value = self.from_column.min_value();
+        self.monotonic_mapping.mapping(from_min_value)
+    }
+
+    fn max_value(&self) -> Output {
+        let from_max_value = self.from_column.max_value();
+        self.monotonic_mapping.mapping(from_max_value)
+    }
+
+    fn num_vals(&self) -> u32 {
+        self.from_column.num_vals()
+    }
+
+    fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
+        Box::new(
+            self.from_column
+                .iter()
+                .map(|el| self.monotonic_mapping.mapping(el)),
+        )
+    }
+
+    fn get_row_ids_for_value_range(
+        &self,
+        range: RangeInclusive<Output>,
+        doc_id_range: Range<u32>,
+        positions: &mut Vec<u32>,
+    ) {
+        self.from_column.get_row_ids_for_value_range(
+            self.monotonic_mapping.inverse(range.start().clone())
+                ..=self.monotonic_mapping.inverse(range.end().clone()),
+            doc_id_range,
+            positions,
+        )
+    }
+
+    // We voluntarily do not implement get_range as it yields a regression,
+    // and we do not have any specialized implementation anyway.
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::column_values::monotonic_mapping::{
+        StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
+    };
+    use crate::column_values::VecColumn;
+
+    #[test]
+    fn test_monotonic_mapping_iter() {
+        let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
+        let col = VecColumn::from(&vals);
+        let mapped = monotonic_map_column(
+            col,
+            StrictlyMonotonicMappingInverter::from(StrictlyMonotonicMappingToInternal::<i64>::new()),
+        );
+        let val_i64s: Vec<u64> = mapped.iter().collect();
+        for i in 0..100 {
+            assert_eq!(val_i64s[i as usize], mapped.get_val(i));
+        }
+    }
+}
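The doc comment above requires `inverse(mapping(el)) == el`. Below is a minimal sketch of a custom mapping that satisfies that contract (an add-a-constant shift; `AddOffset` is a hypothetical example, and the `StrictlyMonotonicFn` method signatures are inferred from the implementations visible in this diff):

struct AddOffset(u64);

impl StrictlyMonotonicFn<u64, u64> for AddOffset {
    // Strictly increasing, so ordering and range queries are preserved.
    fn mapping(&self, inp: u64) -> u64 {
        inp + self.0
    }
    // Exact inverse, required to translate value ranges back before
    // querying the underlying column.
    fn inverse(&self, out: u64) -> u64 {
        out - self.0
    }
}

// View the column shifted by 100; value-range queries go through `inverse`.
let vals: Vec<u64> = vec![1, 2, 3];
let shifted = monotonic_map_column(VecColumn::from(&vals), AddOffset(100));
assert_eq!(shifted.get_val(0), 101);
assert_eq!(shifted.min_value(), 101);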
@@ -1,7 +1,7 @@
 use std::fmt::Debug;
 use std::marker::PhantomData;

-use fastdivide::DividerU64;
+use common::DateTime;

 use super::MonotonicallyMappableToU128;
 use crate::RowId;
@@ -112,65 +112,6 @@ where T: MonotonicallyMappableToU64
     }
 }

-/// Mapping dividing by gcd and a base value.
-///
-/// The function is assumed to be only called on values divided by passed
-/// gcd value. (It is necessary for the function to be monotonic.)
-pub(crate) struct StrictlyMonotonicMappingToInternalGCDBaseval {
-    gcd_divider: DividerU64,
-    gcd: u64,
-    min_value: u64,
-}
-impl StrictlyMonotonicMappingToInternalGCDBaseval {
-    pub(crate) fn new(gcd: u64, min_value: u64) -> Self {
-        let gcd_divider = DividerU64::divide_by(gcd);
-        Self {
-            gcd_divider,
-            gcd,
-            min_value,
-        }
-    }
-}
-impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
-    for StrictlyMonotonicMappingToInternalGCDBaseval
-{
-    #[inline(always)]
-    fn mapping(&self, inp: External) -> u64 {
-        self.gcd_divider
-            .divide(External::to_u64(inp) - self.min_value)
-    }
-
-    #[inline(always)]
-    fn inverse(&self, out: u64) -> External {
-        External::from_u64(self.min_value + out * self.gcd)
-    }
-}
-
-/// Strictly monotonic mapping with a base value.
-pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
-    min_value: u64,
-}
-impl StrictlyMonotonicMappingToInternalBaseval {
-    #[inline(always)]
-    pub(crate) fn new(min_value: u64) -> Self {
-        Self { min_value }
-    }
-}
-
-impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
-    for StrictlyMonotonicMappingToInternalBaseval
-{
-    #[inline(always)]
-    fn mapping(&self, val: External) -> u64 {
-        External::to_u64(val) - self.min_value
-    }
-
-    #[inline(always)]
-    fn inverse(&self, val: u64) -> External {
-        External::from_u64(self.min_value + val)
-    }
-}
-
 impl MonotonicallyMappableToU64 for u64 {
     #[inline(always)]
     fn to_u64(self) -> u64 {
@@ -195,17 +136,15 @@ impl MonotonicallyMappableToU64 for i64 {
     }
 }

-impl MonotonicallyMappableToU64 for crate::DateTime {
+impl MonotonicallyMappableToU64 for DateTime {
     #[inline(always)]
     fn to_u64(self) -> u64 {
-        common::i64_to_u64(self.timestamp_micros)
+        common::i64_to_u64(self.into_timestamp_micros())
     }

     #[inline(always)]
     fn from_u64(val: u64) -> Self {
-        crate::DateTime {
-            timestamp_micros: common::u64_to_i64(val),
-        }
+        DateTime::from_timestamp_micros(common::u64_to_i64(val))
     }
 }

@@ -261,13 +200,6 @@ mod tests {
         // TODO
         // identity mapping
         // test_round_trip(&StrictlyMonotonicMappingToInternal::<u128>::new(), 100u128);
-
-        // base value to i64 round trip
-        let mapping = StrictlyMonotonicMappingToInternalBaseval::new(100);
-        test_round_trip::<_, _, u64>(&mapping, 100i64);
-        // base value and gcd to u64 round trip
-        let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100);
-        test_round_trip::<_, _, u64>(&mapping, 100u64);
     }

     fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L>(
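The `DateTime` impl above now goes through `common::i64_to_u64` on the microsecond timestamp. As used here, that helper is an order-preserving, reversible mapping between `i64` and `u64`, so comparisons on the mapped values match comparisons on the timestamps. A tiny sketch of the property being relied on (an illustration of the behavior assumed by this change, not a specification of `common`'s API):

let a = common::i64_to_u64(-5);
let b = common::i64_to_u64(0);
let c = common::i64_to_u64(5);
assert!(a < b && b < c);               // order preserved across the sign boundary
assert_eq!(common::u64_to_i64(a), -5); // and the mapping round-trips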
@@ -6,21 +6,28 @@ use common::{BinarySerializable, VInt};

 use crate::RowId;

+/// Column statistics.
 #[derive(Debug, Clone, Eq, PartialEq)]
-pub struct Stats {
+pub struct ColumnStats {
+    /// GCD of the elements `el - min(column)`.
     pub gcd: NonZeroU64,
+    /// Minimum value of the column.
     pub min_value: u64,
+    /// Maximum value of the column.
     pub max_value: u64,
+    /// Number of rows in the column.
     pub num_rows: RowId,
 }

-impl Stats {
+impl ColumnStats {
+    /// Amplitude of value.
+    /// Difference between the maximum and the minimum value.
     pub fn amplitude(&self) -> u64 {
         self.max_value - self.min_value
     }
 }

-impl BinarySerializable for Stats {
+impl BinarySerializable for ColumnStats {
     fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
         VInt(self.min_value).serialize(writer)?;
         VInt(self.gcd.get()).serialize(writer)?;
@@ -37,7 +44,7 @@ impl BinarySerializable for Stats {
         let amplitude = VInt::deserialize(reader)?.0 * gcd.get();
         let max_value = min_value + amplitude;
         let num_rows = VInt::deserialize(reader)?.0 as RowId;
-        Ok(Stats {
+        Ok(ColumnStats {
             min_value,
             max_value,
             num_rows,
@@ -52,21 +59,21 @@ mod tests {

     use common::BinarySerializable;

-    use crate::column_values::Stats;
+    use crate::column_values::ColumnStats;

     #[track_caller]
-    fn test_stats_ser_deser_aux(stats: &Stats, num_bytes: usize) {
+    fn test_stats_ser_deser_aux(stats: &ColumnStats, num_bytes: usize) {
         let mut buffer: Vec<u8> = Vec::new();
         stats.serialize(&mut buffer).unwrap();
         assert_eq!(buffer.len(), num_bytes);
-        let deser_stats = Stats::deserialize(&mut &buffer[..]).unwrap();
+        let deser_stats = ColumnStats::deserialize(&mut &buffer[..]).unwrap();
         assert_eq!(stats, &deser_stats);
     }

     #[test]
     fn test_stats_serialization() {
         test_stats_ser_deser_aux(
-            &(Stats {
+            &(ColumnStats {
                 gcd: NonZeroU64::new(3).unwrap(),
                 min_value: 1,
                 max_value: 3001,
@@ -75,7 +82,7 @@ mod tests {
             5,
         );
         test_stats_ser_deser_aux(
-            &(Stats {
+            &(ColumnStats {
                 gcd: NonZeroU64::new(1_000).unwrap(),
                 min_value: 1,
                 max_value: 3001,
@@ -84,7 +91,7 @@ mod tests {
             5,
         );
         test_stats_ser_deser_aux(
-            &(Stats {
+            &(ColumnStats {
                 gcd: NonZeroU64::new(1).unwrap(),
                 min_value: 0,
                 max_value: 0,
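`ColumnStats::amplitude()` above is simply `max_value - min_value`; together with `gcd` it bounds how many distinct normalized values a codec has to encode. A small sketch using the same figures as the first serialization test (the `num_rows` value is an arbitrary illustrative choice, not taken from the diff):

let stats = ColumnStats {
    gcd: NonZeroU64::new(3).unwrap(),
    min_value: 1,
    max_value: 3001,
    num_rows: 100, // hypothetical row count
};
assert_eq!(stats.amplitude(), 3000);
// After normalization (val - min_value) / gcd, values fall in 0..=1000.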
@@ -17,16 +17,16 @@ use std::{
     ops::{Range, RangeInclusive},
 };

+mod blank_range;
+mod build_compact_space;
+
+use build_compact_space::get_compact_space;
 use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
 use tantivy_bitpacker::{self, BitPacker, BitUnpacker};

-use crate::column_values::compact_space::build_compact_space::get_compact_space;
 use crate::column_values::ColumnValues;
 use crate::RowId;

-mod blank_range;
-mod build_compact_space;
-
 /// The cost per blank is quite hard actually, since blanks are delta encoded, the actual cost of
 /// blanks depends on the number of blanks.
 ///
@@ -313,7 +313,7 @@ impl ColumnValues<u128> for CompactSpaceDecompressor {
     }

     #[inline]
-    fn get_docids_for_value_range(
+    fn get_row_ids_for_value_range(
         &self,
         value_range: RangeInclusive<u128>,
         positions_range: Range<u32>,
@@ -464,7 +464,7 @@ mod tests {
     use itertools::Itertools;

     use super::*;
-    use crate::column_values::serialize::U128Header;
+    use crate::column_values::u128_based::U128Header;
     use crate::column_values::{open_u128_mapped, serialize_column_values_u128};

     #[test]
@@ -709,7 +709,7 @@ mod tests {
         doc_id_range: Range<u32>,
     ) -> Vec<u32> {
         let mut positions = Vec::new();
-        column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
+        column.get_row_ids_for_value_range(value_range, doc_id_range, &mut positions);
         positions
     }

@@ -1,25 +1,19 @@
 use std::fmt::Debug;
 use std::io;
+use std::io::Write;
+use std::sync::Arc;

-use common::{BinarySerializable, VInt};
-use crate::column_values::compact_space::CompactSpaceCompressor;
-use crate::column_values::U128FastFieldCodecType;
+mod compact_space;
+
+use common::{BinarySerializable, OwnedBytes, VInt};
+use compact_space::{CompactSpaceCompressor, CompactSpaceDecompressor};

+use crate::column_values::monotonic_map_column;
+use crate::column_values::monotonic_mapping::{
+    StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
+};
 use crate::iterable::Iterable;
-use crate::MonotonicallyMappableToU128;
+use crate::{ColumnValues, MonotonicallyMappableToU128};

-/// The normalized header gives some parameters after applying the following
-/// normalization of the vector:
-/// `val -> (val - min_value) / gcd`
-///
-/// By design, after normalization, `min_value = 0` and `gcd = 1`.
-#[derive(Debug, Copy, Clone)]
-pub struct NormalizedHeader {
-    /// The number of values in the underlying column.
-    pub num_vals: u32,
-    /// The max value of the underlying column.
-    pub max_value: u64,
-}
-
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub(crate) struct U128Header {
@@ -68,6 +62,52 @@ pub fn serialize_column_values_u128<T: MonotonicallyMappableToU128>(
     Ok(())
 }

+#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
+#[repr(u8)]
+/// Available codecs to use to encode the u128 (via [`MonotonicallyMappableToU128`]) converted data.
+pub(crate) enum U128FastFieldCodecType {
+    /// This codec takes a large number space (u128) and reduces it to a compact number space, by
+    /// removing the holes.
+    CompactSpace = 1,
+}
+
+impl BinarySerializable for U128FastFieldCodecType {
+    fn serialize<W: Write + ?Sized>(&self, wrt: &mut W) -> io::Result<()> {
+        self.to_code().serialize(wrt)
+    }
+
+    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+        let code = u8::deserialize(reader)?;
+        let codec_type: Self = Self::from_code(code)
+            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Unknown code `{code}.`"))?;
+        Ok(codec_type)
+    }
+}
+
+impl U128FastFieldCodecType {
+    pub(crate) fn to_code(self) -> u8 {
+        self as u8
+    }
+
+    pub(crate) fn from_code(code: u8) -> Option<Self> {
+        match code {
+            1 => Some(Self::CompactSpace),
+            _ => None,
+        }
+    }
+}
+
+/// Returns the correct codec reader wrapped in the `Arc` for the data.
+pub fn open_u128_mapped<T: MonotonicallyMappableToU128 + Debug>(
+    mut bytes: OwnedBytes,
+) -> io::Result<Arc<dyn ColumnValues<T>>> {
+    let header = U128Header::deserialize(&mut bytes)?;
+    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
+    let reader = CompactSpaceDecompressor::open(bytes)?;
+    let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<T>> =
+        StrictlyMonotonicMappingToInternal::<T>::new().into();
+    Ok(Arc::new(monotonic_map_column(reader, inverted)))
+}
 #[cfg(test)]
 pub mod tests {
     use super::*;
@@ -4,7 +4,7 @@ use common::{BinarySerializable, OwnedBytes};
 use fastdivide::DividerU64;
 use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
 
-use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
+use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, ColumnStats};
 use crate::{ColumnValues, RowId};
 
 /// Depending on the field type, a different
@@ -13,7 +13,7 @@ use crate::{ColumnValues, RowId};
 pub struct BitpackedReader {
     data: OwnedBytes,
     bit_unpacker: BitUnpacker,
-    stats: Stats,
+    stats: ColumnStats,
 }
 
 impl ColumnValues for BitpackedReader {
@@ -36,7 +36,7 @@ impl ColumnValues for BitpackedReader {
     }
 }
 
-fn num_bits(stats: &Stats) -> u8 {
+fn num_bits(stats: &ColumnStats) -> u8 {
     compute_num_bits(stats.amplitude() / stats.gcd)
 }
 
@@ -46,14 +46,14 @@ pub struct BitpackedCodecEstimator;
 impl ColumnCodecEstimator for BitpackedCodecEstimator {
     fn collect(&mut self, _value: u64) {}
 
-    fn estimate(&self, stats: &Stats) -> Option<u64> {
+    fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
         let num_bits_per_value = num_bits(stats);
         Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64) + 7) / 8)
     }
 
     fn serialize(
         &self,
-        stats: &Stats,
+        stats: &ColumnStats,
         vals: &mut dyn Iterator<Item = u64>,
         wrt: &mut dyn Write,
     ) -> io::Result<()> {
@@ -72,12 +72,12 @@ impl ColumnCodecEstimator for BitpackedCodecEstimator {
 pub struct BitpackedCodec;
 
 impl ColumnCodec for BitpackedCodec {
-    type Reader = BitpackedReader;
+    type ColumnValues = BitpackedReader;
     type Estimator = BitpackedCodecEstimator;
 
     /// Opens a fast field given a file.
-    fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
-        let stats = Stats::deserialize(&mut data)?;
+    fn load(mut data: OwnedBytes) -> io::Result<Self::ColumnValues> {
+        let stats = ColumnStats::deserialize(&mut data)?;
         let num_bits = num_bits(&stats);
         let bit_unpacker = BitUnpacker::new(num_bits);
         Ok(BitpackedReader {
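The bitpacked estimator above prices a column as the stats header plus `ceil(num_rows * num_bits / 8)`, where the bit width covers `amplitude / gcd`. A small standalone arithmetic check of that formula follows; the row count, min/max and gcd are made-up numbers, and the bit width is recomputed with plain bit arithmetic rather than the crate's `compute_num_bits`.

    fn main() {
        // Hypothetical column: 1_000 rows, values between 10 and 330, all multiples of 10.
        let num_rows: u64 = 1_000;
        let amplitude: u64 = 330 - 10; // max_value - min_value
        let gcd: u64 = 10;
        // Bits needed to represent amplitude / gcd = 32 -> 6 bits.
        let num_bits_per_value: u64 = 64 - (amplitude / gcd).leading_zeros() as u64;
        assert_eq!(num_bits_per_value, 6);
        // Payload size as in the estimator: ceil(num_rows * num_bits / 8) = 750 bytes,
        // to which the serialized ColumnStats header is added.
        let payload_bytes = (num_rows * num_bits_per_value + 7) / 8;
        assert_eq!(payload_bytes, 750);
    }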
@@ -7,7 +7,7 @@ use fastdivide::DividerU64;
 use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
 
 use crate::column_values::u64_based::line::Line;
-use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
+use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, ColumnStats};
 use crate::column_values::{ColumnValues, VecColumn};
 use crate::MonotonicallyMappableToU64;
 
@@ -84,7 +84,7 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
             self.block.clear();
         }
     }
-    fn estimate(&self, stats: &Stats) -> Option<u64> {
+    fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
         let mut estimate = 4 + stats.num_bytes() + self.meta_num_bytes + self.values_num_bytes;
         if stats.gcd.get() > 1 {
             let estimate_gain_from_gcd =
@@ -100,7 +100,7 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
 
     fn serialize(
         &self,
-        stats: &Stats,
+        stats: &ColumnStats,
         mut vals: &mut dyn Iterator<Item = u64>,
         wrt: &mut dyn Write,
     ) -> io::Result<()> {
@@ -165,12 +165,12 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
 pub struct BlockwiseLinearCodec;
 
 impl ColumnCodec<u64> for BlockwiseLinearCodec {
-    type Reader = BlockwiseLinearReader;
+    type ColumnValues = BlockwiseLinearReader;
 
     type Estimator = BlockwiseLinearEstimator;
 
-    fn load(mut bytes: OwnedBytes) -> io::Result<Self::Reader> {
-        let stats = Stats::deserialize(&mut bytes)?;
+    fn load(mut bytes: OwnedBytes) -> io::Result<Self::ColumnValues> {
+        let stats = ColumnStats::deserialize(&mut bytes)?;
         let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
         let footer_offset = bytes.len() - 4 - footer_len as usize;
         let (data, mut footer) = bytes.split(footer_offset);
@@ -195,14 +195,14 @@ impl ColumnCodec<u64> for BlockwiseLinearCodec {
 pub struct BlockwiseLinearReader {
     blocks: Arc<[Block]>,
     data: OwnedBytes,
-    stats: Stats,
+    stats: ColumnStats,
 }
 
 impl ColumnValues for BlockwiseLinearReader {
     #[inline(always)]
     fn get_val(&self, idx: u32) -> u64 {
-        let block_id = (idx / BLOCK_SIZE as u32) as usize;
-        let idx_within_block = idx % (BLOCK_SIZE as u32);
+        let block_id = (idx / BLOCK_SIZE) as usize;
+        let idx_within_block = idx % BLOCK_SIZE;
         let block = &self.blocks[block_id];
         let interpoled_val: u64 = block.line.eval(idx_within_block);
         let block_bytes = &self.data[block.data_start_offset..];
@@ -5,7 +5,7 @@ use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
 
 use super::line::Line;
 use super::ColumnValues;
-use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
+use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, ColumnStats};
 use crate::column_values::VecColumn;
 use crate::RowId;
 
@@ -18,7 +18,7 @@ const LINE_ESTIMATION_BLOCK_LEN: usize = 512;
 pub struct LinearReader {
     data: OwnedBytes,
     linear_params: LinearParams,
-    stats: Stats,
+    stats: ColumnStats,
 }
 
 impl ColumnValues for LinearReader {
@@ -106,7 +106,7 @@ impl ColumnCodecEstimator for LinearCodecEstimator {
         }
     }
 
-    fn estimate(&self, stats: &Stats) -> Option<u64> {
+    fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
         let line = self.line?;
         let amplitude = self.max_deviation - self.min_deviation;
         let num_bits = compute_num_bits(amplitude);
@@ -123,7 +123,7 @@ impl ColumnCodecEstimator for LinearCodecEstimator {
 
     fn serialize(
         &self,
-        stats: &Stats,
+        stats: &ColumnStats,
         vals: &mut dyn Iterator<Item = u64>,
         wrt: &mut dyn io::Write,
     ) -> io::Result<()> {
@@ -184,12 +184,12 @@ impl LinearCodecEstimator {
 }
 
 impl ColumnCodec for LinearCodec {
-    type Reader = LinearReader;
+    type ColumnValues = LinearReader;
 
     type Estimator = LinearCodecEstimator;
 
-    fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
-        let stats = Stats::deserialize(&mut data)?;
+    fn load(mut data: OwnedBytes) -> io::Result<Self::ColumnValues> {
+        let stats = ColumnStats::deserialize(&mut data)?;
         let linear_params = LinearParams::deserialize(&mut data)?;
         Ok(LinearReader {
             stats,
@@ -13,35 +13,61 @@ use common::{BinarySerializable, OwnedBytes};
 use crate::column_values::monotonic_mapping::{
     StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
 };
-use crate::column_values::u64_based::bitpacked::BitpackedCodec;
-use crate::column_values::u64_based::blockwise_linear::BlockwiseLinearCodec;
-use crate::column_values::u64_based::linear::LinearCodec;
-use crate::column_values::u64_based::stats_collector::StatsCollector;
-use crate::column_values::{monotonic_map_column, Stats};
+pub use crate::column_values::u64_based::bitpacked::BitpackedCodec;
+pub use crate::column_values::u64_based::blockwise_linear::BlockwiseLinearCodec;
+pub use crate::column_values::u64_based::linear::LinearCodec;
+pub use crate::column_values::u64_based::stats_collector::StatsCollector;
+use crate::column_values::{monotonic_map_column, ColumnStats};
 use crate::iterable::Iterable;
 use crate::{ColumnValues, MonotonicallyMappableToU64};
 
+/// A `ColumnCodecEstimator` is in charge of gathering all
+/// data required to serialize a column.
+///
+/// This happens during a first pass on data of the column elements.
+/// During that pass, all column estimators receive a call to their
+/// `.collect(el)`.
+///
+/// After this first pass, finalize is called.
+/// `.estimate(..)` then should return an accurate estimation of the
+/// size of the serialized column (were we to pick this codec.).
+/// `.serialize(..)` then serializes the column using this codec.
 pub trait ColumnCodecEstimator<T = u64>: 'static {
+    /// Records a new value for estimation.
+    /// This method will be called for each element of the column during
+    /// `estimation`.
     fn collect(&mut self, value: u64);
-    fn estimate(&self, stats: &Stats) -> Option<u64>;
+    /// Finalizes the first pass phase.
     fn finalize(&mut self) {}
+    /// Returns an accurate estimation of the number of bytes that will
+    /// be used to represent this column.
+    fn estimate(&self, stats: &ColumnStats) -> Option<u64>;
+    /// Serializes the column using the given codec.
+    /// This constitutes a second pass over the columns values.
     fn serialize(
         &self,
-        stats: &Stats,
+        stats: &ColumnStats,
         vals: &mut dyn Iterator<Item = T>,
         wrt: &mut dyn io::Write,
     ) -> io::Result<()>;
 }
 
+/// A column codec describes a colunm serialization format.
 pub trait ColumnCodec<T: PartialOrd = u64> {
-    type Reader: ColumnValues<T> + 'static;
+    /// Specialized `ColumnValues` type.
+    type ColumnValues: ColumnValues<T> + 'static;
+    /// `Estimator` for the given codec.
     type Estimator: ColumnCodecEstimator + Default;
 
-    fn load(bytes: OwnedBytes) -> io::Result<Self::Reader>;
+    /// Loads a column that has been serialized using this codec.
+    fn load(bytes: OwnedBytes) -> io::Result<Self::ColumnValues>;
 
+    /// Returns an estimator.
    fn estimator() -> Self::Estimator {
         Self::Estimator::default()
     }
 
+    /// Returns a boxed estimator.
     fn boxed_estimator() -> Box<dyn ColumnCodecEstimator> {
         Box::new(Self::estimator())
     }
@@ -62,6 +88,7 @@ pub enum CodecType {
     BlockwiseLinear = 2u8,
 }
 
+/// List of all available u64-base codecs.
 pub const ALL_U64_CODEC_TYPES: [CodecType; 3] = [
     CodecType::Bitpacked,
     CodecType::Linear,
@@ -106,6 +133,7 @@ fn load_specific_codec<C: ColumnCodec, T: MonotonicallyMappableToU64>(
 }
 
 impl CodecType {
+    /// Returns a boxed codec estimator associated to a given `CodecType`.
     pub fn estimator(&self) -> Box<dyn ColumnCodecEstimator> {
         match self {
             CodecType::Bitpacked => BitpackedCodec::boxed_estimator(),
@@ -115,7 +143,8 @@ impl CodecType {
     }
 }
 
-pub fn serialize_u64_based_column_values<'a, T: MonotonicallyMappableToU64>(
+/// Serializes a given column of u64-mapped values.
+pub fn serialize_u64_based_column_values<T: MonotonicallyMappableToU64>(
     vals: &dyn Iterable<T>,
     codec_types: &[CodecType],
     wrt: &mut dyn Write,
@@ -156,11 +185,14 @@ pub fn serialize_u64_based_column_values<'a, T: MonotonicallyMappableToU64>(
     Ok(())
 }
 
+/// Load u64-based column values.
+///
+/// This method first identifies the codec off the first byte.
 pub fn load_u64_based_column_values<T: MonotonicallyMappableToU64>(
     mut bytes: OwnedBytes,
 ) -> io::Result<Arc<dyn ColumnValues<T>>> {
     let codec_type: CodecType = bytes
-        .get(0)
+        .first()
         .copied()
         .and_then(CodecType::try_from_code)
         .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Failed to read codec type"))?;
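The doc comments added above describe a two-pass scheme: every candidate estimator `collect`s each value, `finalize` closes the pass, `estimate` prices each codec, and the cheapest codec performs the second (serialization) pass, writing its code as the first byte. The fragment below sketches the resulting round-trip in the style of the tests later in this diff; it assumes the same crate-internal items (`serialize_u64_based_column_values`, `load_u64_based_column_values`, `ALL_U64_CODEC_TYPES`, `OwnedBytes`) are in scope, for example inside this module's test file.

    let vals: Vec<u64> = vec![10, 20, 30];
    let mut buffer: Vec<u8> = Vec::new();
    // First pass over the values, codec selection, then serialization.
    serialize_u64_based_column_values(&&vals[..], &ALL_U64_CODEC_TYPES, &mut buffer).unwrap();
    // The first byte identifies the winning codec, so loading needs no extra hint.
    let col = load_u64_based_column_values::<u64>(OwnedBytes::new(buffer)).unwrap();
    assert_eq!(col.num_vals(), 3);
    assert_eq!(col.get_val(2), 30);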
@@ -2,7 +2,7 @@ use std::num::NonZeroU64;
 
 use fastdivide::DividerU64;
 
-use crate::column_values::Stats;
+use crate::column_values::ColumnStats;
 use crate::RowId;
 
 /// Compute the gcd of two non null numbers.
@@ -33,14 +33,14 @@ pub struct StatsCollector {
 }
 
 impl StatsCollector {
-    pub fn stats(&self) -> Stats {
+    pub fn stats(&self) -> ColumnStats {
         let (min_value, max_value) = self.min_max_opt.unwrap_or((0u64, 0u64));
         let increment_gcd = if let Some((increment_gcd, _)) = self.increment_gcd_opt {
             increment_gcd
         } else {
             NonZeroU64::new(1u64).unwrap()
         };
-        Stats {
+        ColumnStats {
             min_value,
             max_value,
             num_rows: self.num_rows,
@@ -97,9 +97,9 @@ mod tests {
     use std::num::NonZeroU64;
 
     use crate::column_values::u64_based::stats_collector::{compute_gcd, StatsCollector};
-    use crate::column_values::u64_based::Stats;
+    use crate::column_values::u64_based::ColumnStats;
 
-    fn compute_stats(vals: impl Iterator<Item = u64>) -> Stats {
+    fn compute_stats(vals: impl Iterator<Item = u64>) -> ColumnStats {
         let mut stats_collector = StatsCollector::default();
         for val in vals {
             stats_collector.collect(val);
@@ -144,7 +144,7 @@ mod tests {
     fn test_stats() {
         assert_eq!(
             compute_stats([].into_iter()),
-            Stats {
+            ColumnStats {
                 gcd: NonZeroU64::new(1).unwrap(),
                 min_value: 0,
                 max_value: 0,
@@ -153,7 +153,7 @@ mod tests {
         );
         assert_eq!(
             compute_stats([0, 1].into_iter()),
-            Stats {
+            ColumnStats {
                 gcd: NonZeroU64::new(1).unwrap(),
                 min_value: 0,
                 max_value: 1,
@@ -162,7 +162,7 @@ mod tests {
         );
         assert_eq!(
             compute_stats([0, 1].into_iter()),
-            Stats {
+            ColumnStats {
                 gcd: NonZeroU64::new(1).unwrap(),
                 min_value: 0,
                 max_value: 1,
@@ -171,7 +171,7 @@ mod tests {
         );
         assert_eq!(
             compute_stats([10, 20, 30].into_iter()),
-            Stats {
+            ColumnStats {
                 gcd: NonZeroU64::new(10).unwrap(),
                 min_value: 10,
                 max_value: 30,
@@ -180,7 +180,7 @@ mod tests {
        );
         assert_eq!(
             compute_stats([10, 50, 10, 30].into_iter()),
-            Stats {
+            ColumnStats {
                 gcd: NonZeroU64::new(20).unwrap(),
                 min_value: 10,
                 max_value: 50,
@@ -189,7 +189,7 @@ mod tests {
         );
         assert_eq!(
             compute_stats([10, 0, 30].into_iter()),
-            Stats {
+            ColumnStats {
                 gcd: NonZeroU64::new(10).unwrap(),
                 min_value: 0,
                 max_value: 30,
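The tests above pin down what the collector derives: the minimum, the maximum, and the gcd of the increments `val - min_value`. The standalone check below reproduces the `[10, 50, 10, 30]` case with a plain Euclid helper; the crate itself uses a fastdivide-based implementation, so this is only an illustration of the expected numbers.

    fn gcd(mut a: u64, mut b: u64) -> u64 {
        while b != 0 {
            let t = a % b;
            a = b;
            b = t;
        }
        a
    }

    fn main() {
        let vals = [10u64, 50, 10, 30];
        let min = *vals.iter().min().unwrap();
        let max = *vals.iter().max().unwrap();
        // gcd over the non-zero increments (40 and 20) -> 20, matching the test above.
        let increment_gcd = vals
            .iter()
            .map(|v| v - min)
            .filter(|&d| d != 0)
            .fold(0, gcd);
        assert_eq!((min, max, increment_gcd), (10, 50, 20));
    }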
@@ -1,6 +1,6 @@
 use proptest::prelude::*;
 use proptest::strategy::Strategy;
-use proptest::{prop_oneof, proptest};
+use proptest::{num, prop_oneof, proptest};
 
 #[test]
 fn test_serialize_and_load_simple() {
@@ -19,6 +19,62 @@ fn test_serialize_and_load_simple() {
     assert_eq!(col.get_val(1), 2);
     assert_eq!(col.get_val(2), 5);
 }
 
+#[test]
+fn test_empty_column_i64() {
+    let vals: [i64; 0] = [];
+    let mut num_acceptable_codecs = 0;
+    for codec in ALL_U64_CODEC_TYPES {
+        let mut buffer = Vec::new();
+        if serialize_u64_based_column_values(&&vals[..], &[codec], &mut buffer).is_err() {
+            continue;
+        }
+        num_acceptable_codecs += 1;
+        let col = load_u64_based_column_values::<i64>(OwnedBytes::new(buffer)).unwrap();
+        assert_eq!(col.num_vals(), 0);
+        assert_eq!(col.min_value(), i64::MIN);
+        assert_eq!(col.max_value(), i64::MIN);
+    }
+    assert!(num_acceptable_codecs > 0);
+}
+
+#[test]
+fn test_empty_column_u64() {
+    let vals: [u64; 0] = [];
+    let mut num_acceptable_codecs = 0;
+    for codec in ALL_U64_CODEC_TYPES {
+        let mut buffer = Vec::new();
+        if serialize_u64_based_column_values(&&vals[..], &[codec], &mut buffer).is_err() {
+            continue;
+        }
+        num_acceptable_codecs += 1;
+        let col = load_u64_based_column_values::<u64>(OwnedBytes::new(buffer)).unwrap();
+        assert_eq!(col.num_vals(), 0);
+        assert_eq!(col.min_value(), u64::MIN);
+        assert_eq!(col.max_value(), u64::MIN);
+    }
+    assert!(num_acceptable_codecs > 0);
+}
+
+#[test]
+fn test_empty_column_f64() {
+    let vals: [f64; 0] = [];
+    let mut num_acceptable_codecs = 0;
+    for codec in ALL_U64_CODEC_TYPES {
+        let mut buffer = Vec::new();
+        if serialize_u64_based_column_values(&&vals[..], &[codec], &mut buffer).is_err() {
+            continue;
+        }
+        num_acceptable_codecs += 1;
+        let col = load_u64_based_column_values::<f64>(OwnedBytes::new(buffer)).unwrap();
+        assert_eq!(col.num_vals(), 0);
+        // FIXME. f64::MIN would be better!
+        assert!(col.min_value().is_nan());
+        assert!(col.max_value().is_nan());
+    }
+    assert!(num_acceptable_codecs > 0);
+}
+
 pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
     vals: &[u64],
     name: &str,
@@ -60,7 +116,7 @@ pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
         .map(|(pos, _)| pos as u32)
         .collect();
     let mut positions = Vec::new();
-    reader.get_docids_for_value_range(
+    reader.get_row_ids_for_value_range(
         vals[test_rand_idx]..=vals[test_rand_idx],
         0..vals.len() as u32,
         &mut positions,
52
columnar/src/column_values/vec_column.rs
Normal file
@@ -0,0 +1,52 @@
+use std::fmt::Debug;
+
+use tantivy_bitpacker::minmax;
+
+use crate::ColumnValues;
+
+/// VecColumn provides `Column` over a slice.
+pub struct VecColumn<'a, T = u64> {
+    pub(crate) values: &'a [T],
+    pub(crate) min_value: T,
+    pub(crate) max_value: T,
+}
+
+impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColumn<'a, T> {
+    fn get_val(&self, position: u32) -> T {
+        self.values[position as usize]
+    }
+
+    fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
+        Box::new(self.values.iter().copied())
+    }
+
+    fn min_value(&self) -> T {
+        self.min_value
+    }
+
+    fn max_value(&self) -> T {
+        self.max_value
+    }
+
+    fn num_vals(&self) -> u32 {
+        self.values.len() as u32
+    }
+
+    fn get_range(&self, start: u64, output: &mut [T]) {
+        output.copy_from_slice(&self.values[start as usize..][..output.len()])
+    }
+}
+
+impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
+where V: AsRef<[T]> + ?Sized
+{
+    fn from(values: &'a V) -> Self {
+        let values = values.as_ref();
+        let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
+        Self {
+            values,
+            min_value,
+            max_value,
+        }
+    }
+}
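A short usage sketch for the new `VecColumn` wrapper follows, assuming the type and the `ColumnValues` trait from the file above are in scope; the `From` impl computes the min/max once via `minmax` at construction time, so the accessor calls below are just field reads.

    let vals: Vec<u64> = vec![3, 1, 7];
    let col: VecColumn<u64> = VecColumn::from(&vals);
    assert_eq!(col.num_vals(), 3);
    assert_eq!(col.get_val(2), 7);
    assert_eq!(col.min_value(), 1);
    assert_eq!(col.max_value(), 7);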
@@ -1,12 +1,14 @@
 use std::fmt::Debug;
 use std::net::Ipv6Addr;
 
+use serde::{Deserialize, Serialize};
+
 use crate::value::NumericalType;
 use crate::InvalidData;
 
 /// The column type represents the column type.
 /// Any changes need to be propagated to `COLUMN_TYPES`.
-#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd)]
+#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd, Serialize, Deserialize)]
 #[repr(u8)]
 pub enum ColumnType {
     I64 = 0u8,
@@ -111,7 +113,7 @@ impl HasAssociatedColumnType for bool {
     }
 }
 
-impl HasAssociatedColumnType for crate::DateTime {
+impl HasAssociatedColumnType for common::DateTime {
     fn column_type() -> ColumnType {
         ColumnType::DateTime
     }
@@ -143,7 +145,7 @@ mod tests {
         }
     }
     for code in COLUMN_TYPES.len() as u8..=u8::MAX {
-        assert!(ColumnType::try_from_code(code as u8).is_err());
+        assert!(ColumnType::try_from_code(code).is_err());
     }
 }
@@ -4,7 +4,7 @@ pub const VERSION_FOOTER_NUM_BYTES: usize = MAGIC_BYTES.len() + std::mem::size_o
 
 /// We end the file by these 4 bytes just to somewhat identify that
 /// this is indeed a columnar file.
-const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 066];
+const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 66];
 
 pub fn footer() -> [u8; VERSION_FOOTER_NUM_BYTES] {
     let mut footer_bytes = [0u8; VERSION_FOOTER_NUM_BYTES];
@@ -27,8 +27,8 @@ pub enum Version {
 }
 
 impl Version {
-    fn to_bytes(&self) -> [u8; 4] {
-        (*self as u32).to_le_bytes()
+    fn to_bytes(self) -> [u8; 4] {
+        (self as u32).to_le_bytes()
     }
 
     fn try_from_bytes(bytes: [u8; 4]) -> Result<Version, InvalidData> {
@@ -58,7 +58,7 @@ impl<'a> RemappedTermOrdinalsValues<'a> {
             .enumerate()
             .flat_map(|(segment_ord, byte_column)| {
                 let segment_ord = self.term_ord_mapping.get_segment(segment_ord as u32);
-                byte_column.into_iter().flat_map(move |bytes_column| {
+                byte_column.iter().flat_map(move |bytes_column| {
                     bytes_column
                         .ords()
                         .values
@@ -96,7 +96,7 @@ fn compute_term_bitset(column: &BytesColumn, row_bitset: &ReadOnlyBitSet) -> Bit
     let num_terms = column.dictionary().num_terms();
     let mut term_bitset = BitSet::with_max_value(num_terms as u32);
     for row_id in row_bitset.iter() {
-        for term_ord in column.term_ord_column.values(row_id) {
+        for term_ord in column.term_ord_column.values_for_doc(row_id) {
             term_bitset.insert(term_ord as u32);
         }
     }
@@ -191,7 +191,7 @@ struct TermOrdinalMapping {
 impl TermOrdinalMapping {
     fn add_segment(&mut self, max_term_ord: usize) {
         self.per_segment_new_term_ordinals
-            .push(vec![TermOrdinal::default(); max_term_ord as usize]);
+            .push(vec![TermOrdinal::default(); max_term_ord]);
     }
 
     fn register_from_to(&mut self, segment_ord: usize, from_ord: TermOrdinal, to_ord: TermOrdinal) {
@@ -2,8 +2,6 @@ mod merge_dict_column;
 mod merge_mapping;
 mod term_merger;
 
-// mod sorted_doc_id_column;
-
 use std::collections::{BTreeMap, HashMap, HashSet};
 use std::io;
 use std::net::Ipv6Addr;
@@ -54,14 +52,34 @@ impl From<ColumnType> for ColumnTypeCategory {
     }
 }
 
+/// Merge several columnar table together.
+///
+/// If several columns with the same name are conflicting with the numerical types in the
+/// input columnars, the first type compatible out of i64, u64, f64 in that order will be used.
+///
+/// `require_columns` makes it possible to ensure that some columns will be present in the
+/// resulting columnar. When a required column is a numerical column type, one of two things can
+/// happen:
+/// - If the required column type is compatible with all of the input columnar, the resulsting
+/// merged
+///   columnar will simply coerce the input column and use the required column type.
+/// - If the required column type is incompatible with one of the input columnar, the merged
+///   will fail with an InvalidData error.
+///
+/// `merge_row_order` makes it possible to remove or reorder row in the resulting
+/// `Columnar` table.
+///
+/// Reminder: a string and a numerical column may bare the same column name. This is not
+/// considered a conflict.
 pub fn merge_columnar(
     columnar_readers: &[&ColumnarReader],
+    required_columns: &[(String, ColumnType)],
     merge_row_order: MergeRowOrder,
     output: &mut impl io::Write,
 ) -> io::Result<()> {
     let mut serializer = ColumnarSerializer::new(output);
 
-    let columns_to_merge = group_columns_for_merge(columnar_readers)?;
+    let columns_to_merge = group_columns_for_merge(columnar_readers, required_columns)?;
     for ((column_name, column_type), columns) in columns_to_merge {
         let mut column_serializer =
             serializer.serialize_column(column_name.as_bytes(), column_type);
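A sketch of calling the extended `merge_columnar` with the new `required_columns` argument, written in the style of the tests later in this diff. `columnar1` and `columnar2` are assumed to be pre-built `ColumnarReader`s (for example via the `make_columnar` test helper), and forcing `("numbers", ColumnType::U64)` makes the merge coerce the inputs to u64 or fail with `InvalidData` as described above.

    let columnars: &[&ColumnarReader] = &[&columnar1, &columnar2];
    let stack_merge_order = StackMergeOrder::stack(columnars);
    let mut buffer: Vec<u8> = Vec::new();
    merge_columnar(
        columnars,
        &[("numbers".to_string(), ColumnType::U64)],
        MergeRowOrder::Stack(stack_merge_order),
        &mut buffer,
    )
    .unwrap();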
@@ -174,97 +192,183 @@ fn merge_column(
     Ok(())
 }
 
+struct GroupedColumns {
+    required_column_type: Option<ColumnType>,
+    columns: Vec<Option<DynamicColumn>>,
+    column_category: ColumnTypeCategory,
+}
+
+impl GroupedColumns {
+    fn for_category(column_category: ColumnTypeCategory, num_columnars: usize) -> Self {
+        GroupedColumns {
+            required_column_type: None,
+            columns: vec![None; num_columnars],
+            column_category,
+        }
+    }
+
+    /// Set the dynamic column for a given columnar.
+    fn set_column(&mut self, columnar_id: usize, column: DynamicColumn) {
+        self.columns[columnar_id] = Some(column);
+    }
+
+    /// Force the existence of a column, as well as its type.
+    fn require_type(&mut self, required_type: ColumnType) -> io::Result<()> {
+        if let Some(existing_required_type) = self.required_column_type {
+            if existing_required_type == required_type {
+                // This was just a duplicate in the `required_columns`.
+                // Nothing to do.
+                return Ok(());
+            } else {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidInput,
+                    "Required column conflicts with another required column of the same type \
+                     category.",
+                ));
+            }
+        }
+        self.required_column_type = Some(required_type);
+        Ok(())
+    }
+
+    /// Returns the column type after merge.
+    ///
+    /// This method does not check if the column types can actually be coerced to
+    /// this type.
+    fn column_type_after_merge(&self) -> ColumnType {
+        if let Some(required_type) = self.required_column_type {
+            return required_type;
+        }
+        let column_type: HashSet<ColumnType> = self
+            .columns
+            .iter()
+            .flatten()
+            .map(|column| column.column_type())
+            .collect();
+        if column_type.len() == 1 {
+            return column_type.into_iter().next().unwrap();
+        }
+        // At the moment, only the numerical categorical column type has more than one possible
+        // column type.
+        assert_eq!(self.column_category, ColumnTypeCategory::Numerical);
+        merged_numerical_columns_type(self.columns.iter().flatten()).into()
+    }
+}
+
+/// Returns the type of the merged numerical column.
+///
+/// This function picks the first numerical type out of i64, u64, f64 (order matters
+/// here), that is compatible with all the `columns`.
+///
+/// # Panics
+/// Panics if one of the column is not numerical.
+fn merged_numerical_columns_type<'a>(
+    columns: impl Iterator<Item = &'a DynamicColumn>,
+) -> NumericalType {
+    let mut compatible_numerical_types = CompatibleNumericalTypes::default();
+    for column in columns {
+        let (min_value, max_value) =
+            min_max_if_numerical(column).expect("All columns re required to be numerical");
+        compatible_numerical_types.accept_value(min_value);
+        compatible_numerical_types.accept_value(max_value);
+    }
+    compatible_numerical_types.to_numerical_type()
+}
+
+#[allow(clippy::type_complexity)]
 fn group_columns_for_merge(
     columnar_readers: &[&ColumnarReader],
+    required_columns: &[(String, ColumnType)],
 ) -> io::Result<BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>>> {
     // Each column name may have multiple types of column associated.
     // For merging we are interested in the same column type category since they can be merged.
-    let mut columns_grouped: HashMap<(String, ColumnTypeCategory), Vec<Option<DynamicColumn>>> =
-        HashMap::new();
+    let mut columns_grouped: HashMap<(String, ColumnTypeCategory), GroupedColumns> = HashMap::new();
 
-    let num_columnars = columnar_readers.len();
+    for &(ref column_name, column_type) in required_columns {
+        columns_grouped
+            .entry((column_name.clone(), column_type.into()))
+            .or_insert_with(|| {
+                GroupedColumns::for_category(column_type.into(), columnar_readers.len())
+            })
+            .require_type(column_type)?;
+    }
 
     for (columnar_id, columnar_reader) in columnar_readers.iter().enumerate() {
         let column_name_and_handle = columnar_reader.list_columns()?;
         for (column_name, handle) in column_name_and_handle {
-            let column_type_category: ColumnTypeCategory = handle.column_type().into();
-            let columns = columns_grouped
-                .entry((column_name, column_type_category))
-                .or_insert_with(|| vec![None; num_columnars]);
+            let column_category: ColumnTypeCategory = handle.column_type().into();
             let column = handle.open()?;
-            columns[columnar_id] = Some(column);
+            columns_grouped
+                .entry((column_name, column_category))
+                .or_insert_with(|| {
+                    GroupedColumns::for_category(column_category, columnar_readers.len())
+                })
+                .set_column(columnar_id, column);
         }
     }
 
     let mut merge_columns: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
-        BTreeMap::default();
+        Default::default();
 
-    for ((column_name, col_category), mut columns) in columns_grouped {
-        if col_category == ColumnTypeCategory::Numerical {
-            coerce_numerical_columns_to_same_type(&mut columns);
-        }
-        let column_type = columns
-            .iter()
-            .flatten()
-            .map(|col| col.column_type())
-            .next()
-            .unwrap();
-        merge_columns.insert((column_name, column_type), columns);
+    for ((column_name, _), mut grouped_columns) in columns_grouped {
+        let column_type = grouped_columns.column_type_after_merge();
+        coerce_columns(column_type, &mut grouped_columns.columns)?;
+        merge_columns.insert((column_name, column_type), grouped_columns.columns);
     }
 
     Ok(merge_columns)
 }
 
-/// Coerce a set of numerical columns to the same type.
-///
-/// If all columns are already from the same type, keep this type
-/// (even if they could all be coerced to i64).
-fn coerce_numerical_columns_to_same_type(columns: &mut [Option<DynamicColumn>]) {
-    let mut column_types: HashSet<NumericalType> = HashSet::default();
-    let mut compatible_numerical_types = CompatibleNumericalTypes::default();
-    for column in columns.iter().flatten() {
-        let min_value: NumericalValue;
-        let max_value: NumericalValue;
-        match column {
-            DynamicColumn::I64(column) => {
-                min_value = column.min_value().into();
-                max_value = column.max_value().into();
-            }
-            DynamicColumn::U64(column) => {
-                min_value = column.min_value().into();
-                max_value = column.min_value().into();
-            }
-            DynamicColumn::F64(column) => {
-                min_value = column.min_value().into();
-                max_value = column.min_value().into();
-            }
-            DynamicColumn::Bool(_)
-            | DynamicColumn::IpAddr(_)
-            | DynamicColumn::DateTime(_)
-            | DynamicColumn::Bytes(_)
-            | DynamicColumn::Str(_) => {
-                panic!("We expected only numerical columns.");
-            }
-        }
-        column_types.insert(column.column_type().numerical_type().unwrap());
-        compatible_numerical_types.accept_value(min_value);
-        compatible_numerical_types.accept_value(max_value);
-    }
-    if column_types.len() <= 1 {
-        // No need to do anything. The columns are already all from the same type.
-        // This is necessary to let use force a given type.
-
-        // TODO This works in a world where we do not allow a change of schema,
-        // but in the future, we will have to pass some kind of schema to enforce
-        // the logic.
-        return;
-    }
-    let coerce_type = compatible_numerical_types.to_numerical_type();
+fn coerce_columns(
+    column_type: ColumnType,
+    columns: &mut [Option<DynamicColumn>],
+) -> io::Result<()> {
     for column_opt in columns.iter_mut() {
         if let Some(column) = column_opt.take() {
-            *column_opt = column.coerce_numerical(coerce_type);
+            *column_opt = Some(coerce_column(column_type, column)?);
         }
     }
+    Ok(())
+}
+
+fn coerce_column(column_type: ColumnType, column: DynamicColumn) -> io::Result<DynamicColumn> {
+    if let Some(numerical_type) = column_type.numerical_type() {
+        column
+            .coerce_numerical(numerical_type)
+            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, ""))
+    } else {
+        if column.column_type() != column_type {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                format!(
+                    "Cannot coerce column of type `{:?}` to `{column_type:?}`",
+                    column.column_type()
+                ),
+            ));
+        }
+        Ok(column)
+    }
+}
+
+/// Returns the (min, max) of a column provided it is numerical (i64, u64. f64).
+///
+/// The min and the max are simply the numerical value as defined by `ColumnValue::min_value()`,
+/// and `ColumnValue::max_value()`.
+///
+/// It is important to note that these values are only guaranteed to be lower/upper bound
+/// (as opposed to min/max value).
+/// If a column is empty, the min and max values are currently set to 0.
+fn min_max_if_numerical(column: &DynamicColumn) -> Option<(NumericalValue, NumericalValue)> {
+    match column {
+        DynamicColumn::I64(column) => Some((column.min_value().into(), column.max_value().into())),
+        DynamicColumn::U64(column) => Some((column.min_value().into(), column.min_value().into())),
+        DynamicColumn::F64(column) => Some((column.min_value().into(), column.min_value().into())),
+        DynamicColumn::Bool(_)
+        | DynamicColumn::IpAddr(_)
+        | DynamicColumn::DateTime(_)
+        | DynamicColumn::Bytes(_)
+        | DynamicColumn::Str(_) => None,
+    }
 }
 
 #[cfg(test)]
@@ -1,107 +0,0 @@
-use std::sync::Arc;
-
-use fastfield_codecs::Column;
-use itertools::Itertools;
-
-use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
-use crate::SegmentReader;
-
-pub(crate) struct RemappedDocIdColumn<'a> {
-    doc_id_mapping: &'a SegmentDocIdMapping,
-    fast_field_readers: Vec<Arc<dyn Column<u64>>>,
-    min_value: u64,
-    max_value: u64,
-    num_vals: u32,
-}
-
-fn compute_min_max_val(
-    u64_reader: &dyn Column<u64>,
-    segment_reader: &SegmentReader,
-) -> Option<(u64, u64)> {
-    if segment_reader.max_doc() == 0 {
-        return None;
-    }
-
-    if segment_reader.alive_bitset().is_none() {
-        // no deleted documents,
-        // we can use the previous min_val, max_val.
-        return Some((u64_reader.min_value(), u64_reader.max_value()));
-    }
-    // some deleted documents,
-    // we need to recompute the max / min
-    segment_reader
-        .doc_ids_alive()
-        .map(|doc_id| u64_reader.get_val(doc_id))
-        .minmax()
-        .into_option()
-}
-
-impl<'a> RemappedDocIdColumn<'a> {
-    pub(crate) fn new(
-        readers: &'a [SegmentReader],
-        doc_id_mapping: &'a SegmentDocIdMapping,
-        field: &str,
-    ) -> Self {
-        let (min_value, max_value) = readers
-            .iter()
-            .filter_map(|reader| {
-                let u64_reader: Arc<dyn Column<u64>> =
-                    reader.fast_fields().typed_fast_field_reader(field).expect(
-                        "Failed to find a reader for single fast field. This is a tantivy bug and \
-                         it should never happen.",
-                    );
-                compute_min_max_val(&*u64_reader, reader)
-            })
-            .reduce(|a, b| (a.0.min(b.0), a.1.max(b.1)))
-            .expect("Unexpected error, empty readers in IndexMerger");
-
-        let fast_field_readers = readers
-            .iter()
-            .map(|reader| {
-                let u64_reader: Arc<dyn Column<u64>> =
-                    reader.fast_fields().typed_fast_field_reader(field).expect(
-                        "Failed to find a reader for single fast field. This is a tantivy bug and \
-                         it should never happen.",
-                    );
-                u64_reader
-            })
-            .collect::<Vec<_>>();
-
-        RemappedDocIdColumn {
-            doc_id_mapping,
-            fast_field_readers,
-            min_value,
-            max_value,
-            num_vals: doc_id_mapping.len() as u32,
-        }
-    }
-}
-
-impl<'a> Column for RemappedDocIdColumn<'a> {
-    fn get_val(&self, _doc: u32) -> u64 {
-        unimplemented!()
-    }
-
-    fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
-        Box::new(
-            self.doc_id_mapping
-                .iter_old_doc_addrs()
-                .map(|old_doc_addr| {
-                    let fast_field_reader =
-                        &self.fast_field_readers[old_doc_addr.segment_ord as usize];
-                    fast_field_reader.get_val(old_doc_addr.doc_id)
-                }),
-        )
-    }
-    fn min_value(&self) -> u64 {
-        self.min_value
-    }
-
-    fn max_value(&self) -> u64 {
-        self.max_value
-    }
-
-    fn num_vals(&self) -> u32 {
-        self.num_vals
-    }
-}
@@ -24,7 +24,7 @@ fn test_column_coercion_to_u64() {
     // u64 type
     let columnar2 = make_columnar("numbers", &[u64::MAX]);
     let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
-        group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
+        group_columns_for_merge(&[&columnar1, &columnar2], &[]).unwrap();
     assert_eq!(column_map.len(), 1);
     assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
 }
@@ -34,7 +34,7 @@ fn test_column_no_coercion_if_all_the_same() {
     let columnar1 = make_columnar("numbers", &[1u64]);
     let columnar2 = make_columnar("numbers", &[2u64]);
     let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
-        group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
+        group_columns_for_merge(&[&columnar1, &columnar2], &[]).unwrap();
     assert_eq!(column_map.len(), 1);
     assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
 }
@@ -44,17 +44,74 @@ fn test_column_coercion_to_i64() {
     let columnar1 = make_columnar("numbers", &[-1i64]);
     let columnar2 = make_columnar("numbers", &[2u64]);
     let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
-        group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
+        group_columns_for_merge(&[&columnar1, &columnar2], &[]).unwrap();
     assert_eq!(column_map.len(), 1);
     assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
 }
 
+#[test]
+fn test_impossible_coercion_returns_an_error() {
+    let columnar1 = make_columnar("numbers", &[u64::MAX]);
+    let group_error =
+        group_columns_for_merge(&[&columnar1], &[("numbers".to_string(), ColumnType::I64)])
+            .map(|_| ())
+            .unwrap_err();
+    assert_eq!(group_error.kind(), io::ErrorKind::InvalidInput);
+}
+
+#[test]
+fn test_group_columns_with_required_column() {
+    let columnar1 = make_columnar("numbers", &[1i64]);
+    let columnar2 = make_columnar("numbers", &[2u64]);
+    let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
+        group_columns_for_merge(
+            &[&columnar1, &columnar2],
+            &[("numbers".to_string(), ColumnType::U64)],
+        )
+        .unwrap();
+    assert_eq!(column_map.len(), 1);
+    assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
+}
+
+#[test]
+fn test_group_columns_required_column_with_no_existing_columns() {
+    let columnar1 = make_columnar("numbers", &[2u64]);
+    let columnar2 = make_columnar("numbers", &[2u64]);
+    let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
+        group_columns_for_merge(
+            &[&columnar1, &columnar2],
+            &[("required_col".to_string(), ColumnType::Str)],
+        )
+        .unwrap();
+    assert_eq!(column_map.len(), 2);
+    let columns = column_map
+        .get(&("required_col".to_string(), ColumnType::Str))
+        .unwrap();
+    assert_eq!(columns.len(), 2);
+    assert!(columns[0].is_none());
+    assert!(columns[1].is_none());
+}
+
+#[test]
+fn test_group_columns_required_column_is_above_all_columns_have_the_same_type_rule() {
+    let columnar1 = make_columnar("numbers", &[2i64]);
+    let columnar2 = make_columnar("numbers", &[2i64]);
+    let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
+        group_columns_for_merge(
+            &[&columnar1, &columnar2],
+            &[("numbers".to_string(), ColumnType::U64)],
+        )
+        .unwrap();
+    assert_eq!(column_map.len(), 1);
+    assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
+}
+
 #[test]
 fn test_missing_column() {
     let columnar1 = make_columnar("numbers", &[-1i64]);
     let columnar2 = make_columnar("numbers2", &[2u64]);
     let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
-        group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
+        group_columns_for_merge(&[&columnar1, &columnar2], &[]).unwrap();
     assert_eq!(column_map.len(), 2);
     assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
     {
@@ -101,7 +158,7 @@ fn make_byte_columnar_multiple_columns(columns: &[(&str, &[&[&[u8]]])]) -> Colum
    for (column_name, column_values) in columns {
        for (row_id, vals) in column_values.iter().enumerate() {
            for val in vals.iter() {
-               dataframe_writer.record_bytes(row_id as u32, column_name, *val);
+               dataframe_writer.record_bytes(row_id as u32, column_name, val);
            }
        }
    }
@@ -122,7 +179,7 @@ fn make_text_columnar_multiple_columns(columns: &[(&str, &[&[&str]])]) -> Column
    for (column_name, column_values) in columns {
        for (row_id, vals) in column_values.iter().enumerate() {
            for val in vals.iter() {
-               dataframe_writer.record_str(row_id as u32, column_name, *val);
+               dataframe_writer.record_str(row_id as u32, column_name, val);
            }
        }
    }
@@ -151,6 +208,7 @@ fn test_merge_columnar_numbers() {
    let stack_merge_order = StackMergeOrder::stack(columnars);
    crate::columnar::merge_columnar(
        columnars,
+       &[],
        MergeRowOrder::Stack(stack_merge_order),
        &mut buffer,
    )
@@ -176,6 +234,7 @@ fn test_merge_columnar_texts() {
    let stack_merge_order = StackMergeOrder::stack(columnars);
    crate::columnar::merge_columnar(
        columnars,
+       &[],
        MergeRowOrder::Stack(stack_merge_order),
        &mut buffer,
    )
@@ -220,6 +279,7 @@ fn test_merge_columnar_byte() {
    let stack_merge_order = StackMergeOrder::stack(columnars);
    crate::columnar::merge_columnar(
        columnars,
+       &[],
        MergeRowOrder::Stack(stack_merge_order),
        &mut buffer,
    )
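The `merge_columnar` calls above gain a second argument. Judging from the `group_columns_for_merge` test earlier in this diff, it appears to list (column name, column type) pairs that should be forced into the merged output; `&[]` keeps the previous behaviour. A minimal sketch in the style of these tests (the helpers `make_columnar`, `StackMergeOrder` and `MergeRowOrder` are the ones used in this test module; the "price" column and the forced type are made up for illustration):

    let columnar1 = make_columnar("price", &[10i64]);
    let columnar2 = make_columnar("price", &[20i64]);
    let columnars = &[&columnar1, &columnar2][..];
    let stack_merge_order = StackMergeOrder::stack(columnars);
    let mut buffer: Vec<u8> = Vec::new();
    crate::columnar::merge_columnar(
        columnars,
        // Assumption: force the merged output to expose "price" as a u64 column.
        &[("price".to_string(), ColumnType::U64)],
        MergeRowOrder::Stack(stack_merge_order),
        &mut buffer,
    )
    .unwrap();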
@@ -1 +0,0 @@
(single-line file removed; its contents are not visible in this view)
@@ -1,7 +1,6 @@
mod column_type;
mod format_version;
mod merge;
-mod merge_index;
mod reader;
mod writer;

@@ -21,6 +21,32 @@ pub struct ColumnarReader {
    num_rows: RowId,
}

+/// Functions by both the async/sync code listing columns.
+/// It takes a stream from the column sstable and return the list of
+/// `DynamicColumn` available in it.
+fn read_all_columns_in_stream(
+    mut stream: sstable::Streamer<'_, RangeSSTable>,
+    column_data: &FileSlice,
+) -> io::Result<Vec<DynamicColumnHandle>> {
+    let mut results = Vec::new();
+    while stream.advance() {
+        let key_bytes: &[u8] = stream.key();
+        let Some(column_code) = key_bytes.last().copied() else {
+            return Err(io_invalid_data("Empty column name.".to_string()));
+        };
+        let column_type = ColumnType::try_from_code(column_code)
+            .map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
+        let range = stream.value();
+        let file_slice = column_data.slice(range.start as usize..range.end as usize);
+        let dynamic_column_handle = DynamicColumnHandle {
+            file_slice,
+            column_type,
+        };
+        results.push(dynamic_column_handle);
+    }
+    Ok(results)
+}
+
impl ColumnarReader {
    /// Opens a new Columnar file.
    pub fn open<F>(file_slice: F) -> io::Result<ColumnarReader>
@@ -76,11 +102,7 @@ impl ColumnarReader {
        Ok(results)
    }

-   /// Get all columns for the given column name.
-   ///
-   /// There can be more than one column associated to a given column name, provided they have
-   /// different types.
-   pub fn read_columns(&self, column_name: &str) -> io::Result<Vec<DynamicColumnHandle>> {
+   fn stream_for_column_range(&self, column_name: &str) -> sstable::StreamerBuilder<RangeSSTable> {
        // Each column is a associated to a given `column_key`,
        // that starts by `column_name\0column_header`.
        //
@@ -89,36 +111,35 @@ impl ColumnarReader {
        //
        // This is in turn equivalent to searching for the range
        // `[column_name,\0`..column_name\1)`.
-       // TODO can we get some more generic `prefix(..)` logic in the dictioanry.
+       // TODO can we get some more generic `prefix(..)` logic in the dictionary.
        let mut start_key = column_name.to_string();
        start_key.push('\0');
        let mut end_key = column_name.to_string();
        end_key.push(1u8 as char);
-       let mut stream = self
-           .column_dictionary
+       self.column_dictionary
            .range()
            .ge(start_key.as_bytes())
            .lt(end_key.as_bytes())
-           .into_stream()?;
-       let mut results = Vec::new();
-       while stream.advance() {
-           let key_bytes: &[u8] = stream.key();
-           assert!(key_bytes.starts_with(start_key.as_bytes()));
-           let column_code: u8 = key_bytes.last().cloned().unwrap();
-           let column_type = ColumnType::try_from_code(column_code)
-               .map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
-           let range = stream.value().clone();
-           let file_slice = self
-               .column_data
-               .slice(range.start as usize..range.end as usize);
-           let dynamic_column_handle = DynamicColumnHandle {
-               file_slice,
-               column_type,
-           };
-           results.push(dynamic_column_handle);
-       }
-       Ok(results)
+   }
+
+   pub async fn read_columns_async(
+       &self,
+       column_name: &str,
+   ) -> io::Result<Vec<DynamicColumnHandle>> {
+       let stream = self
+           .stream_for_column_range(column_name)
+           .into_stream_async()
+           .await?;
+       read_all_columns_in_stream(stream, &self.column_data)
+   }
+
+   /// Get all columns for the given column name.
+   ///
+   /// There can be more than one column associated to a given column name, provided they have
+   /// different types.
+   pub fn read_columns(&self, column_name: &str) -> io::Result<Vec<DynamicColumnHandle>> {
+       let stream = self.stream_for_column_range(column_name).into_stream()?;
+       read_all_columns_in_stream(stream, &self.column_data)
    }

    /// Return the number of columns in the columnar.
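The refactor above factors the dictionary range scan into `stream_for_column_range`, which is now shared by the sync and async listing paths. The lookup relies on column keys of the form `column_name\0<type code>`, so listing every typed column under one name is a half-open range scan. A small illustrative sketch of that key construction (not part of the diff; the helper name is made up):

    fn column_key_range(column_name: &str) -> (Vec<u8>, Vec<u8>) {
        let mut start_key = column_name.as_bytes().to_vec();
        start_key.push(0u8); // "column_name\0", the lowest key with this name
        let mut end_key = column_name.as_bytes().to_vec();
        end_key.push(1u8); // "column_name\1", just past every "\0"-suffixed key
        (start_key, end_key)
    }

    #[test]
    fn key_range_is_a_prefix_scan() {
        let (start, end) = column_key_range("price");
        assert!(start.starts_with(b"price") && start < end);
    }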
@@ -162,7 +183,7 @@ mod tests {
    }

    #[test]
-   #[should_panic(expect = "Input type forbidden")]
+   #[should_panic(expected = "Input type forbidden")]
    fn test_list_columns_strict_typing_panics_on_wrong_types() {
        let mut columnar_writer = ColumnarWriter::default();
        columnar_writer.record_column_type("count", ColumnType::U64, false);
@@ -310,7 +310,7 @@ mod tests {
        buffer.extend_from_slice(b"234234");
        let mut bytes = &buffer[..];
        let serdeser_symbol = ColumnOperation::deserialize(&mut bytes).unwrap();
-       assert_eq!(bytes.len() + buf.as_ref().len() as usize, buffer.len());
+       assert_eq!(bytes.len() + buf.as_ref().len(), buffer.len());
        assert_eq!(column_op, serdeser_symbol);
    }

@@ -341,7 +341,7 @@ mod tests {
    fn test_column_operation_unordered_aux(val: u32, expected_len: usize) {
        let column_op = ColumnOperation::Value(UnorderedId(val));
        let minibuf = column_op.serialize();
-       assert_eq!(minibuf.as_ref().len() as usize, expected_len);
+       assert_eq!({ minibuf.as_ref().len() }, expected_len);
        let mut buf = minibuf.as_ref().to_vec();
        buf.extend_from_slice(&[2, 2, 2, 2, 2, 2]);
        let mut cursor = &buf[..];
@@ -47,6 +47,7 @@ struct SpareBuffers {
/// let mut wrt: Vec<u8> = Vec::new();
/// columnar_writer.serialize(2u32, None, &mut wrt).unwrap();
/// ```
+#[derive(Default)]
pub struct ColumnarWriter {
    numerical_field_hash_map: ArenaHashMap,
    datetime_field_hash_map: ArenaHashMap,
@@ -60,22 +61,6 @@ pub struct ColumnarWriter {
    buffers: SpareBuffers,
}

-impl Default for ColumnarWriter {
-    fn default() -> Self {
-        ColumnarWriter {
-            numerical_field_hash_map: ArenaHashMap::new(10_000),
-            bool_field_hash_map: ArenaHashMap::new(10_000),
-            ip_addr_field_hash_map: ArenaHashMap::new(10_000),
-            bytes_field_hash_map: ArenaHashMap::new(10_000),
-            str_field_hash_map: ArenaHashMap::new(10_000),
-            datetime_field_hash_map: ArenaHashMap::new(10_000),
-            dictionaries: Vec::new(),
-            arena: MemoryArena::default(),
-            buffers: SpareBuffers::default(),
-        }
-    }
-}
-
#[inline]
fn mutate_or_create_column<V, TMutator>(
    arena_hash_map: &mut ArenaHashMap,
@@ -266,11 +251,15 @@ impl ColumnarWriter {
        });
    }

-   pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: crate::DateTime) {
+   pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: common::DateTime) {
        let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
        mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
            let mut column: ColumnWriter = column_opt.unwrap_or_default();
-           column.record(doc, NumericalValue::I64(datetime.timestamp_micros), arena);
+           column.record(
+               doc,
+               NumericalValue::I64(datetime.into_timestamp_micros()),
+               arena,
+           );
            column
        });
    }
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut Vec<u64>) {
|
fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut [u64]) {
|
||||||
let mut start_index: usize = 0;
|
let mut start_index: usize = 0;
|
||||||
for end_index in multivalued_index.iter().copied() {
|
for end_index in multivalued_index.iter().copied() {
|
||||||
let end_index = end_index as usize;
|
let end_index = end_index as usize;
|
||||||
@@ -772,7 +761,7 @@ mod tests {
|
|||||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
|
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
|
||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||||
.operation_iterator(&mut arena, None, &mut buffer)
|
.operation_iterator(&arena, None, &mut buffer)
|
||||||
.collect();
|
.collect();
|
||||||
assert_eq!(symbols.len(), 6);
|
assert_eq!(symbols.len(), 6);
|
||||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||||
@@ -801,7 +790,7 @@ mod tests {
|
|||||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
|
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
|
||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||||
.operation_iterator(&mut arena, None, &mut buffer)
|
.operation_iterator(&arena, None, &mut buffer)
|
||||||
.collect();
|
.collect();
|
||||||
assert_eq!(symbols.len(), 4);
|
assert_eq!(symbols.len(), 4);
|
||||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
|
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
|
||||||
@@ -824,7 +813,7 @@ mod tests {
|
|||||||
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
|
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
|
||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||||
.operation_iterator(&mut arena, None, &mut buffer)
|
.operation_iterator(&arena, None, &mut buffer)
|
||||||
.collect();
|
.collect();
|
||||||
assert_eq!(symbols.len(), 2);
|
assert_eq!(symbols.len(), 2);
|
||||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||||
@@ -843,7 +832,7 @@ mod tests {
|
|||||||
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
|
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
|
||||||
let mut buffer = Vec::new();
|
let mut buffer = Vec::new();
|
||||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||||
.operation_iterator(&mut arena, None, &mut buffer)
|
.operation_iterator(&arena, None, &mut buffer)
|
||||||
.collect();
|
.collect();
|
||||||
assert_eq!(symbols.len(), 3);
|
assert_eq!(symbols.len(), 3);
|
||||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||||
|
|||||||
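The signature change above (`&mut Vec<u64>` to `&mut [u64]`) works because the function only needs to sort each row's values in place; `multivalued_index` appears to hold the exclusive end offset of every row's values. A small illustrative sketch of that per-row sorting pattern (not the diff's exact code):

    fn sort_rows(multivalued_index: &[u32], values: &mut [u64]) {
        let mut start = 0usize;
        for end in multivalued_index.iter().copied() {
            let end = end as usize;
            // Each row owns the slice values[start..end]; sort it independently.
            values[start..end].sort_unstable();
            start = end;
        }
    }

    #[test]
    fn rows_are_sorted_independently() {
        // Two rows: values[0..2] and values[2..4].
        let mut values = vec![3u64, 1, 9, 2];
        sort_rows(&[2, 4], &mut values);
        assert_eq!(values, vec![1, 3, 2, 9]);
    }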
@@ -29,7 +29,7 @@ pub struct OptionalIndexBuilder {
}

impl OptionalIndexBuilder {
-   pub fn finish<'a>(&'a mut self, num_rows: RowId) -> impl Iterable<RowId> + 'a {
+   pub fn finish(&mut self, num_rows: RowId) -> impl Iterable<RowId> + '_ {
        debug_assert!(self
            .docs
            .last()
@@ -150,11 +150,7 @@ mod tests {
        multivalued_value_index_builder.record_row(2u32);
        multivalued_value_index_builder.record_value();
        assert_eq!(
-           multivalued_value_index_builder
-               .finish(4u32)
-               .iter()
-               .copied()
-               .collect::<Vec<u32>>(),
+           multivalued_value_index_builder.finish(4u32).to_vec(),
            vec![0, 0, 2, 3, 3]
        );
        multivalued_value_index_builder.reset();
@@ -162,11 +158,7 @@ mod tests {
        multivalued_value_index_builder.record_value();
        multivalued_value_index_builder.record_value();
        assert_eq!(
-           multivalued_value_index_builder
-               .finish(4u32)
-               .iter()
-               .copied()
-               .collect::<Vec<u32>>(),
+           multivalued_value_index_builder.finish(4u32).to_vec(),
            vec![0, 0, 0, 2, 2]
        );
    }
@@ -3,12 +3,12 @@ use std::net::Ipv6Addr;
use std::sync::Arc;

use common::file_slice::FileSlice;
-use common::{HasLen, OwnedBytes};
+use common::{DateTime, HasLen, OwnedBytes};

use crate::column::{BytesColumn, Column, StrColumn};
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
use crate::columnar::ColumnType;
-use crate::{Cardinality, DateTime, NumericalType};
+use crate::{Cardinality, NumericalType};

#[derive(Clone)]
pub enum DynamicColumn {
@@ -166,9 +166,9 @@ impl StrictlyMonotonicFn<i64, u64> for MapI64ToU64 {

macro_rules! static_dynamic_conversions {
    ($typ:ty, $enum_name:ident) => {
-       impl Into<Option<$typ>> for DynamicColumn {
-           fn into(self) -> Option<$typ> {
-               if let DynamicColumn::$enum_name(col) = self {
+       impl From<DynamicColumn> for Option<$typ> {
+           fn from(dynamic_column: DynamicColumn) -> Option<$typ> {
+               if let DynamicColumn::$enum_name(col) = dynamic_column {
                    Some(col)
                } else {
                    None
@@ -188,7 +188,7 @@ static_dynamic_conversions!(Column<bool>, Bool);
static_dynamic_conversions!(Column<u64>, U64);
static_dynamic_conversions!(Column<i64>, I64);
static_dynamic_conversions!(Column<f64>, F64);
-static_dynamic_conversions!(Column<crate::DateTime>, DateTime);
+static_dynamic_conversions!(Column<DateTime>, DateTime);
static_dynamic_conversions!(StrColumn, Str);
static_dynamic_conversions!(BytesColumn, Bytes);
static_dynamic_conversions!(Column<Ipv6Addr>, IpAddr);
@@ -206,10 +206,9 @@ impl DynamicColumnHandle {
        self.open_internal(column_bytes)
    }

-   // TODO rename load_async
-   pub async fn open_async(&self) -> io::Result<DynamicColumn> {
-       let column_bytes: OwnedBytes = self.file_slice.read_bytes_async().await?;
-       self.open_internal(column_bytes)
+   #[doc(hidden)]
+   pub fn file_slice(&self) -> &FileSlice {
+       &self.file_slice
    }

    /// Returns the `u64` fast field reader reader associated with `fields` of types
@@ -243,7 +242,7 @@ impl DynamicColumnHandle {
        ColumnType::Bool => crate::column::open_column_u64::<bool>(column_bytes)?.into(),
        ColumnType::IpAddr => crate::column::open_column_u128::<Ipv6Addr>(column_bytes)?.into(),
        ColumnType::DateTime => {
-           crate::column::open_column_u64::<crate::DateTime>(column_bytes)?.into()
+           crate::column::open_column_u64::<DateTime>(column_bytes)?.into()
        }
    };
    Ok(dynamic_column)
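The macro change above replaces the hand-written `Into<Option<_>>` impl with the idiomatic `From<DynamicColumn> for Option<_>`, which still gives callers `.into()` for free. A small illustrative sketch of how such a generated conversion is used (not part of the diff):

    fn as_i64_column(dynamic_column: DynamicColumn) -> Option<Column<i64>> {
        // Returns Some(column) only when the dynamic column is the I64 variant.
        dynamic_column.into()
    }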
@@ -32,6 +32,7 @@ pub use value::{NumericalType, NumericalValue};
pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle};

pub type RowId = u32;
+pub type DocId = u32;

#[derive(Clone, Copy)]
pub struct RowAddr {
@@ -42,16 +43,7 @@ pub struct RowAddr {
pub use sstable::Dictionary;
pub type Streamer<'a> = sstable::Streamer<'a, VoidSSTable>;

-#[derive(Clone, Copy, PartialOrd, PartialEq, Default, Debug)]
-pub struct DateTime {
-    pub timestamp_micros: i64,
-}
-
-impl DateTime {
-    pub fn into_timestamp_micros(self) -> i64 {
-        self.timestamp_micros
-    }
-}
+pub use common::DateTime;

#[derive(Copy, Clone, Debug)]
pub struct InvalidData;
@@ -75,7 +75,7 @@ fn test_dataframe_writer_u64_multivalued() {
        divisor_col.get_cardinality(),
        crate::Cardinality::Multivalued
    );
-   assert_eq!(divisor_col.num_rows(), 7);
+   assert_eq!(divisor_col.num_docs(), 7);
}

#[test]
@@ -1,3 +1,5 @@
+use common::DateTime;
+
use crate::InvalidData;

#[derive(Copy, Clone, PartialEq, Debug)]
@@ -104,10 +106,10 @@ impl Coerce for f64 {
    }
}

-impl Coerce for crate::DateTime {
+impl Coerce for DateTime {
    fn coerce(value: NumericalValue) -> Self {
        let timestamp_micros = i64::coerce(value);
-       crate::DateTime { timestamp_micros }
+       DateTime::from_timestamp_micros(timestamp_micros)
    }
}

@@ -13,9 +13,10 @@ repository = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
-byteorder = "1.4.3"
ownedbytes = { version= "0.5", path="../ownedbytes" }
async-trait = "0.1"
+time = { version = "0.3.10", features = ["serde-well-known"] }
+serde = { version = "1.0.136", features = ["derive"] }

[dev-dependencies]
proptest = "1.0.0"
common/src/datetime.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
use std::fmt;

use serde::{Deserialize, Serialize};
use time::format_description::well_known::Rfc3339;
use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};

/// DateTime Precision
#[derive(
    Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Default,
)]
#[serde(rename_all = "lowercase")]
pub enum DatePrecision {
    /// Seconds precision
    #[default]
    Seconds,
    /// Milli-seconds precision.
    Milliseconds,
    /// Micro-seconds precision.
    Microseconds,
}

/// A date/time value with microsecond precision.
///
/// This timestamp does not carry any explicit time zone information.
/// Users are responsible for applying the provided conversion
/// functions consistently. Internally the time zone is assumed
/// to be UTC, which is also used implicitly for JSON serialization.
///
/// All constructors and conversions are provided as explicit
/// functions and not by implementing any `From`/`Into` traits
/// to prevent unintended usage.
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DateTime {
    // Timestamp in microseconds.
    pub(crate) timestamp_micros: i64,
}

impl DateTime {
    /// Create new from UNIX timestamp in seconds
    pub const fn from_timestamp_secs(seconds: i64) -> Self {
        Self {
            timestamp_micros: seconds * 1_000_000,
        }
    }

    /// Create new from UNIX timestamp in milliseconds
    pub const fn from_timestamp_millis(milliseconds: i64) -> Self {
        Self {
            timestamp_micros: milliseconds * 1_000,
        }
    }

    /// Create new from UNIX timestamp in microseconds.
    pub const fn from_timestamp_micros(microseconds: i64) -> Self {
        Self {
            timestamp_micros: microseconds,
        }
    }

    /// Create new from `OffsetDateTime`
    ///
    /// The given date/time is converted to UTC and the actual
    /// time zone is discarded.
    pub const fn from_utc(dt: OffsetDateTime) -> Self {
        let timestamp_micros = dt.unix_timestamp() * 1_000_000 + dt.microsecond() as i64;
        Self { timestamp_micros }
    }

    /// Create new from `PrimitiveDateTime`
    ///
    /// Implicitly assumes that the given date/time is in UTC!
    /// Otherwise the original value must only be reobtained with
    /// [`Self::into_primitive()`].
    pub fn from_primitive(dt: PrimitiveDateTime) -> Self {
        Self::from_utc(dt.assume_utc())
    }

    /// Convert to UNIX timestamp in seconds.
    pub const fn into_timestamp_secs(self) -> i64 {
        self.timestamp_micros / 1_000_000
    }

    /// Convert to UNIX timestamp in milliseconds.
    pub const fn into_timestamp_millis(self) -> i64 {
        self.timestamp_micros / 1_000
    }

    /// Convert to UNIX timestamp in microseconds.
    pub const fn into_timestamp_micros(self) -> i64 {
        self.timestamp_micros
    }

    /// Convert to UTC `OffsetDateTime`
    pub fn into_utc(self) -> OffsetDateTime {
        let timestamp_nanos = self.timestamp_micros as i128 * 1000;
        let utc_datetime = OffsetDateTime::from_unix_timestamp_nanos(timestamp_nanos)
            .expect("valid UNIX timestamp");
        debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
        utc_datetime
    }

    /// Convert to `OffsetDateTime` with the given time zone
    pub fn into_offset(self, offset: UtcOffset) -> OffsetDateTime {
        self.into_utc().to_offset(offset)
    }

    /// Convert to `PrimitiveDateTime` without any time zone
    ///
    /// The value should have been constructed with [`Self::from_primitive()`].
    /// Otherwise the time zone is implicitly assumed to be UTC.
    pub fn into_primitive(self) -> PrimitiveDateTime {
        let utc_datetime = self.into_utc();
        // Discard the UTC time zone offset
        debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
        PrimitiveDateTime::new(utc_datetime.date(), utc_datetime.time())
    }

    /// Truncates the microseconds value to the corresponding precision.
    pub fn truncate(self, precision: DatePrecision) -> Self {
        let truncated_timestamp_micros = match precision {
            DatePrecision::Seconds => (self.timestamp_micros / 1_000_000) * 1_000_000,
            DatePrecision::Milliseconds => (self.timestamp_micros / 1_000) * 1_000,
            DatePrecision::Microseconds => self.timestamp_micros,
        };
        Self {
            timestamp_micros: truncated_timestamp_micros,
        }
    }
}

impl fmt::Debug for DateTime {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let utc_rfc3339 = self.into_utc().format(&Rfc3339).map_err(|_| fmt::Error)?;
        f.write_str(&utc_rfc3339)
    }
}
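A minimal usage sketch for the new `common::DateTime` shown above (illustration only; the timestamps are made up):

    use common::{DatePrecision, DateTime};

    fn datetime_round_trip() {
        let dt = DateTime::from_timestamp_millis(1_700_000_123_456);
        assert_eq!(dt.into_timestamp_secs(), 1_700_000_123);
        // Truncation drops sub-second precision but keeps the type.
        let truncated = dt.truncate(DatePrecision::Seconds);
        assert_eq!(truncated.into_timestamp_micros(), 1_700_000_123 * 1_000_000);
    }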
@@ -2,15 +2,15 @@

use std::ops::Deref;

-pub use byteorder::LittleEndian as Endianness;
-
mod bitset;
+mod datetime;
pub mod file_slice;
mod group_by;
mod serialize;
mod vint;
mod writer;
pub use bitset::*;
+pub use datetime::{DatePrecision, DateTime};
pub use group_by::GroupByIteratorExtended;
pub use ownedbytes::{OwnedBytes, StableDeref};
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
@@ -107,6 +107,21 @@ pub fn u64_to_f64(val: u64) -> f64 {
    })
}

+/// Replaces a given byte in the `bytes` slice of bytes.
+///
+/// This function assumes that the needle is rarely contained in the bytes string
+/// and offers a fast path if the needle is not present.
+pub fn replace_in_place(needle: u8, replacement: u8, bytes: &mut [u8]) {
+    if !bytes.contains(&needle) {
+        return;
+    }
+    for b in bytes {
+        if *b == needle {
+            *b = replacement;
+        }
+    }
+}
+
#[cfg(test)]
pub mod test {

@@ -171,4 +186,20 @@ pub mod test {
        assert!(f64_to_u64(-2.0) < f64_to_u64(1.0));
        assert!(f64_to_u64(-2.0) < f64_to_u64(-1.5));
    }
+
+    #[test]
+    fn test_replace_in_place() {
+        let test_aux = |before_replacement: &[u8], expected: &[u8]| {
+            let mut bytes: Vec<u8> = before_replacement.to_vec();
+            super::replace_in_place(b'b', b'c', &mut bytes);
+            assert_eq!(&bytes[..], expected);
+        };
+        test_aux(b"", b"");
+        test_aux(b"b", b"c");
+        test_aux(b"baaa", b"caaa");
+        test_aux(b"aaab", b"aaac");
+        test_aux(b"aaabaa", b"aaacaa");
+        test_aux(b"aaaaaa", b"aaaaaa");
+        test_aux(b"bbbb", b"cccc");
+    }
}
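A short usage sketch for the `replace_in_place` helper added above (illustration only; the byte values and the sanitized name are made up, not taken from the diff):

    fn sanitize_separator() {
        let mut key = b"user\0name".to_vec();
        common::replace_in_place(b'\0', b'.', &mut key);
        assert_eq!(&key[..], &b"user.name"[..]);
    }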
@@ -1,9 +1,7 @@
use std::io::{Read, Write};
use std::{fmt, io};

-use byteorder::{ReadBytesExt, WriteBytesExt};
-
-use crate::{Endianness, VInt};
+use crate::VInt;

#[derive(Default)]
struct Counter(u64);
@@ -107,11 +105,13 @@ impl<Left: BinarySerializable + FixedSize, Right: BinarySerializable + FixedSize

impl BinarySerializable for u32 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_u32::<Endianness>(*self)
+       writer.write_all(&self.to_le_bytes())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<u32> {
-       reader.read_u32::<Endianness>()
+       let mut buf = [0u8; 4];
+       reader.read_exact(&mut buf)?;
+       Ok(u32::from_le_bytes(buf))
    }
}

@@ -121,11 +121,13 @@ impl FixedSize for u32 {

impl BinarySerializable for u16 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_u16::<Endianness>(*self)
+       writer.write_all(&self.to_le_bytes())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<u16> {
-       reader.read_u16::<Endianness>()
+       let mut buf = [0u8; 2];
+       reader.read_exact(&mut buf)?;
+       Ok(Self::from_le_bytes(buf))
    }
}

@@ -135,10 +137,12 @@ impl FixedSize for u16 {

impl BinarySerializable for u64 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_u64::<Endianness>(*self)
+       writer.write_all(&self.to_le_bytes())
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-       reader.read_u64::<Endianness>()
+       let mut buf = [0u8; 8];
+       reader.read_exact(&mut buf)?;
+       Ok(Self::from_le_bytes(buf))
    }
}

@@ -148,10 +152,12 @@ impl FixedSize for u64 {

impl BinarySerializable for u128 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_u128::<Endianness>(*self)
+       writer.write_all(&self.to_le_bytes())
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-       reader.read_u128::<Endianness>()
+       let mut buf = [0u8; 16];
+       reader.read_exact(&mut buf)?;
+       Ok(Self::from_le_bytes(buf))
    }
}

@@ -161,10 +167,12 @@ impl FixedSize for u128 {

impl BinarySerializable for f32 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_f32::<Endianness>(*self)
+       writer.write_all(&self.to_le_bytes())
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-       reader.read_f32::<Endianness>()
+       let mut buf = [0u8; 4];
+       reader.read_exact(&mut buf)?;
+       Ok(Self::from_le_bytes(buf))
    }
}

@@ -174,10 +182,12 @@ impl FixedSize for f32 {

impl BinarySerializable for i64 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_i64::<Endianness>(*self)
+       writer.write_all(&self.to_le_bytes())
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-       reader.read_i64::<Endianness>()
+       let mut buf = [0u8; Self::SIZE_IN_BYTES];
+       reader.read_exact(&mut buf)?;
+       Ok(Self::from_le_bytes(buf))
    }
}

@@ -187,10 +197,12 @@ impl FixedSize for i64 {

impl BinarySerializable for f64 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_f64::<Endianness>(*self)
+       writer.write_all(&self.to_le_bytes())
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
-       reader.read_f64::<Endianness>()
+       let mut buf = [0u8; Self::SIZE_IN_BYTES];
+       reader.read_exact(&mut buf)?;
+       Ok(Self::from_le_bytes(buf))
    }
}

@@ -200,10 +212,12 @@ impl FixedSize for f64 {

impl BinarySerializable for u8 {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_u8(*self)
+       writer.write_all(&self.to_le_bytes())
    }
-   fn deserialize<R: Read>(reader: &mut R) -> io::Result<u8> {
-       reader.read_u8()
+   fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
+       let mut buf = [0u8; Self::SIZE_IN_BYTES];
+       reader.read_exact(&mut buf)?;
+       Ok(Self::from_le_bytes(buf))
    }
}

@@ -213,10 +227,10 @@ impl FixedSize for u8 {

impl BinarySerializable for bool {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
-       writer.write_u8(u8::from(*self))
+       (*self as u8).serialize(writer)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
-       let val = reader.read_u8()?;
+       let val = u8::deserialize(reader)?;
        match val {
            0 => Ok(false),
            1 => Ok(true),
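These hunks drop the `byteorder` extension traits in favour of the standard library's `to_le_bytes`/`from_le_bytes`, which keeps the on-disk little-endian layout unchanged. A round-trip sketch using the `BinarySerializable` trait from this module (illustration only):

    use common::BinarySerializable;

    fn u32_round_trip() {
        let mut buf: Vec<u8> = Vec::new();
        1234u32.serialize(&mut buf).unwrap();
        // Same byte layout as before the change: little-endian.
        assert_eq!(buf, 1234u32.to_le_bytes());
        let mut cursor = &buf[..];
        assert_eq!(u32::deserialize(&mut cursor).unwrap(), 1234u32);
    }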
@@ -1,8 +1,6 @@
use std::io;
use std::io::{Read, Write};

-use byteorder::{ByteOrder, LittleEndian};
-
use super::BinarySerializable;

/// Variable int serializes a u128 number
@@ -127,7 +125,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
            5,
        ),
    };
-   LittleEndian::write_u64(&mut buf[..], res);
+   buf.copy_from_slice(&res.to_le_bytes());
    &buf[0..num_bytes]
}

@@ -1,130 +0,0 @@
(entire file removed)
// # Aggregation example
//
// This example shows how you can use built-in aggregations.
// We will use range buckets and compute the average in each bucket.
//

use serde_json::Value;
use tantivy::aggregation::agg_req::{
    Aggregation, Aggregations, BucketAggregation, BucketAggregationType, MetricAggregation,
    RangeAggregation,
};
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::metric::AverageAggregation;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::TermQuery;
use tantivy::schema::{self, IndexRecordOption, Schema, TextFieldIndexing};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text_fieldtype = schema::TextOptions::default()
        .set_indexing_options(
            TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
        )
        .set_stored();
    let text_field = schema_builder.add_text_field("text", text_fieldtype);
    let score_fieldtype =
        crate::schema::NumericOptions::default().set_fast();
    let highscore_field = schema_builder.add_f64_field("highscore", score_fieldtype.clone());
    let price_field = schema_builder.add_f64_field("price", score_fieldtype);

    let schema = schema_builder.build();

    // # Indexing documents
    //
    // Lets index a bunch of documents for this example.
    let index = Index::create_in_ram(schema);

    let mut index_writer = index.writer(50_000_000)?;
    // writing the segment
    index_writer.add_document(doc!(
        text_field => "cool",
        highscore_field => 1f64,
        price_field => 0f64,
    ))?;
    index_writer.add_document(doc!(
        text_field => "cool",
        highscore_field => 3f64,
        price_field => 1f64,
    ))?;
    index_writer.add_document(doc!(
        text_field => "cool",
        highscore_field => 5f64,
        price_field => 1f64,
    ))?;
    index_writer.add_document(doc!(
        text_field => "nohit",
        highscore_field => 6f64,
        price_field => 2f64,
    ))?;
    index_writer.add_document(doc!(
        text_field => "cool",
        highscore_field => 7f64,
        price_field => 2f64,
    ))?;
    index_writer.commit()?;
    index_writer.add_document(doc!(
        text_field => "cool",
        highscore_field => 11f64,
        price_field => 10f64,
    ))?;
    index_writer.add_document(doc!(
        text_field => "cool",
        highscore_field => 14f64,
        price_field => 15f64,
    ))?;

    index_writer.add_document(doc!(
        text_field => "cool",
        highscore_field => 15f64,
        price_field => 20f64,
    ))?;

    index_writer.commit()?;

    let reader = index.reader()?;
    let text_field = reader.searcher().schema().get_field("text").unwrap();

    let term_query = TermQuery::new(
        Term::from_field_text(text_field, "cool"),
        IndexRecordOption::Basic,
    );

    let sub_agg_req_1: Aggregations = vec![(
        "average_price".to_string(),
        Aggregation::Metric(MetricAggregation::Average(
            AverageAggregation::from_field_name("price".to_string()),
        )),
    )]
    .into_iter()
    .collect();

    let agg_req_1: Aggregations = vec![(
        "score_ranges".to_string(),
        Aggregation::Bucket(BucketAggregation {
            bucket_agg: BucketAggregationType::Range(RangeAggregation {
                field: "highscore".to_string(),
                ranges: vec![
                    (-1f64..9f64).into(),
                    (9f64..14f64).into(),
                    (14f64..20f64).into(),
                ],
                ..Default::default()
            }),
            sub_aggregation: sub_agg_req_1,
        }),
    )]
    .into_iter()
    .collect();

    let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

    let searcher = reader.searcher();
    let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();

    let res: Value = serde_json::to_value(agg_res)?;
    println!("{}", serde_json::to_string_pretty(&res)?);

    Ok(())
}
@@ -1,73 +0,0 @@
(entire file removed)
// # IP Address example
//
// This example shows how the ip field can be used
// with IpV6 and IpV4.

use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, STORED, STRING};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    let mut schema_builder = Schema::builder();
    let event_type = schema_builder.add_text_field("event_type", STRING | STORED);
    let ip = schema_builder.add_ip_addr_field("ip", STORED | INDEXED | FAST);
    let schema = schema_builder.build();

    // # Indexing documents
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(50_000_000)?;
    let doc = schema.parse_document(
        r#"{
            "ip": "192.168.0.33",
            "event_type": "login"
        }"#,
    )?;
    index_writer.add_document(doc)?;
    let doc = schema.parse_document(
        r#"{
            "ip": "192.168.0.80",
            "event_type": "checkout"
        }"#,
    )?;
    index_writer.add_document(doc)?;
    let doc = schema.parse_document(
        r#"{
            "ip": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
            "event_type": "checkout"
        }"#,
    )?;

    index_writer.add_document(doc)?;
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();

    let query_parser = QueryParser::for_index(&index, vec![event_type, ip]);
    {
        let query = query_parser.parse_query("ip:[192.168.0.0 TO 192.168.0.100]")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
        assert_eq!(count_docs.len(), 2);
    }
    {
        let query = query_parser.parse_query("ip:[192.168.1.0 TO 192.168.1.100]")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(count_docs.len(), 0);
    }
    {
        let query = query_parser.parse_query("ip:192.168.0.80")?;
        let count_docs = searcher.search(&*query, &Count)?;
        assert_eq!(count_docs, 1);
    }
    {
        // IpV6 needs to be escaped because it contains `:`
        let query = query_parser.parse_query("ip:\"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"")?;
        let count_docs = searcher.search(&*query, &Count)?;
        assert_eq!(count_docs, 1);
    }

    Ok(())
}
319
examples/aggregation.rs
Normal file
319
examples/aggregation.rs
Normal file
@@ -0,0 +1,319 @@
|
|||||||
|
// # Aggregation example
|
||||||
|
//
|
||||||
|
// This example shows how you can use built-in aggregations.
|
||||||
|
// We will use nested aggregations with buckets and metrics:
|
||||||
|
// - Range buckets and compute the average in each bucket.
|
||||||
|
// - Term aggregation and compute the min price in each bucket
|
||||||
|
// ---
|
||||||
|
|
||||||
|
use serde_json::{Deserializer, Value};
|
||||||
|
use tantivy::aggregation::agg_req::{
|
||||||
|
Aggregation, Aggregations, BucketAggregation, BucketAggregationType, MetricAggregation,
|
||||||
|
RangeAggregation,
|
||||||
|
};
|
||||||
|
use tantivy::aggregation::agg_result::AggregationResults;
|
||||||
|
use tantivy::aggregation::bucket::RangeAggregationRange;
|
||||||
|
use tantivy::aggregation::metric::AverageAggregation;
|
||||||
|
use tantivy::aggregation::AggregationCollector;
|
||||||
|
use tantivy::query::AllQuery;
|
||||||
|
use tantivy::schema::{self, IndexRecordOption, Schema, TextFieldIndexing, FAST};
|
||||||
|
use tantivy::Index;
|
||||||
|
|
||||||
|
fn main() -> tantivy::Result<()> {
|
||||||
|
// # Create Schema
|
||||||
|
//
|
||||||
|
// Lets create a schema for a footwear shop, with 4 fields: name, category, stock and price.
|
||||||
|
// category, stock and price will be fast fields as that's the requirement
|
||||||
|
// for aggregation queries.
|
||||||
|
//
|
||||||
|
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
// In preparation of the `TermsAggregation`, the category field is configured with:
|
||||||
|
// - `set_fast`
|
||||||
|
// - `raw` tokenizer
|
||||||
|
//
|
||||||
|
// The tokenizer is set to "raw", because the fast field uses the same dictionary as the
|
||||||
|
// inverted index. (This behaviour will change in tantivy 0.20, where the fast field will
|
||||||
|
// always be raw tokenized independent from the regular tokenizing)
|
||||||
|
//
|
||||||
|
let text_fieldtype = schema::TextOptions::default()
|
||||||
|
.set_indexing_options(
|
||||||
|
TextFieldIndexing::default()
|
||||||
|
.set_index_option(IndexRecordOption::WithFreqs)
|
||||||
|
.set_tokenizer("raw"),
|
||||||
|
)
|
||||||
|
.set_fast()
|
||||||
|
.set_stored();
|
||||||
|
schema_builder.add_text_field("category", text_fieldtype);
|
||||||
|
schema_builder.add_f64_field("stock", FAST);
|
||||||
|
schema_builder.add_f64_field("price", FAST);
|
||||||
|
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
|
||||||
|
// # Indexing documents
|
||||||
|
//
|
||||||
|
// Lets index a bunch of documents for this example.
|
||||||
|
let index = Index::create_in_ram(schema.clone());
|
||||||
|
|
||||||
|
let data = r#"{
|
||||||
|
"name": "Almond Toe Court Shoes, Patent Black",
|
||||||
|
"category": "Womens Footwear",
|
||||||
|
"price": 99.00,
|
||||||
|
"stock": 5
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Suede Shoes, Blue",
|
||||||
|
"category": "Womens Footwear",
|
||||||
|
"price": 42.00,
|
||||||
|
"stock": 4
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Leather Driver Saddle Loafers, Tan",
|
||||||
|
"category": "Mens Footwear",
|
||||||
|
"price": 34.00,
|
||||||
|
"stock": 12
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Flip Flops, Red",
|
||||||
|
"category": "Mens Footwear",
|
||||||
|
"price": 19.00,
|
||||||
|
"stock": 6
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Flip Flops, Blue",
|
||||||
|
"category": "Mens Footwear",
|
||||||
|
"price": 19.00,
|
||||||
|
"stock": 0
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Gold Button Cardigan, Black",
|
||||||
|
"category": "Womens Casualwear",
|
||||||
|
"price": 167.00,
|
||||||
|
"stock": 6
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Cotton Shorts, Medium Red",
|
||||||
|
"category": "Womens Casualwear",
|
||||||
|
"price": 30.00,
|
||||||
|
"stock": 5
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Fine Stripe Short SleeveShirt, Grey",
|
||||||
|
"category": "Mens Casualwear",
|
||||||
|
"price": 49.99,
|
||||||
|
"stock": 9
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Fine Stripe Short SleeveShirt, Green",
|
||||||
|
"category": "Mens Casualwear",
|
||||||
|
"price": 49.99,
|
||||||
|
"offer": 39.99,
|
||||||
|
"stock": 9
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Sharkskin Waistcoat, Charcoal",
|
||||||
|
"category": "Mens Formalwear",
|
||||||
|
"price": 75.00,
|
||||||
|
"stock": 2
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Lightweight Patch PocketBlazer, Deer",
|
||||||
|
"category": "Mens Formalwear",
|
||||||
|
"price": 175.50,
|
||||||
|
"stock": 1
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Bird Print Dress, Black",
|
||||||
|
"category": "Womens Formalwear",
|
||||||
|
"price": 270.00,
|
||||||
|
"stock": 10
|
||||||
|
}
|
||||||
|
{
|
||||||
|
"name": "Mid Twist Cut-Out Dress, Pink",
|
||||||
|
"category": "Womens Formalwear",
|
||||||
|
"price": 540.00,
|
||||||
|
"stock": 5
|
||||||
|
}"#;
|
||||||
|
|
||||||
|
let stream = Deserializer::from_str(data).into_iter::<Value>();
|
||||||
|
|
||||||
|
let mut index_writer = index.writer(50_000_000)?;
|
||||||
|
let mut num_indexed = 0;
|
||||||
|
for value in stream {
|
||||||
|
let doc = schema.parse_document(&serde_json::to_string(&value.unwrap())?)?;
|
||||||
|
index_writer.add_document(doc)?;
|
||||||
|
num_indexed += 1;
|
||||||
|
if num_indexed > 4 {
|
||||||
|
// Writing the first segment
|
||||||
|
index_writer.commit()?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writing the second segment
|
||||||
|
index_writer.commit()?;
|
||||||
|
|
||||||
|
// We have two segments now. The `AggregationCollector` will run the aggregation on each
|
||||||
|
// segment and then merge the results into an `IntermediateAggregationResult`.
|
||||||
|
|
||||||
|
let reader = index.reader()?;
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
// ---
|
||||||
|
// # Aggregation Query
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// We can construct the query by building the request structure or by deserializing from JSON.
|
||||||
|
// The JSON API is more stable and therefore recommended.
|
||||||
|
//
|
||||||
|
// ## Request 1
|
||||||
|
|
||||||
|
let agg_req_str = r#"
|
||||||
|
{
|
||||||
|
"group_by_stock": {
|
||||||
|
"aggs": {
|
||||||
|
"average_price": { "avg": { "field": "price" } }
|
||||||
|
},
|
||||||
|
"range": {
|
||||||
|
"field": "stock",
|
||||||
|
"ranges": [
|
||||||
|
{ "key": "few", "to": 1.0 },
|
||||||
|
{ "key": "some", "from": 1.0, "to": 10.0 },
|
||||||
|
{ "key": "many", "from": 10.0 }
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} "#;
|
||||||
|
|
||||||
|
// In this Aggregation we want to get the average price for different groups, depending on how
|
||||||
|
// many items are in stock. We define custom ranges `few`, `some`, `many` via the
|
||||||
|
// range aggregation.
|
||||||
|
// For every bucket we want the average price, so we create a nested metric aggregation on the
|
||||||
|
// range bucket aggregation. Only buckets support nested aggregations.
|
||||||
|
// ### Request JSON API
|
||||||
|
//
|
||||||
|
|
||||||
|
    let agg_req: Aggregations = serde_json::from_str(agg_req_str)?;

    let collector = AggregationCollector::from_aggs(agg_req, None);

    let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();

    let res2: Value = serde_json::to_value(agg_res)?;

    // ### Request (Rust API)
    //
    // This is exactly the same request as above, but via the Rust structures.
    //
    let agg_req: Aggregations = vec![(
        "group_by_stock".to_string(),
        Aggregation::Bucket(BucketAggregation {
            bucket_agg: BucketAggregationType::Range(RangeAggregation {
                field: "stock".to_string(),
                ranges: vec![
                    RangeAggregationRange {
                        key: Some("few".into()),
                        from: None,
                        to: Some(1f64),
                    },
                    RangeAggregationRange {
                        key: Some("some".into()),
                        from: Some(1f64),
                        to: Some(10f64),
                    },
                    RangeAggregationRange {
                        key: Some("many".into()),
                        from: Some(10f64),
                        to: None,
                    },
                ],
                ..Default::default()
            }),
            sub_aggregation: vec![(
                "average_price".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("price".to_string()),
                )),
            )]
            .into_iter()
            .collect(),
        }),
    )]
    .into_iter()
    .collect();

    let collector = AggregationCollector::from_aggs(agg_req, None);
    // We use the `AllQuery`, which will pass all documents to the AggregationCollector.
    let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();

    let res1: Value = serde_json::to_value(agg_res)?;

    // ### Aggregation Result
    //
    // The resulting structure serializes to the same JSON format as Elasticsearch.
    //
    let expected_res = r#"
    {
      "group_by_stock":{
        "buckets":[
          {"average_price":{"value":19.0},"doc_count":1,"key":"few","to":1.0},
          {"average_price":{"value":124.748},"doc_count":10,"from":1.0,"key":"some","to":10.0},
          {"average_price":{"value":152.0},"doc_count":2,"from":10.0,"key":"many"}
        ]
      }
    }
    "#;
    let expected_json: Value = serde_json::from_str(expected_res)?;
    assert_eq!(expected_json, res1);
    assert_eq!(expected_json, res2);

    // ### Request 2
    //
    // Now we are interested in the minimum price per category, so we create a bucket per
    // category via `TermsAggregation`. We are interested in the highest minimum prices, and set the
    // order of the buckets `"order": { "min_price": "desc" }` to be sorted by the metric of
    // the sub aggregation. (awesome)
    //
    let agg_req_str = r#"
    {
      "min_price_per_category": {
        "aggs": {
          "min_price": { "min": { "field": "price" } }
        },
        "terms": {
          "field": "category",
          "min_doc_count": 1,
          "order": { "min_price": "desc" }
        }
      }
    } "#;

    let agg_req: Aggregations = serde_json::from_str(agg_req_str)?;

    let collector = AggregationCollector::from_aggs(agg_req, None);

    let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
    let res: Value = serde_json::to_value(agg_res)?;

    // Minimum price per category, sorted by minimum price descending
    //
    // As you can see, the starting prices for `Formalwear` are higher than `Casualwear`.
    //
    let expected_res = r#"
    {
      "min_price_per_category": {
        "buckets": [
          { "doc_count": 2, "key": "Womens Formalwear", "min_price": { "value": 270.0 } },
          { "doc_count": 2, "key": "Mens Formalwear", "min_price": { "value": 75.0 } },
          { "doc_count": 2, "key": "Mens Casualwear", "min_price": { "value": 49.99 } },
          { "doc_count": 2, "key": "Womens Footwear", "min_price": { "value": 42.0 } },
          { "doc_count": 2, "key": "Womens Casualwear", "min_price": { "value": 30.0 } },
          { "doc_count": 3, "key": "Mens Footwear", "min_price": { "value": 19.0 } }
        ],
        "sum_other_doc_count": 0
      }
    }
    "#;
    let expected_json: Value = serde_json::from_str(expected_res)?;

    assert_eq!(expected_json, res);

    Ok(())
}
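A quick way to double-check that the JSON request and the Rust-built request above really are equivalent is to serialize the Rust structure back to JSON and compare by eye. This is only a sketch, not part of the example file: it assumes the request types implement `serde::Serialize` (the serde attributes elsewhere in this commit range suggest they do), and fields left at `None` may serialize slightly differently from the hand-written JSON.

    // Sketch only: print the Rust-built request as JSON before handing it to the collector.
    let as_json = serde_json::to_string_pretty(&agg_req)?;
    println!("{as_json}");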
@@ -7,9 +7,7 @@
 // Of course, you can have a look at the tantivy's built-in collectors
 // such as the `CountCollector` for more examples.
-use std::sync::Arc;
-
-use fastfield_codecs::Column;
+use columnar::Column;
 
 // ---
 // Importing tantivy...
 use tantivy::collector::{Collector, SegmentCollector};
@@ -97,7 +95,7 @@ impl Collector for StatsCollector {
 }
 
 struct StatsSegmentCollector {
-    fast_field_reader: Arc<dyn Column<u64>>,
+    fast_field_reader: Column,
     stats: Stats,
 }
 
@@ -105,10 +103,14 @@ impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;
 
     fn collect(&mut self, doc: u32, _score: Score) {
-        let value = self.fast_field_reader.get_val(doc) as f64;
-        self.stats.count += 1;
-        self.stats.sum += value;
-        self.stats.squared_sum += value * value;
+        // Since we know the values are single value, we could call `first_or_default_col` on the
+        // column and fetch single values.
+        for value in self.fast_field_reader.values_for_doc(doc) {
+            let value = value as f64;
+            self.stats.count += 1;
+            self.stats.sum += value;
+            self.stats.squared_sum += value * value;
+        }
     }
 
     fn harvest(self) -> <Self as SegmentCollector>::Fruit {
@@ -169,7 +171,7 @@ fn main() -> tantivy::Result<()> {
     let searcher = reader.searcher();
     let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]);
 
-    // here we want to get a hit on the 'ken' in Frankenstein
+    // here we want to search for `broom` and use `StatsCollector` on the hits.
     let query = query_parser.parse_query("broom")?;
     if let Some(stats) =
         searcher.search(&query, &StatsCollector::with_field("price".to_string()))?
@@ -1,7 +1,7 @@
 // # Defining a tokenizer pipeline
 //
-// In this example, we'll see how to define a tokenizer pipeline
-// by aligning a bunch of `TokenFilter`.
+// In this example, we'll see how to define a tokenizer
+// by creating a custom `NgramTokenizer`.
 use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
 use tantivy::schema::*;
@@ -14,6 +14,7 @@ fn main() -> tantivy::Result<()> {
         .set_stored()
         .set_fast()
         .set_precision(tantivy::DatePrecision::Seconds);
+    // Add `occurred_at` date field type
     let occurred_at = schema_builder.add_date_field("occurred_at", opts);
     let event_type = schema_builder.add_text_field("event", STRING | STORED);
     let schema = schema_builder.build();
@@ -22,6 +23,7 @@ fn main() -> tantivy::Result<()> {
     let index = Index::create_in_ram(schema.clone());
 
     let mut index_writer = index.writer(50_000_000)?;
+    // The dates are passed as string in the RFC3339 format
     let doc = schema.parse_document(
         r#"{
             "occurred_at": "2022-06-22T12:53:50.53Z",
@@ -41,14 +43,16 @@ fn main() -> tantivy::Result<()> {
     let reader = index.reader()?;
     let searcher = reader.searcher();
 
-    // # Default fields: event_type
+    // # Search
     let query_parser = QueryParser::for_index(&index, vec![event_type]);
     {
-        let query = query_parser.parse_query("event:comment")?;
+        // Simple exact search on the date
+        let query = query_parser.parse_query("occurred_at:\"2022-06-22T12:53:50.53Z\"")?;
         let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
         assert_eq!(count_docs.len(), 1);
     }
     {
+        // Range query on the date field
         let query = query_parser
             .parse_query(r#"occurred_at:[2022-06-22T12:58:00Z TO 2022-06-23T00:00:00Z}"#)?;
         let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
@@ -71,7 +71,7 @@ fn main() -> tantivy::Result<()> {
     let reader = index.reader()?;
     let searcher = reader.searcher();
     {
-        let mut facet_collector = FacetCollector::for_field(classification);
+        let mut facet_collector = FacetCollector::for_field("classification");
         facet_collector.add_facet("/Felidae");
         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
         // This lists all of the facet counts, right below "/Felidae".
@@ -97,7 +97,7 @@ fn main() -> tantivy::Result<()> {
         let facet = Facet::from("/Felidae/Pantherinae");
         let facet_term = Term::from_facet(classification, &facet);
         let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
-        let mut facet_collector = FacetCollector::for_field(classification);
+        let mut facet_collector = FacetCollector::for_field("classification");
         facet_collector.add_facet("/Felidae/Pantherinae");
         let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
         let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
@@ -1,3 +1,12 @@
+// # Faceted Search With Tweak Score
+//
+// This example covers the faceted search functionalities of
+// tantivy.
+//
+// We will :
+// - define a text field "name" in our schema
+// - define a facet field "classification" in our schema
+
 use std::collections::HashSet;
 
 use tantivy::collector::TopDocs;
@@ -55,8 +64,9 @@ fn main() -> tantivy::Result<()> {
             .collect(),
     );
     let top_docs_by_custom_score =
+        // Call TopDocs with a custom tweak score
         TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
-            let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
+            let ingredient_reader = segment_reader.facet_reader("ingredient").unwrap();
             let facet_dict = ingredient_reader.facet_dict();
 
             let query_ords: HashSet<u64> = facets
@@ -64,12 +74,10 @@ fn main() -> tantivy::Result<()> {
                 .filter_map(|key| facet_dict.term_ord(key.encoded_str()).unwrap())
                 .collect();
 
-            let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);
-
             move |doc: DocId, original_score: Score| {
-                ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
-                let missing_ingredients = facet_ords_buffer
-                    .iter()
+                // Update the original score with a tweaked score
+                let missing_ingredients = ingredient_reader
+                    .facet_ords(doc)
                     .filter(|ord| !query_ords.contains(ord))
                     .count();
                 let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);
examples/fuzzy_search.rs (new file, 167 lines)
@@ -0,0 +1,167 @@
// # Basic Example
//
// This example covers the basic functionalities of
// tantivy.
//
// We will :
// - define our schema
// - create an index in a directory
// - index a few documents into our index
// - search for the best document matching a basic query
// - retrieve the best document's original content.
// ---
// Importing tantivy...
use tantivy::collector::{Count, TopDocs};
use tantivy::query::FuzzyTermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
    // Let's create a temporary directory for the
    // sake of this example
    let index_path = TempDir::new()?;

    // # Defining the schema
    //
    // The Tantivy index requires a very strict schema.
    // The schema declares which fields are in the index,
    // and for each field, its type and "the way it should
    // be indexed".

    // First we need to define a schema ...
    let mut schema_builder = Schema::builder();

    // Our first field is title.
    // We want full-text search for it, and we also want
    // to be able to retrieve the document after the search.
    //
    // `TEXT | STORED` is some syntactic sugar to describe
    // that.
    //
    // `TEXT` means the field should be tokenized and indexed,
    // along with its term frequency and term positions.
    //
    // `STORED` means that the field will also be saved
    // in a compressed, row-oriented key-value store.
    // This store is useful for reconstructing the
    // documents that were selected during the search phase.
    let title = schema_builder.add_text_field("title", TEXT | STORED);

    let schema = schema_builder.build();

    // # Indexing documents
    //
    // Let's create a brand new index.
    //
    // This will actually just save a meta.json
    // with our schema in the directory.
    let index = Index::create_in_dir(&index_path, schema.clone())?;

    // To insert a document we will need an index writer.
    // There must be only one writer at a time.
    // This single `IndexWriter` is already
    // multithreaded.
    //
    // Here we give tantivy a budget of `50MB`.
    // Using a bigger memory_arena for the indexer may increase
    // throughput, but 50 MB is already plenty.
    let mut index_writer = index.writer(50_000_000)?;

    // Let's index our documents!
    // We first need a handle on the title and the body field.

    // ### Adding documents
    //
    index_writer.add_document(doc!(
        title => "The Name of the Wind",
    ))?;
    index_writer.add_document(doc!(
        title => "The Diary of Muadib",
    ))?;
    index_writer.add_document(doc!(
        title => "A Dairy Cow",
    ))?;
    index_writer.add_document(doc!(
        title => "The Diary of a Young Girl",
    ))?;
    index_writer.commit()?;

    // ### Committing
    //
    // At this point our documents are not searchable.
    //
    //
    // We need to call `.commit()` explicitly to force the
    // `index_writer` to finish processing the documents in the queue,
    // flush the current index to the disk, and advertise
    // the existence of new documents.
    //
    // This call is blocking.
    index_writer.commit()?;

    // If `.commit()` returns correctly, then all of the
    // documents that have been added are guaranteed to be
    // persistently indexed.
    //
    // In the scenario of a crash or a power failure,
    // tantivy behaves as if it has rolled back to its last
    // commit.

    // # Searching
    //
    // ### Searcher
    //
    // A reader is required first in order to search an index.
    // It acts as a `Searcher` pool that reloads itself,
    // depending on a `ReloadPolicy`.
    //
    // For a search server you will typically create one reader for the entire lifetime of your
    // program, and acquire a new searcher for every single request.
    //
    // In the code below, we rely on the 'ON_COMMIT' policy: the reader
    // will reload the index automatically after each commit.
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    // We now need to acquire a searcher.
    //
    // A searcher points to a snapshotted, immutable version of the index.
    //
    // Some search experience might require more than
    // one query. Using the same searcher ensures that all of these queries will run on the
    // same version of the index.
    //
    // Acquiring a `searcher` is very cheap.
    //
    // You should acquire a searcher every time you start processing a request and
    // and release it right after your query is finished.
    let searcher = reader.searcher();

    // ### FuzzyTermQuery
    {
        let term = Term::from_field_text(title, "Diary");
        let query = FuzzyTermQuery::new(term, 2, true);

        let (top_docs, count) = searcher
            .search(&query, &(TopDocs::with_limit(5), Count))
            .unwrap();
        assert_eq!(count, 3);
        assert_eq!(top_docs.len(), 3);
        for (score, doc_address) in top_docs {
            let retrieved_doc = searcher.doc(doc_address)?;
            // Note that the score is not lower for the fuzzy hit.
            // There's an issue open for that: https://github.com/quickwit-oss/tantivy/issues/563
            println!("score {score:?} doc {}", schema.to_json(&retrieved_doc));
            // score 1.0 doc {"title":["The Diary of Muadib"]}
            //
            // score 1.0 doc {"title":["The Diary of a Young Girl"]}
            //
            // score 1.0 doc {"title":["A Dairy Cow"]}
        }
    }

    Ok(())
}
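A side note on the `FuzzyTermQuery::new(term, 2, true)` call above: the second argument is the maximum edit distance, and the third appears to make a transposition count as a single edit. A hypothetical stricter variant (not part of the new example, and the parameter semantics are an assumption) could look like the sketch below; under plain Levenshtein distance "Dairy" is two edits away from "Diary", so the count would likely drop to the two exact "Diary" titles.

    // Sketch: distance 1, transpositions counted as two edits (assumed semantics of the flag).
    let term = Term::from_field_text(title, "Diary");
    let strict_query = FuzzyTermQuery::new(term, 1, false);
    let (_top_docs, _count) = searcher.search(&strict_query, &(TopDocs::with_limit(5), Count))?;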
examples/ip_field.rs (new file, 107 lines)
@@ -0,0 +1,107 @@
// # IP Address example
//
// This example shows how the ip field can be used
// with IpV6 and IpV4.

use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, STORED, STRING};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    // We set the IP field as `INDEXED`, so it can be searched
    // `FAST` will create a fast field. The fast field will be used to execute search queries.
    // `FAST` is not a requirement for range queries, it can also be executed on the inverted index
    // which is created by `INDEXED`.
    let mut schema_builder = Schema::builder();
    let event_type = schema_builder.add_text_field("event_type", STRING | STORED);
    let ip = schema_builder.add_ip_addr_field("ip", STORED | INDEXED | FAST);
    let schema = schema_builder.build();

    // # Indexing documents
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(50_000_000)?;

    // ### IPv4
    // Adding documents that contain an IPv4 address. Notice that the IP addresses are passed as
    // `String`. Since the field is of type ip, we parse the IP address from the string and store it
    // internally as IPv6.
    let doc = schema.parse_document(
        r#"{
            "ip": "192.168.0.33",
            "event_type": "login"
        }"#,
    )?;
    index_writer.add_document(doc)?;
    let doc = schema.parse_document(
        r#"{
            "ip": "192.168.0.80",
            "event_type": "checkout"
        }"#,
    )?;
    index_writer.add_document(doc)?;
    // ### IPv6
    // Adding a document that contains an IPv6 address.
    let doc = schema.parse_document(
        r#"{
            "ip": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
            "event_type": "checkout"
        }"#,
    )?;

    index_writer.add_document(doc)?;
    // Commit will create a segment containing our documents.
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();

    // # Search
    // Range queries on IPv4. Since we created a fast field, the fast field will be used to execute
    // the search.
    // ### Range Queries
    let query_parser = QueryParser::for_index(&index, vec![event_type, ip]);
    {
        // Inclusive range queries
        let query = query_parser.parse_query("ip:[192.168.0.80 TO 192.168.0.100]")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
        assert_eq!(count_docs.len(), 1);
    }
    {
        // Exclusive range queries
        let query = query_parser.parse_query("ip:{192.168.0.80 TO 192.168.1.100]")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(count_docs.len(), 0);
    }
    {
        // Find docs with IP addresses smaller equal 192.168.1.100
        let query = query_parser.parse_query("ip:[* TO 192.168.1.100]")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(count_docs.len(), 2);
    }
    {
        // Find docs with IP addresses smaller than 192.168.1.100
        let query = query_parser.parse_query("ip:[* TO 192.168.1.100}")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(count_docs.len(), 2);
    }

    // ### Exact Queries
    // Exact search on IPv4.
    {
        let query = query_parser.parse_query("ip:192.168.0.80")?;
        let count_docs = searcher.search(&*query, &Count)?;
        assert_eq!(count_docs, 1);
    }
    // Exact search on IPv6.
    // IpV6 addresses need to be quoted because they contain `:`
    {
        let query = query_parser.parse_query("ip:\"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"")?;
        let count_docs = searcher.search(&*query, &Count)?;
        assert_eq!(count_docs, 1);
    }

    Ok(())
}
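The example's comment notes that IPv4 values are stored internally as IPv6. The standard library can illustrate the mapping involved; this snippet uses only `std::net` and is not part of the example file.

    use std::net::Ipv4Addr;

    // An IPv4 address has a canonical IPv4-mapped IPv6 form (::ffff:a.b.c.d).
    let v4: Ipv4Addr = "192.168.0.33".parse().unwrap();
    println!("{v4} -> {}", v4.to_ipv6_mapped()); // e.g. 192.168.0.33 -> ::ffff:192.168.0.33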
@@ -17,7 +17,6 @@ use tantivy::{
 type ProductId = u64;
 
-/// Price
 type Price = u32;
 
 pub trait PriceFetcher: Send + Sync + 'static {
@@ -48,7 +47,10 @@ impl Warmer for DynamicPriceColumn {
     fn warm(&self, searcher: &Searcher) -> tantivy::Result<()> {
         for segment in searcher.segment_readers() {
             let key = (segment.segment_id(), segment.delete_opstamp());
-            let product_id_reader = segment.fast_fields().u64(&self.field)?;
+            let product_id_reader = segment
+                .fast_fields()
+                .u64(&self.field)?
+                .first_or_default_col(0);
             let product_ids: Vec<ProductId> = segment
                 .doc_ids_alive()
                 .map(|doc| product_id_reader.get_val(doc))
@@ -87,10 +89,10 @@ impl Warmer for DynamicPriceColumn {
     }
 }
 
-/// For the sake of this example, the table is just an editable HashMap behind a RwLock.
-/// This map represents a map (ProductId -> Price)
-///
-/// In practise, it could be fetching things from an external service, like a SQL table.
+// For the sake of this example, the table is just an editable HashMap behind a RwLock.
+// This map represents a map (ProductId -> Price)
+//
+// In practise, it could be fetching things from an external service, like a SQL table.
 #[derive(Default, Clone)]
 pub struct ExternalPriceTable {
     prices: Arc<RwLock<HashMap<ProductId, Price>>>,
@@ -50,7 +50,7 @@ use std::collections::{HashMap, HashSet};
 use serde::{Deserialize, Serialize};
 
 pub use super::bucket::RangeAggregation;
-use super::bucket::{HistogramAggregation, TermsAggregation};
+use super::bucket::{DateHistogramAggregationReq, HistogramAggregation, TermsAggregation};
 use super::metric::{
     AverageAggregation, CountAggregation, MaxAggregation, MinAggregation, StatsAggregation,
     SumAggregation,
@@ -110,10 +110,13 @@ impl BucketAggregationInternal {
             _ => None,
         }
     }
-    pub(crate) fn as_histogram(&self) -> Option<&HistogramAggregation> {
+    pub(crate) fn as_histogram(&self) -> crate::Result<Option<HistogramAggregation>> {
         match &self.bucket_agg {
-            BucketAggregationType::Histogram(histogram) => Some(histogram),
-            _ => None,
+            BucketAggregationType::Histogram(histogram) => Ok(Some(histogram.clone())),
+            BucketAggregationType::DateHistogram(histogram) => {
+                Ok(Some(histogram.to_histogram_req()?))
+            }
+            _ => Ok(None),
         }
     }
     pub(crate) fn as_term(&self) -> Option<&TermsAggregation> {
@@ -124,15 +127,6 @@ impl BucketAggregationInternal {
     }
 }
 
-/// Extract all fields, where the term directory is used in the tree.
-pub fn get_term_dict_field_names(aggs: &Aggregations) -> HashSet<String> {
-    let mut term_dict_field_names = Default::default();
-    for el in aggs.values() {
-        el.get_term_dict_field_names(&mut term_dict_field_names)
-    }
-    term_dict_field_names
-}
-
 /// Extract all fast field names used in the tree.
 pub fn get_fast_field_names(aggs: &Aggregations) -> HashSet<String> {
     let mut fast_field_names = Default::default();
@@ -155,16 +149,12 @@ pub enum Aggregation {
 }
 
 impl Aggregation {
-    fn get_term_dict_field_names(&self, term_field_names: &mut HashSet<String>) {
-        if let Aggregation::Bucket(bucket) = self {
-            bucket.get_term_dict_field_names(term_field_names)
-        }
-    }
-
     fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
         match self {
             Aggregation::Bucket(bucket) => bucket.get_fast_field_names(fast_field_names),
-            Aggregation::Metric(metric) => metric.get_fast_field_names(fast_field_names),
+            Aggregation::Metric(metric) => {
+                fast_field_names.insert(metric.get_fast_field_name().to_string());
+            }
         }
     }
 }
@@ -193,14 +183,9 @@ pub struct BucketAggregation {
 }
 
 impl BucketAggregation {
-    fn get_term_dict_field_names(&self, term_dict_field_names: &mut HashSet<String>) {
-        if let BucketAggregationType::Terms(terms) = &self.bucket_agg {
-            term_dict_field_names.insert(terms.field.to_string());
-        }
-        term_dict_field_names.extend(get_term_dict_field_names(&self.sub_aggregation));
-    }
     fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
-        self.bucket_agg.get_fast_field_names(fast_field_names);
+        let fast_field_name = self.bucket_agg.get_fast_field_name();
+        fast_field_names.insert(fast_field_name.to_string());
         fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
     }
 }
@@ -214,20 +199,22 @@ pub enum BucketAggregationType {
     /// Put data into buckets of user-defined ranges.
     #[serde(rename = "histogram")]
     Histogram(HistogramAggregation),
+    /// Put data into buckets of user-defined ranges.
+    #[serde(rename = "date_histogram")]
+    DateHistogram(DateHistogramAggregationReq),
     /// Put data into buckets of terms.
     #[serde(rename = "terms")]
     Terms(TermsAggregation),
 }
 
 impl BucketAggregationType {
-    fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
+    fn get_fast_field_name(&self) -> &str {
         match self {
-            BucketAggregationType::Terms(terms) => fast_field_names.insert(terms.field.to_string()),
-            BucketAggregationType::Range(range) => fast_field_names.insert(range.field.to_string()),
-            BucketAggregationType::Histogram(histogram) => {
-                fast_field_names.insert(histogram.field.to_string())
-            }
-        };
+            BucketAggregationType::Terms(terms) => terms.field.as_str(),
+            BucketAggregationType::Range(range) => range.field.as_str(),
+            BucketAggregationType::Histogram(histogram) => histogram.field.as_str(),
+            BucketAggregationType::DateHistogram(histogram) => histogram.field.as_str(),
+        }
     }
 }
 
@@ -262,16 +249,15 @@ pub enum MetricAggregation {
 }
 
 impl MetricAggregation {
-    fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
-        let fast_field_name = match self {
+    fn get_fast_field_name(&self) -> &str {
+        match self {
             MetricAggregation::Average(avg) => avg.field_name(),
             MetricAggregation::Count(count) => count.field_name(),
            MetricAggregation::Max(max) => max.field_name(),
             MetricAggregation::Min(min) => min.field_name(),
             MetricAggregation::Stats(stats) => stats.field_name(),
             MetricAggregation::Sum(sum) => sum.field_name(),
-        };
-        fast_field_names.insert(fast_field_name.to_string());
+        }
     }
 }
@@ -3,17 +3,18 @@
 use std::rc::Rc;
 use std::sync::atomic::AtomicU32;
 
-use columnar::{Column, StrColumn};
+use columnar::{Column, ColumnType, StrColumn};
 
 use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
-use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
+use super::bucket::{
+    DateHistogramAggregationReq, HistogramAggregation, RangeAggregation, TermsAggregation,
+};
 use super::metric::{
     AverageAggregation, CountAggregation, MaxAggregation, MinAggregation, StatsAggregation,
     SumAggregation,
 };
 use super::segment_agg_result::BucketCount;
 use super::VecWithNames;
-use crate::schema::Type;
 use crate::{SegmentReader, TantivyError};
 
 #[derive(Clone, Default)]
@@ -41,7 +42,7 @@ pub struct BucketAggregationWithAccessor {
     /// based on search terms. So eventually this needs to be Option or moved.
     pub(crate) accessor: Column<u64>,
     pub(crate) str_dict_column: Option<StrColumn>,
-    pub(crate) field_type: Type,
+    pub(crate) field_type: ColumnType,
     pub(crate) bucket_agg: BucketAggregationType,
     pub(crate) sub_aggregation: AggregationsWithAccessor,
     pub(crate) bucket_count: BucketCount,
@@ -63,10 +64,14 @@ impl BucketAggregationWithAccessor {
             BucketAggregationType::Histogram(HistogramAggregation {
                 field: field_name, ..
             }) => get_ff_reader_and_validate(reader, field_name)?,
+            BucketAggregationType::DateHistogram(DateHistogramAggregationReq {
+                field: field_name,
+                ..
+            }) => get_ff_reader_and_validate(reader, field_name)?,
             BucketAggregationType::Terms(TermsAggregation {
                 field: field_name, ..
             }) => {
-                str_dict_column = reader.fast_fields().str(&field_name)?;
+                str_dict_column = reader.fast_fields().str(field_name)?;
                 get_ff_reader_and_validate(reader, field_name)?
             }
         };
@@ -94,7 +99,7 @@ impl BucketAggregationWithAccessor {
 #[derive(Clone)]
 pub struct MetricAggregationWithAccessor {
     pub metric: MetricAggregation,
-    pub field_type: Type,
+    pub field_type: ColumnType,
     pub accessor: Column<u64>,
 }
 
@@ -158,22 +163,12 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
 fn get_ff_reader_and_validate(
     reader: &SegmentReader,
     field_name: &str,
-) -> crate::Result<(columnar::Column<u64>, Type)> {
-    let field = reader.schema().get_field(field_name)?;
-    // TODO we should get type metadata from columnar
-    let field_type = reader
-        .schema()
-        .get_field_entry(field)
-        .field_type()
-        .value_type();
-    // TODO Do validation
-
+) -> crate::Result<(columnar::Column<u64>, ColumnType)> {
     let ff_fields = reader.fast_fields();
-    let ff_field = ff_fields.u64_lenient(field_name)?.ok_or_else(|| {
-        TantivyError::InvalidArgument(format!(
-            "No numerical fast field found for field: {}",
-            field_name
-        ))
-    })?;
-    Ok((ff_field, field_type))
+    let ff_field_with_type = ff_fields
+        .u64_lenient_with_type(field_name)?
+        .ok_or_else(|| {
+            TantivyError::InvalidArgument(format!("No fast field found for field: {}", field_name))
+        })?;
+    Ok(ff_field_with_type)
 }
@@ -12,7 +12,6 @@ use super::bucket::GetDocCount;
 use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
 use super::metric::{SingleMetricResult, Stats};
 use super::Key;
-use crate::schema::Schema;
 use crate::TantivyError;
 
 #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
@@ -154,12 +153,9 @@ pub enum BucketResult {
 }
 
 impl BucketResult {
-    pub(crate) fn empty_from_req(
-        req: &BucketAggregationInternal,
-        schema: &Schema,
-    ) -> crate::Result<Self> {
+    pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
         let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
-        empty_bucket.into_final_bucket_result(req, schema)
+        empty_bucket.into_final_bucket_result(req)
     }
 }
 
src/aggregation/agg_tests.rs (new file, 1174 lines)
File diff suppressed because it is too large
@@ -1,5 +1,8 @@
 use serde::{Deserialize, Serialize};
 
+use super::{HistogramAggregation, HistogramBounds};
+use crate::aggregation::AggregationError;
+
 /// DateHistogramAggregation is similar to `HistogramAggregation`, but it can only be used with date
 /// type.
 ///
@@ -29,8 +32,16 @@ use serde::{Deserialize, Serialize};
 /// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)
 #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
 pub struct DateHistogramAggregationReq {
+    #[doc(hidden)]
+    /// Only for validation
+    interval: Option<String>,
+    #[doc(hidden)]
+    /// Only for validation
+    date_interval: Option<String>,
     /// The field to aggregate on.
     pub field: String,
+    /// The format to format dates.
+    pub format: Option<String>,
     /// The interval to chunk your data range. Each bucket spans a value range of
     /// [0..fixed_interval). Accepted values
     ///
@@ -55,72 +66,410 @@ pub struct DateHistogramAggregationReq {
     /// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
     /// 1))`.
     pub offset: Option<String>,
+    /// The minimum number of documents in a bucket to be returned. Defaults to 0.
+    pub min_doc_count: Option<u64>,
+    /// Limits the data range to `[min, max]` closed interval.
+    ///
+    /// This can be used to filter values if they are not in the data range.
+    ///
+    /// hard_bounds only limits the buckets, to force a range set both extended_bounds and
+    /// hard_bounds to the same range.
+    ///
+    /// Needs to be provided as timestamp in microseconds precision.
+    ///
+    /// ## Example
+    /// ```json
+    /// {
+    ///     "sales_over_time": {
+    ///         "date_histogram": {
+    ///             "field": "dates",
+    ///             "interval": "1d",
+    ///             "hard_bounds": {
+    ///                 "min": 0,
+    ///                 "max": 1420502400000000
+    ///             }
+    ///         }
+    ///     }
+    /// }
+    /// ```
+    pub hard_bounds: Option<HistogramBounds>,
+    /// Can be set to extend your bounds. The range of the buckets is by default defined by the
+    /// data range of the values of the documents. As the name suggests, this can only be used to
+    /// extend the value range. If the bounds for min or max are not extending the range, the value
+    /// has no effect on the returned buckets.
+    ///
+    /// Cannot be set in conjunction with min_doc_count > 0, since the empty buckets from extended
+    /// bounds would not be returned.
+    pub extended_bounds: Option<HistogramBounds>,
+
     /// Whether to return the buckets as a hash map
     #[serde(default)]
     pub keyed: bool,
 }
 
 impl DateHistogramAggregationReq {
+    pub(crate) fn to_histogram_req(&self) -> crate::Result<HistogramAggregation> {
+        self.validate()?;
+        Ok(HistogramAggregation {
+            field: self.field.to_string(),
+            interval: parse_into_microseconds(&self.fixed_interval)? as f64,
+            offset: self
+                .offset
+                .as_ref()
+                .map(|offset| parse_offset_into_microseconds(offset))
+                .transpose()?
+                .map(|el| el as f64),
+            min_doc_count: self.min_doc_count,
+            hard_bounds: None,
+            extended_bounds: None,
+            keyed: self.keyed,
+        })
+    }
+
     fn validate(&self) -> crate::Result<()> {
+        if self.interval.is_some() {
+            return Err(crate::TantivyError::InvalidArgument(format!(
+                "`interval` parameter {:?} in date histogram is unsupported, only \
+                 `fixed_interval` is supported",
+                self.interval
+            )));
+        }
+        if self.format.is_some() {
+            return Err(crate::TantivyError::InvalidArgument(
+                "format parameter on date_histogram is unsupported".to_string(),
+            ));
+        }
+
+        if self.date_interval.is_some() {
+            return Err(crate::TantivyError::InvalidArgument(
+                "date_interval in date histogram is unsupported, only `fixed_interval` is \
+                 supported"
+                    .to_string(),
+            ));
+        }
+
+        parse_into_microseconds(&self.fixed_interval)?;
+
         Ok(())
     }
 }
 
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Error)]
 /// Errors when parsing the fixed interval for `DateHistogramAggregationReq`.
 pub enum DateHistogramParseError {
     /// Unit not recognized in passed String
+    #[error("Unit not recognized in passed String {0:?}")]
     UnitNotRecognized(String),
     /// Number not found in passed String
+    #[error("Number not found in passed String {0:?}")]
     NumberMissing(String),
     /// Unit not found in passed String
+    #[error("Unit not found in passed String {0:?}")]
     UnitMissing(String),
+    /// Offset invalid
+    #[error("passed offset is invalid {0:?}")]
+    InvalidOffset(String),
 }
 
-fn parse_into_milliseconds(input: &str) -> Result<u64, DateHistogramParseError> {
+fn parse_offset_into_microseconds(input: &str) -> Result<i64, AggregationError> {
+    let is_sign = |byte| &[byte] == b"-" || &[byte] == b"+";
+    if input.is_empty() {
+        return Err(DateHistogramParseError::InvalidOffset(input.to_string()).into());
+    }
+
+    let has_sign = is_sign(input.as_bytes()[0]);
+    if has_sign {
+        let (sign, input) = input.split_at(1);
+        let val = parse_into_microseconds(input)?;
+        if sign == "-" {
+            Ok(-val)
+        } else {
+            Ok(val)
+        }
+    } else {
+        parse_into_microseconds(input)
+    }
+}
+
+fn parse_into_microseconds(input: &str) -> Result<i64, AggregationError> {
     let split_boundary = input
-        .char_indices()
-        .take_while(|(pos, el)| el.is_numeric())
+        .as_bytes()
+        .iter()
+        .take_while(|byte| byte.is_ascii_digit())
         .count();
     let (number, unit) = input.split_at(split_boundary);
     if number.is_empty() {
-        return Err(DateHistogramParseError::NumberMissing(input.to_string()));
+        return Err(DateHistogramParseError::NumberMissing(input.to_string()).into());
     }
     if unit.is_empty() {
-        return Err(DateHistogramParseError::UnitMissing(input.to_string()));
+        return Err(DateHistogramParseError::UnitMissing(input.to_string()).into());
     }
-    let number: u64 = number.parse().unwrap();
+    let number: i64 = number
+        .parse()
+        // Technically this should never happen, but there was a bug
+        // here and being defensive does not hurt.
+        .map_err(|_err| DateHistogramParseError::NumberMissing(input.to_string()))?;
+
     let multiplier_from_unit = match unit {
         "ms" => 1,
         "s" => 1000,
         "m" => 60 * 1000,
         "h" => 60 * 60 * 1000,
        "d" => 24 * 60 * 60 * 1000,
-        _ => return Err(DateHistogramParseError::UnitNotRecognized(unit.to_string())),
+        _ => return Err(DateHistogramParseError::UnitNotRecognized(unit.to_string()).into()),
     };
-    Ok(number * multiplier_from_unit)
+
+    Ok(number * multiplier_from_unit * 1000)
 }
 
 #[cfg(test)]
 mod tests {
+    use pretty_assertions::assert_eq;
+
     use super::*;
+    use crate::aggregation::agg_req::Aggregations;
+    use crate::aggregation::tests::exec_request;
+    use crate::indexer::NoMergePolicy;
+    use crate::schema::{Schema, FAST};
+    use crate::Index;
 
     #[test]
-    fn parser_test() {
-        assert_eq!(parse_into_milliseconds("1m").unwrap(), 60_000);
-        assert_eq!(parse_into_milliseconds("2m").unwrap(), 120_000);
+    fn test_parse_into_microseconds() {
+        assert_eq!(parse_into_microseconds("1m").unwrap(), 60_000_000);
+        assert_eq!(parse_into_microseconds("2m").unwrap(), 120_000_000);
         assert_eq!(
-            parse_into_milliseconds("2y").unwrap_err(),
-            DateHistogramParseError::UnitNotRecognized("y".to_string())
+            parse_into_microseconds("2y").unwrap_err(),
+            DateHistogramParseError::UnitNotRecognized("y".to_string()).into()
         );
         assert_eq!(
-            parse_into_milliseconds("2000").unwrap_err(),
-            DateHistogramParseError::UnitMissing("2000".to_string())
+            parse_into_microseconds("2000").unwrap_err(),
+            DateHistogramParseError::UnitMissing("2000".to_string()).into()
        );
         assert_eq!(
-            parse_into_milliseconds("ms").unwrap_err(),
-            DateHistogramParseError::NumberMissing("ms".to_string())
+            parse_into_microseconds("ms").unwrap_err(),
+            DateHistogramParseError::NumberMissing("ms".to_string()).into()
         );
     }
+
+    #[test]
+    fn test_parse_offset_into_microseconds() {
+        assert_eq!(parse_offset_into_microseconds("1m").unwrap(), 60_000_000);
+        assert_eq!(parse_offset_into_microseconds("+1m").unwrap(), 60_000_000);
+        assert_eq!(parse_offset_into_microseconds("-1m").unwrap(), -60_000_000);
+        assert_eq!(parse_offset_into_microseconds("2m").unwrap(), 120_000_000);
+        assert_eq!(parse_offset_into_microseconds("+2m").unwrap(), 120_000_000);
+        assert_eq!(parse_offset_into_microseconds("-2m").unwrap(), -120_000_000);
+        assert_eq!(parse_offset_into_microseconds("-2ms").unwrap(), -2_000);
+        assert_eq!(
+            parse_offset_into_microseconds("2y").unwrap_err(),
+            DateHistogramParseError::UnitNotRecognized("y".to_string()).into()
+        );
+        assert_eq!(
+            parse_offset_into_microseconds("2000").unwrap_err(),
+            DateHistogramParseError::UnitMissing("2000".to_string()).into()
+        );
+        assert_eq!(
+            parse_offset_into_microseconds("ms").unwrap_err(),
+            DateHistogramParseError::NumberMissing("ms".to_string()).into()
+        );
+    }
+
+    #[test]
+    fn test_parse_into_milliseconds_do_not_accept_non_ascii() {
+        assert!(parse_into_microseconds("1m").is_err());
+    }
+
+    pub fn get_test_index_from_docs(
+        merge_segments: bool,
+        segment_and_docs: &[Vec<&str>],
+    ) -> crate::Result<Index> {
+        let mut schema_builder = Schema::builder();
+        schema_builder.add_date_field("date", FAST);
+        schema_builder.add_text_field("text", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema.clone());
+        {
+            let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
+            index_writer.set_merge_policy(Box::new(NoMergePolicy));
+            for values in segment_and_docs {
+                for doc_str in values {
+                    let doc = schema.parse_document(doc_str)?;
+                    index_writer.add_document(doc)?;
+                }
+                // writing the segment
+                index_writer.commit()?;
+            }
+        }
+        if merge_segments {
+            let segment_ids = index
+                .searchable_segment_ids()
+                .expect("Searchable segments failed.");
+            if segment_ids.len() > 1 {
+                let mut index_writer = index.writer_for_tests()?;
+                index_writer.merge(&segment_ids).wait()?;
+                index_writer.wait_merging_threads()?;
+            }
+        }
+
+        Ok(index)
+    }
+
+    #[test]
+    fn histogram_test_date_force_merge_segments() -> crate::Result<()> {
+        histogram_test_date_merge_segments(true)
+    }
+
+    #[test]
+    fn histogram_test_date() -> crate::Result<()> {
+        histogram_test_date_merge_segments(false)
+    }
+    fn histogram_test_date_merge_segments(merge_segments: bool) -> crate::Result<()> {
+        let docs = vec![
+            vec![r#"{ "date": "2015-01-01T12:10:30Z", "text": "aaa" }"#],
+            vec![r#"{ "date": "2015-01-01T11:11:30Z", "text": "bbb" }"#],
+            vec![r#"{ "date": "2015-01-02T00:00:00Z", "text": "bbb" }"#],
+            vec![r#"{ "date": "2015-01-06T00:00:00Z", "text": "ccc" }"#],
+        ];
+
+        let index = get_test_index_from_docs(merge_segments, &docs)?;
+        // 30day + offset
+        let elasticsearch_compatible_json = json!(
+            {
+                "sales_over_time": {
+                    "date_histogram": {
+                        "field": "date",
+                        "fixed_interval": "30d",
+                        "offset": "-4d"
+                    }
+                }
+            }
+        );
+
+        let agg_req: Aggregations =
+            serde_json::from_str(&serde_json::to_string(&elasticsearch_compatible_json).unwrap())
+                .unwrap();
+        let res = exec_request(agg_req, &index)?;
+        let expected_res = json!({
+            "sales_over_time" : {
+                "buckets" : [
+                    {
+                        "key_as_string" : "2015-01-01T00:00:00Z",
+                        "key" : 1420070400000000.0,
+                        "doc_count" : 4
+                    }
+                ]
+            }
+        });
+        assert_eq!(res, expected_res);
+
+        // 30day + offset + sub_agg
+        let elasticsearch_compatible_json = json!(
+            {
+                "sales_over_time": {
+                    "date_histogram": {
+                        "field": "date",
+                        "fixed_interval": "30d",
+                        "offset": "-4d"
+                    },
+                    "aggs": {
+                        "texts": {
+                            "terms": {"field": "text"}
+                        }
+                    }
+                }
+            }
+        );
+
+        let agg_req: Aggregations =
+            serde_json::from_str(&serde_json::to_string(&elasticsearch_compatible_json).unwrap())
+                .unwrap();
+        let res = exec_request(agg_req, &index)?;
+        println!("{}", serde_json::to_string_pretty(&res).unwrap());
+        let expected_res = json!({
+            "sales_over_time" : {
+                "buckets" : [
+                    {
+                        "key_as_string" : "2015-01-01T00:00:00Z",
+                        "key" : 1420070400000000.0,
+                        "doc_count" : 4,
+                        "texts": {
+                            "buckets": [
+                                {
+                                    "doc_count": 2,
+                                    "key": "bbb"
+                                },
+                                {
+                                    "doc_count": 1,
+                                    "key": "ccc"
+                                },
+                                {
+                                    "doc_count": 1,
+                                    "key": "aaa"
+                                }
+                            ],
+                            "doc_count_error_upper_bound": 0,
+                            "sum_other_doc_count": 0
+                        }
+                    }
+                ]
+            }
+        });
+        assert_eq!(res, expected_res);
+
+        // 1day
+        let elasticsearch_compatible_json = json!(
+            {
+                "sales_over_time": {
+                    "date_histogram": {
+                        "field": "date",
+                        "fixed_interval": "1d"
+                    }
+                }
+            }
+        );
+
+        let agg_req: Aggregations =
+            serde_json::from_str(&serde_json::to_string(&elasticsearch_compatible_json).unwrap())
+                .unwrap();
+        let res = exec_request(agg_req, &index)?;
+        let expected_res = json!( {
+            "sales_over_time": {
+                "buckets": [
+                    {
+                        "doc_count": 2,
+                        "key": 1420070400000000.0,
+                        "key_as_string": "2015-01-01T00:00:00Z"
+                    },
+                    {
+                        "doc_count": 1,
+                        "key": 1420156800000000.0,
+                        "key_as_string": "2015-01-02T00:00:00Z"
+                    },
+                    {
+                        "doc_count": 0,
+                        "key": 1420243200000000.0,
+                        "key_as_string": "2015-01-03T00:00:00Z"
+                    },
+                    {
+                        "doc_count": 0,
+                        "key": 1420329600000000.0,
+                        "key_as_string": "2015-01-04T00:00:00Z"
+                    },
+                    {
+                        "doc_count": 0,
+                        "key": 1420416000000000.0,
+                        "key_as_string": "2015-01-05T00:00:00Z"
+                    },
+                    {
+                        "doc_count": 1,
+                        "key": 1420502400000000.0,
+                        "key_as_string": "2015-01-06T00:00:00Z"
+                    }
+                ]
+            }
+        });
+        assert_eq!(res, expected_res);
+
+        Ok(())
+    }
 }
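For orientation, the `fixed_interval` strings accepted by `parse_into_microseconds` above boil down to simple arithmetic: the unit multiplier yields milliseconds, and the final `* 1000` converts to microseconds. The check below is only an illustration, not code from the patch.

    // "30d" => 30 * 24 * 60 * 60 * 1000 ms, then * 1000 for microseconds.
    let thirty_days_us: i64 = 30 * 24 * 60 * 60 * 1000 * 1000;
    assert_eq!(thirty_days_us, 2_592_000_000_000);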
@@ -1,9 +1,11 @@
 use std::cmp::Ordering;
 use std::fmt::Display;
 
-use columnar::Column;
+use columnar::ColumnType;
 use itertools::Itertools;
+use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
+use tantivy_bitpacker::minmax;
 
 use crate::aggregation::agg_req::AggregationsInternal;
 use crate::aggregation::agg_req_with_accessor::{
@@ -14,10 +16,9 @@ use crate::aggregation::intermediate_agg_result::{
     IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
 };
 use crate::aggregation::segment_agg_result::{
-    GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
+    build_segment_agg_collector, SegmentAggregationCollector,
 };
-use crate::aggregation::{f64_from_fastfield_u64, format_date};
-use crate::schema::{Schema, Type};
+use crate::aggregation::{f64_from_fastfield_u64, format_date, VecWithNames};
 use crate::{DocId, TantivyError};
 
 /// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
@@ -176,7 +177,7 @@ impl HistogramBounds {
     }
 }
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Default, Clone, Debug, PartialEq)]
 pub(crate) struct SegmentHistogramBucketEntry {
     pub key: f64,
     pub doc_count: u64,
@@ -185,7 +186,7 @@ pub(crate) struct SegmentHistogramBucketEntry {
 impl SegmentHistogramBucketEntry {
     pub(crate) fn into_intermediate_bucket_entry(
         self,
-        sub_aggregation: GenericSegmentAggregationResultsCollector,
+        sub_aggregation: Box<dyn SegmentAggregationCollector>,
         agg_with_accessor: &AggregationsWithAccessor,
     ) -> crate::Result<IntermediateHistogramBucketEntry> {
         Ok(IntermediateHistogramBucketEntry {
@@ -202,14 +203,85 @@ impl SegmentHistogramBucketEntry {
 #[derive(Clone, Debug)]
 pub struct SegmentHistogramCollector {
     /// The buckets containing the aggregation data.
-    buckets: Vec<SegmentHistogramBucketEntry>,
-    sub_aggregations: Option<Vec<GenericSegmentAggregationResultsCollector>>,
-    field_type: Type,
+    buckets: FxHashMap<i64, SegmentHistogramBucketEntry>,
+    sub_aggregations: FxHashMap<i64, Box<dyn SegmentAggregationCollector>>,
+    sub_aggregation_blueprint: Option<Box<dyn SegmentAggregationCollector>>,
+    column_type: ColumnType,
     interval: f64,
     offset: f64,
-    min_doc_count: u64,
-    first_bucket_num: i64,
     bounds: HistogramBounds,
+    accessor_idx: usize,
+}
+
+impl SegmentAggregationCollector for SegmentHistogramCollector {
+    fn into_intermediate_aggregations_result(
+        self: Box<Self>,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<IntermediateAggregationResults> {
+        let name = agg_with_accessor.buckets.keys[self.accessor_idx].to_string();
+        let agg_with_accessor = &agg_with_accessor.buckets.values[self.accessor_idx];
+
+        let bucket = self.into_intermediate_bucket_result(agg_with_accessor)?;
+        let buckets = Some(VecWithNames::from_entries(vec![(name, bucket)]));
+
+        Ok(IntermediateAggregationResults {
+            metrics: None,
+            buckets,
+        })
+    }
+
+    fn collect(
+        &mut self,
+        doc: crate::DocId,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        self.collect_block(&[doc], agg_with_accessor)
+    }
+
+    fn collect_block(
+        &mut self,
+        docs: &[crate::DocId],
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor;
+        let sub_aggregation_accessor =
+            &agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
+
+        let bounds = self.bounds;
+        let interval = self.interval;
+        let offset = self.offset;
+        let get_bucket_pos = |val| (get_bucket_pos_f64(val, interval, offset) as i64);
+
+        for doc in docs {
+            for val in accessor.values_for_doc(*doc) {
+                let val = self.f64_from_fastfield_u64(val);
+
+                let bucket_pos = get_bucket_pos(val);
+
+                if bounds.contains(val) {
+                    self.increment_bucket(
+                        bucket_pos,
+                        *doc,
+                        sub_aggregation_accessor,
+                        interval,
+                        offset,
+                    )?;
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
+        let sub_aggregation_accessor =
+            &agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
+
+        for sub_aggregation in self.sub_aggregations.values_mut() {
+            sub_aggregation.flush(sub_aggregation_accessor)?;
+        }
+
+        Ok(())
+    }
 }
 
 impl SegmentHistogramCollector {
@@ -217,210 +289,96 @@ impl SegmentHistogramCollector {
         self,
         agg_with_accessor: &BucketAggregationWithAccessor,
     ) -> crate::Result<IntermediateBucketResult> {
-        // Compute the number of buckets to validate against max num buckets
-        // Note: We use min_doc_count here, but it's only an lowerbound here, since were are on the
-        // intermediate level and after merging the number of documents of a bucket could exceed
-        // `min_doc_count`.
-        {
-            let cut_off_buckets_front = self
-                .buckets
-                .iter()
-                .take_while(|bucket| bucket.doc_count <= self.min_doc_count)
-                .count();
-            let cut_off_buckets_back = self.buckets[cut_off_buckets_front..]
-                .iter()
-                .rev()
-                .take_while(|bucket| bucket.doc_count <= self.min_doc_count)
-                .count();
-            let estimate_num_buckets =
-                self.buckets.len() - cut_off_buckets_front - cut_off_buckets_back;
-
-            agg_with_accessor
-                .bucket_count
-                .add_count(estimate_num_buckets as u32);
-            agg_with_accessor.bucket_count.validate_bucket_count()?;
-        }
-
-        let mut buckets = Vec::with_capacity(
-            self.buckets
-                .iter()
-                .filter(|bucket| bucket.doc_count != 0)
-                .count(),
-        );
-
-        // Below we remove empty buckets for two reasons
-        // 1. To reduce the size of the intermediate result, which may be passed on the wire.
-        // 2. To mimic elasticsearch, there are no empty buckets at the start and end.
-        //
-        // Empty buckets may be added later again in the final result, depending on the request.
-        if let Some(sub_aggregations) = self.sub_aggregations {
-            for bucket_res in self
-                .buckets
-                .into_iter()
-                .zip(sub_aggregations.into_iter())
-                .filter(|(bucket, _sub_aggregation)| bucket.doc_count != 0)
-                .map(|(bucket, sub_aggregation)| {
-                    bucket.into_intermediate_bucket_entry(
-                        sub_aggregation,
-                        &agg_with_accessor.sub_aggregation,
-                    )
-                })
-            {
+        let mut buckets = Vec::with_capacity(self.buckets.len());
+
+        if self.sub_aggregation_blueprint.is_some() {
+            for (bucket_pos, bucket) in self.buckets.into_iter() {
+                let bucket_res = bucket.into_intermediate_bucket_entry(
+                    self.sub_aggregations.get(&bucket_pos).unwrap().clone(),
+                    &agg_with_accessor.sub_aggregation,
+                );
+
                 buckets.push(bucket_res?);
             }
         } else {
-            buckets.extend(
-                self.buckets
-                    .into_iter()
-                    .filter(|bucket| bucket.doc_count != 0)
-                    .map(|bucket| bucket.into()),
-            );
+            buckets.extend(self.buckets.into_values().map(|bucket| bucket.into()));
         };
+        buckets.sort_unstable_by(|b1, b2| b1.key.partial_cmp(&b2.key).unwrap_or(Ordering::Equal));
 
-        Ok(IntermediateBucketResult::Histogram { buckets })
+        Ok(IntermediateBucketResult::Histogram {
+            buckets,
+            column_type: Some(self.column_type),
+        })
     }
 
     pub(crate) fn from_req_and_validate(
         req: &HistogramAggregation,
         sub_aggregation: &AggregationsWithAccessor,
-        field_type: Type,
-        accessor: &Column<u64>,
+        field_type: ColumnType,
+        accessor_idx: usize,
     ) -> crate::Result<Self> {
         req.validate()?;
-        let min = f64_from_fastfield_u64(accessor.min_value(), &field_type);
-        let max = f64_from_fastfield_u64(accessor.max_value(), &field_type);
 
-        let (min, max) = get_req_min_max(req, Some((min, max)));
-
-        // We compute and generate the buckets range (min, max) based on the request and the min
-        // max in the fast field, but this is likely not ideal when this is a subbucket, where many
-        // unnecessary buckets may be generated.
-        let buckets = generate_buckets(req, min, max);
-
-        let sub_aggregations = if sub_aggregation.is_empty() {
+        let sub_aggregation_blueprint = if sub_aggregation.is_empty() {
             None
         } else {
-            let sub_aggregation =
-                GenericSegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
-            Some(buckets.iter().map(|_| sub_aggregation.clone()).collect())
+            let sub_aggregation = build_segment_agg_collector(sub_aggregation)?;
+            Some(sub_aggregation)
         };
 
-        let buckets = buckets
-            .iter()
-            .map(|bucket| SegmentHistogramBucketEntry {
-                key: *bucket,
-                doc_count: 0,
-            })
-            .collect();
-
-        let first_bucket_num =
-            get_bucket_num_f64(min, req.interval, req.offset.unwrap_or(0.0)) as i64;
-
         let bounds = req.hard_bounds.unwrap_or(HistogramBounds {
             min: f64::MIN,
             max: f64::MAX,
         });
 
         Ok(Self {
-            buckets,
-            field_type,
+            buckets: Default::default(),
+            column_type: field_type,
             interval: req.interval,
             offset: req.offset.unwrap_or(0.0),
-            first_bucket_num,
             bounds,
-            sub_aggregations,
-            min_doc_count: req.min_doc_count(),
+            sub_aggregations: Default::default(),
+            sub_aggregation_blueprint,
+            accessor_idx,
         })
     }
 
-    #[inline]
-    pub(crate) fn collect_block(
-        &mut self,
-        docs: &[DocId],
-        bucket_with_accessor: &BucketAggregationWithAccessor,
-        force_flush: bool,
-    ) -> crate::Result<()> {
-        let bounds = self.bounds;
-        let interval = self.interval;
-        let offset = self.offset;
-        let first_bucket_num = self.first_bucket_num;
-        let get_bucket_num =
-            |val| (get_bucket_num_f64(val, interval, offset) as i64 - first_bucket_num) as usize;
-
-        let accessor = &bucket_with_accessor.accessor;
-        for doc in docs {
-            for val in accessor.values(*doc) {
-                let val = self.f64_from_fastfield_u64(val);
-
-                let bucket_pos = get_bucket_num(val);
-                self.increment_bucket_if_in_bounds(
-                    val,
-                    &bounds,
-                    bucket_pos,
-                    *doc,
-                    &bucket_with_accessor.sub_aggregation,
-                )?;
-            }
-        }
-        if force_flush {
-            if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {
-                for sub_aggregation in sub_aggregations {
-                    sub_aggregation
-                        .flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
-                }
-            }
-        }
-        Ok(())
-    }
-
-    #[inline]
-    fn increment_bucket_if_in_bounds(
-        &mut self,
-        val: f64,
-        bounds: &HistogramBounds,
-        bucket_pos: usize,
-        doc: DocId,
-        bucket_with_accessor: &AggregationsWithAccessor,
-    ) -> crate::Result<()> {
-        if bounds.contains(val) {
-            debug_assert_eq!(
-                self.buckets[bucket_pos].key,
-                get_bucket_val(val, self.interval, self.offset)
-            );
-
-            self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;
-        }
-        Ok(())
-    }
-
     #[inline]
     fn increment_bucket(
         &mut self,
-        bucket_pos: usize,
+        bucket_pos: i64,
         doc: DocId,
         bucket_with_accessor: &AggregationsWithAccessor,
+        interval: f64,
+        offset: f64,
     ) -> crate::Result<()> {
-        let bucket = &mut self.buckets[bucket_pos];
+        let bucket = self.buckets.entry(bucket_pos).or_insert_with(|| {
+            let key = get_bucket_key_from_pos(bucket_pos as f64, interval, offset);
+            SegmentHistogramBucketEntry { key, doc_count: 0 }
+        });
         bucket.doc_count += 1;
-        if let Some(sub_aggregation) = self.sub_aggregations.as_mut() {
-            sub_aggregation[bucket_pos].collect(doc, bucket_with_accessor)?;
+        if let Some(sub_aggregation_blueprint) = self.sub_aggregation_blueprint.as_mut() {
+            self.sub_aggregations
+                .entry(bucket_pos)
+                .or_insert_with(|| sub_aggregation_blueprint.clone())
+                .collect(doc, bucket_with_accessor)?;
         }
         Ok(())
     }
 
+    #[inline]
     fn f64_from_fastfield_u64(&self, val: u64) -> f64 {
-        f64_from_fastfield_u64(val, &self.field_type)
+        f64_from_fastfield_u64(val, &self.column_type)
     }
 }
 
 #[inline]
-fn get_bucket_num_f64(val: f64, interval: f64, offset: f64) -> f64 {
+fn get_bucket_pos_f64(val: f64, interval: f64, offset: f64) -> f64 {
     ((val - offset) / interval).floor()
 }
 
 #[inline]
-fn get_bucket_val(val: f64, interval: f64, offset: f64) -> f64 {
-    let bucket_pos = get_bucket_num_f64(val, interval, offset);
+fn get_bucket_key_from_pos(bucket_pos: f64, interval: f64, offset: f64) -> f64 {
     bucket_pos * interval + offset
 }
 
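The bucket arithmetic implemented by `get_bucket_pos_f64` and `get_bucket_key_from_pos` above is small enough to check in isolation; a minimal sketch mirroring those formulas (illustrative code, not the crate's API):

    fn bucket_pos(val: f64, interval: f64, offset: f64) -> i64 {
        // Same formula as get_bucket_pos_f64: floor((val - offset) / interval).
        ((val - offset) / interval).floor() as i64
    }

    fn bucket_key(pos: i64, interval: f64, offset: f64) -> f64 {
        // Same formula as get_bucket_key_from_pos: pos * interval + offset.
        pos as f64 * interval + offset
    }

    fn main() {
        let (interval, offset) = (2.0, 0.5);
        // 2.7 falls into the bucket that starts at 2.5.
        let pos = bucket_pos(2.7, interval, offset);
        assert_eq!(pos, 1);
        assert_eq!(bucket_key(pos, interval, offset), 2.5);
    }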
@@ -429,19 +387,14 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
     buckets: Vec<IntermediateHistogramBucketEntry>,
     histogram_req: &HistogramAggregation,
     sub_aggregation: &AggregationsInternal,
-    schema: &Schema,
 ) -> crate::Result<Vec<BucketEntry>> {
     // Generate the full list of buckets without gaps.
     //
     // The bounds are the min max from the current buckets, optionally extended by
     // extended_bounds from the request
-    let min_max = if buckets.is_empty() {
-        None
-    } else {
-        let min = buckets[0].key;
-        let max = buckets[buckets.len() - 1].key;
-        Some((min, max))
-    };
+    let min_max = minmax(buckets.iter().map(|bucket| bucket.key));
+    // TODO add memory check
     let fill_gaps_buckets = generate_buckets_with_opt_minmax(histogram_req, min_max);
 
     let empty_sub_aggregation = IntermediateAggregationResults::empty_from_req(sub_aggregation);
@@ -470,43 +423,33 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
                 sub_aggregation: empty_sub_aggregation.clone(),
             },
         })
-        .map(|intermediate_bucket| {
-            intermediate_bucket.into_final_bucket_entry(sub_aggregation, schema)
-        })
+        .map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
         .collect::<crate::Result<Vec<_>>>()
 }
 
 // Convert to BucketEntry
 pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
     buckets: Vec<IntermediateHistogramBucketEntry>,
+    column_type: Option<ColumnType>,
     histogram_req: &HistogramAggregation,
     sub_aggregation: &AggregationsInternal,
-    schema: &Schema,
 ) -> crate::Result<Vec<BucketEntry>> {
     let mut buckets = if histogram_req.min_doc_count() == 0 {
         // With min_doc_count != 0, we may need to add buckets, so that there are no
         // gaps, since intermediate result does not contain empty buckets (filtered to
         // reduce serialization size).
-        intermediate_buckets_to_final_buckets_fill_gaps(
-            buckets,
-            histogram_req,
-            sub_aggregation,
-            schema,
-        )?
+        intermediate_buckets_to_final_buckets_fill_gaps(buckets, histogram_req, sub_aggregation)?
     } else {
         buckets
             .into_iter()
             .filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
-            .map(|histogram_bucket| {
-                histogram_bucket.into_final_bucket_entry(sub_aggregation, schema)
-            })
+            .map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
             .collect::<crate::Result<Vec<_>>>()?
     };
 
     // If we have a date type on the histogram buckets, we add the `key_as_string` field as rfc339
-    let field = schema.get_field(&histogram_req.field)?;
-    if schema.get_field_entry(field).field_type().is_date() {
+    if column_type == Some(ColumnType::DateTime) {
         for bucket in buckets.iter_mut() {
             if let crate::aggregation::Key::F64(val) = bucket.key {
                 let key_as_string = format_date(val as i64)?;
@@ -537,12 +480,6 @@ fn get_req_min_max(req: &HistogramAggregation, min_max: Option<(f64, f64)>) -> (
     (min, max)
 }
 
-/// Generates buckets with req.interval
-/// range is computed for provided min_max and request extended_bounds/hard_bounds
-pub(crate) fn generate_buckets(req: &HistogramAggregation, min: f64, max: f64) -> Vec<f64> {
-    generate_buckets_with_opt_minmax(req, Some((min, max)))
-}
-
 /// Generates buckets with req.interval
 /// Range is computed for provided min_max and request extended_bounds/hard_bounds
 /// returns empty vec when there is no range to span
@@ -553,8 +490,8 @@ pub(crate) fn generate_buckets_with_opt_minmax(
     let (min, max) = get_req_min_max(req, min_max);
 
     let offset = req.offset.unwrap_or(0.0);
-    let first_bucket_num = get_bucket_num_f64(min, req.interval, offset) as i64;
-    let last_bucket_num = get_bucket_num_f64(max, req.interval, offset) as i64;
+    let first_bucket_num = get_bucket_pos_f64(min, req.interval, offset) as i64;
+    let last_bucket_num = get_bucket_pos_f64(max, req.interval, offset) as i64;
     let mut buckets = Vec::with_capacity((first_bucket_num..=last_bucket_num).count());
     for bucket_pos in first_bucket_num..=last_bucket_num {
         let bucket_key = bucket_pos as f64 * req.interval + offset;
@@ -564,118 +501,6 @@ pub(crate) fn generate_buckets_with_opt_minmax(
     buckets
 }
 
-#[test]
-fn generate_buckets_test() {
-    let histogram_req = HistogramAggregation {
-        field: "dummy".to_string(),
-        interval: 2.0,
-        ..Default::default()
-    };
-
-    let buckets = generate_buckets(&histogram_req, 0.0, 10.0);
-    assert_eq!(buckets, vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0]);
-
-    let buckets = generate_buckets(&histogram_req, 2.5, 5.5);
-    assert_eq!(buckets, vec![2.0, 4.0]);
-
-    // Single bucket
-    let buckets = generate_buckets(&histogram_req, 0.5, 0.75);
-    assert_eq!(buckets, vec![0.0]);
-
-    // With offset
-    let histogram_req = HistogramAggregation {
-        field: "dummy".to_string(),
-        interval: 2.0,
-        offset: Some(0.5),
-        ..Default::default()
-    };
-
-    let buckets = generate_buckets(&histogram_req, 0.0, 10.0);
-    assert_eq!(buckets, vec![-1.5, 0.5, 2.5, 4.5, 6.5, 8.5]);
-
-    let buckets = generate_buckets(&histogram_req, 2.5, 5.5);
-    assert_eq!(buckets, vec![2.5, 4.5]);
-
-    // Single bucket
-    let buckets = generate_buckets(&histogram_req, 0.5, 0.75);
-    assert_eq!(buckets, vec![0.5]);
-
-    // no bucket
-    let buckets = generate_buckets(&histogram_req, f64::MAX, f64::MIN);
-    assert_eq!(buckets, vec![] as Vec<f64>);
-
-    // With extended_bounds
-    let histogram_req = HistogramAggregation {
-        field: "dummy".to_string(),
-        interval: 2.0,
-        extended_bounds: Some(HistogramBounds {
-            min: 0.0,
-            max: 10.0,
-        }),
-        ..Default::default()
-    };
-
-    let buckets = generate_buckets(&histogram_req, 0.0, 10.0);
-    assert_eq!(buckets, vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0]);
-
-    let buckets = generate_buckets(&histogram_req, 2.5, 5.5);
-    assert_eq!(buckets, vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0]);
-
-    // Single bucket, but extended_bounds
-    let buckets = generate_buckets(&histogram_req, 0.5, 0.75);
-    assert_eq!(buckets, vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0]);
-
-    // no bucket, but extended_bounds
-    let buckets = generate_buckets(&histogram_req, f64::MAX, f64::MIN);
-    assert_eq!(buckets, vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0]);
-
-    // With invalid extended_bounds
-    let histogram_req = HistogramAggregation {
-        field: "dummy".to_string(),
-        interval: 2.0,
-        extended_bounds: Some(HistogramBounds { min: 3.0, max: 5.0 }),
-        ..Default::default()
-    };
-
-    let buckets = generate_buckets(&histogram_req, 0.0, 10.0);
-    assert_eq!(buckets, vec![0.0, 2.0, 4.0, 6.0, 8.0, 10.0]);
-
-    // With hard_bounds reducing
-    let histogram_req = HistogramAggregation {
-        field: "dummy".to_string(),
-        interval: 2.0,
-        hard_bounds: Some(HistogramBounds { min: 3.0, max: 5.0 }),
-        ..Default::default()
-    };
-
-    let buckets = generate_buckets(&histogram_req, 0.0, 10.0);
-    assert_eq!(buckets, vec![2.0, 4.0]);
-
-    // With hard_bounds, extending has no effect
-    let histogram_req = HistogramAggregation {
-        field: "dummy".to_string(),
-        interval: 2.0,
-        hard_bounds: Some(HistogramBounds {
-            min: 0.0,
-            max: 10.0,
-        }),
-        ..Default::default()
-    };
-
-    let buckets = generate_buckets(&histogram_req, 2.5, 5.5);
-    assert_eq!(buckets, vec![2.0, 4.0]);
-
-    // Blubber
-    let histogram_req = HistogramAggregation {
-        field: "dummy".to_string(),
-        interval: 2.0,
-        ..Default::default()
-    };
-
-    let buckets = generate_buckets(&histogram_req, 4.0, 10.0);
-    assert_eq!(buckets, vec![4.0, 6.0, 8.0, 10.0]);
-}
-
 #[cfg(test)]
 mod tests {
 
@@ -1496,36 +1321,4 @@ mod tests {
 
         Ok(())
     }
-
-    #[test]
-    fn histogram_test_max_buckets_segments() -> crate::Result<()> {
-        let values = vec![0.0, 70000.0];
-
-        let index = get_test_index_from_values(true, &values)?;
-
-        let agg_req: Aggregations = vec![(
-            "my_interval".to_string(),
-            Aggregation::Bucket(BucketAggregation {
-                bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
-                    field: "score_f64".to_string(),
-                    interval: 1.0,
-                    ..Default::default()
-                }),
-                sub_aggregation: Default::default(),
-            }),
-        )]
-        .into_iter()
-        .collect();
-
-        let res = exec_request(agg_req, &index);
-
-        assert_eq!(
-            res.unwrap_err().to_string(),
-            "An invalid argument was passed: 'Aborting aggregation because too many buckets were \
-             created'"
-                .to_string()
-        );
-
-        Ok(())
-    }
 }
@@ -21,28 +21,25 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 pub use term_agg::*;
 
 /// Order for buckets in a bucket aggregation.
-#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize, Default)]
 pub enum Order {
     /// Asc order
     #[serde(rename = "asc")]
     Asc,
     /// Desc order
     #[serde(rename = "desc")]
+    #[default]
     Desc,
 }
 
-impl Default for Order {
-    fn default() -> Self {
-        Order::Desc
-    }
-}
-
 #[derive(Clone, Debug, PartialEq)]
 /// Order property by which to apply the order
+#[derive(Default)]
 pub enum OrderTarget {
     /// The key of the bucket
     Key,
     /// The doc count of the bucket
+    #[default]
     Count,
     /// Order by value of the sub aggregation metric with identified by given `String`.
     ///
@@ -50,11 +47,6 @@ pub enum OrderTarget {
     SubAggregation(String),
 }
 
-impl Default for OrderTarget {
-    fn default() -> Self {
-        OrderTarget::Count
-    }
-}
 impl From<&str> for OrderTarget {
     fn from(val: &str) -> Self {
         match val {
@@ -1,24 +1,22 @@
 use std::fmt::Debug;
 use std::ops::Range;
 
-use columnar::MonotonicallyMappableToU64;
+use columnar::{ColumnType, MonotonicallyMappableToU64};
 use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
 
-use crate::aggregation::agg_req_with_accessor::{
-    AggregationsWithAccessor, BucketAggregationWithAccessor,
-};
+use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
 use crate::aggregation::intermediate_agg_result::{
-    IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
+    IntermediateAggregationResults, IntermediateBucketResult, IntermediateRangeBucketEntry,
+    IntermediateRangeBucketResult,
 };
 use crate::aggregation::segment_agg_result::{
-    BucketCount, GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
+    build_segment_agg_collector, BucketCount, SegmentAggregationCollector,
 };
 use crate::aggregation::{
-    f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
+    f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey, VecWithNames,
 };
-use crate::schema::Type;
-use crate::{DocId, TantivyError};
+use crate::TantivyError;
 
 /// Provide user-defined buckets to aggregate on.
 /// Two special buckets will automatically be created to cover the whole range of values.
@@ -128,14 +126,15 @@ pub(crate) struct SegmentRangeAndBucketEntry {
 pub struct SegmentRangeCollector {
     /// The buckets containing the aggregation data.
     buckets: Vec<SegmentRangeAndBucketEntry>,
-    field_type: Type,
+    column_type: ColumnType,
+    pub(crate) accessor_idx: usize,
 }
 
 #[derive(Clone)]
 pub(crate) struct SegmentRangeBucketEntry {
     pub key: Key,
     pub doc_count: u64,
-    pub sub_aggregation: Option<GenericSegmentAggregationResultsCollector>,
+    pub sub_aggregation: Option<Box<dyn SegmentAggregationCollector>>,
     /// The from range of the bucket. Equals `f64::MIN` when `None`.
     pub from: Option<f64>,
     /// The to range of the bucket. Equals `f64::MAX` when `None`. Open interval, `to` is not
@@ -174,12 +173,14 @@ impl SegmentRangeBucketEntry {
     }
 }
 
-impl SegmentRangeCollector {
-    pub fn into_intermediate_bucket_result(
-        self,
-        agg_with_accessor: &BucketAggregationWithAccessor,
-    ) -> crate::Result<IntermediateBucketResult> {
-        let field_type = self.field_type;
+impl SegmentAggregationCollector for SegmentRangeCollector {
+    fn into_intermediate_aggregations_result(
+        self: Box<Self>,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<IntermediateAggregationResults> {
+        let field_type = self.column_type;
+        let name = agg_with_accessor.buckets.keys[self.accessor_idx].to_string();
+        let sub_agg = &agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
+
         let buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
             .buckets
@@ -189,21 +190,77 @@ impl SegmentRangeCollector {
                     range_to_string(&range_bucket.range, &field_type)?,
                     range_bucket
                         .bucket
-                        .into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
+                        .into_intermediate_bucket_entry(sub_agg)?,
                 ))
             })
             .collect::<crate::Result<_>>()?;
 
-        Ok(IntermediateBucketResult::Range(
-            IntermediateRangeBucketResult { buckets },
-        ))
+        let bucket = IntermediateBucketResult::Range(IntermediateRangeBucketResult {
+            buckets,
+            column_type: Some(self.column_type),
+        });
+
+        let buckets = Some(VecWithNames::from_entries(vec![(name, bucket)]));
+
+        Ok(IntermediateAggregationResults {
+            metrics: None,
+            buckets,
+        })
     }
+
+    fn collect(
+        &mut self,
+        doc: crate::DocId,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        self.collect_block(&[doc], agg_with_accessor)
+    }
+
+    fn collect_block(
+        &mut self,
+        docs: &[crate::DocId],
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor;
+        let sub_aggregation_accessor =
+            &agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
+        for doc in docs {
+            for val in accessor.values_for_doc(*doc) {
+                let bucket_pos = self.get_bucket_pos(val);
+
+                let bucket = &mut self.buckets[bucket_pos];
+
+                bucket.bucket.doc_count += 1;
+                if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
+                    sub_aggregation.collect(*doc, sub_aggregation_accessor)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
+        let sub_aggregation_accessor =
+            &agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
+
+        for bucket in self.buckets.iter_mut() {
+            if let Some(sub_agg) = bucket.bucket.sub_aggregation.as_mut() {
+                sub_agg.flush(sub_aggregation_accessor)?;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl SegmentRangeCollector {
     pub(crate) fn from_req_and_validate(
         req: &RangeAggregation,
         sub_aggregation: &AggregationsWithAccessor,
         bucket_count: &BucketCount,
-        field_type: Type,
+        field_type: ColumnType,
+        accessor_idx: usize,
     ) -> crate::Result<Self> {
         // The range input on the request is f64.
         // We need to convert to u64 ranges, because we read the values as u64.
@@ -229,11 +286,7 @@ impl SegmentRangeCollector {
         let sub_aggregation = if sub_aggregation.is_empty() {
             None
         } else {
-            Some(
-                GenericSegmentAggregationResultsCollector::from_req_and_validate(
-                    sub_aggregation,
-                )?,
-            )
+            Some(build_segment_agg_collector(sub_aggregation)?)
         };
 
         Ok(SegmentRangeAndBucketEntry {
@@ -254,52 +307,11 @@ impl SegmentRangeCollector {
 
         Ok(SegmentRangeCollector {
             buckets,
-            field_type,
+            column_type: field_type,
+            accessor_idx,
         })
     }
 
-    #[inline]
-    pub(crate) fn collect_block(
-        &mut self,
-        docs: &[DocId],
-        bucket_with_accessor: &BucketAggregationWithAccessor,
-        force_flush: bool,
-    ) -> crate::Result<()> {
-        let accessor = &bucket_with_accessor.accessor;
-        for doc in docs {
-            for val in accessor.values(*doc) {
-                let bucket_pos = self.get_bucket_pos(val);
-                self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
-            }
-        }
-
-        if force_flush {
-            for bucket in &mut self.buckets {
-                if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
-                    sub_aggregation
-                        .flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
-                }
-            }
-        }
-        Ok(())
-    }
-
-    #[inline]
-    fn increment_bucket(
-        &mut self,
-        bucket_pos: usize,
-        doc: DocId,
-        bucket_with_accessor: &AggregationsWithAccessor,
-    ) -> crate::Result<()> {
-        let bucket = &mut self.buckets[bucket_pos];
-
-        bucket.bucket.doc_count += 1;
-        if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
-            sub_aggregation.collect(doc, bucket_with_accessor)?;
-        }
-        Ok(())
-    }
-
     #[inline]
     fn get_bucket_pos(&self, val: u64) -> usize {
         let pos = self
@@ -325,7 +337,7 @@ impl SegmentRangeCollector {
 /// more computational expensive when many documents are hit.
 fn to_u64_range(
     range: &RangeAggregationRange,
-    field_type: &Type,
+    field_type: &ColumnType,
 ) -> crate::Result<InternalRangeAggregationRange> {
     let start = if let Some(from) = range.from {
         f64_to_fastfield_u64(from, field_type)
@@ -351,7 +363,7 @@ fn to_u64_range(
 /// beginning and end and filling gaps.
 fn extend_validate_ranges(
     buckets: &[RangeAggregationRange],
-    field_type: &Type,
+    field_type: &ColumnType,
 ) -> crate::Result<Vec<InternalRangeAggregationRange>> {
     let mut converted_buckets = buckets
         .iter()
@@ -393,13 +405,16 @@ fn extend_validate_ranges(
     Ok(converted_buckets)
 }
 
-pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> crate::Result<String> {
+pub(crate) fn range_to_string(
+    range: &Range<u64>,
+    field_type: &ColumnType,
+) -> crate::Result<String> {
     // is_start is there for malformed requests, e.g. ig the user passes the range u64::MIN..0.0,
     // it should be rendered as "*-0" and not "*-*"
     let to_str = |val: u64, is_start: bool| {
         if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
             Ok("*".to_string())
-        } else if *field_type == Type::Date {
+        } else if *field_type == ColumnType::DateTime {
             let val = i64::from_u64(val);
             format_date(val)
         } else {
@@ -414,7 +429,7 @@ pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> crate::R
     ))
 }
 
-pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Result<Key> {
+pub(crate) fn range_to_key(range: &Range<u64>, field_type: &ColumnType) -> crate::Result<Key> {
     Ok(Key::Str(range_to_string(range, field_type)?))
 }
 
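The `range_to_string` helper above renders unbounded range ends as `*`. A hedged standalone sketch of the resulting key format, working directly on `Option<f64>` bounds instead of the u64-mapped fast-field values the real code receives (names are illustrative):

    fn range_key(from: Option<f64>, to: Option<f64>) -> String {
        // Unbounded ends become "*", mirroring the to_str closure above.
        let fmt = |v: Option<f64>| v.map(|v| v.to_string()).unwrap_or_else(|| "*".to_string());
        format!("{}-{}", fmt(from), fmt(to))
    }

    fn main() {
        assert_eq!(range_key(None, Some(0.0)), "*-0");
        assert_eq!(range_key(Some(0.0), Some(0.1)), "0-0.1");
        assert_eq!(range_key(Some(0.2), None), "0.2-*");
    }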
@@ -426,8 +441,9 @@ mod tests {
 
     use super::*;
     use crate::aggregation::agg_req::{
-        Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
+        Aggregation, Aggregations, BucketAggregation, BucketAggregationType, MetricAggregation,
     };
+    use crate::aggregation::metric::AverageAggregation;
     use crate::aggregation::tests::{
         exec_request, exec_request_with_query, get_test_index_2_segments,
         get_test_index_with_num_docs,
@@ -435,7 +451,7 @@ mod tests {
 
     pub fn get_collector_from_ranges(
         ranges: Vec<RangeAggregationRange>,
-        field_type: Type,
+        field_type: ColumnType,
     ) -> SegmentRangeCollector {
         let req = RangeAggregation {
             field: "dummy".to_string(),
@@ -448,6 +464,7 @@ mod tests {
             &Default::default(),
             &Default::default(),
             field_type,
+            0,
         )
         .expect("unexpected error")
     }
@@ -484,6 +501,47 @@ mod tests {
         Ok(())
     }
 
+    #[test]
+    fn range_fraction_test_with_sub_agg() -> crate::Result<()> {
+        let index = get_test_index_with_num_docs(false, 100)?;
+
+        let sub_agg_req: Aggregations = vec![(
+            "score_f64".to_string(),
+            Aggregation::Metric(MetricAggregation::Average(
+                AverageAggregation::from_field_name("score_f64".to_string()),
+            )),
+        )]
+        .into_iter()
+        .collect();
+
+        let agg_req: Aggregations = vec![(
+            "range".to_string(),
+            Aggregation::Bucket(BucketAggregation {
+                bucket_agg: BucketAggregationType::Range(RangeAggregation {
+                    field: "fraction_f64".to_string(),
+                    ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
+                    ..Default::default()
+                }),
+                sub_aggregation: sub_agg_req,
+            }),
+        )]
+        .into_iter()
+        .collect();
+
+        let res = exec_request_with_query(agg_req, &index, None)?;
+
+        assert_eq!(res["range"]["buckets"][0]["key"], "*-0");
+        assert_eq!(res["range"]["buckets"][0]["doc_count"], 0);
+        assert_eq!(res["range"]["buckets"][1]["key"], "0-0.1");
+        assert_eq!(res["range"]["buckets"][1]["doc_count"], 10);
+        assert_eq!(res["range"]["buckets"][2]["key"], "0.1-0.2");
+        assert_eq!(res["range"]["buckets"][2]["doc_count"], 10);
+        assert_eq!(res["range"]["buckets"][3]["key"], "0.2-*");
+        assert_eq!(res["range"]["buckets"][3]["doc_count"], 80);
+
+        Ok(())
+    }
+
     #[test]
     fn range_keyed_buckets_test() -> crate::Result<()> {
         let index = get_test_index_with_num_docs(false, 100)?;
@@ -683,7 +741,7 @@ mod tests {
     #[test]
     fn bucket_test_extend_range_hole() {
         let buckets = vec![(10f64..20f64).into(), (30f64..40f64).into()];
-        let collector = get_collector_from_ranges(buckets, Type::F64);
+        let collector = get_collector_from_ranges(buckets, ColumnType::F64);
 
         let buckets = collector.buckets;
         assert_eq!(buckets[0].range.start, u64::MIN);
@@ -706,7 +764,7 @@ mod tests {
             (10f64..20f64).into(),
             (20f64..f64::MAX).into(),
         ];
-        let collector = get_collector_from_ranges(buckets, Type::F64);
+        let collector = get_collector_from_ranges(buckets, ColumnType::F64);
 
         let buckets = collector.buckets;
         assert_eq!(buckets[0].range.start, u64::MIN);
@@ -721,7 +779,7 @@ mod tests {
     #[test]
     fn bucket_range_test_negative_vals() {
         let buckets = vec![(-10f64..-1f64).into()];
-        let collector = get_collector_from_ranges(buckets, Type::F64);
+        let collector = get_collector_from_ranges(buckets, ColumnType::F64);
 
         let buckets = collector.buckets;
         assert_eq!(&buckets[0].bucket.key.to_string(), "*--10");
@@ -730,7 +788,7 @@ mod tests {
     #[test]
     fn bucket_range_test_positive_vals() {
         let buckets = vec![(0f64..10f64).into()];
-        let collector = get_collector_from_ranges(buckets, Type::F64);
+        let collector = get_collector_from_ranges(buckets, ColumnType::F64);
 
         let buckets = collector.buckets;
         assert_eq!(&buckets[0].bucket.key.to_string(), "*-0");
@@ -740,7 +798,7 @@ mod tests {
     #[test]
     fn range_binary_search_test_u64() {
         let check_ranges = |ranges: Vec<RangeAggregationRange>| {
-            let collector = get_collector_from_ranges(ranges, Type::U64);
+            let collector = get_collector_from_ranges(ranges, ColumnType::U64);
             let search = |val: u64| collector.get_bucket_pos(val);
 
             assert_eq!(search(u64::MIN), 0);
@@ -786,7 +844,7 @@ mod tests {
     fn range_binary_search_test_f64() {
         let ranges = vec![(10.0..100.0).into()];
 
-        let collector = get_collector_from_ranges(ranges, Type::F64);
+        let collector = get_collector_from_ranges(ranges, ColumnType::F64);
         let search = |val: u64| collector.get_bucket_pos(val);
 
         assert_eq!(search(u64::MIN), 0);
@@ -821,7 +879,7 @@ mod bench {
             buckets.push((bucket_start..bucket_start + bucket_size as f64).into())
         }
 
-        get_collector_from_ranges(buckets, Type::U64)
+        get_collector_from_ranges(buckets, ColumnType::U64)
     }
 
     fn get_rand_docs(total_docs: u64, num_docs_returned: u64) -> Vec<u64> {
@@ -1,7 +1,6 @@
 use std::fmt::Debug;
 
-use columnar::Column;
-use itertools::Itertools;
+use columnar::{Cardinality, ColumnType};
 use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
 
@@ -10,15 +9,15 @@ use crate::aggregation::agg_req_with_accessor::{
     AggregationsWithAccessor, BucketAggregationWithAccessor,
 };
 use crate::aggregation::intermediate_agg_result::{
-    IntermediateBucketResult, IntermediateTermBucketEntry, IntermediateTermBucketResult,
+    IntermediateAggregationResults, IntermediateBucketResult, IntermediateTermBucketEntry,
+    IntermediateTermBucketResult,
 };
 use crate::aggregation::segment_agg_result::{
-    build_segment_agg_collector, GenericSegmentAggregationResultsCollector,
-    SegmentAggregationCollector,
+    build_segment_agg_collector, SegmentAggregationCollector,
 };
+use crate::aggregation::{f64_from_fastfield_u64, Key, VecWithNames};
 use crate::error::DataCorruption;
-use crate::schema::Type;
-use crate::{DocId, TantivyError};
+use crate::TantivyError;
 
 /// Creates a bucket for every unique term and counts the number of occurences.
 /// Note that doc_count in the response buckets equals term count here.
@@ -26,6 +25,10 @@ use crate::{DocId, TantivyError};
 /// If the text is untokenized and single value, that means one term per document and therefore it
 /// is in fact doc count.
 ///
+/// ## Prerequisite
+/// Term aggregations work only on [fast fields](`crate::fastfield`) of type `u64`, `f64`, `i64` and
+/// text.
+///
 /// ### Terminology
 /// Shard parameters are supposed to be equivalent to elasticsearch shard parameter.
 /// Since they are
@@ -78,9 +81,9 @@ use crate::{DocId, TantivyError};
 /// ...
 /// "aggregations": {
 ///   "genres": {
 ///     "doc_count_error_upper_bound": 0,
 ///     "sum_other_doc_count": 0,
 ///     "buckets": [
 ///       { "key": "drumnbass", "doc_count": 6 },
 ///       { "key": "raggae", "doc_count": 4 },
 ///       { "key": "jazz", "doc_count": 2 }
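For context, the doc comment above shows the Elasticsearch-compatible response shape of a terms aggregation. A minimal request in the same JSON form the earlier tests deserialize into `Aggregations` (a sketch assuming the serde_json crate, as those tests do; the "genres"/"genre" names are illustrative, not taken from this diff):

    use serde_json::json;

    fn main() {
        // One terms bucket aggregation named "genres" over the fast field "genre".
        let request = json!({
            "genres": {
                "terms": { "field": "genre" }
            }
        });
        println!("{}", serde_json::to_string_pretty(&request).unwrap());
    }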
@@ -200,9 +203,9 @@ impl TermsAggregationInternal {
 }
 
 #[derive(Clone, Debug, Default)]
-/// Container to store term_ids and their buckets.
+/// Container to store term_ids/or u64 values and their buckets.
 struct TermBuckets {
-    pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
+    pub(crate) entries: FxHashMap<u64, TermBucketEntry>,
 }
 
 #[derive(Clone, Default)]
@@ -245,19 +248,10 @@ impl TermBucketEntry {
 }
 
 impl TermBuckets {
-    pub(crate) fn from_req_and_validate(
-        sub_aggregation: &AggregationsWithAccessor,
-        _max_term_id: usize,
-    ) -> crate::Result<Self> {
-        Ok(TermBuckets {
-            entries: Default::default(),
-        })
-    }
-
     fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
         for entry in &mut self.entries.values_mut() {
             if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
-                sub_aggregations.flush_staged_docs(agg_with_accessor, false)?;
+                sub_aggregations.flush(agg_with_accessor)?;
             }
         }
         Ok(())
@@ -272,6 +266,8 @@ pub struct SegmentTermCollector {
     term_buckets: TermBuckets,
     req: TermsAggregationInternal,
     blueprint: Option<Box<dyn SegmentAggregationCollector>>,
+    field_type: ColumnType,
+    accessor_idx: usize,
 }
 
 pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) {
@@ -279,10 +275,86 @@ pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) {
     (agg_name, agg_property)
 }
 
+impl SegmentAggregationCollector for SegmentTermCollector {
+    fn into_intermediate_aggregations_result(
+        self: Box<Self>,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<IntermediateAggregationResults> {
+        let name = agg_with_accessor.buckets.keys[self.accessor_idx].to_string();
+        let agg_with_accessor = &agg_with_accessor.buckets.values[self.accessor_idx];
+
+        let bucket = self.into_intermediate_bucket_result(agg_with_accessor)?;
+        let buckets = Some(VecWithNames::from_entries(vec![(name, bucket)]));
+
+        Ok(IntermediateAggregationResults {
+            metrics: None,
+            buckets,
+        })
+    }
+
+    fn collect(
+        &mut self,
+        doc: crate::DocId,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        self.collect_block(&[doc], agg_with_accessor)
+    }
+
+    fn collect_block(
+        &mut self,
+        docs: &[crate::DocId],
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor;
+        let sub_aggregation_accessor =
+            &agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
+
+        if accessor.get_cardinality() == Cardinality::Full {
+            for doc in docs {
+                let term_id = accessor.values.get_val(*doc);
+                let entry = self
+                    .term_buckets
+                    .entries
+                    .entry(term_id)
+                    .or_insert_with(|| TermBucketEntry::from_blueprint(&self.blueprint));
+                entry.doc_count += 1;
+                if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
+                    sub_aggregations.collect(*doc, sub_aggregation_accessor)?;
+                }
+            }
+        } else {
+            for doc in docs {
+                for term_id in accessor.values_for_doc(*doc) {
+                    let entry = self
+                        .term_buckets
+                        .entries
+                        .entry(term_id)
+                        .or_insert_with(|| TermBucketEntry::from_blueprint(&self.blueprint));
+                    entry.doc_count += 1;
+                    if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
+                        sub_aggregations.collect(*doc, sub_aggregation_accessor)?;
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
+        let sub_aggregation_accessor =
+            &agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
+
+        self.term_buckets.force_flush(sub_aggregation_accessor)?;
+        Ok(())
+    }
+}
+
 impl SegmentTermCollector {
     pub(crate) fn from_req_and_validate(
         req: &TermsAggregation,
         sub_aggregations: &AggregationsWithAccessor,
+        field_type: ColumnType,
+        accessor_idx: usize,
     ) -> crate::Result<Self> {
         let term_buckets = TermBuckets::default();
 
@@ -312,6 +384,8 @@ impl SegmentTermCollector {
             req: TermsAggregationInternal::from_req(req),
             term_buckets,
             blueprint,
+            field_type,
+            accessor_idx,
         })
     }
 
@@ -319,10 +393,9 @@ impl SegmentTermCollector {
         self,
         agg_with_accessor: &BucketAggregationWithAccessor,
     ) -> crate::Result<IntermediateBucketResult> {
-        let mut entries: Vec<(u32, TermBucketEntry)> =
+        let mut entries: Vec<(u64, TermBucketEntry)> =
             self.term_buckets.entries.into_iter().collect();
 
-        let order_by_key = self.req.order.target == OrderTarget::Key;
         let order_by_sub_aggregation =
             matches!(self.req.order.target, OrderTarget::SubAggregation(_));
 
@@ -351,61 +424,58 @@ impl SegmentTermCollector {
             }
         }
 
-        let (term_doc_count_before_cutoff, mut sum_other_doc_count) = if order_by_sub_aggregation {
+        let (term_doc_count_before_cutoff, sum_other_doc_count) = if order_by_sub_aggregation {
            (0, 0)
         } else {
            cut_off_buckets(&mut entries, self.req.segment_size as usize)
         };
 
-        let inverted_index = agg_with_accessor
+        let mut dict: FxHashMap<Key, IntermediateTermBucketEntry> = Default::default();
-            .str_dict_column
+        dict.reserve(entries.len());
-            .as_ref()
+        if self.field_type == ColumnType::Str {
-            .expect("internal error: inverted index not loaded for term aggregation");
+            let term_dict = agg_with_accessor
-        let term_dict = inverted_index;
+                .str_dict_column
+                .as_ref()
+                .expect("internal error: term dictionary not found for term aggregation");
 
-        let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
+            let mut buffer = String::new();
-        let mut buffer = String::new();
+            for (term_id, entry) in entries {
-        for (term_id, entry) in entries {
+                if !term_dict.ord_to_str(term_id, &mut buffer)? {
-            if !term_dict.ord_to_str(term_id as u64, &mut buffer)? {
+                    return Err(TantivyError::InternalError(format!(
-                return Err(TantivyError::InternalError(format!(
+                        "Couldn't find term_id {} in dict",
-                    "Couldn't find term_id {} in dict",
+                        term_id
-                    term_id
+                    )));
-                )));
-            }
-            dict.insert(
-                buffer.to_string(),
-                entry.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
-            );
-        }
-        if self.req.min_doc_count == 0 {
-            // TODO: Handle rev streaming for descending sorting by keys
-            let mut stream = term_dict.dictionary().stream()?;
-            while let Some((key, _ord)) = stream.next() {
-                if dict.len() >= self.req.segment_size as usize {
-                    break;
                 }
+                dict.insert(
+                    Key::Str(buffer.to_string()),
+                    entry.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
+                );
+            }
+            if self.req.min_doc_count == 0 {
+                // TODO: Handle rev streaming for descending sorting by keys
+                let mut stream = term_dict.dictionary().stream()?;
+                while let Some((key, _ord)) = stream.next() {
+                    if dict.len() >= self.req.segment_size as usize {
+                        break;
+                    }
 
-                let key = std::str::from_utf8(key)
+                    let key = Key::Str(
-                    .map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
+                        std::str::from_utf8(key)
-                if !dict.contains_key(key) {
+                            .map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?
-                    dict.insert(key.to_owned(), Default::default());
+                            .to_string(),
+                    );
+                    dict.entry(key).or_default();
                 }
             }
-        }
+        } else {
+            for (val, entry) in entries {
-        if order_by_key {
+                let val = f64_from_fastfield_u64(val, &self.field_type);
-            let mut dict_entries = dict.into_iter().collect_vec();
+                dict.insert(
-            if self.req.order.order == Order::Desc {
+                    Key::F64(val),
-                dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key1.cmp(key2));
+                    entry.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
-            } else {
+                );
-                dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key2.cmp(key1));
             }
-            let (_, sum_other_docs) =
+        };
-                cut_off_buckets(&mut dict_entries, self.req.segment_size as usize);
-
-            sum_other_doc_count += sum_other_docs;
-            dict = dict_entries.into_iter().collect();
-        }
 
         Ok(IntermediateBucketResult::Terms(
             IntermediateTermBucketResult {
@@ -415,36 +485,6 @@ impl SegmentTermCollector {
             },
         ))
     }
 
-    #[inline]
-    pub(crate) fn collect_block(
-        &mut self,
-        docs: &[DocId],
-        bucket_with_accessor: &BucketAggregationWithAccessor,
-        force_flush: bool,
-    ) -> crate::Result<()> {
-        let accessor = &bucket_with_accessor.accessor;
-
-        for doc in docs {
-            for term_id in accessor.values(*doc) {
-                let entry = self
-                    .term_buckets
-                    .entries
-                    .entry(term_id as u32)
-                    .or_insert_with(|| TermBucketEntry::from_blueprint(&self.blueprint));
-                entry.doc_count += 1;
-                if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
-                    sub_aggregations.collect(*doc, &bucket_with_accessor.sub_aggregation)?;
-                }
-            }
-        }
-
-        if force_flush {
-            self.term_buckets
-                .force_flush(&bucket_with_accessor.sub_aggregation)?;
-        }
-        Ok(())
-    }
 }
 
 pub(crate) trait GetDocCount {
@@ -455,6 +495,11 @@ impl GetDocCount for (u32, TermBucketEntry) {
         self.1.doc_count
     }
 }
+impl GetDocCount for (u64, TermBucketEntry) {
+    fn doc_count(&self) -> u64 {
+        self.1.doc_count
+    }
+}
 impl GetDocCount for (String, IntermediateTermBucketEntry) {
     fn doc_count(&self) -> u64 {
         self.1.doc_count
@@ -483,8 +528,7 @@ pub(crate) fn cut_off_buckets<T: GetDocCount + Debug>(
 mod tests {
     use super::*;
     use crate::aggregation::agg_req::{
-        get_term_dict_field_names, Aggregation, Aggregations, BucketAggregation,
-        BucketAggregationType, MetricAggregation,
+        Aggregation, Aggregations, BucketAggregation, BucketAggregationType, MetricAggregation,
     };
     use crate::aggregation::metric::{AverageAggregation, StatsAggregation};
     use crate::aggregation::tests::{
@@ -585,12 +629,6 @@ mod tests {
             serde_json::Value::Null
         );
         assert_eq!(res["my_texts"]["sum_other_doc_count"], 0); // TODO sum_other_doc_count with min_doc_count
-
-        assert_eq!(
-            get_term_dict_field_names(&agg_req),
-            vec!["string_id".to_string(),].into_iter().collect()
-        );
-
         Ok(())
     }
 
@@ -605,7 +643,8 @@ mod tests {
     fn terms_aggregation_test_order_count_merge_segment(merge_segments: bool) -> crate::Result<()> {
         let segment_and_terms = vec![
             vec![(5.0, "terma".to_string())],
-            vec![(4.0, "termb".to_string())],
+            vec![(2.0, "termb".to_string())],
+            vec![(2.0, "terma".to_string())],
             vec![(1.0, "termc".to_string())],
             vec![(1.0, "termc".to_string())],
             vec![(1.0, "termc".to_string())],
@@ -646,7 +685,7 @@ mod tests {
                    }),
                    ..Default::default()
                }),
-                sub_aggregation: sub_agg,
+                sub_aggregation: sub_agg.clone(),
            }),
        )]
        .into_iter()
@@ -655,15 +694,114 @@ mod tests {
         let res = exec_request(agg_req, &index)?;
         assert_eq!(res["my_texts"]["buckets"][0]["key"], "termb");
         assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
+        assert_eq!(res["my_texts"]["buckets"][0]["avg_score"]["value"], 5.0);
 
         assert_eq!(res["my_texts"]["buckets"][1]["key"], "termc");
         assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 3);
+        assert_eq!(res["my_texts"]["buckets"][1]["avg_score"]["value"], 1.0);
 
         assert_eq!(res["my_texts"]["buckets"][2]["key"], "terma");
-        assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 5);
+        assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 6);
+        assert_eq!(res["my_texts"]["buckets"][2]["avg_score"]["value"], 4.5);
 
         assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
 
+        // Agg on non string
+        //
+        let agg_req: Aggregations = vec![
+            (
+                "my_scores1".to_string(),
+                Aggregation::Bucket(BucketAggregation {
+                    bucket_agg: BucketAggregationType::Terms(TermsAggregation {
+                        field: "score".to_string(),
+                        order: Some(CustomOrder {
+                            order: Order::Asc,
+                            target: OrderTarget::Count,
+                        }),
+                        ..Default::default()
+                    }),
+                    sub_aggregation: sub_agg.clone(),
+                }),
+            ),
+            (
+                "my_scores2".to_string(),
+                Aggregation::Bucket(BucketAggregation {
+                    bucket_agg: BucketAggregationType::Terms(TermsAggregation {
+                        field: "score_f64".to_string(),
+                        order: Some(CustomOrder {
+                            order: Order::Asc,
+                            target: OrderTarget::Count,
+                        }),
+                        ..Default::default()
+                    }),
+                    sub_aggregation: sub_agg.clone(),
+                }),
+            ),
+            (
+                "my_scores3".to_string(),
+                Aggregation::Bucket(BucketAggregation {
+                    bucket_agg: BucketAggregationType::Terms(TermsAggregation {
+                        field: "score_i64".to_string(),
+                        order: Some(CustomOrder {
+                            order: Order::Asc,
+                            target: OrderTarget::Count,
+                        }),
+                        ..Default::default()
+                    }),
+                    sub_aggregation: sub_agg,
+                }),
+            ),
+        ]
+        .into_iter()
+        .collect();
+
+        let res = exec_request(agg_req, &index)?;
+        assert_eq!(res["my_scores1"]["buckets"][0]["key"], 8.0);
+        assert_eq!(res["my_scores1"]["buckets"][0]["doc_count"], 1);
+        assert_eq!(res["my_scores1"]["buckets"][0]["avg_score"]["value"], 8.0);
+
+        assert_eq!(res["my_scores1"]["buckets"][1]["key"], 2.0);
+        assert_eq!(res["my_scores1"]["buckets"][1]["doc_count"], 2);
+        assert_eq!(res["my_scores1"]["buckets"][1]["avg_score"]["value"], 2.0);
+
+        assert_eq!(res["my_scores1"]["buckets"][2]["key"], 1.0);
+        assert_eq!(res["my_scores1"]["buckets"][2]["doc_count"], 3);
+        assert_eq!(res["my_scores1"]["buckets"][2]["avg_score"]["value"], 1.0);
+
+        assert_eq!(res["my_scores1"]["buckets"][3]["key"], 5.0);
+        assert_eq!(res["my_scores1"]["buckets"][3]["doc_count"], 5);
+        assert_eq!(res["my_scores1"]["buckets"][3]["avg_score"]["value"], 5.0);
+
+        assert_eq!(res["my_scores1"]["sum_other_doc_count"], 0);
+
+        assert_eq!(res["my_scores2"]["buckets"][0]["key"], 8.0);
+        assert_eq!(res["my_scores2"]["buckets"][0]["doc_count"], 1);
+        assert_eq!(res["my_scores2"]["buckets"][0]["avg_score"]["value"], 8.0);
+
+        assert_eq!(res["my_scores2"]["buckets"][1]["key"], 2.0);
+        assert_eq!(res["my_scores2"]["buckets"][1]["doc_count"], 2);
+        assert_eq!(res["my_scores2"]["buckets"][1]["avg_score"]["value"], 2.0);
+
+        assert_eq!(res["my_scores2"]["buckets"][2]["key"], 1.0);
+        assert_eq!(res["my_scores2"]["buckets"][2]["doc_count"], 3);
+        assert_eq!(res["my_scores2"]["buckets"][2]["avg_score"]["value"], 1.0);
+
+        assert_eq!(res["my_scores2"]["sum_other_doc_count"], 0);
+
+        assert_eq!(res["my_scores3"]["buckets"][0]["key"], 8.0);
+        assert_eq!(res["my_scores3"]["buckets"][0]["doc_count"], 1);
+        assert_eq!(res["my_scores3"]["buckets"][0]["avg_score"]["value"], 8.0);
+
+        assert_eq!(res["my_scores3"]["buckets"][1]["key"], 2.0);
+        assert_eq!(res["my_scores3"]["buckets"][1]["doc_count"], 2);
+        assert_eq!(res["my_scores3"]["buckets"][1]["avg_score"]["value"], 2.0);
+
+        assert_eq!(res["my_scores3"]["buckets"][2]["key"], 1.0);
+        assert_eq!(res["my_scores3"]["buckets"][2]["doc_count"], 3);
+        assert_eq!(res["my_scores3"]["buckets"][2]["avg_score"]["value"], 1.0);
+
+        assert_eq!(res["my_scores3"]["sum_other_doc_count"], 0);
 
         Ok(())
     }
 
@@ -857,14 +995,14 @@ mod tests {
         ];
         let index = get_test_index_from_values_and_terms(merge_segments, &segment_and_terms)?;
 
-        // key desc
+        // key asc
         let agg_req: Aggregations = vec![(
             "my_texts".to_string(),
             Aggregation::Bucket(BucketAggregation {
                 bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                     field: "string_id".to_string(),
                     order: Some(CustomOrder {
-                        order: Order::Desc,
+                        order: Order::Asc,
                         target: OrderTarget::Key,
                     }),
                     ..Default::default()
@@ -891,7 +1029,7 @@ mod tests {
                 bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                     field: "string_id".to_string(),
                     order: Some(CustomOrder {
-                        order: Order::Desc,
+                        order: Order::Asc,
                         target: OrderTarget::Key,
                     }),
                     size: Some(2),
@@ -915,14 +1053,14 @@ mod tests {
 
         assert_eq!(res["my_texts"]["sum_other_doc_count"], 3);
 
-        // key desc and segment_size cut_off
+        // key asc and segment_size cut_off
         let agg_req: Aggregations = vec![(
             "my_texts".to_string(),
             Aggregation::Bucket(BucketAggregation {
                 bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                     field: "string_id".to_string(),
                     order: Some(CustomOrder {
-                        order: Order::Desc,
+                        order: Order::Asc,
                         target: OrderTarget::Key,
                     }),
                     size: Some(2),
@@ -945,14 +1083,14 @@ mod tests {
             serde_json::Value::Null
         );
 
-        // key asc
+        // key desc
         let agg_req: Aggregations = vec![(
             "my_texts".to_string(),
             Aggregation::Bucket(BucketAggregation {
                 bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                     field: "string_id".to_string(),
                     order: Some(CustomOrder {
-                        order: Order::Asc,
+                        order: Order::Desc,
                         target: OrderTarget::Key,
                     }),
                     ..Default::default()
@@ -972,14 +1110,14 @@ mod tests {
         assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 5);
         assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
 
-        // key asc, size cut_off
+        // key desc, size cut_off
         let agg_req: Aggregations = vec![(
             "my_texts".to_string(),
             Aggregation::Bucket(BucketAggregation {
                 bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                     field: "string_id".to_string(),
                     order: Some(CustomOrder {
-                        order: Order::Asc,
+                        order: Order::Desc,
                         target: OrderTarget::Key,
                     }),
                     size: Some(2),
@@ -1002,14 +1140,14 @@ mod tests {
         );
         assert_eq!(res["my_texts"]["sum_other_doc_count"], 5);
 
-        // key asc, segment_size cut_off
+        // key desc, segment_size cut_off
         let agg_req: Aggregations = vec![(
             "my_texts".to_string(),
             Aggregation::Bucket(BucketAggregation {
                 bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                     field: "string_id".to_string(),
                     order: Some(CustomOrder {
-                        order: Order::Asc,
+                        order: Order::Desc,
                         target: OrderTarget::Key,
                     }),
                     size: Some(2),
src/aggregation/buf_collector.rs (new file, 79 lines)
@@ -0,0 +1,79 @@
+use super::agg_req_with_accessor::AggregationsWithAccessor;
+use super::intermediate_agg_result::IntermediateAggregationResults;
+use super::segment_agg_result::SegmentAggregationCollector;
+use crate::DocId;
+
+pub(crate) const DOC_BLOCK_SIZE: usize = 64;
+pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
+
+/// BufAggregationCollector buffers documents before calling collect_block().
+#[derive(Clone)]
+pub(crate) struct BufAggregationCollector {
+    pub(crate) collector: Box<dyn SegmentAggregationCollector>,
+    staged_docs: DocBlock,
+    num_staged_docs: usize,
+}
+
+impl std::fmt::Debug for BufAggregationCollector {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SegmentAggregationResultsCollector")
+            .field("staged_docs", &&self.staged_docs[..self.num_staged_docs])
+            .field("num_staged_docs", &self.num_staged_docs)
+            .finish()
+    }
+}
+
+impl BufAggregationCollector {
+    pub fn new(collector: Box<dyn SegmentAggregationCollector>) -> Self {
+        Self {
+            collector,
+            num_staged_docs: 0,
+            staged_docs: [0; DOC_BLOCK_SIZE],
+        }
+    }
+}
+
+impl SegmentAggregationCollector for BufAggregationCollector {
+    fn into_intermediate_aggregations_result(
+        self: Box<Self>,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<IntermediateAggregationResults> {
+        Box::new(self.collector).into_intermediate_aggregations_result(agg_with_accessor)
+    }
+
+    fn collect(
+        &mut self,
+        doc: crate::DocId,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        self.staged_docs[self.num_staged_docs] = doc;
+        self.num_staged_docs += 1;
+        if self.num_staged_docs == self.staged_docs.len() {
+            self.collector
+                .collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor)?;
+            self.num_staged_docs = 0;
+        }
+        Ok(())
+    }
+
+    fn collect_block(
+        &mut self,
+        docs: &[crate::DocId],
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<()> {
+        for doc in docs {
+            self.collect(*doc, agg_with_accessor)?;
+        }
+        Ok(())
+    }
+
+    fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
+        self.collector
+            .collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor)?;
+        self.num_staged_docs = 0;
+
+        self.collector.flush(agg_with_accessor)?;
+
+        Ok(())
+    }
+}
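[Editor's note] The new BufAggregationCollector batches per-document collect() calls into DOC_BLOCK_SIZE-sized collect_block() calls on the wrapped collector; it is wired into AggregationSegmentCollector further down in this diff. A standalone sketch of the same buffering pattern with plain types (not tantivy API), just to make the control flow explicit:

// Generic illustration of the block-buffering idea; names are illustrative.
struct BlockBuffer {
    buf: [u32; 64],
    len: usize,
    sink: fn(&[u32]), // receives full or partial blocks of doc ids
}

impl BlockBuffer {
    fn push(&mut self, doc: u32) {
        self.buf[self.len] = doc;
        self.len += 1;
        if self.len == self.buf.len() {
            (self.sink)(&self.buf[..self.len]); // hand over a full block
            self.len = 0;
        }
    }
    fn flush(&mut self) {
        (self.sink)(&self.buf[..self.len]); // hand over the partial tail
        self.len = 0;
    }
}

The payoff is that the expensive per-block work (here: virtual dispatch and accessor lookups in the wrapped collector) is paid once per 64 documents instead of once per document.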
@@ -3,14 +3,11 @@ use std::rc::Rc;
 use super::agg_req::Aggregations;
 use super::agg_req_with_accessor::AggregationsWithAccessor;
 use super::agg_result::AggregationResults;
+use super::buf_collector::BufAggregationCollector;
 use super::intermediate_agg_result::IntermediateAggregationResults;
-use super::segment_agg_result::{
-    build_segment_agg_collector, GenericSegmentAggregationResultsCollector,
-    SegmentAggregationCollector,
-};
+use super::segment_agg_result::{build_segment_agg_collector, SegmentAggregationCollector};
 use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
 use crate::collector::{Collector, SegmentCollector};
-use crate::schema::Schema;
 use crate::{SegmentReader, TantivyError};
 
 /// The default max bucket count, before the aggregation fails.
@@ -20,7 +17,6 @@ pub const MAX_BUCKET_COUNT: u32 = 65000;
 ///
 /// The collector collects all aggregations by the underlying aggregation request.
 pub struct AggregationCollector {
-    schema: Schema,
     agg: Aggregations,
     max_bucket_count: u32,
 }
@@ -30,9 +26,8 @@ impl AggregationCollector {
     ///
     /// Aggregation fails when the total bucket count is higher than max_bucket_count.
     /// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
-    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>, schema: Schema) -> Self {
+    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
         Self {
-            schema,
             agg,
             max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
         }
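[Editor's note] Since from_aggs drops its Schema parameter in this patch, call sites shrink accordingly. A sketch of post-patch usage, mirroring the test call sites visible later in this diff; the "price" fast field and the surrounding index setup are assumed, not defined here:

use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Searcher;

// Runs a single sum aggregation over a fast field named "price" (hypothetical).
fn price_sum(searcher: &Searcher) -> tantivy::Result<AggregationResults> {
    let agg: Aggregations =
        serde_json::from_str(r#"{ "price_sum": { "sum": { "field": "price" } } }"#)
            .expect("valid aggregation request");
    // No schema argument anymore; the bucket limit defaults to MAX_BUCKET_COUNT.
    let collector = AggregationCollector::from_aggs(agg, None);
    searcher.search(&AllQuery, &collector)
}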
@@ -119,7 +114,7 @@ impl Collector for AggregationCollector {
         segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
     ) -> crate::Result<Self::Fruit> {
         let res = merge_fruits(segment_fruits)?;
-        res.into_final_bucket_result(self.agg.clone(), &self.schema)
+        res.into_final_bucket_result(self.agg.clone())
     }
 }
 
@@ -140,7 +135,7 @@ fn merge_fruits(
 /// `AggregationSegmentCollector` does the aggregation collection on a segment.
 pub struct AggregationSegmentCollector {
     aggs_with_accessor: AggregationsWithAccessor,
-    result: Box<dyn SegmentAggregationCollector>,
+    result: BufAggregationCollector,
     error: Option<TantivyError>,
 }
 
@@ -154,7 +149,8 @@ impl AggregationSegmentCollector {
     ) -> crate::Result<Self> {
         let aggs_with_accessor =
             get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
-        let result = build_segment_agg_collector(&aggs_with_accessor)?;
+        let result =
+            BufAggregationCollector::new(build_segment_agg_collector(&aggs_with_accessor)?);
         Ok(AggregationSegmentCollector {
             aggs_with_accessor,
             result,
@@ -180,9 +176,7 @@ impl SegmentCollector for AggregationSegmentCollector {
         if let Some(err) = self.error {
             return Err(err);
         }
-        self.result
-            .flush_staged_docs(&self.aggs_with_accessor, true)?;
-        self.result
-            .into_intermediate_aggregations_result(&self.aggs_with_accessor)
+        self.result.flush(&self.aggs_with_accessor)?;
+        Box::new(self.result).into_intermediate_aggregations_result(&self.aggs_with_accessor)
     }
 }
src/aggregation/error.rs (new file, 9 lines)
@@ -0,0 +1,9 @@
+use super::bucket::DateHistogramParseError;
+
+/// Errors that may occur while processing an aggregation request.
+#[derive(Debug, Clone, PartialEq, Eq, Error)]
+pub enum AggregationError {
+    /// The date histogram request could not be parsed.
+    #[error("Date histogram parse error: {0:?}")]
+    DateHistogramParseError(#[from] DateHistogramParseError),
+}
@@ -4,6 +4,7 @@
 
 use std::cmp::Ordering;
 
+use columnar::ColumnType;
 use itertools::Itertools;
 use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
@@ -21,11 +22,9 @@ use super::metric::{
     IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
     IntermediateSum,
 };
-use super::segment_agg_result::SegmentMetricResultCollector;
 use super::{format_date, Key, SerializedKey, VecWithNames};
 use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
 use crate::aggregation::bucket::TermsAggregationInternal;
-use crate::schema::Schema;
 
 /// Contains the intermediate aggregation result, which is optimized to be merged with other
 /// intermediate results.
@@ -39,12 +38,8 @@ pub struct IntermediateAggregationResults {
 
 impl IntermediateAggregationResults {
     /// Convert intermediate result and its aggregation request to the final result.
-    pub fn into_final_bucket_result(
-        self,
-        req: Aggregations,
-        schema: &Schema,
-    ) -> crate::Result<AggregationResults> {
-        self.into_final_bucket_result_internal(&(req.into()), schema)
+    pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
+        self.into_final_bucket_result_internal(&(req.into()))
     }
 
     /// Convert intermediate result and its aggregation request to the final result.
@@ -54,7 +49,6 @@ impl IntermediateAggregationResults {
     pub(crate) fn into_final_bucket_result_internal(
         self,
         req: &AggregationsInternal,
-        schema: &Schema,
     ) -> crate::Result<AggregationResults> {
         // Important assumption:
         // When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
@@ -62,11 +56,11 @@ impl IntermediateAggregationResults {
         let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();
 
         if let Some(buckets) = self.buckets {
-            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets, schema)?
+            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
         } else {
             // When there are no buckets, we create empty buckets, so that the serialized json
             // format is constant
-            add_empty_final_buckets_to_result(&mut results, &req.buckets, schema)?
+            add_empty_final_buckets_to_result(&mut results, &req.buckets)?
         };
 
         if let Some(metrics) = self.metrics {
@@ -167,12 +161,10 @@ fn add_empty_final_metrics_to_result(
 fn add_empty_final_buckets_to_result(
     results: &mut FxHashMap<String, AggregationResult>,
     req_buckets: &VecWithNames<BucketAggregationInternal>,
-    schema: &Schema,
 ) -> crate::Result<()> {
     let requested_buckets = req_buckets.iter();
     for (key, req) in requested_buckets {
-        let empty_bucket =
-            AggregationResult::BucketResult(BucketResult::empty_from_req(req, schema)?);
+        let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
         results.insert(key.to_string(), empty_bucket);
     }
     Ok(())
@@ -182,13 +174,12 @@ fn convert_and_add_final_buckets_to_result(
     results: &mut FxHashMap<String, AggregationResult>,
     buckets: VecWithNames<IntermediateBucketResult>,
     req_buckets: &VecWithNames<BucketAggregationInternal>,
-    schema: &Schema,
 ) -> crate::Result<()> {
     assert_eq!(buckets.len(), req_buckets.len());
 
     let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
     for ((key, bucket), req) in buckets_with_request {
-        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req, schema)?);
+        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
         results.insert(key, result);
     }
     Ok(())
@@ -220,32 +211,6 @@ pub enum IntermediateMetricResult {
     Sum(IntermediateSum),
 }
 
-impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
-    fn from(tree: SegmentMetricResultCollector) -> Self {
-        use super::metric::SegmentStatsType;
-        match tree {
-            SegmentMetricResultCollector::Stats(collector) => match collector.collecting_for {
-                SegmentStatsType::Average => IntermediateMetricResult::Average(
-                    IntermediateAverage::from_collector(collector),
-                ),
-                SegmentStatsType::Count => {
-                    IntermediateMetricResult::Count(IntermediateCount::from_collector(collector))
-                }
-                SegmentStatsType::Max => {
-                    IntermediateMetricResult::Max(IntermediateMax::from_collector(collector))
-                }
-                SegmentStatsType::Min => {
-                    IntermediateMetricResult::Min(IntermediateMin::from_collector(collector))
-                }
-                SegmentStatsType::Stats => IntermediateMetricResult::Stats(collector.stats),
-                SegmentStatsType::Sum => {
-                    IntermediateMetricResult::Sum(IntermediateSum::from_collector(collector))
-                }
-            },
-        }
-    }
-}
-
 impl IntermediateMetricResult {
     pub(crate) fn empty_from_req(req: &MetricAggregation) -> Self {
         match req {
@@ -309,6 +274,8 @@ pub enum IntermediateBucketResult {
     /// This is the histogram entry for a bucket, which contains a key, count, and optionally
     /// sub_aggregations.
     Histogram {
+        /// The column_type of the underlying `Column`
+        column_type: Option<ColumnType>,
         /// The buckets
         buckets: Vec<IntermediateHistogramBucketEntry>,
     },
@@ -320,7 +287,6 @@ impl IntermediateBucketResult {
     pub(crate) fn into_final_bucket_result(
         self,
         req: &BucketAggregationInternal,
-        schema: &Schema,
     ) -> crate::Result<BucketResult> {
         match self {
             IntermediateBucketResult::Range(range_res) => {
@@ -330,9 +296,9 @@ impl IntermediateBucketResult {
                     .map(|bucket| {
                         bucket.into_final_bucket_entry(
                             &req.sub_aggregation,
-                            schema,
                             req.as_range()
                                 .expect("unexpected aggregation, expected histogram aggregation"),
+                            range_res.column_type,
                         )
                     })
                     .collect::<crate::Result<Vec<_>>>()?;
@@ -359,16 +325,21 @@ impl IntermediateBucketResult {
                 };
                 Ok(BucketResult::Range { buckets })
             }
-            IntermediateBucketResult::Histogram { buckets } => {
+            IntermediateBucketResult::Histogram {
+                column_type,
+                buckets,
+            } => {
+                let histogram_req = &req
+                    .as_histogram()?
+                    .expect("unexpected aggregation, expected histogram aggregation");
                 let buckets = intermediate_histogram_buckets_to_final_buckets(
                     buckets,
-                    req.as_histogram()
-                        .expect("unexpected aggregation, expected histogram aggregation"),
+                    column_type,
+                    histogram_req,
                     &req.sub_aggregation,
-                    schema,
                 )?;
 
-                let buckets = if req.as_histogram().unwrap().keyed {
+                let buckets = if histogram_req.keyed {
                     let mut bucket_map =
                         FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
                     for bucket in buckets {
@@ -384,7 +355,6 @@ impl IntermediateBucketResult {
                 req.as_term()
                     .expect("unexpected aggregation, expected term aggregation"),
                 &req.sub_aggregation,
-                schema,
             ),
         }
     }
@@ -393,8 +363,11 @@ impl IntermediateBucketResult {
         match req {
             BucketAggregationType::Terms(_) => IntermediateBucketResult::Terms(Default::default()),
             BucketAggregationType::Range(_) => IntermediateBucketResult::Range(Default::default()),
-            BucketAggregationType::Histogram(_) => {
-                IntermediateBucketResult::Histogram { buckets: vec![] }
+            BucketAggregationType::Histogram(_) | BucketAggregationType::DateHistogram(_) => {
+                IntermediateBucketResult::Histogram {
+                    buckets: vec![],
+                    column_type: None,
+                }
             }
         }
     }
@@ -404,7 +377,7 @@ impl IntermediateBucketResult {
                 IntermediateBucketResult::Terms(term_res_left),
                 IntermediateBucketResult::Terms(term_res_right),
             ) => {
-                merge_maps(&mut term_res_left.entries, term_res_right.entries);
+                merge_key_maps(&mut term_res_left.entries, term_res_right.entries);
                 term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
                 term_res_left.doc_count_error_upper_bound +=
                     term_res_right.doc_count_error_upper_bound;
@@ -414,7 +387,7 @@ impl IntermediateBucketResult {
                 IntermediateBucketResult::Range(range_res_left),
                 IntermediateBucketResult::Range(range_res_right),
             ) => {
-                merge_maps(&mut range_res_left.buckets, range_res_right.buckets);
+                merge_serialized_key_maps(&mut range_res_left.buckets, range_res_right.buckets);
             }
             (
                 IntermediateBucketResult::Histogram {
@@ -460,12 +433,13 @@ impl IntermediateBucketResult {
 /// Range aggregation including error counts
 pub struct IntermediateRangeBucketResult {
     pub(crate) buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry>,
+    pub(crate) column_type: Option<ColumnType>,
 }
 
 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
 /// Term aggregation including error counts
 pub struct IntermediateTermBucketResult {
-    pub(crate) entries: FxHashMap<String, IntermediateTermBucketEntry>,
+    pub(crate) entries: FxHashMap<Key, IntermediateTermBucketEntry>,
     pub(crate) sum_other_doc_count: u64,
     pub(crate) doc_count_error_upper_bound: u64,
 }
@@ -475,7 +449,6 @@ impl IntermediateTermBucketResult {
         self,
         req: &TermsAggregation,
         sub_aggregation_req: &AggregationsInternal,
-        schema: &Schema,
     ) -> crate::Result<BucketResult> {
         let req = TermsAggregationInternal::from_req(req);
         let mut buckets: Vec<BucketEntry> = self
@@ -485,11 +458,11 @@ impl IntermediateTermBucketResult {
             .map(|(key, entry)| {
                 Ok(BucketEntry {
                     key_as_string: None,
-                    key: Key::Str(key),
+                    key,
                     doc_count: entry.doc_count,
                     sub_aggregation: entry
                         .sub_aggregation
-                        .into_final_bucket_result_internal(sub_aggregation_req, schema)?,
+                        .into_final_bucket_result_internal(sub_aggregation_req)?,
                 })
             })
            .collect::<crate::Result<_>>()?;
@@ -498,7 +471,7 @@ impl IntermediateTermBucketResult {
         match req.order.target {
             OrderTarget::Key => {
                 buckets.sort_by(|left, right| {
-                    if req.order.order == Order::Desc {
+                    if req.order.order == Order::Asc {
                         left.key.partial_cmp(&right.key)
                     } else {
                         right.key.partial_cmp(&left.key)
@@ -563,7 +536,7 @@ trait MergeFruits {
     fn merge_fruits(&mut self, other: Self);
 }
 
-fn merge_maps<V: MergeFruits + Clone>(
+fn merge_serialized_key_maps<V: MergeFruits + Clone>(
     entries_left: &mut FxHashMap<SerializedKey, V>,
     mut entries_right: FxHashMap<SerializedKey, V>,
 ) {
@@ -578,6 +551,21 @@ fn merge_maps<V: MergeFruits + Clone>(
     }
 }
 
+fn merge_key_maps<V: MergeFruits + Clone>(
+    entries_left: &mut FxHashMap<Key, V>,
+    mut entries_right: FxHashMap<Key, V>,
+) {
+    for (name, entry_left) in entries_left.iter_mut() {
+        if let Some(entry_right) = entries_right.remove(name) {
+            entry_left.merge_fruits(entry_right);
+        }
+    }
+
+    for (key, res) in entries_right.into_iter() {
+        entries_left.entry(key).or_insert(res);
+    }
+}
+
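[Editor's note] merge_key_maps above merges two per-key segment maps: keys present on both sides delegate to MergeFruits::merge_fruits, keys present only on the right are moved over. A toy illustration of the resulting behavior, with plain counters standing in for the bucket entries (the types and names here are illustrative only):

use std::collections::HashMap;

// For doc-count-like values, "merge_fruits" is just addition, so the merge
// collapses to: add when the key exists on the left, insert when it does not.
fn merge_counts(left: &mut HashMap<String, u64>, right: HashMap<String, u64>) {
    for (key, count) in right {
        *left.entry(key).or_insert(0) += count;
    }
}

fn main() {
    let mut left = HashMap::from([("jazz".to_string(), 2), ("raggae".to_string(), 4)]);
    let right = HashMap::from([("jazz".to_string(), 1), ("drumnbass".to_string(), 6)]);
    merge_counts(&mut left, right);
    assert_eq!(left["jazz"], 3);
    assert_eq!(left["drumnbass"], 6);
}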
 /// This is the histogram entry for a bucket, which contains a key, count, and optionally
 /// sub_aggregations.
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -594,7 +582,6 @@ impl IntermediateHistogramBucketEntry {
     pub(crate) fn into_final_bucket_entry(
         self,
         req: &AggregationsInternal,
-        schema: &Schema,
     ) -> crate::Result<BucketEntry> {
         Ok(BucketEntry {
             key_as_string: None,
@@ -602,7 +589,7 @@ impl IntermediateHistogramBucketEntry {
             doc_count: self.doc_count,
             sub_aggregation: self
                 .sub_aggregation
-                .into_final_bucket_result_internal(req, schema)?,
+                .into_final_bucket_result_internal(req)?,
         })
     }
 }
@@ -639,15 +626,15 @@ impl IntermediateRangeBucketEntry {
     pub(crate) fn into_final_bucket_entry(
         self,
         req: &AggregationsInternal,
-        schema: &Schema,
-        range_req: &RangeAggregation,
+        _range_req: &RangeAggregation,
+        column_type: Option<ColumnType>,
     ) -> crate::Result<RangeBucketEntry> {
         let mut range_bucket_entry = RangeBucketEntry {
             key: self.key,
             doc_count: self.doc_count,
             sub_aggregation: self
                 .sub_aggregation
-                .into_final_bucket_result_internal(req, schema)?,
+                .into_final_bucket_result_internal(req)?,
             to: self.to,
             from: self.from,
             to_as_string: None,
@@ -656,8 +643,7 @@ impl IntermediateRangeBucketEntry {
 
         // If we have a date type on the histogram buckets, we add the `key_as_string` field as
         // rfc339
-        let field = schema.get_field(&range_req.field)?;
-        if schema.get_field_entry(field).field_type().is_date() {
+        if column_type == Some(ColumnType::DateTime) {
             if let Some(val) = range_bucket_entry.to {
                 let key_as_string = format_date(val as i64)?;
                 range_bucket_entry.to_as_string = Some(key_as_string);
@@ -728,7 +714,10 @@ mod tests {
         }
         map.insert(
             "my_agg_level2".to_string(),
-            IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
+            IntermediateBucketResult::Range(IntermediateRangeBucketResult {
+                buckets,
+                column_type: None,
+            }),
         );
         IntermediateAggregationResults {
             buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
@@ -758,7 +747,10 @@ mod tests {
         }
         map.insert(
             "my_agg_level1".to_string(),
-            IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
+            IntermediateBucketResult::Range(IntermediateRangeBucketResult {
+                buckets,
+                column_type: None,
+            }),
         );
         IntermediateAggregationResults {
             buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
@@ -81,7 +81,7 @@ mod tests {
             "price_sum": { "sum": { "field": "price" } }
         }"#;
         let aggregations: Aggregations = serde_json::from_str(aggregations_json).unwrap();
-        let collector = AggregationCollector::from_aggs(aggregations, None, index.schema());
+        let collector = AggregationCollector::from_aggs(aggregations, None);
         let reader = index.reader().unwrap();
         let searcher = reader.searcher();
         let aggregations_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
@@ -1,4 +1,4 @@
|
|||||||
use columnar::Column;
|
use columnar::{Cardinality, Column, ColumnType};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
@@ -8,7 +8,6 @@ use crate::aggregation::intermediate_agg_result::{
|
|||||||
};
|
};
|
||||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||||
use crate::aggregation::{f64_from_fastfield_u64, VecWithNames};
|
use crate::aggregation::{f64_from_fastfield_u64, VecWithNames};
|
||||||
use crate::schema::Type;
|
|
||||||
use crate::{DocId, TantivyError};
|
use crate::{DocId, TantivyError};
|
||||||
|
|
||||||
/// A multi-value metric aggregation that computes a collection of statistics on numeric values that
|
/// A multi-value metric aggregation that computes a collection of statistics on numeric values that
|
||||||
@@ -153,26 +152,40 @@ pub(crate) enum SegmentStatsType {

 #[derive(Clone, Debug, PartialEq)]
 pub(crate) struct SegmentStatsCollector {
-    field_type: Type,
+    field_type: ColumnType,
     pub(crate) collecting_for: SegmentStatsType,
     pub(crate) stats: IntermediateStats,
+    pub(crate) accessor_idx: usize,
 }

 impl SegmentStatsCollector {
-    pub fn from_req(field_type: Type, collecting_for: SegmentStatsType) -> Self {
+    pub fn from_req(
+        field_type: ColumnType,
+        collecting_for: SegmentStatsType,
+        accessor_idx: usize,
+    ) -> Self {
         Self {
             field_type,
             collecting_for,
             stats: IntermediateStats::default(),
+            accessor_idx,
         }
     }
-    pub(crate) fn collect_block(&mut self, docs: &[DocId], field: &Column<u64>) {
-        // TODO special case for Required, Optional column type
-        for doc in docs {
-            for val in field.values(*doc) {
+    #[inline]
+    pub(crate) fn collect_block_with_field(&mut self, docs: &[DocId], field: &Column<u64>) {
+        if field.get_cardinality() == Cardinality::Full {
+            for doc in docs {
+                let val = field.values.get_val(*doc);
                 let val1 = f64_from_fastfield_u64(val, &self.field_type);
                 self.stats.collect(val1);
             }
+        } else {
+            for doc in docs {
+                for val in field.values_for_doc(*doc) {
+                    let val1 = f64_from_fastfield_u64(val, &self.field_type);
+                    self.stats.collect(val1);
+                }
+            }
         }
     }
 }
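The rewritten `collect_block_with_field` adds a fast path for columns with `Cardinality::Full`, i.e. columns where every document stores exactly one value: those can be read with a single positional lookup per document instead of going through the general multi-value iterator. The standalone sketch below illustrates that split with toy types; `ToyColumn` and its methods are hypothetical stand-ins, not the `columnar::Column` API used in the hunk above.

#[derive(PartialEq)]
enum Cardinality {
    Full,        // exactly one value per document
    Multivalued, // zero or more values per document
}

struct ToyColumn {
    cardinality: Cardinality,
    dense: Vec<u64>,       // used when cardinality == Full
    sparse: Vec<Vec<u64>>, // used otherwise
}

impl ToyColumn {
    fn get_val(&self, doc: u32) -> u64 {
        self.dense[doc as usize]
    }
    fn values_for_doc(&self, doc: u32) -> impl Iterator<Item = u64> + '_ {
        self.sparse[doc as usize].iter().copied()
    }
}

// Same shape as the diff: branch once per block on the column's cardinality,
// then run the cheapest loop that is correct for that layout.
fn sum_block(docs: &[u32], column: &ToyColumn) -> u64 {
    let mut sum = 0;
    if column.cardinality == Cardinality::Full {
        for &doc in docs {
            sum += column.get_val(doc); // one direct lookup, no per-doc iterator
        }
    } else {
        for &doc in docs {
            for val in column.values_for_doc(doc) {
                sum += val;
            }
        }
    }
    sum
}

fn main() {
    let dense = ToyColumn {
        cardinality: Cardinality::Full,
        dense: vec![10, 20, 30],
        sparse: Vec::new(),
    };
    assert_eq!(sum_block(&[0, 2], &dense), 40);
}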
@@ -182,7 +195,7 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
         self: Box<Self>,
         agg_with_accessor: &AggregationsWithAccessor,
     ) -> crate::Result<IntermediateAggregationResults> {
-        let name = agg_with_accessor.metrics.keys[0].to_string();
+        let name = agg_with_accessor.metrics.keys[self.accessor_idx].to_string();

         let intermediate_metric_result = match self.collecting_for {
             SegmentStatsType::Average => {
@@ -219,8 +232,9 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
         doc: crate::DocId,
         agg_with_accessor: &AggregationsWithAccessor,
     ) -> crate::Result<()> {
-        let accessor = &agg_with_accessor.metrics.values[0].accessor;
-        for val in accessor.values(doc) {
+        let field = &agg_with_accessor.metrics.values[self.accessor_idx].accessor;
+
+        for val in field.values_for_doc(doc) {
             let val1 = f64_from_fastfield_u64(val, &self.field_type);
             self.stats.collect(val1);
         }
@@ -228,11 +242,14 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
         Ok(())
     }

-    fn flush_staged_docs(
+    #[inline]
+    fn collect_block(
         &mut self,
-        _agg_with_accessor: &AggregationsWithAccessor,
-        _force_flush: bool,
+        docs: &[crate::DocId],
+        agg_with_accessor: &AggregationsWithAccessor,
     ) -> crate::Result<()> {
+        let field = &agg_with_accessor.metrics.values[self.accessor_idx].accessor;
+        self.collect_block_with_field(docs, field);
         Ok(())
     }
 }
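Two related changes run through the hunks above: the stats collector no longer assumes it owns slot `[0]` of the metrics accessors but indexes with its own `accessor_idx`, and the old `flush_staged_docs` hook gives way to a `collect_block` method that fetches its column once per block and delegates to `collect_block_with_field`. The toy sketch below shows the indexing pattern in isolation; `AccessorTable` and `StatsLike` are illustrative names, not types from the diff.

struct AccessorTable {
    // One flat column of per-document values for each registered metric.
    columns: Vec<Vec<u64>>,
}

struct StatsLike {
    accessor_idx: usize, // which entry in the shared table belongs to this collector
    sum: u64,
    count: u64,
}

impl StatsLike {
    fn collect_block(&mut self, docs: &[u32], table: &AccessorTable) {
        // Look up this collector's own column; several collectors can share one table.
        let column = &table.columns[self.accessor_idx];
        for &doc in docs {
            self.sum += column[doc as usize];
            self.count += 1;
        }
    }
}

fn main() {
    let table = AccessorTable {
        columns: vec![vec![1, 2, 3], vec![100, 200, 300]],
    };
    let mut second_metric = StatsLike { accessor_idx: 1, sum: 0, count: 0 };
    second_metric.collect_block(&[0, 1, 2], &table);
    assert_eq!((second_metric.sum, second_metric.count), (600, 3));
}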
@@ -272,7 +289,7 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
+        let collector = AggregationCollector::from_aggs(agg_req_1, None);

         let reader = index.reader()?;
         let searcher = reader.searcher();
@@ -293,6 +310,43 @@ mod tests {
         Ok(())
     }

+    #[test]
+    fn test_aggregation_stats_simple() -> crate::Result<()> {
+        // test index without segments
+        let values = vec![10.0];
+
+        let index = get_test_index_from_values(false, &values)?;
+
+        let agg_req_1: Aggregations = vec![(
+            "stats".to_string(),
+            Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
+                "score".to_string(),
+            ))),
+        )]
+        .into_iter()
+        .collect();
+
+        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
+
+        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
+        assert_eq!(
+            res["stats"],
+            json!({
+                "avg": 10.0,
+                "count": 1,
+                "max": 10.0,
+                "min": 10.0,
+                "sum": 10.0
+            })
+        );
+
+        Ok(())
+    }
+
     #[test]
     fn test_aggregation_stats() -> crate::Result<()> {
         let index = get_test_index_2_segments(false)?;
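The new `test_aggregation_stats_simple` exercises a single-segment, single-value index and checks the serialized stats output. To read one metric out of an `AggregationResults` the same way the test does, a small helper along these lines works; the function name and signature are hypothetical, only the serde round-trip is taken from the test above.

use serde_json::Value;
use tantivy::aggregation::agg_result::AggregationResults;

// Hypothetical convenience helper: serialize the aggregation result to JSON and
// pull out a single stat, e.g. stat_value(&agg_res, "stats", "avg") == Some(10.0)
// for the index built in the test above.
fn stat_value(agg_res: &AggregationResults, agg_name: &str, stat: &str) -> Option<f64> {
    let res: Value = serde_json::to_value(agg_res).ok()?;
    res[agg_name][stat].as_f64()
}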
@@ -349,7 +403,7 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
+        let collector = AggregationCollector::from_aggs(agg_req_1, None);

         let searcher = reader.searcher();
         let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.