Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-31 14:32:54 +00:00.
Compare commits
27 Commits
columnar-m
...
missing-sp
| Author | SHA1 | Date |
|---|---|---|
|  | 7b06db062b |  |
|  | 097fd6138d |  |
|  | 01e5a22759 |  |
|  | b60b7d2afe |  |
|  | dfe4e95fde |  |
|  | 60cc2644d6 |  |
|  | 10bccac61b |  |
|  | 1cfb9ce59a |  |
|  | 539ff08a79 |  |
|  | dab93df94e |  |
|  | 3120147a76 |  |
|  | cbcafae04c |  |
|  | 36c6138e7f |  |
|  | 7a9befd18d |  |
|  | 62c811df2b |  |
|  | 03345f0aa2 |  |
|  | b7bfa20e38 |  |
|  | db8583db75 |  |
|  | 1390834ae8 |  |
|  | 3ac973bea4 |  |
|  | 405e2cf4d9 |  |
|  | b63c6c27bc |  |
|  | bd5eea9852 |  |
|  | 0f20787917 |  |
|  | 2874554ee4 |  |
|  | cbc70a9eae |  |
|  | 226d0f88bc |  |
.github/workflows/coverage.yml (vendored): 6 changed lines
@@ -2,9 +2,9 @@ name: Coverage
on:
  push:
    branches: [ main ]
    branches: [main]
  pull_request:
    branches: [ main ]
    branches: [main]

jobs:
  coverage:
@@ -16,7 +16,7 @@ jobs:
      - uses: Swatinem/rust-cache@v2
      - uses: taiki-e/install-action@cargo-llvm-cov
      - name: Generate code coverage
        run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
        run: cargo +nightly llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        continue-on-error: true
@@ -23,7 +23,7 @@ regex = { version = "1.5.5", default-features = false, features = ["std", "unico
aho-corasick = "0.7"
tantivy-fst = "0.4.0"
memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.12", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true }
@@ -55,12 +55,12 @@ measure_time = "0.8.2"
async-trait = "0.1.53"
arc-swap = "1.5.0"

columnar = { version="0.1", path="./columnar", package ="tantivy-columnar" }
sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
columnar = { version= "0.1", path="./columnar", package="tantivy-columnar" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version="0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" }

[target.'cfg(windows)'.dependencies]
@@ -1,23 +0,0 @@
# This script takes care of packaging the build artifacts that will go in the
# release zipfile

$SRC_DIR = $PWD.Path
$STAGE = [System.Guid]::NewGuid().ToString()

Set-Location $ENV:Temp
New-Item -Type Directory -Name $STAGE
Set-Location $STAGE

$ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip"

# TODO Update this to package the right artifacts
Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\hello.exe" '.\'

7z a "$ZIP" *

Push-AppveyorArtifact "$ZIP"

Remove-Item *.* -Force
Set-Location ..
Remove-Item $STAGE
Set-Location $SRC_DIR
@@ -1,33 +0,0 @@
# This script takes care of building your crate and packaging it for release

set -ex

main() {
    local src=$(pwd) \
          stage=

    case $TRAVIS_OS_NAME in
        linux)
            stage=$(mktemp -d)
            ;;
        osx)
            stage=$(mktemp -d -t tmp)
            ;;
    esac

    test -f Cargo.lock || cargo generate-lockfile

    # TODO Update this to build the artifacts that matter to you
    cross rustc --bin hello --target $TARGET --release -- -C lto

    # TODO Update this to package the right artifacts
    cp target/$TARGET/release/hello $stage/

    cd $stage
    tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz *
    cd $src

    rm -rf $stage
}

main
@@ -1,47 +0,0 @@
set -ex

main() {
    local target=
    if [ $TRAVIS_OS_NAME = linux ]; then
        target=x86_64-unknown-linux-musl
        sort=sort
    else
        target=x86_64-apple-darwin
        sort=gsort # for `sort --sort-version`, from brew's coreutils.
    fi

    # Builds for iOS are done on OSX, but require the specific target to be
    # installed.
    case $TARGET in
        aarch64-apple-ios)
            rustup target install aarch64-apple-ios
            ;;
        armv7-apple-ios)
            rustup target install armv7-apple-ios
            ;;
        armv7s-apple-ios)
            rustup target install armv7s-apple-ios
            ;;
        i386-apple-ios)
            rustup target install i386-apple-ios
            ;;
        x86_64-apple-ios)
            rustup target install x86_64-apple-ios
            ;;
    esac

    # This fetches latest stable release
    local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \
        | cut -d/ -f3 \
        | grep -E '^v[0.1.0-9.]+$' \
        | $sort --version-sort \
        | tail -n1)
    curl -LSfs https://japaric.github.io/trust/install.sh | \
        sh -s -- \
        --force \
        --git japaric/cross \
        --tag $tag \
        --target $target
}

main
ci/script.sh: 30 changed lines
@@ -1,30 +0,0 @@
#!/usr/bin/env bash

# This script takes care of testing your crate

set -ex

main() {
    if [ ! -z $CODECOV ]; then
        echo "Codecov"
        cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
    else
        echo "Build"
        cross build --target $TARGET
        if [ ! -z $DISABLE_TESTS ]; then
            return
        fi
        echo "Test"
        cross test --target $TARGET --no-default-features --features mmap
        cross test --target $TARGET --no-default-features --features mmap query-grammar
    fi
    for example in $(ls examples/*.rs)
    do
        cargo run --example $(basename $example .rs)
    done
}

# we don't run the "test phase" when doing deploys
if [ -z $TRAVIS_TAG ]; then
    main
fi
@@ -5,24 +5,23 @@ edition = "2021"
license = "MIT"

[dependencies]
itertools = "0.10.5"
log = "0.4.17"
fnv = "1.0.7"
fastdivide = "0.4.0"
rand = { version = "0.8.5", optional = true }
measure_time = { version = "0.8.2", optional = true }
prettytable-rs = { version = "0.10.0", optional = true }

stacker = { path = "../stacker", package="tantivy-stacker"}
serde_json = "1"
thiserror = "1"
fnv = "1"
sstable = { path = "../sstable", package = "tantivy-sstable" }
common = { path = "../common", package = "tantivy-common" }
itertools = "0.10"
log = "0.4"
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
prettytable-rs = {version="0.10.0", optional= true}
rand = {version="0.8.3", optional= true}
fastdivide = "0.4"
measure_time = { version="0.8.2", optional=true}

[dev-dependencies]
proptest = "1"
more-asserts = "0.3.0"
rand = "0.8.3"
more-asserts = "0.3.1"
rand = "0.8.5"

[features]
unstable = []
@@ -1,6 +0,0 @@
test:
	echo "Run test only... No examples."
	cargo test --tests --lib

fmt:
	cargo +nightly fmt --all
@@ -28,12 +28,7 @@ fn get_u128_column_random() -> Arc<dyn ColumnValues<u128>> {

fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn ColumnValues<u128>> {
    let mut out = vec![];
    tantivy_columnar::column_values::serialize_column_values_u128(
        &(|| data.iter().copied()),
        data.len() as u32,
        &mut out,
    )
    .unwrap();
    tantivy_columnar::column_values::serialize_column_values_u128(&data, &mut out).unwrap();
    let out = OwnedBytes::new(out);
    tantivy_columnar::column_values::open_u128_mapped::<u128>(out).unwrap()
}
@@ -41,7 +36,7 @@ fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn ColumnValues<u128>> {
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
const SINGLE_ITEM: u64 = 90;
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;

fn get_data_50percent_item() -> Vec<u128> {
    let mut rng = StdRng::from_seed([1u8; 32]);

@@ -63,7 +58,7 @@ fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {

    b.iter(|| {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(
        column.get_row_ids_for_value_range(
            *FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
            0..data.len() as u32,
            &mut positions,
@@ -79,7 +74,7 @@ fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {

    b.iter(|| {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(
        column.get_row_ids_for_value_range(
            *SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
            0..data.len() as u32,
            &mut positions,
@@ -95,7 +90,7 @@ fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {

    b.iter(|| {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
        column.get_row_ids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
        positions
    });
}
@@ -5,9 +5,7 @@ use std::ops::RangeInclusive;
use std::sync::Arc;

use rand::prelude::*;
use tantivy_columnar::column_values::{
    serialize_and_load_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
};
use tantivy_columnar::column_values::{serialize_and_load_u64_based_column_values, CodecType};
use tantivy_columnar::*;
use test::Bencher;

@@ -91,7 +89,7 @@ fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
    let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
    b.iter(|| {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(
        column.get_row_ids_for_value_range(
            FIFTY_PERCENT_RANGE,
            0..data.len() as u32,
            &mut positions,
@@ -108,7 +106,7 @@ fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {

    b.iter(|| {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(
        column.get_row_ids_for_value_range(
            ONE_PERCENT_ITEM_RANGE,
            0..data.len() as u32,
            &mut positions,
@@ -125,7 +123,7 @@ fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {

    b.iter(|| {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
        column.get_row_ids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
        positions
    });
}
@@ -138,7 +136,7 @@ fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {

    b.iter(|| {
        let mut positions = Vec::new();
        column.get_docids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
        column.get_row_ids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
        positions
    });
}
@@ -180,7 +178,7 @@ fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
    b.iter(|| {
        let mut a = 0u64;
        for i in 0u32..n as u32 {
            a += column.get_val(i);
            a += column_ref.get_val(i);
        }
        a
    });
columnar/columnar-cli/Cargo.toml (new file): 17 lines
@@ -0,0 +1,17 @@
[package]
name = "tantivy-columnar-cli"
version = "0.1.0"
edition = "2021"
license = "MIT"

[dependencies]
columnar = {path="../", package="tantivy-columnar"}
serde_json = "1"
serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
serde = "1"

[workspace]
members = []

[profile.release]
debug = true
columnar/columnar-cli/src/main.rs (new file): 134 lines
@@ -0,0 +1,134 @@
use columnar::ColumnarWriter;
use columnar::NumericalValue;
use serde_json_borrow;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::io::BufReader;
use std::time::Instant;

#[derive(Default)]
struct JsonStack {
    path: String,
    stack: Vec<usize>,
}

impl JsonStack {
    fn push(&mut self, seg: &str) {
        let len = self.path.len();
        self.stack.push(len);
        self.path.push('.');
        self.path.push_str(seg);
    }

    fn pop(&mut self) {
        if let Some(len) = self.stack.pop() {
            self.path.truncate(len);
        }
    }

    fn path(&self) -> &str {
        &self.path[1..]
    }
}

fn append_json_to_columnar(
    doc: u32,
    json_value: &serde_json_borrow::Value,
    columnar: &mut ColumnarWriter,
    stack: &mut JsonStack,
) -> usize {
    let mut count = 0;
    match json_value {
        serde_json_borrow::Value::Null => {}
        serde_json_borrow::Value::Bool(val) => {
            columnar.record_numerical(
                doc,
                stack.path(),
                NumericalValue::from(if *val { 1u64 } else { 0u64 }),
            );
            count += 1;
        }
        serde_json_borrow::Value::Number(num) => {
            let numerical_value: NumericalValue = if let Some(num_i64) = num.as_i64() {
                num_i64.into()
            } else if let Some(num_u64) = num.as_u64() {
                num_u64.into()
            } else if let Some(num_f64) = num.as_f64() {
                num_f64.into()
            } else {
                panic!();
            };
            count += 1;
            columnar.record_numerical(
                doc,
                stack.path(),
                numerical_value,
            );
        }
        serde_json_borrow::Value::Str(msg) => {
            columnar.record_str(
                doc,
                stack.path(),
                msg,
            );
            count += 1;
        },
        serde_json_borrow::Value::Array(vals) => {
            for val in vals {
                count += append_json_to_columnar(doc, val, columnar, stack);
            }
        },
        serde_json_borrow::Value::Object(json_map) => {
            for (child_key, child_val) in json_map {
                stack.push(child_key);
                count += append_json_to_columnar(doc, child_val, columnar, stack);
                stack.pop();
            }
        },
    }
    count
}

fn main() -> io::Result<()> {
    let file = File::open("gh_small.json")?;
    let mut reader = BufReader::new(file);
    let mut line = String::with_capacity(100);
    let mut columnar = columnar::ColumnarWriter::default();
    let mut doc = 0;
    let start = Instant::now();
    let mut stack = JsonStack::default();
    let mut total_count = 0;

    let start_build = Instant::now();
    loop {
        line.clear();
        let len = reader.read_line(&mut line)?;
        if len == 0 {
            break;
        }
        let Ok(json_value) = serde_json::from_str::<serde_json_borrow::Value>(&line) else { continue; };
        total_count += append_json_to_columnar(doc, &json_value, &mut columnar, &mut stack);
        doc += 1;
    }
    println!("Build in {:?}", start_build.elapsed());

    println!("value count {total_count}");

    let mut buffer = Vec::new();
    let start_serialize = Instant::now();
    columnar.serialize(doc, None, &mut buffer)?;
    println!("Serialized in {:?}", start_serialize.elapsed());
    println!("num docs: {doc}, {:?}", start.elapsed());
    println!("buffer len {} MB", buffer.len() / 1_000_000);
    let columnar = columnar::ColumnarReader::open(buffer)?;
    for (column_name, dynamic_column) in columnar.list_columns()? {
        let num_bytes = dynamic_column.num_bytes();
        let typ = dynamic_column.column_type();
        if num_bytes > 1_000_000 {
            println!("{column_name} {typ:?} {} KB", num_bytes / 1_000);
        }
    }
    println!("{} columns", columnar.num_columns());
    Ok(())
}
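A note on the `JsonStack` helper above: it flattens nested JSON keys into dotted column paths without allocating a new string per level. `push` saves the current path length before appending `.segment`, and `pop` simply truncates back to it. A minimal standalone sketch of the same technique (independent of the tantivy-columnar crates; names are reused for readability):

```rust
// Standalone mirror of columnar-cli's path-stack idea.
#[derive(Default)]
struct JsonStack {
    path: String,      // e.g. ".user.address.city"; the leading dot is skipped on read
    stack: Vec<usize>, // saved lengths, one per nesting level
}

impl JsonStack {
    fn push(&mut self, seg: &str) {
        self.stack.push(self.path.len()); // remember where this level starts
        self.path.push('.');
        self.path.push_str(seg);
    }
    fn pop(&mut self) {
        if let Some(len) = self.stack.pop() {
            self.path.truncate(len); // O(1) unwind, no reallocation
        }
    }
    fn path(&self) -> &str {
        &self.path[1..] // strip the leading '.'
    }
}

fn main() {
    let mut stack = JsonStack::default();
    stack.push("user");
    stack.push("address");
    stack.push("city");
    assert_eq!(stack.path(), "user.address.city");
    stack.pop(); // leave "city"
    stack.push("zip");
    assert_eq!(stack.path(), "user.address.zip");
}
```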
@@ -1,14 +1,17 @@
# zero to one
* merges with non trivial mapping (deletes / sort)
* emission of the sort mapping.
+ multivalued range queries restart from the beginning all of the time.

* revisit line codec
* removal of all rows of a column in the schema due to deletes
* add columns from schema on merge
* Plugging JSON
replug examples

* replug examples
* move datetime to quickwit common
* switch to nanos
* reintroduce the gcd map.

# Perf and Size
* remove alloc in `ord_to_term`
+ multivalued range queries restart from the beginning all of the time.
* re-add ZSTD compression for dictionaries
no systematic monotonic mapping
consider removing multilinear
@@ -32,7 +32,7 @@ impl BytesColumn {

    /// Returns the number of rows in the column.
    pub fn num_rows(&self) -> RowId {
        self.term_ord_column.num_rows()
        self.term_ord_column.num_docs()
    }

    pub fn term_ords(&self, row_id: RowId) -> impl Iterator<Item = u64> + '_ {
@@ -56,12 +56,6 @@ impl BytesColumn {
#[derive(Clone)]
pub struct StrColumn(BytesColumn);

impl From<BytesColumn> for StrColumn {
    fn from(bytes_col: BytesColumn) -> Self {
        StrColumn(bytes_col)
    }
}

impl From<StrColumn> for BytesColumn {
    fn from(str_column: StrColumn) -> BytesColumn {
        str_column.0
@@ -69,6 +63,10 @@ impl From<StrColumn> for BytesColumn {
}

impl StrColumn {
    pub(crate) fn wrap(bytes_column: BytesColumn) -> StrColumn {
        StrColumn(bytes_column)
    }

    pub fn dictionary(&self) -> &Dictionary<VoidSSTable> {
        self.0.dictionary.as_ref()
    }
@@ -3,14 +3,14 @@ mod serialize;

use std::fmt::Debug;
use std::io::Write;
use std::ops::Deref;
use std::ops::{Deref, Range, RangeInclusive};
use std::sync::Arc;

use common::BinarySerializable;
pub use dictionary_encoded::{BytesColumn, StrColumn};
pub use serialize::{
    open_column_bytes, open_column_u128, open_column_u64, serialize_column_mappable_to_u128,
    serialize_column_mappable_to_u64,
    open_column_bytes, open_column_str, open_column_u128, open_column_u64,
    serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
};

use crate::column_index::ColumnIndex;
@@ -41,14 +41,15 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
    pub fn get_cardinality(&self) -> Cardinality {
        self.idx.get_cardinality()
    }
    pub fn num_rows(&self) -> RowId {

    pub fn num_docs(&self) -> RowId {
        match &self.idx {
            ColumnIndex::Full => self.values.num_vals() as u32,
            ColumnIndex::Optional(optional_index) => optional_index.num_rows(),
            ColumnIndex::Optional(optional_index) => optional_index.num_docs(),
            ColumnIndex::Multivalued(col_index) => {
                // The multivalued index contains all value start row_ids,
                // and one extra value at the end with the overall number of rows.
                col_index.num_rows()
                col_index.num_docs()
            }
        }
    }
@@ -70,6 +71,34 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
            .map(|value_row_id: RowId| self.values.get_val(value_row_id))
    }

    /// Get the docids of values which are in the provided value range.
    #[inline]
    pub fn get_docids_for_value_range(
        &self,
        value_range: RangeInclusive<T>,
        selected_docid_range: Range<u32>,
        docids: &mut Vec<u32>,
    ) {
        // Convert the passed docid range to a row id range.
        let rowid_range = self.idx.docid_range_to_rowids(selected_docid_range.clone());

        // Load rows.
        self.values
            .get_row_ids_for_value_range(value_range, rowid_range, docids);
        // Convert rows to docids.
        self.idx
            .select_batch_in_place(docids, selected_docid_range.start);
    }

    /// Fills the output vector with the (possibly multiple) values that are associated with
    /// `row_id`.
    ///
    /// This method clears the `output` vector.
    pub fn fill_vals(&self, row_id: RowId, output: &mut Vec<T>) {
        output.clear();
        output.extend(self.values(row_id));
    }

    pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> {
        Arc::new(FirstValueWithDefault {
            column: self,
@@ -122,8 +151,8 @@ impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
    fn num_vals(&self) -> u32 {
        match &self.column.idx {
            ColumnIndex::Full => self.column.values.num_vals(),
            ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(),
            ColumnIndex::Multivalued(_) => todo!(),
            ColumnIndex::Optional(optional_idx) => optional_idx.num_docs(),
            ColumnIndex::Multivalued(multivalue_idx) => multivalue_idx.num_docs(),
        }
    }
}
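`get_docids_for_value_range` above composes three steps: map the selected doc id range to a value row range, scan the value store for hits, then map the hit rows back to doc ids in place. The sketch below mirrors that pipeline with plain slices, specialized to a multivalued column stored as start offsets (doc `d` owns `values[starts[d]..starts[d+1]]`). All names and types here are illustrative, not the crate's API:

```rust
use std::ops::{Range, RangeInclusive};

// Step 1: a docid range maps to the contiguous row range its values occupy.
fn docid_range_to_rowids(starts: &[u32], docs: Range<u32>) -> Range<u32> {
    starts[docs.start as usize]..starts[docs.end as usize]
}

// Step 2: linear scan of the value store, collecting matching row ids.
fn row_ids_for_value_range(
    values: &[u64],
    range: RangeInclusive<u64>,
    rows: Range<u32>,
    hits: &mut Vec<u32>,
) {
    for row in rows {
        if range.contains(&values[row as usize]) {
            hits.push(row);
        }
    }
}

// Step 3: convert sorted row ids back to docids in place, deduplicating docs.
fn rows_to_docids(starts: &[u32], doc_start: u32, hits: &mut Vec<u32>) {
    let mut doc = doc_start;
    let mut write = 0;
    let mut last_doc = u32::MAX;
    for i in 0..hits.len() {
        let row = hits[i];
        while starts[(doc + 1) as usize] <= row {
            doc += 1; // advance until doc's row range contains this row
        }
        if doc != last_doc {
            hits[write] = doc;
            write += 1;
            last_doc = doc;
        }
    }
    hits.truncate(write);
}

fn main() {
    // 3 docs: doc0 -> values[0..2], doc1 -> values[2..3], doc2 -> values[3..5].
    let starts = [0u32, 2, 3, 5];
    let values = [10u64, 50, 42, 42, 99];
    let mut hits = Vec::new();
    let row_range = docid_range_to_rowids(&starts, 0..3);
    row_ids_for_value_range(&values, 40..=60, row_range, &mut hits);
    rows_to_docids(&starts, 0, &mut hits);
    assert_eq!(hits, vec![0, 1, 2]); // 50, 42, 42 all match, one entry per doc
}
```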
@@ -1,4 +1,3 @@
use std::fmt::Debug;
use std::io;
use std::io::Write;
use std::sync::Arc;
@@ -12,20 +11,20 @@ use crate::column_values::serialize::serialize_column_values_u128;
use crate::column_values::u64_based::{serialize_u64_based_column_values, CodecType};
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
use crate::iterable::Iterable;
use crate::StrColumn;

pub fn serialize_column_mappable_to_u128<T: MonotonicallyMappableToU128>(
    column_index: SerializableColumnIndex<'_>,
    iterable: &dyn Iterable<T>,
    num_vals: u32,
    output: &mut impl Write,
) -> io::Result<()> {
    let column_index_num_bytes = serialize_column_index(column_index, output)?;
    serialize_column_values_u128(iterable, num_vals, output)?;
    serialize_column_values_u128(iterable, output)?;
    output.write_all(&column_index_num_bytes.to_le_bytes())?;
    Ok(())
}

pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64 + Debug>(
pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64>(
    column_index: SerializableColumnIndex<'_>,
    column_values: &impl Iterable<T>,
    output: &mut impl Write,
@@ -77,15 +76,19 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
    })
}

pub fn open_column_bytes<T: From<BytesColumn>>(data: OwnedBytes) -> io::Result<T> {
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
    let (body, dictionary_len_bytes) = data.rsplit(4);
    let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
    let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
    let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
    let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
    let bytes_column = BytesColumn {
    Ok(BytesColumn {
        dictionary,
        term_ord_column,
    };
    Ok(bytes_column.into())
    })
}

pub fn open_column_str(data: OwnedBytes) -> io::Result<StrColumn> {
    let bytes_column = open_column_bytes(data)?;
    Ok(StrColumn::wrap(bytes_column))
}
columnar/src/column_index/merge/mod.rs (new file): 136 lines
@@ -0,0 +1,136 @@
mod shuffled;
mod stacked;

use shuffled::merge_column_index_shuffled;
use stacked::merge_column_index_stacked;

use crate::column_index::SerializableColumnIndex;
use crate::{Cardinality, ColumnIndex, MergeRowOrder};

// For simplification, we never have cardinality go down due to deletes.
fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
    columns
        .iter()
        .flatten()
        .map(ColumnIndex::get_cardinality)
        .max()
        .unwrap_or(Cardinality::Full)
}

pub fn merge_column_index<'a>(
    columns: &'a [Option<ColumnIndex>],
    merge_row_order: &'a MergeRowOrder,
) -> SerializableColumnIndex<'a> {
    // For simplification, we do not try to detect whether the cardinality could be
    // downgraded thanks to deletes.
    let cardinality_after_merge = detect_cardinality(columns);
    match merge_row_order {
        MergeRowOrder::Stack(stack_merge_order) => {
            merge_column_index_stacked(columns, cardinality_after_merge, stack_merge_order)
        }
        MergeRowOrder::Shuffled(complex_merge_order) => {
            merge_column_index_shuffled(columns, cardinality_after_merge, complex_merge_order)
        }
    }
}

// TODO actually, the shuffled code path is a bit too general.
// In practice, we do not really shuffle everything.
// The merge order restricted to a specific column keeps the original row order.
//
// This may offer some optimization that we have not explored yet.

#[cfg(test)]
mod tests {
    use crate::column_index::merge::detect_cardinality;
    use crate::column_index::multivalued_index::MultiValueIndex;
    use crate::column_index::{merge_column_index, OptionalIndex, SerializableColumnIndex};
    use crate::{Cardinality, ColumnIndex, MergeRowOrder, RowAddr, RowId, ShuffleMergeOrder};

    #[test]
    fn test_detect_cardinality() {
        assert_eq!(detect_cardinality(&[]), Cardinality::Full);
        let optional_index: ColumnIndex = OptionalIndex::for_test(1, &[]).into();
        let multivalued_index: ColumnIndex = MultiValueIndex::for_test(&[0, 1]).into();
        assert_eq!(
            detect_cardinality(&[Some(optional_index.clone()), None]),
            Cardinality::Optional
        );
        assert_eq!(
            detect_cardinality(&[Some(optional_index.clone()), Some(ColumnIndex::Full)]),
            Cardinality::Optional
        );
        assert_eq!(
            detect_cardinality(&[Some(multivalued_index.clone()), None]),
            Cardinality::Multivalued
        );
        assert_eq!(
            detect_cardinality(&[
                Some(multivalued_index.clone()),
                Some(optional_index.clone())
            ]),
            Cardinality::Multivalued
        );
        assert_eq!(
            detect_cardinality(&[Some(optional_index), Some(multivalued_index)]),
            Cardinality::Multivalued
        );
    }

    #[test]
    fn test_merge_index_multivalued_sorted() {
        let column_indexes: Vec<Option<ColumnIndex>> =
            vec![Some(MultiValueIndex::for_test(&[0, 2, 5]).into())];
        let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
            &[2],
            vec![
                RowAddr {
                    segment_ord: 0u32,
                    row_id: 1u32,
                },
                RowAddr {
                    segment_ord: 0u32,
                    row_id: 0u32,
                },
            ],
        )
        .into();
        let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
        let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
            else { panic!("Expected a multivalued index") };
        let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
        assert_eq!(&start_indexes, &[0, 3, 5]);
    }

    #[test]
    fn test_merge_index_multivalued_sorted_several_segment() {
        let column_indexes: Vec<Option<ColumnIndex>> = vec![
            Some(MultiValueIndex::for_test(&[0, 2, 5]).into()),
            None,
            Some(MultiValueIndex::for_test(&[0, 1, 4]).into()),
        ];
        let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
            &[2, 0, 2],
            vec![
                RowAddr {
                    segment_ord: 2u32,
                    row_id: 1u32,
                },
                RowAddr {
                    segment_ord: 0u32,
                    row_id: 0u32,
                },
                RowAddr {
                    segment_ord: 2u32,
                    row_id: 0u32,
                },
            ],
        )
        .into();
        let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
        let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
            else { panic!("Expected a multivalued index") };
        let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
        assert_eq!(&start_indexes, &[0, 3, 5, 6]);
    }
}
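`detect_cardinality` works because `Cardinality` is totally ordered with `Full < Optional < Multivalued`, so `max` picks the loosest cardinality that any input segment requires. A hedged standalone sketch of that trick; the derive-based ordering here is an assumption for illustration, not necessarily how the crate defines it:

```rust
// Sketch: deriving Ord on an enum makes declaration order the comparison
// order, so max() over per-segment cardinalities picks the most permissive.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Cardinality {
    Full,        // every doc has exactly one value
    Optional,    // 0 or 1 value per doc
    Multivalued, // 0..n values per doc
}

fn detect_cardinality(columns: &[Option<Cardinality>]) -> Cardinality {
    columns
        .iter()
        .flatten() // skip segments where the column is absent
        .copied()
        .max()
        .unwrap_or(Cardinality::Full)
}

fn main() {
    assert_eq!(detect_cardinality(&[]), Cardinality::Full);
    assert_eq!(
        detect_cardinality(&[Some(Cardinality::Full), Some(Cardinality::Optional)]),
        Cardinality::Optional
    );
    assert_eq!(
        detect_cardinality(&[Some(Cardinality::Optional), Some(Cardinality::Multivalued), None]),
        Cardinality::Multivalued
    );
}
```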
columnar/src/column_index/merge/shuffled.rs (new file): 167 lines
@@ -0,0 +1,167 @@
use std::iter;

use crate::column_index::{SerializableColumnIndex, Set};
use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, ShuffleMergeOrder};

pub fn merge_column_index_shuffled<'a>(
    column_indexes: &'a [Option<ColumnIndex>],
    cardinality_after_merge: Cardinality,
    shuffle_merge_order: &'a ShuffleMergeOrder,
) -> SerializableColumnIndex<'a> {
    match cardinality_after_merge {
        Cardinality::Full => SerializableColumnIndex::Full,
        Cardinality::Optional => {
            let non_null_row_ids =
                merge_column_index_shuffled_optional(column_indexes, shuffle_merge_order);
            SerializableColumnIndex::Optional {
                non_null_row_ids,
                num_rows: shuffle_merge_order.num_rows(),
            }
        }
        Cardinality::Multivalued => {
            let multivalue_start_index =
                merge_column_index_shuffled_multivalued(column_indexes, shuffle_merge_order);
            SerializableColumnIndex::Multivalued(multivalue_start_index)
        }
    }
}

/// Merge several column indexes into one, ordering rows according to the merge_order passed as
/// argument. While it is true that the `merge_order` may imply deletes and hence could in theory
/// turn a multivalued index into an optional one, this is not supported today for simplification.
///
/// In other words, the column_indexes passed as argument may NOT be multivalued.
fn merge_column_index_shuffled_optional<'a>(
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
    Box::new(ShuffledOptionalIndex {
        column_indexes,
        merge_order,
    })
}

struct ShuffledOptionalIndex<'a> {
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
}

impl<'a> Iterable<u32> for ShuffledOptionalIndex<'a> {
    fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        Box::new(self.merge_order
            .iter_new_to_old_row_addrs()
            .enumerate()
            .filter_map(|(new_row_id, old_row_addr)| {
                let Some(column_index) = &self.column_indexes[old_row_addr.segment_ord as usize] else {
                    return None;
                };
                let row_id = new_row_id as u32;
                if column_index.has_value(old_row_addr.row_id) {
                    Some(row_id)
                } else {
                    None
                }
            }))
    }
}

fn merge_column_index_shuffled_multivalued<'a>(
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
    Box::new(ShuffledMultivaluedIndex {
        column_indexes,
        merge_order,
    })
}

struct ShuffledMultivaluedIndex<'a> {
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
}

fn iter_num_values<'a>(
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
) -> impl Iterator<Item = u32> + 'a {
    merge_order.iter_new_to_old_row_addrs().map(|row_addr| {
        let Some(column_index) = &column_indexes[row_addr.segment_ord as usize] else {
            // No values in the entire column. It surely means there are 0 values associated to this row.
            return 0u32;
        };
        match column_index {
            ColumnIndex::Full => 1,
            ColumnIndex::Optional(optional_index) => {
                u32::from(optional_index.contains(row_addr.row_id))
            }
            ColumnIndex::Multivalued(multivalued_index) => {
                multivalued_index.range(row_addr.row_id).len() as u32
            }
        }
    })
}

/// Transforms an iterator containing the number of vals per row (with `num_rows` elements)
/// into a `start_offset` iterator starting at 0 (with `num_rows + 1` elements).
fn integrate_num_vals(num_vals: impl Iterator<Item = u32>) -> impl Iterator<Item = RowId> {
    iter::once(0u32).chain(num_vals.scan(0, |state, num_vals| {
        *state += num_vals;
        Some(*state)
    }))
}

impl<'a> Iterable<u32> for ShuffledMultivaluedIndex<'a> {
    fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        let num_vals_per_row = iter_num_values(self.column_indexes, self.merge_order);
        Box::new(integrate_num_vals(num_vals_per_row))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::column_index::OptionalIndex;
    use crate::RowAddr;

    #[test]
    fn test_integrate_num_vals_empty() {
        assert!(integrate_num_vals(iter::empty()).eq(iter::once(0)));
    }

    #[test]
    fn test_integrate_num_vals_one_el() {
        assert!(integrate_num_vals(iter::once(10)).eq([0, 10].into_iter()));
    }

    #[test]
    fn test_integrate_num_vals_several() {
        assert!(integrate_num_vals([3, 0, 10, 20].into_iter()).eq([0, 3, 3, 13, 33].into_iter()));
    }

    #[test]
    fn test_merge_column_index_optional_shuffle() {
        let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
        let column_indexes = vec![Some(optional_index), Some(ColumnIndex::Full)];
        let row_addrs = vec![
            RowAddr {
                segment_ord: 0u32,
                row_id: 1u32,
            },
            RowAddr {
                segment_ord: 1u32,
                row_id: 0u32,
            },
        ];
        let shuffle_merge_order = ShuffleMergeOrder::for_test(&[2, 1], row_addrs);
        let serializable_index = merge_column_index_shuffled(
            &column_indexes[..],
            Cardinality::Optional,
            &shuffle_merge_order,
        );
        let SerializableColumnIndex::Optional { non_null_row_ids, num_rows } = serializable_index else { panic!() };
        assert_eq!(num_rows, 2);
        let non_null_rows: Vec<RowId> = non_null_row_ids.boxed_iter().collect();
        assert_eq!(&non_null_rows, &[1]);
    }
}
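Under a shuffle order, rebuilding a multivalued start-offset index thus reduces to the two passes above: count the values each new row pulls from its old address (`iter_num_values`), then prefix-sum the counts into start offsets (`integrate_num_vals`). A self-contained sketch with plain vectors standing in for the crate's iterables:

```rust
// Sketch of the shuffled multivalued merge with plain types. Each segment is
// a start-offset vec; a merge order lists (segment, old_row) pairs in the
// new row order. Names here are illustrative.

fn num_vals(starts: &[u32], row: u32) -> u32 {
    starts[row as usize + 1] - starts[row as usize]
}

/// Prefix-sum the per-row value counts into start offsets
/// (num_rows + 1 entries), like `integrate_num_vals` above.
fn integrate(counts: impl Iterator<Item = u32>) -> Vec<u32> {
    std::iter::once(0)
        .chain(counts.scan(0, |acc, n| {
            *acc += n;
            Some(*acc)
        }))
        .collect()
}

fn main() {
    // Two segments with start offsets [0,2,5] and [0,1,4].
    let segments: Vec<Vec<u32>> = vec![vec![0, 2, 5], vec![0, 1, 4]];
    // New row order: (segment 1, row 1), (segment 0, row 0), (segment 1, row 0).
    let merge_order = [(1u32, 1u32), (0, 0), (1, 0)];
    let counts = merge_order
        .iter()
        .map(|&(seg, row)| num_vals(&segments[seg as usize], row));
    // Row (1,1) carries 4-1=3 values, (0,0) carries 2, (1,0) carries 1.
    assert_eq!(integrate(counts), vec![0, 3, 5, 6]);
}
```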
@@ -1,29 +1,19 @@
use std::iter;

use crate::column_index::{
    multivalued_index, serialize_column_index, SerializableColumnIndex, Set,
};
use crate::column_index::{SerializableColumnIndex, Set};
use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, MergeRowOrder, RowId, StackMergeOrder};
use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder};

fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
    columns
        .iter()
        .flatten()
        .map(ColumnIndex::get_cardinality)
        .max()
        .unwrap_or(Cardinality::Full)
}

pub fn stack_column_index<'a>(
/// Simple case:
/// The new mapping just consists in stacking the different column indexes.
///
/// There are no sorts or deletes involved.
pub fn merge_column_index_stacked<'a>(
    columns: &'a [Option<ColumnIndex>],
    merge_row_order: &'a MergeRowOrder,
    cardinality_after_merge: Cardinality,
    stack_merge_order: &'a StackMergeOrder,
) -> SerializableColumnIndex<'a> {
    let MergeRowOrder::Stack(stack_merge_order) = merge_row_order else {
        panic!("only supporting stacking at the moment.");
    };
    let cardinality = detect_cardinality(columns);
    match cardinality {
    match cardinality_after_merge {
        Cardinality::Full => SerializableColumnIndex::Full,
        Cardinality::Optional => SerializableColumnIndex::Optional {
            non_null_row_ids: Box::new(StackedOptionalIndex {
@@ -60,7 +50,7 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
            Some(ColumnIndex::Optional(optional_index)) => Box::new(
                optional_index
                    .iter_rows()
                    .map(move |row_id: RowId| row_id + columnar_row_range.start),
                    .map(move |row_id: RowId| columnar_row_range.start + row_id),
            ),
            Some(ColumnIndex::Multivalued(_)) => {
                panic!("No multivalued index is allowed when stacking column index");
@@ -141,13 +131,6 @@ fn stack_multivalued_indexes<'a>(
    }))
}

fn stack_multivalued_index<'a>(
    columns: &'a [Option<ColumnIndex>],
    stack_merge_order: &StackMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
    todo!()
}

#[cfg(test)]
mod tests {
    use crate::RowId;
@@ -5,12 +5,12 @@ mod serialize;

use std::ops::Range;

pub use merge::stack_column_index;
pub use merge::merge_column_index;
pub use optional_index::{OptionalIndex, Set};
pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex};

use crate::column_index::multivalued_index::MultiValueIndex;
use crate::{Cardinality, RowId};
use crate::{Cardinality, DocId, RowId};

#[derive(Clone)]
pub enum ColumnIndex {
@@ -21,6 +21,18 @@ pub enum ColumnIndex {
    Multivalued(MultiValueIndex),
}

impl From<OptionalIndex> for ColumnIndex {
    fn from(optional_index: OptionalIndex) -> ColumnIndex {
        ColumnIndex::Optional(optional_index)
    }
}

impl From<MultiValueIndex> for ColumnIndex {
    fn from(multi_value_index: MultiValueIndex) -> ColumnIndex {
        ColumnIndex::Multivalued(multi_value_index)
    }
}

impl ColumnIndex {
    pub fn get_cardinality(&self) -> Cardinality {
        match self {
@@ -30,21 +42,52 @@ impl ColumnIndex {
        }
    }

    pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
    /// Returns true if and only if there is at least one value associated with the row.
    pub fn has_value(&self, doc_id: DocId) -> bool {
        match self {
            ColumnIndex::Full => row_id..row_id + 1,
            ColumnIndex::Full => true,
            ColumnIndex::Optional(optional_index) => optional_index.contains(doc_id),
            ColumnIndex::Multivalued(multivalued_index) => {
                !multivalued_index.range(doc_id).is_empty()
            }
        }
    }

    pub fn value_row_ids(&self, doc_id: DocId) -> Range<RowId> {
        match self {
            ColumnIndex::Full => doc_id..doc_id + 1,
            ColumnIndex::Optional(optional_index) => {
                if let Some(val) = optional_index.rank_if_exists(row_id) {
                if let Some(val) = optional_index.rank_if_exists(doc_id) {
                    val..val + 1
                } else {
                    0..0
                }
            }
            ColumnIndex::Multivalued(multivalued_index) => multivalued_index.range(row_id),
            ColumnIndex::Multivalued(multivalued_index) => multivalued_index.range(doc_id),
        }
    }

    pub fn select_batch_in_place(&self, rank_ids: &mut Vec<RowId>) {
    pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
        match self {
            ColumnIndex::Full => doc_id,
            ColumnIndex::Optional(optional_index) => {
                let row_start = optional_index.rank(doc_id.start);
                let row_end = optional_index.rank(doc_id.end);
                row_start..row_end
            }
            ColumnIndex::Multivalued(multivalued_index) => {
                let end_docid = doc_id.end.min(multivalued_index.num_docs() - 1) + 1;
                let start_docid = doc_id.start.min(end_docid);

                let row_start = multivalued_index.start_index_column.get_val(start_docid);
                let row_end = multivalued_index.start_index_column.get_val(end_docid);

                row_start..row_end
            }
        }
    }

    pub fn select_batch_in_place(&self, rank_ids: &mut Vec<RowId>, doc_id_start: DocId) {
        match self {
            ColumnIndex::Full => {
                // No need to do anything:
@@ -54,8 +97,7 @@ impl ColumnIndex {
                optional_index.select_batch(&mut rank_ids[..]);
            }
            ColumnIndex::Multivalued(multivalued_index) => {
                // TODO important: avoid using 0u32, and restart from the beginning all of the time.
                multivalued_index.select_batch_in_place(0u32, rank_ids)
                multivalued_index.select_batch_in_place(doc_id_start, rank_ids)
            }
        }
    }
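For an optional column, `docid_range_to_rowids` above maps a doc id range to a value row range with two `rank` calls, where `rank(d)` counts the non-null docs before `d`. The sketch below uses a sorted vec and `partition_point` in place of the crate's block-encoded set; the types are purely illustrative:

```rust
use std::ops::Range;

// Sketch: rank over a sorted list of non-null doc ids, then use it to map a
// docid range to the contiguous range of value rows, as in the Optional arm
// of docid_range_to_rowids above.
fn rank(non_null_docs: &[u32], doc: u32) -> u32 {
    non_null_docs.partition_point(|&d| d < doc) as u32
}

fn docid_range_to_rowids(non_null_docs: &[u32], docs: Range<u32>) -> Range<u32> {
    rank(non_null_docs, docs.start)..rank(non_null_docs, docs.end)
}

fn main() {
    // Docs 1, 4, 5 carry a value; they occupy value rows 0, 1, 2.
    let non_null = [1u32, 4, 5];
    assert_eq!(docid_range_to_rowids(&non_null, 0..2), 0..1); // only doc 1
    assert_eq!(docid_range_to_rowids(&non_null, 2..6), 1..3); // docs 4 and 5
    assert_eq!(docid_range_to_rowids(&non_null, 2..4), 1..1); // empty
}
```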
@@ -8,7 +8,7 @@ use common::OwnedBytes;
use crate::column_values::u64_based::CodecType;
use crate::column_values::ColumnValues;
use crate::iterable::Iterable;
use crate::RowId;
use crate::{DocId, RowId};

pub fn serialize_multivalued_index(
    multivalued_index: &dyn Iterable<RowId>,
@@ -42,23 +42,30 @@ impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
}

impl MultiValueIndex {
    pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex {
        let mut buffer = Vec::new();
        serialize_multivalued_index(&start_offsets, &mut buffer).unwrap();
        let bytes = OwnedBytes::new(buffer);
        open_multivalued_index(bytes).unwrap()
    }

    /// Returns `[start, end)`, such that the values associated with
    /// the given document are `start..end`.
    #[inline]
    pub(crate) fn range(&self, row_id: RowId) -> Range<RowId> {
        let start = self.start_index_column.get_val(row_id);
        let end = self.start_index_column.get_val(row_id + 1);
    pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
        let start = self.start_index_column.get_val(doc_id);
        let end = self.start_index_column.get_val(doc_id + 1);
        start..end
    }

    /// Returns the number of documents in the index.
    #[inline]
    pub fn num_rows(&self) -> u32 {
    pub fn num_docs(&self) -> u32 {
        self.start_index_column.num_vals() - 1
    }

    /// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
    /// row_ids. Positions are converted in place to docids.
    /// docids. Positions are converted in place to docids.
    ///
    /// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the
    /// index.
@@ -69,14 +76,14 @@ impl MultiValueIndex {
    /// TODO: Instead of a linear scan we can employ an exponential search followed by a binary
    /// search to match a docid to its value position.
    #[allow(clippy::bool_to_int_with_if)]
    pub(crate) fn select_batch_in_place(&self, row_start: RowId, ranks: &mut Vec<u32>) {
    pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
        if ranks.is_empty() {
            return;
        }
        let mut cur_doc = row_start;
        let mut cur_doc = docid_start;
        let mut last_doc = None;

        assert!(self.start_index_column.get_val(row_start) as u32 <= ranks[0]);
        assert!(self.start_index_column.get_val(docid_start) as u32 <= ranks[0]);

        let mut write_doc_pos = 0;
        for i in 0..ranks.len() {
@@ -120,7 +127,7 @@ mod tests {
        let offsets: Vec<RowId> = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
        let column: Arc<dyn ColumnValues<RowId>> = Arc::new(IterColumn::from(offsets.into_iter()));
        let index = MultiValueIndex::from(column);
        assert_eq!(index.num_rows(), 5);
        assert_eq!(index.num_docs(), 5);
        let positions = &[10u32, 11, 15, 20, 21, 22];
        assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);
        assert_eq!(index_to_pos_helper(&index, 1..5, positions), vec![1, 3, 4]);
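The TODO in `select_batch_in_place`'s doc comment suggests replacing the linear docid walk with an exponential search. A hedged sketch of that idea, not the crate's code: grow a probe window exponentially from the current doc, then binary-search inside it for the doc whose start-offset range contains the rank.

```rust
// Sketch: find the docid whose start-offset range contains `rank`, starting
// at `doc_start`. `starts` has num_docs + 1 entries; the caller guarantees
// starts[doc_start] <= rank < starts[num_docs] (as the assert above does).
fn doc_for_rank(starts: &[u32], doc_start: usize, rank: u32) -> usize {
    debug_assert!(rank < *starts.last().unwrap());
    // Exponentially widen [lo, hi] until starts[hi] > rank.
    let mut step = 1;
    let mut lo = doc_start;
    let mut hi = (doc_start + 1).min(starts.len() - 1);
    while starts[hi] <= rank {
        lo = hi;
        step *= 2;
        hi = (hi + step).min(starts.len() - 1);
    }
    // Binary search: last doc d in [lo, hi] with starts[d] <= rank.
    lo + starts[lo..=hi].partition_point(|&s| s <= rank) - 1
}

fn main() {
    let starts = [0u32, 10, 12, 15, 22, 23]; // same offsets as the test above
    assert_eq!(doc_for_rank(&starts, 0, 0), 0);   // rank 0 lives in doc 0
    assert_eq!(doc_for_rank(&starts, 0, 11), 1);  // 10..12 -> doc 1
    assert_eq!(doc_for_rank(&starts, 1, 15), 3);  // 15..22 -> doc 3
    assert_eq!(doc_for_rank(&starts, 0, 22), 4);  // 22..23 -> doc 4
}
```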
@@ -11,7 +11,7 @@ use set_block::{
};

use crate::iterable::Iterable;
use crate::{InvalidData, RowId};
use crate::{DocId, InvalidData, RowId};

/// The threshold for the number of elements after which we switch to dense block encoding.
///
@@ -88,22 +88,6 @@ pub struct OptionalIndex {
    block_metas: Arc<[BlockMeta]>,
}

impl OptionalIndex {
    pub fn num_rows(&self) -> RowId {
        self.num_rows
    }

    pub fn num_non_nulls(&self) -> RowId {
        self.num_non_null_rows
    }

    pub fn iter_rows<'a>(&'a self) -> impl Iterator<Item = RowId> + 'a {
        // TODO optimize
        let mut select_batch = self.select_cursor();
        (0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
    }
}

/// Splits a value address into lower and upper 16 bits.
/// The lower 16 bits are the value in the block.
/// The upper 16 bits are the block index.
@@ -193,11 +177,11 @@ impl Set<RowId> for OptionalIndex {
    }

    #[inline]
    fn rank(&self, row_id: RowId) -> RowId {
    fn rank(&self, doc_id: DocId) -> RowId {
        let RowAddr {
            block_id,
            in_block_row_id,
        } = row_addr_from_row_id(row_id);
        } = row_addr_from_row_id(doc_id);
        let block_meta = self.block_metas[block_id as usize];
        let block = self.block(block_meta);
        let block_offset_row_id = match block {
@@ -208,11 +192,11 @@ impl Set<RowId> for OptionalIndex {
    }

    #[inline]
    fn rank_if_exists(&self, row_id: RowId) -> Option<RowId> {
    fn rank_if_exists(&self, doc_id: DocId) -> Option<RowId> {
        let RowAddr {
            block_id,
            in_block_row_id,
        } = row_addr_from_row_id(row_id);
        } = row_addr_from_row_id(doc_id);
        let block_meta = self.block_metas[block_id as usize];
        let block = self.block(block_meta);
        let block_offset_row_id = match block {
@@ -236,7 +220,7 @@ impl Set<RowId> for OptionalIndex {
        block_doc_idx_start + in_block_rank as u32
    }

    fn select_cursor<'b>(&'b self) -> OptionalIndexSelectCursor<'b> {
    fn select_cursor(&self) -> OptionalIndexSelectCursor<'_> {
        OptionalIndexSelectCursor {
            current_block_cursor: BlockSelectCursor::Sparse(
                SparseBlockCodec::open(b"").select_cursor(),
@@ -251,6 +235,31 @@ impl Set<RowId> for OptionalIndex {
}

impl OptionalIndex {
    pub fn for_test(num_rows: RowId, row_ids: &[RowId]) -> OptionalIndex {
        assert!(row_ids
            .last()
            .copied()
            .map(|last_row_id| last_row_id < num_rows)
            .unwrap_or(true));
        let mut buffer = Vec::new();
        serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
        let bytes = OwnedBytes::new(buffer);
        open_optional_index(bytes).unwrap()
    }

    pub fn num_docs(&self) -> RowId {
        self.num_rows
    }

    pub fn num_non_nulls(&self) -> RowId {
        self.num_non_null_rows
    }

    pub fn iter_rows(&self) -> impl Iterator<Item = RowId> + '_ {
        // TODO optimize
        let mut select_batch = self.select_cursor();
        (0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
    }

    pub fn select_batch(&self, ranks: &mut [RowId]) {
        let mut select_cursor = self.select_cursor();
        for rank in ranks.iter_mut() {
@@ -259,7 +268,7 @@ impl OptionalIndex {
    }

    #[inline]
    fn block<'a>(&'a self, block_meta: BlockMeta) -> Block<'a> {
    fn block(&self, block_meta: BlockMeta) -> Block<'_> {
        let BlockMeta {
            start_byte_offset,
            block_variant,
@@ -342,7 +351,7 @@ fn serialize_optional_index_block(block_els: &[u16], out: &mut impl io::Write) -
    Ok(())
}

pub fn serialize_optional_index<'a, W: io::Write>(
pub fn serialize_optional_index<W: io::Write>(
    non_null_rows: &dyn Iterable<RowId>,
    num_rows: RowId,
    output: &mut W,
@@ -418,7 +427,7 @@ impl SerializedBlockMeta {
    }

    #[inline]
    fn to_bytes(&self) -> [u8; SERIALIZED_BLOCK_META_NUM_BYTES] {
    fn to_bytes(self) -> [u8; SERIALIZED_BLOCK_META_NUM_BYTES] {
        assert!(self.num_non_null_rows > 0);
        let mut bytes = [0u8; SERIALIZED_BLOCK_META_NUM_BYTES];
        bytes[0..2].copy_from_slice(&self.block_id.to_le_bytes());
@@ -492,7 +501,7 @@ pub fn open_optional_index(bytes: OwnedBytes) -> io::Result<OptionalIndex> {
        num_non_empty_block_bytes as usize * SERIALIZED_BLOCK_META_NUM_BYTES;
    let (block_data, block_metas) = bytes.rsplit(block_metas_num_bytes);
    let (block_metas, num_non_null_rows) =
        deserialize_optional_index_block_metadatas(block_metas.as_slice(), num_rows).into();
        deserialize_optional_index_block_metadatas(block_metas.as_slice(), num_rows);
    let optional_index = OptionalIndex {
        num_rows,
        num_non_null_rows,
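The (truncated) threshold comment earlier in this file refers to the per-block choice between a sparse list of u16 positions and a dense bitvector with precomputed ranks: dense pays a fixed cost per block, sparse pays per element, so dense wins once a block is full enough. The constants below are assumptions for illustration only, not the crate's actual values:

```rust
// Sketch of the sparse/dense break-even computation for one 64k block.
// ASSUMED layout (illustrative, not the crate's constants):
// - sparse block: one u16 per present row id -> 2 bytes per element
// - dense block: a 65536-bit bitvec in 64-bit mini blocks, each carrying a
//   precomputed u16 rank -> (65536 / 64) * (8 + 2) bytes, fixed.
const MINI_BLOCKS: usize = 65536 / 64;
const DENSE_BLOCK_NUM_BYTES: usize = MINI_BLOCKS * (8 + 2); // 10240 bytes

fn pick_encoding(num_elements_in_block: usize) -> &'static str {
    // Switch to dense once the sparse encoding would be larger.
    if num_elements_in_block * 2 > DENSE_BLOCK_NUM_BYTES {
        "dense"
    } else {
        "sparse"
    }
}

fn main() {
    assert_eq!(pick_encoding(100), "sparse");
    assert_eq!(pick_encoding(DENSE_BLOCK_NUM_BYTES / 2), "sparse"); // break-even
    assert_eq!(pick_encoding(DENSE_BLOCK_NUM_BYTES / 2 + 1), "dense");
}
```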
@@ -10,7 +10,7 @@ pub trait SetCodec {
    ///
    /// May panic if the elements are not sorted.
    fn serialize(els: impl Iterator<Item = Self::Item>, wrt: impl io::Write) -> io::Result<()>;
    fn open<'a>(data: &'a [u8]) -> Self::Reader<'a>;
    fn open(data: &[u8]) -> Self::Reader<'_>;
}

/// Stateful object that makes it possible to compute several selects in a row,
@@ -43,5 +43,5 @@ pub trait Set<T> {
    fn select(&self, rank: T) -> T;

    /// Creates a brand new select cursor.
    fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b>;
    fn select_cursor(&self) -> Self::SelectCursor<'_>;
}
@@ -45,7 +45,7 @@ impl SetCodec for DenseBlockCodec {
    }

    #[inline]
    fn open<'a>(data: &'a [u8]) -> Self::Reader<'a> {
    fn open(data: &[u8]) -> Self::Reader<'_> {
        assert_eq!(data.len(), DENSE_BLOCK_NUM_BYTES as usize);
        DenseBlock(data)
    }
@@ -94,7 +94,7 @@ impl DenseMiniBlock {
        Self { bitvec, rank }
    }

    fn to_bytes(&self) -> [u8; MINI_BLOCK_NUM_BYTES] {
    fn to_bytes(self) -> [u8; MINI_BLOCK_NUM_BYTES] {
        let mut bytes = [0u8; MINI_BLOCK_NUM_BYTES];
        bytes[..MINI_BLOCK_BITVEC_NUM_BYTES].copy_from_slice(&self.bitvec.to_le_bytes());
        bytes[MINI_BLOCK_BITVEC_NUM_BYTES..].copy_from_slice(&self.rank.to_le_bytes());
@@ -166,7 +166,7 @@ impl<'a> Set<u16> for DenseBlock<'a> {
    }

    #[inline(always)]
    fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b> {
    fn select_cursor(&self) -> Self::SelectCursor<'_> {
        DenseBlockSelectCursor {
            block_id: 0,
            dense_block: *self,

@@ -16,7 +16,7 @@ impl SetCodec for SparseBlockCodec {
        Ok(())
    }

    fn open<'a>(data: &'a [u8]) -> Self::Reader<'a> {
    fn open(data: &[u8]) -> Self::Reader<'_> {
        SparseBlock(data)
    }
}
@@ -56,7 +56,7 @@ impl<'a> Set<u16> for SparseBlock<'a> {
    }

    #[inline(always)]
    fn select_cursor<'b>(&'b self) -> Self::SelectCursor<'b> {
    fn select_cursor(&self) -> Self::SelectCursor<'_> {
        *self
    }
}
@@ -107,59 +107,43 @@ fn test_null_index(data: &[bool]) {
|
||||
|
||||
#[test]
|
||||
fn test_optional_index_test_translation() {
|
||||
let mut out = vec![];
|
||||
let iter = &[true, false, true, false];
|
||||
serialize_optional_index(&&iter[..], iter.len() as u32, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
let mut select_cursor = null_index.select_cursor();
|
let optional_index = OptionalIndex::for_test(4, &[0, 2]);
let mut select_cursor = optional_index.select_cursor();
assert_eq!(select_cursor.select(0), 0);
assert_eq!(select_cursor.select(1), 2);
}

#[test]
fn test_optional_index_translate() {
let mut out = vec![];
let iter = &[true, false, true, false];
serialize_optional_index(&&iter[..], iter.len() as RowId, &mut out).unwrap();
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
assert_eq!(null_index.rank_if_exists(0), Some(0));
assert_eq!(null_index.rank_if_exists(2), Some(1));
let optional_index = OptionalIndex::for_test(4, &[0, 2]);
assert_eq!(optional_index.rank_if_exists(0), Some(0));
assert_eq!(optional_index.rank_if_exists(2), Some(1));
}

#[test]
fn test_optional_index_small() {
let mut out = vec![];
let iter = &[true, false, true, false];
serialize_optional_index(&&iter[..], iter.len() as RowId, &mut out).unwrap();
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
assert!(null_index.contains(0));
assert!(!null_index.contains(1));
assert!(null_index.contains(2));
assert!(!null_index.contains(3));
let optional_index = OptionalIndex::for_test(4, &[0, 2]);
assert!(optional_index.contains(0));
assert!(!optional_index.contains(1));
assert!(optional_index.contains(2));
assert!(!optional_index.contains(3));
}

#[test]
fn test_optional_index_large() {
let mut docs = vec![];
docs.extend((0..ELEMENTS_PER_BLOCK).map(|_idx| false));
docs.extend((0..=1).map(|_idx| true));

let mut out = vec![];
serialize_optional_index(&&docs[..], docs.len() as RowId, &mut out).unwrap();
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
assert!(!null_index.contains(0));
assert!(!null_index.contains(100));
assert!(!null_index.contains(ELEMENTS_PER_BLOCK - 1));
assert!(null_index.contains(ELEMENTS_PER_BLOCK));
assert!(null_index.contains(ELEMENTS_PER_BLOCK + 1));
let row_ids = &[ELEMENTS_PER_BLOCK, ELEMENTS_PER_BLOCK + 1];
let optional_index = OptionalIndex::for_test(ELEMENTS_PER_BLOCK + 2, row_ids);
assert!(!optional_index.contains(0));
assert!(!optional_index.contains(100));
assert!(!optional_index.contains(ELEMENTS_PER_BLOCK - 1));
assert!(optional_index.contains(ELEMENTS_PER_BLOCK));
assert!(optional_index.contains(ELEMENTS_PER_BLOCK + 1));
}

fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
let mut buffer: Vec<u8> = Vec::new();
serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
let null_index = open_optional_index(OwnedBytes::new(buffer)).unwrap();
assert_eq!(null_index.num_rows(), num_rows);
assert!(null_index.iter_rows().eq(row_ids.iter().copied()));
let optional_index = OptionalIndex::for_test(num_rows, row_ids);
assert_eq!(optional_index.num_docs(), num_rows);
assert!(optional_index.iter_rows().eq(row_ids.iter().copied()));
}

#[test]
@@ -168,11 +152,9 @@ fn test_optional_index_iter_empty() {
}

fn test_optional_index_rank_aux(row_ids: &[RowId]) {
let mut buffer: Vec<u8> = Vec::new();
let num_rows = row_ids.last().copied().unwrap_or(0u32) + 1;
serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
let null_index = open_optional_index(OwnedBytes::new(buffer)).unwrap();
assert_eq!(null_index.num_rows(), num_rows);
let null_index = OptionalIndex::for_test(num_rows, row_ids);
assert_eq!(null_index.num_docs(), num_rows);
for (row_id, row_val) in row_ids.iter().copied().enumerate() {
assert_eq!(null_index.rank(row_val), row_id as u32);
assert_eq!(null_index.rank_if_exists(row_val), Some(row_id as u32));
@@ -207,6 +189,16 @@ fn test_optional_index_iter_dense_block() {
test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
}

#[test]
fn test_optional_index_for_tests() {
let optional_index = OptionalIndex::for_test(4, &[1, 2]);
assert!(!optional_index.contains(0));
assert!(optional_index.contains(1));
assert!(optional_index.contains(2));
assert!(!optional_index.contains(3));
assert_eq!(optional_index.num_docs(), 4);
}
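// A minimal sketch of how `rank` and `select` mirror each other, assuming only
// the `OptionalIndex` API exercised by the tests above:
//
//     let optional_index = OptionalIndex::for_test(4, &[1, 2]);
//     // rank_if_exists maps a row id to its dense ordinal...
//     assert_eq!(optional_index.rank_if_exists(2), Some(1));
//     // ...while select maps a dense ordinal back to its row id.
//     assert_eq!(optional_index.select_cursor().select(1), 2);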

#[cfg(all(test, feature = "unstable"))]
mod bench {

@@ -220,10 +212,13 @@ mod bench {
fn gen_bools(fill_ratio: f64) -> OptionalIndex {
let mut out = Vec::new();
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let vals: Vec<bool> = (0..TOTAL_NUM_VALUES)
let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
.map(|_| rng.gen_bool(fill_ratio))
.enumerate()
.filter(|(pos, val)| *val)
.map(|(pos, _)| pos as RowId)
.collect();
serialize_optional_index(&&vals[..], &mut out).unwrap();
serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
codec
}

@@ -6,7 +6,7 @@ use std::sync::Arc;
use tantivy_bitpacker::minmax;

use crate::column_values::monotonic_mapping::StrictlyMonotonicFn;
use crate::iterable::Iterable;
use crate::RowId;

/// `ColumnValues` provides access to a dense field column.
///
@@ -36,21 +36,21 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
}
}

/// Get the positions of values which are in the provided value range.
/// Get the row ids of values which are in the provided value range.
///
/// Note that position == docid for single value fast fields
#[inline(always)]
fn get_docids_for_value_range(
fn get_row_ids_for_value_range(
&self,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
row_id_range: Range<RowId>,
row_id_hits: &mut Vec<RowId>,
) {
let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
for idx in doc_id_range.start..doc_id_range.end {
let row_id_range = row_id_range.start..row_id_range.end.min(self.num_vals());
for idx in row_id_range.start..row_id_range.end {
let val = self.get_val(idx);
if value_range.contains(&val) {
positions.push(idx);
row_id_hits.push(idx);
}
}
}
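// A minimal usage sketch of the default implementation above, assuming some
// `column: &dyn ColumnValues<u64>` is in scope: collect the row ids whose
// value falls within 10..=20 among the first 100 rows.
//
//     let mut row_id_hits: Vec<RowId> = Vec::new();
//     column.get_row_ids_for_value_range(10..=20, 0..100, &mut row_id_hits);
//
// The default is a plain scan; codecs such as the compact space decompressor
// below override it with a codec-aware implementation.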
@@ -80,12 +80,6 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
}
}

impl<'a, T: PartialOrd> Iterable<T> for &'a [Arc<dyn ColumnValues<T>>] {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
Box::new(self.iter().flat_map(|column_value| column_value.iter()))
}
}

impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
#[inline(always)]
fn get_val(&self, idx: u32) -> T {
@@ -116,31 +110,14 @@ impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>>
fn get_range(&self, start: u64, output: &mut [T]) {
self.as_ref().get_range(start, output)
}
}

impl<'a, C: ColumnValues<T> + ?Sized, T: Copy + PartialOrd + Debug> ColumnValues<T> for &'a C {
fn get_val(&self, idx: u32) -> T {
(*self).get_val(idx)
}

fn min_value(&self) -> T {
(*self).min_value()
}

fn max_value(&self) -> T {
(*self).max_value()
}

fn num_vals(&self) -> u32 {
(*self).num_vals()
}

fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
(*self).iter()
}

fn get_range(&self, start: u64, output: &mut [T]) {
(*self).get_range(start, output)
fn get_row_ids_for_value_range(
&self,
value_range: RangeInclusive<T>,
row_id_range: Range<RowId>,
row_id_hits: &mut Vec<RowId>,
) {
self.as_ref().get_row_ids_for_value_range(value_range, row_id_range, row_id_hits)
}
}

@@ -264,13 +241,13 @@ where
)
}

fn get_docids_for_value_range(
fn get_row_ids_for_value_range(
&self,
range: RangeInclusive<Output>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.from_column.get_docids_for_value_range(
self.from_column.get_row_ids_for_value_range(
self.monotonic_mapping.inverse(range.start().clone())
..=self.monotonic_mapping.inverse(range.end().clone()),
doc_id_range,

@@ -22,6 +22,7 @@ use tantivy_bitpacker::{self, BitPacker, BitUnpacker};

use crate::column_values::compact_space::build_compact_space::get_compact_space;
use crate::column_values::ColumnValues;
use crate::RowId;

mod blank_range;
mod build_compact_space;
@@ -158,23 +159,30 @@ impl CompactSpace {
pub struct CompactSpaceCompressor {
params: IPCodecParams,
}

#[derive(Debug, Clone)]
pub struct IPCodecParams {
compact_space: CompactSpace,
bit_unpacker: BitUnpacker,
min_value: u128,
max_value: u128,
num_vals: u32,
num_vals: RowId,
num_bits: u8,
}

impl CompactSpaceCompressor {
/// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals.
pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u32) -> Self {
let mut values_sorted = BTreeSet::new();
values_sorted.extend(iter);
let total_num_values = num_vals;
pub fn num_vals(&self) -> RowId {
self.params.num_vals
}

/// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals.
pub fn train_from(iter: impl Iterator<Item = u128>) -> Self {
let mut values_sorted = BTreeSet::new();
let mut total_num_values = 0u32;
for val in iter {
total_num_values += 1u32;
values_sorted.insert(val);
}
let compact_space =
get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
let amplitude_compact_space = compact_space.amplitude_compact_space();
@@ -305,7 +313,7 @@ impl ColumnValues<u128> for CompactSpaceDecompressor {
}

#[inline]
fn get_docids_for_value_range(
fn get_row_ids_for_value_range(
&self,
value_range: RangeInclusive<u128>,
positions_range: Range<u32>,
@@ -450,364 +458,352 @@ impl CompactSpaceDecompressor {
}
}

// TODO reenable what can be reenabled.
// #[cfg(test)]
// mod tests {
//
// use super::*;
// use crate::column::format_version::read_format_version;
// use crate::column::column_footer::read_null_index_footer;
// use crate::column::serialize::U128Header;
// use crate::column::{open_u128, serialize_u128};
//
// #[test]
// fn compact_space_test() {
// let ips = &[
// 2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
// ]
// .into_iter()
// .collect();
// let compact_space = get_compact_space(ips, ips.len() as u32, 11);
// let amplitude = compact_space.amplitude_compact_space();
// assert_eq!(amplitude, 17);
// assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
// assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
// assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);
//
// for (num1, num2) in (0..3).tuple_windows() {
// assert_eq!(
// compact_space.get_range_mapping(num1).compact_end() + 1,
// compact_space.get_range_mapping(num2).compact_start
// );
// }
//
// let mut output: Vec<u8> = Vec::new();
// compact_space.serialize(&mut output).unwrap();
//
// assert_eq!(
// compact_space,
// CompactSpace::deserialize(&mut &output[..]).unwrap()
// );
//
// for ip in ips {
// let compact = compact_space.u128_to_compact(*ip).unwrap();
// assert_eq!(compact_space.compact_to_u128(compact), *ip);
// }
// }
//
// #[test]
// fn compact_space_amplitude_test() {
// let ips = &[100000u128, 1000000].into_iter().collect();
// let compact_space = get_compact_space(ips, ips.len() as u32, 1);
// let amplitude = compact_space.amplitude_compact_space();
// assert_eq!(amplitude, 2);
// }
//
// fn test_all(mut data: OwnedBytes, expected: &[u128]) {
// let _header = U128Header::deserialize(&mut data);
// let decompressor = CompactSpaceDecompressor::open(data).unwrap();
// for (idx, expected_val) in expected.iter().cloned().enumerate() {
// let val = decompressor.get(idx as u32);
// assert_eq!(val, expected_val);
//
// let test_range = |range: RangeInclusive<u128>| {
// let expected_positions = expected
// .iter()
// .positions(|val| range.contains(val))
// .map(|pos| pos as u32)
// .collect::<Vec<_>>();
// let mut positions = Vec::new();
// decompressor.get_positions_for_value_range(
// range,
// 0..decompressor.num_vals(),
// &mut positions,
// );
// assert_eq!(positions, expected_positions);
// };
//
// test_range(expected_val.saturating_sub(1)..=expected_val);
// test_range(expected_val..=expected_val);
// test_range(expected_val..=expected_val.saturating_add(1));
// test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
// }
// }
//
// fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
// let mut out = Vec::new();
// serialize_u128(
// || u128_vals.iter().cloned(),
// u128_vals.len() as u32,
// &mut out,
// )
// .unwrap();
//
// let data = OwnedBytes::new(out);
// let (data, _format_version) = read_format_version(data).unwrap();
// let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
// test_all(data.clone(), u128_vals);
//
// data
// }
//
// #[test]
// fn test_range_1() {
// let vals = &[
// 1u128,
// 100u128,
// 3u128,
// 99999u128,
// 100000u128,
// 100001u128,
// 4_000_211_221u128,
// 4_000_211_222u128,
// 333u128,
// ];
// let mut data = test_aux_vals(vals);
//
// let _header = U128Header::deserialize(&mut data);
// let decomp = CompactSpaceDecompressor::open(data).unwrap();
// let complete_range = 0..vals.len() as u32;
// for (pos, val) in vals.iter().enumerate() {
// let val = *val;
// let pos = pos as u32;
// let mut positions = Vec::new();
// decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
// assert_eq!(positions, vec![pos]);
// }
//
// handle docid range out of bounds
// let positions: Vec<u32> = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
// assert!(positions.is_empty());
//
// let positions =
// get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
// assert_eq!(positions, vec![0]);
// let positions =
// get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
// assert_eq!(positions, vec![0]);
// let positions =
// get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
// assert_eq!(positions, vec![0, 2]);
// assert_eq!(
// get_positions_for_value_range_helper(
// &decomp,
// 99999u128..=99999u128,
// complete_range.clone()
// ),
// vec![3]
// );
// assert_eq!(
// get_positions_for_value_range_helper(
// &decomp,
// 99999u128..=100000u128,
// complete_range.clone()
// ),
// vec![3, 4]
// );
// assert_eq!(
// get_positions_for_value_range_helper(
// &decomp,
// 99998u128..=100000u128,
// complete_range.clone()
// ),
// vec![3, 4]
// );
// assert_eq!(
// &get_positions_for_value_range_helper(
// &decomp,
// 99998u128..=99999u128,
// complete_range.clone()
// ),
// &[3]
// );
// assert!(get_positions_for_value_range_helper(
// &decomp,
// 99998u128..=99998u128,
// complete_range.clone()
// )
// .is_empty());
// assert_eq!(
// &get_positions_for_value_range_helper(
// &decomp,
// 333u128..=333u128,
// complete_range.clone()
// ),
// &[8]
// );
// assert_eq!(
// &get_positions_for_value_range_helper(
// &decomp,
// 332u128..=333u128,
// complete_range.clone()
// ),
// &[8]
// );
// assert_eq!(
// &get_positions_for_value_range_helper(
// &decomp,
// 332u128..=334u128,
// complete_range.clone()
// ),
// &[8]
// );
// assert_eq!(
// &get_positions_for_value_range_helper(
// &decomp,
// 333u128..=334u128,
// complete_range.clone()
// ),
// &[8]
// );
//
// assert_eq!(
// &get_positions_for_value_range_helper(
// &decomp,
// 4_000_211_221u128..=5_000_000_000u128,
// complete_range
// ),
// &[6, 7]
// );
// }
//
// #[test]
// fn test_empty() {
// let vals = &[];
// let data = test_aux_vals(vals);
// let _decomp = CompactSpaceDecompressor::open(data).unwrap();
// }
//
// #[test]
// fn test_range_2() {
// let vals = &[
// 100u128,
// 99999u128,
// 100000u128,
// 100001u128,
// 4_000_211_221u128,
// 4_000_211_222u128,
// 333u128,
// ];
// let mut data = test_aux_vals(vals);
// let _header = U128Header::deserialize(&mut data);
// let decomp = CompactSpaceDecompressor::open(data).unwrap();
// let complete_range = 0..vals.len() as u32;
// assert!(
// &get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone())
// .is_empty(),
// );
// assert_eq!(
// &get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
// &[0]
// );
// assert_eq!(
// &get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
// &[0]
// );
// }
//
// fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
// column: &C,
// value_range: RangeInclusive<T>,
// doc_id_range: Range<u32>,
// ) -> Vec<u32> {
// let mut positions = Vec::new();
// column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
// positions
// }
//
// #[test]
// fn test_range_3() {
// let vals = &[
// 200u128,
// 201,
// 202,
// 203,
// 204,
// 204,
// 206,
// 207,
// 208,
// 209,
// 210,
// 1_000_000,
// 5_000_000_000,
// ];
// let mut out = Vec::new();
// serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
// let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
// let complete_range = 0..vals.len() as u32;
//
// assert_eq!(
// get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
// vec![0]
// );
//
// assert_eq!(
// get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
// vec![0, 1]
// );
//
// assert_eq!(
// get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
// vec![0]
// );
//
// assert_eq!(
// get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
// vec![11]
// );
// }
//
// #[test]
// fn test_bug1() {
// let vals = &[9223372036854775806];
// let _data = test_aux_vals(vals);
// }
//
// #[test]
// fn test_bug2() {
// let vals = &[340282366920938463463374607431768211455u128];
// let _data = test_aux_vals(vals);
// }
//
// #[test]
// fn test_bug3() {
// let vals = &[340282366920938463463374607431768211454];
// let _data = test_aux_vals(vals);
// }
//
// #[test]
// fn test_bug4() {
// let vals = &[340282366920938463463374607431768211455, 0];
// let _data = test_aux_vals(vals);
// }
//
// #[test]
// fn test_first_large_gaps() {
// let vals = &[1_000_000_000u128; 100];
// let _data = test_aux_vals(vals);
// }
// use itertools::Itertools;
// use proptest::prelude::*;
//
// fn num_strategy() -> impl Strategy<Value = u128> {
// prop_oneof![
// 1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
// 1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
// 1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
// 1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
// 20 => prop::num::u128::ANY,
// ]
// }
//
// proptest! {
// #![proptest_config(ProptestConfig::with_cases(10))]
//
// #[test]
// fn compress_decompress_random(vals in proptest::collection::vec(num_strategy()
// , 1..1000)) {
// let _data = test_aux_vals(&vals);
// }
// }
// }
//
#[cfg(test)]
mod tests {

use itertools::Itertools;

use super::*;
use crate::column_values::serialize::U128Header;
use crate::column_values::{open_u128_mapped, serialize_column_values_u128};

#[test]
fn compact_space_test() {
let ips = &[
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
]
.into_iter()
.collect();
let compact_space = get_compact_space(ips, ips.len() as u32, 11);
let amplitude = compact_space.amplitude_compact_space();
assert_eq!(amplitude, 17);
assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);

for (num1, num2) in (0..3).tuple_windows() {
assert_eq!(
compact_space.get_range_mapping(num1).compact_end() + 1,
compact_space.get_range_mapping(num2).compact_start
);
}

let mut output: Vec<u8> = Vec::new();
compact_space.serialize(&mut output).unwrap();

assert_eq!(
compact_space,
CompactSpace::deserialize(&mut &output[..]).unwrap()
);

for ip in ips {
let compact = compact_space.u128_to_compact(*ip).unwrap();
assert_eq!(compact_space.compact_to_u128(compact), *ip);
}
}

#[test]
fn compact_space_amplitude_test() {
let ips = &[100000u128, 1000000].into_iter().collect();
let compact_space = get_compact_space(ips, ips.len() as u32, 1);
let amplitude = compact_space.amplitude_compact_space();
assert_eq!(amplitude, 2);
}

fn test_all(mut data: OwnedBytes, expected: &[u128]) {
let _header = U128Header::deserialize(&mut data);
let decompressor = CompactSpaceDecompressor::open(data).unwrap();
for (idx, expected_val) in expected.iter().cloned().enumerate() {
let val = decompressor.get(idx as u32);
assert_eq!(val, expected_val);

let test_range = |range: RangeInclusive<u128>| {
let expected_positions = expected
.iter()
.positions(|val| range.contains(val))
.map(|pos| pos as u32)
.collect::<Vec<_>>();
let mut positions = Vec::new();
decompressor.get_positions_for_value_range(
range,
0..decompressor.num_vals(),
&mut positions,
);
assert_eq!(positions, expected_positions);
};

test_range(expected_val.saturating_sub(1)..=expected_val);
test_range(expected_val..=expected_val);
test_range(expected_val..=expected_val.saturating_add(1));
test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
}
}

fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
let mut out = Vec::new();
serialize_column_values_u128(&u128_vals, &mut out).unwrap();
let data = OwnedBytes::new(out);
test_all(data.clone(), u128_vals);
data
}

#[test]
fn test_range_1() {
let vals = &[
1u128,
100u128,
3u128,
99999u128,
100000u128,
100001u128,
4_000_211_221u128,
4_000_211_222u128,
333u128,
];
let mut data = test_aux_vals(vals);

let _header = U128Header::deserialize(&mut data);
let decomp = CompactSpaceDecompressor::open(data).unwrap();
let complete_range = 0..vals.len() as u32;
for (pos, val) in vals.iter().enumerate() {
let val = *val;
let pos = pos as u32;
let mut positions = Vec::new();
decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
assert_eq!(positions, vec![pos]);
}

// handle docid range out of bounds
let positions: Vec<u32> = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
assert!(positions.is_empty());

let positions =
get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
assert_eq!(positions, vec![0]);
let positions =
get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
assert_eq!(positions, vec![0]);
let positions =
get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
assert_eq!(positions, vec![0, 2]);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99999u128..=99999u128,
complete_range.clone()
),
vec![3]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99999u128..=100000u128,
complete_range.clone()
),
vec![3, 4]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99998u128..=100000u128,
complete_range.clone()
),
vec![3, 4]
);
assert_eq!(
&get_positions_for_value_range_helper(
&decomp,
99998u128..=99999u128,
complete_range.clone()
),
&[3]
);
assert!(get_positions_for_value_range_helper(
&decomp,
99998u128..=99998u128,
complete_range.clone()
)
.is_empty());
assert_eq!(
&get_positions_for_value_range_helper(
&decomp,
333u128..=333u128,
complete_range.clone()
),
&[8]
);
assert_eq!(
&get_positions_for_value_range_helper(
&decomp,
332u128..=333u128,
complete_range.clone()
),
&[8]
);
assert_eq!(
&get_positions_for_value_range_helper(
&decomp,
332u128..=334u128,
complete_range.clone()
),
&[8]
);
assert_eq!(
&get_positions_for_value_range_helper(
&decomp,
333u128..=334u128,
complete_range.clone()
),
&[8]
);

assert_eq!(
&get_positions_for_value_range_helper(
&decomp,
4_000_211_221u128..=5_000_000_000u128,
complete_range
),
&[6, 7]
);
}

#[test]
fn test_empty() {
let vals = &[];
let data = test_aux_vals(vals);
let _decomp = CompactSpaceDecompressor::open(data).unwrap();
}

#[test]
fn test_range_2() {
let vals = &[
100u128,
99999u128,
100000u128,
100001u128,
4_000_211_221u128,
4_000_211_222u128,
333u128,
];
let mut data = test_aux_vals(vals);
let _header = U128Header::deserialize(&mut data);
let decomp = CompactSpaceDecompressor::open(data).unwrap();
let complete_range = 0..vals.len() as u32;
assert!(
&get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone())
.is_empty(),
);
assert_eq!(
&get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
&[0]
);
assert_eq!(
&get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
&[0]
);
}

fn get_positions_for_value_range_helper<C: ColumnValues<T> + ?Sized, T: PartialOrd>(
column: &C,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
) -> Vec<u32> {
let mut positions = Vec::new();
column.get_row_ids_for_value_range(value_range, doc_id_range, &mut positions);
positions
}

#[test]
fn test_range_3() {
let vals = &[
200u128,
201,
202,
203,
204,
204,
206,
207,
208,
209,
210,
1_000_000,
5_000_000_000,
];
let mut out = Vec::new();
serialize_column_values_u128(&&vals[..], &mut out).unwrap();
let decomp = open_u128_mapped(OwnedBytes::new(out)).unwrap();
let complete_range = 0..vals.len() as u32;

assert_eq!(
get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
vec![0]
);

assert_eq!(
get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
vec![0, 1]
);

assert_eq!(
get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
vec![0]
);

assert_eq!(
get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
vec![11]
);
}

#[test]
fn test_bug1() {
let vals = &[9223372036854775806];
let _data = test_aux_vals(vals);
}

#[test]
fn test_bug2() {
let vals = &[340282366920938463463374607431768211455u128];
let _data = test_aux_vals(vals);
}

#[test]
fn test_bug3() {
let vals = &[340282366920938463463374607431768211454];
let _data = test_aux_vals(vals);
}

#[test]
fn test_bug4() {
let vals = &[340282366920938463463374607431768211455, 0];
let _data = test_aux_vals(vals);
}

#[test]
fn test_first_large_gaps() {
let vals = &[1_000_000_000u128; 100];
let _data = test_aux_vals(vals);
}

use proptest::prelude::*;

fn num_strategy() -> impl Strategy<Value = u128> {
prop_oneof![
1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
20 => prop::num::u128::ANY,
]
}

proptest! {
#![proptest_config(ProptestConfig::with_cases(10))]

#[test]
fn compress_decompress_random(vals in proptest::collection::vec(num_strategy() , 1..1000)) {
let _data = test_aux_vals(&vals);
}
}
}

@@ -1,222 +0,0 @@
#[macro_use]
extern crate prettytable;
use std::collections::HashSet;
use std::env;
use std::io::BufRead;
use std::net::{IpAddr, Ipv6Addr};
use std::str::FromStr;

use common::OwnedBytes;
use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
use itertools::Itertools;
use measure_time::print_time;
use prettytable::{Cell, Row, Table};

fn print_set_stats(ip_addrs: &[u128]) {
println!("NumIps\t{}", ip_addrs.len());
let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
println!("NumUniqueIps\t{}", ip_addr_set.len());
let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
println!("RatioUniqueOverTotal\t{ratio_unique:.4}");

// histogram
let mut ip_addrs = ip_addrs.to_vec();
ip_addrs.sort();
let mut cnts: Vec<usize> = ip_addrs
.into_iter()
.dedup_with_count()
.map(|(cnt, _)| cnt)
.collect();
cnts.sort();

let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
let total: usize = cnts.iter().sum();

println!("{}", total);
println!("{}", top_256_cnt);
println!("{}", top_128_cnt);
println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);

let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
cnts.sort_by(|a, b| {
if a.1 == b.1 {
a.0.cmp(&b.0)
} else {
b.1.cmp(&a.1)
}
});
}

fn ip_dataset() -> Vec<u128> {
let mut ip_addr_v4 = 0;

let stdin = std::io::stdin();
let ip_addrs: Vec<u128> = stdin
.lock()
.lines()
.flat_map(|line| {
let line = line.unwrap();
let line = line.trim();
let ip_addr = IpAddr::from_str(line.trim()).ok()?;
if ip_addr.is_ipv4() {
ip_addr_v4 += 1;
}
let ip_addr_v6: Ipv6Addr = match ip_addr {
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
IpAddr::V6(v6) => v6,
};
Some(ip_addr_v6)
})
.map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
.collect();

println!("IpAddrsAny\t{}", ip_addrs.len());
println!("IpAddrsV4\t{}", ip_addr_v4);

ip_addrs
}

fn bench_ip() {
let dataset = ip_dataset();
print_set_stats(&dataset);

// Chunks
{
let mut data = vec![];
for dataset in dataset.chunks(500_000) {
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
}
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
println!("Compression 50_000 chunks {:.4}", compression);
println!(
"Num Bits per elem {:.2}",
(data.len() * 8) as f32 / dataset.len() as f32
);
}

let mut data = vec![];
{
print_time!("creation");
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
}

let compression = data.len() as f64 / (dataset.len() * 16) as f64;
println!("Compression {:.2}", compression);
println!(
"Num Bits per elem {:.2}",
(data.len() * 8) as f32 / dataset.len() as f32
);

let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
// Sample some ranges
let mut doc_values = Vec::new();
for value in dataset.iter().take(1110).skip(1100).cloned() {
doc_values.clear();
print_time!("get range");
decompressor.get_docids_for_value_range(
value..=value,
0..decompressor.num_vals(),
&mut doc_values,
);
println!("{:?}", doc_values.len());
}
}

fn main() {
if env::args().nth(1).unwrap() == "bench_ip" {
bench_ip();
return;
}

let mut table = Table::new();

// Add a row per time
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);

for (data, data_set_name) in get_codec_test_data_sets() {
let results: Vec<(f32, f32, FastFieldCodecType)> = [
serialize_with_codec(&data, FastFieldCodecType::Bitpacked),
serialize_with_codec(&data, FastFieldCodecType::Linear),
serialize_with_codec(&data, FastFieldCodecType::BlockwiseLinear),
]
.into_iter()
.flatten()
.collect();
let best_compression_ratio_codec = results
.iter()
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
.cloned()
.unwrap();

table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
for (est, comp, codec_type) in results {
let est_cell = est.to_string();
let ratio_cell = comp.to_string();
let style = if comp == best_compression_ratio_codec.1 {
"Fb"
} else {
""
};
table.add_row(Row::new(vec![
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""),
]));
}
}

table.printstd();
}

pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![];

let data = (1000..=200_000_u64).collect::<Vec<_>>();
data_and_names.push((data, "Autoincrement"));

let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (num as f32 + num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
// let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing concave"));

let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (200_000.0 - num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing convex"));

let data = (1000..=200_000_u64)
.map(|num| num + rand::random::<u8>() as u64)
.collect::<Vec<_>>();
data_and_names.push((data, "Almost monotonically increasing"));

data_and_names
}

pub fn serialize_with_codec(
data: &[u64],
codec_type: FastFieldCodecType,
) -> Option<(f32, f32, FastFieldCodecType)> {
let col = VecColumn::from(data);
let estimation = fastfield_codecs::estimate(&col, codec_type)?;
let mut out = Vec::new();
fastfield_codecs::serialize(&col, &mut out, &[codec_type]).ok()?;
let actual_compression = out.len() as f32 / (col.num_vals() * 8) as f32;
Some((estimation, actual_compression, codec_type))
}
@@ -1,5 +1,4 @@
#![warn(missing_docs)]
#![cfg_attr(all(feature = "unstable", test), feature(test))]

//! # `fastfield_codecs`
//!
@@ -26,16 +25,58 @@ mod stats;
pub(crate) mod u64_based;

mod column;
pub mod serialize;
pub(crate) mod serialize;

pub use serialize::serialize_column_values_u128;
pub use stats::Stats;
pub use stats::ColumnStats;
pub use u64_based::{
load_u64_based_column_values, serialize_and_load_u64_based_column_values,
serialize_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
};

pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn};
use crate::iterable::Iterable;
use crate::{ColumnIndex, MergeRowOrder};

pub(crate) struct MergedColumnValues<'a, T> {
pub(crate) column_indexes: &'a [Option<ColumnIndex>],
pub(crate) column_values: &'a [Option<Arc<dyn ColumnValues<T>>>],
pub(crate) merge_row_order: &'a MergeRowOrder,
}

impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
match self.merge_row_order {
MergeRowOrder::Stack(_) => {
Box::new(self
.column_values
.iter()
.flatten()
.flat_map(|column_value| column_value.iter()))
},
MergeRowOrder::Shuffled(shuffle_merge_order) => {
Box::new(shuffle_merge_order
.iter_new_to_old_row_addrs()
.flat_map(|row_addr| {
let Some(column_index) = self.column_indexes[row_addr.segment_ord as usize].as_ref() else {
return None;
};
let Some(column_values) = self.column_values[row_addr.segment_ord as usize].as_ref() else {
return None;
};
let value_range = column_index.value_row_ids(row_addr.row_id);
Some((value_range, column_values))
})
.flat_map(|(value_range, column_values)| {
value_range
.into_iter()
.map(|val| column_values.get_val(val))
})
)
},
}
}
}
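// A minimal usage sketch, assuming a `merged: MergedColumnValues<u64>` built
// from the segments being merged: the serializer only ever consumes it through
// `Iterable::boxed_iter`, in the row order dictated by `merge_row_order`.
//
//     for val in merged.boxed_iter() {
//         // feed `val` to the codec estimator / serializer
//     }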

#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)]
@@ -95,6 +136,7 @@ mod bench {
use test::{self, Bencher};

use super::*;
use crate::column_values::u64_based::*;

fn get_data() -> Vec<u64> {
let mut rng = StdRng::seed_from_u64(2u64);
@@ -110,23 +152,30 @@ mod bench {
data
}

fn compute_stats(vals: impl Iterator<Item = u64>) -> ColumnStats {
let mut stats_collector = StatsCollector::default();
for val in vals {
stats_collector.collect(val);
}
stats_collector.stats()
}

#[inline(never)]
fn value_iter() -> impl Iterator<Item = u64> {
0..20_000
}
fn get_reader_for_bench<Codec: FastFieldCodec>(data: &[u64]) -> Codec::Reader {
fn get_reader_for_bench<Codec: ColumnCodec>(data: &[u64]) -> Codec::ColumnValues {
let mut bytes = Vec::new();
let min_value = *data.iter().min().unwrap();
let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
let col = VecColumn::from(&data);
let normalized_header = NormalizedHeader {
num_vals: col.num_vals(),
max_value: col.max_value(),
};
Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
Codec::open_from_bytes(OwnedBytes::new(bytes), normalized_header).unwrap()
let stats = compute_stats(data.iter().cloned());
let mut codec_serializer = Codec::estimator();
for val in data {
codec_serializer.collect(*val);
}
codec_serializer.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes);

Codec::load(OwnedBytes::new(bytes)).unwrap()
}
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
fn bench_get<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
let col = get_reader_for_bench::<Codec>(data);
b.iter(|| {
let mut sum = 0u64;
@@ -150,18 +199,22 @@ mod bench {
});
}

fn bench_get_dynamic<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
fn bench_get_dynamic<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
let col = Arc::new(get_reader_for_bench::<Codec>(data));
bench_get_dynamic_helper(b, col);
}
fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
let min_value = *data.iter().min().unwrap();
let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
fn bench_create<Codec: ColumnCodec>(b: &mut Bencher, data: &[u64]) {
let stats = compute_stats(data.iter().cloned());

let mut bytes = Vec::new();
b.iter(|| {
bytes.clear();
Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
let mut codec_serializer = Codec::estimator();
for val in data.iter().take(1024) {
codec_serializer.collect(*val);
}

codec_serializer.serialize(&stats, Box::new(data.iter().copied()).as_mut(), &mut bytes)
});
}


@@ -1,6 +1,7 @@
use std::fmt::Debug;
use std::marker::PhantomData;

use common::DateTime;
use fastdivide::DividerU64;

use super::MonotonicallyMappableToU128;
@@ -122,6 +123,7 @@ pub(crate) struct StrictlyMonotonicMappingToInternalGCDBaseval {
min_value: u64,
}
impl StrictlyMonotonicMappingToInternalGCDBaseval {
/// Creates a linear mapping `x -> gcd*x + min_value`.
pub(crate) fn new(gcd: u64, min_value: u64) -> Self {
let gcd_divider = DividerU64::divide_by(gcd);
Self {
@@ -150,7 +152,9 @@ impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
min_value: u64,
}

impl StrictlyMonotonicMappingToInternalBaseval {
/// Creates a linear mapping `x -> x + min_value`.
#[inline(always)]
pub(crate) fn new(min_value: u64) -> Self {
Self { min_value }
@@ -195,17 +199,15 @@ impl MonotonicallyMappableToU64 for i64 {
}
}

impl MonotonicallyMappableToU64 for crate::DateTime {
impl MonotonicallyMappableToU64 for DateTime {
#[inline(always)]
fn to_u64(self) -> u64 {
common::i64_to_u64(self.timestamp_micros)
common::i64_to_u64(self.into_timestamp_micros())
}

#[inline(always)]
fn from_u64(val: u64) -> Self {
crate::DateTime {
timestamp_micros: common::u64_to_i64(val),
}
DateTime::from_timestamp_micros(common::u64_to_i64(val))
}
}
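// A minimal sketch of the round-trip this mapping is meant to guarantee,
// assuming `common::i64_to_u64` and `common::u64_to_i64` are inverses:
//
//     let dt = DateTime::from_timestamp_micros(1_000_000);
//     assert_eq!(DateTime::from_u64(dt.to_u64()), dt);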


@@ -8,19 +8,6 @@ use crate::column_values::U128FastFieldCodecType;
use crate::iterable::Iterable;
use crate::MonotonicallyMappableToU128;

/// The normalized header gives some parameters after applying the following
/// normalization of the vector:
/// `val -> (val - min_value) / gcd`
///
/// By design, after normalization, `min_value = 0` and `gcd = 1`.
#[derive(Debug, Copy, Clone)]
pub struct NormalizedHeader {
/// The number of values in the underlying column.
pub num_vals: u32,
/// The max value of the underlying column.
pub max_value: u64,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) struct U128Header {
pub num_vals: u32,
@@ -47,20 +34,18 @@ impl BinarySerializable for U128Header {
/// Serializes u128 values with the compact space codec.
pub fn serialize_column_values_u128<T: MonotonicallyMappableToU128>(
iterable: &dyn Iterable<T>,
num_vals: u32,
output: &mut impl io::Write,
) -> io::Result<()> {
let header = U128Header {
num_vals,
codec_type: U128FastFieldCodecType::CompactSpace,
};
header.serialize(output)?;
let compressor = CompactSpaceCompressor::train_from(
iterable
.boxed_iter()
.map(MonotonicallyMappableToU128::to_u128),
num_vals,
);
let header = U128Header {
num_vals: compressor.num_vals(),
codec_type: U128FastFieldCodecType::CompactSpace,
};
header.serialize(output)?;
compressor.compress_into(
iterable
.boxed_iter()
@@ -74,7 +59,7 @@ pub fn serialize_column_values_u128<T: MonotonicallyMappableToU128>(
pub mod tests {
use super::*;
use crate::column_values::u64_based::{
self, serialize_and_load_u64_based_column_values, serialize_u64_based_column_values,
serialize_and_load_u64_based_column_values, serialize_u64_based_column_values,
ALL_U64_CODEC_TYPES,
};
use crate::column_values::CodecType;

@@ -6,21 +6,28 @@ use common::{BinarySerializable, VInt};

use crate::RowId;

/// Column statistics.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Stats {
pub struct ColumnStats {
/// GCD of the elements `el - min(column)`.
pub gcd: NonZeroU64,
/// Minimum value of the column.
pub min_value: u64,
/// Maximum value of the column.
pub max_value: u64,
/// Number of rows in the column.
pub num_rows: RowId,
}

impl Stats {
impl ColumnStats {
/// Amplitude of the column values:
/// difference between the maximum and the minimum value.
pub fn amplitude(&self) -> u64 {
self.max_value - self.min_value
}
}
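// A worked example of these stats, using the values from the serialization
// test below: with min_value = 1, max_value = 3001 and gcd = 3, every value is
// normalized as (val - min_value) / gcd, so a codec only has to cover
// amplitude() / gcd = 3000 / 3 = 1000 distinct steps.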

impl BinarySerializable for Stats {
impl BinarySerializable for ColumnStats {
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
VInt(self.min_value).serialize(writer)?;
VInt(self.gcd.get()).serialize(writer)?;
@@ -37,7 +44,7 @@ impl BinarySerializable for Stats {
let amplitude = VInt::deserialize(reader)?.0 * gcd.get();
let max_value = min_value + amplitude;
let num_rows = VInt::deserialize(reader)?.0 as RowId;
Ok(Stats {
Ok(ColumnStats {
min_value,
max_value,
num_rows,
@@ -52,21 +59,21 @@ mod tests {

use common::BinarySerializable;

use crate::column_values::Stats;
use crate::column_values::ColumnStats;

#[track_caller]
fn test_stats_ser_deser_aux(stats: &Stats, num_bytes: usize) {
fn test_stats_ser_deser_aux(stats: &ColumnStats, num_bytes: usize) {
let mut buffer: Vec<u8> = Vec::new();
stats.serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), num_bytes);
let deser_stats = Stats::deserialize(&mut &buffer[..]).unwrap();
let deser_stats = ColumnStats::deserialize(&mut &buffer[..]).unwrap();
assert_eq!(stats, &deser_stats);
}

#[test]
fn test_stats_serialization() {
test_stats_ser_deser_aux(
&(Stats {
&(ColumnStats {
gcd: NonZeroU64::new(3).unwrap(),
min_value: 1,
max_value: 3001,
@@ -75,7 +82,7 @@ mod tests {
5,
);
test_stats_ser_deser_aux(
&(Stats {
&(ColumnStats {
gcd: NonZeroU64::new(1_000).unwrap(),
min_value: 1,
max_value: 3001,
@@ -84,7 +91,7 @@ mod tests {
5,
);
test_stats_ser_deser_aux(
&(Stats {
&(ColumnStats {
gcd: NonZeroU64::new(1).unwrap(),
min_value: 0,
max_value: 0,

@@ -4,7 +4,7 @@ use common::{BinarySerializable, OwnedBytes};
use fastdivide::DividerU64;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, ColumnStats};
use crate::{ColumnValues, RowId};

/// Depending on the field type, a different
@@ -13,7 +13,7 @@ use crate::{ColumnValues, RowId};
pub struct BitpackedReader {
data: OwnedBytes,
bit_unpacker: BitUnpacker,
stats: Stats,
stats: ColumnStats,
}

impl ColumnValues for BitpackedReader {
@@ -36,7 +36,7 @@ impl ColumnValues for BitpackedReader {
}
}

fn num_bits(stats: &Stats) -> u8 {
fn num_bits(stats: &ColumnStats) -> u8 {
compute_num_bits(stats.amplitude() / stats.gcd)
}

@@ -46,14 +46,14 @@ pub struct BitpackedCodecEstimator;
impl ColumnCodecEstimator for BitpackedCodecEstimator {
fn collect(&mut self, _value: u64) {}

fn estimate(&self, stats: &Stats) -> Option<u64> {
fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
let num_bits_per_value = num_bits(stats);
Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64) + 7) / 8)
}
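// A worked instance of the estimate above, which appears to compute
// `serialized stats size + ceil(num_rows * num_bits_per_value / 8)`:
// for num_rows = 1000 and num_bits_per_value = 5, the bitpacked payload is
// (1000 * 5 + 7) / 8 = 625 bytes, plus the stats header.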

fn serialize(
&self,
stats: &Stats,
stats: &ColumnStats,
vals: &mut dyn Iterator<Item = u64>,
wrt: &mut dyn Write,
) -> io::Result<()> {
@@ -72,12 +72,12 @@ impl ColumnCodecEstimator for BitpackedCodecEstimator {
pub struct BitpackedCodec;

impl ColumnCodec for BitpackedCodec {
type Reader = BitpackedReader;
type ColumnValues = BitpackedReader;
type Estimator = BitpackedCodecEstimator;

/// Opens a fast field given a file.
fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
let stats = Stats::deserialize(&mut data)?;
fn load(mut data: OwnedBytes) -> io::Result<Self::ColumnValues> {
let stats = ColumnStats::deserialize(&mut data)?;
let num_bits = num_bits(&stats);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedReader {

@@ -7,7 +7,7 @@ use fastdivide::DividerU64;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::column_values::u64_based::line::Line;
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, ColumnStats};
use crate::column_values::{ColumnValues, VecColumn};
use crate::MonotonicallyMappableToU64;

@@ -84,7 +84,7 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
self.block.clear();
}
}
fn estimate(&self, stats: &Stats) -> Option<u64> {
fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
let mut estimate = 4 + stats.num_bytes() + self.meta_num_bytes + self.values_num_bytes;
if stats.gcd.get() > 1 {
let estimate_gain_from_gcd =
@@ -100,7 +100,7 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {

fn serialize(
&self,
stats: &Stats,
stats: &ColumnStats,
mut vals: &mut dyn Iterator<Item = u64>,
wrt: &mut dyn Write,
) -> io::Result<()> {
@@ -165,12 +165,12 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
pub struct BlockwiseLinearCodec;

impl ColumnCodec<u64> for BlockwiseLinearCodec {
type Reader = BlockwiseLinearReader;
type ColumnValues = BlockwiseLinearReader;

type Estimator = BlockwiseLinearEstimator;

fn load(mut bytes: OwnedBytes) -> io::Result<Self::Reader> {
let stats = Stats::deserialize(&mut bytes)?;
fn load(mut bytes: OwnedBytes) -> io::Result<Self::ColumnValues> {
let stats = ColumnStats::deserialize(&mut bytes)?;
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let footer_offset = bytes.len() - 4 - footer_len as usize;
let (data, mut footer) = bytes.split(footer_offset);
@@ -195,7 +195,7 @@ impl ColumnCodec<u64> for BlockwiseLinearCodec {
pub struct BlockwiseLinearReader {
blocks: Arc<[Block]>,
data: OwnedBytes,
stats: Stats,
stats: ColumnStats,
}

impl ColumnValues for BlockwiseLinearReader {

@@ -67,19 +67,6 @@ impl Line {
self.intercept.wrapping_add(linear_part)
}

// Same as train, but the intercept is only estimated from provided sample positions
pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
let first_val = sample_positions_and_values[0].1;
let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
Self::train_from(
first_val,
last_val,
num_vals as u32,
sample_positions_and_values.iter().cloned(),
)
}

// Intercept is only computed from provided positions
pub fn train_from(
first_val: u64,

@@ -5,9 +5,9 @@ use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use super::line::Line;
use super::ColumnValues;
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, ColumnStats};
use crate::column_values::VecColumn;
use crate::{MonotonicallyMappableToU64, RowId};
use crate::RowId;

const HALF_SPACE: u64 = u64::MAX / 2;
const LINE_ESTIMATION_BLOCK_LEN: usize = 512;
@@ -18,7 +18,7 @@ const LINE_ESTIMATION_BLOCK_LEN: usize = 512;
pub struct LinearReader {
data: OwnedBytes,
linear_params: LinearParams,
stats: Stats,
stats: ColumnStats,
}

impl ColumnValues for LinearReader {
@@ -106,7 +106,7 @@ impl ColumnCodecEstimator for LinearCodecEstimator {
}
}

fn estimate(&self, stats: &Stats) -> Option<u64> {
fn estimate(&self, stats: &ColumnStats) -> Option<u64> {
let line = self.line?;
let amplitude = self.max_deviation - self.min_deviation;
let num_bits = compute_num_bits(amplitude);
@@ -123,7 +123,7 @@ impl ColumnCodecEstimator for LinearCodecEstimator {

fn serialize(
&self,
stats: &Stats,
stats: &ColumnStats,
vals: &mut dyn Iterator<Item = u64>,
wrt: &mut dyn io::Write,
) -> io::Result<()> {
@@ -184,12 +184,12 @@ impl LinearCodecEstimator {
}

impl ColumnCodec for LinearCodec {
type Reader = LinearReader;
type ColumnValues = LinearReader;

type Estimator = LinearCodecEstimator;

fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
let stats = Stats::deserialize(&mut data)?;
fn load(mut data: OwnedBytes) -> io::Result<Self::ColumnValues> {
let stats = ColumnStats::deserialize(&mut data)?;
let linear_params = LinearParams::deserialize(&mut data)?;
Ok(LinearReader {
stats,

@@ -13,35 +13,61 @@ use common::{BinarySerializable, OwnedBytes};
use crate::column_values::monotonic_mapping::{
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
};
use crate::column_values::u64_based::bitpacked::BitpackedCodec;
use crate::column_values::u64_based::blockwise_linear::BlockwiseLinearCodec;
use crate::column_values::u64_based::linear::LinearCodec;
use crate::column_values::u64_based::stats_collector::StatsCollector;
use crate::column_values::{monotonic_map_column, Stats};
pub use crate::column_values::u64_based::bitpacked::BitpackedCodec;
pub use crate::column_values::u64_based::blockwise_linear::BlockwiseLinearCodec;
pub use crate::column_values::u64_based::linear::LinearCodec;
pub use crate::column_values::u64_based::stats_collector::StatsCollector;
use crate::column_values::{monotonic_map_column, ColumnStats};
use crate::iterable::Iterable;
use crate::{ColumnValues, MonotonicallyMappableToU64};

/// A `ColumnCodecEstimator` is in charge of gathering all
/// data required to serialize a column.
///
/// This happens during a first pass on data of the column elements.
/// During that pass, all column estimators receive a call to their
/// `.collect(el)`.
///
/// After this first pass, finalize is called.
/// `.estimate(..)` then should return an accurate estimation of the
/// size of the serialized column (were we to pick this codec.).
|
||||
/// `.serialize(..)` then serializes the column using this codec.
|
||||
pub trait ColumnCodecEstimator<T = u64>: 'static {
|
||||
/// Records a new value for estimation.
|
||||
/// This method will be called for each element of the column during
|
||||
/// `estimation`.
|
||||
fn collect(&mut self, value: u64);
|
||||
fn estimate(&self, stats: &Stats) -> Option<u64>;
|
||||
/// Finalizes the first pass phase.
|
||||
fn finalize(&mut self) {}
|
||||
/// Returns an accurate estimation of the number of bytes that will
|
||||
/// be used to represent this column.
|
||||
fn estimate(&self, stats: &ColumnStats) -> Option<u64>;
|
||||
/// Serializes the column using the given codec.
|
||||
/// This constitutes a second pass over the columns values.
|
||||
fn serialize(
|
||||
&self,
|
||||
stats: &Stats,
|
||||
stats: &ColumnStats,
|
||||
vals: &mut dyn Iterator<Item = T>,
|
||||
wrt: &mut dyn io::Write,
|
||||
) -> io::Result<()>;
|
||||
}
|
||||
|
||||
/// A column codec describes a colunm serialization format.
|
||||
pub trait ColumnCodec<T: PartialOrd = u64> {
|
||||
type Reader: ColumnValues<T> + 'static;
|
||||
/// Specialized `ColumnValues` type.
|
||||
type ColumnValues: ColumnValues<T> + 'static;
|
||||
/// `Estimator` for the given codec.
|
||||
type Estimator: ColumnCodecEstimator + Default;
|
||||
|
||||
fn load(bytes: OwnedBytes) -> io::Result<Self::Reader>;
|
||||
/// Loads a column that has been serialized using this codec.
|
||||
fn load(bytes: OwnedBytes) -> io::Result<Self::ColumnValues>;
|
||||
|
||||
/// Returns an estimator.
|
||||
fn estimator() -> Self::Estimator {
|
||||
Self::Estimator::default()
|
||||
}
|
||||
|
||||
/// Returns a boxed estimator.
|
||||
fn boxed_estimator() -> Box<dyn ColumnCodecEstimator> {
|
||||
Box::new(Self::estimator())
|
||||
}
|
||||
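// The estimator contract above is two-pass: during the first pass every
// candidate codec sees each value through `collect()`, then `finalize()`
// and `estimate()` let the caller pick the cheapest codec, and only the
// winning codec runs the second pass in `serialize()`. A minimal std-only
// sketch of that protocol with a toy bit-width estimator (illustrative
// only, not the actual `ColumnCodecEstimator` trait):
struct ToyEstimator {
    max: u64,
    count: u64,
}

impl ToyEstimator {
    fn collect(&mut self, value: u64) {
        self.max = self.max.max(value);
        self.count += 1;
    }

    // Estimated serialized size in bytes if every value is bitpacked with
    // the width needed for the largest value.
    fn estimate(&self) -> u64 {
        let num_bits = 64 - u64::from(self.max.leading_zeros());
        (self.count * num_bits + 7) / 8
    }
}

fn estimated_size(vals: &[u64]) -> u64 {
    let mut estimator = ToyEstimator { max: 0, count: 0 };
    for &val in vals {
        estimator.collect(val); // first pass
    }
    // compare this across codecs, then serialize with the cheapest one
    estimator.estimate()
}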
@@ -62,6 +88,7 @@ pub enum CodecType {
BlockwiseLinear = 2u8,
}

/// List of all available u64-based codecs.
pub const ALL_U64_CODEC_TYPES: [CodecType; 3] = [
CodecType::Bitpacked,
CodecType::Linear,

@@ -106,6 +133,7 @@ fn load_specific_codec<C: ColumnCodec, T: MonotonicallyMappableToU64>(
}

impl CodecType {
/// Returns a boxed codec estimator associated to a given `CodecType`.
pub fn estimator(&self) -> Box<dyn ColumnCodecEstimator> {
match self {
CodecType::Bitpacked => BitpackedCodec::boxed_estimator(),

@@ -115,7 +143,8 @@ impl CodecType {
}
}

pub fn serialize_u64_based_column_values<'a, T: MonotonicallyMappableToU64>(
/// Serializes a given column of u64-mapped values.
pub fn serialize_u64_based_column_values<T: MonotonicallyMappableToU64>(
vals: &dyn Iterable<T>,
codec_types: &[CodecType],
wrt: &mut dyn Write,

@@ -156,11 +185,14 @@ pub fn serialize_u64_based_column_values<'a, T: MonotonicallyMappableToU64>(
Ok(())
}

/// Loads u64-based column values.
///
/// This method first identifies the codec off the first byte.
pub fn load_u64_based_column_values<T: MonotonicallyMappableToU64>(
mut bytes: OwnedBytes,
) -> io::Result<Arc<dyn ColumnValues<T>>> {
let codec_type: CodecType = bytes
.get(0)
.first()
.copied()
.and_then(CodecType::try_from_code)
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Failed to read codec type"))?;

@@ -2,7 +2,7 @@ use std::num::NonZeroU64;

use fastdivide::DividerU64;

use crate::column_values::Stats;
use crate::column_values::ColumnStats;
use crate::RowId;

/// Computes the gcd of two non-null numbers.

@@ -33,14 +33,14 @@ pub struct StatsCollector {
}

impl StatsCollector {
pub fn stats(&self) -> Stats {
pub fn stats(&self) -> ColumnStats {
let (min_value, max_value) = self.min_max_opt.unwrap_or((0u64, 0u64));
let increment_gcd = if let Some((increment_gcd, _)) = self.increment_gcd_opt {
increment_gcd
} else {
NonZeroU64::new(1u64).unwrap()
};
Stats {
ColumnStats {
min_value,
max_value,
num_rows: self.num_rows,
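// `StatsCollector` feeds codecs three numbers: min, max, and a gcd that
// lets values be divided down before bitpacking. A self-contained sketch
// of the same bookkeeping (std only; the real collector streams values in
// one pass, tracks num_rows, and stores the gcd as a NonZeroU64):
fn gcd(mut a: u64, mut b: u64) -> u64 {
    while b != 0 {
        let r = a % b;
        a = b;
        b = r;
    }
    a
}

fn collect_stats(vals: &[u64]) -> (u64, u64, u64) {
    let min_value = vals.iter().copied().min().unwrap_or(0);
    let max_value = vals.iter().copied().max().unwrap_or(0);
    let increment_gcd = vals
        .iter()
        .map(|&val| val - min_value)
        .filter(|&diff| diff != 0)
        .fold(0u64, gcd); // gcd(0, x) == x, so 0 is a neutral start
    let increment_gcd = if increment_gcd == 0 { 1 } else { increment_gcd };
    (min_value, max_value, increment_gcd)
}

// collect_stats(&[10, 50, 10, 30]) == (10, 50, 20), matching the
// `test_stats` expectations in the tests below.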
@@ -97,9 +97,9 @@ mod tests {
use std::num::NonZeroU64;

use crate::column_values::u64_based::stats_collector::{compute_gcd, StatsCollector};
use crate::column_values::u64_based::Stats;
use crate::column_values::u64_based::ColumnStats;

fn compute_stats(vals: impl Iterator<Item = u64>) -> Stats {
fn compute_stats(vals: impl Iterator<Item = u64>) -> ColumnStats {
let mut stats_collector = StatsCollector::default();
for val in vals {
stats_collector.collect(val);

@@ -144,7 +144,7 @@ mod tests {
fn test_stats() {
assert_eq!(
compute_stats([].into_iter()),
Stats {
ColumnStats {
gcd: NonZeroU64::new(1).unwrap(),
min_value: 0,
max_value: 0,

@@ -153,7 +153,7 @@ mod tests {
);
assert_eq!(
compute_stats([0, 1].into_iter()),
Stats {
ColumnStats {
gcd: NonZeroU64::new(1).unwrap(),
min_value: 0,
max_value: 1,

@@ -162,7 +162,7 @@ mod tests {
);
assert_eq!(
compute_stats([0, 1].into_iter()),
Stats {
ColumnStats {
gcd: NonZeroU64::new(1).unwrap(),
min_value: 0,
max_value: 1,

@@ -171,7 +171,7 @@ mod tests {
);
assert_eq!(
compute_stats([10, 20, 30].into_iter()),
Stats {
ColumnStats {
gcd: NonZeroU64::new(10).unwrap(),
min_value: 10,
max_value: 30,

@@ -180,7 +180,7 @@ mod tests {
);
assert_eq!(
compute_stats([10, 50, 10, 30].into_iter()),
Stats {
ColumnStats {
gcd: NonZeroU64::new(20).unwrap(),
min_value: 10,
max_value: 50,

@@ -189,7 +189,7 @@ mod tests {
);
assert_eq!(
compute_stats([10, 0, 30].into_iter()),
Stats {
ColumnStats {
gcd: NonZeroU64::new(10).unwrap(),
min_value: 0,
max_value: 30,

@@ -60,7 +60,7 @@ pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
.map(|(pos, _)| pos as u32)
.collect();
let mut positions = Vec::new();
reader.get_docids_for_value_range(
reader.get_row_ids_for_value_range(
vals[test_rand_idx]..=vals[test_rand_idx],
0..vals.len() as u32,
&mut positions,

@@ -4,24 +4,22 @@ use std::net::Ipv6Addr;

use crate::value::NumericalType;
use crate::InvalidData;

/// The column type represents the column type and can fit on 6 bits.
///
/// - bits[0..3]: Column category type.
/// - bits[3..6]: Numerical type if necessary.
/// The column type represents the column type.
/// Any changes need to be propagated to `COLUMN_TYPES`.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd)]
#[repr(u8)]
pub enum ColumnType {
I64 = 0u8,
U64 = 1u8,
F64 = 2u8,
Bytes = 10u8,
Str = 14u8,
Bool = 18u8,
IpAddr = 22u8,
DateTime = 26u8,
Bytes = 3u8,
Str = 4u8,
Bool = 5u8,
IpAddr = 6u8,
DateTime = 7u8,
}

#[cfg(test)]
// The order needs to match _exactly_ the order in the enum
const COLUMN_TYPES: [ColumnType; 8] = [
ColumnType::I64,
ColumnType::U64,

@@ -39,18 +37,7 @@ impl ColumnType {
}

pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
use ColumnType::*;
match code {
0u8 => Ok(I64),
1u8 => Ok(U64),
2u8 => Ok(F64),
10u8 => Ok(Bytes),
14u8 => Ok(Str),
18u8 => Ok(Bool),
22u8 => Ok(IpAddr),
26u8 => Ok(Self::DateTime),
_ => Err(InvalidData),
}
COLUMN_TYPES.get(code as usize).copied().ok_or(InvalidData)
}
}
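// With the dense codes above, decoding is a bounds-checked table lookup
// instead of a `match`, and the code and the table index must stay
// inverses. A std-only illustration of the invariant that
// `test_column_type_to_code` (below) checks, using a toy enum rather than
// `ColumnType`:
#[derive(Clone, Copy, PartialEq, Debug)]
enum Kind {
    A, // discriminant 0
    B, // discriminant 1
    C, // discriminant 2
}

// Must list the variants in exactly the order of their discriminants.
const KINDS: [Kind; 3] = [Kind::A, Kind::B, Kind::C];

fn try_from_code(code: u8) -> Option<Kind> {
    KINDS.get(code as usize).copied()
}

fn main() {
    for (code, expected) in KINDS.iter().copied().enumerate() {
        assert_eq!(try_from_code(code as u8), Some(expected));
        assert_eq!(expected as u8 as usize, code);
    }
    // every code past the table is rejected
    assert_eq!(try_from_code(KINDS.len() as u8), None);
}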
@@ -124,7 +111,7 @@ impl HasAssociatedColumnType for bool {
}
}

impl HasAssociatedColumnType for crate::DateTime {
impl HasAssociatedColumnType for common::DateTime {
fn column_type() -> ColumnType {
ColumnType::DateTime
}

@@ -143,70 +130,20 @@ impl HasAssociatedColumnType for Ipv6Addr {
}
}

/// Column types are grouped into different categories that
/// correspond to the different types of `JsonValue` types.
///
/// The columnar writer will apply coercion rules to make sure that
/// at most one column exists per `ColumnTypeCategory`.
///
/// See also [README.md].
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
#[repr(u8)]
pub enum ColumnTypeCategory {
Bool,
Str,
Numerical,
DateTime,
Bytes,
IpAddr,
}

impl From<ColumnType> for ColumnTypeCategory {
fn from(column_type: ColumnType) -> Self {
match column_type {
ColumnType::I64 => ColumnTypeCategory::Numerical,
ColumnType::U64 => ColumnTypeCategory::Numerical,
ColumnType::F64 => ColumnTypeCategory::Numerical,
ColumnType::Bytes => ColumnTypeCategory::Bytes,
ColumnType::Str => ColumnTypeCategory::Str,
ColumnType::Bool => ColumnTypeCategory::Bool,
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
ColumnType::DateTime => ColumnTypeCategory::DateTime,
}
}
}

#[cfg(test)]
mod tests {
use std::collections::HashSet;

use super::*;
use crate::Cardinality;

#[test]
fn test_column_type_to_code() {
let mut column_type_set: HashSet<ColumnType> = HashSet::new();
for code in u8::MIN..=u8::MAX {
if let Ok(column_type) = ColumnType::try_from_code(code) {
assert_eq!(column_type.to_code(), code);
assert!(column_type_set.insert(column_type));
for (code, expected_column_type) in super::COLUMN_TYPES.iter().copied().enumerate() {
if let Ok(column_type) = ColumnType::try_from_code(code as u8) {
assert_eq!(column_type, expected_column_type);
}
}
assert_eq!(column_type_set.len(), super::COLUMN_TYPES.len());
}

#[test]
fn test_column_category_sort_consistent_with_column_type_sort() {
// This is a very important property because
// we need to serialize columns in the right order.
let mut column_types: Vec<ColumnType> = super::COLUMN_TYPES.iter().copied().collect();
column_types.sort_by_key(|col| col.to_code());
let column_categories: Vec<ColumnTypeCategory> = column_types
.into_iter()
.map(ColumnTypeCategory::from)
.collect();
for (prev, next) in column_categories.iter().zip(column_categories.iter().skip(1)) {
assert!(prev <= next);
for code in COLUMN_TYPES.len() as u8..=u8::MAX {
assert!(ColumnType::try_from_code(code as u8).is_err());
}
}

@@ -4,7 +4,7 @@ pub const VERSION_FOOTER_NUM_BYTES: usize = MAGIC_BYTES.len() + std::mem::size_o

/// We end the file with these 4 bytes just to somewhat identify that
/// this is indeed a columnar file.
const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 066];
const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 66];

pub fn footer() -> [u8; VERSION_FOOTER_NUM_BYTES] {
let mut footer_bytes = [0u8; VERSION_FOOTER_NUM_BYTES];

@@ -27,8 +27,8 @@ pub enum Version {
}

impl Version {
fn to_bytes(&self) -> [u8; 4] {
(*self as u32).to_le_bytes()
fn to_bytes(self) -> [u8; 4] {
(self as u32).to_le_bytes()
}

fn try_from_bytes(bytes: [u8; 4]) -> Result<Version, InvalidData> {
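// The footer couples a little-endian `u32` version with the 4 magic bytes,
// so a reader can recognize a columnar file and decode its version from
// the last VERSION_FOOTER_NUM_BYTES of the file. A hedged sketch of that
// round trip (the byte layout and names here are assumptions for
// illustration, not the exact tantivy-columnar format):
const MAGIC: [u8; 4] = [2, 113, 119, 66];

fn write_footer(version: u32) -> [u8; 8] {
    let mut footer = [0u8; 8];
    footer[0..4].copy_from_slice(&version.to_le_bytes());
    footer[4..8].copy_from_slice(&MAGIC);
    footer
}

fn read_footer(footer: &[u8; 8]) -> Option<u32> {
    if footer[4..8] != MAGIC {
        return None; // not a columnar file
    }
    let mut version_bytes = [0u8; 4];
    version_bytes.copy_from_slice(&footer[0..4]);
    Some(u32::from_le_bytes(version_bytes))
}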
@@ -1,68 +1,130 @@
use std::io::{self, Write};

use common::CountingWriter;
use common::{BitSet, CountingWriter, ReadOnlyBitSet};
use sstable::{SSTable, TermOrdinal};

use super::term_merger::TermMerger;
use crate::column::serialize_column_mappable_to_u64;
use crate::column_index::SerializableColumnIndex;
use crate::iterable::Iterable;
use crate::BytesColumn;
use crate::{BytesColumn, MergeRowOrder, ShuffleMergeOrder};

// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
// Column: [Column Index, Column Values, column index num bytes U32::LE]
pub fn merge_bytes_or_str_column(
column_index: SerializableColumnIndex<'_>,
bytes_columns: &[BytesColumn],
bytes_columns: &[Option<BytesColumn>],
merge_row_order: &MergeRowOrder,
output: &mut impl Write,
) -> io::Result<()> {
// Serialize dict and generate mapping for values
let mut output = CountingWriter::wrap(output);
let term_ord_mapping = serialize_merged_dict(bytes_columns, &mut output)?;
// TODO !!! Remove useless terms.
let term_ord_mapping = serialize_merged_dict(bytes_columns, merge_row_order, &mut output)?;
let dictionary_num_bytes: u32 = output.written_bytes() as u32;
let output = output.finish();
let remapped_term_ordinals_values = RemappedTermOrdinalsValues {
bytes_columns,
term_ord_mapping: &term_ord_mapping,
merge_row_order,
};
serialize_column_mappable_to_u64(column_index, &remapped_term_ordinals_values, output)?;
// serialize_bytes_or_str_column(column_index, bytes_columns, &term_ord_mapping, output)?;
output.write_all(&dictionary_num_bytes.to_le_bytes())?;
Ok(())
}

struct RemappedTermOrdinalsValues<'a> {
bytes_columns: &'a [BytesColumn],
bytes_columns: &'a [Option<BytesColumn>],
term_ord_mapping: &'a TermOrdinalMapping,
merge_row_order: &'a MergeRowOrder,
}

impl<'a> Iterable for RemappedTermOrdinalsValues<'a> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
match self.merge_row_order {
MergeRowOrder::Stack(_) => self.boxed_iter_stacked(),
MergeRowOrder::Shuffled(shuffle_merge_order) => {
self.boxed_iter_shuffled(shuffle_merge_order)
}
}
}
}

impl<'a> RemappedTermOrdinalsValues<'a> {
fn boxed_iter_stacked(&self) -> Box<dyn Iterator<Item = u64> + '_> {
let iter = self
.bytes_columns
.iter()
.enumerate()
.flat_map(|(segment_ord, byte_column)| {
let segment_ord = self.term_ord_mapping.get_segment(segment_ord);
byte_column
.ords()
.values
.iter()
.map(move |term_ord| segment_ord[term_ord as usize])
let segment_ord = self.term_ord_mapping.get_segment(segment_ord as u32);
byte_column.iter().flat_map(move |bytes_column| {
bytes_column
.ords()
.values
.iter()
.map(move |term_ord| segment_ord[term_ord as usize])
})
});
// TODO see if we can better decompose the mapping / and the stacking
Box::new(iter)
}

fn boxed_iter_shuffled<'b>(
&'b self,
shuffle_merge_order: &'b ShuffleMergeOrder,
) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(
shuffle_merge_order
.iter_new_to_old_row_addrs()
.flat_map(move |old_addr| {
let segment_ord = self.term_ord_mapping.get_segment(old_addr.segment_ord);
self.bytes_columns[old_addr.segment_ord as usize]
.as_ref()
.into_iter()
.flat_map(move |bytes_column| {
bytes_column
.term_ords(old_addr.row_id)
.map(|old_term_ord: u64| segment_ord[old_term_ord as usize])
})
}),
)
}
}

fn compute_term_bitset(column: &BytesColumn, row_bitset: &ReadOnlyBitSet) -> BitSet {
let num_terms = column.dictionary().num_terms();
let mut term_bitset = BitSet::with_max_value(num_terms as u32);
for row_id in row_bitset.iter() {
for term_ord in column.term_ord_column.values(row_id) {
term_bitset.insert(term_ord as u32);
}
}
term_bitset
}

fn is_term_present(bitsets: &[Option<BitSet>], term_merger: &TermMerger) -> bool {
for (segment_ord, from_term_ord) in term_merger.matching_segments() {
if let Some(bitset) = bitsets[segment_ord].as_ref() {
if bitset.contains(from_term_ord as u32) {
return true;
}
} else {
return true;
}
}
false
}
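// `compute_term_bitset` and `is_term_present` together garbage-collect the
// dictionary during a shuffled merge: a term survives only if some segment
// either has no delete bitset at all, or still references the term from an
// alive row. The same idea with plain `HashSet`s instead of bitsets
// (std only, simplified):
use std::collections::HashSet;

// Terms referenced by the alive rows of one segment.
fn alive_terms(row_to_terms: &[Vec<u64>], alive_rows: &HashSet<usize>) -> HashSet<u64> {
    let mut alive = HashSet::new();
    for &row_id in alive_rows {
        for &term_ord in &row_to_terms[row_id] {
            alive.insert(term_ord);
        }
    }
    alive
}

// `matching` lists the (segment_ord, term_ord) pairs for one merged term.
fn term_is_present(per_segment_alive: &[Option<HashSet<u64>>], matching: &[(usize, u64)]) -> bool {
    matching.iter().any(|&(segment_ord, term_ord)| {
        per_segment_alive[segment_ord]
            .as_ref()
            .map_or(true, |alive| alive.contains(&term_ord))
    })
}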
fn serialize_merged_dict(
bytes_columns: &[BytesColumn],
bytes_columns: &[Option<BytesColumn>],
merge_row_order: &MergeRowOrder,
output: &mut impl Write,
) -> io::Result<TermOrdinalMapping> {
let mut term_ord_mapping = TermOrdinalMapping::default();

let mut field_term_streams = Vec::new();
for column in bytes_columns {
for column in bytes_columns.iter().flatten() {
term_ord_mapping.add_segment(column.dictionary.num_terms());
let terms = column.dictionary.stream()?;
field_term_streams.push(terms);

@@ -71,21 +133,57 @@ fn serialize_merged_dict(
let mut merged_terms = TermMerger::new(field_term_streams);
let mut sstable_builder = sstable::VoidSSTable::writer(output);

let mut current_term_ord = 0;
while merged_terms.advance() {
let term_bytes: &[u8] = merged_terms.key();

sstable_builder.insert(term_bytes, &())?;
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
// TODO support complex `merge_row_order`.
match merge_row_order {
MergeRowOrder::Stack(_) => {
let mut current_term_ord = 0;
while merged_terms.advance() {
let term_bytes: &[u8] = merged_terms.key();
sstable_builder.insert(term_bytes, &())?;
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
}
current_term_ord += 1;
}
sstable_builder.finish()?;
}
MergeRowOrder::Shuffled(shuffle_merge_order) => {
assert_eq!(shuffle_merge_order.alive_bitsets.len(), bytes_columns.len());
let mut term_bitsets: Vec<Option<BitSet>> = Vec::with_capacity(bytes_columns.len());
for (alive_bitset_opt, bytes_column_opt) in shuffle_merge_order
.alive_bitsets
.iter()
.zip(bytes_columns.iter())
{
match (alive_bitset_opt, bytes_column_opt) {
(Some(alive_bitset), Some(bytes_column)) => {
let term_bitset = compute_term_bitset(bytes_column, alive_bitset);
term_bitsets.push(Some(term_bitset));
}
_ => {
term_bitsets.push(None);
}
}
}
let mut current_term_ord = 0;
while merged_terms.advance() {
let term_bytes: &[u8] = merged_terms.key();
if !is_term_present(&term_bitsets[..], &merged_terms) {
continue;
}
sstable_builder.insert(term_bytes, &())?;
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
}
current_term_ord += 1;
}
sstable_builder.finish()?;
}
current_term_ord += 1;
}
sstable_builder.finish()?;
Ok(term_ord_mapping)
}

#[derive(Default)]
#[derive(Default, Debug)]
struct TermOrdinalMapping {
per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>,
}

@@ -100,7 +198,7 @@ impl TermOrdinalMapping {
self.per_segment_new_term_ordinals[segment_ord][from_ord as usize] = to_ord;
}

fn get_segment(&self, segment_ord: usize) -> &[TermOrdinal] {
&(self.per_segment_new_term_ordinals[segment_ord])[..]
fn get_segment(&self, segment_ord: u32) -> &[TermOrdinal] {
&(self.per_segment_new_term_ordinals[segment_ord as usize])[..]
}
}

@@ -1,6 +1,8 @@
use std::ops::Range;

use crate::{column, ColumnarReader, RowId};
use common::{BitSet, OwnedBytes, ReadOnlyBitSet};

use crate::{ColumnarReader, RowAddr, RowId};

pub struct StackMergeOrder {
// This does not start at 0. The first row is the number of

@@ -42,19 +44,75 @@ pub enum MergeRowOrder {
/// rows [r0..n_row_0) contain the rows of columnar_readers[0], in order
/// rows [n_row_0..n_row_0 + n_row_1) contain the rows of columnar_readers[1], in order.
/// ..
/// No document is deleted.
Stack(StackMergeOrder),
/// Some more complex mapping, that can interleave rows from the different readers and
/// possibly drop rows.
Complex(()),
/// Some more complex mapping, that may interleave rows from the different readers,
/// drop rows, or do both.
Shuffled(ShuffleMergeOrder),
}

impl From<StackMergeOrder> for MergeRowOrder {
fn from(stack_merge_order: StackMergeOrder) -> MergeRowOrder {
MergeRowOrder::Stack(stack_merge_order)
}
}

impl From<ShuffleMergeOrder> for MergeRowOrder {
fn from(shuffle_merge_order: ShuffleMergeOrder) -> MergeRowOrder {
MergeRowOrder::Shuffled(shuffle_merge_order)
}
}

impl MergeRowOrder {
pub fn num_rows(&self) -> RowId {
match self {
MergeRowOrder::Stack(stack_row_order) => stack_row_order.num_rows(),
MergeRowOrder::Complex(_) => {
todo!()
}
MergeRowOrder::Shuffled(complex_mapping) => complex_mapping.num_rows(),
}
}
}

pub struct ShuffleMergeOrder {
pub new_row_id_to_old_row_id: Vec<RowAddr>,
pub alive_bitsets: Vec<Option<ReadOnlyBitSet>>,
}

impl ShuffleMergeOrder {
pub fn for_test(
segment_num_rows: &[RowId],
new_row_id_to_old_row_id: Vec<RowAddr>,
) -> ShuffleMergeOrder {
let mut alive_bitsets: Vec<BitSet> = segment_num_rows
.iter()
.map(|&num_rows| BitSet::with_max_value(num_rows))
.collect();
for &RowAddr {
segment_ord,
row_id,
} in &new_row_id_to_old_row_id
{
alive_bitsets[segment_ord as usize].insert(row_id);
}
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = alive_bitsets
.into_iter()
.map(|alive_bitset| {
let mut buffer = Vec::new();
alive_bitset.serialize(&mut buffer).unwrap();
let data = OwnedBytes::new(buffer);
Some(ReadOnlyBitSet::open(data))
})
.collect();
ShuffleMergeOrder {
new_row_id_to_old_row_id,
alive_bitsets,
}
}

pub fn num_rows(&self) -> RowId {
self.new_row_id_to_old_row_id.len() as RowId
}

pub fn iter_new_to_old_row_addrs(&self) -> impl Iterator<Item = RowAddr> + '_ {
self.new_row_id_to_old_row_id.iter().copied()
}
}
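// A `ShuffleMergeOrder` is an explicit new-to-old mapping: row `i` of the
// merged segment is read from `new_row_id_to_old_row_id[i]`, which both
// interleaves segments and silently drops rows that no mapping entry
// points at. A std-only sketch of replaying such a mapping over
// per-segment value columns (`RowAddr` mirrors the real struct's shape):
#[derive(Clone, Copy)]
struct RowAddr {
    segment_ord: u32,
    row_id: u32,
}

fn merged_values(segments: &[Vec<u64>], new_to_old: &[RowAddr]) -> Vec<u64> {
    new_to_old
        .iter()
        .map(|addr| segments[addr.segment_ord as usize][addr.row_id as usize])
        .collect()
}

// Interleaving two segments while dropping segment 0's row 0:
// merged_values(
//     &[vec![10, 11], vec![20]],
//     &[RowAddr { segment_ord: 1, row_id: 0 },
//       RowAddr { segment_ord: 0, row_id: 1 }],
// ) == vec![20, 11]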
@@ -9,24 +9,54 @@ use std::io;
use std::net::Ipv6Addr;
use std::sync::Arc;

pub use merge_mapping::{MergeRowOrder, StackMergeOrder};
pub use merge_mapping::{MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};

use super::writer::ColumnarSerializer;
use crate::column::{serialize_column_mappable_to_u128, serialize_column_mappable_to_u64};
use crate::column_index::stack_column_index;
use crate::columnar::column_type::ColumnTypeCategory;
use crate::column_values::MergedColumnValues;
use crate::columnar::merge::merge_dict_column::merge_bytes_or_str_column;
use crate::columnar::writer::CompatibleNumericalTypes;
use crate::columnar::ColumnarReader;
use crate::dynamic_column::DynamicColumn;
use crate::{
BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues, MonotonicallyMappableToU128,
NumericalType, NumericalValue,
BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues, NumericalType, NumericalValue,
};

/// Column types are grouped into different categories.
/// After merge, all columns belonging to the same category are coerced to
/// the same column type.
///
/// In practice, only Numerical columns are coerced into one type today.
///
/// See also [README.md].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
enum ColumnTypeCategory {
Bool,
Str,
Numerical,
DateTime,
Bytes,
IpAddr,
}

impl From<ColumnType> for ColumnTypeCategory {
fn from(column_type: ColumnType) -> Self {
match column_type {
ColumnType::I64 => ColumnTypeCategory::Numerical,
ColumnType::U64 => ColumnTypeCategory::Numerical,
ColumnType::F64 => ColumnTypeCategory::Numerical,
ColumnType::Bytes => ColumnTypeCategory::Bytes,
ColumnType::Str => ColumnTypeCategory::Str,
ColumnType::Bool => ColumnTypeCategory::Bool,
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
ColumnType::DateTime => ColumnTypeCategory::DateTime,
}
}
}

pub fn merge_columnar(
columnar_readers: &[&ColumnarReader],
mapping: MergeRowOrder,
merge_row_order: MergeRowOrder,
output: &mut impl io::Write,
) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(output);

@@ -35,9 +65,14 @@ pub fn merge_columnar(
for ((column_name, column_type), columns) in columns_to_merge {
let mut column_serializer =
serializer.serialize_column(column_name.as_bytes(), column_type);
merge_column(column_type, columns, &mapping, &mut column_serializer)?;
merge_column(
column_type,
columns,
&merge_row_order,
&mut column_serializer,
)?;
}
serializer.finalize(mapping.num_rows())?;
serializer.finalize(merge_row_order.num_rows())?;

Ok(())
}

@@ -53,7 +88,7 @@ fn dynamic_column_to_u64_monotonic(dynamic_column: DynamicColumn) -> Option<Colu
}
}

pub fn merge_column(
fn merge_column(
column_type: ColumnType,
columns: Vec<Option<DynamicColumn>>,
merge_row_order: &MergeRowOrder,

@@ -66,68 +101,80 @@ pub fn merge_column(
| ColumnType::DateTime
| ColumnType::Bool => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Arc<dyn ColumnValues>> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Option<Arc<dyn ColumnValues>>> =
Vec::with_capacity(columns.len());
for dynamic_column_opt in columns {
if let Some(Column { idx, values }) =
dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic)
{
column_indexes.push(Some(idx));
column_values.push(values);
column_values.push(Some(values));
} else {
column_indexes.push(None);
column_values.push(None);
}
}
let merged_column_index =
crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
serialize_column_mappable_to_u64(merged_column_index, &&column_values[..], wrt)?;
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
let merge_column_values = MergedColumnValues {
column_indexes: &column_indexes[..],
column_values: &column_values[..],
merge_row_order,
};
serialize_column_mappable_to_u64(merged_column_index, &merge_column_values, wrt)?;
}
ColumnType::IpAddr => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Arc<dyn ColumnValues<Ipv6Addr>>> =
let mut column_values: Vec<Option<Arc<dyn ColumnValues<Ipv6Addr>>>> =
Vec::with_capacity(columns.len());
let mut num_values = 0;
for dynamic_column_opt in columns {
if let Some(DynamicColumn::IpAddr(Column { idx, values })) = dynamic_column_opt {
num_values += values.num_vals();
column_indexes.push(Some(idx));
column_values.push(values);
column_values.push(Some(values));
} else {
column_indexes.push(None);
column_values.push(None);
}
}

let merged_column_index =
crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
serialize_column_mappable_to_u128(
merged_column_index,
&&column_values[..],
num_values,
wrt,
)?;
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
let merge_column_values = MergedColumnValues {
column_indexes: &column_indexes[..],
column_values: &column_values,
merge_row_order,
};

serialize_column_mappable_to_u128(merged_column_index, &merge_column_values, wrt)?;
}
ColumnType::Bytes | ColumnType::Str => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut bytes_columns: Vec<BytesColumn> = Vec::with_capacity(columns.len());
let mut bytes_columns: Vec<Option<BytesColumn>> = Vec::with_capacity(columns.len());
for dynamic_column_opt in columns {
match dynamic_column_opt {
Some(DynamicColumn::Str(str_column)) => {
column_indexes.push(Some(str_column.term_ord_column.idx.clone()));
bytes_columns.push(str_column.into());
bytes_columns.push(Some(str_column.into()));
}
Some(DynamicColumn::Bytes(bytes_column)) => {
column_indexes.push(Some(bytes_column.term_ord_column.idx.clone()));
bytes_columns.push(bytes_column);
bytes_columns.push(Some(bytes_column));
}
_ => {
column_indexes.push(None);
bytes_columns.push(None);
}
_ => column_indexes.push(None),
}
}
let merged_column_index =
crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
merge_bytes_or_str_column(merged_column_index, &bytes_columns, wrt)?;
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
merge_bytes_or_str_column(merged_column_index, &bytes_columns, merge_row_order, wrt)?;
}
}
Ok(())
}

#[allow(clippy::type_complexity)]
fn group_columns_for_merge(
columnar_readers: &[&ColumnarReader],
) -> io::Result<BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>>> {
@@ -1,169 +0,0 @@
use std::cmp;

use fastfield_codecs::Column;

use super::flat_map_with_buffer::FlatMapWithBufferIter;
use crate::fastfield::{MultiValueIndex, MultiValuedFastFieldReader};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::{DocAddress, SegmentReader};

pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: Vec<MultiValuedFastFieldReader<u64>>,
min_value: u64,
max_value: u64,
num_vals: u32,
}

impl<'a> RemappedDocIdMultiValueColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
field: &str,
) -> Self {
// Our values are bitpacked and we need to know what should be
// our bitwidth and our minimum value before serializing any values.
//
// Computing those is non-trivial if some documents are deleted.
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Serializer.
let mut num_vals = 0;
let mut min_value = u64::MAX;
let mut max_value = u64::MIN;
let mut vals = Vec::new();
let mut fast_field_readers = Vec::with_capacity(readers.len());
for reader in readers {
let ff_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.typed_fast_field_multi_reader::<u64>(field)
.expect(
"Failed to find multivalued fast field reader. This is a bug in tantivy. \
Please report.",
);
for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
for &val in &vals {
min_value = cmp::min(val, min_value);
max_value = cmp::max(val, max_value);
}
num_vals += vals.len();
}
fast_field_readers.push(ff_reader);
// TODO optimize when no deletes
}
if min_value > max_value {
min_value = 0;
max_value = 0;
}
RemappedDocIdMultiValueColumn {
doc_id_mapping,
fast_field_readers,
min_value,
max_value,
num_vals: num_vals as u32,
}
}
}

impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.doc_id_mapping
.iter_old_doc_addrs()
.flat_map_with_buffer(|old_doc_addr: DocAddress, buffer| {
let ff_reader = &self.fast_field_readers[old_doc_addr.segment_ord as usize];
ff_reader.get_vals(old_doc_addr.doc_id, buffer);
}),
)
}
fn min_value(&self) -> u64 {
self.min_value
}

fn max_value(&self) -> u64 {
self.max_value
}

fn num_vals(&self) -> u32 {
self.num_vals
}
}

pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
multi_value_length_readers: Vec<&'a MultiValueIndex>,
min_value: u64,
max_value: u64,
num_vals: u32,
}

impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
pub(crate) fn new(
segment_and_ff_readers: &'a [(&'a SegmentReader, &'a MultiValueIndex)],
doc_id_mapping: &'a SegmentDocIdMapping,
) -> Self {
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Column.
let mut num_vals = 0;
let min_value = 0;
let mut max_value = 0;
let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
for segment_and_ff_reader in segment_and_ff_readers {
let segment_reader = segment_and_ff_reader.0;
let multi_value_length_reader = segment_and_ff_reader.1;
if !segment_reader.has_deletes() {
max_value += multi_value_length_reader.total_num_vals() as u64;
} else {
for doc in segment_reader.doc_ids_alive() {
max_value += multi_value_length_reader.num_vals_for_doc(doc) as u64;
}
}
num_vals += segment_reader.num_docs();
multi_value_length_readers.push(multi_value_length_reader);
}
// The value range is always get_val(doc)..get_val(doc + 1)
num_vals += 1;
Self {
doc_id_mapping,
multi_value_length_readers,
min_value,
max_value,
num_vals,
}
}
}

impl<'a> Column for RemappedDocIdMultiValueIndexColumn<'a> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
let mut offset = 0;
Box::new(
std::iter::once(0).chain(self.doc_id_mapping.iter_old_doc_addrs().map(
move |old_doc_addr| {
let ff_reader =
&self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
offset += ff_reader.num_vals_for_doc(old_doc_addr.doc_id);
offset as u64
},
)),
)
}
fn min_value(&self) -> u64 {
self.min_value
}

fn max_value(&self) -> u64 {
self.max_value
}

fn num_vals(&self) -> u32 {
self.num_vals
}
}

@@ -12,7 +12,7 @@ fn make_columnar<T: Into<NumericalValue> + HasAssociatedColumnType + Copy>(
}
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(vals.len() as RowId, &mut buffer)
.serialize(vals.len() as RowId, None, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap()
}

@@ -90,7 +90,9 @@ fn make_numerical_columnar_multiple_columns(
.max()
.unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
dataframe_writer
.serialize(num_rows, None, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap()
}

@@ -109,7 +111,9 @@ fn make_byte_columnar_multiple_columns(columns: &[(&str, &[&[&[u8]]])]) -> Colum
.max()
.unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
dataframe_writer
.serialize(num_rows, None, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap()
}

@@ -128,7 +132,9 @@ fn make_text_columnar_multiple_columns(columns: &[(&str, &[&[&str]])]) -> Column
.max()
.unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
dataframe_writer
.serialize(num_rows, None, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap()
}

@@ -6,6 +6,6 @@ mod reader;
mod writer;

pub use column_type::{ColumnType, HasAssociatedColumnType};
pub use merge::{merge_columnar, MergeRowOrder, StackMergeOrder};
pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
pub use reader::ColumnarReader;
pub use writer::ColumnarWriter;

@@ -137,7 +137,7 @@ mod tests {
columnar_writer.record_column_type("col1", ColumnType::Str, false);
columnar_writer.record_column_type("col2", ColumnType::U64, false);
let mut buffer = Vec::new();
columnar_writer.serialize(1, &mut buffer).unwrap();
columnar_writer.serialize(1, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
let columns = columnar.list_columns().unwrap();
assert_eq!(columns.len(), 2);

@@ -153,7 +153,7 @@ mod tests {
columnar_writer.record_column_type("count", ColumnType::U64, false);
columnar_writer.record_numerical(1, "count", 1u64);
let mut buffer = Vec::new();
columnar_writer.serialize(2, &mut buffer).unwrap();
columnar_writer.serialize(2, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
let columns = columnar.list_columns().unwrap();
assert_eq!(columns.len(), 1);

@@ -162,7 +162,7 @@ mod tests {
}

#[test]
#[should_panic(expect = "Input type forbidden")]
#[should_panic(expected = "Input type forbidden")]
fn test_list_columns_strict_typing_panics_on_wrong_types() {
let mut columnar_writer = ColumnarWriter::default();
columnar_writer.record_column_type("count", ColumnType::U64, false);

@@ -41,10 +41,31 @@ impl ColumnWriter {
pub(super) fn operation_iterator<'a, V: SymbolValue>(
&self,
arena: &MemoryArena,
old_to_new_ids_opt: Option<&[RowId]>,
buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
buffer.clear();
self.values.read_to_end(arena, buffer);
if let Some(old_to_new_ids) = old_to_new_ids_opt {
// TODO avoid the extra deserialization / serialization.
let mut sorted_ops: Vec<(RowId, ColumnOperation<V>)> = Vec::new();
let mut new_doc = 0u32;
let mut cursor = &buffer[..];
for op in std::iter::from_fn(|| ColumnOperation::<V>::deserialize(&mut cursor)) {
if let ColumnOperation::NewDoc(doc) = &op {
new_doc = old_to_new_ids[*doc as usize];
sorted_ops.push((new_doc, ColumnOperation::NewDoc(new_doc)));
} else {
sorted_ops.push((new_doc, op));
}
}
// stable sort is crucial here.
sorted_ops.sort_by_key(|(new_doc_id, _)| *new_doc_id);
buffer.clear();
for (_, op) in sorted_ops {
buffer.extend_from_slice(op.serialize().as_ref());
}
}
let mut cursor: &[u8] = &buffer[..];
std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
}
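// The remapping above rewrites a flat `(NewDoc, Value, Value, NewDoc, ...)`
// operation stream into the new doc-id order: each op is keyed by the
// remapped id of the most recent `NewDoc`, then stably sorted, so the
// values of a document keep their relative order behind their `NewDoc`.
// A std-only sketch with a toy op type:
#[derive(Debug, PartialEq)]
enum Op {
    NewDoc(u32),
    Value(u64),
}

fn remap_ops(ops: Vec<Op>, old_to_new: &[u32]) -> Vec<Op> {
    let mut keyed: Vec<(u32, Op)> = Vec::new();
    let mut new_doc = 0u32;
    for op in ops {
        if let Op::NewDoc(old_doc) = op {
            new_doc = old_to_new[old_doc as usize];
            keyed.push((new_doc, Op::NewDoc(new_doc)));
        } else {
            keyed.push((new_doc, op));
        }
    }
    // stable: for a given doc, NewDoc was pushed first and stays first
    keyed.sort_by_key(|(doc, _)| *doc);
    keyed.into_iter().map(|(_, op)| op).collect()
}

// remap_ops(vec![Op::NewDoc(0), Op::Value(7), Op::NewDoc(1), Op::Value(8)], &[1, 0])
// == vec![Op::NewDoc(0), Op::Value(8), Op::NewDoc(1), Op::Value(7)]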
@@ -189,10 +210,12 @@ impl CompatibleNumericalTypes {
}

impl NumericalColumnWriter {
pub fn column_type_and_cardinality(&self, num_docs: RowId) -> (NumericalType, Cardinality) {
let numerical_type = self.compatible_numerical_types.to_numerical_type();
let cardinality = self.column_writer.get_cardinality(num_docs);
(numerical_type, cardinality)
pub fn numerical_type(&self) -> NumericalType {
self.compatible_numerical_types.to_numerical_type()
}

pub fn cardinality(&self, num_docs: RowId) -> Cardinality {
self.column_writer.get_cardinality(num_docs)
}

pub fn record_numerical_value(

@@ -208,9 +231,11 @@ impl NumericalColumnWriter {
pub(super) fn operation_iterator<'a>(
self,
arena: &MemoryArena,
old_to_new_ids: Option<&[RowId]>,
buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
self.column_writer.operation_iterator(arena, buffer)
self.column_writer
.operation_iterator(arena, old_to_new_ids, buffer)
}
}

@@ -251,9 +276,11 @@ impl StrOrBytesColumnWriter {
pub(super) fn operation_iterator<'a>(
&self,
arena: &MemoryArena,
old_to_new_ids: Option<&[RowId]>,
byte_buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
self.column_writer.operation_iterator(arena, byte_buffer)
self.column_writer
.operation_iterator(arena, old_to_new_ids, byte_buffer)
}
}

@@ -16,7 +16,7 @@ use crate::column_index::SerializableColumnIndex;
use crate::column_values::{
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
};
use crate::columnar::column_type::{ColumnType, ColumnTypeCategory};
use crate::columnar::column_type::ColumnType;
use crate::columnar::writer::column_writers::{
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
};

@@ -45,8 +45,9 @@ struct SpareBuffers {
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh, we ended up mixing integers and floats.
/// let mut wrt: Vec<u8> = Vec::new();
/// columnar_writer.serialize(2u32, &mut wrt).unwrap();
/// columnar_writer.serialize(2u32, None, &mut wrt).unwrap();
/// ```
#[derive(Default)]
pub struct ColumnarWriter {
numerical_field_hash_map: ArenaHashMap,
datetime_field_hash_map: ArenaHashMap,

@@ -60,22 +61,6 @@ pub struct ColumnarWriter {
buffers: SpareBuffers,
}

impl Default for ColumnarWriter {
fn default() -> Self {
ColumnarWriter {
numerical_field_hash_map: ArenaHashMap::new(10_000),
bool_field_hash_map: ArenaHashMap::new(10_000),
ip_addr_field_hash_map: ArenaHashMap::new(10_000),
bytes_field_hash_map: ArenaHashMap::new(10_000),
str_field_hash_map: ArenaHashMap::new(10_000),
datetime_field_hash_map: ArenaHashMap::new(10_000),
dictionaries: Vec::new(),
arena: MemoryArena::default(),
buffers: SpareBuffers::default(),
}
}
}

#[inline]
fn mutate_or_create_column<V, TMutator>(
arena_hash_map: &mut ArenaHashMap,

@@ -104,6 +89,48 @@ impl ColumnarWriter {
+ self.datetime_field_hash_map.mem_usage()
}

/// Returns the list of doc ids from 0..num_docs sorted by the `sort_field`
/// column.
///
/// If the column is multivalued, use the first value for scoring.
/// If no value is associated to a specific row, the document is assigned
/// the lowest possible score.
///
/// The sort applied is stable.
pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> {
let Some(numerical_col_writer) =
self.numerical_field_hash_map.get::<NumericalColumnWriter>(sort_field.as_bytes()) else {
return Vec::new();
};
let mut symbols_buffer = Vec::new();
let mut values = Vec::new();
let mut last_doc_opt: Option<RowId> = None;
for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) {
match op {
ColumnOperation::NewDoc(doc) => {
last_doc_opt = Some(doc);
}
ColumnOperation::Value(numerical_value) => {
if let Some(last_doc) = last_doc_opt {
let score: f32 = f64::coerce(numerical_value) as f32;
values.push((score, last_doc));
}
}
}
}
for doc in values.len() as u32..num_docs {
values.push((0.0f32, doc));
}
values.sort_by(|(left_score, _), (right_score, _)| {
if reversed {
right_score.partial_cmp(left_score).unwrap()
} else {
left_score.partial_cmp(right_score).unwrap()
}
});
values.into_iter().map(|(_score, doc)| doc).collect()
}
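// `sort_order` scores each doc by its first recorded value, pads the docs
// that have no value with the lowest score, and relies on `sort_by` being
// stable so that ties keep the original doc order (and on scores never
// being NaN, since it unwraps `partial_cmp`). The same pattern in
// miniature (std only):
fn sort_order(mut values: Vec<(f32, u32)>, num_docs: u32, reversed: bool) -> Vec<u32> {
    for doc in values.len() as u32..num_docs {
        values.push((0.0f32, doc)); // docs without a value get the lowest score
    }
    values.sort_by(|(left, _), (right, _)| {
        let ord = left.partial_cmp(right).unwrap(); // assumes no NaN scores
        if reversed {
            ord.reverse()
        } else {
            ord
        }
    });
    values.into_iter().map(|(_score, doc)| doc).collect()
}

// sort_order(vec![(2.0, 0), (1.0, 1)], 3, false) == vec![2, 1, 0]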
/// Records a column type. This is useful to bypass the coercion process,
/// make sure the column is present (even if empty) in the resulting columnar,
/// or set `sort_values_within_row`.

@@ -224,11 +251,15 @@ impl ColumnarWriter {
});
}

pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: crate::DateTime) {
pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: common::DateTime) {
let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, NumericalValue::I64(datetime.timestamp_micros), arena);
column.record(
doc,
NumericalValue::I64(datetime.into_timestamp_micros()),
arena,
);
column
});
}

@@ -278,37 +309,47 @@ impl ColumnarWriter {
},
);
}
pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
pub fn serialize(
&mut self,
num_docs: RowId,
old_to_new_row_ids: Option<&[RowId]>,
wrt: &mut dyn io::Write,
) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(wrt);
let mut columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
.numerical_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Numerical, addr))
.map(|(column_name, addr, _)| {
let numerical_column_writer: NumericalColumnWriter =
self.numerical_field_hash_map.read(addr);
let column_type = numerical_column_writer.numerical_type().into();
(column_name, column_type, addr)
})
.collect();
columns.extend(
self.bytes_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Bytes, addr)),
.map(|(term, addr, _)| (term, ColumnType::Bytes, addr)),
);
columns.extend(
self.str_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Str, addr)),
.map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)),
);
columns.extend(
self.bool_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Bool, addr)),
.map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)),
);
columns.extend(
self.ip_addr_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::IpAddr, addr)),
.map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)),
);
columns.extend(
self.datetime_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::DateTime, addr)),
.map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)),
);
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));

@@ -316,20 +357,24 @@ impl ColumnarWriter {
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
for (column_name, column_type, addr) in columns {
match column_type {
ColumnTypeCategory::Bool => {
ColumnType::Bool => {
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, ColumnType::Bool);
serializer.serialize_column(column_name, column_type);
serialize_bool_column(
cardinality,
num_docs,
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers,
&mut column_serializer,
)?;
}
ColumnTypeCategory::IpAddr => {
ColumnType::IpAddr => {
let column_writer: ColumnWriter = self.ip_addr_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer =

@@ -337,50 +382,64 @@ impl ColumnarWriter {
serialize_ip_addr_column(
cardinality,
num_docs,
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers,
&mut column_serializer,
)?;
}
ColumnTypeCategory::Bytes | ColumnTypeCategory::Str => {
let (column_type, str_column_writer): (ColumnType, StrOrBytesColumnWriter) =
if column_type == ColumnTypeCategory::Bytes {
(ColumnType::Bytes, self.bytes_field_hash_map.read(addr))
ColumnType::Bytes | ColumnType::Str => {
let str_or_bytes_column_writer: StrOrBytesColumnWriter =
if column_type == ColumnType::Bytes {
self.bytes_field_hash_map.read(addr)
} else {
(ColumnType::Str, self.str_field_hash_map.read(addr))
self.str_field_hash_map.read(addr)
};
let dictionary_builder =
&dictionaries[str_column_writer.dictionary_id as usize];
let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
&dictionaries[str_or_bytes_column_writer.dictionary_id as usize];
let cardinality = str_or_bytes_column_writer
.column_writer
.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, column_type);
serialize_bytes_or_str_column(
cardinality,
num_docs,
str_column_writer.sort_values_within_row,
str_or_bytes_column_writer.sort_values_within_row,
dictionary_builder,
str_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
str_or_bytes_column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers,
&mut column_serializer,
)?;
}
ColumnTypeCategory::Numerical => {
ColumnType::F64 | ColumnType::I64 | ColumnType::U64 => {
let numerical_column_writer: NumericalColumnWriter =
self.numerical_field_hash_map.read(addr);
let (numerical_type, cardinality) =
numerical_column_writer.column_type_and_cardinality(num_docs);
let cardinality = numerical_column_writer.cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, ColumnType::from(numerical_type));
serializer.serialize_column(column_name, column_type);
let numerical_type = column_type.numerical_type().unwrap();
serialize_numerical_column(
cardinality,
num_docs,
numerical_type,
numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
numerical_column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers,
&mut column_serializer,
)?;
}
ColumnTypeCategory::DateTime => {
ColumnType::DateTime => {
let column_writer: ColumnWriter = self.datetime_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer =

@@ -389,7 +448,11 @@ impl ColumnarWriter {
cardinality,
num_docs,
NumericalType::I64,
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers,
&mut column_serializer,
)?;

@@ -588,13 +651,12 @@ where
crate::column::serialize_column_mappable_to_u128(
serializable_column_index,
&&values[..],
values.len() as u32,
&mut wrt,
)?;
Ok(())
}

fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut Vec<u64>) {
fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut [u64]) {
let mut start_index: usize = 0;
for end_index in multivalued_index.iter().copied() {
let end_index = end_index as usize;
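// `sort_values_within_row_in_place` walks the multivalued index, which
// stores one end offset per row into the flat value buffer, and sorts each
// row's slice independently. The same loop, std only (assuming end
// offsets, as the loop above suggests; the real index type is RowId-based):
fn sort_values_within_row(multivalued_index: &[u32], values: &mut [u64]) {
    let mut start_index = 0usize;
    for end_index in multivalued_index.iter().copied() {
        let end_index = end_index as usize;
        values[start_index..end_index].sort_unstable();
        start_index = end_index;
    }
}

// sort_values_within_row(&[2, 5], &mut [9, 3, 7, 5, 6]) sorts the two rows
// independently, yielding [3, 9, 5, 6, 7].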
@@ -699,7 +761,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 6);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
@@ -728,7 +790,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 4);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
|
||||
@@ -751,7 +813,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 2);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
@@ -770,7 +832,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 3);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
|
||||
@@ -29,7 +29,7 @@ pub struct OptionalIndexBuilder {
|
||||
}
|
||||
|
||||
impl OptionalIndexBuilder {
|
||||
pub fn finish<'a>(&'a mut self, num_rows: RowId) -> impl Iterable<RowId> + 'a {
|
||||
pub fn finish(&mut self, num_rows: RowId) -> impl Iterable<RowId> + '_ {
|
||||
debug_assert!(self
|
||||
.docs
|
||||
.last()
|
||||
|
||||
@@ -3,12 +3,12 @@ use std::net::Ipv6Addr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::file_slice::FileSlice;
|
||||
use common::{HasLen, OwnedBytes};
|
||||
use common::{DateTime, HasLen, OwnedBytes};
|
||||
|
||||
use crate::column::{BytesColumn, Column, StrColumn};
|
||||
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::{Cardinality, DateTime, NumericalType};
|
||||
use crate::{Cardinality, NumericalType};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DynamicColumn {
|
||||
@@ -166,9 +166,9 @@ impl StrictlyMonotonicFn<i64, u64> for MapI64ToU64 {
|
||||
|
||||
macro_rules! static_dynamic_conversions {
|
||||
($typ:ty, $enum_name:ident) => {
|
||||
impl Into<Option<$typ>> for DynamicColumn {
|
||||
fn into(self) -> Option<$typ> {
|
||||
if let DynamicColumn::$enum_name(col) = self {
|
||||
impl From<DynamicColumn> for Option<$typ> {
|
||||
fn from(dynamic_column: DynamicColumn) -> Option<$typ> {
|
||||
if let DynamicColumn::$enum_name(col) = dynamic_column {
|
||||
Some(col)
|
||||
} else {
|
||||
None
|
||||
@@ -188,7 +188,7 @@ static_dynamic_conversions!(Column<bool>, Bool);
|
||||
static_dynamic_conversions!(Column<u64>, U64);
|
||||
static_dynamic_conversions!(Column<i64>, I64);
|
||||
static_dynamic_conversions!(Column<f64>, F64);
|
||||
static_dynamic_conversions!(Column<crate::DateTime>, DateTime);
|
||||
static_dynamic_conversions!(Column<DateTime>, DateTime);
|
||||
static_dynamic_conversions!(StrColumn, Str);
|
||||
static_dynamic_conversions!(BytesColumn, Bytes);
|
||||
static_dynamic_conversions!(Column<Ipv6Addr>, IpAddr);
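Replacing the hand-rolled `Into` with a `From` impl follows the usual Rust guideline: implementing `From` yields `Into` for free via the blanket impl. A hedged sketch of the call shape this enables, using only types from this file (the helper name is hypothetical):

```rust
// Hypothetical helper: downcast a DynamicColumn to its u64 variant.
// Returns None when the column holds any other type.
fn as_u64_column(dynamic_column: DynamicColumn) -> Option<Column<u64>> {
    dynamic_column.into()
}
```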
@@ -235,17 +235,15 @@ impl DynamicColumnHandle {

fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
let dynamic_column: DynamicColumn = match self.column_type {
ColumnType::Bytes => {
crate::column::open_column_bytes::<BytesColumn>(column_bytes)?.into()
}
ColumnType::Str => crate::column::open_column_bytes::<StrColumn>(column_bytes)?.into(),
ColumnType::Bytes => crate::column::open_column_bytes(column_bytes)?.into(),
ColumnType::Str => crate::column::open_column_str(column_bytes)?.into(),
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),
ColumnType::Bool => crate::column::open_column_u64::<bool>(column_bytes)?.into(),
ColumnType::IpAddr => crate::column::open_column_u128::<Ipv6Addr>(column_bytes)?.into(),
ColumnType::DateTime => {
crate::column::open_column_u64::<crate::DateTime>(column_bytes)?.into()
crate::column::open_column_u64::<DateTime>(column_bytes)?.into()
}
};
Ok(dynamic_column)

@@ -24,7 +24,7 @@ pub use column_index::ColumnIndex;
pub use column_values::{ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64};
pub use columnar::{
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
MergeRowOrder, StackMergeOrder,
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder,
};
use sstable::VoidSSTable;
pub use value::{NumericalType, NumericalValue};
@@ -32,13 +32,18 @@ pub use value::{NumericalType, NumericalValue};
pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle};

pub type RowId = u32;
pub type DocId = u32;

#[derive(Clone, Copy)]
pub struct RowAddr {
pub segment_ord: u32,
pub row_id: RowId,
}

pub use sstable::Dictionary;
pub type Streamer<'a> = sstable::Streamer<'a, VoidSSTable>;

#[derive(Clone, Copy, PartialOrd, PartialEq, Default, Debug)]
pub struct DateTime {
pub timestamp_micros: i64,
}
pub use common::DateTime;

#[derive(Copy, Clone, Debug)]
pub struct InvalidData;

@@ -12,7 +12,7 @@ fn test_dataframe_writer_str() {
dataframe_writer.record_str(1u32, "my_string", "hello");
dataframe_writer.record_str(3u32, "my_string", "helloeee");
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
@@ -26,7 +26,7 @@ fn test_dataframe_writer_bytes() {
dataframe_writer.record_bytes(1u32, "my_string", b"hello");
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
@@ -40,7 +40,7 @@ fn test_dataframe_writer_bool() {
dataframe_writer.record_bool(1u32, "bool.value", false);
dataframe_writer.record_bool(3u32, "bool.value", true);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
@@ -63,7 +63,7 @@ fn test_dataframe_writer_u64_multivalued() {
dataframe_writer.record_numerical(6u32, "divisor", 2u64);
dataframe_writer.record_numerical(6u32, "divisor", 3u64);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(7, &mut buffer).unwrap();
dataframe_writer.serialize(7, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
@@ -75,7 +75,7 @@ fn test_dataframe_writer_u64_multivalued() {
divisor_col.get_cardinality(),
crate::Cardinality::Multivalued
);
assert_eq!(divisor_col.num_rows(), 7);
assert_eq!(divisor_col.num_docs(), 7);
}

#[test]
@@ -84,7 +84,7 @@ fn test_dataframe_writer_ip_addr() {
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
@@ -113,7 +113,7 @@ fn test_dataframe_writer_numerical() {
dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(6, &mut buffer).unwrap();
dataframe_writer.serialize(6, None, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("srical.value").unwrap();
@@ -144,7 +144,7 @@ fn test_dictionary_encoded_str() {
columnar_writer.record_str(3, "my.column", "c");
columnar_writer.record_str(3, "my.column2", "different_column!");
columnar_writer.record_str(4, "my.column", "b");
columnar_writer.serialize(5, &mut buffer).unwrap();
columnar_writer.serialize(5, None, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
@@ -176,7 +176,7 @@ fn test_dictionary_encoded_bytes() {
columnar_writer.record_bytes(3, "my.column", b"c");
columnar_writer.record_bytes(3, "my.column2", b"different_column!");
columnar_writer.record_bytes(4, "my.column", b"b");
columnar_writer.serialize(5, &mut buffer).unwrap();
columnar_writer.serialize(5, None, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();

@@ -1,3 +1,5 @@
use common::DateTime;

use crate::InvalidData;

#[derive(Copy, Clone, PartialEq, Debug)]
@@ -104,10 +106,10 @@ impl Coerce for f64 {
}
}

impl Coerce for crate::DateTime {
impl Coerce for DateTime {
fn coerce(value: NumericalValue) -> Self {
let timestamp_micros = i64::coerce(value);
crate::DateTime { timestamp_micros }
DateTime::from_timestamp_micros(timestamp_micros)
}
}
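A small hedged illustration of the new coercion path, assuming the `Coerce` trait and `NumericalValue` as defined in this file, and that the coerced `i64` is interpreted as a microsecond timestamp:

```rust
#[test]
fn coerce_datetime_sketch() {
    // Sketch only; the value is arbitrary (2022-06-01T00:00:00Z in micros).
    let dt = <DateTime as Coerce>::coerce(NumericalValue::I64(1_654_041_600_000_000));
    assert_eq!(dt.into_timestamp_secs(), 1_654_041_600);
}
```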

@@ -16,6 +16,8 @@ repository = "https://github.com/quickwit-oss/tantivy"
byteorder = "1.4.3"
ownedbytes = { version= "0.5", path="../ownedbytes" }
async-trait = "0.1"
time = { version = "0.3.10", features = ["serde-well-known"] }
serde = { version = "1.0.136", features = ["derive"] }

[dev-dependencies]
proptest = "1.0.0"

common/src/datetime.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
use std::fmt;

use serde::{Deserialize, Serialize};
use time::format_description::well_known::Rfc3339;
use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};

/// DateTime Precision
#[derive(
Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Default,
)]
#[serde(rename_all = "lowercase")]
pub enum DatePrecision {
/// Seconds precision
#[default]
Seconds,
/// Milli-seconds precision.
Milliseconds,
/// Micro-seconds precision.
Microseconds,
}

/// A date/time value with microsecond precision.
///
/// This timestamp does not carry any explicit time zone information.
/// Users are responsible for applying the provided conversion
/// functions consistently. Internally the time zone is assumed
/// to be UTC, which is also used implicitly for JSON serialization.
///
/// All constructors and conversions are provided as explicit
/// functions and not by implementing any `From`/`Into` traits
/// to prevent unintended usage.
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DateTime {
// Timestamp in microseconds.
pub(crate) timestamp_micros: i64,
}

impl DateTime {
/// Create new from UNIX timestamp in seconds
pub const fn from_timestamp_secs(seconds: i64) -> Self {
Self {
timestamp_micros: seconds * 1_000_000,
}
}

/// Create new from UNIX timestamp in milliseconds
pub const fn from_timestamp_millis(milliseconds: i64) -> Self {
Self {
timestamp_micros: milliseconds * 1_000,
}
}

/// Create new from UNIX timestamp in microseconds.
pub const fn from_timestamp_micros(microseconds: i64) -> Self {
Self {
timestamp_micros: microseconds,
}
}

/// Create new from `OffsetDateTime`
///
/// The given date/time is converted to UTC and the actual
/// time zone is discarded.
pub const fn from_utc(dt: OffsetDateTime) -> Self {
let timestamp_micros = dt.unix_timestamp() * 1_000_000 + dt.microsecond() as i64;
Self { timestamp_micros }
}

/// Create new from `PrimitiveDateTime`
///
/// Implicitly assumes that the given date/time is in UTC!
/// Otherwise the original value must only be reobtained with
/// [`Self::into_primitive()`].
pub fn from_primitive(dt: PrimitiveDateTime) -> Self {
Self::from_utc(dt.assume_utc())
}

/// Convert to UNIX timestamp in seconds.
pub const fn into_timestamp_secs(self) -> i64 {
self.timestamp_micros / 1_000_000
}

/// Convert to UNIX timestamp in milliseconds.
pub const fn into_timestamp_millis(self) -> i64 {
self.timestamp_micros / 1_000
}

/// Convert to UNIX timestamp in microseconds.
pub const fn into_timestamp_micros(self) -> i64 {
self.timestamp_micros
}

/// Convert to UTC `OffsetDateTime`
pub fn into_utc(self) -> OffsetDateTime {
let timestamp_nanos = self.timestamp_micros as i128 * 1000;
let utc_datetime = OffsetDateTime::from_unix_timestamp_nanos(timestamp_nanos)
.expect("valid UNIX timestamp");
debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
utc_datetime
}

/// Convert to `OffsetDateTime` with the given time zone
pub fn into_offset(self, offset: UtcOffset) -> OffsetDateTime {
self.into_utc().to_offset(offset)
}

/// Convert to `PrimitiveDateTime` without any time zone
///
/// The value should have been constructed with [`Self::from_primitive()`].
/// Otherwise the time zone is implicitly assumed to be UTC.
pub fn into_primitive(self) -> PrimitiveDateTime {
let utc_datetime = self.into_utc();
// Discard the UTC time zone offset
debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
PrimitiveDateTime::new(utc_datetime.date(), utc_datetime.time())
}

/// Truncates the microseconds value to the corresponding precision.
pub fn truncate(self, precision: DatePrecision) -> Self {
let truncated_timestamp_micros = match precision {
DatePrecision::Seconds => (self.timestamp_micros / 1_000_000) * 1_000_000,
DatePrecision::Milliseconds => (self.timestamp_micros / 1_000) * 1_000,
DatePrecision::Microseconds => self.timestamp_micros,
};
Self {
timestamp_micros: truncated_timestamp_micros,
}
}
}

impl fmt::Debug for DateTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let utc_rfc3339 = self.into_utc().format(&Rfc3339).map_err(|_| fmt::Error)?;
f.write_str(&utc_rfc3339)
}
}
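A hedged usage sketch of the API above (timestamps chosen for illustration only):

```rust
use common::{DatePrecision, DateTime};

fn datetime_roundtrip_sketch() {
    let dt = DateTime::from_timestamp_millis(1_654_041_600_123);
    assert_eq!(dt.into_timestamp_secs(), 1_654_041_600);
    // Truncation keeps the value but drops sub-second precision.
    let secs = dt.truncate(DatePrecision::Seconds);
    assert_eq!(secs.into_timestamp_micros(), 1_654_041_600_000_000);
}
```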
@@ -5,12 +5,14 @@ use std::ops::Deref;
pub use byteorder::LittleEndian as Endianness;

mod bitset;
mod datetime;
pub mod file_slice;
mod group_by;
mod serialize;
mod vint;
mod writer;
pub use bitset::*;
pub use datetime::{DatePrecision, DateTime};
pub use group_by::GroupByIteratorExtended;
pub use ownedbytes::{OwnedBytes, StableDeref};
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};

@@ -24,8 +24,7 @@ fn main() -> tantivy::Result<()> {
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype =
crate::schema::NumericOptions::default().set_fast();
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
let highscore_field = schema_builder.add_f64_field("highscore", score_fieldtype.clone());
let price_field = schema_builder.add_f64_field("price", score_fieldtype);

@@ -7,9 +7,7 @@
// Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples.

use std::sync::Arc;

use fastfield_codecs::Column;
use columnar::Column;
// ---
// Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector};
@@ -97,7 +95,7 @@ impl Collector for StatsCollector {
}

struct StatsSegmentCollector {
fast_field_reader: Arc<dyn Column<u64>>,
fast_field_reader: Column,
stats: Stats,
}

@@ -105,10 +103,14 @@ impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;

fn collect(&mut self, doc: u32, _score: Score) {
let value = self.fast_field_reader.get_val(doc) as f64;
self.stats.count += 1;
self.stats.sum += value;
self.stats.squared_sum += value * value;
// Since we know the values are single value, we could call `first_or_default_col` on the
// column and fetch single values.
for value in self.fast_field_reader.values(doc) {
let value = value as f64;
self.stats.count += 1;
self.stats.sum += value;
self.stats.squared_sum += value * value;
}
}

fn harvest(self) -> <Self as SegmentCollector>::Fruit {
@@ -71,7 +71,7 @@ fn main() -> tantivy::Result<()> {
let reader = index.reader()?;
let searcher = reader.searcher();
{
let mut facet_collector = FacetCollector::for_field(classification);
let mut facet_collector = FacetCollector::for_field("classification");
facet_collector.add_facet("/Felidae");
let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
// This lists all of the facet counts, right below "/Felidae".
@@ -97,7 +97,7 @@ fn main() -> tantivy::Result<()> {
let facet = Facet::from("/Felidae/Pantherinae");
let facet_term = Term::from_facet(classification, &facet);
let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
let mut facet_collector = FacetCollector::for_field(classification);
let mut facet_collector = FacetCollector::for_field("classification");
facet_collector.add_facet("/Felidae/Pantherinae");
let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
@@ -56,7 +56,7 @@ fn main() -> tantivy::Result<()> {
);
let top_docs_by_custom_score =
TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
let ingredient_reader = segment_reader.facet_reader("ingredient").unwrap();
let facet_dict = ingredient_reader.facet_dict();

let query_ords: HashSet<u64> = facets
@@ -64,12 +64,9 @@ fn main() -> tantivy::Result<()> {
.filter_map(|key| facet_dict.term_ord(key.encoded_str()).unwrap())
.collect();

let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);

move |doc: DocId, original_score: Score| {
ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
let missing_ingredients = facet_ords_buffer
.iter()
let missing_ingredients = ingredient_reader
.facet_ords(doc)
.filter(|ord| !query_ords.contains(ord))
.count();
let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);
@@ -48,7 +48,10 @@ impl Warmer for DynamicPriceColumn {
fn warm(&self, searcher: &Searcher) -> tantivy::Result<()> {
for segment in searcher.segment_readers() {
let key = (segment.segment_id(), segment.delete_opstamp());
let product_id_reader = segment.fast_fields().u64(&self.field)?;
let product_id_reader = segment
.fast_fields()
.u64(&self.field)?
.first_or_default_col(0);
let product_ids: Vec<ProductId> = segment
.doc_ids_alive()
.map(|doc| product_id_reader.get_val(doc))
@@ -2,9 +2,8 @@

use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::Arc;

use fastfield_codecs::Column;
use columnar::{Column, StrColumn};

use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
@@ -14,9 +13,8 @@ use super::metric::{
};
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::fastfield::{type_and_cardinality, MultiValuedFastFieldReader};
use crate::schema::Type;
use crate::{InvertedIndexReader, SegmentReader, TantivyError};
use crate::{SegmentReader, TantivyError};

#[derive(Clone, Default)]
pub(crate) struct AggregationsWithAccessor {
@@ -37,38 +35,12 @@ impl AggregationsWithAccessor {
}
}

#[derive(Clone)]
pub(crate) enum FastFieldAccessor {
Multi(MultiValuedFastFieldReader<u64>),
Single(Arc<dyn Column<u64>>),
}
impl FastFieldAccessor {
pub fn as_single(&self) -> Option<&dyn Column<u64>> {
match self {
FastFieldAccessor::Multi(_) => None,
FastFieldAccessor::Single(reader) => Some(&**reader),
}
}
pub fn into_single(self) -> Option<Arc<dyn Column<u64>>> {
match self {
FastFieldAccessor::Multi(_) => None,
FastFieldAccessor::Single(reader) => Some(reader),
}
}
pub fn as_multi(&self) -> Option<&MultiValuedFastFieldReader<u64>> {
match self {
FastFieldAccessor::Multi(reader) => Some(reader),
FastFieldAccessor::Single(_) => None,
}
}
}

#[derive(Clone)]
pub struct BucketAggregationWithAccessor {
/// In general there can be buckets without fast field access, e.g. buckets that are created
/// based on search terms. So eventually this needs to be Option or moved.
pub(crate) accessor: FastFieldAccessor,
pub(crate) inverted_index: Option<Arc<InvertedIndexReader>>,
pub(crate) accessor: Column<u64>,
pub(crate) str_dict_column: Option<StrColumn>,
pub(crate) field_type: Type,
pub(crate) bucket_agg: BucketAggregationType,
pub(crate) sub_aggregation: AggregationsWithAccessor,
@@ -83,20 +55,19 @@ impl BucketAggregationWithAccessor {
bucket_count: Rc<AtomicU32>,
max_bucket_count: u32,
) -> crate::Result<BucketAggregationWithAccessor> {
let mut inverted_index = None;
let mut str_dict_column = None;
let (accessor, field_type) = match &bucket {
BucketAggregationType::Range(RangeAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
}) => get_ff_reader_and_validate(reader, field_name)?,
BucketAggregationType::Histogram(HistogramAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
}) => get_ff_reader_and_validate(reader, field_name)?,
BucketAggregationType::Terms(TermsAggregation {
field: field_name, ..
}) => {
let field = reader.schema().get_field(field_name)?;
inverted_index = Some(reader.inverted_index(field)?);
get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
str_dict_column = reader.fast_fields().str(field_name)?;
get_ff_reader_and_validate(reader, field_name)?
}
};
let sub_aggregation = sub_aggregation.clone();
@@ -110,7 +81,7 @@ impl BucketAggregationWithAccessor {
max_bucket_count,
)?,
bucket_agg: bucket.clone(),
inverted_index,
str_dict_column,
bucket_count: BucketCount {
bucket_count,
max_bucket_count,
@@ -124,7 +95,7 @@ impl BucketAggregationWithAccessor {
pub struct MetricAggregationWithAccessor {
pub metric: MetricAggregation,
pub field_type: Type,
pub accessor: Arc<dyn Column>,
pub accessor: Column<u64>,
}

impl MetricAggregationWithAccessor {
@@ -139,13 +110,10 @@ impl MetricAggregationWithAccessor {
| MetricAggregation::Min(MinAggregation { field: field_name })
| MetricAggregation::Stats(StatsAggregation { field: field_name })
| MetricAggregation::Sum(SumAggregation { field: field_name }) => {
let (accessor, field_type) =
get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?;
let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?;

Ok(MetricAggregationWithAccessor {
accessor: accessor
.into_single()
.expect("unexpected fast field cardinality"),
accessor,
field_type,
metric: metric.clone(),
})
@@ -190,32 +158,22 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
fn get_ff_reader_and_validate(
reader: &SegmentReader,
field_name: &str,
cardinality: Cardinality,
) -> crate::Result<(FastFieldAccessor, Type)> {
) -> crate::Result<(columnar::Column<u64>, Type)> {
let field = reader.schema().get_field(field_name)?;
let field_type = reader.schema().get_field_entry(field).field_type();

if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
if cardinality != field_cardinality {
return Err(TantivyError::InvalidArgument(format!(
"Invalid field cardinality on field {} expected {:?}, but got {:?}",
field_name, cardinality, field_cardinality
)));
}
} else {
return Err(TantivyError::InvalidArgument(format!(
"Only fast fields of type f64, u64, i64 are supported, but got {:?} ",
field_type.value_type()
)));
};
// TODO we should get type metadata from columnar
let field_type = reader
.schema()
.get_field_entry(field)
.field_type()
.value_type();
// TODO Do validation

let ff_fields = reader.fast_fields();
match cardinality {
Cardinality::SingleValue => ff_fields
.u64_lenient(field_name)
.map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
Cardinality::MultiValues => ff_fields
.u64s_lenient(field_name)
.map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
}
let ff_field = ff_fields.u64_lenient(field_name)?.ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"No numerical fast field found for field: {}",
field_name
))
})?;
Ok((ff_field, field_type))
}
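A hedged sketch of the new call shape this enables: every aggregation now receives a multi-value-capable `Column<u64>`, so the per-doc loop is identical regardless of the field's declared cardinality (`reader` and `docs` are assumed to exist at the call site; the field name is illustrative):

```rust
// Sketch only, inside this module where get_ff_reader_and_validate is visible.
let (accessor, _field_type) = get_ff_reader_and_validate(reader, "price")?;
for &doc in docs {
    for val in accessor.values(doc) {
        // Bucket or metric logic consumes the raw u64 representation here.
    }
}
```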

src/aggregation/bucket/histogram/date_histogram.rs (new file, 137 lines)
@@ -0,0 +1,137 @@
use serde::{Deserialize, Serialize};

/// DateHistogramAggregation is similar to `HistogramAggregation`, but it can only be used with date
/// type.
///
/// Currently only **fixed time** intervals are supported. Calendar-aware time intervals are not
/// supported.
///
/// Like the histogram, values are rounded down into the closest bucket.
///
/// For this calculation all fastfield values are converted to f64.
///
/// # Limitations/Compatibility
/// Only fixed time intervals are supported.
///
/// # JSON Format
/// ```json
/// {
///     "prices": {
///         "date_histogram": {
///             "field": "price",
///             "fixed_interval": "30d"
///         }
///     }
/// }
/// ```
///
/// Response
/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct DateHistogramAggregationReq {
/// The field to aggregate on.
pub field: String,
/// The interval to chunk your data range. Each bucket spans a value range of
/// [0..fixed_interval). Accepted values
///
/// Fixed intervals are configured with the `fixed_interval` parameter.
/// In contrast to calendar-aware intervals, fixed intervals are a fixed number of SI units and
/// never deviate, regardless of where they fall on the calendar. One second is always
/// composed of 1000ms. This allows fixed intervals to be specified in any multiple of the
/// supported units. However, it means fixed intervals cannot express other units such as
/// months, since the duration of a month is not a fixed quantity. Attempting to specify a
/// calendar interval like month or quarter will return an Error.
///
/// The accepted units for fixed intervals are:
/// * `ms`: milliseconds
/// * `s`: seconds. Defined as 1000 milliseconds each.
/// * `m`: minutes. Defined as 60 seconds each (60_000 milliseconds).
/// * `h`: hours. Defined as 60 minutes each (3_600_000 milliseconds).
/// * `d`: days. Defined as 24 hours (86_400_000 milliseconds).
///
/// Fractional time values are not supported, but you can address this by shifting to another
/// time unit (e.g., `1.5h` could instead be specified as `90m`).
pub fixed_interval: String,
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
/// 1))`.
pub offset: Option<String>,
/// Whether to return the buckets as a hash map
#[serde(default)]
pub keyed: bool,
}

impl DateHistogramAggregationReq {
fn validate(&self) -> crate::Result<()> {
Ok(())
}
}

#[derive(Debug, PartialEq, Eq)]
/// Errors when parsing the fixed interval for `DateHistogramAggregationReq`.
pub enum DateHistogramParseError {
/// Unit not recognized in passed String
UnitNotRecognized(String),
/// Number not found in passed String
NumberMissing(String),
/// Unit not found in passed String
UnitMissing(String),
}

fn parse_into_milliseconds(input: &str) -> Result<u64, DateHistogramParseError> {
let split_boundary = input
.as_bytes()
.iter()
.take_while(|byte| byte.is_ascii_digit())
.count();
let (number, unit) = input.split_at(split_boundary);
if number.is_empty() {
return Err(DateHistogramParseError::NumberMissing(input.to_string()));
}
if unit.is_empty() {
return Err(DateHistogramParseError::UnitMissing(input.to_string()));
}
let number: u64 = number
.parse()
// Technically this should never happen, but there was a bug
// here and being defensive does not hurt.
.map_err(|_err| DateHistogramParseError::NumberMissing(input.to_string()))?;

let multiplier_from_unit = match unit {
"ms" => 1,
"s" => 1000,
"m" => 60 * 1000,
"h" => 60 * 60 * 1000,
"d" => 24 * 60 * 60 * 1000,
_ => return Err(DateHistogramParseError::UnitNotRecognized(unit.to_string())),
};

Ok(number * multiplier_from_unit)
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_parse_into_milliseconds() {
assert_eq!(parse_into_milliseconds("1m").unwrap(), 60_000);
assert_eq!(parse_into_milliseconds("2m").unwrap(), 120_000);
assert_eq!(
parse_into_milliseconds("2y").unwrap_err(),
DateHistogramParseError::UnitNotRecognized("y".to_string())
);
assert_eq!(
parse_into_milliseconds("2000").unwrap_err(),
DateHistogramParseError::UnitMissing("2000".to_string())
);
assert_eq!(
parse_into_milliseconds("ms").unwrap_err(),
DateHistogramParseError::NumberMissing("ms".to_string())
);
}

#[test]
fn test_parse_into_milliseconds_do_not_accept_non_ascii() {
assert!(parse_into_milliseconds("1m").is_err());
}
}
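For orientation, a hedged example of what a request parses to, reusing the JSON from the doc comment above (assumes `serde_json` is available, e.g. inside this module's tests):

```rust
// Sketch: 30 days as a fixed interval is 30 * 86_400_000 ms.
let req: DateHistogramAggregationReq =
    serde_json::from_str(r#"{ "field": "price", "fixed_interval": "30d" }"#).unwrap();
assert_eq!(parse_into_milliseconds(&req.fixed_interval), Ok(2_592_000_000));
```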
@@ -1,7 +1,7 @@
use std::cmp::Ordering;
use std::fmt::Display;

use fastfield_codecs::Column;
use columnar::Column;
use itertools::Itertools;
use serde::{Deserialize, Serialize};

@@ -13,7 +13,9 @@ use crate::aggregation::agg_result::BucketEntry;
use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::segment_agg_result::{
GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
};
use crate::aggregation::{f64_from_fastfield_u64, format_date};
use crate::schema::{Schema, Type};
use crate::{DocId, TantivyError};
@@ -62,7 +64,6 @@ use crate::{DocId, TantivyError};
///
/// Response
/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)

#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct HistogramAggregation {
/// The field to aggregate on.
@@ -184,7 +185,7 @@ pub(crate) struct SegmentHistogramBucketEntry {
impl SegmentHistogramBucketEntry {
pub(crate) fn into_intermediate_bucket_entry(
self,
sub_aggregation: SegmentAggregationResultsCollector,
sub_aggregation: GenericSegmentAggregationResultsCollector,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateHistogramBucketEntry> {
Ok(IntermediateHistogramBucketEntry {
@@ -198,11 +199,11 @@ impl SegmentHistogramBucketEntry {

/// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype.
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug)]
pub struct SegmentHistogramCollector {
/// The buckets containing the aggregation data.
buckets: Vec<SegmentHistogramBucketEntry>,
sub_aggregations: Option<Vec<SegmentAggregationResultsCollector>>,
sub_aggregations: Option<Vec<GenericSegmentAggregationResultsCollector>>,
field_type: Type,
interval: f64,
offset: f64,
@@ -283,7 +284,7 @@ impl SegmentHistogramCollector {
req: &HistogramAggregation,
sub_aggregation: &AggregationsWithAccessor,
field_type: Type,
accessor: &dyn Column<u64>,
accessor: &Column<u64>,
) -> crate::Result<Self> {
req.validate()?;
let min = f64_from_fastfield_u64(accessor.min_value(), &field_type);
@@ -300,7 +301,7 @@ impl SegmentHistogramCollector {
None
} else {
let sub_aggregation =
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
GenericSegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
Some(buckets.iter().map(|_| sub_aggregation.clone()).collect())
};

@@ -335,7 +336,7 @@ impl SegmentHistogramCollector {
#[inline]
pub(crate) fn collect_block(
&mut self,
doc: &[DocId],
docs: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) -> crate::Result<()> {
@@ -346,64 +347,20 @@ impl SegmentHistogramCollector {
let get_bucket_num =
|val| (get_bucket_num_f64(val, interval, offset) as i64 - first_bucket_num) as usize;

let accessor = bucket_with_accessor
.accessor
.as_single()
.expect("unexpected fast field cardinatility");
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0]));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1]));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2]));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3]));
let accessor = &bucket_with_accessor.accessor;
for doc in docs {
for val in accessor.values(*doc) {
let val = self.f64_from_fastfield_u64(val);

let bucket_pos0 = get_bucket_num(val0);
let bucket_pos1 = get_bucket_num(val1);
let bucket_pos2 = get_bucket_num(val2);
let bucket_pos3 = get_bucket_num(val3);

self.increment_bucket_if_in_bounds(
val0,
&bounds,
bucket_pos0,
docs[0],
&bucket_with_accessor.sub_aggregation,
)?;
self.increment_bucket_if_in_bounds(
val1,
&bounds,
bucket_pos1,
docs[1],
&bucket_with_accessor.sub_aggregation,
)?;
self.increment_bucket_if_in_bounds(
val2,
&bounds,
bucket_pos2,
docs[2],
&bucket_with_accessor.sub_aggregation,
)?;
self.increment_bucket_if_in_bounds(
val3,
&bounds,
bucket_pos3,
docs[3],
&bucket_with_accessor.sub_aggregation,
)?;
}
for &doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get_val(doc), &self.field_type);
if !bounds.contains(val) {
continue;
let bucket_pos = get_bucket_num(val);
self.increment_bucket_if_in_bounds(
val,
&bounds,
bucket_pos,
*doc,
&bucket_with_accessor.sub_aggregation,
)?;
}
let bucket_pos = (get_bucket_num_f64(val, self.interval, self.offset) as i64
- self.first_bucket_num) as usize;

debug_assert_eq!(
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset)
);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {

@@ -1,2 +1,4 @@
// mod date_histogram;
mod histogram;
// pub use date_histogram::*;
pub use histogram::*;

@@ -1,7 +1,7 @@
use std::fmt::Debug;
use std::ops::Range;

use fastfield_codecs::MonotonicallyMappableToU64;
use columnar::MonotonicallyMappableToU64;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

@@ -11,7 +11,9 @@ use crate::aggregation::agg_req_with_accessor::{
use crate::aggregation::intermediate_agg_result::{
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::segment_agg_result::{
BucketCount, GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
};
use crate::aggregation::{
f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
};
@@ -114,7 +116,7 @@ impl From<Range<u64>> for InternalRangeAggregationRange {
}
}

#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug)]
pub(crate) struct SegmentRangeAndBucketEntry {
range: Range<u64>,
bucket: SegmentRangeBucketEntry,
@@ -122,18 +124,18 @@ pub(crate) struct SegmentRangeAndBucketEntry {

/// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype.
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug)]
pub struct SegmentRangeCollector {
/// The buckets containing the aggregation data.
buckets: Vec<SegmentRangeAndBucketEntry>,
field_type: Type,
}

#[derive(Clone, PartialEq)]
#[derive(Clone)]
pub(crate) struct SegmentRangeBucketEntry {
pub key: Key,
pub doc_count: u64,
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
pub sub_aggregation: Option<GenericSegmentAggregationResultsCollector>,
/// The from range of the bucket. Equals `f64::MIN` when `None`.
pub from: Option<f64>,
/// The to range of the bucket. Equals `f64::MAX` when `None`. Open interval, `to` is not
@@ -227,9 +229,11 @@ impl SegmentRangeCollector {
let sub_aggregation = if sub_aggregation.is_empty() {
None
} else {
Some(SegmentAggregationResultsCollector::from_req_and_validate(
sub_aggregation,
)?)
Some(
GenericSegmentAggregationResultsCollector::from_req_and_validate(
sub_aggregation,
)?,
)
};

Ok(SegmentRangeAndBucketEntry {
@@ -257,35 +261,18 @@ impl SegmentRangeCollector {
#[inline]
pub(crate) fn collect_block(
&mut self,
doc: &[DocId],
docs: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) -> crate::Result<()> {
let mut iter = doc.chunks_exact(4);
let accessor = bucket_with_accessor
.accessor
.as_single()
.expect("unexpected fast field cardinality");
for docs in iter.by_ref() {
let val1 = accessor.get_val(docs[0]);
let val2 = accessor.get_val(docs[1]);
let val3 = accessor.get_val(docs[2]);
let val4 = accessor.get_val(docs[3]);
let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3);
let bucket_pos4 = self.get_bucket_pos(val4);
let accessor = &bucket_with_accessor.accessor;
for doc in docs {
for val in accessor.values(*doc) {
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
}
}

self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
}
for &doc in iter.remainder() {
let val = accessor.get_val(doc);
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
for bucket in &mut self.buckets {
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
@@ -434,7 +421,7 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Resu
#[cfg(test)]
mod tests {

use fastfield_codecs::MonotonicallyMappableToU64;
use columnar::MonotonicallyMappableToU64;
use serde_json::Value;

use super::*;
@@ -1,6 +1,5 @@
use std::fmt::Debug;

use itertools::Itertools;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

@@ -11,10 +10,10 @@ use crate::aggregation::agg_req_with_accessor::{
use crate::aggregation::intermediate_agg_result::{
IntermediateBucketResult, IntermediateTermBucketEntry, IntermediateTermBucketResult,
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::segment_agg_result::{
build_segment_agg_collector, SegmentAggregationCollector,
};
use crate::error::DataCorruption;
use crate::fastfield::MultiValuedFastFieldReader;
use crate::schema::Type;
use crate::{DocId, TantivyError};

/// Creates a bucket for every unique term and counts the number of occurences.
@@ -75,9 +74,9 @@ use crate::{DocId, TantivyError};
/// ...
/// "aggregations": {
///   "genres": {
///     "doc_count_error_upper_bound": 0,
///     "sum_other_doc_count": 0,
///     "buckets": [
///       "doc_count_error_upper_bound": 0,
///       "sum_other_doc_count": 0,
///       "buckets": [
///       { "key": "drumnbass", "doc_count": 6 },
///       { "key": "raggae", "doc_count": 4 },
///       { "key": "jazz", "doc_count": 2 }
@@ -196,17 +195,16 @@ impl TermsAggregationInternal {
}
}

#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug, Default)]
/// Container to store term_ids and their buckets.
struct TermBuckets {
pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
blueprint: Option<SegmentAggregationResultsCollector>,
}

#[derive(Clone, PartialEq, Default)]
#[derive(Clone, Default)]
struct TermBucketEntry {
doc_count: u64,
sub_aggregations: Option<SegmentAggregationResultsCollector>,
sub_aggregations: Option<Box<dyn SegmentAggregationCollector>>,
}

impl Debug for TermBucketEntry {
@@ -218,7 +216,7 @@ impl Debug for TermBucketEntry {
}

impl TermBucketEntry {
fn from_blueprint(blueprint: &Option<SegmentAggregationResultsCollector>) -> Self {
fn from_blueprint(blueprint: &Option<Box<dyn SegmentAggregationCollector>>) -> Self {
Self {
doc_count: 0,
sub_aggregations: blueprint.clone(),
@@ -243,50 +241,6 @@ impl TermBucketEntry {
}

impl TermBuckets {
pub(crate) fn from_req_and_validate(
sub_aggregation: &AggregationsWithAccessor,
_max_term_id: usize,
) -> crate::Result<Self> {
let has_sub_aggregations = sub_aggregation.is_empty();

let blueprint = if has_sub_aggregations {
let sub_aggregation =
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
Some(sub_aggregation)
} else {
None
};

Ok(TermBuckets {
blueprint,
entries: Default::default(),
})
}

fn increment_bucket(
&mut self,
term_ids: &[u64],
doc: DocId,
sub_aggregation: &AggregationsWithAccessor,
bucket_count: &BucketCount,
blueprint: &Option<SegmentAggregationResultsCollector>,
) -> crate::Result<()> {
for &term_id in term_ids {
let entry = self.entries.entry(term_id as u32).or_insert_with(|| {
bucket_count.add_count(1);

TermBucketEntry::from_blueprint(blueprint)
});
entry.doc_count += 1;
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
sub_aggregations.collect(doc, sub_aggregation)?;
}
}
bucket_count.validate_bucket_count()?;

Ok(())
}

fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
for entry in &mut self.entries.values_mut() {
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
@@ -299,13 +253,12 @@ impl TermBuckets {

/// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype.
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug)]
pub struct SegmentTermCollector {
/// The buckets containing the aggregation data.
term_buckets: TermBuckets,
req: TermsAggregationInternal,
field_type: Type,
blueprint: Option<SegmentAggregationResultsCollector>,
blueprint: Option<Box<dyn SegmentAggregationCollector>>,
}

pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) {
@@ -317,12 +270,8 @@ impl SegmentTermCollector {
pub(crate) fn from_req_and_validate(
req: &TermsAggregation,
sub_aggregations: &AggregationsWithAccessor,
field_type: Type,
accessor: &MultiValuedFastFieldReader<u64>,
) -> crate::Result<Self> {
let max_term_id = accessor.max_value();
let term_buckets =
TermBuckets::from_req_and_validate(sub_aggregations, max_term_id as usize)?;
let term_buckets = TermBuckets::default();

if let Some(custom_order) = req.order.as_ref() {
// Validate sub aggregtion exists
@@ -340,8 +289,7 @@ impl SegmentTermCollector {

let has_sub_aggregations = !sub_aggregations.is_empty();
let blueprint = if has_sub_aggregations {
let sub_aggregation =
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregations)?;
let sub_aggregation = build_segment_agg_collector(sub_aggregations)?;
Some(sub_aggregation)
} else {
None
@@ -350,7 +298,6 @@ impl SegmentTermCollector {
Ok(SegmentTermCollector {
req: TermsAggregationInternal::from_req(req),
term_buckets,
field_type,
blueprint,
})
}
@@ -362,13 +309,19 @@ impl SegmentTermCollector {
let mut entries: Vec<(u32, TermBucketEntry)> =
self.term_buckets.entries.into_iter().collect();

let order_by_key = self.req.order.target == OrderTarget::Key;
let order_by_sub_aggregation =
matches!(self.req.order.target, OrderTarget::SubAggregation(_));

match self.req.order.target {
OrderTarget::Key => {
// defer order and cut_off after loading the texts from the dictionary
// We rely on the fact, that term ordinals match the order of the strings
// TODO: We could have a special collector, that keeps only TOP n results at any
// time.
if self.req.order.order == Order::Desc {
entries.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.0));
} else {
entries.sort_unstable_by_key(|bucket| bucket.0);
}
}
OrderTarget::SubAggregation(_name) => {
// don't sort and cut off since it's hard to make assumptions on the quality of the
@@ -384,34 +337,40 @@ impl SegmentTermCollector {
}
}

let (term_doc_count_before_cutoff, mut sum_other_doc_count) =
if order_by_key || order_by_sub_aggregation {
(0, 0)
} else {
cut_off_buckets(&mut entries, self.req.segment_size as usize)
};
let (term_doc_count_before_cutoff, sum_other_doc_count) = if order_by_sub_aggregation {
(0, 0)
} else {
cut_off_buckets(&mut entries, self.req.segment_size as usize)
};

let inverted_index = agg_with_accessor
.inverted_index
.str_dict_column
.as_ref()
.expect("internal error: inverted index not loaded for term aggregation");
let term_dict = inverted_index.terms();
let term_dict = inverted_index;

let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
let mut buffer = vec![];
let mut buffer = String::new();
for (term_id, entry) in entries {
term_dict
.ord_to_term(term_id as u64, &mut buffer)
.expect("could not find term");
if !term_dict.ord_to_str(term_id as u64, &mut buffer)? {
return Err(TantivyError::InternalError(format!(
"Couldn't find term_id {} in dict",
term_id
)));
}
dict.insert(
String::from_utf8(buffer.to_vec())
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?,
buffer.to_string(),
entry.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
);
}
if self.req.min_doc_count == 0 {
let mut stream = term_dict.stream()?;
// TODO: Handle rev streaming for descending sorting by keys
let mut stream = term_dict.dictionary().stream()?;
while let Some((key, _ord)) = stream.next() {
if dict.len() >= self.req.segment_size as usize {
break;
}

let key = std::str::from_utf8(key)
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
if !dict.contains_key(key) {
@@ -420,20 +379,6 @@ impl SegmentTermCollector {
}
}

if order_by_key {
let mut dict_entries = dict.into_iter().collect_vec();
if self.req.order.order == Order::Desc {
dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key1.cmp(key2));
} else {
dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key2.cmp(key1));
}
let (_, sum_other_docs) =
cut_off_buckets(&mut dict_entries, self.req.segment_size as usize);

sum_other_doc_count += sum_other_docs;
dict = dict_entries.into_iter().collect();
}

Ok(IntermediateBucketResult::Terms(
IntermediateTermBucketResult {
entries: dict,
@@ -446,65 +391,26 @@ impl SegmentTermCollector {
|
||||
#[inline]
|
||||
pub(crate) fn collect_block(
|
||||
&mut self,
|
||||
doc: &[DocId],
|
||||
docs: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) -> crate::Result<()> {
|
||||
let accessor = bucket_with_accessor
|
||||
.accessor
|
||||
.as_multi()
|
||||
.expect("unexpected fast field cardinatility");
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
let mut vals1 = vec![];
|
||||
let mut vals2 = vec![];
|
||||
let mut vals3 = vec![];
|
||||
let mut vals4 = vec![];
|
||||
for docs in iter.by_ref() {
|
||||
accessor.get_vals(docs[0], &mut vals1);
|
||||
accessor.get_vals(docs[1], &mut vals2);
|
||||
accessor.get_vals(docs[2], &mut vals3);
|
||||
accessor.get_vals(docs[3], &mut vals4);
|
||||
let accessor = &bucket_with_accessor.accessor;
|
||||
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals1,
|
||||
docs[0],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals2,
|
||||
docs[1],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals3,
|
||||
docs[2],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals4,
|
||||
docs[3],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
for doc in docs {
|
||||
for term_id in accessor.values(*doc) {
|
||||
let entry = self
|
||||
.term_buckets
|
||||
.entries
|
||||
.entry(term_id as u32)
|
||||
.or_insert_with(|| TermBucketEntry::from_blueprint(&self.blueprint));
|
||||
entry.doc_count += 1;
|
||||
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
|
||||
sub_aggregations.collect(*doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
for &doc in iter.remainder() {
|
||||
accessor.get_vals(doc, &mut vals1);
|
||||
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals1,
|
||||
doc,
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
}
|
||||
if force_flush {
|
||||
self.term_buckets
|
||||
.force_flush(&bucket_with_accessor.sub_aggregation)?;
|
||||
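
The `collect_block` above drops the manually unrolled four-document batches (`chunks_exact(4)` plus one `increment_bucket` call per slot) in favor of a plain loop over a multi-valued column accessor. A minimal sketch of that per-document, per-value access pattern, with illustrative types standing in for the tantivy-columnar column (the names below are assumptions, not the real API):

use std::collections::HashMap;

/// Illustrative stand-in for a multi-valued fast-field column:
/// `offsets[doc]..offsets[doc + 1]` delimits the values of `doc`.
struct MultiValueColumn {
    offsets: Vec<usize>,
    vals: Vec<u64>,
}

impl MultiValueColumn {
    /// All values associated with `doc`.
    fn values(&self, doc: usize) -> impl Iterator<Item = u64> + '_ {
        self.vals[self.offsets[doc]..self.offsets[doc + 1]]
            .iter()
            .copied()
    }
}

/// Per-term doc counting, structured like the terms aggregation above.
fn count_terms(column: &MultiValueColumn, docs: &[usize]) -> HashMap<u64, u64> {
    let mut counts: HashMap<u64, u64> = HashMap::new();
    for &doc in docs {
        for term_id in column.values(doc) {
            *counts.entry(term_id).or_insert(0) += 1;
        }
    }
    counts
}

fn main() {
    // doc 0 -> [3, 3], doc 1 -> [], doc 2 -> [7]
    let column = MultiValueColumn {
        offsets: vec![0, 2, 2, 3],
        vals: vec![3, 3, 7],
    };
    let counts = count_terms(&column, &[0, 1, 2]);
    assert_eq!(counts[&3], 2);
    assert_eq!(counts[&7], 1);
}
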
@@ -923,14 +829,14 @@ mod tests {
        ];
        let index = get_test_index_from_values_and_terms(merge_segments, &segment_and_terms)?;

        // key desc
        // key asc
        let agg_req: Aggregations = vec![(
            "my_texts".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                    field: "string_id".to_string(),
                    order: Some(CustomOrder {
                        order: Order::Desc,
                        order: Order::Asc,
                        target: OrderTarget::Key,
                    }),
                    ..Default::default()
@@ -957,7 +863,7 @@ mod tests {
                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                    field: "string_id".to_string(),
                    order: Some(CustomOrder {
                        order: Order::Desc,
                        order: Order::Asc,
                        target: OrderTarget::Key,
                    }),
                    size: Some(2),
@@ -981,14 +887,14 @@ mod tests {

        assert_eq!(res["my_texts"]["sum_other_doc_count"], 3);

        // key desc and segment_size cut_off
        // key asc and segment_size cut_off
        let agg_req: Aggregations = vec![(
            "my_texts".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                    field: "string_id".to_string(),
                    order: Some(CustomOrder {
                        order: Order::Desc,
                        order: Order::Asc,
                        target: OrderTarget::Key,
                    }),
                    size: Some(2),
@@ -1011,14 +917,14 @@ mod tests {
            serde_json::Value::Null
        );

        // key asc
        // key desc
        let agg_req: Aggregations = vec![(
            "my_texts".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                    field: "string_id".to_string(),
                    order: Some(CustomOrder {
                        order: Order::Asc,
                        order: Order::Desc,
                        target: OrderTarget::Key,
                    }),
                    ..Default::default()
@@ -1038,14 +944,14 @@ mod tests {
        assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 5);
        assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);

        // key asc, size cut_off
        // key desc, size cut_off
        let agg_req: Aggregations = vec![(
            "my_texts".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                    field: "string_id".to_string(),
                    order: Some(CustomOrder {
                        order: Order::Asc,
                        order: Order::Desc,
                        target: OrderTarget::Key,
                    }),
                    size: Some(2),
@@ -1068,14 +974,14 @@ mod tests {
        );
        assert_eq!(res["my_texts"]["sum_other_doc_count"], 5);

        // key asc, segment_size cut_off
        // key desc, segment_size cut_off
        let agg_req: Aggregations = vec![(
            "my_texts".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                    field: "string_id".to_string(),
                    order: Some(CustomOrder {
                        order: Order::Asc,
                        order: Order::Desc,
                        target: OrderTarget::Key,
                    }),
                    size: Some(2),
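
These tests exercise the size and segment_size cut-offs together with the key order. The cut-off itself keeps the first `limit` buckets after sorting and folds the remainder into `sum_other_doc_count`. A self-contained sketch of that logic (the real `cut_off_buckets` helper differs in signature and operates on tantivy's bucket entry types):

/// Keeps at most `limit` buckets and returns the doc count folded into
/// the truncated tail, mirroring `sum_other_doc_count` semantics.
fn cut_off_buckets(entries: &mut Vec<(String, u64)>, limit: usize) -> u64 {
    let mut sum_other_doc_count = 0;
    while entries.len() > limit {
        let (_key, doc_count) = entries.pop().expect("len > limit >= 0");
        sum_other_doc_count += doc_count;
    }
    sum_other_doc_count
}

fn main() {
    let mut buckets = vec![
        ("terma".to_string(), 4),
        ("termb".to_string(), 2),
        ("termc".to_string(), 3),
    ];
    let other = cut_off_buckets(&mut buckets, 2);
    assert_eq!(buckets.len(), 2);
    assert_eq!(other, 3); // "termc" dropped, its docs counted as "other"
}
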
@@ -1207,36 +1113,37 @@ mod tests {
        Ok(())
    }

    #[test]
    fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
        let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect();
        let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
    // TODO reenable with memory limit
    //#[test]
    // fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
    // let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect();
    // let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];

        let index = get_test_index_from_terms(true, &terms_per_segment)?;
    // let index = get_test_index_from_terms(true, &terms_per_segment)?;

        let agg_req: Aggregations = vec![(
            "my_texts".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                    field: "string_id".to_string(),
                    min_doc_count: Some(0),
                    ..Default::default()
                }),
                sub_aggregation: Default::default(),
            }),
        )]
        .into_iter()
        .collect();
    // let agg_req: Aggregations = vec![(
    //"my_texts".to_string(),
    // Aggregation::Bucket(BucketAggregation {
    // bucket_agg: BucketAggregationType::Terms(TermsAggregation {
    // field: "string_id".to_string(),
    // min_doc_count: Some(0),
    //..Default::default()
    //}),
    // sub_aggregation: Default::default(),
    //}),
    //)]
    //.into_iter()
    //.collect();

        let res = exec_request_with_query(agg_req, &index, None);
    // let res = exec_request_with_query(agg_req, &index, None);

        assert!(res.is_err());
    // assert!(res.is_err());

        Ok(())
    }
    // Ok(())
    //}

    #[test]
    fn terms_aggregation_multi_token_per_doc() -> crate::Result<()> {
    fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> {
        let terms = vec!["Hello Hello", "Hallo Hallo"];

        let index = get_test_index_from_terms(true, &[terms])?;
@@ -1256,12 +1163,13 @@ mod tests {
        .collect();

        let res = exec_request_with_query(agg_req, &index, None).unwrap();
        println!("{}", serde_json::to_string_pretty(&res).unwrap());

        assert_eq!(res["my_texts"]["buckets"][0]["key"], "hello");
        assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
        assert_eq!(res["my_texts"]["buckets"][0]["key"], "Hallo Hallo");
        assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 1);

        assert_eq!(res["my_texts"]["buckets"][1]["key"], "hallo");
        assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 2);
        assert_eq!(res["my_texts"]["buckets"][1]["key"], "Hello Hello");
        assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 1);

        Ok(())
    }
@@ -1352,68 +1260,3 @@ mod tests {
        Ok(())
    }
}

#[cfg(all(test, feature = "unstable"))]
mod bench {

    use itertools::Itertools;
    use rand::seq::SliceRandom;
    use rand::thread_rng;

    use super::*;

    fn get_collector_with_buckets(num_docs: u64) -> TermBuckets {
        TermBuckets::from_req_and_validate(&Default::default(), num_docs as usize).unwrap()
    }

    fn get_rand_terms(total_terms: u64, num_terms_returned: u64) -> Vec<u64> {
        let mut rng = thread_rng();

        let all_terms = (0..total_terms - 1).collect_vec();

        let mut vals = vec![];
        for _ in 0..num_terms_returned {
            let val = all_terms.as_slice().choose(&mut rng).unwrap();
            vals.push(*val);
        }

        vals
    }

    fn bench_term_buckets(b: &mut test::Bencher, num_terms: u64, total_terms: u64) {
        let mut collector = get_collector_with_buckets(total_terms);
        let vals = get_rand_terms(total_terms, num_terms);
        let aggregations_with_accessor: AggregationsWithAccessor = Default::default();
        let bucket_count: BucketCount = BucketCount {
            bucket_count: Default::default(),
            max_bucket_count: 1_000_001u32,
        };
        b.iter(|| {
            for &val in &vals {
                collector
                    .increment_bucket(&[val], 0, &aggregations_with_accessor, &bucket_count, &None)
                    .unwrap();
            }
        })
    }

    #[bench]
    fn bench_term_buckets_500_of_1_000_000(b: &mut test::Bencher) {
        bench_term_buckets(b, 500u64, 1_000_000u64)
    }

    #[bench]
    fn bench_term_buckets_1_000_000_of_50_000(b: &mut test::Bencher) {
        bench_term_buckets(b, 1_000_000u64, 50_000u64)
    }

    #[bench]
    fn bench_term_buckets_1_000_000_of_50(b: &mut test::Bencher) {
        bench_term_buckets(b, 1_000_000u64, 50u64)
    }

    #[bench]
    fn bench_term_buckets_1_000_000_of_1_000_000(b: &mut test::Bencher) {
        bench_term_buckets(b, 1_000_000u64, 1_000_000u64)
    }
}

@@ -4,7 +4,7 @@ use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults;
use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationResultsCollector;
use super::segment_agg_result::{build_segment_agg_collector, SegmentAggregationCollector};
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::schema::Schema;
@@ -137,7 +137,7 @@ fn merge_fruits(
/// `AggregationSegmentCollector` does the aggregation collection on a segment.
pub struct AggregationSegmentCollector {
    aggs_with_accessor: AggregationsWithAccessor,
    result: SegmentAggregationResultsCollector,
    result: Box<dyn SegmentAggregationCollector>,
    error: Option<TantivyError>,
}

@@ -151,8 +151,7 @@ impl AggregationSegmentCollector {
    ) -> crate::Result<Self> {
        let aggs_with_accessor =
            get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
        let result =
            SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
        let result = build_segment_agg_collector(&aggs_with_accessor)?;
        Ok(AggregationSegmentCollector {
            aggs_with_accessor,
            result,

@@ -222,24 +222,23 @@ pub enum IntermediateMetricResult {

impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
    fn from(tree: SegmentMetricResultCollector) -> Self {
        use super::metric::SegmentStatsType;
        match tree {
            SegmentMetricResultCollector::Stats(collector) => match collector.collecting_for {
                super::metric::SegmentStatsType::Average => IntermediateMetricResult::Average(
                SegmentStatsType::Average => IntermediateMetricResult::Average(
                    IntermediateAverage::from_collector(collector),
                ),
                super::metric::SegmentStatsType::Count => {
                SegmentStatsType::Count => {
                    IntermediateMetricResult::Count(IntermediateCount::from_collector(collector))
                }
                super::metric::SegmentStatsType::Max => {
                SegmentStatsType::Max => {
                    IntermediateMetricResult::Max(IntermediateMax::from_collector(collector))
                }
                super::metric::SegmentStatsType::Min => {
                SegmentStatsType::Min => {
                    IntermediateMetricResult::Min(IntermediateMin::from_collector(collector))
                }
                super::metric::SegmentStatsType::Stats => {
                    IntermediateMetricResult::Stats(collector.stats)
                }
                super::metric::SegmentStatsType::Sum => {
                SegmentStatsType::Stats => IntermediateMetricResult::Stats(collector.stats),
                SegmentStatsType::Sum => {
                    IntermediateMetricResult::Sum(IntermediateSum::from_collector(collector))
                }
            },
@@ -499,7 +498,7 @@ impl IntermediateTermBucketResult {
        match req.order.target {
            OrderTarget::Key => {
                buckets.sort_by(|left, right| {
                    if req.order.order == Order::Desc {
                    if req.order.order == Order::Asc {
                        left.key.partial_cmp(&right.key)
                    } else {
                        right.key.partial_cmp(&left.key)
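
The `Order::Desc` to `Order::Asc` swap above flips which branch of the comparator sorts keys ascending. Stripped of tantivy's types, the pattern is just an order-parameterized comparator (a sketch; the real code compares bucket keys with `partial_cmp`):

#[derive(Clone, Copy, PartialEq)]
enum Order {
    Asc,
    Desc,
}

/// Sorts (key, doc_count) buckets by key in the requested order.
fn sort_buckets_by_key(buckets: &mut [(String, u64)], order: Order) {
    buckets.sort_by(|(left, _), (right, _)| {
        if order == Order::Asc {
            left.cmp(right)
        } else {
            right.cmp(left)
        }
    });
}

fn main() {
    let mut buckets = vec![("b".to_string(), 1), ("a".to_string(), 2)];
    sort_buckets_by_key(&mut buckets, Order::Asc);
    assert_eq!(buckets[0].0, "a");
    sort_buckets_by_key(&mut buckets, Order::Desc);
    assert_eq!(buckets[0].0, "b");
}
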
@@ -1,7 +1,13 @@
use fastfield_codecs::Column;
use columnar::Column;
use serde::{Deserialize, Serialize};

use crate::aggregation::f64_from_fastfield_u64;
use super::*;
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
use crate::aggregation::intermediate_agg_result::{
    IntermediateAggregationResults, IntermediateMetricResult,
};
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
use crate::aggregation::{f64_from_fastfield_u64, VecWithNames};
use crate::schema::Type;
use crate::{DocId, TantivyError};

@@ -160,27 +166,74 @@ impl SegmentStatsCollector {
            stats: IntermediateStats::default(),
        }
    }
    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
        let mut iter = doc.chunks_exact(4);
        for docs in iter.by_ref() {
            let val1 = field.get_val(docs[0]);
            let val2 = field.get_val(docs[1]);
            let val3 = field.get_val(docs[2]);
            let val4 = field.get_val(docs[3]);
            let val1 = f64_from_fastfield_u64(val1, &self.field_type);
            let val2 = f64_from_fastfield_u64(val2, &self.field_type);
            let val3 = f64_from_fastfield_u64(val3, &self.field_type);
            let val4 = f64_from_fastfield_u64(val4, &self.field_type);
    pub(crate) fn collect_block(&mut self, docs: &[DocId], field: &Column<u64>) {
        // TODO special case for Required, Optional column type
        for doc in docs {
            for val in field.values(*doc) {
                let val1 = f64_from_fastfield_u64(val, &self.field_type);
                self.stats.collect(val1);
            }
        }
    }
}

impl SegmentAggregationCollector for SegmentStatsCollector {
    fn into_intermediate_aggregations_result(
        self: Box<Self>,
        agg_with_accessor: &AggregationsWithAccessor,
    ) -> crate::Result<IntermediateAggregationResults> {
        let name = agg_with_accessor.metrics.keys[0].to_string();

        let intermediate_metric_result = match self.collecting_for {
            SegmentStatsType::Average => {
                IntermediateMetricResult::Average(IntermediateAverage::from_collector(*self))
            }
            SegmentStatsType::Count => {
                IntermediateMetricResult::Count(IntermediateCount::from_collector(*self))
            }
            SegmentStatsType::Max => {
                IntermediateMetricResult::Max(IntermediateMax::from_collector(*self))
            }
            SegmentStatsType::Min => {
                IntermediateMetricResult::Min(IntermediateMin::from_collector(*self))
            }
            SegmentStatsType::Stats => IntermediateMetricResult::Stats(self.stats),
            SegmentStatsType::Sum => {
                IntermediateMetricResult::Sum(IntermediateSum::from_collector(*self))
            }
        };

        let metrics = Some(VecWithNames::from_entries(vec![(
            name,
            intermediate_metric_result,
        )]));

        Ok(IntermediateAggregationResults {
            metrics,
            buckets: None,
        })
    }

    fn collect(
        &mut self,
        doc: crate::DocId,
        agg_with_accessor: &AggregationsWithAccessor,
    ) -> crate::Result<()> {
        let accessor = &agg_with_accessor.metrics.values[0].accessor;
        for val in accessor.values(doc) {
            let val1 = f64_from_fastfield_u64(val, &self.field_type);
            self.stats.collect(val1);
            self.stats.collect(val2);
            self.stats.collect(val3);
            self.stats.collect(val4);
        }
        for &doc in iter.remainder() {
            let val = field.get_val(doc);
            let val = f64_from_fastfield_u64(val, &self.field_type);
            self.stats.collect(val);
        }

        Ok(())
    }

    fn flush_staged_docs(
        &mut self,
        _agg_with_accessor: &AggregationsWithAccessor,
        _force_flush: bool,
    ) -> crate::Result<()> {
        Ok(())
    }
}

@@ -172,8 +172,8 @@ pub use collector::{
    AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
    MAX_BUCKET_COUNT,
};
use columnar::MonotonicallyMappableToU64;
pub(crate) use date::format_date;
use fastfield_codecs::MonotonicallyMappableToU64;
use itertools::Itertools;
use serde::{Deserialize, Serialize};

@@ -182,7 +182,7 @@ use crate::schema::Type;
/// Represents an associative array `(key => values)` in a very efficient manner.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub(crate) struct VecWithNames<T: Clone> {
    values: Vec<T>,
    pub(crate) values: Vec<T>,
    keys: Vec<String>,
}

@@ -248,9 +248,6 @@ impl<T: Clone> VecWithNames<T> {
    fn values_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ {
        self.values.iter_mut()
    }
    fn entries(&self) -> impl Iterator<Item = (&str, &T)> + '_ {
        self.keys().zip(self.values.iter())
    }
    fn is_empty(&self) -> bool {
        self.keys.is_empty()
    }
@@ -336,8 +333,9 @@ mod tests {
    use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
    use crate::aggregation::segment_agg_result::DOC_BLOCK_SIZE;
    use crate::aggregation::DistributedAggregationCollector;
    use crate::indexer::NoMergePolicy;
    use crate::query::{AllQuery, TermQuery};
    use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
    use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
    use crate::{DateTime, Index, Term};

    fn get_avg_req(field_name: &str) -> Aggregation {
@@ -432,8 +430,7 @@ mod tests {
        let text_field = schema_builder.add_text_field("text", text_fieldtype.clone());
        let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype);
        let string_field_id = schema_builder.add_text_field("string_id", STRING | FAST);
        let score_fieldtype =
            crate::schema::NumericOptions::default().set_fast();
        let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
        let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
        let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
        let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
@@ -445,6 +442,7 @@ mod tests {
        {
            // let mut index_writer = index.writer_for_tests()?;
            let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
            index_writer.set_merge_policy(Box::new(NoMergePolicy));
            for values in segment_and_values {
                for (i, term) in values {
                    let i = *i;
@@ -656,13 +654,11 @@ mod tests {
        let text_field = schema_builder.add_text_field("text", text_fieldtype);
        let date_field = schema_builder.add_date_field("date", FAST);
        schema_builder.add_text_field("dummy_text", STRING);
        let score_fieldtype =
            crate::schema::NumericOptions::default().set_fast();
        let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
        let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
        let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());

        let multivalue =
            crate::schema::NumericOptions::default().set_fast();
        let multivalue = crate::schema::NumericOptions::default().set_fast();
        let scores_field_i64 = schema_builder.add_i64_field("scores_i64", multivalue);

        let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
@@ -1147,7 +1143,7 @@ mod tests {
        let agg_res = avg_on_field("dummy_text");
        assert_eq!(
            format!("{:?}", agg_res),
            r#"InvalidArgument("Only fast fields of type f64, u64, i64 are supported, but got Str ")"#
            r#"InvalidArgument("No numerical fast field found for field: dummy_text")"#
        );

        let agg_res = avg_on_field("not_exist_field");
@@ -1156,12 +1152,6 @@ mod tests {
            r#"FieldNotFound("not_exist_field")"#
        );

        let agg_res = avg_on_field("scores_i64");
        assert_eq!(
            format!("{:?}", agg_res),
            r#"InvalidArgument("Invalid field cardinality on field scores_i64 expected SingleValue, but got MultiValues")"#
        );

        Ok(())
    }

@@ -1173,11 +1163,14 @@ mod tests {
    use test::{self, Bencher};

    use super::*;
    use crate::aggregation::bucket::{HistogramAggregation, HistogramBounds, TermsAggregation};
    use crate::aggregation::bucket::{
        CustomOrder, HistogramAggregation, HistogramBounds, Order, OrderTarget,
        TermsAggregation,
    };
    use crate::aggregation::metric::StatsAggregation;
    use crate::query::AllQuery;

    fn get_test_index_bench(merge_segments: bool) -> crate::Result<Index> {
    fn get_test_index_bench(_merge_segments: bool) -> crate::Result<Index> {
        let mut schema_builder = Schema::builder();
        let text_fieldtype = crate::schema::TextOptions::default()
            .set_indexing_options(
@@ -1189,20 +1182,19 @@ mod tests {
        schema_builder.add_text_field("text_many_terms", STRING | FAST);
        let text_field_few_terms =
            schema_builder.add_text_field("text_few_terms", STRING | FAST);
        let score_fieldtype =
            crate::schema::NumericOptions::default().set_fast();
        let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
        let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
        let score_field_f64 =
            schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
        let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
        let index = Index::create_from_tempdir(schema_builder.build())?;
        let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
        let many_terms_data = (0..15_000)
        let many_terms_data = (0..150_000)
            .map(|num| format!("author{}", num))
            .collect::<Vec<_>>();
        {
            let mut rng = thread_rng();
            let mut index_writer = index.writer_for_tests()?;
            let mut index_writer = index.writer_with_num_threads(1, 100_000_000)?;
            // writing the segment
            for _ in 0..1_000_000 {
                let val: f64 = rng.gen_range(0.0..1_000_000.0);
@@ -1217,14 +1209,6 @@ mod tests {
            }
            index_writer.commit()?;
        }
        if merge_segments {
            let segment_ids = index
                .searchable_segment_ids()
                .expect("Searchable segments failed.");
            let mut index_writer = index.writer_for_tests()?;
            index_writer.merge(&segment_ids).wait()?;
            index_writer.wait_merging_threads()?;
        }

        Ok(index)
    }
@@ -1376,7 +1360,42 @@ mod tests {
    }

    #[bench]
    fn bench_aggregation_terms_many(b: &mut Bencher) {
    fn bench_aggregation_terms_many_with_sub_agg(b: &mut Bencher) {
        let index = get_test_index_bench(false).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let sub_agg_req: Aggregations = vec![(
                "average_f64".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("score_f64".to_string()),
                )),
            )]
            .into_iter()
            .collect();

            let agg_req: Aggregations = vec![(
                "my_texts".to_string(),
                Aggregation::Bucket(BucketAggregation {
                    bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                        field: "text_many_terms".to_string(),
                        ..Default::default()
                    }),
                    sub_aggregation: sub_agg_req,
                }),
            )]
            .into_iter()
            .collect();

            let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    #[bench]
    fn bench_aggregation_terms_many2(b: &mut Bencher) {
        let index = get_test_index_bench(false).unwrap();
        let reader = index.reader().unwrap();

@@ -1401,6 +1420,36 @@ mod tests {
        });
    }

    #[bench]
    fn bench_aggregation_terms_many_order_by_term(b: &mut Bencher) {
        let index = get_test_index_bench(false).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let agg_req: Aggregations = vec![(
                "my_texts".to_string(),
                Aggregation::Bucket(BucketAggregation {
                    bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                        field: "text_many_terms".to_string(),
                        order: Some(CustomOrder {
                            order: Order::Desc,
                            target: OrderTarget::Key,
                        }),
                        ..Default::default()
                    }),
                    sub_aggregation: Default::default(),
                }),
            )]
            .into_iter()
            .collect();

            let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    #[bench]
    fn bench_aggregation_range_only(b: &mut Bencher) {
        let index = get_test_index_bench(false).unwrap();

@@ -25,15 +25,89 @@ use crate::{DocId, TantivyError};
pub(crate) const DOC_BLOCK_SIZE: usize = 64;
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];

#[derive(Clone, PartialEq)]
pub(crate) struct SegmentAggregationResultsCollector {
pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
    fn into_intermediate_aggregations_result(
        self: Box<Self>,
        agg_with_accessor: &AggregationsWithAccessor,
    ) -> crate::Result<IntermediateAggregationResults>;

    fn collect(
        &mut self,
        doc: crate::DocId,
        agg_with_accessor: &AggregationsWithAccessor,
    ) -> crate::Result<()>;

    fn flush_staged_docs(
        &mut self,
        agg_with_accessor: &AggregationsWithAccessor,
        force_flush: bool,
    ) -> crate::Result<()>;
}

pub(crate) trait CollectorClone {
    fn clone_box(&self) -> Box<dyn SegmentAggregationCollector>;
}

impl<T> CollectorClone for T
where T: 'static + SegmentAggregationCollector + Clone
{
    fn clone_box(&self) -> Box<dyn SegmentAggregationCollector> {
        Box::new(self.clone())
    }
}

impl Clone for Box<dyn SegmentAggregationCollector> {
    fn clone(&self) -> Box<dyn SegmentAggregationCollector> {
        self.clone_box()
    }
}
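
`CollectorClone` above is the standard "clone box" workaround: `Clone` is not object-safe, so cloning a `Box<dyn SegmentAggregationCollector>` is routed through a `clone_box` method, which every `'static + Clone` implementor receives via the blanket impl. A minimal, self-contained demonstration of the same pattern:

trait Collector: CollectorClone {
    fn collect(&mut self, doc: u32);
}

trait CollectorClone {
    fn clone_box(&self) -> Box<dyn Collector>;
}

// Blanket impl: any 'static + Clone collector gets `clone_box` for free.
impl<T> CollectorClone for T
where T: 'static + Collector + Clone
{
    fn clone_box(&self) -> Box<dyn Collector> {
        Box::new(self.clone())
    }
}

impl Clone for Box<dyn Collector> {
    fn clone(&self) -> Box<dyn Collector> {
        self.clone_box()
    }
}

#[derive(Clone, Default)]
struct CountCollector {
    count: u64,
}

impl Collector for CountCollector {
    fn collect(&mut self, _doc: u32) {
        self.count += 1;
    }
}

fn main() {
    let mut a: Box<dyn Collector> = Box::new(CountCollector::default());
    a.collect(0);
    let mut b = a.clone(); // works even though `Clone` is not object-safe
    b.collect(1);
}
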
pub(crate) fn build_segment_agg_collector(
    req: &AggregationsWithAccessor,
) -> crate::Result<Box<dyn SegmentAggregationCollector>> {
    // Single metric special case
    if req.buckets.is_empty() && req.metrics.len() == 1 {
        let req = &req.metrics.values[0];
        let stats_collector = match &req.metric {
            MetricAggregation::Average(AverageAggregation { .. }) => {
                SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Average)
            }
            MetricAggregation::Count(CountAggregation { .. }) => {
                SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Count)
            }
            MetricAggregation::Max(MaxAggregation { .. }) => {
                SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Max)
            }
            MetricAggregation::Min(MinAggregation { .. }) => {
                SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Min)
            }
            MetricAggregation::Stats(StatsAggregation { .. }) => {
                SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Stats)
            }
            MetricAggregation::Sum(SumAggregation { .. }) => {
                SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Sum)
            }
        };

        return Ok(Box::new(stats_collector));
    }

    let agg = GenericSegmentAggregationResultsCollector::from_req_and_validate(req)?;
    Ok(Box::new(agg))
}

#[derive(Clone)]
/// The GenericSegmentAggregationResultsCollector is the generic version of the collector, which
/// can handle arbitrary complexity of sub-aggregations. Ideally we never have to pick this one
/// and can provide specialized versions instead, that remove some of its overhead.
pub(crate) struct GenericSegmentAggregationResultsCollector {
    pub(crate) metrics: Option<VecWithNames<SegmentMetricResultCollector>>,
    pub(crate) buckets: Option<VecWithNames<SegmentBucketResultCollector>>,
    staged_docs: DocBlock,
    num_staged_docs: usize,
}

impl Default for SegmentAggregationResultsCollector {
impl Default for GenericSegmentAggregationResultsCollector {
    fn default() -> Self {
        Self {
            metrics: Default::default(),
@@ -44,7 +118,7 @@ impl Default for SegmentAggregationResultsCollector {
        }
    }

impl Debug for SegmentAggregationResultsCollector {
impl Debug for GenericSegmentAggregationResultsCollector {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SegmentAggregationResultsCollector")
            .field("metrics", &self.metrics)
@@ -55,9 +129,9 @@ impl Debug for SegmentAggregationResultsCollector {
    }
}

impl SegmentAggregationResultsCollector {
    pub fn into_intermediate_aggregations_result(
        self,
impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
    fn into_intermediate_aggregations_result(
        self: Box<Self>,
        agg_with_accessor: &AggregationsWithAccessor,
    ) -> crate::Result<IntermediateAggregationResults> {
        let buckets = if let Some(buckets) = self.buckets {
@@ -75,47 +149,7 @@ impl SegmentAggregationResultsCollector {
        Ok(IntermediateAggregationResults { metrics, buckets })
    }

    pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
        let buckets = req
            .buckets
            .entries()
            .map(|(key, req)| {
                Ok((
                    key.to_string(),
                    SegmentBucketResultCollector::from_req_and_validate(req)?,
                ))
            })
            .collect::<crate::Result<Vec<(String, _)>>>()?;
        let metrics = req
            .metrics
            .entries()
            .map(|(key, req)| {
                Ok((
                    key.to_string(),
                    SegmentMetricResultCollector::from_req_and_validate(req)?,
                ))
            })
            .collect::<crate::Result<Vec<(String, _)>>>()?;
        let metrics = if metrics.is_empty() {
            None
        } else {
            Some(VecWithNames::from_entries(metrics))
        };
        let buckets = if buckets.is_empty() {
            None
        } else {
            Some(VecWithNames::from_entries(buckets))
        };
        Ok(SegmentAggregationResultsCollector {
            metrics,
            buckets,
            staged_docs: [0; DOC_BLOCK_SIZE],
            num_staged_docs: 0,
        })
    }

    #[inline]
    pub(crate) fn collect(
    fn collect(
        &mut self,
        doc: crate::DocId,
        agg_with_accessor: &AggregationsWithAccessor,
@@ -128,7 +162,7 @@ impl SegmentAggregationResultsCollector {
        Ok(())
    }

    pub(crate) fn flush_staged_docs(
    fn flush_staged_docs(
        &mut self,
        agg_with_accessor: &AggregationsWithAccessor,
        force_flush: bool,
@@ -162,6 +196,66 @@ impl SegmentAggregationResultsCollector {
    }
}

impl GenericSegmentAggregationResultsCollector {
    pub fn into_intermediate_aggregations_result(
        self,
        agg_with_accessor: &AggregationsWithAccessor,
    ) -> crate::Result<IntermediateAggregationResults> {
        let buckets = if let Some(buckets) = self.buckets {
            let entries = buckets
                .into_iter()
                .zip(agg_with_accessor.buckets.values())
                .map(|((key, bucket), acc)| Ok((key, bucket.into_intermediate_bucket_result(acc)?)))
                .collect::<crate::Result<Vec<(String, _)>>>()?;
            Some(VecWithNames::from_entries(entries))
        } else {
            None
        };
        let metrics = self.metrics.map(VecWithNames::from_other);

        Ok(IntermediateAggregationResults { metrics, buckets })
    }

    pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
        let buckets = req
            .buckets
            .iter()
            .map(|(key, req)| {
                Ok((
                    key.to_string(),
                    SegmentBucketResultCollector::from_req_and_validate(req)?,
                ))
            })
            .collect::<crate::Result<Vec<(String, _)>>>()?;
        let metrics = req
            .metrics
            .iter()
            .map(|(key, req)| {
                Ok((
                    key.to_string(),
                    SegmentMetricResultCollector::from_req_and_validate(req)?,
                ))
            })
            .collect::<crate::Result<Vec<(String, _)>>>()?;
        let metrics = if metrics.is_empty() {
            None
        } else {
            Some(VecWithNames::from_entries(metrics))
        };
        let buckets = if buckets.is_empty() {
            None
        } else {
            Some(VecWithNames::from_entries(buckets))
        };
        Ok(GenericSegmentAggregationResultsCollector {
            metrics,
            buckets,
            staged_docs: [0; DOC_BLOCK_SIZE],
            num_staged_docs: 0,
        })
    }
}

#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SegmentMetricResultCollector {
    Stats(SegmentStatsCollector),
@@ -205,7 +299,7 @@ impl SegmentMetricResultCollector {
    pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) {
        match self {
            SegmentMetricResultCollector::Stats(stats_collector) => {
                stats_collector.collect_block(doc, &*metric.accessor);
                stats_collector.collect_block(doc, &metric.accessor);
            }
        }
    }
@@ -215,7 +309,7 @@ impl SegmentMetricResultCollector {
/// segments.
/// The typical structure of Map<Key, Bucket> is not suitable during collection for performance
/// reasons.
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug)]
pub(crate) enum SegmentBucketResultCollector {
    Range(SegmentRangeCollector),
    Histogram(Box<SegmentHistogramCollector>),
@@ -243,14 +337,7 @@ impl SegmentBucketResultCollector {
    pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
        match &req.bucket_agg {
            BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new(
                SegmentTermCollector::from_req_and_validate(
                    terms_req,
                    &req.sub_aggregation,
                    req.field_type,
                    req.accessor
                        .as_multi()
                        .expect("unexpected fast field cardinality"),
                )?,
                SegmentTermCollector::from_req_and_validate(terms_req, &req.sub_aggregation)?,
            ))),
            BucketAggregationType::Range(range_req) => {
                Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
@@ -265,9 +352,7 @@ impl SegmentBucketResultCollector {
                    histogram,
                    &req.sub_aggregation,
                    req.field_type,
                    req.accessor
                        .as_single()
                        .expect("unexpected fast field cardinality"),
                    &req.accessor,
                )?,
            ))),
        }

@@ -150,7 +150,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// }
///
/// {
///     let mut facet_collector = FacetCollector::for_field(facet);
///     let mut facet_collector = FacetCollector::for_field("facet");
///     facet_collector.add_facet("/category/fiction");
///     let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
@@ -829,7 +829,7 @@ mod bench {
        let reader = index.reader().unwrap();
        b.iter(|| {
            let searcher = reader.searcher();
            let facet_collector = FacetCollector::for_field(facet_field);
            let facet_collector = FacetCollector::for_field("facet");
            searcher.search(&AllQuery, &facet_collector).unwrap();
        });
    }

@@ -120,7 +120,7 @@ where

        let fast_field_reader = segment_reader
            .fast_fields()
            .typed_column_first_or_default(schema.get_field_name(self.field))?;
            .column_first_or_default(schema.get_field_name(self.field))?;

        let segment_collector = self
            .collector

@@ -1,6 +1,4 @@
use std::sync::Arc;

use columnar::ColumnValues;
use columnar::{BytesColumn, Column};

use super::*;
use crate::collector::{Count, FilterCollector, TopDocs};
@@ -57,7 +55,7 @@ pub fn test_filter_collector() -> crate::Result<()> {

    assert_eq!(filtered_top_docs.len(), 0);

    fn date_filter(value: columnar::DateTime) -> bool {
    fn date_filter(value: DateTime) -> bool {
        (crate::DateTime::from(value).into_utc()
            - OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())
        .whole_weeks()
@@ -160,7 +158,7 @@ pub struct FastFieldTestCollector {

pub struct FastFieldSegmentCollector {
    vals: Vec<u64>,
    reader: Arc<dyn columnar::ColumnValues>,
    reader: Column,
}

impl FastFieldTestCollector {
@@ -203,8 +201,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
    type Fruit = Vec<u64>;

    fn collect(&mut self, doc: DocId, _score: Score) {
        let val = self.reader.get_val(doc);
        self.vals.push(val);
        self.vals.extend(self.reader.values(doc));
    }

    fn harvest(self) -> Vec<u64> {
@@ -212,62 +209,73 @@ impl SegmentCollector for FastFieldSegmentCollector {
    }
}

// /// Collects in order all of the fast field bytes for all of the
// /// docs in the `DocSet`
// ///
// /// This collector is mainly useful for tests.
// pub struct BytesFastFieldTestCollector {
//     field: Field,
// }
/// Collects in order all of the fast field bytes for all of the
/// docs in the `DocSet`
///
/// This collector is mainly useful for tests.
/// It is very slow.
pub struct BytesFastFieldTestCollector {
    field: String,
}

// pub struct BytesFastFieldSegmentCollector {
//     vals: Vec<u8>,
//     reader: BytesFastFieldReader,
// }
pub struct BytesFastFieldSegmentCollector {
    vals: Vec<u8>,
    column_opt: Option<BytesColumn>,
    buffer: Vec<u8>,
}

// impl BytesFastFieldTestCollector {
//     pub fn for_field(field: Field) -> BytesFastFieldTestCollector {
//         BytesFastFieldTestCollector { field }
//     }
// }
impl BytesFastFieldTestCollector {
    pub fn for_field(field: impl ToString) -> BytesFastFieldTestCollector {
        BytesFastFieldTestCollector {
            field: field.to_string(),
        }
    }
}

// impl Collector for BytesFastFieldTestCollector {
//     type Fruit = Vec<u8>;
//     type Child = BytesFastFieldSegmentCollector;
impl Collector for BytesFastFieldTestCollector {
    type Fruit = Vec<u8>;
    type Child = BytesFastFieldSegmentCollector;

//     fn for_segment(
//         &self,
//         _segment_local_id: u32,
//         segment_reader: &SegmentReader,
//     ) -> crate::Result<BytesFastFieldSegmentCollector> {
//         let reader = segment_reader.fast_fields().bytes(self.field)?;
//         Ok(BytesFastFieldSegmentCollector {
//             vals: Vec::new(),
//             reader,
//         })
//     }
    fn for_segment(
        &self,
        _segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> crate::Result<BytesFastFieldSegmentCollector> {
        let column_opt = segment_reader.fast_fields().bytes(&self.field)?;
        Ok(BytesFastFieldSegmentCollector {
            vals: Vec::new(),
            column_opt,
            buffer: Vec::new(),
        })
    }

//     fn requires_scoring(&self) -> bool {
//         false
//     }
    fn requires_scoring(&self) -> bool {
        false
    }

//     fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> {
//         Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
//     }
// }
    fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> {
        Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
    }
}

// impl SegmentCollector for BytesFastFieldSegmentCollector {
//     type Fruit = Vec<u8>;
impl SegmentCollector for BytesFastFieldSegmentCollector {
    type Fruit = Vec<u8>;

//     fn collect(&mut self, doc: u32, _score: Score) {
//         let data = self.reader.get_bytes(doc);
//         self.vals.extend(data);
//     }
    fn collect(&mut self, doc: DocId, _score: Score) {
        if let Some(column) = self.column_opt.as_ref() {
            for term_ord in column.term_ords(doc) {
                let (vals, buffer) = (&mut self.vals, &mut self.buffer);
                if column.ord_to_bytes(term_ord, buffer).unwrap() {
                    vals.extend(&buffer[..]);
                }
            }
        }
    }

//     fn harvest(self) -> <Self as SegmentCollector>::Fruit {
//         self.vals
//     }
// }
    fn harvest(self) -> <Self as SegmentCollector>::Fruit {
        self.vals
    }
}
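
The rewritten bytes collector above goes through two levels of indirection: `term_ords(doc)` yields dictionary ordinals for the document, and `ord_to_bytes` resolves each ordinal to its byte payload through a reusable buffer. A sketch of that lookup with a toy dictionary-encoded column (illustrative types, not the `columnar` `BytesColumn` API):

/// Toy dictionary-encoded bytes column: per-doc term ordinals plus a
/// table of unique byte values.
struct ToyBytesColumn {
    term_ords_per_doc: Vec<Vec<u64>>,
    dictionary: Vec<Vec<u8>>,
}

impl ToyBytesColumn {
    fn term_ords(&self, doc: usize) -> impl Iterator<Item = u64> + '_ {
        self.term_ords_per_doc[doc].iter().copied()
    }

    /// Writes the bytes for `ord` into `buffer`; returns false if out of range.
    fn ord_to_bytes(&self, ord: u64, buffer: &mut Vec<u8>) -> bool {
        match self.dictionary.get(ord as usize) {
            Some(bytes) => {
                buffer.clear();
                buffer.extend_from_slice(bytes);
                true
            }
            None => false,
        }
    }
}

fn collect_doc_bytes(column: &ToyBytesColumn, doc: usize, vals: &mut Vec<u8>) {
    let mut buffer = Vec::new();
    for ord in column.term_ords(doc) {
        if column.ord_to_bytes(ord, &mut buffer) {
            vals.extend_from_slice(&buffer);
        }
    }
}

fn main() {
    let column = ToyBytesColumn {
        term_ords_per_doc: vec![vec![1, 0]],
        dictionary: vec![b"bar".to_vec(), b"foo".to_vec()],
    };
    let mut vals = Vec::new();
    collect_doc_bytes(&column, 0, &mut vals);
    assert_eq!(vals, b"foobar");
}
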

fn make_test_searcher() -> crate::Result<Searcher> {
    let schema = Schema::builder().build();

@@ -235,7 +235,6 @@ impl TopDocs {
    /// # use tantivy::query::{Query, QueryParser};
    /// use tantivy::Searcher;
    /// use tantivy::collector::TopDocs;
    /// use tantivy::schema::Field;
    ///
    /// # fn main() -> tantivy::Result<()> {
    /// #   let mut schema_builder = Schema::builder();
@@ -252,7 +251,7 @@ impl TopDocs {
    /// #   index_writer.commit()?;
    /// #   let reader = index.reader()?;
    /// #   let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
    /// #   let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
    /// #   let top_docs = docs_sorted_by_rating(&reader.searcher(), &query)?;
    /// #   assert_eq!(top_docs,
    /// #       vec![(97u64, DocAddress::new(0u32, 1)),
    /// #            (80u64, DocAddress::new(0u32, 3))]);
@@ -262,8 +261,7 @@ impl TopDocs {
    /// /// collects the top 10 documents, order by the u64-`field`
    /// /// given in argument.
    /// fn docs_sorted_by_rating(searcher: &Searcher,
    ///                          query: &dyn Query,
    ///                          rating_field: Field)
    ///                          query: &dyn Query)
    ///     -> tantivy::Result<Vec<(u64, DocAddress)>> {
    ///
    ///     // This is where we build our topdocs collector
@@ -271,7 +269,7 @@ impl TopDocs {
    ///     // Note the `rating_field` needs to be a FAST field here.
    ///     let top_books_by_rating = TopDocs
    ///             ::with_limit(10)
    ///              .order_by_u64_field(rating_field);
    ///              .order_by_u64_field("rating");
    ///
    ///     // ... and here are our documents. Note this is a simple vec.
    ///     // The `u64` in the pair is the value of our fast field for
@@ -323,22 +321,21 @@ impl TopDocs {
    /// # use tantivy::query::{Query, AllQuery};
    /// use tantivy::Searcher;
    /// use tantivy::collector::TopDocs;
    /// use tantivy::schema::Field;
    ///
    /// # fn main() -> tantivy::Result<()> {
    /// #   let mut schema_builder = Schema::builder();
    /// #   let title = schema_builder.add_text_field("company", TEXT);
    /// #   let rating = schema_builder.add_i64_field("revenue", FAST);
    /// #   let revenue = schema_builder.add_i64_field("revenue", FAST);
    /// #   let schema = schema_builder.build();
    /// #
    /// #   let index = Index::create_in_ram(schema);
    /// #   let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
    /// #   index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64))?;
    /// #   index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64))?;
    /// #   index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64))?;
    /// #   index_writer.add_document(doc!(title => "MadCow Inc.", revenue => 92_000_000i64))?;
    /// #   index_writer.add_document(doc!(title => "Zozo Cow KKK", revenue => 119_000_000i64))?;
    /// #   index_writer.add_document(doc!(title => "Declining Cow", revenue => -63_000_000i64))?;
    /// #   assert!(index_writer.commit().is_ok());
    /// #   let reader = index.reader()?;
    /// #   let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
    /// #   let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, "revenue")?;
    /// #   assert_eq!(top_docs,
    /// #       vec![(119_000_000i64, DocAddress::new(0, 1)),
    /// #            (92_000_000i64, DocAddress::new(0, 0))]);
@@ -349,7 +346,7 @@ impl TopDocs {
    /// /// given in argument.
    /// fn docs_sorted_by_revenue(searcher: &Searcher,
    ///                          query: &dyn Query,
    ///                          revenue_field: Field)
    ///                          revenue_field: &str)
    ///     -> tantivy::Result<Vec<(i64, DocAddress)>> {
    ///
    ///     // This is where we build our topdocs collector
@@ -358,7 +355,7 @@ impl TopDocs {
    ///     // type `sort_by_field`. revenue_field here is a FAST i64 field.
    ///     let top_company_by_revenue = TopDocs
    ///             ::with_limit(2)
    ///              .order_by_fast_field(revenue_field);
    ///              .order_by_fast_field("revenue");
    ///
    ///     // ... and here are our documents. Note this is a simple vec.
    ///     // The `i64` in the pair is the value of our fast field for
@@ -460,9 +457,10 @@ impl TopDocs {
    ///             // Typically, fast_fields.
    ///             //
    ///             // In our case, we will get a reader for the popularity
    ///             // fast field.
    ///             // fast field. For simplicity we read the first or default value in the fast
    ///             // field.
    ///             let popularity_reader =
    ///                 segment_reader.fast_fields().u64("popularity").unwrap();
    ///                 segment_reader.fast_fields().u64("popularity").unwrap().first_or_default_col(0);
    ///
    ///             // We can now define our actual scoring function
    ///             move |doc: DocId, original_score: Score| {
@@ -569,9 +567,9 @@ impl TopDocs {
    ///             // Note that this is implemented by using a `(u64, u64)`
    ///             // as a score.
    ///             let popularity_reader =
    ///                 segment_reader.fast_fields().u64("popularity").unwrap();
    ///                 segment_reader.fast_fields().u64("popularity").unwrap().first_or_default_col(0);
    ///             let boosted_reader =
    ///                 segment_reader.fast_fields().u64("boosted").unwrap();
    ///                 segment_reader.fast_fields().u64("boosted").unwrap().first_or_default_col(0);
    ///
    ///             // We can now define our actual scoring function
    ///             move |doc: DocId| {

@@ -905,12 +905,14 @@ mod tests {
        let field = schema.get_field("num_likes").unwrap();
        let index = Index::create(directory.clone(), schema, IndexSettings::default())?;

        let mut writer = index.writer_with_num_threads(8, 24_000_000).unwrap();
        for i in 0u64..8_000u64 {
            writer.add_document(doc!(field => i))?;
        let mut writer = index.writer_with_num_threads(1, 32_000_000).unwrap();
        for _seg in 0..8 {
            for i in 0u64..1_000u64 {
                writer.add_document(doc!(field => i))?;
            }
            writer.commit()?;
        }

        writer.commit()?;
        let mem_right_after_commit = directory.total_mem_usage();

        let reader = index

@@ -135,6 +135,8 @@ impl InvertedIndexReader {
        term_info: &TermInfo,
        option: IndexRecordOption,
    ) -> io::Result<SegmentPostings> {
        let option = option.downgrade(self.record_option);

        let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
        let position_reader = {
            if option.has_positions() {

@@ -249,7 +249,7 @@ impl SearcherInner {
        index: Index,
        segment_readers: Vec<SegmentReader>,
        generation: TrackedObject<SearcherGeneration>,
        doc_store_cache_size: usize,
        doc_store_cache_num_blocks: usize,
    ) -> io::Result<SearcherInner> {
        assert_eq!(
            &segment_readers
@@ -261,7 +261,7 @@ impl SearcherInner {
        );
        let store_readers: Vec<StoreReader> = segment_readers
            .iter()
            .map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
            .map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_num_blocks))
            .collect::<io::Result<Vec<_>>>()?;

        Ok(SearcherInner {

@@ -9,7 +9,7 @@ use crate::directory::{CompositeFile, FileSlice};
use crate::error::DataCorruption;
use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::{Field, FieldType, IndexRecordOption, Schema, Type};
use crate::schema::{Field, IndexRecordOption, Schema, Type};
use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermDictionary;
@@ -99,7 +99,7 @@ impl SegmentReader {
                "`{field_name}` is not a facet field.`"
            )));
        }
        let Some(facet_column) = self.fast_fields().str_column_opt(field_name)? else {
        let Some(facet_column) = self.fast_fields().str(field_name)? else {
            panic!("Facet Field `{field_name}` is missing. This should not happen");
        };
        Ok(FacetReader::new(facet_column))
@@ -128,9 +128,12 @@ impl SegmentReader {
        &self.fieldnorm_readers
    }

    /// Accessor to the segment's `StoreReader`.
    pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
        StoreReader::open(self.store_file.clone(), cache_size)
    /// Accessor to the segment's [`StoreReader`](crate::store::StoreReader).
    ///
    /// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
    /// The size of blocks is configurable, this should be reflected in the
    pub fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader> {
        StoreReader::open(self.store_file.clone(), cache_num_blocks)
    }

    /// Open a new segment for reading.

@@ -196,8 +196,21 @@ impl MmapDirectory {
                directory_path,
            )));
        }
        let canonical_path: PathBuf = directory_path.canonicalize().map_err(|io_err| {
            OpenDirectoryError::wrap_io_error(io_err, PathBuf::from(directory_path))
        #[allow(clippy::bind_instead_of_map)]
        let canonical_path: PathBuf = directory_path.canonicalize().or_else(|io_err| {
            let directory_path = directory_path.to_owned();

            #[cfg(windows)]
            {
                // `canonicalize` returns "Incorrect function" (error code 1)
                // for virtual drives (network drives, ramdisk, etc.).
                if io_err.raw_os_error() == Some(1) && directory_path.exists() {
                    // Should call `std::path::absolute` when it is stabilised.
                    return Ok(directory_path);
                }
            }

            Err(OpenDirectoryError::wrap_io_error(io_err, directory_path))
        })?;
        if !canonical_path.is_dir() {
            return Err(OpenDirectoryError::NotADirectory(PathBuf::from(
@@ -443,6 +456,16 @@ impl Directory for MmapDirectory {
        Ok(self.inner.watch(watch_callback))
    }

    #[cfg(windows)]
    fn sync_directory(&self) -> Result<(), io::Error> {
        // On Windows, it is not necessary to fsync the parent directory to
        // ensure that the directory entry containing the file has also reached
        // disk, and calling sync_data on a handle to directory is a no-op on
        // local disks, but will return an error on virtual drives.
        Ok(())
    }

    #[cfg(not(windows))]
    fn sync_directory(&self) -> Result<(), io::Error> {
        let mut open_opts = OpenOptions::new();

@@ -450,19 +473,6 @@ impl Directory for MmapDirectory {
        // write must not be set, or it fails with EISDIR
        open_opts.read(true);

        // On Windows, opening a directory requires FILE_FLAG_BACKUP_SEMANTICS
        // and calling sync_all() only works if write access is requested.
        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;

            use winapi::um::winbase;

            open_opts
                .write(true)
                .custom_flags(winbase::FILE_FLAG_BACKUP_SEMANTICS);
        }

        let fd = open_opts.open(&self.inner.root_path)?;
        fd.sync_data()?;
        Ok(())

@@ -49,11 +49,6 @@ impl AliveBitSet {
        Self::open(alive_bitset_bytes)
    }

    pub(crate) fn from_bitset(bitset: &BitSet) -> AliveBitSet {
        let readonly_bitset = ReadOnlyBitSet::from(bitset);
        AliveBitSet::from(readonly_bitset)
    }

    /// Opens an alive bitset given its file.
    pub fn open(bytes: OwnedBytes) -> AliveBitSet {
        let bitset = ReadOnlyBitSet::open(bytes);

@@ -54,6 +54,7 @@ impl FacetReader {
        self.facet_column.ords().values(doc)
    }

    /// Accessor to the facet dictionary.
    pub fn facet_dict(&self) -> &columnar::Dictionary {
        self.facet_column.dictionary()
    }

@@ -22,14 +22,13 @@
use std::net::Ipv6Addr;

pub use columnar::Column;
use columnar::MonotonicallyMappableToU64;

pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub use self::readers::FastFieldReaders;
pub use self::writer::FastFieldsWriter;
use crate::schema::{Type, Value};
use crate::schema::Type;
use crate::DateTime;

mod alive_bitset;
@@ -103,43 +102,13 @@ impl FastValue for DateTime {
    }
}

impl columnar::MonotonicallyMappableToU64 for DateTime {
    fn to_u64(self) -> u64 {
        self.timestamp_micros.to_u64()
    }

    fn from_u64(val: u64) -> Self {
        DateTime {
            timestamp_micros: MonotonicallyMappableToU64::from_u64(val),
        }
    }
}
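
Implementing `MonotonicallyMappableToU64` above is what lets `DateTime` (signed microseconds) live in an unsigned fast-field column without breaking ordering. The usual trick, sketched below, is to flip the sign bit, which is order-preserving and exactly invertible (this is the conventional mapping; the `columnar` crate's implementation may differ in detail):

/// Order-preserving map from i64 to u64: flip the sign bit.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    let timestamps_micros = [i64::MIN, -1, 0, 1, i64::MAX];
    // Round-trips exactly and preserves order across the whole range.
    for window in timestamps_micros.windows(2) {
        let (a, b) = (window[0], window[1]);
        assert_eq!(u64_to_i64(i64_to_u64(a)), a);
        assert!(i64_to_u64(a) < i64_to_u64(b));
    }
}
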
fn unexpected_value(expected: &str, actual: &Value) -> crate::TantivyError {
    crate::TantivyError::SchemaError(format!(
        "Expected a {:?} in fast field, but got {:?}",
        expected, actual
    ))
}

fn value_to_u64(value: &Value) -> crate::Result<u64> {
    let value = match value {
        Value::U64(val) => val.to_u64(),
        Value::I64(val) => val.to_u64(),
        Value::F64(val) => val.to_u64(),
        Value::Bool(val) => val.to_u64(),
        Value::Date(val) => val.to_u64(),
        _ => return Err(unexpected_value("u64/i64/f64/bool/date", value)),
    };
    Ok(value)
}

#[cfg(test)]
mod tests {

    use std::ops::RangeInclusive;
    use std::ops::{Range, RangeInclusive};
    use std::path::Path;

    use columnar::{Column, MonotonicallyMappableToU64};
    use common::{HasLen, TerminatingWrite};
    use once_cell::sync::Lazy;
    use rand::prelude::SliceRandom;
@@ -147,7 +116,7 @@ mod tests {
    use rand::{Rng, SeedableRng};

    use super::*;
    use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
    use crate::directory::{Directory, RamDirectory, WritePtr};
    use crate::merge_policy::NoMergePolicy;
    use crate::schema::{
        Document, Facet, FacetOptions, Field, Schema, SchemaBuilder, FAST, INDEXED, STRING, TEXT,
@@ -191,7 +160,10 @@ mod tests {

        assert_eq!(file.len(), 161);
        let fast_field_readers = FastFieldReaders::open(file).unwrap();
        let column = fast_field_readers.u64("field").unwrap();
        let column = fast_field_readers
            .u64("field")
            .unwrap()
            .first_or_default_col(0);
        assert_eq!(column.get_val(0), 13u64);
        assert_eq!(column.get_val(1), 14u64);
        assert_eq!(column.get_val(2), 2u64);
@@ -238,7 +210,10 @@ mod tests {
        let file = directory.open_read(path).unwrap();
        assert_eq!(file.len(), 189);
        let fast_field_readers = FastFieldReaders::open(file).unwrap();
        let col = fast_field_readers.u64("field").unwrap();
        let col = fast_field_readers
            .u64("field")
            .unwrap()
            .first_or_default_col(0);
        assert_eq!(col.get_val(0), 4u64);
        assert_eq!(col.get_val(1), 14_082_001u64);
        assert_eq!(col.get_val(2), 3_052u64);
@@ -268,7 +243,10 @@ mod tests {
        let file = directory.open_read(path).unwrap();
        assert_eq!(file.len(), 162);
        let fast_field_readers = FastFieldReaders::open(file).unwrap();
        let fast_field_reader = fast_field_readers.u64("field").unwrap();
        let fast_field_reader = fast_field_readers
            .u64("field")
            .unwrap()
            .first_or_default_col(0);
        for doc in 0..10_000 {
            assert_eq!(fast_field_reader.get_val(doc), 100_000u64);
        }
@@ -298,7 +276,10 @@ mod tests {
        assert_eq!(file.len(), 4557);
        {
            let fast_field_readers = FastFieldReaders::open(file).unwrap();
            let col = fast_field_readers.u64("field").unwrap();
            let col = fast_field_readers
                .u64("field")
                .unwrap()
                .first_or_default_col(0);
            for doc in 1..10_000 {
                assert_eq!(col.get_val(doc), 5_000_000_000_000_000_000u64 + doc as u64);
            }
@@ -329,7 +310,10 @@ mod tests {

        {
            let fast_field_readers = FastFieldReaders::open(file).unwrap();
            let col = fast_field_readers.i64("field").unwrap();
            let col = fast_field_readers
                .i64("field")
                .unwrap()
                .first_or_default_col(0);
            assert_eq!(col.min_value(), -100i64);
            assert_eq!(col.max_value(), 9_999i64);
            for (doc, i) in (-100i64..10_000i64).enumerate() {
@@ -364,7 +348,18 @@ mod tests {
        let file = directory.open_read(path).unwrap();
        let fast_field_readers = FastFieldReaders::open(file).unwrap();
        let col = fast_field_readers.i64("field").unwrap();
        assert_eq!(col.get_val(0), 0i64);
        assert_eq!(col.first(0), None);

        let col = fast_field_readers
            .i64("field")
            .unwrap()
            .first_or_default_col(0);
        assert_eq!(col.get_val(0), 0);
        let col = fast_field_readers
            .i64("field")
            .unwrap()
            .first_or_default_col(-100);
        assert_eq!(col.get_val(0), -100);
    }

    #[test]
@@ -385,8 +380,11 @@ mod tests {

        let file = directory.open_read(path).unwrap();
        let fast_field_readers = FastFieldReaders::open(file).unwrap();
        let col = fast_field_readers.date("date").unwrap();
        assert_eq!(col.get_val(0), columnar::DateTime::default());
        let col = fast_field_readers
            .date("date")
            .unwrap()
            .first_or_default_col(DateTime::default());
        assert_eq!(col.get_val(0), DateTime::default());
    }

    // Warning: this generates the same permutation at each call
@@ -418,7 +416,10 @@ mod tests {
        }
        let file = directory.open_read(path).unwrap();
        let fast_field_readers = FastFieldReaders::open(file).unwrap();
        let col = fast_field_readers.u64("field").unwrap();
        let col = fast_field_readers
            .u64("field")
            .unwrap()
            .first_or_default_col(0);
        for a in 0..n {
            assert_eq!(col.get_val(a as u32), permutation[a]);
        }
@@ -436,39 +437,38 @@ mod tests {
        test_intfastfield_permutation_with_data(permutation);
    }

    // TODO reenable when merge is here.
    // #[test]
    // fn test_merge_missing_date_fast_field() {
    // let mut schema_builder = Schema::builder();
    // let date_field = schema_builder.add_date_field("date", FAST);
    // let schema = schema_builder.build();
    // let index = Index::create_in_ram(schema);
    // let mut index_writer = index.writer_for_tests().unwrap();
    // index_writer.set_merge_policy(Box::new(NoMergePolicy));
    // index_writer
    // .add_document(doc!(date_field =>DateTime::from_utc(OffsetDateTime::now_utc())))
    // .unwrap();
    // index_writer.commit().unwrap();
    // index_writer.add_document(doc!()).unwrap();
    // index_writer.commit().unwrap();
    // let reader = index.reader().unwrap();
    // let segment_ids: Vec<SegmentId> = reader
    // .searcher()
    // .segment_readers()
    // .iter()
    // .map(SegmentReader::segment_id)
    // .collect();
    // assert_eq!(segment_ids.len(), 2);
    // index_writer.merge(&segment_ids[..]).wait().unwrap();
    // reader.reload().unwrap();
    // assert_eq!(reader.searcher().segment_readers().len(), 1);
    // }
    #[test]
    fn test_merge_missing_date_fast_field() {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field("date", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer
            .add_document(doc!(date_field => DateTime::from_utc(OffsetDateTime::now_utc())))
            .unwrap();
        index_writer.commit().unwrap();
        index_writer.add_document(doc!()).unwrap();
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        let segment_ids: Vec<SegmentId> = reader
            .searcher()
            .segment_readers()
            .iter()
            .map(SegmentReader::segment_id)
            .collect();
        assert_eq!(segment_ids.len(), 2);
        index_writer.merge(&segment_ids[..]).wait().unwrap();
        reader.reload().unwrap();
        assert_eq!(reader.searcher().segment_readers().len(), 1);
    }

    // fn get_vals_for_docs(column: &columnar::Column<u64>, docs: Range<u32>) -> Vec<u64> {
    // docs.into_iter()
    // .flat_map(|doc| column.values(doc))
    // .collect()
    // }
    fn get_vals_for_docs(column: &Column<u64>, docs: Range<u32>) -> Vec<u64> {
        docs.into_iter()
            .flat_map(|doc| column.values(doc))
            .collect()
    }

    #[test]
    fn test_text_fastfield() {
@@ -524,57 +524,61 @@ mod tests {
            assert!(str_column.ord_to_str(0, &mut str_term).unwrap());
            assert_eq!("AAAAA", &str_term);

            // let inverted_index = segment_reader.inverted_index(text_field)?;
            // assert_eq!(inverted_index.terms().num_terms(), 3);
            // let mut bytes = vec![];
            // assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
            // assert_eq!(bytes, "aaaaa".as_bytes());
            // }
            let inverted_index = segment_reader.inverted_index(text_field).unwrap();
            assert_eq!(inverted_index.terms().num_terms(), 3);
            let mut bytes = vec![];
            assert!(inverted_index.terms().ord_to_term(0, &mut bytes).unwrap());
            assert_eq!(bytes, "aaaaa".as_bytes());
        }

        // {
        // // second segment
        // let mut index_writer = index.writer_for_tests()?;
        {
            // second segment
            let mut index_writer = index.writer_for_tests().unwrap();

            // index_writer.add_document(doc!(
            // text_field => "AAAAA", // term_ord 0
            // ))?;
            index_writer
                .add_document(doc!(
                    text_field => "AAAAA", // term_ord 0
                ))
                .unwrap();

            // index_writer.add_document(doc!(
            // text_field => "CCCCC AAAAA", // term_ord 1, after merge 2
            // ))?;
            index_writer
                .add_document(doc!(
                    text_field => "CCCCC AAAAA", // term_ord 1, after merge 2
                ))
                .unwrap();

            // index_writer.add_document(doc!())?;
            // index_writer.commit()?;
            index_writer.add_document(doc!()).unwrap();
            index_writer.commit().unwrap();

            // let reader = index.reader()?;
            // let searcher = reader.searcher();
            // assert_eq!(searcher.segment_readers().len(), 2);
            // let segment_reader = searcher.segment_reader(1);
            // let fast_fields = segment_reader.fast_fields();
            // let text_fast_field = fast_fields.u64s("text").unwrap();
            let reader = index.reader().unwrap();
            let searcher = reader.searcher();
            assert_eq!(searcher.segment_readers().len(), 2);
            let segment_reader = searcher.segment_reader(1);
            let fast_fields = segment_reader.fast_fields();
            let text_fast_field = fast_fields.str("text").unwrap().unwrap();

            // assert_eq!(get_vals_for_docs(&text_fast_field, 0..3), vec![0, 1, 0]);
            assert_eq!(&get_vals_for_docs(&text_fast_field.ords(), 0..2), &[0, 1]);
        }

        // TODO uncomment once merging is available
        // Merging the segments
        // {
        // let segment_ids = index.searchable_segment_ids()?;
        // let mut index_writer = index.writer_for_tests()?;
        // index_writer.merge(&segment_ids).wait()?;
        // index_writer.wait_merging_threads()?;
        // }
        //
        // let reader = index.reader()?;
        // let searcher = reader.searcher();
        // let segment_reader = searcher.segment_reader(0);
        // let fast_fields = segment_reader.fast_fields();
        // let text_fast_field = fast_fields.u64s("text").unwrap();
        //
        // assert_eq!(
        // get_vals_for_docs(&text_fast_field, 0..8),
        // vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2, 0]
        // );
        {
            let segment_ids = index.searchable_segment_ids().unwrap();
            let mut index_writer = index.writer_for_tests().unwrap();
            index_writer.merge(&segment_ids).wait().unwrap();
            index_writer.wait_merging_threads().unwrap();
        }

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let segment_reader = searcher.segment_reader(0);
        let fast_fields = segment_reader.fast_fields();
        let text_column = fast_fields.str("text").unwrap().unwrap();

        assert_eq!(
            get_vals_for_docs(text_column.ords(), 0..8),
            vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2]
        );
    }

    #[test]
@@ -588,11 +592,7 @@ mod tests {
        writer.commit().unwrap();
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let str_column = segment_reader
            .fast_fields()
            .str_column_opt("text")
            .unwrap()
            .unwrap();
        let str_column = segment_reader.fast_fields().str("text").unwrap().unwrap();
        // The string values are not sorted here.
        let term_ords: Vec<u64> = str_column.term_ords(0u32).collect();
        assert_eq!(&term_ords, &[1, 0]);
@@ -618,155 +618,162 @@ mod tests {
        assert_eq!(&facet_ords, &[0, 1]);
    }

    // #[test]
    // fn test_string_fastfield() -> crate::Result<()> {
    // let mut schema_builder = Schema::builder();
    // let text_field = schema_builder.add_text_field("text", STRING | FAST);
    // let schema = schema_builder.build();
    // let index = Index::create_in_ram(schema);
    #[test]
    fn test_string_fastfield() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", STRING | FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);

    // {
    // // first segment
    // let mut index_writer = index.writer_for_tests()?;
    // index_writer.set_merge_policy(Box::new(NoMergePolicy));
    // index_writer.add_document(doc!(
    // text_field => "BBBBB", // term_ord 1
    // ))?;
    // index_writer.add_document(doc!())?;
    // index_writer.add_document(doc!(
    // text_field => "AAAAA", // term_ord 0
    // ))?;
    // index_writer.add_document(doc!(
    // text_field => "AAAAA", // term_ord 0
    // ))?;
    // index_writer.add_document(doc!(
    // text_field => "zumberthree", // term_ord 2, after merge term_ord 3
    // ))?;
        {
            // first segment
            let mut index_writer = index.writer_for_tests()?;
            index_writer.set_merge_policy(Box::new(NoMergePolicy));
            index_writer.add_document(doc!(
                text_field => "BBBBB", // term_ord 1
            ))?;
            index_writer.add_document(doc!())?;
            index_writer.add_document(doc!(
                text_field => "AAAAA", // term_ord 0
            ))?;
            index_writer.add_document(doc!(
                text_field => "AAAAA", // term_ord 0
            ))?;
            index_writer.add_document(doc!(
                text_field => "zumberthree", // term_ord 2, after merge term_ord 3
            ))?;

    // index_writer.add_document(doc!())?;
    // index_writer.commit()?;
            index_writer.add_document(doc!())?;
            index_writer.commit()?;

    // let reader = index.reader()?;
    // let searcher = reader.searcher();
    // assert_eq!(searcher.segment_readers().len(), 1);
    // let segment_reader = searcher.segment_reader(0);
    // let fast_fields = segment_reader.fast_fields();
    // let text_fast_field = fast_fields.u64s(text_field).unwrap();
            let reader = index.reader()?;
            let searcher = reader.searcher();
            assert_eq!(searcher.segment_readers().len(), 1);
            let segment_reader = searcher.segment_reader(0);
            let fast_fields = segment_reader.fast_fields();
            let text_col = fast_fields.str("text").unwrap().unwrap();

    // assert_eq!(get_vals_for_docs(&text_fast_field, 0..6), vec![1, 0, 0, 2]);
            assert_eq!(get_vals_for_docs(&text_col.ords(), 0..6), vec![1, 0, 0, 2]);

    // let inverted_index = segment_reader.inverted_index(text_field)?;
    // assert_eq!(inverted_index.terms().num_terms(), 3);
    // let mut bytes = vec![];
    // assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
    // assert_eq!(bytes, "AAAAA".as_bytes());
    // }
            let inverted_index = segment_reader.inverted_index(text_field)?;
            assert_eq!(inverted_index.terms().num_terms(), 3);
            let mut bytes = vec![];
            assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
            assert_eq!(bytes, "AAAAA".as_bytes());
        }

    // {
    // // second segment
    // let mut index_writer = index.writer_for_tests()?;
        {
            // second segment
            let mut index_writer = index.writer_for_tests()?;

    // index_writer.add_document(doc!(
    // text_field => "AAAAA", // term_ord 0
    // ))?;
            index_writer.add_document(doc!(
                text_field => "AAAAA", // term_ord 0
            ))?;

    // index_writer.add_document(doc!(
    // text_field => "CCCCC", // term_ord 1, after merge 2
    // ))?;
            index_writer.add_document(doc!(
                text_field => "CCCCC", // term_ord 1, after merge 2
            ))?;

    // index_writer.add_document(doc!())?;
    // index_writer.commit()?;
            index_writer.add_document(doc!())?;
            index_writer.commit()?;

    // let reader = index.reader()?;
    // let searcher = reader.searcher();
    // assert_eq!(searcher.segment_readers().len(), 2);
    // let segment_reader = searcher.segment_reader(1);
    // let fast_fields = segment_reader.fast_fields();
    // let text_fast_field = fast_fields.u64s(text_field).unwrap();
            let reader = index.reader()?;
            let searcher = reader.searcher();
            assert_eq!(searcher.segment_readers().len(), 2);
            let segment_reader = searcher.segment_reader(1);
            let fast_fields = segment_reader.fast_fields();
            let text_fast_field = fast_fields.str("text").unwrap().unwrap();

    // assert_eq!(get_vals_for_docs(&text_fast_field, 0..2), vec![0, 1]);
    // }
    // // Merging the segments
    // {
    // let segment_ids = index.searchable_segment_ids()?;
    // let mut index_writer = index.writer_for_tests()?;
    // index_writer.merge(&segment_ids).wait()?;
    // index_writer.wait_merging_threads()?;
    // }
            assert_eq!(&get_vals_for_docs(text_fast_field.ords(), 0..2), &[0, 1]);
        }
        // Merging the segments
        {
            let segment_ids = index.searchable_segment_ids()?;
            let mut index_writer = index.writer_for_tests()?;
            index_writer.merge(&segment_ids).wait()?;
            index_writer.wait_merging_threads()?;
        }

    // let reader = index.reader()?;
    // let searcher = reader.searcher();
    // let segment_reader = searcher.segment_reader(0);
    // let fast_fields = segment_reader.fast_fields();
    // let text_fast_field = fast_fields.u64s(text_field).unwrap();
        let reader = index.reader()?;
        let searcher = reader.searcher();
        let segment_reader = searcher.segment_reader(0);
        let fast_fields = segment_reader.fast_fields();
        let text_fast_field = fast_fields.str("text").unwrap().unwrap();

    // assert_eq!(
    // get_vals_for_docs(&text_fast_field, 0..9),
    // vec![1, 0, 0, 3 /* next segment */, 0, 2]
    // );
        assert_eq!(
            get_vals_for_docs(&text_fast_field.ords(), 0..9),
            vec![1, 0, 0, 3 /* next segment */, 0, 2]
        );

    // Ok(())
    // }
        Ok(())
    }

    // #[test]
    // fn test_datefastfield() -> crate::Result<()> {
    // let mut schema_builder = Schema::builder();
    // let date_field = schema_builder.add_date_field(
    // "date",
    // DateOptions::from(FAST).set_precision(DatePrecision::Microseconds),
    // );
    // let multi_date_field = schema_builder.add_date_field(
    // "multi_date",
    // DateOptions::default()
    // .set_precision(DatePrecision::Microseconds)
    // .set_fast(),
    // );
    // let schema = schema_builder.build();
    // let index = Index::create_in_ram(schema);
    // let mut index_writer = index.writer_for_tests()?;
    // index_writer.set_merge_policy(Box::new(NoMergePolicy));
    // index_writer.add_document(doc!(
    // date_field => DateTime::from_u64(1i64.to_u64()),
    // multi_date_field => DateTime::from_u64(2i64.to_u64()),
    // multi_date_field => DateTime::from_u64(3i64.to_u64())
    // ))?;
    // index_writer.add_document(doc!(
    // date_field => DateTime::from_u64(4i64.to_u64())
    // ))?;
    // index_writer.add_document(doc!(
    // multi_date_field => DateTime::from_u64(5i64.to_u64()),
    // multi_date_field => DateTime::from_u64(6i64.to_u64())
    // ))?;
    // index_writer.commit()?;
    // let reader = index.reader()?;
    // let searcher = reader.searcher();
    // assert_eq!(searcher.segment_readers().len(), 1);
    // let segment_reader = searcher.segment_reader(0);
    // let fast_fields = segment_reader.fast_fields();
    // let date_fast_field = fast_fields.date(date_field).unwrap();
    // let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
    // let mut dates = vec![];
    // {
    // assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
    // dates_fast_field.get_vals(0u32, &mut dates);
    // assert_eq!(dates.len(), 2);
    // assert_eq!(dates[0].into_timestamp_micros(), 2i64);
    // assert_eq!(dates[1].into_timestamp_micros(), 3i64);
    // }
    // {
    // assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
    // dates_fast_field.get_vals(1u32, &mut dates);
    // assert!(dates.is_empty());
    // }
    // {
    // assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
    // dates_fast_field.get_vals(2u32, &mut dates);
    // assert_eq!(dates.len(), 2);
    // assert_eq!(dates[0].into_timestamp_micros(), 5i64);
    // assert_eq!(dates[1].into_timestamp_micros(), 6i64);
    // }
    // Ok(())
    // }
    #[test]
    fn test_datefastfield() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field(
            "date",
            DateOptions::from(FAST).set_precision(DatePrecision::Microseconds),
        );
        let multi_date_field = schema_builder.add_date_field(
            "multi_date",
            DateOptions::default()
                .set_precision(DatePrecision::Microseconds)
                .set_fast(),
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests()?;
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(
            date_field => DateTime::from_u64(1i64.to_u64()),
            multi_date_field => DateTime::from_u64(2i64.to_u64()),
            multi_date_field => DateTime::from_u64(3i64.to_u64())
        ))?;
        index_writer.add_document(doc!(
            date_field => DateTime::from_u64(4i64.to_u64())
        ))?;
        index_writer.add_document(doc!(
            multi_date_field => DateTime::from_u64(5i64.to_u64()),
            multi_date_field => DateTime::from_u64(6i64.to_u64())
        ))?;
        index_writer.commit()?;
        let reader = index.reader()?;
        let searcher = reader.searcher();
        assert_eq!(searcher.segment_readers().len(), 1);
        let segment_reader = searcher.segment_reader(0);
        let fast_fields = segment_reader.fast_fields();
        let date_fast_field = fast_fields
            .column_opt::<DateTime>("date")
            .unwrap()
            .unwrap()
            .first_or_default_col(Default::default());
        let dates_fast_field = fast_fields
            .column_opt::<DateTime>("multi_date")
            .unwrap()
            .unwrap();
        let mut dates = vec![];
        {
            assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
            dates_fast_field.fill_vals(0u32, &mut dates);
            assert_eq!(dates.len(), 2);
            assert_eq!(dates[0].into_timestamp_micros(), 2i64);
            assert_eq!(dates[1].into_timestamp_micros(), 3i64);
        }
        {
            assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
            dates_fast_field.fill_vals(1u32, &mut dates);
            assert!(dates.is_empty());
        }
        {
            assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
            dates_fast_field.fill_vals(2u32, &mut dates);
            assert_eq!(dates.len(), 2);
            assert_eq!(dates[0].into_timestamp_micros(), 5i64);
            assert_eq!(dates[1].into_timestamp_micros(), 6i64);
        }
        Ok(())
    }

    #[test]
    pub fn test_fastfield_bool_small() {
@@ -796,10 +803,10 @@ mod tests {
        assert_eq!(file.len(), 175);
        let fast_field_readers = FastFieldReaders::open(file).unwrap();
        let bool_col = fast_field_readers.bool("field_bool").unwrap();
        assert_eq!(bool_col.get_val(0), true);
        assert_eq!(bool_col.get_val(1), false);
        assert_eq!(bool_col.get_val(2), true);
        assert_eq!(bool_col.get_val(3), false);
        assert_eq!(bool_col.first(0), Some(true));
        assert_eq!(bool_col.first(1), Some(false));
        assert_eq!(bool_col.first(2), Some(true));
        assert_eq!(bool_col.first(3), Some(false));
    }

    #[test]
@@ -829,8 +836,8 @@ mod tests {
        let readers = FastFieldReaders::open(file).unwrap();
        let bool_col = readers.bool("field_bool").unwrap();
        for i in 0..25 {
            assert_eq!(bool_col.get_val(i * 2), true);
            assert_eq!(bool_col.get_val(i * 2 + 1), false);
            assert_eq!(bool_col.first(i * 2), Some(true));
            assert_eq!(bool_col.first(i * 2 + 1), Some(false));
        }
    }

@@ -853,7 +860,17 @@ mod tests {
        assert_eq!(file.len(), 177);
        let fastfield_readers = FastFieldReaders::open(file).unwrap();
        let col = fastfield_readers.bool("field_bool").unwrap();
        assert_eq!(col.first(0), None);
        let col = fastfield_readers
            .bool("field_bool")
            .unwrap()
            .first_or_default_col(false);
        assert_eq!(col.get_val(0), false);
        let col = fastfield_readers
            .bool("field_bool")
            .unwrap()
            .first_or_default_col(true);
        assert_eq!(col.get_val(0), true);
    }

    fn get_index(docs: &[crate::Document], schema: &Schema) -> crate::Result<RamDirectory> {
@@ -907,7 +924,7 @@ mod tests {
        let col = readers.date("field").unwrap();

        for (i, time) in times.iter().enumerate() {
            let dt: crate::DateTime = col.get_val(i as u32).into();
            let dt: DateTime = col.first(i as u32).unwrap().into();
            assert_eq!(dt, time.truncate(precision));
        }
        readers.column_num_bytes("field").unwrap()
@@ -943,13 +960,17 @@ mod tests {
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let segment = &searcher.segment_readers()[0];
        let field = segment.fast_fields().u64("url_norm_hash").unwrap();
        let field = segment
            .fast_fields()
            .u64("url_norm_hash")
            .unwrap()
            .first_or_default_col(0);

        let numbers = vec![100, 200, 300];
        let test_range = |range: RangeInclusive<u64>| {
            let expected_count = numbers.iter().filter(|num| range.contains(num)).count();
            let mut vec = vec![];
            field.get_docids_for_value_range(range, 0..u32::MAX, &mut vec);
            field.get_row_ids_for_value_range(range, 0..u32::MAX, &mut vec);
            assert_eq!(vec.len(), expected_count);
        };
        test_range(50..=50);
@@ -976,9 +997,8 @@ mod tests {
        index_writer.commit().unwrap();
        let searcher = index.reader().unwrap().searcher();
        let fastfields = searcher.segment_reader(0u32).fast_fields();
        let column: columnar::Column<Ipv6Addr> =
            fastfields.typed_column_opt("ip").unwrap().unwrap();
        assert_eq!(column.num_rows(), 3);
        let column: Column<Ipv6Addr> = fastfields.column_opt("ip").unwrap().unwrap();
        assert_eq!(column.num_docs(), 3);
        assert_eq!(column.first(0), None);
        assert_eq!(column.first(1), Some(ip_addr));
        assert_eq!(column.first(2), None);
@@ -1014,13 +1034,17 @@ mod tests {
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let segment = &searcher.segment_readers()[0];
        let field = segment.fast_fields().u64("url_norm_hash").unwrap();
        let field = segment
            .fast_fields()
            .u64("url_norm_hash")
            .unwrap()
            .first_or_default_col(0);

        let numbers = vec![1000, 1001, 1003];
        let test_range = |range: RangeInclusive<u64>| {
            let expected_count = numbers.iter().filter(|num| range.contains(num)).count();
            let mut vec = vec![];
            field.get_docids_for_value_range(range, 0..u32::MAX, &mut vec);
            field.get_row_ids_for_value_range(range, 0..u32::MAX, &mut vec);
            assert_eq!(vec.len(), expected_count);
        };
        let test_range_variant = |start, stop| {