mirror of https://github.com/quickwit-oss/tantivy.git
synced 2026-01-01 15:02:55 +00:00

Compare commits: typed-colu...columnar-c

12 Commits

| SHA1 |
|---|
| c6c1485abd |
| b7bfa20e38 |
| db8583db75 |
| 1390834ae8 |
| 3ac973bea4 |
| 405e2cf4d9 |
| b63c6c27bc |
| bd5eea9852 |
| 0f20787917 |
| 2874554ee4 |
| cbc70a9eae |
| 226d0f88bc |
@@ -55,13 +55,12 @@ measure_time = "0.8.2"
async-trait = "0.1.53"
arc-swap = "1.5.0"

columnar = { version="0.1", path="./columnar", package ="tantivy-columnar" }
sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
columnar = { version= "0.1", path="./columnar", package="tantivy-columnar" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version= "0.3", path="./fastfield_codecs", default-features = false }
tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version="0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" }

[target.'cfg(windows)'.dependencies]
@@ -108,7 +107,7 @@ unstable = [] # useful for benches.
quickwit = ["sstable"]

[workspace]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]

# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
@@ -19,7 +19,7 @@ impl BitPacker {
    }

    #[inline]
    pub fn write<TWrite: io::Write>(
    pub fn write<TWrite: io::Write + ?Sized>(
        &mut self,
        val: u64,
        num_bits: u8,
@@ -43,7 +43,7 @@ impl BitPacker {
        Ok(())
    }

    pub fn flush<TWrite: io::Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
    pub fn flush<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
        if self.mini_buffer_written > 0 {
            let num_bytes = (self.mini_buffer_written + 7) / 8;
            let bytes = self.mini_buffer.to_le_bytes();
@@ -54,7 +54,7 @@ impl BitPacker {
        Ok(())
    }

    pub fn close<TWrite: io::Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
    pub fn close<TWrite: io::Write + ?Sized>(&mut self, output: &mut TWrite) -> io::Result<()> {
        self.flush(output)?;
        Ok(())
    }
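The only functional change in this hunk is the added `?Sized` bound on the writer type. As a rough illustration (my own sketch, not part of the diff; it assumes the `BitPacker` type and the `write`/`close` methods shown above are exported by the tantivy-bitpacker crate), the relaxed bound lets callers drive the packer through an unsized trait object such as `&mut dyn io::Write`:

use std::io;
use tantivy_bitpacker::BitPacker;

// `output` is an unsized trait object; this only compiles because `write`,
// `flush` and `close` now accept `TWrite: io::Write + ?Sized`.
fn pack_all(packer: &mut BitPacker, vals: &[u64], num_bits: u8, output: &mut dyn io::Write) -> io::Result<()> {
    for &val in vals {
        packer.write(val, num_bits, output)?;
    }
    packer.close(output)
}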
@@ -5,24 +5,23 @@ edition = "2021"
license = "MIT"

[dependencies]
itertools = "0.10.5"
log = "0.4.17"
fnv = "1.0.7"
fastdivide = "0.4.0"
rand = { version = "0.8.5", optional = true }
measure_time = { version = "0.8.2", optional = true }
prettytable-rs = { version = "0.10.0", optional = true }

stacker = { path = "../stacker", package="tantivy-stacker"}
serde_json = "1"
thiserror = "1"
fnv = "1"
sstable = { path = "../sstable", package = "tantivy-sstable" }
common = { path = "../common", package = "tantivy-common" }
itertools = "0.10"
log = "0.4"
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
prettytable-rs = {version="0.10.0", optional= true}
rand = {version="0.8.3", optional= true}
fastdivide = "0.4"
measure_time = { version="0.8.2", optional=true}

[dev-dependencies]
proptest = "1"
more-asserts = "0.3.0"
rand = "0.8.3"
more-asserts = "0.3.1"
rand = "0.8.5"

[features]
unstable = []
@@ -1,6 +0,0 @@
test:
	echo "Run test only... No examples."
	cargo test --tests --lib

fmt:
	cargo +nightly fmt --all
@@ -1,311 +0,0 @@
|
||||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::*;
|
||||
use test::Bencher;
|
||||
|
||||
use super::*;
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
|
||||
column: &[T],
|
||||
) -> Arc<dyn Column<T>> {
|
||||
let mut buffer = Vec::new();
|
||||
serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
|
||||
open(OwnedBytes::new(buffer)).unwrap()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
let mut data = vec![];
|
||||
for _ in 0..300_000 {
|
||||
let val = rng.gen_range(1..=100);
|
||||
data.push(val);
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
fn get_u128_column_random() -> Arc<dyn Column<u128>> {
|
||||
let permutation = generate_random();
|
||||
let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
get_u128_column_from_data(&permutation)
|
||||
}
|
||||
|
||||
fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn Column<u128>> {
|
||||
let mut out = vec![];
|
||||
let iter_gen = || data.iter().cloned();
|
||||
serialize_u128(iter_gen, data.len() as u32, &mut out).unwrap();
|
||||
let out = OwnedBytes::new(out);
|
||||
open_u128::<u128>(out).unwrap()
|
||||
}
|
||||
|
||||
// U64 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
FIFTY_PERCENT_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
SINGLE_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U64 RANGE END
|
||||
|
||||
// U128 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U128 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u128;
|
||||
for i in 0u64..column.num_vals() as u64 {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let n = column.num_vals();
|
||||
let mut a = 0u128;
|
||||
for i in (0..n / 5).map(|val| val * 5) {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += permutation[i as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u32..n as u32 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..n {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
}
|
||||
columnar/benches/bench_u128.rs (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
#![feature(test)]
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::{random, Rng, SeedableRng};
|
||||
use tantivy_columnar::ColumnValues;
|
||||
use test::Bencher;
|
||||
extern crate test;
|
||||
|
||||
// TODO does this make sense for IPv6 ?
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn get_u128_column_random() -> Arc<dyn ColumnValues<u128>> {
|
||||
let permutation = generate_random();
|
||||
let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
get_u128_column_from_data(&permutation)
|
||||
}
|
||||
|
||||
fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn ColumnValues<u128>> {
|
||||
let mut out = vec![];
|
||||
tantivy_columnar::column_values::serialize_column_values_u128(&data, &mut out).unwrap();
|
||||
let out = OwnedBytes::new(out);
|
||||
tantivy_columnar::column_values::open_u128_mapped::<u128>(out).unwrap()
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
let mut data = vec![];
|
||||
for _ in 0..300_000 {
|
||||
let val = rng.gen_range(1..=100);
|
||||
data.push(val);
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U128 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u128;
|
||||
for i in 0u64..column.num_vals() as u64 {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let n = column.num_vals();
|
||||
let mut a = 0u128;
|
||||
for i in (0..n / 5).map(|val| val * 5) {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
columnar/benches/bench_u64.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use rand::prelude::*;
|
||||
use tantivy_columnar::column_values::{serialize_and_load_u64_based_column_values, CodecType};
|
||||
use tantivy_columnar::*;
|
||||
use test::Bencher;
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn ColumnValues<u64>> {
|
||||
serialize_and_load_u64_based_column_values(&column, &[codec_type])
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_fflookup_bitpacked(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
let mut data = vec![];
|
||||
for _ in 0..300_000 {
|
||||
let val = rng.gen_range(1..=100);
|
||||
data.push(val);
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
|
||||
// U64 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
FIFTY_PERCENT_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(SINGLE_ITEM_RANGE, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&data, CodecType::Bitpacked);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U64 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += permutation[i as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
let column_ref = column.as_ref();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u32..n as u32 {
|
||||
a += column_ref.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn ColumnValues<u64>> = serialize_and_load(&permutation, CodecType::Bitpacked);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..n {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
columnar/columnar-cli/Cargo.toml (new file, 17 lines)
@@ -0,0 +1,17 @@
[package]
name = "tantivy-columnar-cli"
version = "0.1.0"
edition = "2021"
license = "MIT"

[dependencies]
columnar = {path="../", package="tantivy-columnar"}
serde_json = "1"
serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
serde = "1"

[workspace]
members = []

[profile.release]
debug = true
columnar/columnar-cli/src/main.rs (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
use columnar::ColumnarWriter;
|
||||
use columnar::NumericalValue;
|
||||
use serde_json_borrow;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::io::BufRead;
|
||||
use std::io::BufReader;
|
||||
use std::time::Instant;
|
||||
|
||||
#[derive(Default)]
|
||||
struct JsonStack {
|
||||
path: String,
|
||||
stack: Vec<usize>,
|
||||
}
|
||||
|
||||
impl JsonStack {
|
||||
fn push(&mut self, seg: &str) {
|
||||
let len = self.path.len();
|
||||
self.stack.push(len);
|
||||
self.path.push('.');
|
||||
self.path.push_str(seg);
|
||||
}
|
||||
|
||||
fn pop(&mut self) {
|
||||
if let Some(len) = self.stack.pop() {
|
||||
self.path.truncate(len);
|
||||
}
|
||||
}
|
||||
|
||||
fn path(&self) -> &str {
|
||||
&self.path[1..]
|
||||
}
|
||||
}
|
||||
|
||||
fn append_json_to_columnar(
|
||||
doc: u32,
|
||||
json_value: &serde_json_borrow::Value,
|
||||
columnar: &mut ColumnarWriter,
|
||||
stack: &mut JsonStack,
|
||||
) -> usize {
|
||||
let mut count = 0;
|
||||
match json_value {
|
||||
serde_json_borrow::Value::Null => {}
|
||||
serde_json_borrow::Value::Bool(val) => {
|
||||
columnar.record_numerical(
|
||||
doc,
|
||||
stack.path(),
|
||||
NumericalValue::from(if *val { 1u64 } else { 0u64 }),
|
||||
);
|
||||
count += 1;
|
||||
}
|
||||
serde_json_borrow::Value::Number(num) => {
|
||||
let numerical_value: NumericalValue = if let Some(num_i64) = num.as_i64() {
|
||||
num_i64.into()
|
||||
} else if let Some(num_u64) = num.as_u64() {
|
||||
num_u64.into()
|
||||
} else if let Some(num_f64) = num.as_f64() {
|
||||
num_f64.into()
|
||||
} else {
|
||||
panic!();
|
||||
};
|
||||
count += 1;
|
||||
columnar.record_numerical(
|
||||
doc,
|
||||
stack.path(),
|
||||
numerical_value,
|
||||
);
|
||||
}
|
||||
serde_json_borrow::Value::Str(msg) => {
|
||||
columnar.record_str(
|
||||
doc,
|
||||
stack.path(),
|
||||
msg,
|
||||
);
|
||||
count += 1;
|
||||
},
|
||||
serde_json_borrow::Value::Array(vals) => {
|
||||
for val in vals {
|
||||
count += append_json_to_columnar(doc, val, columnar, stack);
|
||||
}
|
||||
},
|
||||
serde_json_borrow::Value::Object(json_map) => {
|
||||
for (child_key, child_val) in json_map {
|
||||
stack.push(child_key);
|
||||
count += append_json_to_columnar(doc, child_val, columnar, stack);
|
||||
stack.pop();
|
||||
}
|
||||
},
|
||||
}
|
||||
count
|
||||
}
|
||||
|
||||
fn main() -> io::Result<()> {
|
||||
let file = File::open("gh_small.json")?;
|
||||
let mut reader = BufReader::new(file);
|
||||
let mut line = String::with_capacity(100);
|
||||
let mut columnar = columnar::ColumnarWriter::default();
|
||||
let mut doc = 0;
|
||||
let start = Instant::now();
|
||||
let mut stack = JsonStack::default();
|
||||
let mut total_count = 0;
|
||||
|
||||
let start_build = Instant::now();
|
||||
loop {
|
||||
line.clear();
|
||||
let len = reader.read_line(&mut line)?;
|
||||
if len == 0 {
|
||||
break;
|
||||
}
|
||||
let Ok(json_value) = serde_json::from_str::<serde_json_borrow::Value>(&line) else { continue; };
|
||||
total_count += append_json_to_columnar(doc, &json_value, &mut columnar, &mut stack);
|
||||
doc += 1;
|
||||
}
|
||||
println!("Build in {:?}", start_build.elapsed());
|
||||
|
||||
println!("value count {total_count}");
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
let start_serialize = Instant::now();
|
||||
columnar.serialize(doc, None, &mut buffer)?;
|
||||
println!("Serialized in {:?}", start_serialize.elapsed());
|
||||
println!("num docs: {doc}, {:?}", start.elapsed());
|
||||
println!("buffer len {} MB", buffer.len() / 1_000_000);
|
||||
let columnar = columnar::ColumnarReader::open(buffer)?;
|
||||
for (column_name, dynamic_column) in columnar.list_columns()? {
|
||||
let num_bytes = dynamic_column.num_bytes();
|
||||
let typ = dynamic_column.column_type();
|
||||
if num_bytes > 1_000_000 {
|
||||
println!("{column_name} {typ:?} {} KB", num_bytes / 1_000);
|
||||
}
|
||||
}
|
||||
println!("{} columns", columnar.num_columns());
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,25 +1,22 @@
# zero to one
* merges
* full still needs a num_values
* replug u128
* add dictionary encoded stuff
* fix multivalued
* find a way to make columnar work with strict types
* plug to tantivy
- indexing
- aggregations
- merge
* replug facets
* replug range queries
+ multivalued range queries restart from the beginning all of the time.

* revisit line codec
* removal of all rows of a column in the schema due to deletes
* add columns from schema on merge
* Plugging JSON
* replug examples
* move datetime to quickwit common
* switch to nanos
* reintroduce the gcd map.

# Perf and Size
* remove alloc in `ord_to_term`
+ multivalued range queries restart from the beginning all of the time.
* re-add ZSTD compression for dictionaries
no systematic monotonic mapping
consider removing multilinear
f32?
adhoc solution for bool?

add metrics helper for aggregate. sum(row_id)
review inline absence/presence
improve perf of select using PDEP
@@ -29,7 +26,6 @@ Add alignment?
Consider another codec to bridge the gap between few and 5k elements

# Cleanup and rationalization
remove the 6 bit limitation of columntype. use 4 + 4 bits instead.
in benchmark, unify percent vs ratio, f32 vs f64.
investigate if we should have better errors? io::Error is overused at the moment.
rename rank/select in unit tests
@@ -40,16 +36,13 @@ use the rank & select naming in unit tests branch.
multi-linear -> blockwise
linear codec -> simply a multiplication for the index column
rename columnar to something more explicit, like column_dictionary or columnar_table
remove old column from the fast field API.
remove the Column traits alias.
rename fastfield -> column
document changes
rationalize FastFieldValue, HasColumnType

isolate u128_based and uniform naming

# Other
fix / enhance column-cli

# Santa claus

autodetect datetime ipaddr, plug customizable tokenizer.
@@ -56,18 +56,28 @@ impl BytesColumn {
#[derive(Clone)]
pub struct StrColumn(BytesColumn);

impl From<BytesColumn> for StrColumn {
    fn from(bytes_col: BytesColumn) -> Self {
        StrColumn(bytes_col)
impl From<StrColumn> for BytesColumn {
    fn from(str_column: StrColumn) -> BytesColumn {
        str_column.0
    }
}

impl StrColumn {
    pub(crate) fn wrap(bytes_column: BytesColumn) -> StrColumn {
        StrColumn(bytes_column)
    }

    pub fn dictionary(&self) -> &Dictionary<VoidSSTable> {
        self.0.dictionary.as_ref()
    }

    /// Fills the buffer
    pub fn ord_to_str(&self, term_ord: u64, output: &mut String) -> io::Result<bool> {
        unsafe {
            let buf = output.as_mut_vec();
            self.0.dictionary.ord_to_term(term_ord, buf)?;
            if !self.0.dictionary.ord_to_term(term_ord, buf)? {
                return Ok(false);
            }
            // TODO consider remove checks if it hurts performance.
            if std::str::from_utf8(buf.as_slice()).is_err() {
                buf.clear();
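The change above makes `ord_to_str` report, through its existing `io::Result<bool>` return type, whether the term ordinal actually exists, instead of discarding the result of `ord_to_term`. A small usage sketch (my own, not part of the diff; it assumes `StrColumn` is re-exported at the crate root, as the `use crate::StrColumn` import later in this diff suggests):

use std::io;
use tantivy_columnar::StrColumn;

// Resolves a term ordinal to its string, distinguishing "absent ordinal" from I/O errors.
fn print_term(str_column: &StrColumn, term_ord: u64) -> io::Result<()> {
    let mut term = String::new();
    if str_column.ord_to_str(term_ord, &mut term)? {
        println!("ordinal {term_ord} resolves to {term:?}");
    } else {
        println!("ordinal {term_ord} is not present in the dictionary");
    }
    Ok(())
}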
@@ -2,27 +2,46 @@ mod dictionary_encoded;
|
||||
mod serialize;
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::io::Write;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::BinarySerializable;
|
||||
pub use dictionary_encoded::{BytesColumn, StrColumn};
|
||||
pub use serialize::{
|
||||
open_column_bytes, open_column_u128, open_column_u64, serialize_column_mappable_to_u128,
|
||||
serialize_column_mappable_to_u64,
|
||||
open_column_bytes, open_column_str, open_column_u128, open_column_u64,
|
||||
serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
|
||||
};
|
||||
|
||||
use crate::column_index::ColumnIndex;
|
||||
use crate::column_values::ColumnValues;
|
||||
use crate::{Cardinality, RowId};
|
||||
use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
|
||||
use crate::column_values::{monotonic_map_column, ColumnValues};
|
||||
use crate::{Cardinality, MonotonicallyMappableToU64, RowId};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Column<T> {
|
||||
pub struct Column<T = u64> {
|
||||
pub idx: ColumnIndex,
|
||||
pub values: Arc<dyn ColumnValues<T>>,
|
||||
}
|
||||
|
||||
impl<T: MonotonicallyMappableToU64> Column<T> {
|
||||
pub fn to_u64_monotonic(self) -> Column<u64> {
|
||||
let values = Arc::new(monotonic_map_column(
|
||||
self.values,
|
||||
StrictlyMonotonicMappingToInternal::<T>::new(),
|
||||
));
|
||||
Column {
|
||||
idx: self.idx,
|
||||
values,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
self.idx.get_cardinality()
|
||||
}
|
||||
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
match &self.idx {
|
||||
ColumnIndex::Full => self.values.num_vals() as u32,
|
||||
@@ -52,6 +71,15 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
|
||||
}
|
||||
|
||||
/// Fills the output vector with the (possibly multiple) values associated with
/// `row_id`.
|
||||
///
|
||||
/// This method clears the `output` vector.
|
||||
pub fn fill_vals(&self, row_id: RowId, output: &mut Vec<T>) {
|
||||
output.clear();
|
||||
output.extend(self.values(row_id));
|
||||
}
|
||||
|
||||
pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> {
|
||||
Arc::new(FirstValueWithDefault {
|
||||
column: self,
|
||||
@@ -69,7 +97,7 @@ impl<T> Deref for Column<T> {
|
||||
}
|
||||
|
||||
impl BinarySerializable for Cardinality {
|
||||
fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
self.to_code().serialize(writer)
|
||||
}
|
||||
|
||||
@@ -105,7 +133,7 @@ impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
|
||||
match &self.column.idx {
|
||||
ColumnIndex::Full => self.column.values.num_vals(),
|
||||
ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(),
|
||||
ColumnIndex::Multivalued(_) => todo!(),
|
||||
ColumnIndex::Multivalued(multivalue_idx) => multivalue_idx.num_rows(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
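The new `fill_vals` helper above clears the output vector and then appends every value stored for the row. A short usage sketch (my own, not part of the diff; it assumes a `Column<u64>` obtained from an opened columnar segment, and uses `u32` for the row id, matching the `RowId` alias used throughout this diff):

use tantivy_columnar::Column;

// Returns all values stored for `row_id`; the vector stays empty for rows without a value.
fn values_of_row(column: &Column<u64>, row_id: u32) -> Vec<u64> {
    let mut vals = Vec::new();
    column.fill_vals(row_id, &mut vals);
    vals
}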
@@ -1,4 +1,3 @@
|
||||
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
@@ -9,43 +8,31 @@ use sstable::Dictionary;
|
||||
use crate::column::{BytesColumn, Column};
|
||||
use crate::column_index::{serialize_column_index, SerializableColumnIndex};
|
||||
use crate::column_values::serialize::serialize_column_values_u128;
|
||||
use crate::column_values::{
|
||||
serialize_column_values, ColumnValues, FastFieldCodecType, MonotonicallyMappableToU128,
|
||||
MonotonicallyMappableToU64,
|
||||
};
|
||||
use crate::column_values::u64_based::{serialize_u64_based_column_values, CodecType};
|
||||
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::StrColumn;
|
||||
|
||||
pub fn serialize_column_mappable_to_u128<
|
||||
F: Fn() -> I,
|
||||
I: Iterator<Item = T>,
|
||||
T: MonotonicallyMappableToU128,
|
||||
>(
|
||||
pub fn serialize_column_mappable_to_u128<T: MonotonicallyMappableToU128>(
|
||||
column_index: SerializableColumnIndex<'_>,
|
||||
column_values: F,
|
||||
num_vals: u32,
|
||||
iterable: &dyn Iterable<T>,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<()> {
|
||||
let column_index_num_bytes = serialize_column_index(column_index, output)?;
|
||||
serialize_column_values_u128(
|
||||
|| column_values().map(|val| val.to_u128()),
|
||||
num_vals,
|
||||
output,
|
||||
)?;
|
||||
serialize_column_values_u128(iterable, output)?;
|
||||
output.write_all(&column_index_num_bytes.to_le_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64 + Debug>(
|
||||
pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64>(
|
||||
column_index: SerializableColumnIndex<'_>,
|
||||
column_values: &impl ColumnValues<T>,
|
||||
column_values: &impl Iterable<T>,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<()> {
|
||||
let column_index_num_bytes = serialize_column_index(column_index, output)?;
|
||||
serialize_column_values(
|
||||
serialize_u64_based_column_values(
|
||||
column_values,
|
||||
&[
|
||||
FastFieldCodecType::Bitpacked,
|
||||
FastFieldCodecType::BlockwiseLinear,
|
||||
],
|
||||
&[CodecType::Bitpacked, CodecType::BlockwiseLinear],
|
||||
output,
|
||||
)?;
|
||||
output.write_all(&column_index_num_bytes.to_le_bytes())?;
|
||||
@@ -62,7 +49,8 @@ pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data)?;
|
||||
let column_values = crate::column_values::open_u64_mapped(column_values_data)?;
|
||||
let column_values =
|
||||
crate::column_values::u64_based::load_u64_based_column_values(column_values_data)?;
|
||||
Ok(Column {
|
||||
idx: column_index,
|
||||
values: column_values,
|
||||
@@ -88,15 +76,19 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_bytes<T: From<BytesColumn>>(data: OwnedBytes) -> io::Result<T> {
|
||||
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
|
||||
let (body, dictionary_len_bytes) = data.rsplit(4);
|
||||
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
|
||||
let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
|
||||
let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
|
||||
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
|
||||
let bytes_column = BytesColumn {
|
||||
Ok(BytesColumn {
|
||||
dictionary,
|
||||
term_ord_column,
|
||||
};
|
||||
Ok(bytes_column.into())
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_str(data: OwnedBytes) -> io::Result<StrColumn> {
|
||||
let bytes_column = open_column_bytes(data)?;
|
||||
Ok(StrColumn::wrap(bytes_column))
|
||||
}
|
||||
|
||||
columnar/src/column_index/merge/mod.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
mod shuffled;
|
||||
mod stacked;
|
||||
|
||||
use shuffled::merge_column_index_shuffled;
|
||||
use stacked::merge_column_index_stacked;
|
||||
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::{Cardinality, ColumnIndex, MergeRowOrder};
|
||||
|
||||
// For simplification, we never have cardinality go down due to deletes.
|
||||
fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
|
||||
columns
|
||||
.iter()
|
||||
.flatten()
|
||||
.map(ColumnIndex::get_cardinality)
|
||||
.max()
|
||||
.unwrap_or(Cardinality::Full)
|
||||
}
|
||||
|
||||
pub fn merge_column_index<'a>(
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
merge_row_order: &'a MergeRowOrder,
|
||||
) -> SerializableColumnIndex<'a> {
|
||||
// For simplification, we do not try to detect whether the cardinality could be
|
||||
// downgraded thanks to deletes.
|
||||
let cardinality_after_merge = detect_cardinality(columns);
|
||||
match merge_row_order {
|
||||
MergeRowOrder::Stack(stack_merge_order) => {
|
||||
merge_column_index_stacked(columns, cardinality_after_merge, stack_merge_order)
|
||||
}
|
||||
MergeRowOrder::Shuffled(complex_merge_order) => {
|
||||
merge_column_index_shuffled(columns, cardinality_after_merge, complex_merge_order)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO actually, the shuffled code path is a bit too general.
|
||||
// In practice, we do not really shuffle everything.
|
||||
// The merge order restricted to a specific column keeps the original row order.
|
||||
//
|
||||
// This may offer some optimization that we have not explored yet.
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::column_index::merge::detect_cardinality;
|
||||
use crate::column_index::multivalued_index::MultiValueIndex;
|
||||
use crate::column_index::{merge_column_index, OptionalIndex, SerializableColumnIndex};
|
||||
use crate::{Cardinality, ColumnIndex, MergeRowOrder, RowAddr, RowId, ShuffleMergeOrder};
|
||||
|
||||
#[test]
|
||||
fn test_detect_cardinality() {
|
||||
assert_eq!(detect_cardinality(&[]), Cardinality::Full);
|
||||
let optional_index: ColumnIndex = OptionalIndex::for_test(1, &[]).into();
|
||||
let multivalued_index: ColumnIndex = MultiValueIndex::for_test(&[0, 1]).into();
|
||||
assert_eq!(
|
||||
detect_cardinality(&[Some(optional_index.clone()), None]),
|
||||
Cardinality::Optional
|
||||
);
|
||||
assert_eq!(
|
||||
detect_cardinality(&[Some(optional_index.clone()), Some(ColumnIndex::Full)]),
|
||||
Cardinality::Optional
|
||||
);
|
||||
assert_eq!(
|
||||
detect_cardinality(&[Some(multivalued_index.clone()), None]),
|
||||
Cardinality::Multivalued
|
||||
);
|
||||
assert_eq!(
|
||||
detect_cardinality(&[
|
||||
Some(multivalued_index.clone()),
|
||||
Some(optional_index.clone())
|
||||
]),
|
||||
Cardinality::Multivalued
|
||||
);
|
||||
assert_eq!(
|
||||
detect_cardinality(&[Some(optional_index), Some(multivalued_index)]),
|
||||
Cardinality::Multivalued
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_index_multivalued_sorted() {
|
||||
let column_indexes: Vec<Option<ColumnIndex>> =
|
||||
vec![Some(MultiValueIndex::for_test(&[0, 2, 5]).into())];
|
||||
let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
|
||||
&[2],
|
||||
vec![
|
||||
RowAddr {
|
||||
segment_ord: 0u32,
|
||||
row_id: 1u32,
|
||||
},
|
||||
RowAddr {
|
||||
segment_ord: 0u32,
|
||||
row_id: 0u32,
|
||||
},
|
||||
],
|
||||
)
|
||||
.into();
|
||||
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
|
||||
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
|
||||
else { panic!("Excpected a multivalued index") };
|
||||
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
|
||||
assert_eq!(&start_indexes, &[0, 3, 5]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_index_multivalued_sorted_several_segment() {
|
||||
let column_indexes: Vec<Option<ColumnIndex>> = vec![
|
||||
Some(MultiValueIndex::for_test(&[0, 2, 5]).into()),
|
||||
None,
|
||||
Some(MultiValueIndex::for_test(&[0, 1, 4]).into()),
|
||||
];
|
||||
let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
|
||||
&[2, 0, 2],
|
||||
vec![
|
||||
RowAddr {
|
||||
segment_ord: 2u32,
|
||||
row_id: 1u32,
|
||||
},
|
||||
RowAddr {
|
||||
segment_ord: 0u32,
|
||||
row_id: 0u32,
|
||||
},
|
||||
RowAddr {
|
||||
segment_ord: 2u32,
|
||||
row_id: 0u32,
|
||||
},
|
||||
],
|
||||
)
|
||||
.into();
|
||||
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
|
||||
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
|
||||
else { panic!("Excpected a multivalued index") };
|
||||
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
|
||||
assert_eq!(&start_indexes, &[0, 3, 5, 6]);
|
||||
}
|
||||
}
|
||||
columnar/src/column_index/merge/shuffled.rs (new file, 171 lines)
@@ -0,0 +1,171 @@
|
||||
use std::iter;
|
||||
|
||||
use crate::column_index::{SerializableColumnIndex, Set};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{Cardinality, ColumnIndex, RowId, ShuffleMergeOrder};
|
||||
|
||||
pub fn merge_column_index_shuffled<'a>(
|
||||
column_indexes: &'a [Option<ColumnIndex>],
|
||||
cardinality_after_merge: Cardinality,
|
||||
shuffle_merge_order: &'a ShuffleMergeOrder,
|
||||
) -> SerializableColumnIndex<'a> {
|
||||
match cardinality_after_merge {
|
||||
Cardinality::Full => SerializableColumnIndex::Full,
|
||||
Cardinality::Optional => {
|
||||
let non_null_row_ids =
|
||||
merge_column_index_shuffled_optional(column_indexes, shuffle_merge_order);
|
||||
SerializableColumnIndex::Optional {
|
||||
non_null_row_ids,
|
||||
num_rows: shuffle_merge_order.num_rows(),
|
||||
}
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalue_start_index =
|
||||
merge_column_index_shuffled_multivalued(column_indexes, shuffle_merge_order);
|
||||
SerializableColumnIndex::Multivalued(multivalue_start_index)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Merge several column indexes into one, ordering rows according to the merge_order passed as
|
||||
/// argument. While it is true that the `merge_order` may imply deletes and hence could in theory turn a
/// multivalued index into an optional one, this is not supported today for simplification.
|
||||
///
|
||||
/// In other words the column_indexes passed as argument may NOT be multivalued.
|
||||
fn merge_column_index_shuffled_optional<'a>(
|
||||
column_indexes: &'a [Option<ColumnIndex>],
|
||||
merge_order: &'a ShuffleMergeOrder,
|
||||
) -> Box<dyn Iterable<RowId> + 'a> {
|
||||
Box::new(ShuffledOptionalIndex {
|
||||
column_indexes,
|
||||
merge_order,
|
||||
})
|
||||
}
|
||||
|
||||
struct ShuffledOptionalIndex<'a> {
|
||||
column_indexes: &'a [Option<ColumnIndex>],
|
||||
merge_order: &'a ShuffleMergeOrder,
|
||||
}
|
||||
|
||||
impl<'a> Iterable<u32> for ShuffledOptionalIndex<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
|
||||
Box::new(self.merge_order
|
||||
.iter_new_to_old_row_addrs()
|
||||
.enumerate()
|
||||
.filter_map(|(new_row_id, old_row_addr)| {
|
||||
let Some(column_index) = &self.column_indexes[old_row_addr.segment_ord as usize] else {
|
||||
return None;
|
||||
};
|
||||
let row_id = new_row_id as u32;
|
||||
if column_index.has_value(old_row_addr.row_id) {
|
||||
Some(row_id)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
fn merge_column_index_shuffled_multivalued<'a>(
|
||||
column_indexes: &'a [Option<ColumnIndex>],
|
||||
merge_order: &'a ShuffleMergeOrder,
|
||||
) -> Box<dyn Iterable<RowId> + 'a> {
|
||||
Box::new(ShuffledMultivaluedIndex {
|
||||
column_indexes,
|
||||
merge_order,
|
||||
})
|
||||
}
|
||||
|
||||
struct ShuffledMultivaluedIndex<'a> {
|
||||
column_indexes: &'a [Option<ColumnIndex>],
|
||||
merge_order: &'a ShuffleMergeOrder,
|
||||
}
|
||||
|
||||
fn iter_num_values<'a>(
|
||||
column_indexes: &'a [Option<ColumnIndex>],
|
||||
merge_order: &'a ShuffleMergeOrder,
|
||||
) -> impl Iterator<Item = u32> + 'a {
|
||||
merge_order.iter_new_to_old_row_addrs().map(|row_addr| {
|
||||
let Some(column_index) = &column_indexes[row_addr.segment_ord as usize] else {
|
||||
// No values in the entire column. It surely means there are 0 values associated with this row.
|
||||
return 0u32;
|
||||
};
|
||||
match column_index {
|
||||
ColumnIndex::Full => 1,
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
if optional_index.contains(row_addr.row_id) {
|
||||
1u32
|
||||
} else {
|
||||
0u32
|
||||
}
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => {
|
||||
multivalued_index.range(row_addr.row_id).len() as u32
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Transforms an iterator containing the number of vals per row (with `num_rows` elements)
|
||||
/// into a `start_offset` iterator starting at 0 and (with `num_rows + 1` element)
|
||||
fn integrate_num_vals(num_vals: impl Iterator<Item = u32>) -> impl Iterator<Item = RowId> {
|
||||
iter::once(0u32).chain(num_vals.scan(0, |state, num_vals| {
|
||||
*state += num_vals;
|
||||
Some(*state)
|
||||
}))
|
||||
}
|
||||
|
||||
impl<'a> Iterable<u32> for ShuffledMultivaluedIndex<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
|
||||
let num_vals_per_row = iter_num_values(self.column_indexes, self.merge_order);
|
||||
Box::new(integrate_num_vals(num_vals_per_row))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_index::OptionalIndex;
|
||||
use crate::RowAddr;
|
||||
|
||||
#[test]
|
||||
fn test_integrate_num_vals_empty() {
|
||||
assert!(integrate_num_vals(iter::empty()).eq(iter::once(0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_integrate_num_vals_one_el() {
|
||||
assert!(integrate_num_vals(iter::once(10)).eq([0, 10].into_iter()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_integrate_num_vals_several() {
|
||||
assert!(integrate_num_vals([3, 0, 10, 20].into_iter()).eq([0, 3, 3, 13, 33].into_iter()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_column_index_optional_shuffle() {
|
||||
let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
|
||||
let column_indexes = vec![Some(optional_index), Some(ColumnIndex::Full)];
|
||||
let row_addrs = vec![
|
||||
RowAddr {
|
||||
segment_ord: 0u32,
|
||||
row_id: 1u32,
|
||||
},
|
||||
RowAddr {
|
||||
segment_ord: 1u32,
|
||||
row_id: 0u32,
|
||||
},
|
||||
];
|
||||
let shuffle_merge_order = ShuffleMergeOrder::for_test(&[2, 1], row_addrs);
|
||||
let serializable_index = merge_column_index_shuffled(
|
||||
&column_indexes[..],
|
||||
Cardinality::Optional,
|
||||
&shuffle_merge_order,
|
||||
);
|
||||
let SerializableColumnIndex::Optional { non_null_row_ids, num_rows } = serializable_index else { panic!() };
|
||||
assert_eq!(num_rows, 2);
|
||||
let non_null_rows: Vec<RowId> = non_null_row_ids.boxed_iter().collect();
|
||||
assert_eq!(&non_null_rows, &[1]);
|
||||
}
|
||||
}
|
||||
columnar/src/column_index/merge/stacked.rs (new file, 154 lines)
@@ -0,0 +1,154 @@
|
||||
use std::iter;
|
||||
|
||||
use crate::column_index::{SerializableColumnIndex, Set};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder};
|
||||
|
||||
/// Simple case:
|
||||
/// The new mapping just consists of stacking the different column indexes.
///
/// No sorting and no deletes are involved.
|
||||
pub fn merge_column_index_stacked<'a>(
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
cardinality_after_merge: Cardinality,
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
) -> SerializableColumnIndex<'a> {
|
||||
match cardinality_after_merge {
|
||||
Cardinality::Full => SerializableColumnIndex::Full,
|
||||
Cardinality::Optional => SerializableColumnIndex::Optional {
|
||||
non_null_row_ids: Box::new(StackedOptionalIndex {
|
||||
columns,
|
||||
stack_merge_order,
|
||||
}),
|
||||
num_rows: stack_merge_order.num_rows(),
|
||||
},
|
||||
Cardinality::Multivalued => {
|
||||
let stacked_multivalued_index = StackedMultivaluedIndex {
|
||||
columns,
|
||||
stack_merge_order,
|
||||
};
|
||||
SerializableColumnIndex::Multivalued(Box::new(stacked_multivalued_index))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct StackedOptionalIndex<'a> {
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
}
|
||||
|
||||
impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
Box::new(
|
||||
self.columns
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(columnar_id, column_index_opt)| {
|
||||
let columnar_row_range = self.stack_merge_order.columnar_range(columnar_id);
|
||||
let rows_it: Box<dyn Iterator<Item = RowId>> = match column_index_opt {
|
||||
Some(ColumnIndex::Full) => Box::new(columnar_row_range),
|
||||
Some(ColumnIndex::Optional(optional_index)) => Box::new(
|
||||
optional_index
|
||||
.iter_rows()
|
||||
.map(move |row_id: RowId| columnar_row_range.start + row_id),
|
||||
),
|
||||
Some(ColumnIndex::Multivalued(_)) => {
|
||||
panic!("No multivalued index is allowed when stacking column index");
|
||||
}
|
||||
None => Box::new(std::iter::empty()),
|
||||
};
|
||||
rows_it
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct StackedMultivaluedIndex<'a> {
|
||||
columns: &'a [Option<ColumnIndex>],
|
||||
stack_merge_order: &'a StackMergeOrder,
|
||||
}
|
||||
|
||||
fn convert_column_opt_to_multivalued_index<'a>(
|
||||
column_index_opt: Option<&'a ColumnIndex>,
|
||||
num_rows: RowId,
|
||||
) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
match column_index_opt {
|
||||
None => Box::new(iter::repeat(0u32).take(num_rows as usize + 1)),
|
||||
Some(ColumnIndex::Full) => Box::new(0..num_rows + 1),
|
||||
Some(ColumnIndex::Optional(optional_index)) => {
|
||||
Box::new(
|
||||
(0..num_rows)
|
||||
// TODO optimize
|
||||
.map(|row_id| optional_index.rank(row_id))
|
||||
.chain(std::iter::once(optional_index.num_non_nulls())),
|
||||
)
|
||||
}
|
||||
Some(ColumnIndex::Multivalued(multivalued_index)) => {
|
||||
multivalued_index.start_index_column.iter()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterable<RowId> for StackedMultivaluedIndex<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = RowId> + '_> {
|
||||
let multivalued_indexes =
|
||||
self.columns
|
||||
.iter()
|
||||
.map(Option::as_ref)
|
||||
.enumerate()
|
||||
.map(|(columnar_id, column_opt)| {
|
||||
let num_rows =
|
||||
self.stack_merge_order.columnar_range(columnar_id).len() as RowId;
|
||||
convert_column_opt_to_multivalued_index(column_opt, num_rows)
|
||||
});
|
||||
stack_multivalued_indexes(multivalued_indexes)
|
||||
}
|
||||
}
|
||||
|
||||
// Refactor me
|
||||
fn stack_multivalued_indexes<'a>(
|
||||
mut multivalued_indexes: impl Iterator<Item = Box<dyn Iterator<Item = RowId> + 'a>> + 'a,
|
||||
) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
let mut offset = 0;
|
||||
let mut last_row_id = 0;
|
||||
let mut current_it = multivalued_indexes.next();
|
||||
Box::new(std::iter::from_fn(move || loop {
|
||||
let Some(multivalued_index) = current_it.as_mut() else {
|
||||
return None;
|
||||
};
|
||||
if let Some(row_id) = multivalued_index.next() {
|
||||
last_row_id = offset + row_id;
|
||||
return Some(last_row_id);
|
||||
}
|
||||
offset = last_row_id;
|
||||
loop {
|
||||
current_it = multivalued_indexes.next();
|
||||
if current_it.as_mut()?.next().is_some() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::RowId;
|
||||
|
||||
fn it<'a>(row_ids: &'a [RowId]) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
Box::new(row_ids.iter().copied())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stack() {
|
||||
let columns = [
|
||||
it(&[0u32, 0u32]),
|
||||
it(&[0u32, 1u32, 1u32, 4u32]),
|
||||
it(&[0u32, 3u32, 5u32]),
|
||||
it(&[0u32, 4u32]),
|
||||
]
|
||||
.into_iter();
|
||||
let start_offsets: Vec<RowId> = super::stack_multivalued_indexes(columns).collect();
|
||||
assert_eq!(start_offsets, &[0, 0, 1, 1, 4, 7, 9, 13]);
|
||||
}
|
||||
}
|
||||
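To make `convert_column_opt_to_multivalued_index` easier to follow, here is a sketch of an extra unit test (my own, not part of the diff; since the function is private it would have to live in the `tests` module of stacked.rs shown above): a `Full` column contributes exactly one value per row, so its start offsets are simply 0..=num_rows, while a missing column contributes nothing and keeps every offset at 0.

#[test]
fn test_convert_full_and_missing_column_to_start_offsets() {
    // Hypothetical test, assuming it is added to the `tests` module of stacked.rs.
    let full: Vec<RowId> =
        super::convert_column_opt_to_multivalued_index(Some(&crate::ColumnIndex::Full), 3).collect();
    assert_eq!(full, vec![0u32, 1, 2, 3]);
    let missing: Vec<RowId> =
        super::convert_column_opt_to_multivalued_index(None, 3).collect();
    assert_eq!(missing, vec![0u32, 0, 0, 0]);
}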
@@ -1,10 +1,12 @@
mod merge;
mod multivalued_index;
mod optional_index;
mod serialize;

use std::ops::Range;

pub use optional_index::{OptionalIndex, SerializableOptionalIndex, Set};
pub use merge::merge_column_index;
pub use optional_index::{OptionalIndex, Set};
pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex};

use crate::column_index::multivalued_index::MultiValueIndex;
@@ -19,6 +21,18 @@ pub enum ColumnIndex {
    Multivalued(MultiValueIndex),
}

impl From<OptionalIndex> for ColumnIndex {
    fn from(optional_index: OptionalIndex) -> ColumnIndex {
        ColumnIndex::Optional(optional_index)
    }
}

impl From<MultiValueIndex> for ColumnIndex {
    fn from(multi_value_index: MultiValueIndex) -> ColumnIndex {
        ColumnIndex::Multivalued(multi_value_index)
    }
}

impl ColumnIndex {
    pub fn get_cardinality(&self) -> Cardinality {
        match self {
@@ -28,6 +42,17 @@ impl ColumnIndex {
        }
    }

    /// Returns true if and only if there is at least one value associated with the row.
    pub fn has_value(&self, row_id: RowId) -> bool {
        match self {
            ColumnIndex::Full => true,
            ColumnIndex::Optional(optional_index) => optional_index.contains(row_id),
            ColumnIndex::Multivalued(multivalued_index) => {
                multivalued_index.range(row_id).len() > 0
            }
        }
    }

    pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
        match self {
            ColumnIndex::Full => row_id..row_id + 1,

@@ -5,16 +5,18 @@ use std::sync::Arc;

use common::OwnedBytes;

use crate::column_values::{ColumnValues, FastFieldCodecType};
use crate::column_values::u64_based::CodecType;
use crate::column_values::ColumnValues;
use crate::iterable::Iterable;
use crate::RowId;

pub fn serialize_multivalued_index(
    multivalued_index: &dyn ColumnValues<RowId>,
    multivalued_index: &dyn Iterable<RowId>,
    output: &mut impl Write,
) -> io::Result<()> {
    crate::column_values::serialize_column_values(
        &*multivalued_index,
        &[FastFieldCodecType::Bitpacked, FastFieldCodecType::Linear],
    crate::column_values::u64_based::serialize_u64_based_column_values(
        multivalued_index,
        &[CodecType::Bitpacked, CodecType::Linear],
        output,
    )?;
    Ok(())
@@ -22,7 +24,7 @@ pub fn serialize_multivalued_index(

pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<MultiValueIndex> {
    let start_index_column: Arc<dyn ColumnValues<RowId>> =
        crate::column_values::open_u64_mapped(bytes)?;
        crate::column_values::u64_based::load_u64_based_column_values(bytes)?;
    Ok(MultiValueIndex { start_index_column })
}

@@ -30,7 +32,7 @@ pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<MultiValueIndex>
/// Index to resolve the value range for a given doc_id.
/// Starts at 0.
pub struct MultiValueIndex {
    start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
    pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
}

impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
@@ -40,6 +42,13 @@ impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
}

impl MultiValueIndex {
    pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex {
        let mut buffer = Vec::new();
        serialize_multivalued_index(&start_offsets, &mut buffer).unwrap();
        let bytes = OwnedBytes::new(buffer);
        open_multivalued_index(bytes).unwrap()
    }

    /// Returns `[start, end)`, such that the values associated with
    /// the given document are `start..end`.
    #[inline]

@@ -1,5 +1,4 @@
use std::io::{self, Write};
use std::ops::Range;
use std::sync::Arc;

mod set;
@@ -11,6 +10,7 @@ use set_block::{
    DenseBlock, DenseBlockCodec, SparseBlock, SparseBlockCodec, DENSE_BLOCK_NUM_BYTES,
};

use crate::iterable::Iterable;
use crate::{InvalidData, RowId};

/// The threshold for the number of elements after which we switch to dense block encoding.
@@ -88,16 +88,6 @@ pub struct OptionalIndex {
    block_metas: Arc<[BlockMeta]>,
}

impl OptionalIndex {
    pub fn num_rows(&self) -> RowId {
        self.num_rows
    }

    pub fn num_non_nulls(&self) -> RowId {
        self.num_non_null_rows
    }
}

/// Splits a value address into lower and upper 16 bits.
/// The lower 16 bits are the value in the block.
/// The upper 16 bits are the block index.
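// Example (added for illustration, not in the original source): the address
// 65_538 (= 1 << 16 | 2) splits into block index 1 (upper 16 bits) and
// in-block value 2 (lower 16 bits).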
@@ -186,6 +176,21 @@ impl Set<RowId> for OptionalIndex {
        }
    }

    #[inline]
    fn rank(&self, row_id: RowId) -> RowId {
        let RowAddr {
            block_id,
            in_block_row_id,
        } = row_addr_from_row_id(row_id);
        let block_meta = self.block_metas[block_id as usize];
        let block = self.block(block_meta);
        let block_offset_row_id = match block {
            Block::Dense(dense_block) => dense_block.rank(in_block_row_id),
            Block::Sparse(sparse_block) => sparse_block.rank(in_block_row_id),
        } as u32;
        block_meta.non_null_rows_before_block + block_offset_row_id
    }

    #[inline]
    fn rank_if_exists(&self, row_id: RowId) -> Option<RowId> {
        let RowAddr {
@@ -230,6 +235,31 @@ impl Set<RowId> for OptionalIndex {
}

impl OptionalIndex {
    pub fn for_test(num_rows: RowId, row_ids: &[RowId]) -> OptionalIndex {
        assert!(row_ids
            .last()
            .copied()
            .map(|last_row_id| last_row_id < num_rows)
            .unwrap_or(true));
        let mut buffer = Vec::new();
        serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
        let bytes = OwnedBytes::new(buffer);
        open_optional_index(bytes).unwrap()
    }

    pub fn num_rows(&self) -> RowId {
        self.num_rows
    }

    pub fn num_non_nulls(&self) -> RowId {
        self.num_non_null_rows
    }

    pub fn iter_rows<'a>(&'a self) -> impl Iterator<Item = RowId> + 'a {
        // TODO optimize
        let mut select_batch = self.select_cursor();
        (0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
    }
    pub fn select_batch(&self, ranks: &mut [RowId]) {
        let mut select_cursor = self.select_cursor();
        for rank in ranks.iter_mut() {
@@ -300,7 +330,7 @@ impl OptionalIndexCodec {
}

impl BinarySerializable for OptionalIndexCodec {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_all(&[self.to_code()])
    }

@@ -322,12 +352,13 @@ fn serialize_optional_index_block(block_els: &[u16], out: &mut impl io::Write) -
}

pub fn serialize_optional_index<'a, W: io::Write>(
    serializable_optional_index: &dyn SerializableOptionalIndex<'a>,
    non_null_rows: &dyn Iterable<RowId>,
    num_rows: RowId,
    output: &mut W,
) -> io::Result<()> {
    VInt(serializable_optional_index.num_rows() as u64).serialize(output)?;
    VInt(num_rows as u64).serialize(output)?;

    let mut rows_it = serializable_optional_index.non_null_rows();
    let mut rows_it = non_null_rows.boxed_iter();
    let mut block_metadata: Vec<SerializedBlockMeta> = Vec::new();
    let mut current_block = Vec::new();

@@ -480,19 +511,5 @@ pub fn open_optional_index(bytes: OwnedBytes) -> io::Result<OptionalIndex> {
    Ok(optional_index)
}

pub trait SerializableOptionalIndex<'a> {
    fn num_rows(&self) -> RowId;
    fn non_null_rows(&self) -> Box<dyn Iterator<Item = RowId> + 'a>;
}

impl SerializableOptionalIndex<'static> for Range<u32> {
    fn num_rows(&self) -> RowId {
        self.end
    }
    fn non_null_rows(&self) -> Box<dyn Iterator<Item = RowId> + 'static> {
        Box::new(self.clone())
    }
}

#[cfg(test)]
mod tests;

@@ -28,7 +28,10 @@ pub trait Set<T> {
    /// Returns true if the element is contained in the Set
    fn contains(&self, el: T) -> bool;

    /// If the set contains `el`, returns its position in the sorted set of elements.
    /// Returns the number of rows in the set that are < `el`
    fn rank(&self, el: T) -> T;

    /// If the set contains `el` returns the element rank.
    /// If the set does not contain the element, it returns `None`.
    fn rank_if_exists(&self, el: T) -> Option<T>;
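    // Illustration (added comment, not part of the original trait): for the sorted set
    // {3, 7, 9}, rank(7) == 1 and rank(8) == 2, while rank_if_exists(7) == Some(1)
    // and rank_if_exists(8) == None.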


@@ -148,6 +148,15 @@ impl<'a> Set<u16> for DenseBlock<'a> {
        }
    }

    #[inline(always)]
    fn rank(&self, el: u16) -> u16 {
        let block_pos = el / ELEMENTS_PER_MINI_BLOCK;
        let index_block = self.mini_block(block_pos);
        let pos_in_block_bit_vec = el % ELEMENTS_PER_MINI_BLOCK;
        let ones_in_block = rank_u64(index_block.bitvec, pos_in_block_bit_vec);
        index_block.rank + ones_in_block
    }

    #[inline(always)]
    fn select(&self, rank: u16) -> u16 {
        let block_id = self.find_miniblock_containing_rank(rank, 0).unwrap();

@@ -44,6 +44,11 @@ impl<'a> Set<u16> for SparseBlock<'a> {
        self.binary_search(el).ok()
    }

    #[inline(always)]
    fn rank(&self, el: u16) -> u16 {
        self.binary_search(el).unwrap_or_else(|el| el)
    }

    #[inline(always)]
    fn select(&self, rank: u16) -> u16 {
        let offset = rank as usize * 2;

@@ -17,6 +17,10 @@ fn test_set_helper<C: SetCodec<Item = u16>>(vals: &[u16]) -> usize {
    for val in 0u16..=u16::MAX {
        assert_eq!(tested_set.contains(val), hash_set.contains_key(&val));
        assert_eq!(tested_set.rank_if_exists(val), hash_set.get(&val).copied());
        assert_eq!(
            tested_set.rank(val),
            vals.iter().cloned().take_while(|v| *v < val).count() as u16
        );
    }
    for rank in 0..vals.len() {
        assert_eq!(tested_set.select(rank as u16), vals[rank]);

@@ -37,7 +37,7 @@ proptest! {
    fn test_with_random_sets_simple() {
        let vals = 10..BLOCK_SIZE * 2;
        let mut out: Vec<u8> = Vec::new();
        serialize_optional_index(&vals.clone(), &mut out).unwrap();
        serialize_optional_index(&vals.clone(), 100, &mut out).unwrap();
        let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
        let ranks: Vec<u32> = (65_472u32..65_473u32).collect();
        let els: Vec<u32> = ranks.iter().copied().map(|rank| rank + 10).collect();
@@ -66,12 +66,8 @@ fn test_optional_index_one_block_true() {
    test_null_index(&iter[..]);
}

impl<'a> SerializableOptionalIndex<'a> for &'a [bool] {
    fn num_rows(&self) -> RowId {
        self.len() as u32
    }

    fn non_null_rows(&self) -> Box<dyn Iterator<Item = RowId> + 'a> {
impl<'a> Iterable<RowId> for &'a [bool] {
    fn boxed_iter(&self) -> Box<dyn Iterator<Item = RowId> + 'a> {
        Box::new(
            self.iter()
                .cloned()
@@ -84,7 +80,7 @@ impl<'a> SerializableOptionalIndex<'a> for &'a [bool] {

fn test_null_index(data: &[bool]) {
    let mut out: Vec<u8> = Vec::new();
    serialize_optional_index(&data, &mut out).unwrap();
    serialize_optional_index(&data, data.len() as RowId, &mut out).unwrap();
    let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
    let orig_idx_with_value: Vec<u32> = data
        .iter()
@@ -111,51 +107,96 @@ fn test_null_index(data: &[bool]) {

#[test]
fn test_optional_index_test_translation() {
    let mut out = vec![];
    let iter = &[true, false, true, false];
    serialize_optional_index(&&iter[..], &mut out).unwrap();
    let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
    let mut select_cursor = null_index.select_cursor();
    let optional_index = OptionalIndex::for_test(4, &[0, 2]);
    let mut select_cursor = optional_index.select_cursor();
    assert_eq!(select_cursor.select(0), 0);
    assert_eq!(select_cursor.select(1), 2);
}

#[test]
fn test_optional_index_translate() {
    let mut out = vec![];
    let iter = &[true, false, true, false];
    serialize_optional_index(&&iter[..], &mut out).unwrap();
    let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
    assert_eq!(null_index.rank_if_exists(0), Some(0));
    assert_eq!(null_index.rank_if_exists(2), Some(1));
    let optional_index = OptionalIndex::for_test(4, &[0, 2]);
    assert_eq!(optional_index.rank_if_exists(0), Some(0));
    assert_eq!(optional_index.rank_if_exists(2), Some(1));
}

#[test]
fn test_optional_index_small() {
    let mut out = vec![];
    let iter = &[true, false, true, false];
    serialize_optional_index(&&iter[..], &mut out).unwrap();
    let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
    assert!(null_index.contains(0));
    assert!(!null_index.contains(1));
    assert!(null_index.contains(2));
    assert!(!null_index.contains(3));
    let optional_index = OptionalIndex::for_test(4, &[0, 2]);
    assert!(optional_index.contains(0));
    assert!(!optional_index.contains(1));
    assert!(optional_index.contains(2));
    assert!(!optional_index.contains(3));
}

#[test]
fn test_optional_index_large() {
    let mut docs = vec![];
    docs.extend((0..ELEMENTS_PER_BLOCK).map(|_idx| false));
    docs.extend((0..=1).map(|_idx| true));
    let row_ids = &[ELEMENTS_PER_BLOCK, ELEMENTS_PER_BLOCK + 1];
    let optional_index = OptionalIndex::for_test(ELEMENTS_PER_BLOCK + 2, row_ids);
    assert!(!optional_index.contains(0));
    assert!(!optional_index.contains(100));
    assert!(!optional_index.contains(ELEMENTS_PER_BLOCK - 1));
    assert!(optional_index.contains(ELEMENTS_PER_BLOCK));
    assert!(optional_index.contains(ELEMENTS_PER_BLOCK + 1));
}

    let mut out = vec![];
    serialize_optional_index(&&docs[..], &mut out).unwrap();
    let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
    assert!(!null_index.contains(0));
    assert!(!null_index.contains(100));
    assert!(!null_index.contains(ELEMENTS_PER_BLOCK - 1));
    assert!(null_index.contains(ELEMENTS_PER_BLOCK));
    assert!(null_index.contains(ELEMENTS_PER_BLOCK + 1));
fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
    let optional_index = OptionalIndex::for_test(num_rows, row_ids);
    assert_eq!(optional_index.num_rows(), num_rows);
    assert!(optional_index.iter_rows().eq(row_ids.iter().copied()));
}

#[test]
fn test_optional_index_iter_empty() {
    test_optional_index_iter_aux(&[], 0u32);
}

fn test_optional_index_rank_aux(row_ids: &[RowId]) {
    let num_rows = row_ids.last().copied().unwrap_or(0u32) + 1;
    let null_index = OptionalIndex::for_test(num_rows, row_ids);
    assert_eq!(null_index.num_rows(), num_rows);
    for (row_id, row_val) in row_ids.iter().copied().enumerate() {
        assert_eq!(null_index.rank(row_val), row_id as u32);
        assert_eq!(null_index.rank_if_exists(row_val), Some(row_id as u32));
        if row_val > 0 && !null_index.contains(&row_val - 1) {
            assert_eq!(null_index.rank(row_val - 1), row_id as u32);
        }
        assert_eq!(null_index.rank(row_val + 1), row_id as u32 + 1);
    }
}

#[test]
fn test_optional_index_rank() {
    test_optional_index_rank_aux(&[1u32]);
    test_optional_index_rank_aux(&[0u32, 1u32]);
    let mut block = Vec::new();
    block.push(3u32);
    block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
    test_optional_index_rank_aux(&block);
}

#[test]
fn test_optional_index_iter_empty_one() {
    test_optional_index_iter_aux(&[1], 2u32);
    test_optional_index_iter_aux(&[100_000], 200_000u32);
}

#[test]
fn test_optional_index_iter_dense_block() {
    let mut block = Vec::new();
    block.push(3u32);
    block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
    test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
}

#[test]
fn test_optional_index_for_tests() {
    let optional_index = OptionalIndex::for_test(4, &[1, 2]);
    assert!(!optional_index.contains(0));
    assert!(optional_index.contains(1));
    assert!(optional_index.contains(2));
    assert!(!optional_index.contains(3));
    assert_eq!(optional_index.num_rows(), 4);
}

#[cfg(all(test, feature = "unstable"))]

@@ -5,23 +5,26 @@ use common::{CountingWriter, OwnedBytes};

use crate::column_index::multivalued_index::serialize_multivalued_index;
use crate::column_index::optional_index::serialize_optional_index;
use crate::column_index::{ColumnIndex, SerializableOptionalIndex};
use crate::column_values::ColumnValues;
use crate::column_index::ColumnIndex;
use crate::iterable::Iterable;
use crate::{Cardinality, RowId};

pub enum SerializableColumnIndex<'a> {
    Full,
    Optional(Box<dyn SerializableOptionalIndex<'a> + 'a>),
    Optional {
        non_null_row_ids: Box<dyn Iterable<RowId> + 'a>,
        num_rows: RowId,
    },
    // TODO remove the Arc<dyn> apart from serialization this is not
    // dynamic at all.
    Multivalued(Box<dyn ColumnValues<RowId> + 'a>),
    Multivalued(Box<dyn Iterable<RowId> + 'a>),
}
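// Illustrative sketch (added for clarity; whether a plain slice implements `Iterable<RowId>`
// is an assumption based on the `for_test` helpers above, so read this as pseudo-usage):
// SerializableColumnIndex::Optional {
//     non_null_row_ids: Box::new(&[0u32, 2u32][..]),
//     num_rows: 4,
// }
// would describe a column with values in rows 0 and 2 out of 4 rows.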

impl<'a> SerializableColumnIndex<'a> {
    pub fn get_cardinality(&self) -> Cardinality {
        match self {
            SerializableColumnIndex::Full => Cardinality::Full,
            SerializableColumnIndex::Optional(_) => Cardinality::Optional,
            SerializableColumnIndex::Optional { .. } => Cardinality::Optional,
            SerializableColumnIndex::Multivalued(_) => Cardinality::Multivalued,
        }
    }
@@ -36,9 +39,10 @@ pub fn serialize_column_index(
    output.write_all(&[cardinality])?;
    match column_index {
        SerializableColumnIndex::Full => {}
        SerializableColumnIndex::Optional(optional_index) => {
            serialize_optional_index(&*optional_index, &mut output)?
        }
        SerializableColumnIndex::Optional {
            non_null_row_ids,
            num_rows,
        } => serialize_optional_index(non_null_row_ids.as_ref(), num_rows, &mut output)?,
        SerializableColumnIndex::Multivalued(multivalued_index) => {
            serialize_multivalued_index(&*multivalued_index, &mut output)?
        }

@@ -1,115 +0,0 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use super::serialize::NormalizedHeader;
|
||||
use super::{ColumnValues, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct BitpackedReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
normalized_header: NormalizedHeader,
|
||||
}
|
||||
|
||||
impl ColumnValues for BitpackedReader {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
// The BitpackedReader assumes a normalized vector.
|
||||
0
|
||||
}
|
||||
#[inline]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.normalized_header.max_value
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.normalized_header.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitpackedCodec;
|
||||
|
||||
impl FastFieldCodec for BitpackedCodec {
|
||||
/// The CODEC_TYPE is an enum value used for serialization.
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
|
||||
|
||||
type Reader = BitpackedReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(
|
||||
data: OwnedBytes,
|
||||
normalized_header: NormalizedHeader,
|
||||
) -> io::Result<Self::Reader> {
|
||||
let num_bits = compute_num_bits(normalized_header.max_value);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedReader {
|
||||
data,
|
||||
bit_unpacker,
|
||||
normalized_header,
|
||||
})
|
||||
}
|
||||
|
||||
/// Serializes data with the BitpackedFastFieldSerializer.
|
||||
///
|
||||
/// The bitpacker assumes that the column has been normalized.
|
||||
/// i.e. It has already been shifted by its minimum value, so that its
|
||||
/// current minimum value is 0.
|
||||
///
|
||||
/// Ideally, the column has already been shifted upstream so that `col.min_value() == 0`.
|
||||
fn serialize(column: &dyn ColumnValues, write: &mut impl Write) -> io::Result<()> {
|
||||
assert_eq!(column.min_value(), 0u64);
|
||||
let num_bits = compute_num_bits(column.max_value());
|
||||
let mut bit_packer = BitPacker::new();
|
||||
for val in column.iter() {
|
||||
bit_packer.write(val, num_bits, write)?;
|
||||
}
|
||||
bit_packer.close(write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn estimate(column: &dyn ColumnValues) -> Option<f32> {
|
||||
let num_bits = compute_num_bits(column.max_value());
|
||||
let num_bits_uncompressed = 64;
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::tests::create_and_validate;
|
||||
|
||||
fn create_and_validate_bitpacked_codec(data: &[u64], name: &str) {
|
||||
create_and_validate::<BitpackedCodec>(data, name);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = crate::column_values::tests::get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate_bitpacked_codec(&data, name);
|
||||
data.reverse();
|
||||
create_and_validate::<BitpackedCodec>(&data, name);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bitpacked_fast_field_rand() {
|
||||
for _ in 0..500 {
|
||||
let mut data = (0..1 + rand::random::<u8>() as usize)
|
||||
.map(|_| rand::random::<i64>() as u64 / 2)
|
||||
.collect::<Vec<_>>();
|
||||
create_and_validate_bitpacked_codec(&data, "rand");
|
||||
data.reverse();
|
||||
create_and_validate::<BitpackedCodec>(&data, "rand");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,188 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
use std::{io, iter};
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, DeserializeFrom, OwnedBytes};
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::line::Line;
|
||||
use crate::column_values::serialize::NormalizedHeader;
|
||||
use crate::column_values::{ColumnValues, FastFieldCodec, FastFieldCodecType, VecColumn};
|
||||
|
||||
const CHUNK_SIZE: usize = 512;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct Block {
|
||||
line: Line,
|
||||
bit_unpacker: BitUnpacker,
|
||||
data_start_offset: usize,
|
||||
}
|
||||
|
||||
impl BinarySerializable for Block {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
self.line.serialize(writer)?;
|
||||
self.bit_unpacker.bit_width().serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let line = Line::deserialize(reader)?;
|
||||
let bit_width = u8::deserialize(reader)?;
|
||||
Ok(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_num_blocks(num_vals: u32) -> usize {
|
||||
(num_vals as usize + CHUNK_SIZE - 1) / CHUNK_SIZE
|
||||
}
|
||||
|
||||
pub struct BlockwiseLinearCodec;
|
||||
|
||||
impl FastFieldCodec for BlockwiseLinearCodec {
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
|
||||
type Reader = BlockwiseLinearReader;
|
||||
|
||||
fn open_from_bytes(
|
||||
bytes: common::OwnedBytes,
|
||||
normalized_header: NormalizedHeader,
|
||||
) -> io::Result<Self::Reader> {
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
let footer_offset = bytes.len() - 4 - footer_len as usize;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let num_blocks = compute_num_blocks(normalized_header.num_vals);
|
||||
let mut blocks: Vec<Block> = iter::repeat_with(|| Block::deserialize(&mut footer))
|
||||
.take(num_blocks)
|
||||
.collect::<io::Result<_>>()?;
|
||||
|
||||
let mut start_offset = 0;
|
||||
for block in &mut blocks {
|
||||
block.data_start_offset = start_offset;
|
||||
start_offset += (block.bit_unpacker.bit_width() as usize) * CHUNK_SIZE / 8;
|
||||
}
|
||||
Ok(BlockwiseLinearReader {
|
||||
blocks: Arc::new(blocks),
|
||||
data,
|
||||
normalized_header,
|
||||
})
|
||||
}
|
||||
|
||||
// Estimate first_chunk and extrapolate
|
||||
fn estimate(column: &dyn ColumnValues) -> Option<f32> {
|
||||
if column.num_vals() < 10 * CHUNK_SIZE as u32 {
|
||||
return None;
|
||||
}
|
||||
let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE).collect();
|
||||
let line = Line::train(&VecColumn::from(&first_chunk));
|
||||
for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
|
||||
}
|
||||
let estimated_bit_width = first_chunk
|
||||
.iter()
|
||||
.map(|el| ((el + 1) as f32 * 3.0) as u64)
|
||||
.map(compute_num_bits)
|
||||
.max()
|
||||
.unwrap();
|
||||
|
||||
let metadata_per_block = {
|
||||
let mut out = vec![];
|
||||
Block::default().serialize(&mut out).unwrap();
|
||||
out.len()
|
||||
};
|
||||
let num_bits = estimated_bit_width as u64 * column.num_vals() as u64
|
||||
// function metadata per block
|
||||
+ metadata_per_block as u64 * (column.num_vals() as u64 / CHUNK_SIZE as u64);
|
||||
let num_bits_uncompressed = 64 * column.num_vals();
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
|
||||
fn serialize(column: &dyn ColumnValues, wrt: &mut impl io::Write) -> io::Result<()> {
|
||||
// The BitpackedReader assumes a normalized vector.
|
||||
assert_eq!(column.min_value(), 0);
|
||||
let mut buffer = Vec::with_capacity(CHUNK_SIZE);
|
||||
let num_vals = column.num_vals();
|
||||
|
||||
let num_blocks = compute_num_blocks(num_vals);
|
||||
let mut blocks = Vec::with_capacity(num_blocks);
|
||||
|
||||
let mut vals = column.iter();
|
||||
|
||||
let mut bit_packer = BitPacker::new();
|
||||
|
||||
for _ in 0..num_blocks {
|
||||
buffer.clear();
|
||||
buffer.extend((&mut vals).take(CHUNK_SIZE));
|
||||
let line = Line::train(&VecColumn::from(&buffer));
|
||||
|
||||
assert!(!buffer.is_empty());
|
||||
|
||||
for (i, buffer_val) in buffer.iter_mut().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
|
||||
}
|
||||
let bit_width = buffer.iter().copied().map(compute_num_bits).max().unwrap();
|
||||
|
||||
for &buffer_val in &buffer {
|
||||
bit_packer.write(buffer_val, bit_width, wrt)?;
|
||||
}
|
||||
|
||||
blocks.push(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
});
|
||||
}
|
||||
|
||||
bit_packer.close(wrt)?;
|
||||
|
||||
assert_eq!(blocks.len(), compute_num_blocks(num_vals));
|
||||
|
||||
let mut counting_wrt = CountingWriter::wrap(wrt);
|
||||
for block in &blocks {
|
||||
block.serialize(&mut counting_wrt)?;
|
||||
}
|
||||
let footer_len = counting_wrt.written_bytes();
|
||||
(footer_len as u32).serialize(&mut counting_wrt)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockwiseLinearReader {
|
||||
blocks: Arc<Vec<Block>>,
|
||||
normalized_header: NormalizedHeader,
|
||||
data: OwnedBytes,
|
||||
}
|
||||
|
||||
impl ColumnValues for BlockwiseLinearReader {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> u64 {
|
||||
let block_id = (idx / CHUNK_SIZE as u32) as usize;
|
||||
let idx_within_block = idx % (CHUNK_SIZE as u32);
|
||||
let block = &self.blocks[block_id];
|
||||
let interpoled_val: u64 = block.line.eval(idx_within_block);
|
||||
let block_bytes = &self.data[block.data_start_offset..];
|
||||
let bitpacked_diff = block.bit_unpacker.get(idx_within_block, block_bytes);
|
||||
interpoled_val.wrapping_add(bitpacked_diff)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> u64 {
|
||||
// The BlockwiseLinearReader assumes a normalized vector.
|
||||
0u64
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.normalized_header.max_value
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.normalized_header.num_vals
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::fmt::Debug;
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::sync::Arc;
|
||||
|
||||
use tantivy_bitpacker::minmax;
|
||||
|
||||
@@ -9,7 +10,7 @@ use crate::column_values::monotonic_mapping::StrictlyMonotonicFn;
|
||||
/// `ColumnValues` provides access to a dense field column.
|
||||
///
|
||||
/// `Column` are just a wrapper over `ColumnValues` and a `ColumnIndex`.
|
||||
pub trait ColumnValues<T: PartialOrd + Debug = u64>: Send + Sync {
|
||||
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
/// Return the value associated with the given idx.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
@@ -27,7 +28,7 @@ pub trait ColumnValues<T: PartialOrd + Debug = u64>: Send + Sync {
|
||||
///
|
||||
/// Must panic if `start + output.len()` is greater than
|
||||
/// the segment's `maxdoc`.
|
||||
#[inline]
|
||||
#[inline(always)]
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
for (out, idx) in output.iter_mut().zip(start..) {
|
||||
*out = self.get_val(idx as u32);
|
||||
@@ -37,7 +38,7 @@ pub trait ColumnValues<T: PartialOrd + Debug = u64>: Send + Sync {
|
||||
/// Get the positions of values which are in the provided value range.
|
||||
///
|
||||
/// Note that position == docid for single value fast fields
|
||||
#[inline]
|
||||
#[inline(always)]
|
||||
fn get_docids_for_value_range(
|
||||
&self,
|
||||
value_range: RangeInclusive<T>,
|
||||
@@ -78,27 +79,33 @@ pub trait ColumnValues<T: PartialOrd + Debug = u64>: Send + Sync {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for std::sync::Arc<dyn ColumnValues<T>> {
|
||||
impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
self.as_ref().get_val(idx)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> T {
|
||||
self.as_ref().min_value()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn max_value(&self) -> T {
|
||||
self.as_ref().max_value()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.as_ref().num_vals()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
|
||||
self.as_ref().iter()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
self.as_ref().get_range(start, output)
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::compact_space::build_compact_space::get_compact_space;
|
||||
use crate::column_values::ColumnValues;
|
||||
use crate::RowId;
|
||||
|
||||
mod blank_range;
|
||||
mod build_compact_space;
|
||||
@@ -55,7 +56,7 @@ impl RangeMapping {
|
||||
}
|
||||
|
||||
impl BinarySerializable for CompactSpace {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.ranges_mapping.len() as u64).serialize(writer)?;
|
||||
|
||||
let mut prev_value = 0;
|
||||
@@ -158,23 +159,30 @@ impl CompactSpace {
|
||||
pub struct CompactSpaceCompressor {
|
||||
params: IPCodecParams,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IPCodecParams {
|
||||
compact_space: CompactSpace,
|
||||
bit_unpacker: BitUnpacker,
|
||||
min_value: u128,
|
||||
max_value: u128,
|
||||
num_vals: u32,
|
||||
num_vals: RowId,
|
||||
num_bits: u8,
|
||||
}
|
||||
|
||||
impl CompactSpaceCompressor {
|
||||
/// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals.
|
||||
pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u32) -> Self {
|
||||
let mut values_sorted = BTreeSet::new();
|
||||
values_sorted.extend(iter);
|
||||
let total_num_values = num_vals;
|
||||
pub fn num_vals(&self) -> RowId {
|
||||
self.params.num_vals
|
||||
}
|
||||
|
||||
/// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals.
|
||||
pub fn train_from(iter: impl Iterator<Item = u128>) -> Self {
|
||||
let mut values_sorted = BTreeSet::new();
|
||||
let mut total_num_values = 0u32;
|
||||
for val in iter {
|
||||
total_num_values += 1u32;
|
||||
values_sorted.insert(val);
|
||||
}
|
||||
let compact_space =
|
||||
get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
|
||||
let amplitude_compact_space = compact_space.amplitude_compact_space();
|
||||
@@ -247,7 +255,7 @@ pub struct CompactSpaceDecompressor {
|
||||
}
|
||||
|
||||
impl BinarySerializable for IPCodecParams {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
// header flags for future optional dictionary encoding
|
||||
let footer_flags = 0u64;
|
||||
footer_flags.serialize(writer)?;
|
||||
@@ -450,364 +458,352 @@ impl CompactSpaceDecompressor {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO reenable what can be reenabled.
|
||||
// #[cfg(test)]
|
||||
// mod tests {
|
||||
//
|
||||
// use super::*;
|
||||
// use crate::column::format_version::read_format_version;
|
||||
// use crate::column::column_footer::read_null_index_footer;
|
||||
// use crate::column::serialize::U128Header;
|
||||
// use crate::column::{open_u128, serialize_u128};
|
||||
//
|
||||
// #[test]
|
||||
// fn compact_space_test() {
|
||||
// let ips = &[
|
||||
// 2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
|
||||
// ]
|
||||
// .into_iter()
|
||||
// .collect();
|
||||
// let compact_space = get_compact_space(ips, ips.len() as u32, 11);
|
||||
// let amplitude = compact_space.amplitude_compact_space();
|
||||
// assert_eq!(amplitude, 17);
|
||||
// assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
|
||||
// assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
|
||||
// assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);
|
||||
//
|
||||
// for (num1, num2) in (0..3).tuple_windows() {
|
||||
// assert_eq!(
|
||||
// compact_space.get_range_mapping(num1).compact_end() + 1,
|
||||
// compact_space.get_range_mapping(num2).compact_start
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// let mut output: Vec<u8> = Vec::new();
|
||||
// compact_space.serialize(&mut output).unwrap();
|
||||
//
|
||||
// assert_eq!(
|
||||
// compact_space,
|
||||
// CompactSpace::deserialize(&mut &output[..]).unwrap()
|
||||
// );
|
||||
//
|
||||
// for ip in ips {
|
||||
// let compact = compact_space.u128_to_compact(*ip).unwrap();
|
||||
// assert_eq!(compact_space.compact_to_u128(compact), *ip);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn compact_space_amplitude_test() {
|
||||
// let ips = &[100000u128, 1000000].into_iter().collect();
|
||||
// let compact_space = get_compact_space(ips, ips.len() as u32, 1);
|
||||
// let amplitude = compact_space.amplitude_compact_space();
|
||||
// assert_eq!(amplitude, 2);
|
||||
// }
|
||||
//
|
||||
// fn test_all(mut data: OwnedBytes, expected: &[u128]) {
|
||||
// let _header = U128Header::deserialize(&mut data);
|
||||
// let decompressor = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// for (idx, expected_val) in expected.iter().cloned().enumerate() {
|
||||
// let val = decompressor.get(idx as u32);
|
||||
// assert_eq!(val, expected_val);
|
||||
//
|
||||
// let test_range = |range: RangeInclusive<u128>| {
|
||||
// let expected_positions = expected
|
||||
// .iter()
|
||||
// .positions(|val| range.contains(val))
|
||||
// .map(|pos| pos as u32)
|
||||
// .collect::<Vec<_>>();
|
||||
// let mut positions = Vec::new();
|
||||
// decompressor.get_positions_for_value_range(
|
||||
// range,
|
||||
// 0..decompressor.num_vals(),
|
||||
// &mut positions,
|
||||
// );
|
||||
// assert_eq!(positions, expected_positions);
|
||||
// };
|
||||
//
|
||||
// test_range(expected_val.saturating_sub(1)..=expected_val);
|
||||
// test_range(expected_val..=expected_val);
|
||||
// test_range(expected_val..=expected_val.saturating_add(1));
|
||||
// test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
|
||||
// let mut out = Vec::new();
|
||||
// serialize_u128(
|
||||
// || u128_vals.iter().cloned(),
|
||||
// u128_vals.len() as u32,
|
||||
// &mut out,
|
||||
// )
|
||||
// .unwrap();
|
||||
//
|
||||
// let data = OwnedBytes::new(out);
|
||||
// let (data, _format_version) = read_format_version(data).unwrap();
|
||||
// let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
|
||||
// test_all(data.clone(), u128_vals);
|
||||
//
|
||||
// data
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_range_1() {
|
||||
// let vals = &[
|
||||
// 1u128,
|
||||
// 100u128,
|
||||
// 3u128,
|
||||
// 99999u128,
|
||||
// 100000u128,
|
||||
// 100001u128,
|
||||
// 4_000_211_221u128,
|
||||
// 4_000_211_222u128,
|
||||
// 333u128,
|
||||
// ];
|
||||
// let mut data = test_aux_vals(vals);
|
||||
//
|
||||
// let _header = U128Header::deserialize(&mut data);
|
||||
// let decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// let complete_range = 0..vals.len() as u32;
|
||||
// for (pos, val) in vals.iter().enumerate() {
|
||||
// let val = *val;
|
||||
// let pos = pos as u32;
|
||||
// let mut positions = Vec::new();
|
||||
// decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
|
||||
// assert_eq!(positions, vec![pos]);
|
||||
// }
|
||||
//
|
||||
// handle docid range out of bounds
|
||||
// let positions: Vec<u32> = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
|
||||
// assert!(positions.is_empty());
|
||||
//
|
||||
// let positions =
|
||||
// get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
|
||||
// assert_eq!(positions, vec![0]);
|
||||
// let positions =
|
||||
// get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
|
||||
// assert_eq!(positions, vec![0]);
|
||||
// let positions =
|
||||
// get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
|
||||
// assert_eq!(positions, vec![0, 2]);
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99999u128..=99999u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// vec![3]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99999u128..=100000u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// vec![3, 4]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99998u128..=100000u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// vec![3, 4]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99998u128..=99999u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[3]
|
||||
// );
|
||||
// assert!(get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 99998u128..=99998u128,
|
||||
// complete_range.clone()
|
||||
// )
|
||||
// .is_empty());
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 333u128..=333u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 332u128..=333u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 332u128..=334u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 333u128..=334u128,
|
||||
// complete_range.clone()
|
||||
// ),
|
||||
// &[8]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(
|
||||
// &decomp,
|
||||
// 4_000_211_221u128..=5_000_000_000u128,
|
||||
// complete_range
|
||||
// ),
|
||||
// &[6, 7]
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_empty() {
|
||||
// let vals = &[];
|
||||
// let data = test_aux_vals(vals);
|
||||
// let _decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_range_2() {
|
||||
// let vals = &[
|
||||
// 100u128,
|
||||
// 99999u128,
|
||||
// 100000u128,
|
||||
// 100001u128,
|
||||
// 4_000_211_221u128,
|
||||
// 4_000_211_222u128,
|
||||
// 333u128,
|
||||
// ];
|
||||
// let mut data = test_aux_vals(vals);
|
||||
// let _header = U128Header::deserialize(&mut data);
|
||||
// let decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
// let complete_range = 0..vals.len() as u32;
|
||||
// assert!(
|
||||
// &get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone())
|
||||
// .is_empty(),
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
|
||||
// &[0]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// &get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
|
||||
// &[0]
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
|
||||
// column: &C,
|
||||
// value_range: RangeInclusive<T>,
|
||||
// doc_id_range: Range<u32>,
|
||||
// ) -> Vec<u32> {
|
||||
// let mut positions = Vec::new();
|
||||
// column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
|
||||
// positions
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_range_3() {
|
||||
// let vals = &[
|
||||
// 200u128,
|
||||
// 201,
|
||||
// 202,
|
||||
// 203,
|
||||
// 204,
|
||||
// 204,
|
||||
// 206,
|
||||
// 207,
|
||||
// 208,
|
||||
// 209,
|
||||
// 210,
|
||||
// 1_000_000,
|
||||
// 5_000_000_000,
|
||||
// ];
|
||||
// let mut out = Vec::new();
|
||||
// serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
|
||||
// let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
|
||||
// let complete_range = 0..vals.len() as u32;
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
|
||||
// vec![0]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
|
||||
// vec![0, 1]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
|
||||
// vec![0]
|
||||
// );
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
|
||||
// vec![11]
|
||||
// );
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug1() {
|
||||
// let vals = &[9223372036854775806];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug2() {
|
||||
// let vals = &[340282366920938463463374607431768211455u128];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug3() {
|
||||
// let vals = &[340282366920938463463374607431768211454];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_bug4() {
|
||||
// let vals = &[340282366920938463463374607431768211455, 0];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
//
|
||||
// #[test]
|
||||
// fn test_first_large_gaps() {
|
||||
// let vals = &[1_000_000_000u128; 100];
|
||||
// let _data = test_aux_vals(vals);
|
||||
// }
|
||||
// use itertools::Itertools;
|
||||
// use proptest::prelude::*;
|
||||
//
|
||||
// fn num_strategy() -> impl Strategy<Value = u128> {
|
||||
// prop_oneof![
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
|
||||
// 1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
|
||||
// 20 => prop::num::u128::ANY,
|
||||
// ]
|
||||
// }
|
||||
//
|
||||
// proptest! {
|
||||
// #![proptest_config(ProptestConfig::with_cases(10))]
|
||||
//
|
||||
// #[test]
|
||||
// fn compress_decompress_random(vals in proptest::collection::vec(num_strategy()
|
||||
// , 1..1000)) {
|
||||
// let _data = test_aux_vals(&vals);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use itertools::Itertools;
|
||||
|
||||
use super::*;
|
||||
use crate::column_values::serialize::U128Header;
|
||||
use crate::column_values::{open_u128_mapped, serialize_column_values_u128};
|
||||
|
||||
#[test]
|
||||
fn compact_space_test() {
|
||||
let ips = &[
|
||||
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
|
||||
]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let compact_space = get_compact_space(ips, ips.len() as u32, 11);
|
||||
let amplitude = compact_space.amplitude_compact_space();
|
||||
assert_eq!(amplitude, 17);
|
||||
assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
|
||||
assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
|
||||
assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);
|
||||
|
||||
for (num1, num2) in (0..3).tuple_windows() {
|
||||
assert_eq!(
|
||||
compact_space.get_range_mapping(num1).compact_end() + 1,
|
||||
compact_space.get_range_mapping(num2).compact_start
|
||||
);
|
||||
}
|
||||
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
compact_space.serialize(&mut output).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
compact_space,
|
||||
CompactSpace::deserialize(&mut &output[..]).unwrap()
|
||||
);
|
||||
|
||||
for ip in ips {
|
||||
let compact = compact_space.u128_to_compact(*ip).unwrap();
|
||||
assert_eq!(compact_space.compact_to_u128(compact), *ip);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compact_space_amplitude_test() {
|
||||
let ips = &[100000u128, 1000000].into_iter().collect();
|
||||
let compact_space = get_compact_space(ips, ips.len() as u32, 1);
|
||||
let amplitude = compact_space.amplitude_compact_space();
|
||||
assert_eq!(amplitude, 2);
|
||||
}
|
||||
|
||||
fn test_all(mut data: OwnedBytes, expected: &[u128]) {
|
||||
let _header = U128Header::deserialize(&mut data);
|
||||
let decompressor = CompactSpaceDecompressor::open(data).unwrap();
|
||||
for (idx, expected_val) in expected.iter().cloned().enumerate() {
|
||||
let val = decompressor.get(idx as u32);
|
||||
assert_eq!(val, expected_val);
|
||||
|
||||
let test_range = |range: RangeInclusive<u128>| {
|
||||
let expected_positions = expected
|
||||
.iter()
|
||||
.positions(|val| range.contains(val))
|
||||
.map(|pos| pos as u32)
|
||||
.collect::<Vec<_>>();
|
||||
let mut positions = Vec::new();
|
||||
decompressor.get_positions_for_value_range(
|
||||
range,
|
||||
0..decompressor.num_vals(),
|
||||
&mut positions,
|
||||
);
|
||||
assert_eq!(positions, expected_positions);
|
||||
};
|
||||
|
||||
test_range(expected_val.saturating_sub(1)..=expected_val);
|
||||
test_range(expected_val..=expected_val);
|
||||
test_range(expected_val..=expected_val.saturating_add(1));
|
||||
test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
|
||||
}
|
||||
}
|
||||
|
||||
fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
|
||||
let mut out = Vec::new();
|
||||
serialize_column_values_u128(&u128_vals, &mut out).unwrap();
|
||||
let data = OwnedBytes::new(out);
|
||||
test_all(data.clone(), u128_vals);
|
||||
data
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_1() {
|
||||
let vals = &[
|
||||
1u128,
|
||||
100u128,
|
||||
3u128,
|
||||
99999u128,
|
||||
100000u128,
|
||||
100001u128,
|
||||
4_000_211_221u128,
|
||||
4_000_211_222u128,
|
||||
333u128,
|
||||
];
|
||||
let mut data = test_aux_vals(vals);
|
||||
|
||||
let _header = U128Header::deserialize(&mut data);
|
||||
let decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
let complete_range = 0..vals.len() as u32;
|
||||
for (pos, val) in vals.iter().enumerate() {
|
||||
let val = *val;
|
||||
let pos = pos as u32;
|
||||
let mut positions = Vec::new();
|
||||
decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
|
||||
assert_eq!(positions, vec![pos]);
|
||||
}
|
||||
|
||||
// handle docid range out of bounds
|
||||
let positions: Vec<u32> = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
|
||||
assert!(positions.is_empty());
|
||||
|
||||
let positions =
|
||||
get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
|
||||
assert_eq!(positions, vec![0]);
|
||||
let positions =
|
||||
get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
|
||||
assert_eq!(positions, vec![0]);
|
||||
let positions =
|
||||
get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
|
||||
assert_eq!(positions, vec![0, 2]);
|
||||
assert_eq!(
|
||||
get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
99999u128..=99999u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
vec![3]
|
||||
);
|
||||
assert_eq!(
|
||||
get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
99999u128..=100000u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
vec![3, 4]
|
||||
);
|
||||
assert_eq!(
|
||||
get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
99998u128..=100000u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
vec![3, 4]
|
||||
);
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
99998u128..=99999u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
&[3]
|
||||
);
|
||||
assert!(get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
99998u128..=99998u128,
|
||||
complete_range.clone()
|
||||
)
|
||||
.is_empty());
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
333u128..=333u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
&[8]
|
||||
);
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
332u128..=333u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
&[8]
|
||||
);
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
332u128..=334u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
&[8]
|
||||
);
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
333u128..=334u128,
|
||||
complete_range.clone()
|
||||
),
|
||||
&[8]
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(
|
||||
&decomp,
|
||||
4_000_211_221u128..=5_000_000_000u128,
|
||||
complete_range
|
||||
),
|
||||
&[6, 7]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty() {
|
||||
let vals = &[];
|
||||
let data = test_aux_vals(vals);
|
||||
let _decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_2() {
|
||||
let vals = &[
|
||||
100u128,
|
||||
99999u128,
|
||||
100000u128,
|
||||
100001u128,
|
||||
4_000_211_221u128,
|
||||
4_000_211_222u128,
|
||||
333u128,
|
||||
];
|
||||
let mut data = test_aux_vals(vals);
|
||||
let _header = U128Header::deserialize(&mut data);
|
||||
let decomp = CompactSpaceDecompressor::open(data).unwrap();
|
||||
let complete_range = 0..vals.len() as u32;
|
||||
assert!(
|
||||
&get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone())
|
||||
.is_empty(),
|
||||
);
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
|
||||
&[0]
|
||||
);
|
||||
assert_eq!(
|
||||
&get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
|
||||
&[0]
|
||||
);
|
||||
}
|
||||
|
||||
fn get_positions_for_value_range_helper<C: ColumnValues<T> + ?Sized, T: PartialOrd>(
|
||||
column: &C,
|
||||
value_range: RangeInclusive<T>,
|
||||
doc_id_range: Range<u32>,
|
||||
) -> Vec<u32> {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
|
||||
positions
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_3() {
|
||||
let vals = &[
|
||||
200u128,
|
||||
201,
|
||||
202,
|
||||
203,
|
||||
204,
|
||||
204,
|
||||
206,
|
||||
207,
|
||||
208,
|
||||
209,
|
||||
210,
|
||||
1_000_000,
|
||||
5_000_000_000,
|
||||
];
|
||||
let mut out = Vec::new();
|
||||
serialize_column_values_u128(&&vals[..], &mut out).unwrap();
|
||||
let decomp = open_u128_mapped(OwnedBytes::new(out)).unwrap();
|
||||
let complete_range = 0..vals.len() as u32;
|
||||
|
||||
assert_eq!(
|
||||
get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
|
||||
vec![0]
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
|
||||
vec![0, 1]
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
|
||||
vec![0]
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
|
||||
vec![11]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bug1() {
|
||||
let vals = &[9223372036854775806];
|
||||
let _data = test_aux_vals(vals);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bug2() {
|
||||
let vals = &[340282366920938463463374607431768211455u128];
|
||||
let _data = test_aux_vals(vals);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bug3() {
|
||||
let vals = &[340282366920938463463374607431768211454];
|
||||
let _data = test_aux_vals(vals);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bug4() {
|
||||
let vals = &[340282366920938463463374607431768211455, 0];
|
||||
let _data = test_aux_vals(vals);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_first_large_gaps() {
|
||||
let vals = &[1_000_000_000u128; 100];
|
||||
let _data = test_aux_vals(vals);
|
||||
}
|
||||
|
||||
use proptest::prelude::*;
|
||||
|
||||
fn num_strategy() -> impl Strategy<Value = u128> {
|
||||
prop_oneof![
|
||||
1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
|
||||
1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
|
||||
1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
|
||||
1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
|
||||
20 => prop::num::u128::ANY,
|
||||
]
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(10))]
|
||||
|
||||
#[test]
|
||||
fn compress_decompress_random(vals in proptest::collection::vec(num_strategy() , 1..1000)) {
|
||||
let _data = test_aux_vals(&vals);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use fastdivide::DividerU64;
|
||||
|
||||
/// Computes the GCD of two non-null numbers.
|
||||
///
|
||||
/// It is recommended, but not required, to feed values such that `large >= small`.
|
||||
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
|
||||
loop {
|
||||
let rem: u64 = large.get() % small;
|
||||
if let Some(new_small) = NonZeroU64::new(rem) {
|
||||
(large, small) = (small, new_small);
|
||||
} else {
|
||||
return small;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Finds the GCD of an iterator of numbers. Zeros are skipped; returns `None` if no non-zero value is found.
|
||||
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
|
||||
let mut numbers = numbers.flat_map(NonZeroU64::new);
|
||||
let mut gcd: NonZeroU64 = numbers.next()?;
|
||||
if gcd.get() == 1 {
|
||||
return Some(gcd);
|
||||
}
|
||||
|
||||
let mut gcd_divider = DividerU64::divide_by(gcd.get());
|
||||
for val in numbers {
|
||||
let remainder = val.get() - (gcd_divider.divide(val.get())) * gcd.get();
|
||||
if remainder == 0 {
|
||||
continue;
|
||||
}
|
||||
gcd = compute_gcd(val, gcd);
|
||||
if gcd.get() == 1 {
|
||||
return Some(gcd);
|
||||
}
|
||||
|
||||
gcd_divider = DividerU64::divide_by(gcd.get());
|
||||
}
|
||||
Some(gcd)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use crate::column_values::gcd::{compute_gcd, find_gcd};
|
||||
|
||||
#[test]
|
||||
fn test_compute_gcd() {
|
||||
let test_compute_gcd_aux = |large, small, expected| {
|
||||
let large = NonZeroU64::new(large).unwrap();
|
||||
let small = NonZeroU64::new(small).unwrap();
|
||||
let expected = NonZeroU64::new(expected).unwrap();
|
||||
assert_eq!(compute_gcd(small, large), expected);
|
||||
assert_eq!(compute_gcd(large, small), expected);
|
||||
};
|
||||
test_compute_gcd_aux(1, 4, 1);
|
||||
test_compute_gcd_aux(2, 4, 2);
|
||||
test_compute_gcd_aux(10, 25, 5);
|
||||
test_compute_gcd_aux(25, 25, 25);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn find_gcd_test() {
|
||||
assert_eq!(find_gcd([0].into_iter()), None);
|
||||
assert_eq!(find_gcd([0, 10].into_iter()), NonZeroU64::new(10));
|
||||
assert_eq!(find_gcd([10, 0].into_iter()), NonZeroU64::new(10));
|
||||
assert_eq!(find_gcd([].into_iter()), None);
|
||||
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), NonZeroU64::new(5));
|
||||
assert_eq!(find_gcd([15, 16, 10].into_iter()), NonZeroU64::new(1));
|
||||
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), NonZeroU64::new(5));
|
||||
assert_eq!(find_gcd([0, 0].into_iter()), None);
|
||||
}
|
||||
}
|
||||
@@ -1,230 +0,0 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use super::line::Line;
|
||||
use super::serialize::NormalizedHeader;
|
||||
use super::{ColumnValues, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct LinearReader {
|
||||
data: OwnedBytes,
|
||||
linear_params: LinearParams,
|
||||
header: NormalizedHeader,
|
||||
}
|
||||
|
||||
impl ColumnValues for LinearReader {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
let interpoled_val: u64 = self.linear_params.line.eval(doc);
|
||||
let bitpacked_diff = self.linear_params.bit_unpacker.get(doc, &self.data);
|
||||
interpoled_val.wrapping_add(bitpacked_diff)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> u64 {
|
||||
// The LinearReader assumes a normalized vector.
|
||||
0u64
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.header.max_value
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.header.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
/// Fastfield serializer, which tries to guess values by linear interpolation
|
||||
/// and stores the difference bitpacked.
|
||||
pub struct LinearCodec;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct LinearParams {
|
||||
line: Line,
|
||||
bit_unpacker: BitUnpacker,
|
||||
}
|
||||
|
||||
impl BinarySerializable for LinearParams {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
self.line.serialize(writer)?;
|
||||
self.bit_unpacker.bit_width().serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let line = Line::deserialize(reader)?;
|
||||
let bit_width = u8::deserialize(reader)?;
|
||||
Ok(Self {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl FastFieldCodec for LinearCodec {
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear;
|
||||
|
||||
type Reader = LinearReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(mut data: OwnedBytes, header: NormalizedHeader) -> io::Result<Self::Reader> {
|
||||
let linear_params = LinearParams::deserialize(&mut data)?;
|
||||
Ok(LinearReader {
|
||||
data,
|
||||
linear_params,
|
||||
header,
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a new fast field serializer.
|
||||
fn serialize(column: &dyn ColumnValues, write: &mut impl Write) -> io::Result<()> {
|
||||
assert_eq!(column.min_value(), 0);
|
||||
let line = Line::train(column);
|
||||
|
||||
let max_offset_from_line = column
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(pos, actual_value)| {
|
||||
let calculated_value = line.eval(pos as u32);
|
||||
actual_value.wrapping_sub(calculated_value)
|
||||
})
|
||||
.max()
|
||||
.unwrap();
|
||||
|
||||
let num_bits = compute_num_bits(max_offset_from_line);
|
||||
let linear_params = LinearParams {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(num_bits),
|
||||
};
|
||||
linear_params.serialize(write)?;
|
||||
|
||||
let mut bit_packer = BitPacker::new();
|
||||
for (pos, actual_value) in column.iter().enumerate() {
|
||||
let calculated_value = line.eval(pos as u32);
|
||||
let offset = actual_value.wrapping_sub(calculated_value);
|
||||
bit_packer.write(offset, num_bits, write)?;
|
||||
}
|
||||
bit_packer.close(write)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation from the interpolated value are, and
/// the offset needed to shift all values to >= 0 is also unknown.
|
||||
#[allow(clippy::question_mark)]
|
||||
fn estimate(column: &dyn ColumnValues) -> Option<f32> {
|
||||
if column.num_vals() < 3 {
|
||||
return None; // disable compressor for this case
|
||||
}
|
||||
|
||||
let limit_num_vals = column.num_vals().min(100_000);
|
||||
|
||||
let num_samples = 100;
|
||||
let step_size = (limit_num_vals / num_samples).max(1); // aim for ~100 samples
|
||||
let mut sample_positions_and_values: Vec<_> = Vec::new();
|
||||
for (pos, val) in column.iter().enumerate().step_by(step_size as usize) {
|
||||
sample_positions_and_values.push((pos as u64, val));
|
||||
}
|
||||
|
||||
let line = Line::estimate(&sample_positions_and_values);
|
||||
|
||||
let estimated_bit_width = sample_positions_and_values
|
||||
.into_iter()
|
||||
.map(|(pos, actual_value)| {
|
||||
let interpolated_val = line.eval(pos as u32);
|
||||
actual_value.wrapping_sub(interpolated_val)
|
||||
})
|
||||
.map(|diff| ((diff as f32 * 1.5) * 2.0) as u64)
|
||||
.map(compute_num_bits)
|
||||
.max()
|
||||
.unwrap_or(0);
|
||||
|
||||
// Extrapolate to whole column
|
||||
let num_bits = (estimated_bit_width as u64 * column.num_vals() as u64) + 64;
|
||||
let num_bits_uncompressed = 64 * column.num_vals();
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use rand::RngCore;
|
||||
|
||||
use super::*;
|
||||
use crate::column_values::tests;
|
||||
|
||||
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
|
||||
tests::create_and_validate::<LinearCodec>(data, name)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compression() {
|
||||
let data = (10..=6_000_u64).collect::<Vec<_>>();
|
||||
let (estimate, actual_compression) =
|
||||
create_and_validate(&data, "simple monotonically large").unwrap();
|
||||
|
||||
assert_le!(actual_compression, 0.001);
|
||||
assert_le!(estimate, 0.02);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_datasets() {
|
||||
let data_sets = tests::get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate(&data, name);
|
||||
data.reverse();
|
||||
create_and_validate(&data, name);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn linear_interpol_fast_field_test_large_amplitude() {
|
||||
let data = vec![
|
||||
i64::MAX as u64 / 2,
|
||||
i64::MAX as u64 / 3,
|
||||
i64::MAX as u64 / 2,
|
||||
];
|
||||
|
||||
create_and_validate(&data, "large amplitude");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn overflow_error_test() {
|
||||
let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
|
||||
create_and_validate(&data, "overflow test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linear_interpol_fast_concave_data() {
|
||||
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
|
||||
create_and_validate(&data, "concave data");
|
||||
}
|
||||
#[test]
|
||||
fn linear_interpol_fast_convex_data() {
|
||||
let data = vec![0, 40, 60, 70, 75, 77];
|
||||
create_and_validate(&data, "convex data");
|
||||
}
|
||||
#[test]
|
||||
fn linear_interpol_fast_field_test_simple() {
|
||||
let data = (10..=20_u64).collect::<Vec<_>>();
|
||||
create_and_validate(&data, "simple monotonically");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linear_interpol_fast_field_rand() {
|
||||
let mut rng = rand::thread_rng();
|
||||
for _ in 0..50 {
|
||||
let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
|
||||
create_and_validate(&data, "random");
|
||||
data.reverse();
|
||||
create_and_validate(&data, "random");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,222 +0,0 @@
|
||||
#[macro_use]
|
||||
extern crate prettytable;
|
||||
use std::collections::HashSet;
|
||||
use std::env;
|
||||
use std::io::BufRead;
|
||||
use std::net::{IpAddr, Ipv6Addr};
|
||||
use std::str::FromStr;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
|
||||
use itertools::Itertools;
|
||||
use measure_time::print_time;
|
||||
use prettytable::{Cell, Row, Table};
|
||||
|
||||
fn print_set_stats(ip_addrs: &[u128]) {
|
||||
println!("NumIps\t{}", ip_addrs.len());
|
||||
let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
|
||||
println!("NumUniqueIps\t{}", ip_addr_set.len());
|
||||
let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
|
||||
println!("RatioUniqueOverTotal\t{ratio_unique:.4}");
|
||||
|
||||
// histogram
|
||||
let mut ip_addrs = ip_addrs.to_vec();
|
||||
ip_addrs.sort();
|
||||
let mut cnts: Vec<usize> = ip_addrs
|
||||
.into_iter()
|
||||
.dedup_with_count()
|
||||
.map(|(cnt, _)| cnt)
|
||||
.collect();
|
||||
cnts.sort();
|
||||
|
||||
let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
|
||||
let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
|
||||
let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
|
||||
let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
|
||||
let total: usize = cnts.iter().sum();
|
||||
|
||||
println!("{}", total);
|
||||
println!("{}", top_256_cnt);
|
||||
println!("{}", top_128_cnt);
|
||||
println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
|
||||
println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
|
||||
println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
|
||||
println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);
|
||||
|
||||
let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
|
||||
cnts.sort_by(|a, b| {
|
||||
if a.1 == b.1 {
|
||||
a.0.cmp(&b.0)
|
||||
} else {
|
||||
b.1.cmp(&a.1)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn ip_dataset() -> Vec<u128> {
|
||||
let mut ip_addr_v4 = 0;
|
||||
|
||||
let stdin = std::io::stdin();
|
||||
let ip_addrs: Vec<u128> = stdin
|
||||
.lock()
|
||||
.lines()
|
||||
.flat_map(|line| {
|
||||
let line = line.unwrap();
|
||||
let line = line.trim();
|
||||
let ip_addr = IpAddr::from_str(line.trim()).ok()?;
|
||||
if ip_addr.is_ipv4() {
|
||||
ip_addr_v4 += 1;
|
||||
}
|
||||
let ip_addr_v6: Ipv6Addr = match ip_addr {
|
||||
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
|
||||
IpAddr::V6(v6) => v6,
|
||||
};
|
||||
Some(ip_addr_v6)
|
||||
})
|
||||
.map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
|
||||
.collect();
|
||||
|
||||
println!("IpAddrsAny\t{}", ip_addrs.len());
|
||||
println!("IpAddrsV4\t{}", ip_addr_v4);
|
||||
|
||||
ip_addrs
|
||||
}
|
||||
|
||||
fn bench_ip() {
|
||||
let dataset = ip_dataset();
|
||||
print_set_stats(&dataset);
|
||||
|
||||
// Chunks
|
||||
{
|
||||
let mut data = vec![];
|
||||
for dataset in dataset.chunks(500_000) {
|
||||
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
|
||||
}
|
||||
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
|
||||
println!("Compression 50_000 chunks {:.4}", compression);
|
||||
println!(
|
||||
"Num Bits per elem {:.2}",
|
||||
(data.len() * 8) as f32 / dataset.len() as f32
|
||||
);
|
||||
}
|
||||
|
||||
let mut data = vec![];
|
||||
{
|
||||
print_time!("creation");
|
||||
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
|
||||
}
|
||||
|
||||
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
|
||||
println!("Compression {:.2}", compression);
|
||||
println!(
|
||||
"Num Bits per elem {:.2}",
|
||||
(data.len() * 8) as f32 / dataset.len() as f32
|
||||
);
|
||||
|
||||
let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
|
||||
// Sample some ranges
|
||||
let mut doc_values = Vec::new();
|
||||
for value in dataset.iter().take(1110).skip(1100).cloned() {
|
||||
doc_values.clear();
|
||||
print_time!("get range");
|
||||
decompressor.get_docids_for_value_range(
|
||||
value..=value,
|
||||
0..decompressor.num_vals(),
|
||||
&mut doc_values,
|
||||
);
|
||||
println!("{:?}", doc_values.len());
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
if env::args().nth(1).unwrap() == "bench_ip" {
|
||||
bench_ip();
|
||||
return;
|
||||
}
|
||||
|
||||
let mut table = Table::new();
|
||||
|
||||
// Add a row per time
|
||||
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
|
||||
|
||||
for (data, data_set_name) in get_codec_test_data_sets() {
|
||||
let results: Vec<(f32, f32, FastFieldCodecType)> = [
|
||||
serialize_with_codec(&data, FastFieldCodecType::Bitpacked),
|
||||
serialize_with_codec(&data, FastFieldCodecType::Linear),
|
||||
serialize_with_codec(&data, FastFieldCodecType::BlockwiseLinear),
|
||||
]
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect();
|
||||
let best_compression_ratio_codec = results
|
||||
.iter()
|
||||
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
|
||||
.cloned()
|
||||
.unwrap();
|
||||
|
||||
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
|
||||
for (est, comp, codec_type) in results {
|
||||
let est_cell = est.to_string();
|
||||
let ratio_cell = comp.to_string();
|
||||
let style = if comp == best_compression_ratio_codec.1 {
|
||||
"Fb"
|
||||
} else {
|
||||
""
|
||||
};
|
||||
table.add_row(Row::new(vec![
|
||||
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
|
||||
Cell::new(&ratio_cell).style_spec(style),
|
||||
Cell::new(&est_cell).style_spec(""),
|
||||
]));
|
||||
}
|
||||
}
|
||||
|
||||
table.printstd();
|
||||
}
|
||||
|
||||
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
let mut data_and_names = vec![];
|
||||
|
||||
let data = (1000..=200_000_u64).collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Autoincrement"));
|
||||
|
||||
let mut current_cumulative = 0;
|
||||
let data = (1..=200_000_u64)
|
||||
.map(|num| {
|
||||
let num = (num as f32 + num as f32).log10() as u64;
|
||||
current_cumulative += num;
|
||||
current_cumulative
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
// let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Monotonically increasing concave"));
|
||||
|
||||
let mut current_cumulative = 0;
|
||||
let data = (1..=200_000_u64)
|
||||
.map(|num| {
|
||||
let num = (200_000.0 - num as f32).log10() as u64;
|
||||
current_cumulative += num;
|
||||
current_cumulative
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Monotonically increasing convex"));
|
||||
|
||||
let data = (1000..=200_000_u64)
|
||||
.map(|num| num + rand::random::<u8>() as u64)
|
||||
.collect::<Vec<_>>();
|
||||
data_and_names.push((data, "Almost monotonically increasing"));
|
||||
|
||||
data_and_names
|
||||
}
|
||||
|
||||
pub fn serialize_with_codec(
|
||||
data: &[u64],
|
||||
codec_type: FastFieldCodecType,
|
||||
) -> Option<(f32, f32, FastFieldCodecType)> {
|
||||
let col = VecColumn::from(data);
|
||||
let estimation = fastfield_codecs::estimate(&col, codec_type)?;
|
||||
let mut out = Vec::new();
|
||||
fastfield_codecs::serialize(&col, &mut out, &[codec_type]).ok()?;
|
||||
let actual_compression = out.len() as f32 / (col.num_vals() * 8) as f32;
|
||||
Some((estimation, actual_compression, codec_type))
|
||||
}
|
||||
@@ -7,9 +7,6 @@
|
||||
//! - Encode data in different codecs.
|
||||
//! - Monotonically map values to u64/u128
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
@@ -18,72 +15,66 @@ use std::sync::Arc;
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use compact_space::CompactSpaceDecompressor;
|
||||
pub use monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
|
||||
use monotonic_mapping::{
|
||||
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
|
||||
StrictlyMonotonicMappingToInternalBaseval, StrictlyMonotonicMappingToInternalGCDBaseval,
|
||||
};
|
||||
use monotonic_mapping::{StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal};
|
||||
pub use monotonic_mapping_u128::MonotonicallyMappableToU128;
|
||||
use serialize::{Header, U128Header};
|
||||
use serialize::U128Header;
|
||||
|
||||
mod bitpacked;
|
||||
mod blockwise_linear;
|
||||
mod compact_space;
|
||||
mod line;
|
||||
mod linear;
|
||||
pub(crate) mod monotonic_mapping;
|
||||
pub(crate) mod monotonic_mapping_u128;
|
||||
mod stats;
|
||||
pub(crate) mod u64_based;
|
||||
|
||||
mod column;
|
||||
mod gcd;
|
||||
pub mod serialize;
|
||||
|
||||
pub use serialize::serialize_column_values_u128;
|
||||
pub use stats::Stats;
|
||||
pub use u64_based::{
|
||||
load_u64_based_column_values, serialize_and_load_u64_based_column_values,
|
||||
serialize_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
|
||||
};
|
||||
|
||||
pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn};
|
||||
#[cfg(test)]
|
||||
pub use self::serialize::tests::serialize_and_load;
|
||||
pub use self::serialize::{serialize_column_values, NormalizedHeader};
|
||||
use crate::column_values::bitpacked::BitpackedCodec;
|
||||
use crate::column_values::blockwise_linear::BlockwiseLinearCodec;
|
||||
use crate::column_values::linear::LinearCodec;
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{ColumnIndex, MergeRowOrder};
|
||||
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
|
||||
#[repr(u8)]
|
||||
/// Available codecs to use to encode the u64 (via [`MonotonicallyMappableToU64`]) converted data.
|
||||
pub enum FastFieldCodecType {
|
||||
/// Bitpack all values in the value range. The number of bits is defined by the amplitude
|
||||
/// `column.max_value() - column.min_value()`
|
||||
Bitpacked = 1,
|
||||
/// Linear interpolation puts a line between the first and last value and then bitpacks the
|
||||
/// values by the offset from the line. The number of bits is defined by the max deviation from
|
||||
/// the line.
|
||||
Linear = 2,
|
||||
/// Same as [`FastFieldCodecType::Linear`], but encodes in blocks of 512 elements.
|
||||
BlockwiseLinear = 3,
|
||||
pub(crate) struct MergedColumnValues<'a, T> {
|
||||
pub(crate) column_indexes: &'a [Option<ColumnIndex>],
|
||||
pub(crate) column_values: &'a [Option<Arc<dyn ColumnValues<T>>>],
|
||||
pub(crate) merge_row_order: &'a MergeRowOrder,
|
||||
}
|
||||
|
||||
impl BinarySerializable for FastFieldCodecType {
|
||||
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
|
||||
self.to_code().serialize(wrt)
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let code = u8::deserialize(reader)?;
|
||||
let codec_type: Self = Self::from_code(code)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`.")))?;
|
||||
Ok(codec_type)
|
||||
}
|
||||
}
|
||||
|
||||
impl FastFieldCodecType {
|
||||
pub(crate) fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub(crate) fn from_code(code: u8) -> Option<Self> {
|
||||
match code {
|
||||
1 => Some(Self::Bitpacked),
|
||||
2 => Some(Self::Linear),
|
||||
3 => Some(Self::BlockwiseLinear),
|
||||
_ => None,
|
||||
impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
match self.merge_row_order {
|
||||
MergeRowOrder::Stack(_) => {
|
||||
Box::new(self
|
||||
.column_values
|
||||
.iter()
|
||||
.flatten()
|
||||
.flat_map(|column_value| column_value.iter()))
|
||||
},
|
||||
MergeRowOrder::Shuffled(shuffle_merge_order) => {
|
||||
Box::new(shuffle_merge_order
|
||||
.iter_new_to_old_row_addrs()
|
||||
.flat_map(|row_addr| {
|
||||
let Some(column_index) = self.column_indexes[row_addr.segment_ord as usize].as_ref() else {
|
||||
return None;
|
||||
};
|
||||
let Some(column_values) = self.column_values[row_addr.segment_ord as usize].as_ref() else {
|
||||
return None;
|
||||
};
|
||||
let value_range = column_index.value_row_ids(row_addr.row_id);
|
||||
Some((value_range, column_values))
|
||||
})
|
||||
.flat_map(|(value_range, column_values)| {
|
||||
value_range
|
||||
.into_iter()
|
||||
.map(|val| column_values.get_val(val))
|
||||
})
|
||||
)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -98,7 +89,7 @@ pub enum U128FastFieldCodecType {
|
||||
}
|
||||
|
||||
impl BinarySerializable for U128FastFieldCodecType {
|
||||
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, wrt: &mut W) -> io::Result<()> {
|
||||
self.to_code().serialize(wrt)
|
||||
}
|
||||
|
||||
@@ -136,68 +127,6 @@ pub fn open_u128_mapped<T: MonotonicallyMappableToU128 + Debug>(
|
||||
Ok(Arc::new(monotonic_map_column(reader, inverted)))
|
||||
}
|
||||
|
||||
/// Returns the correct codec reader wrapped in the `Arc` for the data.
|
||||
pub fn open_u64_mapped<T: MonotonicallyMappableToU64 + Debug>(
|
||||
mut bytes: OwnedBytes,
|
||||
) -> io::Result<Arc<dyn ColumnValues<T>>> {
|
||||
let header = Header::deserialize(&mut bytes)?;
|
||||
match header.codec_type {
|
||||
FastFieldCodecType::Bitpacked => open_specific_codec::<BitpackedCodec, _>(bytes, &header),
|
||||
FastFieldCodecType::Linear => open_specific_codec::<LinearCodec, _>(bytes, &header),
|
||||
FastFieldCodecType::BlockwiseLinear => {
|
||||
open_specific_codec::<BlockwiseLinearCodec, _>(bytes, &header)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64 + Debug>(
|
||||
bytes: OwnedBytes,
|
||||
header: &Header,
|
||||
) -> io::Result<Arc<dyn ColumnValues<Item>>> {
|
||||
let normalized_header = header.normalized();
|
||||
let reader = C::open_from_bytes(bytes, normalized_header)?;
|
||||
let min_value = header.min_value;
|
||||
if let Some(gcd) = header.gcd {
|
||||
let mapping = StrictlyMonotonicMappingInverter::from(
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd.get(), min_value),
|
||||
);
|
||||
Ok(Arc::new(monotonic_map_column(reader, mapping)))
|
||||
} else {
|
||||
let mapping = StrictlyMonotonicMappingInverter::from(
|
||||
StrictlyMonotonicMappingToInternalBaseval::new(min_value),
|
||||
);
|
||||
Ok(Arc::new(monotonic_map_column(reader, mapping)))
|
||||
}
|
||||
}
|
||||
|
||||
/// The FastFieldSerializerEstimate trait is required on all variants
|
||||
/// of fast field compressions, to decide which one to choose.
|
||||
pub(crate) trait FastFieldCodec: 'static {
|
||||
/// A codec needs to provide a unique name and id, which is
/// used for debugging and de/serialization.
|
||||
const CODEC_TYPE: FastFieldCodecType;
|
||||
|
||||
type Reader: ColumnValues<u64> + 'static;
|
||||
|
||||
/// Reads the metadata and returns the CodecReader
|
||||
fn open_from_bytes(bytes: OwnedBytes, header: NormalizedHeader) -> io::Result<Self::Reader>;
|
||||
|
||||
/// Serializes the data using the serializer into write.
|
||||
///
|
||||
/// The column iterator should be preferred over using column `get_val` method for
|
||||
/// performance reasons.
|
||||
fn serialize(column: &dyn ColumnValues, write: &mut impl Write) -> io::Result<()>;
|
||||
|
||||
/// Returns an estimate of the compression ratio.
|
||||
/// If the codec is not applicable, returns `None`.
|
||||
///
|
||||
/// The baseline is uncompressed 64bit data.
|
||||
///
|
||||
/// It could make sense to also return a value representing
|
||||
/// computational complexity.
|
||||
fn estimate(column: &dyn ColumnValues) -> Option<f32>;
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -1,22 +1,12 @@
|
||||
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use common::{BinarySerializable, VInt};
|
||||
use log::warn;
|
||||
|
||||
use super::bitpacked::BitpackedCodec;
|
||||
use super::blockwise_linear::BlockwiseLinearCodec;
|
||||
use super::linear::LinearCodec;
|
||||
use super::monotonic_mapping::{
|
||||
StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval,
|
||||
};
|
||||
use super::{
|
||||
monotonic_map_column, ColumnValues, FastFieldCodec, FastFieldCodecType,
|
||||
MonotonicallyMappableToU64, U128FastFieldCodecType,
|
||||
};
|
||||
use crate::column_values::compact_space::CompactSpaceCompressor;
|
||||
use crate::column_values::U128FastFieldCodecType;
|
||||
use crate::iterable::Iterable;
|
||||
use crate::MonotonicallyMappableToU128;
|
||||
|
||||
/// The normalized header gives some parameters after applying the following
|
||||
/// normalization of the vector:
|
||||
@@ -31,53 +21,6 @@ pub struct NormalizedHeader {
|
||||
pub max_value: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub(crate) struct Header {
|
||||
pub num_vals: u32,
|
||||
pub min_value: u64,
|
||||
pub max_value: u64,
|
||||
pub gcd: Option<NonZeroU64>,
|
||||
pub codec_type: FastFieldCodecType,
|
||||
}
|
||||
|
||||
impl Header {
|
||||
pub fn normalized(self) -> NormalizedHeader {
|
||||
let gcd = self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
|
||||
let gcd_min_val_mapping =
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, self.min_value);
|
||||
|
||||
let max_value = gcd_min_val_mapping.mapping(self.max_value);
|
||||
NormalizedHeader {
|
||||
num_vals: self.num_vals,
|
||||
max_value,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn normalize_column<C: ColumnValues>(&self, from_column: C) -> impl ColumnValues {
|
||||
normalize_column(from_column, self.min_value, self.gcd)
|
||||
}
|
||||
|
||||
pub fn compute_header(
|
||||
column: impl ColumnValues<u64>,
|
||||
codecs: &[FastFieldCodecType],
|
||||
) -> Option<Header> {
|
||||
let num_vals = column.num_vals();
|
||||
let min_value = column.min_value();
|
||||
let max_value = column.max_value();
|
||||
let gcd = super::gcd::find_gcd(column.iter().map(|val| val - min_value))
|
||||
.filter(|gcd| gcd.get() > 1u64);
|
||||
let normalized_column = normalize_column(column, min_value, gcd);
|
||||
let codec_type = detect_codec(normalized_column, codecs)?;
|
||||
Some(Header {
|
||||
num_vals,
|
||||
min_value,
|
||||
max_value,
|
||||
gcd,
|
||||
codec_type,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub(crate) struct U128Header {
|
||||
pub num_vals: u32,
|
||||
@@ -85,7 +28,7 @@ pub(crate) struct U128Header {
|
||||
}
|
||||
|
||||
impl BinarySerializable for U128Header {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.num_vals as u64).serialize(writer)?;
|
||||
self.codec_type.serialize(writer)?;
|
||||
Ok(())
|
||||
@@ -101,157 +44,39 @@ impl BinarySerializable for U128Header {
|
||||
}
|
||||
}
|
||||
|
||||
fn normalize_column<C: ColumnValues>(
|
||||
from_column: C,
|
||||
min_value: u64,
|
||||
gcd: Option<NonZeroU64>,
|
||||
) -> impl ColumnValues {
|
||||
let gcd = gcd.map(|gcd| gcd.get()).unwrap_or(1);
|
||||
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, min_value);
|
||||
monotonic_map_column(from_column, mapping)
|
||||
}
|
||||
|
||||
impl BinarySerializable for Header {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.num_vals as u64).serialize(writer)?;
|
||||
VInt(self.min_value).serialize(writer)?;
|
||||
VInt(self.max_value - self.min_value).serialize(writer)?;
|
||||
if let Some(gcd) = self.gcd {
|
||||
VInt(gcd.get()).serialize(writer)?;
|
||||
} else {
|
||||
VInt(0u64).serialize(writer)?;
|
||||
}
|
||||
self.codec_type.serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let num_vals = VInt::deserialize(reader)?.0 as u32;
|
||||
let min_value = VInt::deserialize(reader)?.0;
|
||||
let amplitude = VInt::deserialize(reader)?.0;
|
||||
let max_value = min_value + amplitude;
|
||||
let gcd_u64 = VInt::deserialize(reader)?.0;
|
||||
let codec_type = FastFieldCodecType::deserialize(reader)?;
|
||||
Ok(Header {
|
||||
num_vals,
|
||||
min_value,
|
||||
max_value,
|
||||
gcd: NonZeroU64::new(gcd_u64),
|
||||
codec_type,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Serializes u128 values with the compact space codec.
|
||||
pub fn serialize_column_values_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
|
||||
iter_gen: F,
|
||||
num_vals: u32,
|
||||
pub fn serialize_column_values_u128<T: MonotonicallyMappableToU128>(
|
||||
iterable: &dyn Iterable<T>,
|
||||
output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let compressor = CompactSpaceCompressor::train_from(
|
||||
iterable
|
||||
.boxed_iter()
|
||||
.map(MonotonicallyMappableToU128::to_u128),
|
||||
);
|
||||
let header = U128Header {
|
||||
num_vals,
|
||||
num_vals: compressor.num_vals(),
|
||||
codec_type: U128FastFieldCodecType::CompactSpace,
|
||||
};
|
||||
header.serialize(output)?;
|
||||
let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
|
||||
compressor.compress_into(iter_gen(), output)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Serializes the column with the codec with the best estimate on the data.
|
||||
pub fn serialize_column_values<T: MonotonicallyMappableToU64 + Debug>(
|
||||
typed_column: impl ColumnValues<T>,
|
||||
codecs: &[FastFieldCodecType],
|
||||
output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
|
||||
let header = Header::compute_header(&column, codecs).ok_or_else(|| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
format!(
|
||||
"Data cannot be serialized with this list of codec. {:?}",
|
||||
codecs
|
||||
),
|
||||
)
|
||||
})?;
|
||||
header.serialize(output)?;
|
||||
let normalized_column = header.normalize_column(column);
|
||||
assert_eq!(normalized_column.min_value(), 0u64);
|
||||
serialize_given_codec(normalized_column, header.codec_type, output)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn detect_codec(
|
||||
column: impl ColumnValues<u64>,
|
||||
codecs: &[FastFieldCodecType],
|
||||
) -> Option<FastFieldCodecType> {
|
||||
let mut estimations = Vec::new();
|
||||
for &codec in codecs {
|
||||
let estimation_opt = match codec {
|
||||
FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&column),
|
||||
FastFieldCodecType::Linear => LinearCodec::estimate(&column),
|
||||
FastFieldCodecType::BlockwiseLinear => BlockwiseLinearCodec::estimate(&column),
|
||||
};
|
||||
if let Some(estimation) = estimation_opt {
|
||||
estimations.push((estimation, codec));
|
||||
}
|
||||
}
|
||||
if let Some(broken_estimation) = estimations.iter().find(|estimation| estimation.0.is_nan()) {
|
||||
warn!(
|
||||
"broken estimation for fast field codec {:?}",
|
||||
broken_estimation.1
|
||||
);
|
||||
}
|
||||
// Remove NaN estimates (broken codec calculations) and f32::MAX estimates (which disable a codec).
|
||||
estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
|
||||
estimations.sort_by(|(score_left, _), (score_right, _)| score_left.total_cmp(score_right));
|
||||
Some(estimations.first()?.1)
|
||||
}
|
||||
|
||||
pub(crate) fn serialize_given_codec(
|
||||
column: impl ColumnValues<u64>,
|
||||
codec_type: FastFieldCodecType,
|
||||
output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
match codec_type {
|
||||
FastFieldCodecType::Bitpacked => {
|
||||
BitpackedCodec::serialize(&column, output)?;
|
||||
}
|
||||
FastFieldCodecType::Linear => {
|
||||
LinearCodec::serialize(&column, output)?;
|
||||
}
|
||||
FastFieldCodecType::BlockwiseLinear => {
|
||||
BlockwiseLinearCodec::serialize(&column, output)?;
|
||||
}
|
||||
}
|
||||
compressor.compress_into(
|
||||
iterable
|
||||
.boxed_iter()
|
||||
.map(MonotonicallyMappableToU128::to_u128),
|
||||
output,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
|
||||
use super::*;
|
||||
use crate::column_values::{open_u64_mapped, VecColumn};
|
||||
use crate::column_values::u64_based::{
|
||||
serialize_and_load_u64_based_column_values, serialize_u64_based_column_values,
|
||||
ALL_U64_CODEC_TYPES,
|
||||
};
|
||||
use crate::column_values::CodecType;
|
||||
|
||||
const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
|
||||
FastFieldCodecType::Bitpacked,
|
||||
FastFieldCodecType::Linear,
|
||||
FastFieldCodecType::BlockwiseLinear,
|
||||
];
|
||||
|
||||
/// Helper function to serialize a column (autodetect from all codecs) and then open it
|
||||
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
|
||||
column: &[T],
|
||||
) -> Arc<dyn ColumnValues<T>> {
|
||||
let mut buffer = Vec::new();
|
||||
serialize_column_values(&VecColumn::from(&column), &ALL_CODEC_TYPES, &mut buffer).unwrap();
|
||||
open_u64_mapped(OwnedBytes::new(buffer)).unwrap()
|
||||
}
|
||||
#[test]
|
||||
fn test_serialize_deserialize_u128_header() {
|
||||
let original = U128Header {
|
||||
@@ -267,15 +92,22 @@ pub mod tests {
|
||||
#[test]
|
||||
fn test_serialize_deserialize() {
|
||||
let original = [1u64, 5u64, 10u64];
|
||||
let restored: Vec<u64> = serialize_and_load(&original[..]).iter().collect();
|
||||
let restored: Vec<u64> =
|
||||
serialize_and_load_u64_based_column_values(&&original[..], &ALL_U64_CODEC_TYPES)
|
||||
.iter()
|
||||
.collect();
|
||||
assert_eq!(&restored, &original[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fastfield_bool_size_bitwidth_1() {
|
||||
let mut buffer = Vec::new();
|
||||
let col = VecColumn::from(&[false, true][..]);
|
||||
serialize_column_values(&col, &ALL_CODEC_TYPES, &mut buffer).unwrap();
|
||||
serialize_u64_based_column_values::<bool>(
|
||||
&&[false, true][..],
|
||||
&ALL_U64_CODEC_TYPES,
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
// TODO put the header as a footer so that it serves as a padding.
|
||||
// 5 bytes of header, 1 byte of value, 7 bytes of padding.
|
||||
assert_eq!(buffer.len(), 5 + 1);
|
||||
@@ -284,19 +116,23 @@ pub mod tests {
|
||||
#[test]
|
||||
fn test_fastfield_bool_bit_size_bitwidth_0() {
|
||||
let mut buffer = Vec::new();
|
||||
let col = VecColumn::from(&[true][..]);
|
||||
serialize_column_values(&col, &ALL_CODEC_TYPES, &mut buffer).unwrap();
|
||||
// 5 bytes of header, 0 bytes of value, 7 bytes of padding.
|
||||
assert_eq!(buffer.len(), 5);
|
||||
serialize_u64_based_column_values::<bool>(
|
||||
&&[false, true][..],
|
||||
&ALL_U64_CODEC_TYPES,
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
// 6 bytes of header, 0 bytes of value, 7 bytes of padding.
|
||||
assert_eq!(buffer.len(), 6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fastfield_gcd() {
|
||||
let mut buffer = Vec::new();
|
||||
let vals: Vec<u64> = (0..80).map(|val| (val % 7) * 1_000u64).collect();
|
||||
let col = VecColumn::from(&vals[..]);
|
||||
serialize_column_values(&col, &[FastFieldCodecType::Bitpacked], &mut buffer).unwrap();
|
||||
serialize_u64_based_column_values(&&vals[..], &[CodecType::Bitpacked], &mut buffer)
|
||||
.unwrap();
|
||||
// Values are stored over 3 bits.
|
||||
assert_eq!(buffer.len(), 7 + (3 * 80 / 8));
|
||||
assert_eq!(buffer.len(), 6 + (3 * 80 / 8));
|
||||
}
|
||||
}
|
||||
|
||||
columnar/src/column_values/stats.rs (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use common::{BinarySerializable, VInt};
|
||||
|
||||
use crate::RowId;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Stats {
|
||||
pub gcd: NonZeroU64,
|
||||
pub min_value: u64,
|
||||
pub max_value: u64,
|
||||
pub num_rows: RowId,
|
||||
}
|
||||
|
||||
impl Stats {
|
||||
pub fn amplitude(&self) -> u64 {
|
||||
self.max_value - self.min_value
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for Stats {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.min_value).serialize(writer)?;
|
||||
VInt(self.gcd.get()).serialize(writer)?;
|
||||
VInt(self.amplitude() / self.gcd).serialize(writer)?;
|
||||
VInt(self.num_rows as u64).serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let min_value = VInt::deserialize(reader)?.0;
|
||||
let gcd = VInt::deserialize(reader)?.0;
|
||||
let gcd = NonZeroU64::new(gcd)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "GCD of 0 is forbidden"))?;
|
||||
let amplitude = VInt::deserialize(reader)?.0 * gcd.get();
|
||||
let max_value = min_value + amplitude;
|
||||
let num_rows = VInt::deserialize(reader)?.0 as RowId;
|
||||
Ok(Stats {
|
||||
min_value,
|
||||
max_value,
|
||||
num_rows,
|
||||
gcd,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use common::BinarySerializable;
|
||||
|
||||
use crate::column_values::Stats;
|
||||
|
||||
#[track_caller]
|
||||
fn test_stats_ser_deser_aux(stats: &Stats, num_bytes: usize) {
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
stats.serialize(&mut buffer).unwrap();
|
||||
assert_eq!(buffer.len(), num_bytes);
|
||||
let deser_stats = Stats::deserialize(&mut &buffer[..]).unwrap();
|
||||
assert_eq!(stats, &deser_stats);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stats_serialization() {
|
||||
test_stats_ser_deser_aux(
|
||||
&(Stats {
|
||||
gcd: NonZeroU64::new(3).unwrap(),
|
||||
min_value: 1,
|
||||
max_value: 3001,
|
||||
num_rows: 10,
|
||||
}),
|
||||
5,
|
||||
);
|
||||
test_stats_ser_deser_aux(
|
||||
&(Stats {
|
||||
gcd: NonZeroU64::new(1_000).unwrap(),
|
||||
min_value: 1,
|
||||
max_value: 3001,
|
||||
num_rows: 10,
|
||||
}),
|
||||
5,
|
||||
);
|
||||
test_stats_ser_deser_aux(
|
||||
&(Stats {
|
||||
gcd: NonZeroU64::new(1).unwrap(),
|
||||
min_value: 0,
|
||||
max_value: 0,
|
||||
num_rows: 0,
|
||||
}),
|
||||
4,
|
||||
);
|
||||
}
|
||||
}
|
||||
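As a quick cross-check of the byte counts asserted in `test_stats_serialization` above: the header is four VInts (min_value, gcd, amplitude / gcd, num_rows). Assuming `VInt` is the usual 7-bits-per-byte varint, the first case works out to 5 bytes. A minimal standalone sketch (plain integers, not the crate's `Stats` type):

fn vint_len(mut val: u64) -> usize {
    // Assumed VInt encoding: 7 payload bits per byte.
    let mut len = 1;
    while val >= 128 {
        val >>= 7;
        len += 1;
    }
    len
}

fn main() {
    // Stats { gcd: 3, min_value: 1, max_value: 3001, num_rows: 10 }
    // is written as VInt(min) + VInt(gcd) + VInt(amplitude / gcd) + VInt(num_rows).
    let bytes = vint_len(1) + vint_len(3) + vint_len((3001 - 1) / 3) + vint_len(10);
    assert_eq!(bytes, 5); // matches the first assertion above
}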
columnar/src/column_values/u64_based/bitpacked.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use fastdivide::DividerU64;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
|
||||
use crate::{ColumnValues, RowId};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct BitpackedReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
stats: Stats,
|
||||
}
|
||||
|
||||
impl ColumnValues for BitpackedReader {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
self.stats.min_value + self.stats.gcd.get() * self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
#[inline]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> RowId {
|
||||
self.stats.num_rows
|
||||
}
|
||||
}
|
||||
|
||||
fn num_bits(stats: &Stats) -> u8 {
|
||||
compute_num_bits(stats.amplitude() / stats.gcd)
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct BitpackedCodecEstimator;
|
||||
|
||||
impl ColumnCodecEstimator for BitpackedCodecEstimator {
|
||||
fn collect(&mut self, _value: u64) {}
|
||||
|
||||
fn estimate(&self, stats: &Stats) -> Option<u64> {
|
||||
let num_bits_per_value = num_bits(stats);
|
||||
Some(stats.num_bytes() + (stats.num_rows as u64 * (num_bits_per_value as u64) + 7) / 8)
|
||||
}
|
||||
|
||||
fn serialize(
|
||||
&self,
|
||||
stats: &Stats,
|
||||
vals: &mut dyn Iterator<Item = u64>,
|
||||
wrt: &mut dyn Write,
|
||||
) -> io::Result<()> {
|
||||
stats.serialize(wrt)?;
|
||||
let num_bits = num_bits(stats);
|
||||
let mut bit_packer = BitPacker::new();
|
||||
let divider = DividerU64::divide_by(stats.gcd.get());
|
||||
for val in vals {
|
||||
bit_packer.write(divider.divide(val - stats.min_value), num_bits, wrt)?;
|
||||
}
|
||||
bit_packer.close(wrt)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitpackedCodec;
|
||||
|
||||
impl ColumnCodec for BitpackedCodec {
|
||||
type Reader = BitpackedReader;
|
||||
type Estimator = BitpackedCodecEstimator;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let stats = Stats::deserialize(&mut data)?;
|
||||
let num_bits = num_bits(&stats);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedReader {
|
||||
data,
|
||||
bit_unpacker,
|
||||
stats,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::tests::create_and_validate;
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple() {
|
||||
create_and_validate::<BitpackedCodec>(&[4, 3, 12], "name");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple_gcd() {
|
||||
create_and_validate::<BitpackedCodec>(&[1000, 2000, 3000], "name");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = crate::column_values::u64_based::tests::get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate::<BitpackedCodec>(&data, name);
|
||||
data.reverse();
|
||||
create_and_validate::<BitpackedCodec>(&data, name);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bitpacked_fast_field_rand() {
|
||||
for _ in 0..500 {
|
||||
let mut data = (0..1 + rand::random::<u8>() as usize)
|
||||
.map(|_| rand::random::<i64>() as u64 / 2)
|
||||
.collect::<Vec<_>>();
|
||||
create_and_validate::<BitpackedCodec>(&data, "rand");
|
||||
data.reverse();
|
||||
create_and_validate::<BitpackedCodec>(&data, "rand");
|
||||
}
|
||||
}
|
||||
}
|
||||
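To make the bitpacked round trip above concrete: for the `[1000, 2000, 3000]` data set used in `test_with_codec_data_sets_simple_gcd`, the codec stores only `(val - min) / gcd` and the reader recomputes `min + gcd * packed`. A small self-contained sketch of that arithmetic (plain integers, no crate types assumed):

fn main() {
    let vals = [1000u64, 2000, 3000];
    let (min, gcd) = (1000u64, 1000u64);
    // What BitpackedCodecEstimator::serialize writes, before bitpacking.
    let packed: Vec<u64> = vals.iter().map(|v| (v - min) / gcd).collect();
    assert_eq!(packed, vec![0, 1, 2]); // amplitude / gcd = 2, so 2 bits per value
    // What BitpackedReader::get_val computes back.
    let restored: Vec<u64> = packed.iter().map(|p| min + gcd * p).collect();
    assert_eq!(restored, vals);
}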
columnar/src/column_values/u64_based/blockwise_linear.rs (new file, 281 lines)
@@ -0,0 +1,281 @@
|
||||
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
use std::{io, iter};
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, DeserializeFrom, OwnedBytes};
|
||||
use fastdivide::DividerU64;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::u64_based::line::Line;
|
||||
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
|
||||
use crate::column_values::{ColumnValues, VecColumn};
|
||||
use crate::MonotonicallyMappableToU64;
|
||||
|
||||
const BLOCK_SIZE: u32 = 512u32;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct Block {
|
||||
line: Line,
|
||||
bit_unpacker: BitUnpacker,
|
||||
data_start_offset: usize,
|
||||
}
|
||||
|
||||
impl BinarySerializable for Block {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
self.line.serialize(writer)?;
|
||||
self.bit_unpacker.bit_width().serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let line = Line::deserialize(reader)?;
|
||||
let bit_width = u8::deserialize(reader)?;
|
||||
Ok(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_num_blocks(num_vals: u32) -> u32 {
|
||||
(num_vals + BLOCK_SIZE - 1) / BLOCK_SIZE
|
||||
}
|
||||
|
||||
pub struct BlockwiseLinearEstimator {
|
||||
block: Vec<u64>,
|
||||
values_num_bytes: u64,
|
||||
meta_num_bytes: u64,
|
||||
}
|
||||
|
||||
impl Default for BlockwiseLinearEstimator {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
block: Vec::with_capacity(BLOCK_SIZE as usize),
|
||||
values_num_bytes: 0u64,
|
||||
meta_num_bytes: 0u64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockwiseLinearEstimator {
|
||||
fn flush_block_estimate(&mut self) {
|
||||
if self.block.is_empty() {
|
||||
return;
|
||||
}
|
||||
let line = Line::train(&VecColumn::from(&self.block));
|
||||
let mut max_value = 0u64;
|
||||
for (i, buffer_val) in self.block.iter().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
let val = buffer_val.wrapping_sub(interpolated_val);
|
||||
max_value = val.max(max_value);
|
||||
}
|
||||
let bit_width = compute_num_bits(max_value) as usize;
|
||||
self.values_num_bytes += (bit_width * self.block.len() + 7) as u64 / 8;
|
||||
self.meta_num_bytes += 1 + line.num_bytes();
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnCodecEstimator for BlockwiseLinearEstimator {
|
||||
fn collect(&mut self, value: u64) {
|
||||
self.block.push(value);
|
||||
if self.block.len() == BLOCK_SIZE as usize {
|
||||
self.flush_block_estimate();
|
||||
self.block.clear();
|
||||
}
|
||||
}
|
||||
fn estimate(&self, stats: &Stats) -> Option<u64> {
|
||||
let mut estimate = 4 + stats.num_bytes() + self.meta_num_bytes + self.values_num_bytes;
|
||||
if stats.gcd.get() > 1 {
|
||||
let estimate_gain_from_gcd =
|
||||
(stats.gcd.get() as f32).log2().floor() * stats.num_rows as f32 / 8.0f32;
|
||||
estimate = estimate.saturating_sub(estimate_gain_from_gcd as u64);
|
||||
}
|
||||
Some(estimate)
|
||||
}
|
||||
|
||||
fn finalize(&mut self) {
|
||||
self.flush_block_estimate();
|
||||
}
|
||||
|
||||
fn serialize(
|
||||
&self,
|
||||
stats: &Stats,
|
||||
mut vals: &mut dyn Iterator<Item = u64>,
|
||||
wrt: &mut dyn Write,
|
||||
) -> io::Result<()> {
|
||||
stats.serialize(wrt)?;
|
||||
let mut buffer = Vec::with_capacity(BLOCK_SIZE as usize);
|
||||
let num_blocks = compute_num_blocks(stats.num_rows) as usize;
|
||||
let mut blocks = Vec::with_capacity(num_blocks);
|
||||
|
||||
let mut bit_packer = BitPacker::new();
|
||||
|
||||
let gcd_divider = DividerU64::divide_by(stats.gcd.get());
|
||||
|
||||
for _ in 0..num_blocks {
|
||||
buffer.clear();
|
||||
buffer.extend(
|
||||
(&mut vals)
|
||||
.map(MonotonicallyMappableToU64::to_u64)
|
||||
.take(BLOCK_SIZE as usize),
|
||||
);
|
||||
|
||||
for buffer_val in buffer.iter_mut() {
|
||||
*buffer_val = gcd_divider.divide(*buffer_val - stats.min_value);
|
||||
}
|
||||
|
||||
let line = Line::train(&VecColumn::from(&buffer));
|
||||
|
||||
assert!(!buffer.is_empty());
|
||||
|
||||
for (i, buffer_val) in buffer.iter_mut().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
|
||||
}
|
||||
|
||||
let bit_width = buffer.iter().copied().map(compute_num_bits).max().unwrap();
|
||||
|
||||
for &buffer_val in &buffer {
|
||||
bit_packer.write(buffer_val, bit_width, wrt)?;
|
||||
}
|
||||
|
||||
blocks.push(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
});
|
||||
}
|
||||
|
||||
bit_packer.close(wrt)?;
|
||||
|
||||
assert_eq!(blocks.len(), num_blocks);
|
||||
|
||||
let mut counting_wrt = CountingWriter::wrap(wrt);
|
||||
for block in &blocks {
|
||||
block.serialize(&mut counting_wrt)?;
|
||||
}
|
||||
let footer_len = counting_wrt.written_bytes();
|
||||
(footer_len as u32).serialize(&mut counting_wrt)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BlockwiseLinearCodec;
|
||||
|
||||
impl ColumnCodec<u64> for BlockwiseLinearCodec {
|
||||
type Reader = BlockwiseLinearReader;
|
||||
|
||||
type Estimator = BlockwiseLinearEstimator;
|
||||
|
||||
fn load(mut bytes: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let stats = Stats::deserialize(&mut bytes)?;
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
let footer_offset = bytes.len() - 4 - footer_len as usize;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let num_blocks = compute_num_blocks(stats.num_rows);
|
||||
let mut blocks: Vec<Block> = iter::repeat_with(|| Block::deserialize(&mut footer))
|
||||
.take(num_blocks as usize)
|
||||
.collect::<io::Result<_>>()?;
|
||||
let mut start_offset = 0;
|
||||
for block in &mut blocks {
|
||||
block.data_start_offset = start_offset;
|
||||
start_offset += (block.bit_unpacker.bit_width() as usize) * BLOCK_SIZE as usize / 8;
|
||||
}
|
||||
Ok(BlockwiseLinearReader {
|
||||
blocks: blocks.into_boxed_slice().into(),
|
||||
data,
|
||||
stats,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockwiseLinearReader {
|
||||
blocks: Arc<[Block]>,
|
||||
data: OwnedBytes,
|
||||
stats: Stats,
|
||||
}
|
||||
|
||||
impl ColumnValues for BlockwiseLinearReader {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> u64 {
|
||||
let block_id = (idx / BLOCK_SIZE as u32) as usize;
|
||||
let idx_within_block = idx % (BLOCK_SIZE as u32);
|
||||
let block = &self.blocks[block_id];
|
||||
let interpoled_val: u64 = block.line.eval(idx_within_block);
|
||||
let block_bytes = &self.data[block.data_start_offset..];
|
||||
let bitpacked_diff = block.bit_unpacker.get(idx_within_block, block_bytes);
|
||||
// TODO optimize me! the line parameters could be tweaked to include the multiplication and
|
||||
// remove the dependency.
|
||||
self.stats.min_value
|
||||
+ self
|
||||
.stats
|
||||
.gcd
|
||||
.get()
|
||||
.wrapping_mul(interpoled_val.wrapping_add(bitpacked_diff))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.stats.num_rows
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::tests::create_and_validate;
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple() {
|
||||
create_and_validate::<BlockwiseLinearCodec>(
|
||||
&[11, 20, 40, 20, 10, 10, 10, 10, 10, 10],
|
||||
"simple test",
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets_simple_gcd() {
|
||||
let (_, actual_compression_rate) = create_and_validate::<BlockwiseLinearCodec>(
|
||||
&[10, 20, 40, 20, 10, 10, 10, 10, 10, 10],
|
||||
"name",
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(actual_compression_rate, 0.175);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = crate::column_values::u64_based::tests::get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, name);
|
||||
data.reverse();
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, name);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blockwise_linear_fast_field_rand() {
|
||||
for _ in 0..500 {
|
||||
let mut data = (0..1 + rand::random::<u8>() as usize)
|
||||
.map(|_| rand::random::<i64>() as u64 / 2)
|
||||
.collect::<Vec<_>>();
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "rand");
|
||||
data.reverse();
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "rand");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -17,8 +17,8 @@ const MID_POINT: u64 = (1u64 << 32) - 1u64;
|
||||
/// `y = m * x >> 32 + b`
|
||||
#[derive(Debug, Clone, Copy, Default)]
|
||||
pub struct Line {
|
||||
slope: u64,
|
||||
intercept: u64,
|
||||
pub(crate) slope: u64,
|
||||
pub(crate) intercept: u64,
|
||||
}
|
||||
|
||||
/// Compute the line slope.
|
||||
@@ -67,21 +67,8 @@ impl Line {
|
||||
self.intercept.wrapping_add(linear_part)
|
||||
}
|
||||
|
||||
// Same as train, but the intercept is only estimated from provided sample positions
|
||||
pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
|
||||
let first_val = sample_positions_and_values[0].1;
|
||||
let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
|
||||
let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
|
||||
Self::train_from(
|
||||
first_val,
|
||||
last_val,
|
||||
num_vals as u32,
|
||||
sample_positions_and_values.iter().cloned(),
|
||||
)
|
||||
}
|
||||
|
||||
// Intercept is only computed from provided positions
|
||||
fn train_from(
|
||||
pub fn train_from(
|
||||
first_val: u64,
|
||||
last_val: u64,
|
||||
num_vals: u32,
|
||||
@@ -145,6 +132,7 @@ impl Line {
|
||||
///
|
||||
/// This function is only invariant under translation if all of the
/// `ys` are packed into half of the space. (See heuristic below.)
|
||||
/// TODO USE array
|
||||
pub fn train(ys: &dyn ColumnValues) -> Self {
|
||||
let first_val = ys.iter().next().unwrap();
|
||||
let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
|
||||
@@ -158,7 +146,7 @@ impl Line {
|
||||
}
|
||||
|
||||
impl BinarySerializable for Line {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.slope).serialize(writer)?;
|
||||
VInt(self.intercept).serialize(writer)?;
|
||||
Ok(())
|
||||
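The `Line` hunk above documents the fixed-point formula `y = (m * x >> 32) + b`. A tiny standalone check of that formula with an integer slope of 3 stored as `3 << 32` (this mirrors the documented evaluation only, not the crate's `Line` type itself):

fn main() {
    // The slope is a 32.32 fixed-point number: an integer slope of 3 is 3 << 32.
    let slope: u64 = 3u64 << 32;
    let intercept: u64 = 7;
    let eval = |x: u64| intercept.wrapping_add(slope.wrapping_mul(x) >> 32);
    assert_eq!(eval(0), 7);
    assert_eq!(eval(5), 7 + 15); // y = 3 * 5 + 7
}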
columnar/src/column_values/u64_based/linear.rs (new file, 277 lines)
@@ -0,0 +1,277 @@
|
||||
use std::io;
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use super::line::Line;
|
||||
use super::ColumnValues;
|
||||
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
|
||||
use crate::column_values::VecColumn;
|
||||
use crate::RowId;
|
||||
|
||||
const HALF_SPACE: u64 = u64::MAX / 2;
|
||||
const LINE_ESTIMATION_BLOCK_LEN: usize = 512;
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct LinearReader {
|
||||
data: OwnedBytes,
|
||||
linear_params: LinearParams,
|
||||
stats: Stats,
|
||||
}
|
||||
|
||||
impl ColumnValues for LinearReader {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
let interpoled_val: u64 = self.linear_params.line.eval(doc);
|
||||
let bitpacked_diff = self.linear_params.bit_unpacker.get(doc, &self.data);
|
||||
interpoled_val.wrapping_add(bitpacked_diff)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.stats.num_rows
|
||||
}
|
||||
}
|
||||
|
||||
/// Fastfield serializer, which tries to guess values by linear interpolation
|
||||
/// and stores the difference bitpacked.
|
||||
pub struct LinearCodec;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct LinearParams {
|
||||
line: Line,
|
||||
bit_unpacker: BitUnpacker,
|
||||
}
|
||||
|
||||
impl BinarySerializable for LinearParams {
|
||||
fn serialize<W: io::Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
self.line.serialize(writer)?;
|
||||
self.bit_unpacker.bit_width().serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let line = Line::deserialize(reader)?;
|
||||
let bit_width = u8::deserialize(reader)?;
|
||||
Ok(Self {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LinearCodecEstimator {
|
||||
block: Vec<u64>,
|
||||
line: Option<Line>,
|
||||
row_id: RowId,
|
||||
min_deviation: u64,
|
||||
max_deviation: u64,
|
||||
first_val: u64,
|
||||
last_val: u64,
|
||||
}
|
||||
|
||||
impl Default for LinearCodecEstimator {
|
||||
fn default() -> LinearCodecEstimator {
|
||||
LinearCodecEstimator {
|
||||
block: Vec::with_capacity(LINE_ESTIMATION_BLOCK_LEN),
|
||||
line: None,
|
||||
row_id: 0,
|
||||
min_deviation: u64::MAX,
|
||||
max_deviation: u64::MIN,
|
||||
first_val: 0u64,
|
||||
last_val: 0u64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnCodecEstimator for LinearCodecEstimator {
|
||||
fn finalize(&mut self) {
|
||||
if let Some(line) = self.line.as_mut() {
|
||||
line.intercept = line
|
||||
.intercept
|
||||
.wrapping_add(self.min_deviation)
|
||||
.wrapping_sub(HALF_SPACE);
|
||||
}
|
||||
}
|
||||
|
||||
fn estimate(&self, stats: &Stats) -> Option<u64> {
|
||||
let line = self.line?;
|
||||
let amplitude = self.max_deviation - self.min_deviation;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let linear_params = LinearParams {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(num_bits),
|
||||
};
|
||||
Some(
|
||||
stats.num_bytes()
|
||||
+ linear_params.num_bytes()
|
||||
+ (num_bits as u64 * stats.num_rows as u64 + 7) / 8,
|
||||
)
|
||||
}
|
||||
|
||||
fn serialize(
|
||||
&self,
|
||||
stats: &Stats,
|
||||
vals: &mut dyn Iterator<Item = u64>,
|
||||
wrt: &mut dyn io::Write,
|
||||
) -> io::Result<()> {
|
||||
stats.serialize(wrt)?;
|
||||
let line = self.line.unwrap();
|
||||
let amplitude = self.max_deviation - self.min_deviation;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let linear_params = LinearParams {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(num_bits),
|
||||
};
|
||||
linear_params.serialize(wrt)?;
|
||||
let mut bit_packer = BitPacker::new();
|
||||
for (pos, value) in vals.enumerate() {
|
||||
let calculated_value = line.eval(pos as u32);
|
||||
let offset = value.wrapping_sub(calculated_value);
|
||||
bit_packer.write(offset, num_bits, wrt)?;
|
||||
}
|
||||
bit_packer.close(wrt)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect(&mut self, value: u64) {
|
||||
if let Some(line) = self.line {
|
||||
self.collect_after_line_estimation(&line, value);
|
||||
} else {
|
||||
self.collect_before_line_estimation(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LinearCodecEstimator {
|
||||
#[inline]
|
||||
fn collect_after_line_estimation(&mut self, line: &Line, value: u64) {
|
||||
let interpoled_val: u64 = line.eval(self.row_id);
|
||||
let deviation = value.wrapping_add(HALF_SPACE).wrapping_sub(interpoled_val);
|
||||
self.min_deviation = self.min_deviation.min(deviation);
|
||||
self.max_deviation = self.max_deviation.max(deviation);
|
||||
if self.row_id == 0 {
|
||||
self.first_val = value;
|
||||
}
|
||||
self.last_val = value;
|
||||
self.row_id += 1u32;
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn collect_before_line_estimation(&mut self, value: u64) {
|
||||
self.block.push(value);
|
||||
if self.block.len() == LINE_ESTIMATION_BLOCK_LEN {
|
||||
let line = Line::train(&VecColumn::from(&self.block));
|
||||
let block = std::mem::take(&mut self.block);
|
||||
for val in block {
|
||||
self.collect_after_line_estimation(&line, val);
|
||||
}
|
||||
self.line = Some(line);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnCodec for LinearCodec {
|
||||
type Reader = LinearReader;
|
||||
|
||||
type Estimator = LinearCodecEstimator;
|
||||
|
||||
fn load(mut data: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let stats = Stats::deserialize(&mut data)?;
|
||||
let linear_params = LinearParams::deserialize(&mut data)?;
|
||||
Ok(LinearReader {
|
||||
stats,
|
||||
linear_params,
|
||||
data,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use rand::RngCore;
|
||||
|
||||
use super::*;
|
||||
use crate::column_values::u64_based::tests::{create_and_validate, get_codec_test_datasets};
|
||||
|
||||
#[test]
|
||||
fn test_compression_simple() {
|
||||
let vals = (100u64..)
|
||||
.take(super::LINE_ESTIMATION_BLOCK_LEN)
|
||||
.collect::<Vec<_>>();
|
||||
create_and_validate::<LinearCodec>(&vals, "simple monotonically large").unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compression() {
|
||||
let data = (10..=6_000_u64).collect::<Vec<_>>();
|
||||
let (estimate, actual_compression) =
|
||||
create_and_validate::<LinearCodec>(&data, "simple monotonically large").unwrap();
|
||||
assert_le!(actual_compression, 0.001);
|
||||
assert_le!(estimate, 0.02);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_datasets() {
|
||||
let data_sets = get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate::<LinearCodec>(&data, name);
|
||||
data.reverse();
|
||||
create_and_validate::<LinearCodec>(&data, name);
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn linear_interpol_fast_field_test_large_amplitude() {
|
||||
let data = vec![
|
||||
i64::MAX as u64 / 2,
|
||||
i64::MAX as u64 / 3,
|
||||
i64::MAX as u64 / 2,
|
||||
];
|
||||
create_and_validate::<LinearCodec>(&data, "large amplitude");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn overflow_error_test() {
|
||||
let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
|
||||
create_and_validate::<LinearCodec>(&data, "overflow test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linear_interpol_fast_concave_data() {
|
||||
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
|
||||
create_and_validate::<LinearCodec>(&data, "concave data");
|
||||
}
|
||||
#[test]
|
||||
fn linear_interpol_fast_convex_data() {
|
||||
let data = vec![0, 40, 60, 70, 75, 77];
|
||||
create_and_validate::<LinearCodec>(&data, "convex data");
|
||||
}
|
||||
#[test]
|
||||
fn linear_interpol_fast_field_test_simple() {
|
||||
let data = (10..=20_u64).collect::<Vec<_>>();
|
||||
create_and_validate::<LinearCodec>(&data, "simple monotonically");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linear_interpol_fast_field_rand() {
|
||||
let mut rng = rand::thread_rng();
|
||||
for _ in 0..50 {
|
||||
let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
|
||||
create_and_validate::<LinearCodec>(&data, "random");
|
||||
data.reverse();
|
||||
create_and_validate::<LinearCodec>(&data, "random");
|
||||
}
|
||||
}
|
||||
}
|
||||
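The linear codec above boils down to: fit a line through the values, then bitpack each value's deviation from that line, so the bit width is set by the amplitude of the deviations. The following is a minimal, self-contained sketch of that estimation step, not the actual codec; it ignores the wrapping arithmetic and half-space shift used above, and the helper name is made up for illustration.

/// Illustration only: rough bit width per value for a linear codec,
/// assuming a simple line from the first to the last value.
fn linear_bits_per_value(vals: &[u64]) -> u8 {
    assert!(vals.len() >= 2);
    let first = vals[0] as i128;
    let last = *vals.last().unwrap() as i128;
    let n = (vals.len() - 1) as i128;
    let (mut min_dev, mut max_dev) = (i128::MAX, i128::MIN);
    for (i, &val) in vals.iter().enumerate() {
        // Value predicted by the line at position i.
        let predicted = first + (last - first) * i as i128 / n;
        let dev = val as i128 - predicted;
        min_dev = min_dev.min(dev);
        max_dev = max_dev.max(dev);
    }
    // The amplitude of the deviations determines the number of bits per value.
    let amplitude = (max_dev - min_dev) as u128;
    (128 - amplitude.leading_zeros()) as u8
}

For a near-arithmetic sequence such as the `(100u64..)` test data above, the deviations are essentially zero, which is why this codec compresses such columns so well.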
182
columnar/src/column_values/u64_based/mod.rs
Normal file
@@ -0,0 +1,182 @@
|
||||
mod bitpacked;
|
||||
mod blockwise_linear;
|
||||
mod line;
|
||||
mod linear;
|
||||
mod stats_collector;
|
||||
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
|
||||
use crate::column_values::monotonic_mapping::{
|
||||
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
|
||||
};
|
||||
use crate::column_values::u64_based::bitpacked::BitpackedCodec;
|
||||
use crate::column_values::u64_based::blockwise_linear::BlockwiseLinearCodec;
|
||||
use crate::column_values::u64_based::linear::LinearCodec;
|
||||
use crate::column_values::u64_based::stats_collector::StatsCollector;
|
||||
use crate::column_values::{monotonic_map_column, Stats};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{ColumnValues, MonotonicallyMappableToU64};
|
||||
|
||||
pub trait ColumnCodecEstimator<T = u64>: 'static {
|
||||
fn collect(&mut self, value: u64);
|
||||
fn estimate(&self, stats: &Stats) -> Option<u64>;
|
||||
fn finalize(&mut self) {}
|
||||
fn serialize(
|
||||
&self,
|
||||
stats: &Stats,
|
||||
vals: &mut dyn Iterator<Item = T>,
|
||||
wrt: &mut dyn io::Write,
|
||||
) -> io::Result<()>;
|
||||
}
|
||||
|
||||
pub trait ColumnCodec<T: PartialOrd = u64> {
|
||||
type Reader: ColumnValues<T> + 'static;
|
||||
type Estimator: ColumnCodecEstimator + Default;
|
||||
|
||||
fn load(bytes: OwnedBytes) -> io::Result<Self::Reader>;
|
||||
|
||||
fn estimator() -> Self::Estimator {
|
||||
Self::Estimator::default()
|
||||
}
|
||||
fn boxed_estimator() -> Box<dyn ColumnCodecEstimator> {
|
||||
Box::new(Self::estimator())
|
||||
}
|
||||
}
|
||||
|
||||
/// Available codecs to use to encode data converted to u64 (via [`MonotonicallyMappableToU64`]).
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
|
||||
#[repr(u8)]
|
||||
pub enum CodecType {
|
||||
/// Bitpack all values in the value range. The number of bits is defined by the amplitude
|
||||
/// `column.max_value() - column.min_value()`
|
||||
Bitpacked = 0u8,
|
||||
/// Linear interpolation puts a line between the first and last value and then bitpacks the
|
||||
/// values by the offset from the line. The number of bits is defined by the max deviation from
|
||||
/// the line.
|
||||
Linear = 1u8,
|
||||
/// Same as [`CodecType::Linear`], but encodes in blocks of 512 elements.
|
||||
BlockwiseLinear = 2u8,
|
||||
}
|
||||
|
||||
pub const ALL_U64_CODEC_TYPES: [CodecType; 3] = [
|
||||
CodecType::Bitpacked,
|
||||
CodecType::Linear,
|
||||
CodecType::BlockwiseLinear,
|
||||
];
|
||||
|
||||
impl CodecType {
|
||||
fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
fn try_from_code(code: u8) -> Option<CodecType> {
|
||||
match code {
|
||||
0u8 => Some(CodecType::Bitpacked),
|
||||
1u8 => Some(CodecType::Linear),
|
||||
2u8 => Some(CodecType::BlockwiseLinear),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn load<T: MonotonicallyMappableToU64>(
|
||||
&self,
|
||||
bytes: OwnedBytes,
|
||||
) -> io::Result<Arc<dyn ColumnValues<T>>> {
|
||||
match self {
|
||||
CodecType::Bitpacked => load_specific_codec::<BitpackedCodec, T>(bytes),
|
||||
CodecType::Linear => load_specific_codec::<LinearCodec, T>(bytes),
|
||||
CodecType::BlockwiseLinear => load_specific_codec::<BlockwiseLinearCodec, T>(bytes),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn load_specific_codec<C: ColumnCodec, T: MonotonicallyMappableToU64>(
|
||||
bytes: OwnedBytes,
|
||||
) -> io::Result<Arc<dyn ColumnValues<T>>> {
|
||||
let reader = C::load(bytes)?;
|
||||
let reader_typed = monotonic_map_column(
|
||||
reader,
|
||||
StrictlyMonotonicMappingInverter::from(StrictlyMonotonicMappingToInternal::<T>::new()),
|
||||
);
|
||||
Ok(Arc::new(reader_typed))
|
||||
}
|
||||
|
||||
impl CodecType {
|
||||
pub fn estimator(&self) -> Box<dyn ColumnCodecEstimator> {
|
||||
match self {
|
||||
CodecType::Bitpacked => BitpackedCodec::boxed_estimator(),
|
||||
CodecType::Linear => LinearCodec::boxed_estimator(),
|
||||
CodecType::BlockwiseLinear => BlockwiseLinearCodec::boxed_estimator(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn serialize_u64_based_column_values<'a, T: MonotonicallyMappableToU64>(
|
||||
vals: &dyn Iterable<T>,
|
||||
codec_types: &[CodecType],
|
||||
wrt: &mut dyn Write,
|
||||
) -> io::Result<()> {
|
||||
let mut stats_collector = StatsCollector::default();
|
||||
let mut estimators: Vec<(CodecType, Box<dyn ColumnCodecEstimator>)> =
|
||||
Vec::with_capacity(codec_types.len());
|
||||
for &codec_type in codec_types {
|
||||
estimators.push((codec_type, codec_type.estimator()));
|
||||
}
|
||||
for val in vals.boxed_iter() {
|
||||
let val_u64 = val.to_u64();
|
||||
stats_collector.collect(val_u64);
|
||||
for (_, estimator) in &mut estimators {
|
||||
estimator.collect(val_u64);
|
||||
}
|
||||
}
|
||||
for (_, estimator) in &mut estimators {
|
||||
estimator.finalize();
|
||||
}
|
||||
let stats = stats_collector.stats();
|
||||
let (_, best_codec, best_codec_estimator) = estimators
|
||||
.into_iter()
|
||||
.flat_map(|(codec_type, estimator)| {
|
||||
let num_bytes = estimator.estimate(&stats)?;
|
||||
Some((num_bytes, codec_type, estimator))
|
||||
})
|
||||
.min_by_key(|(num_bytes, _, _)| *num_bytes)
|
||||
.ok_or_else(|| {
|
||||
io::Error::new(io::ErrorKind::InvalidData, "No available applicable codec.")
|
||||
})?;
|
||||
best_codec.to_code().serialize(wrt)?;
|
||||
best_codec_estimator.serialize(
|
||||
&stats,
|
||||
&mut vals.boxed_iter().map(MonotonicallyMappableToU64::to_u64),
|
||||
wrt,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_u64_based_column_values<T: MonotonicallyMappableToU64>(
|
||||
mut bytes: OwnedBytes,
|
||||
) -> io::Result<Arc<dyn ColumnValues<T>>> {
|
||||
let codec_type: CodecType = bytes
|
||||
.get(0)
|
||||
.copied()
|
||||
.and_then(CodecType::try_from_code)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Failed to read codec type"))?;
|
||||
bytes.advance(1);
|
||||
codec_type.load(bytes)
|
||||
}
|
||||
|
||||
/// Helper function to serialize a column (autodetect from all codecs) and then open it
|
||||
pub fn serialize_and_load_u64_based_column_values<T: MonotonicallyMappableToU64>(
|
||||
vals: &dyn Iterable,
|
||||
codec_types: &[CodecType],
|
||||
) -> Arc<dyn ColumnValues<T>> {
|
||||
let mut buffer = Vec::new();
|
||||
serialize_u64_based_column_values(vals, codec_types, &mut buffer).unwrap();
|
||||
load_u64_based_column_values::<T>(OwnedBytes::new(buffer)).unwrap()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
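End to end, a column is written by feeding every value to the stats collector and to one estimator per candidate codec, picking the cheapest estimate, and prefixing the payload with the chosen codec's code. Below is a hedged usage sketch of the two entry points defined above, written as if it lived in this crate's tests; the function name is hypothetical, and the `&&vals[..]` pattern mirrors the existing tests, which rely on `&[u64]` implementing `Iterable`.

use common::OwnedBytes;

use crate::column_values::u64_based::{
    load_u64_based_column_values, serialize_u64_based_column_values, ALL_U64_CODEC_TYPES,
};

// Hypothetical round-trip helper, for illustration only.
fn roundtrip_sketch() -> std::io::Result<()> {
    let vals: Vec<u64> = (0..1_000u64).map(|i| 10 + 7 * i).collect();
    let mut buffer: Vec<u8> = Vec::new();
    // The candidate with the smallest estimated size is selected automatically.
    serialize_u64_based_column_values(&&vals[..], &ALL_U64_CODEC_TYPES, &mut buffer)?;
    let column = load_u64_based_column_values::<u64>(OwnedBytes::new(buffer))?;
    assert_eq!(column.num_vals(), 1_000);
    assert_eq!(column.get_val(3), 31);
    Ok(())
}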
200
columnar/src/column_values/u64_based/stats_collector.rs
Normal file
@@ -0,0 +1,200 @@
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use fastdivide::DividerU64;
|
||||
|
||||
use crate::column_values::Stats;
|
||||
use crate::RowId;
|
||||
|
||||
/// Compute the gcd of two non-null numbers.
|
||||
///
|
||||
/// It is recommended, but not required, to feed values such that `large >= small`.
|
||||
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
|
||||
loop {
|
||||
let rem: u64 = large.get() % small;
|
||||
if let Some(new_small) = NonZeroU64::new(rem) {
|
||||
(large, small) = (small, new_small);
|
||||
} else {
|
||||
return small;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct StatsCollector {
|
||||
min_max_opt: Option<(u64, u64)>,
|
||||
num_rows: RowId,
|
||||
// We measure the GCD of the differences between the values and the minimal value.
// This is the same as computing the GCD of the differences between the values and the
// first value.
//
// This way, we can compress i64-converted-to-u64 values (e.g. timestamps that were
// supplied in seconds, only to be converted to microseconds).
|
||||
increment_gcd_opt: Option<(NonZeroU64, DividerU64)>,
|
||||
first_value_opt: Option<u64>,
|
||||
}
|
||||
|
||||
impl StatsCollector {
|
||||
pub fn stats(&self) -> Stats {
|
||||
let (min_value, max_value) = self.min_max_opt.unwrap_or((0u64, 0u64));
|
||||
let increment_gcd = if let Some((increment_gcd, _)) = self.increment_gcd_opt {
|
||||
increment_gcd
|
||||
} else {
|
||||
NonZeroU64::new(1u64).unwrap()
|
||||
};
|
||||
Stats {
|
||||
min_value,
|
||||
max_value,
|
||||
num_rows: self.num_rows,
|
||||
gcd: increment_gcd,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn update_increment_gcd(&mut self, value: u64) {
|
||||
let Some(first_value) = self.first_value_opt else {
|
||||
// We set the first value and just quit.
|
||||
self.first_value_opt = Some(value);
|
||||
return;
|
||||
};
|
||||
let Some(non_zero_value) = NonZeroU64::new(value.abs_diff(first_value)) else {
|
||||
// We can simply skip 0 values.
|
||||
return;
|
||||
};
|
||||
let Some((gcd, gcd_divider)) = self.increment_gcd_opt else {
|
||||
self.set_increment_gcd(non_zero_value);
|
||||
return;
|
||||
};
|
||||
if gcd.get() == 1 {
|
||||
// The gcd is already 1; it can never get any smaller.
|
||||
return;
|
||||
}
|
||||
let remainder =
|
||||
non_zero_value.get() - (gcd_divider.divide(non_zero_value.get())) * gcd.get();
|
||||
if remainder == 0 {
|
||||
return;
|
||||
}
|
||||
let new_gcd = compute_gcd(non_zero_value, gcd);
|
||||
self.set_increment_gcd(new_gcd);
|
||||
}
|
||||
|
||||
fn set_increment_gcd(&mut self, gcd: NonZeroU64) {
|
||||
let new_divider = DividerU64::divide_by(gcd.get());
|
||||
self.increment_gcd_opt = Some((gcd, new_divider));
|
||||
}
|
||||
|
||||
pub fn collect(&mut self, value: u64) {
|
||||
self.min_max_opt = Some(if let Some((min, max)) = self.min_max_opt {
|
||||
(min.min(value), max.max(value))
|
||||
} else {
|
||||
(value, value)
|
||||
});
|
||||
self.num_rows += 1;
|
||||
self.update_increment_gcd(value);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use crate::column_values::u64_based::stats_collector::{compute_gcd, StatsCollector};
|
||||
use crate::column_values::u64_based::Stats;
|
||||
|
||||
fn compute_stats(vals: impl Iterator<Item = u64>) -> Stats {
|
||||
let mut stats_collector = StatsCollector::default();
|
||||
for val in vals {
|
||||
stats_collector.collect(val);
|
||||
}
|
||||
stats_collector.stats()
|
||||
}
|
||||
|
||||
fn find_gcd(vals: impl Iterator<Item = u64>) -> u64 {
|
||||
compute_stats(vals).gcd.get()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_gcd() {
|
||||
let test_compute_gcd_aux = |large, small, expected| {
|
||||
let large = NonZeroU64::new(large).unwrap();
|
||||
let small = NonZeroU64::new(small).unwrap();
|
||||
let expected = NonZeroU64::new(expected).unwrap();
|
||||
assert_eq!(compute_gcd(small, large), expected);
|
||||
assert_eq!(compute_gcd(large, small), expected);
|
||||
};
|
||||
test_compute_gcd_aux(1, 4, 1);
|
||||
test_compute_gcd_aux(2, 4, 2);
|
||||
test_compute_gcd_aux(10, 25, 5);
|
||||
test_compute_gcd_aux(25, 25, 25);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gcd() {
|
||||
assert_eq!(find_gcd([0].into_iter()), 1);
|
||||
assert_eq!(find_gcd([0, 10].into_iter()), 10);
|
||||
assert_eq!(find_gcd([10, 0].into_iter()), 10);
|
||||
assert_eq!(find_gcd([].into_iter()), 1);
|
||||
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), 5);
|
||||
assert_eq!(find_gcd([15, 16, 10].into_iter()), 1);
|
||||
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), 5);
|
||||
assert_eq!(find_gcd([0, 0].into_iter()), 1);
|
||||
assert_eq!(find_gcd([1, 10, 4, 1, 7, 10].into_iter()), 3);
|
||||
assert_eq!(find_gcd([1, 10, 0, 4, 1, 7, 10].into_iter()), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stats() {
|
||||
assert_eq!(
|
||||
compute_stats([].into_iter()),
|
||||
Stats {
|
||||
gcd: NonZeroU64::new(1).unwrap(),
|
||||
min_value: 0,
|
||||
max_value: 0,
|
||||
num_rows: 0
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
compute_stats([0, 1].into_iter()),
|
||||
Stats {
|
||||
gcd: NonZeroU64::new(1).unwrap(),
|
||||
min_value: 0,
|
||||
max_value: 1,
|
||||
num_rows: 2
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
compute_stats([0, 1].into_iter()),
|
||||
Stats {
|
||||
gcd: NonZeroU64::new(1).unwrap(),
|
||||
min_value: 0,
|
||||
max_value: 1,
|
||||
num_rows: 2
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
compute_stats([10, 20, 30].into_iter()),
|
||||
Stats {
|
||||
gcd: NonZeroU64::new(10).unwrap(),
|
||||
min_value: 10,
|
||||
max_value: 30,
|
||||
num_rows: 3
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
compute_stats([10, 50, 10, 30].into_iter()),
|
||||
Stats {
|
||||
gcd: NonZeroU64::new(20).unwrap(),
|
||||
min_value: 10,
|
||||
max_value: 50,
|
||||
num_rows: 4
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
compute_stats([10, 0, 30].into_iter()),
|
||||
Stats {
|
||||
gcd: NonZeroU64::new(10).unwrap(),
|
||||
min_value: 0,
|
||||
max_value: 30,
|
||||
num_rows: 3
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
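The GCD tracked above is the GCD of `value - first_value`; a codec can then store `(value - min) / gcd` instead of the raw deltas, shrinking the bit width whenever all values share a common increment. The snippet below is a self-contained illustration of that saving only; the helper names and the 1_000 step are made up, standing in for what `StatsCollector` would detect.

/// Number of bits needed to represent `max_delta` (0 for 0). Illustration only.
fn bits_needed(max_delta: u64) -> u32 {
    64 - max_delta.leading_zeros()
}

fn gcd_saving_illustration() {
    // Millisecond timestamps that only ever move in 1_000 ms steps.
    let vals: Vec<u64> = (0..1_000u64).map(|i| 1_600_000_000_000 + i * 1_000).collect();
    let min = *vals.iter().min().unwrap();
    let gcd = 1_000u64; // GCD of the increments, as StatsCollector would find it.
    let raw_bits = bits_needed(vals.iter().map(|v| v - min).max().unwrap());
    let gcd_bits = bits_needed(vals.iter().map(|v| (v - min) / gcd).max().unwrap());
    assert_eq!((raw_bits, gcd_bits), (20, 10)); // half the bits per value
}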
@@ -2,53 +2,88 @@ use proptest::prelude::*;
|
||||
use proptest::strategy::Strategy;
|
||||
use proptest::{prop_oneof, proptest};
|
||||
|
||||
use super::bitpacked::BitpackedCodec;
|
||||
use super::blockwise_linear::BlockwiseLinearCodec;
|
||||
use super::linear::LinearCodec;
|
||||
use super::serialize::Header;
|
||||
|
||||
pub(crate) fn create_and_validate<Codec: FastFieldCodec>(
|
||||
data: &[u64],
|
||||
#[test]
|
||||
fn test_serialize_and_load_simple() {
|
||||
let mut buffer = Vec::new();
|
||||
let vals = &[1u64, 2u64, 5u64];
|
||||
serialize_u64_based_column_values(
|
||||
&&vals[..],
|
||||
&[CodecType::Bitpacked, CodecType::BlockwiseLinear],
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(buffer.len(), 7);
|
||||
let col = load_u64_based_column_values::<u64>(OwnedBytes::new(buffer)).unwrap();
|
||||
assert_eq!(col.num_vals(), 3);
|
||||
assert_eq!(col.get_val(0), 1);
|
||||
assert_eq!(col.get_val(1), 2);
|
||||
assert_eq!(col.get_val(2), 5);
|
||||
}
|
||||
pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
|
||||
vals: &[u64],
|
||||
name: &str,
|
||||
) -> Option<(f32, f32)> {
|
||||
let col = &VecColumn::from(data);
|
||||
let header = Header::compute_header(col, &[Codec::CODEC_TYPE])?;
|
||||
let normalized_col = header.normalize_column(col);
|
||||
let estimation = Codec::estimate(&normalized_col)?;
|
||||
let mut stats_collector = StatsCollector::default();
|
||||
let mut codec_estimator: TColumnCodec::Estimator = Default::default();
|
||||
|
||||
let mut out = Vec::new();
|
||||
let col = VecColumn::from(data);
|
||||
serialize_column_values(&col, &[Codec::CODEC_TYPE], &mut out).unwrap();
|
||||
for val in vals.boxed_iter() {
|
||||
stats_collector.collect(val);
|
||||
codec_estimator.collect(val);
|
||||
}
|
||||
codec_estimator.finalize();
|
||||
let stats = stats_collector.stats();
|
||||
let estimation = codec_estimator.estimate(&stats)?;
|
||||
|
||||
let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
|
||||
let mut buffer = Vec::new();
|
||||
codec_estimator
|
||||
.serialize(&stats, vals.boxed_iter().as_mut(), &mut buffer)
|
||||
.unwrap();
|
||||
|
||||
let reader = super::open_u64_mapped::<u64>(OwnedBytes::new(out)).unwrap();
|
||||
assert_eq!(reader.num_vals(), data.len() as u32);
|
||||
for (doc, orig_val) in data.iter().copied().enumerate() {
|
||||
let actual_compression = buffer.len() as u64;
|
||||
|
||||
let reader = TColumnCodec::load(OwnedBytes::new(buffer)).unwrap();
|
||||
assert_eq!(reader.num_vals(), vals.len() as u32);
|
||||
for (doc, orig_val) in vals.iter().copied().enumerate() {
|
||||
let val = reader.get_val(doc as u32);
|
||||
assert_eq!(
|
||||
val, orig_val,
|
||||
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data `{data:?}`",
|
||||
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data `{vals:?}`",
|
||||
);
|
||||
}
|
||||
|
||||
if !data.is_empty() {
|
||||
let test_rand_idx = rand::thread_rng().gen_range(0..=data.len() - 1);
|
||||
let expected_positions: Vec<u32> = data
|
||||
if !vals.is_empty() {
|
||||
let test_rand_idx = rand::thread_rng().gen_range(0..=vals.len() - 1);
|
||||
let expected_positions: Vec<u32> = vals
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_, el)| **el == data[test_rand_idx])
|
||||
.filter(|(_, el)| **el == vals[test_rand_idx])
|
||||
.map(|(pos, _)| pos as u32)
|
||||
.collect();
|
||||
let mut positions = Vec::new();
|
||||
reader.get_docids_for_value_range(
|
||||
data[test_rand_idx]..=data[test_rand_idx],
|
||||
0..data.len() as u32,
|
||||
vals[test_rand_idx]..=vals[test_rand_idx],
|
||||
0..vals.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
assert_eq!(expected_positions, positions);
|
||||
}
|
||||
Some((estimation, actual_compression))
|
||||
if actual_compression > 1000 {
|
||||
assert!(relative_difference(estimation, actual_compression) < 0.10f32);
|
||||
}
|
||||
Some((
|
||||
compression_rate(estimation, stats.num_rows),
|
||||
compression_rate(actual_compression, stats.num_rows),
|
||||
))
|
||||
}
|
||||
|
||||
fn compression_rate(num_bytes: u64, num_values: u32) -> f32 {
|
||||
num_bytes as f32 / (num_values as f32 * 8.0)
|
||||
}
|
||||
|
||||
fn relative_difference(left: u64, right: u64) -> f32 {
|
||||
let left = left as f32;
|
||||
let right = right as f32;
|
||||
2.0f32 * (left - right).abs() / (left + right)
|
||||
}
|
||||
|
||||
proptest! {
|
||||
@@ -64,12 +99,21 @@ proptest! {
|
||||
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_proptest_small_blockwise_linear(data in proptest::collection::vec(num_strategy(), 1..10)) {
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_small_blockwise_linear_example() {
|
||||
create_and_validate::<BlockwiseLinearCodec>(
|
||||
&[9223372036854775808, 9223370937344622593],
|
||||
"proptest multilinearinterpol",
|
||||
);
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(10))]
|
||||
|
||||
@@ -118,8 +162,8 @@ pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
data_and_names
|
||||
}
|
||||
|
||||
fn test_codec<C: FastFieldCodec>() {
|
||||
let codec_name = format!("{:?}", C::CODEC_TYPE);
|
||||
fn test_codec<C: ColumnCodec>() {
|
||||
let codec_name = std::any::type_name::<C>();
|
||||
for (data, dataset_name) in get_codec_test_datasets() {
|
||||
let estimate_actual_opt: Option<(f32, f32)> =
|
||||
tests::create_and_validate::<C>(&data, dataset_name);
|
||||
@@ -146,53 +190,48 @@ fn test_codec_multi_interpolation() {
|
||||
|
||||
use super::*;
|
||||
|
||||
fn estimate<C: ColumnCodec>(vals: &[u64]) -> Option<f32> {
|
||||
let mut stats_collector = StatsCollector::default();
|
||||
let mut estimator = C::Estimator::default();
|
||||
for &val in vals {
|
||||
stats_collector.collect(val);
|
||||
estimator.collect(val);
|
||||
}
|
||||
estimator.finalize();
|
||||
let stats = stats_collector.stats();
|
||||
let num_bytes = estimator.estimate(&stats)?;
|
||||
if stats.num_rows == 0 {
|
||||
return None;
|
||||
}
|
||||
Some(num_bytes as f32 / (8.0 * stats.num_rows as f32))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn estimation_good_interpolation_case() {
|
||||
let data = (10..=20000_u64).collect::<Vec<_>>();
|
||||
let data: VecColumn = data.as_slice().into();
|
||||
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
let linear_interpol_estimation = estimate::<LinearCodec>(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.01);
|
||||
|
||||
let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data).unwrap();
|
||||
let multi_linear_interpol_estimation = estimate::<BlockwiseLinearCodec>(&data).unwrap();
|
||||
assert_le!(multi_linear_interpol_estimation, 0.2);
|
||||
assert_lt!(linear_interpol_estimation, multi_linear_interpol_estimation);
|
||||
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
let bitpacked_estimation = estimate::<BitpackedCodec>(&data).unwrap();
|
||||
assert_lt!(linear_interpol_estimation, bitpacked_estimation);
|
||||
}
|
||||
#[test]
|
||||
fn estimation_test_bad_interpolation_case() {
|
||||
let data: &[u64] = &[200, 10, 10, 10, 10, 1000, 20];
|
||||
|
||||
let data: VecColumn = data.into();
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.34);
|
||||
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_lt!(bitpacked_estimation, linear_interpol_estimation);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn estimation_prefer_bitpacked() {
|
||||
let data = VecColumn::from(&[10, 10, 10, 10]);
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_lt!(bitpacked_estimation, linear_interpol_estimation);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
|
||||
let mut data: Vec<u64> = (201..=20000_u64).collect();
|
||||
data.push(1_000_000);
|
||||
let data: VecColumn = data.as_slice().into();
|
||||
|
||||
// in this case the linear interpolation can't in fact be worse than bitpacking,
// but the estimator adds some threshold, which leads to a worse estimate
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
let linear_interpol_estimation = estimate::<LinearCodec>(&data[..]).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.35);
|
||||
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
let bitpacked_estimation = estimate::<BitpackedCodec>(&data).unwrap();
|
||||
assert_le!(bitpacked_estimation, 0.32);
|
||||
assert_le!(bitpacked_estimation, linear_interpol_estimation);
|
||||
}
|
||||
@@ -201,7 +240,7 @@ fn estimation_test_bad_interpolation_case_monotonically_increasing() {
|
||||
fn test_fast_field_codec_type_to_code() {
|
||||
let mut count_codec = 0;
|
||||
for code in 0..=255 {
|
||||
if let Some(codec_type) = FastFieldCodecType::from_code(code) {
|
||||
if let Some(codec_type) = CodecType::try_from_code(code) {
|
||||
assert_eq!(codec_type.to_code(), code);
|
||||
count_codec += 1;
|
||||
}
|
||||
@@ -209,19 +248,16 @@ fn test_fast_field_codec_type_to_code() {
|
||||
assert_eq!(count_codec, 3);
|
||||
}
|
||||
|
||||
fn test_fastfield_gcd_i64_with_codec(
|
||||
codec_type: FastFieldCodecType,
|
||||
num_vals: usize,
|
||||
) -> io::Result<()> {
|
||||
fn test_fastfield_gcd_i64_with_codec(codec_type: CodecType, num_vals: usize) -> io::Result<()> {
|
||||
let mut vals: Vec<i64> = (-4..=(num_vals as i64) - 5).map(|val| val * 1000).collect();
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
crate::column_values::serialize_column_values(
|
||||
&VecColumn::from(&vals),
|
||||
crate::column_values::serialize_u64_based_column_values(
|
||||
&&vals[..],
|
||||
&[codec_type],
|
||||
&mut buffer,
|
||||
)?;
|
||||
let buffer = OwnedBytes::new(buffer);
|
||||
let column = crate::column_values::open_u64_mapped::<i64>(buffer.clone())?;
|
||||
let column = crate::column_values::load_u64_based_column_values::<i64>(buffer.clone())?;
|
||||
assert_eq!(column.get_val(0), -4000i64);
|
||||
assert_eq!(column.get_val(1), -3000i64);
|
||||
assert_eq!(column.get_val(2), -2000i64);
|
||||
@@ -232,8 +268,8 @@ fn test_fastfield_gcd_i64_with_codec(
|
||||
let mut buffer_without_gcd = Vec::new();
|
||||
vals.pop();
|
||||
vals.push(1001i64);
|
||||
crate::column_values::serialize_column_values(
|
||||
&VecColumn::from(&vals),
|
||||
crate::column_values::serialize_u64_based_column_values(
|
||||
&&vals[..],
|
||||
&[codec_type],
|
||||
&mut buffer_without_gcd,
|
||||
)?;
|
||||
@@ -246,28 +282,25 @@ fn test_fastfield_gcd_i64_with_codec(
|
||||
#[test]
|
||||
fn test_fastfield_gcd_i64() -> io::Result<()> {
|
||||
for &codec_type in &[
|
||||
FastFieldCodecType::Bitpacked,
|
||||
FastFieldCodecType::BlockwiseLinear,
|
||||
FastFieldCodecType::Linear,
|
||||
CodecType::Bitpacked,
|
||||
CodecType::BlockwiseLinear,
|
||||
CodecType::Linear,
|
||||
] {
|
||||
test_fastfield_gcd_i64_with_codec(codec_type, 5500)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_fastfield_gcd_u64_with_codec(
|
||||
codec_type: FastFieldCodecType,
|
||||
num_vals: usize,
|
||||
) -> io::Result<()> {
|
||||
fn test_fastfield_gcd_u64_with_codec(codec_type: CodecType, num_vals: usize) -> io::Result<()> {
|
||||
let mut vals: Vec<u64> = (1..=num_vals).map(|i| i as u64 * 1000u64).collect();
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
crate::column_values::serialize_column_values(
|
||||
&VecColumn::from(&vals),
|
||||
crate::column_values::serialize_u64_based_column_values(
|
||||
&&vals[..],
|
||||
&[codec_type],
|
||||
&mut buffer,
|
||||
)?;
|
||||
let buffer = OwnedBytes::new(buffer);
|
||||
let column = crate::column_values::open_u64_mapped::<u64>(buffer.clone())?;
|
||||
let column = crate::column_values::load_u64_based_column_values::<u64>(buffer.clone())?;
|
||||
assert_eq!(column.get_val(0), 1000u64);
|
||||
assert_eq!(column.get_val(1), 2000u64);
|
||||
assert_eq!(column.get_val(2), 3000u64);
|
||||
@@ -278,8 +311,8 @@ fn test_fastfield_gcd_u64_with_codec(
|
||||
let mut buffer_without_gcd = Vec::new();
|
||||
vals.pop();
|
||||
vals.push(1001u64);
|
||||
crate::column_values::serialize_column_values(
|
||||
&VecColumn::from(&vals),
|
||||
crate::column_values::serialize_u64_based_column_values(
|
||||
&&vals[..],
|
||||
&[codec_type],
|
||||
&mut buffer_without_gcd,
|
||||
)?;
|
||||
@@ -291,9 +324,9 @@ fn test_fastfield_gcd_u64_with_codec(
|
||||
#[test]
|
||||
fn test_fastfield_gcd_u64() -> io::Result<()> {
|
||||
for &codec_type in &[
|
||||
FastFieldCodecType::Bitpacked,
|
||||
FastFieldCodecType::BlockwiseLinear,
|
||||
FastFieldCodecType::Linear,
|
||||
CodecType::Bitpacked,
|
||||
CodecType::BlockwiseLinear,
|
||||
CodecType::Linear,
|
||||
] {
|
||||
test_fastfield_gcd_u64_with_codec(codec_type, 5500)?;
|
||||
}
|
||||
@@ -302,7 +335,10 @@ fn test_fastfield_gcd_u64() -> io::Result<()> {
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield2() {
|
||||
let test_fastfield = crate::column_values::serialize_and_load(&[100u64, 200u64, 300u64]);
|
||||
let test_fastfield = crate::column_values::serialize_and_load_u64_based_column_values::<u64>(
|
||||
&&[100u64, 200u64, 300u64][..],
|
||||
&ALL_U64_CODEC_TYPES,
|
||||
);
|
||||
assert_eq!(test_fastfield.get_val(0), 100);
|
||||
assert_eq!(test_fastfield.get_val(1), 200);
|
||||
assert_eq!(test_fastfield.get_val(2), 300);
|
||||
@@ -4,24 +4,22 @@ use std::net::Ipv6Addr;
|
||||
use crate::value::NumericalType;
|
||||
use crate::InvalidData;
|
||||
|
||||
/// The column type represents the column type and can fit on 6-bits.
|
||||
///
|
||||
/// - bits[0..3]: Column category type.
|
||||
/// - bits[3..6]: Numerical type if necessary.
|
||||
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)]
|
||||
/// The column type represents the column type.
|
||||
/// Any changes need to be propagated to `COLUMN_TYPES`.
|
||||
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd)]
|
||||
#[repr(u8)]
|
||||
pub enum ColumnType {
|
||||
I64 = 0u8,
|
||||
U64 = 1u8,
|
||||
F64 = 2u8,
|
||||
Bytes = 10u8,
|
||||
Str = 14u8,
|
||||
Bool = 18u8,
|
||||
IpAddr = 22u8,
|
||||
DateTime = 26u8,
|
||||
Bytes = 3u8,
|
||||
Str = 4u8,
|
||||
Bool = 5u8,
|
||||
IpAddr = 6u8,
|
||||
DateTime = 7u8,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
// The order needs to match _exactly_ the order in the enum
|
||||
const COLUMN_TYPES: [ColumnType; 8] = [
|
||||
ColumnType::I64,
|
||||
ColumnType::U64,
|
||||
@@ -39,18 +37,7 @@ impl ColumnType {
|
||||
}
|
||||
|
||||
pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
|
||||
use ColumnType::*;
|
||||
match code {
|
||||
0u8 => Ok(I64),
|
||||
1u8 => Ok(U64),
|
||||
2u8 => Ok(F64),
|
||||
10u8 => Ok(Bytes),
|
||||
14u8 => Ok(Str),
|
||||
18u8 => Ok(Bool),
|
||||
22u8 => Ok(IpAddr),
|
||||
26u8 => Ok(Self::DateTime),
|
||||
_ => Err(InvalidData),
|
||||
}
|
||||
COLUMN_TYPES.get(code as usize).copied().ok_or(InvalidData)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,18 +52,6 @@ impl From<NumericalType> for ColumnType {
|
||||
}
|
||||
|
||||
impl ColumnType {
|
||||
/// get column type category
|
||||
pub(crate) fn column_type_category(self) -> ColumnTypeCategory {
|
||||
match self {
|
||||
ColumnType::I64 | ColumnType::U64 | ColumnType::F64 => ColumnTypeCategory::Numerical,
|
||||
ColumnType::Bytes => ColumnTypeCategory::Bytes,
|
||||
ColumnType::Str => ColumnTypeCategory::Str,
|
||||
ColumnType::Bool => ColumnTypeCategory::Bool,
|
||||
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
|
||||
ColumnType::DateTime => ColumnTypeCategory::DateTime,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn numerical_type(&self) -> Option<NumericalType> {
|
||||
match self {
|
||||
ColumnType::I64 => Some(NumericalType::I64),
|
||||
@@ -155,70 +130,20 @@ impl HasAssociatedColumnType for Ipv6Addr {
|
||||
}
|
||||
}
|
||||
|
||||
/// Column types are grouped into different categories that
|
||||
/// corresponds to the different types of `JsonValue` types.
|
||||
///
|
||||
/// The columnar writer will apply coercion rules to make sure that
|
||||
/// at most one column exist per `ColumnTypeCategory`.
|
||||
///
|
||||
/// See also [README.md].
|
||||
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
|
||||
#[repr(u8)]
|
||||
pub enum ColumnTypeCategory {
|
||||
Bool,
|
||||
Str,
|
||||
Numerical,
|
||||
DateTime,
|
||||
Bytes,
|
||||
IpAddr,
|
||||
}
|
||||
|
||||
impl From<ColumnType> for ColumnTypeCategory {
|
||||
fn from(column_type: ColumnType) -> Self {
|
||||
match column_type {
|
||||
ColumnType::I64 => ColumnTypeCategory::Numerical,
|
||||
ColumnType::U64 => ColumnTypeCategory::Numerical,
|
||||
ColumnType::F64 => ColumnTypeCategory::Numerical,
|
||||
ColumnType::Bytes => ColumnTypeCategory::Bytes,
|
||||
ColumnType::Str => ColumnTypeCategory::Str,
|
||||
ColumnType::Bool => ColumnTypeCategory::Bool,
|
||||
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
|
||||
ColumnType::DateTime => ColumnTypeCategory::DateTime,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::HashSet;
|
||||
|
||||
use super::*;
|
||||
use crate::Cardinality;
|
||||
|
||||
#[test]
|
||||
fn test_column_type_to_code() {
|
||||
let mut column_type_set: HashSet<ColumnType> = HashSet::new();
|
||||
for code in u8::MIN..=u8::MAX {
|
||||
if let Ok(column_type) = ColumnType::try_from_code(code) {
|
||||
assert_eq!(column_type.to_code(), code);
|
||||
assert!(column_type_set.insert(column_type));
|
||||
for (code, expected_column_type) in super::COLUMN_TYPES.iter().copied().enumerate() {
|
||||
if let Ok(column_type) = ColumnType::try_from_code(code as u8) {
|
||||
assert_eq!(column_type, expected_column_type);
|
||||
}
|
||||
}
|
||||
assert_eq!(column_type_set.len(), super::COLUMN_TYPES.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_category_sort_consistent_with_column_type_sort() {
|
||||
// This is a very important property because
// we need to serialize columns in the right order.
|
||||
let mut column_types: Vec<ColumnType> = super::COLUMN_TYPES.iter().copied().collect();
|
||||
column_types.sort_by_key(|col| col.to_code());
|
||||
let column_categories: Vec<ColumnTypeCategory> = column_types
|
||||
.into_iter()
|
||||
.map(ColumnTypeCategory::from)
|
||||
.collect();
|
||||
for (prev, next) in column_categories.iter().zip(column_categories.iter()) {
|
||||
assert!(prev <= next);
|
||||
for code in COLUMN_TYPES.len() as u8..=u8::MAX {
|
||||
assert!(ColumnType::try_from_code(code as u8).is_err());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,176 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
|
||||
use super::column_type::ColumnTypeCategory;
|
||||
use crate::columnar::ColumnarReader;
|
||||
use crate::dynamic_column::DynamicColumn;
|
||||
|
||||
pub enum MergeDocOrder {
|
||||
/// Columnar tables are simply stacked one above the other.
|
||||
/// If the i-th columnar_readers has n_rows_i rows, then
|
||||
/// in the resulting columnar,
|
||||
/// rows [r0..n_row_0) contains the row of columnar_readers[0], in ordder
|
||||
/// rows [n_row_0..n_row_0 + n_row_1 contains the row of columnar_readers[1], in order.
|
||||
/// ..
|
||||
Stack,
|
||||
/// Some more complex mapping, that can interleaves rows from the different readers and
|
||||
/// possibly drop rows.
|
||||
Complex(()),
|
||||
}
|
||||
|
||||
pub fn merge_columnar(
|
||||
_columnar_readers: &[ColumnarReader],
|
||||
mapping: MergeDocOrder,
|
||||
_output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
match mapping {
|
||||
MergeDocOrder::Stack => {
|
||||
// implement me :)
|
||||
todo!();
|
||||
}
|
||||
MergeDocOrder::Complex(_) => {
|
||||
// for later
|
||||
todo!();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn collect_columns(
|
||||
columnar_readers: &[&ColumnarReader],
|
||||
) -> io::Result<HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>>> {
|
||||
// Each column name may have multiple types of column associated.
|
||||
// For merging we are interested in the same column type category since they can be merged.
|
||||
let mut field_name_to_group: HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>> =
|
||||
HashMap::new();
|
||||
|
||||
for columnar_reader in columnar_readers {
|
||||
let column_name_and_handle = columnar_reader.list_columns()?;
|
||||
for (column_name, handle) in column_name_and_handle {
|
||||
let column_type_to_handles = field_name_to_group
|
||||
.entry(column_name.to_string())
|
||||
.or_default();
|
||||
|
||||
let columns = column_type_to_handles
|
||||
.entry(handle.column_type().column_type_category())
|
||||
.or_default();
|
||||
columns.push(handle.open()?);
|
||||
}
|
||||
}
|
||||
|
||||
normalize_columns(&mut field_name_to_group);
|
||||
|
||||
Ok(field_name_to_group)
|
||||
}
|
||||
|
||||
/// Cast numerical type columns to the same type
|
||||
pub(crate) fn normalize_columns(
|
||||
map: &mut HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>>,
|
||||
) {
|
||||
for (_field_name, type_category_to_columns) in map.iter_mut() {
|
||||
for (type_category, columns) in type_category_to_columns {
|
||||
if type_category == &ColumnTypeCategory::Numerical {
|
||||
let casted_columns = cast_to_common_numerical_column(&columns);
|
||||
*columns = casted_columns;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Receives a list of columns of numerical types (u64, i64, f64)
|
||||
///
|
||||
/// Returns a list of `DynamicColumn` which are all of the same numerical type
|
||||
fn cast_to_common_numerical_column(columns: &[DynamicColumn]) -> Vec<DynamicColumn> {
|
||||
assert!(columns
|
||||
.iter()
|
||||
.all(|column| column.column_type().numerical_type().is_some()));
|
||||
let coerce_to_i64: Vec<_> = columns
|
||||
.iter()
|
||||
.map(|column| column.clone().coerce_to_i64())
|
||||
.collect();
|
||||
|
||||
if coerce_to_i64.iter().all(|column| column.is_some()) {
|
||||
return coerce_to_i64
|
||||
.into_iter()
|
||||
.map(|column| column.unwrap())
|
||||
.collect();
|
||||
}
|
||||
|
||||
let coerce_to_u64: Vec<_> = columns
|
||||
.iter()
|
||||
.map(|column| column.clone().coerce_to_u64())
|
||||
.collect();
|
||||
|
||||
if coerce_to_u64.iter().all(|column| column.is_some()) {
|
||||
return coerce_to_u64
|
||||
.into_iter()
|
||||
.map(|column| column.unwrap())
|
||||
.collect();
|
||||
}
|
||||
|
||||
columns
|
||||
.iter()
|
||||
.map(|column| {
|
||||
column
|
||||
.clone()
|
||||
.coerce_to_f64()
|
||||
.expect("couldn't cast column to f64")
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::ColumnarWriter;
|
||||
|
||||
#[test]
|
||||
fn test_column_coercion() {
|
||||
// i64 type
|
||||
let columnar1 = {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(1u32, "numbers", 1i64);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(2, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
};
|
||||
// u64 type
|
||||
let columnar2 = {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(1u32, "numbers", u64::MAX - 100);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(2, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
};
|
||||
|
||||
// f64 type
|
||||
let columnar3 = {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(1u32, "numbers", 30.5);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(2, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
};
|
||||
|
||||
let column_map = collect_columns(&[&columnar1, &columnar2, &columnar3]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
let cat_to_columns = column_map.get("numbers").unwrap();
|
||||
assert_eq!(cat_to_columns.len(), 1);
|
||||
|
||||
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
|
||||
assert!(numerical.iter().all(|column| column.is_f64()));
|
||||
|
||||
let column_map = collect_columns(&[&columnar1, &columnar1]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
let cat_to_columns = column_map.get("numbers").unwrap();
|
||||
assert_eq!(cat_to_columns.len(), 1);
|
||||
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
|
||||
assert!(numerical.iter().all(|column| column.is_i64()));
|
||||
|
||||
let column_map = collect_columns(&[&columnar2, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
let cat_to_columns = column_map.get("numbers").unwrap();
|
||||
assert_eq!(cat_to_columns.len(), 1);
|
||||
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
|
||||
assert!(numerical.iter().all(|column| column.is_u64()));
|
||||
}
|
||||
}
|
||||
204
columnar/src/columnar/merge/merge_dict_column.rs
Normal file
@@ -0,0 +1,204 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::{BitSet, CountingWriter, ReadOnlyBitSet};
|
||||
use sstable::{SSTable, TermOrdinal};
|
||||
|
||||
use super::term_merger::TermMerger;
|
||||
use crate::column::serialize_column_mappable_to_u64;
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::iterable::Iterable;
|
||||
use crate::{BytesColumn, MergeRowOrder, ShuffleMergeOrder};
|
||||
|
||||
// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
|
||||
// Column: [Column Index, Column Values, column index num bytes U32::LE]
|
||||
pub fn merge_bytes_or_str_column(
|
||||
column_index: SerializableColumnIndex<'_>,
|
||||
bytes_columns: &[Option<BytesColumn>],
|
||||
merge_row_order: &MergeRowOrder,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<()> {
|
||||
// Serialize dict and generate mapping for values
|
||||
let mut output = CountingWriter::wrap(output);
|
||||
// TODO !!! Remove useless terms.
|
||||
let term_ord_mapping = serialize_merged_dict(bytes_columns, merge_row_order, &mut output)?;
|
||||
let dictionary_num_bytes: u32 = output.written_bytes() as u32;
|
||||
let output = output.finish();
|
||||
let remapped_term_ordinals_values = RemappedTermOrdinalsValues {
|
||||
bytes_columns,
|
||||
term_ord_mapping: &term_ord_mapping,
|
||||
merge_row_order,
|
||||
};
|
||||
serialize_column_mappable_to_u64(column_index, &remapped_term_ordinals_values, output)?;
|
||||
output.write_all(&dictionary_num_bytes.to_le_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct RemappedTermOrdinalsValues<'a> {
|
||||
bytes_columns: &'a [Option<BytesColumn>],
|
||||
term_ord_mapping: &'a TermOrdinalMapping,
|
||||
merge_row_order: &'a MergeRowOrder,
|
||||
}
|
||||
|
||||
impl<'a> Iterable for RemappedTermOrdinalsValues<'a> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
match self.merge_row_order {
|
||||
MergeRowOrder::Stack(_) => self.boxed_iter_stacked(),
|
||||
MergeRowOrder::Shuffled(shuffle_merge_order) => {
|
||||
self.boxed_iter_shuffled(shuffle_merge_order)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> RemappedTermOrdinalsValues<'a> {
|
||||
fn boxed_iter_stacked(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
let iter = self
|
||||
.bytes_columns
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(segment_ord, byte_column)| {
|
||||
let segment_ord = self.term_ord_mapping.get_segment(segment_ord as u32);
|
||||
byte_column.into_iter().flat_map(move |bytes_column| {
|
||||
bytes_column
|
||||
.ords()
|
||||
.values
|
||||
.iter()
|
||||
.map(move |term_ord| segment_ord[term_ord as usize])
|
||||
})
|
||||
});
|
||||
// TODO see if we can better decompose the mapping / and the stacking
|
||||
Box::new(iter)
|
||||
}
|
||||
|
||||
fn boxed_iter_shuffled<'b>(
|
||||
&'b self,
|
||||
shuffle_merge_order: &'b ShuffleMergeOrder,
|
||||
) -> Box<dyn Iterator<Item = u64> + 'b> {
|
||||
Box::new(
|
||||
shuffle_merge_order
|
||||
.iter_new_to_old_row_addrs()
|
||||
.flat_map(move |old_addr| {
|
||||
let segment_ord = self.term_ord_mapping.get_segment(old_addr.segment_ord);
|
||||
self.bytes_columns[old_addr.segment_ord as usize]
|
||||
.as_ref()
|
||||
.into_iter()
|
||||
.flat_map(move |bytes_column| {
|
||||
bytes_column
|
||||
.term_ords(old_addr.row_id)
|
||||
.map(|old_term_ord: u64| segment_ord[old_term_ord as usize])
|
||||
})
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_term_bitset(column: &BytesColumn, row_bitset: &ReadOnlyBitSet) -> BitSet {
|
||||
let num_terms = column.dictionary().num_terms();
|
||||
let mut term_bitset = BitSet::with_max_value(num_terms as u32);
|
||||
for row_id in row_bitset.iter() {
|
||||
for term_ord in column.term_ord_column.values(row_id) {
|
||||
term_bitset.insert(term_ord as u32);
|
||||
}
|
||||
}
|
||||
term_bitset
|
||||
}
|
||||
|
||||
fn is_term_present(bitsets: &[Option<BitSet>], term_merger: &TermMerger) -> bool {
|
||||
for (segment_ord, from_term_ord) in term_merger.matching_segments() {
|
||||
if let Some(bitset) = bitsets[segment_ord].as_ref() {
|
||||
if bitset.contains(from_term_ord as u32) {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn serialize_merged_dict(
|
||||
bytes_columns: &[Option<BytesColumn>],
|
||||
merge_row_order: &MergeRowOrder,
|
||||
output: &mut impl Write,
|
||||
) -> io::Result<TermOrdinalMapping> {
|
||||
let mut term_ord_mapping = TermOrdinalMapping::default();
|
||||
|
||||
let mut field_term_streams = Vec::new();
|
||||
for column in bytes_columns.iter().flatten() {
|
||||
term_ord_mapping.add_segment(column.dictionary.num_terms());
|
||||
let terms = column.dictionary.stream()?;
|
||||
field_term_streams.push(terms);
|
||||
}
|
||||
|
||||
let mut merged_terms = TermMerger::new(field_term_streams);
|
||||
let mut sstable_builder = sstable::VoidSSTable::writer(output);
|
||||
|
||||
// TODO support complex `merge_row_order`.
|
||||
match merge_row_order {
|
||||
MergeRowOrder::Stack(_) => {
|
||||
let mut current_term_ord = 0;
|
||||
while merged_terms.advance() {
|
||||
let term_bytes: &[u8] = merged_terms.key();
|
||||
sstable_builder.insert(term_bytes, &())?;
|
||||
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
|
||||
term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
|
||||
}
|
||||
current_term_ord += 1;
|
||||
}
|
||||
sstable_builder.finish()?;
|
||||
}
|
||||
MergeRowOrder::Shuffled(shuffle_merge_order) => {
|
||||
assert_eq!(shuffle_merge_order.alive_bitsets.len(), bytes_columns.len());
|
||||
let mut term_bitsets: Vec<Option<BitSet>> = Vec::with_capacity(bytes_columns.len());
|
||||
for (alive_bitset_opt, bytes_column_opt) in shuffle_merge_order
|
||||
.alive_bitsets
|
||||
.iter()
|
||||
.zip(bytes_columns.iter())
|
||||
{
|
||||
match (alive_bitset_opt, bytes_column_opt) {
|
||||
(Some(alive_bitset), Some(bytes_column)) => {
|
||||
let term_bitset = compute_term_bitset(bytes_column, alive_bitset);
|
||||
term_bitsets.push(Some(term_bitset));
|
||||
}
|
||||
_ => {
|
||||
term_bitsets.push(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut current_term_ord = 0;
|
||||
while merged_terms.advance() {
|
||||
let term_bytes: &[u8] = merged_terms.key();
|
||||
if !is_term_present(&term_bitsets[..], &merged_terms) {
|
||||
continue;
|
||||
}
|
||||
sstable_builder.insert(term_bytes, &())?;
|
||||
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
|
||||
term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
|
||||
}
|
||||
current_term_ord += 1;
|
||||
}
|
||||
sstable_builder.finish()?;
|
||||
}
|
||||
}
|
||||
Ok(term_ord_mapping)
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct TermOrdinalMapping {
|
||||
per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>,
|
||||
}
|
||||
|
||||
impl TermOrdinalMapping {
|
||||
fn add_segment(&mut self, max_term_ord: usize) {
|
||||
self.per_segment_new_term_ordinals
|
||||
.push(vec![TermOrdinal::default(); max_term_ord as usize]);
|
||||
}
|
||||
|
||||
fn register_from_to(&mut self, segment_ord: usize, from_ord: TermOrdinal, to_ord: TermOrdinal) {
|
||||
self.per_segment_new_term_ordinals[segment_ord][from_ord as usize] = to_ord;
|
||||
}
|
||||
|
||||
fn get_segment(&self, segment_ord: u32) -> &[TermOrdinal] {
|
||||
&(self.per_segment_new_term_ordinals[segment_ord as usize])[..]
|
||||
}
|
||||
}
|
||||
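The `TermOrdinalMapping` above records, for each input segment, where every old term ordinal lands in the merged dictionary. The sketch below captures that idea with plain sorted `Vec<&str>`s instead of streamed SSTables; it is an illustration only and the function name is made up.

// Illustration only: remap per-segment term ordinals into a merged dictionary.
fn remap_term_ordinals(dicts: &[Vec<&str>]) -> Vec<Vec<u64>> {
    // Build the sorted, deduplicated union of all terms (the merged dictionary).
    let mut merged: Vec<&str> = dicts.iter().flatten().copied().collect();
    merged.sort_unstable();
    merged.dedup();
    // For each segment, map its old ordinal to the ordinal in the merged dictionary.
    dicts
        .iter()
        .map(|dict| {
            dict.iter()
                .map(|term| merged.binary_search(term).unwrap() as u64)
                .collect()
        })
        .collect()
}

// remap_term_ordinals(&[vec!["apple", "cherry"], vec!["banana", "cherry"]])
// == vec![vec![0, 2], vec![1, 2]]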
118
columnar/src/columnar/merge/merge_mapping.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use std::ops::Range;
|
||||
|
||||
use common::{BitSet, OwnedBytes, ReadOnlyBitSet};
|
||||
|
||||
use crate::{ColumnarReader, RowAddr, RowId};
|
||||
|
||||
pub struct StackMergeOrder {
|
||||
// This does not start at 0. The first element is the number of
// rows in the first columnar.
|
||||
cumulated_row_ids: Vec<RowId>,
|
||||
}
|
||||
|
||||
impl StackMergeOrder {
|
||||
pub fn stack(columnars: &[&ColumnarReader]) -> StackMergeOrder {
|
||||
let mut cumulated_row_ids: Vec<RowId> = Vec::with_capacity(columnars.len());
|
||||
let mut cumulated_row_id = 0;
|
||||
for columnar in columnars {
|
||||
cumulated_row_id += columnar.num_rows();
|
||||
cumulated_row_ids.push(cumulated_row_id);
|
||||
}
|
||||
StackMergeOrder { cumulated_row_ids }
|
||||
}
|
||||
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
self.cumulated_row_ids.last().copied().unwrap_or(0)
|
||||
}
|
||||
|
||||
pub fn offset(&self, columnar_id: usize) -> RowId {
|
||||
if columnar_id == 0 {
|
||||
return 0;
|
||||
}
|
||||
self.cumulated_row_ids[columnar_id - 1]
|
||||
}
|
||||
|
||||
pub fn columnar_range(&self, columnar_id: usize) -> Range<RowId> {
|
||||
self.offset(columnar_id)..self.offset(columnar_id + 1)
|
||||
}
|
||||
}
|
||||
|
||||
pub enum MergeRowOrder {
|
||||
/// Columnar tables are simply stacked one above the other.
|
||||
/// If the i-th columnar_readers has n_rows_i rows, then
|
||||
/// in the resulting columnar,
|
||||
/// rows [0..n_row_0) contain the rows of columnar_readers[0], in order,
/// rows [n_row_0..n_row_0 + n_row_1) contain the rows of columnar_readers[1], in order,
/// ..
/// No document is deleted.
|
||||
Stack(StackMergeOrder),
|
||||
/// Some more complex mapping, that may interleave rows from the different readers,
/// drop rows, or do both.
|
||||
Shuffled(ShuffleMergeOrder),
|
||||
}
|
||||
|
||||
impl From<StackMergeOrder> for MergeRowOrder {
|
||||
fn from(stack_merge_order: StackMergeOrder) -> MergeRowOrder {
|
||||
MergeRowOrder::Stack(stack_merge_order)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ShuffleMergeOrder> for MergeRowOrder {
|
||||
fn from(shuffle_merge_order: ShuffleMergeOrder) -> MergeRowOrder {
|
||||
MergeRowOrder::Shuffled(shuffle_merge_order)
|
||||
}
|
||||
}
|
||||
|
||||
impl MergeRowOrder {
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
match self {
|
||||
MergeRowOrder::Stack(stack_row_order) => stack_row_order.num_rows(),
|
||||
MergeRowOrder::Shuffled(complex_mapping) => complex_mapping.num_rows(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ShuffleMergeOrder {
|
||||
pub new_row_id_to_old_row_id: Vec<RowAddr>,
|
||||
pub alive_bitsets: Vec<Option<ReadOnlyBitSet>>,
|
||||
}
|
||||
|
||||
impl ShuffleMergeOrder {
|
||||
pub fn for_test(
|
||||
segment_num_rows: &[RowId],
|
||||
new_row_id_to_old_row_id: Vec<RowAddr>,
|
||||
) -> ShuffleMergeOrder {
|
||||
let mut alive_bitsets: Vec<BitSet> = segment_num_rows
|
||||
.iter()
|
||||
.map(|&num_rows| BitSet::with_max_value(num_rows))
|
||||
.collect();
|
||||
for &RowAddr {
|
||||
segment_ord,
|
||||
row_id,
|
||||
} in &new_row_id_to_old_row_id
|
||||
{
|
||||
alive_bitsets[segment_ord as usize].insert(row_id);
|
||||
}
|
||||
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = alive_bitsets
|
||||
.into_iter()
|
||||
.map(|alive_bitset| {
|
||||
let mut buffer = Vec::new();
|
||||
alive_bitset.serialize(&mut buffer).unwrap();
|
||||
let data = OwnedBytes::new(buffer);
|
||||
Some(ReadOnlyBitSet::open(data))
|
||||
})
|
||||
.collect();
|
||||
ShuffleMergeOrder {
|
||||
new_row_id_to_old_row_id,
|
||||
alive_bitsets,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
self.new_row_id_to_old_row_id.len() as RowId
|
||||
}
|
||||
|
||||
pub fn iter_new_to_old_row_addrs(&self) -> impl Iterator<Item = RowAddr> + '_ {
|
||||
self.new_row_id_to_old_row_id.iter().copied()
|
||||
}
|
||||
}
|
||||
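Under `StackMergeOrder`, row `r` of columnar `i` simply becomes global row `offset(i) + r`, and `cumulated_row_ids` is the running total of rows. A tiny illustration of that bookkeeping with made-up numbers, assuming three input columnars:

fn stack_offsets_illustration() {
    // Three columnars with 5, 0 and 3 rows respectively.
    let num_rows = [5u32, 0, 3];
    let mut cumulated_row_ids = Vec::new();
    let mut acc = 0u32;
    for n in num_rows {
        acc += n;
        cumulated_row_ids.push(acc);
    }
    // Mirrors StackMergeOrder: offset(0) = 0, offset(i) = cumulated_row_ids[i - 1].
    assert_eq!(cumulated_row_ids, vec![5, 5, 8]);
    // Row 2 of columnar 2 therefore becomes global row 5 + 2 = 7,
    // and num_rows() is the last cumulated value, 8.
}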
271
columnar/src/columnar/merge/mod.rs
Normal file
@@ -0,0 +1,271 @@
|
||||
mod merge_dict_column;
|
||||
mod merge_mapping;
|
||||
mod term_merger;
|
||||
|
||||
// mod sorted_doc_id_column;
|
||||
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::io;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use merge_mapping::{MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
|
||||
|
||||
use super::writer::ColumnarSerializer;
|
||||
use crate::column::{serialize_column_mappable_to_u128, serialize_column_mappable_to_u64};
|
||||
use crate::column_values::MergedColumnValues;
|
||||
use crate::columnar::merge::merge_dict_column::merge_bytes_or_str_column;
|
||||
use crate::columnar::writer::CompatibleNumericalTypes;
|
||||
use crate::columnar::ColumnarReader;
|
||||
use crate::dynamic_column::DynamicColumn;
|
||||
use crate::{
|
||||
BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues, NumericalType, NumericalValue,
|
||||
};
|
||||
|
||||
/// Column types are grouped into different categories.
/// After merge, all columns belonging to the same category are coerced to
/// the same column type.
///
/// In practice, only numerical columns are coerced into one type today.
///
/// See also [README.md].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
|
||||
enum ColumnTypeCategory {
|
||||
Bool,
|
||||
Str,
|
||||
Numerical,
|
||||
DateTime,
|
||||
Bytes,
|
||||
IpAddr,
|
||||
}
|
||||
|
||||
impl From<ColumnType> for ColumnTypeCategory {
|
||||
fn from(column_type: ColumnType) -> Self {
|
||||
match column_type {
|
||||
ColumnType::I64 => ColumnTypeCategory::Numerical,
|
||||
ColumnType::U64 => ColumnTypeCategory::Numerical,
|
||||
ColumnType::F64 => ColumnTypeCategory::Numerical,
|
||||
ColumnType::Bytes => ColumnTypeCategory::Bytes,
|
||||
ColumnType::Str => ColumnTypeCategory::Str,
|
||||
ColumnType::Bool => ColumnTypeCategory::Bool,
|
||||
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
|
||||
ColumnType::DateTime => ColumnTypeCategory::DateTime,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
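Aside: the coercion rule applied to the Numerical category can be summarized with a small sketch. This is a simplification based on the tests further down (CompatibleNumericalTypes is the authoritative implementation); pick_merged_type and MergedNumericalType are illustrative names only, and min/max are approximated with f64.

// Simplified sketch of numerical coercion during a merge: prefer i64,
// then u64, and fall back to f64 when neither integer range covers all
// observed values. Not the crate's implementation.
#[derive(Debug, PartialEq)]
enum MergedNumericalType { I64, U64, F64 }

fn pick_merged_type(min: f64, max: f64) -> MergedNumericalType {
    if min >= i64::MIN as f64 && max <= i64::MAX as f64 {
        MergedNumericalType::I64
    } else if min >= 0.0 && max <= u64::MAX as f64 {
        MergedNumericalType::U64
    } else {
        MergedNumericalType::F64
    }
}

fn main() {
    // An i64 column merged with a column holding u64::MAX is coerced to u64
    // (cf. test_column_coercion_to_u64 below).
    assert_eq!(pick_merged_type(1.0, u64::MAX as f64), MergedNumericalType::U64);
    // An i64 column holding -1 merged with a small u64 column stays i64
    // (cf. test_column_coercion_to_i64 below).
    assert_eq!(pick_merged_type(-1.0, 2.0), MergedNumericalType::I64);
}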
pub fn merge_columnar(
|
||||
columnar_readers: &[&ColumnarReader],
|
||||
merge_row_order: MergeRowOrder,
|
||||
output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let mut serializer = ColumnarSerializer::new(output);
|
||||
|
||||
let columns_to_merge = group_columns_for_merge(columnar_readers)?;
|
||||
for ((column_name, column_type), columns) in columns_to_merge {
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name.as_bytes(), column_type);
|
||||
merge_column(
|
||||
column_type,
|
||||
columns,
|
||||
&merge_row_order,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
serializer.finalize(merge_row_order.num_rows())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn dynamic_column_to_u64_monotonic(dynamic_column: DynamicColumn) -> Option<Column<u64>> {
|
||||
match dynamic_column {
|
||||
DynamicColumn::Bool(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::I64(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::U64(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::F64(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::DateTime(column) => Some(column.to_u64_monotonic()),
|
||||
DynamicColumn::IpAddr(_) | DynamicColumn::Bytes(_) | DynamicColumn::Str(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn merge_column(
|
||||
column_type: ColumnType,
|
||||
columns: Vec<Option<DynamicColumn>>,
|
||||
merge_row_order: &MergeRowOrder,
|
||||
wrt: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
match column_type {
|
||||
ColumnType::I64
|
||||
| ColumnType::U64
|
||||
| ColumnType::F64
|
||||
| ColumnType::DateTime
|
||||
| ColumnType::Bool => {
|
||||
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
|
||||
let mut column_values: Vec<Option<Arc<dyn ColumnValues>>> =
|
||||
Vec::with_capacity(columns.len());
|
||||
for dynamic_column_opt in columns {
|
||||
if let Some(Column { idx, values }) =
|
||||
dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic)
|
||||
{
|
||||
column_indexes.push(Some(idx));
|
||||
column_values.push(Some(values));
|
||||
} else {
|
||||
column_indexes.push(None);
|
||||
column_values.push(None);
|
||||
}
|
||||
}
|
||||
let merged_column_index =
|
||||
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
|
||||
let merge_column_values = MergedColumnValues {
|
||||
column_indexes: &column_indexes[..],
|
||||
column_values: &column_values[..],
|
||||
merge_row_order,
|
||||
};
|
||||
serialize_column_mappable_to_u64(merged_column_index, &merge_column_values, wrt)?;
|
||||
}
|
||||
ColumnType::IpAddr => {
|
||||
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
|
||||
let mut column_values: Vec<Option<Arc<dyn ColumnValues<Ipv6Addr>>>> =
|
||||
Vec::with_capacity(columns.len());
|
||||
for dynamic_column_opt in columns {
|
||||
if let Some(DynamicColumn::IpAddr(Column { idx, values })) = dynamic_column_opt {
|
||||
column_indexes.push(Some(idx));
|
||||
column_values.push(Some(values));
|
||||
} else {
|
||||
column_indexes.push(None);
|
||||
column_values.push(None);
|
||||
}
|
||||
}
|
||||
|
||||
let merged_column_index =
|
||||
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
|
||||
let merge_column_values = MergedColumnValues {
|
||||
column_indexes: &column_indexes[..],
|
||||
column_values: &column_values,
|
||||
merge_row_order,
|
||||
};
|
||||
|
||||
serialize_column_mappable_to_u128(merged_column_index, &merge_column_values, wrt)?;
|
||||
}
|
||||
ColumnType::Bytes | ColumnType::Str => {
|
||||
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
|
||||
let mut bytes_columns: Vec<Option<BytesColumn>> = Vec::with_capacity(columns.len());
|
||||
for dynamic_column_opt in columns {
|
||||
match dynamic_column_opt {
|
||||
Some(DynamicColumn::Str(str_column)) => {
|
||||
column_indexes.push(Some(str_column.term_ord_column.idx.clone()));
|
||||
bytes_columns.push(Some(str_column.into()));
|
||||
}
|
||||
Some(DynamicColumn::Bytes(bytes_column)) => {
|
||||
column_indexes.push(Some(bytes_column.term_ord_column.idx.clone()));
|
||||
bytes_columns.push(Some(bytes_column));
|
||||
}
|
||||
_ => {
|
||||
column_indexes.push(None);
|
||||
bytes_columns.push(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
let merged_column_index =
|
||||
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
|
||||
merge_bytes_or_str_column(merged_column_index, &bytes_columns, merge_row_order, wrt)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn group_columns_for_merge(
|
||||
columnar_readers: &[&ColumnarReader],
|
||||
) -> io::Result<BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>>> {
|
||||
// Each column name may have multiple column types associated with it.
// For merging, we group columns by column type category, since columns of the
// same category can be merged together.
|
||||
let mut columns_grouped: HashMap<(String, ColumnTypeCategory), Vec<Option<DynamicColumn>>> =
|
||||
HashMap::new();
|
||||
|
||||
let num_columnars = columnar_readers.len();
|
||||
|
||||
for (columnar_id, columnar_reader) in columnar_readers.iter().enumerate() {
|
||||
let column_name_and_handle = columnar_reader.list_columns()?;
|
||||
for (column_name, handle) in column_name_and_handle {
|
||||
let column_type_category: ColumnTypeCategory = handle.column_type().into();
|
||||
let columns = columns_grouped
|
||||
.entry((column_name, column_type_category))
|
||||
.or_insert_with(|| vec![None; num_columnars]);
|
||||
let column = handle.open()?;
|
||||
columns[columnar_id] = Some(column);
|
||||
}
|
||||
}
|
||||
|
||||
let mut merge_columns: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
BTreeMap::default();
|
||||
|
||||
for ((column_name, col_category), mut columns) in columns_grouped {
|
||||
if col_category == ColumnTypeCategory::Numerical {
|
||||
coerce_numerical_columns_to_same_type(&mut columns);
|
||||
}
|
||||
let column_type = columns
|
||||
.iter()
|
||||
.flatten()
|
||||
.map(|col| col.column_type())
|
||||
.next()
|
||||
.unwrap();
|
||||
merge_columns.insert((column_name, column_type), columns);
|
||||
}
|
||||
|
||||
Ok(merge_columns)
|
||||
}
|
||||
|
||||
/// Coerce a set of numerical columns to the same type.
|
||||
///
|
||||
/// If all columns are already from the same type, keep this type
|
||||
/// (even if they could all be coerced to i64).
|
||||
fn coerce_numerical_columns_to_same_type(columns: &mut [Option<DynamicColumn>]) {
|
||||
let mut column_types: HashSet<NumericalType> = HashSet::default();
|
||||
let mut compatible_numerical_types = CompatibleNumericalTypes::default();
|
||||
for column in columns.iter().flatten() {
|
||||
let min_value: NumericalValue;
|
||||
let max_value: NumericalValue;
|
||||
match column {
|
||||
DynamicColumn::I64(column) => {
|
||||
min_value = column.min_value().into();
|
||||
max_value = column.max_value().into();
|
||||
}
|
||||
DynamicColumn::U64(column) => {
|
||||
min_value = column.min_value().into();
|
||||
max_value = column.max_value().into();
|
||||
}
|
||||
DynamicColumn::F64(column) => {
|
||||
min_value = column.min_value().into();
|
||||
max_value = column.max_value().into();
|
||||
}
|
||||
DynamicColumn::Bool(_)
|
||||
| DynamicColumn::IpAddr(_)
|
||||
| DynamicColumn::DateTime(_)
|
||||
| DynamicColumn::Bytes(_)
|
||||
| DynamicColumn::Str(_) => {
|
||||
panic!("We expected only numerical columns.");
|
||||
}
|
||||
}
|
||||
column_types.insert(column.column_type().numerical_type().unwrap());
|
||||
compatible_numerical_types.accept_value(min_value);
|
||||
compatible_numerical_types.accept_value(max_value);
|
||||
}
|
||||
if column_types.len() <= 1 {
|
||||
// No need to do anything. The columns are already all of the same type.
// This is also what lets us force a given type.
|
||||
|
||||
// TODO This works in a world where we do not allow a change of schema,
|
||||
// but in the future, we will have to pass some kind of schema to enforce
|
||||
// the logic.
|
||||
return;
|
||||
}
|
||||
let coerce_type = compatible_numerical_types.to_numerical_type();
|
||||
for column_opt in columns.iter_mut() {
|
||||
if let Some(column) = column_opt.take() {
|
||||
*column_opt = column.coerce_numerical(coerce_type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
columnar/src/columnar/merge/term_merger.rs (new file, 107 lines)
@@ -0,0 +1,107 @@
use std::cmp::Ordering;
|
||||
use std::collections::BinaryHeap;
|
||||
|
||||
use sstable::TermOrdinal;
|
||||
|
||||
use crate::Streamer;
|
||||
|
||||
pub struct HeapItem<'a> {
|
||||
pub streamer: Streamer<'a>,
|
||||
pub segment_ord: usize,
|
||||
}
|
||||
|
||||
impl<'a> PartialEq for HeapItem<'a> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.segment_ord == other.segment_ord
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Eq for HeapItem<'a> {}
|
||||
|
||||
impl<'a> PartialOrd for HeapItem<'a> {
|
||||
fn partial_cmp(&self, other: &HeapItem<'a>) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Ord for HeapItem<'a> {
|
||||
fn cmp(&self, other: &HeapItem<'a>) -> Ordering {
|
||||
(&other.streamer.key(), &other.segment_ord).cmp(&(&self.streamer.key(), &self.segment_ord))
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a list of sorted term streams,
/// returns an iterator over sorted unique terms.
///
/// The item yielded is actually a pair of:
/// - the term
/// - a slice with the ordinals of the segments containing
///   the term.
pub struct TermMerger<'a> {
|
||||
heap: BinaryHeap<HeapItem<'a>>,
|
||||
current_streamers: Vec<HeapItem<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> TermMerger<'a> {
|
||||
/// Creates a `TermMerger` from a list of term streams.
|
||||
pub fn new(streams: Vec<Streamer<'a>>) -> TermMerger<'a> {
|
||||
TermMerger {
|
||||
heap: BinaryHeap::new(),
|
||||
current_streamers: streams
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(ord, streamer)| HeapItem {
|
||||
streamer,
|
||||
segment_ord: ord,
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn matching_segments<'b: 'a>(
|
||||
&'b self,
|
||||
) -> impl 'b + Iterator<Item = (usize, TermOrdinal)> {
|
||||
self.current_streamers
|
||||
.iter()
|
||||
.map(|heap_item| (heap_item.segment_ord, heap_item.streamer.term_ord()))
|
||||
}
|
||||
|
||||
fn advance_segments(&mut self) {
|
||||
let streamers = &mut self.current_streamers;
|
||||
let heap = &mut self.heap;
|
||||
for mut heap_item in streamers.drain(..) {
|
||||
if heap_item.streamer.advance() {
|
||||
heap.push(heap_item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Advances the term iterator to the next term.
/// Returns true if there is indeed another term,
/// false if there is none.
pub fn advance(&mut self) -> bool {
|
||||
self.advance_segments();
|
||||
if let Some(head) = self.heap.pop() {
|
||||
self.current_streamers.push(head);
|
||||
while let Some(next_streamer) = self.heap.peek() {
|
||||
if self.current_streamers[0].streamer.key() != next_streamer.streamer.key() {
|
||||
break;
|
||||
}
|
||||
let next_heap_it = self.heap.pop().unwrap(); // safe : we peeked beforehand
|
||||
self.current_streamers.push(next_heap_it);
|
||||
}
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current term.
|
||||
///
|
||||
/// This method may be called
|
||||
/// if and only if advance() has been called before
|
||||
/// and "true" was returned.
|
||||
pub fn key(&self) -> &[u8] {
|
||||
self.current_streamers[0].streamer.key()
|
||||
}
|
||||
}
|
||||
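Aside: the heap-based k-way merge above can be illustrated with ordinary sorted string slices. The sketch below follows the same idea (pop the smallest head, group equal keys, re-push advanced streams) without the sstable streamer machinery; merge_sorted_unique is an illustrative name, not part of the crate.

use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Standalone sketch of the k-way merge idea behind TermMerger, using plain
// sorted string slices instead of sstable streamers. Illustrative only.
fn merge_sorted_unique<'a>(streams: &[&[&'a str]]) -> Vec<&'a str> {
    // Heap entries are (term, stream ordinal, position within the stream).
    let mut heap: BinaryHeap<Reverse<(&str, usize, usize)>> = streams
        .iter()
        .enumerate()
        .filter(|(_, s)| !s.is_empty())
        .map(|(ord, s)| Reverse((s[0], ord, 0)))
        .collect();
    let mut merged: Vec<&str> = Vec::new();
    while let Some(Reverse((term, ord, pos))) = heap.pop() {
        if merged.last() != Some(&term) {
            merged.push(term);
        }
        if pos + 1 < streams[ord].len() {
            heap.push(Reverse((streams[ord][pos + 1], ord, pos + 1)));
        }
    }
    merged
}

fn main() {
    let left: &[&str] = &["a", "c"];
    let right: &[&str] = &["a", "b"];
    // The duplicate "a" is emitted only once, and the result stays sorted.
    assert_eq!(merge_sorted_unique(&[left, right]), vec!["a", "b", "c"]);
}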
columnar/src/columnar/merge/tests.rs (new file, 258 lines)
@@ -0,0 +1,258 @@
use super::*;
|
||||
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};
|
||||
|
||||
fn make_columnar<T: Into<NumericalValue> + HasAssociatedColumnType + Copy>(
|
||||
column_name: &str,
|
||||
vals: &[T],
|
||||
) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_column_type(column_name, T::column_type(), false);
|
||||
for (row_id, val) in vals.iter().copied().enumerate() {
|
||||
dataframe_writer.record_numerical(row_id as RowId, column_name, val.into());
|
||||
}
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(vals.len() as RowId, None, &mut buffer)
|
||||
.unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_coercion_to_u64() {
|
||||
// i64 type
|
||||
let columnar1 = make_columnar("numbers", &[1i64]);
|
||||
// u64 type
|
||||
let columnar2 = make_columnar("numbers", &[u64::MAX]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_no_coercion_if_all_the_same() {
|
||||
let columnar1 = make_columnar("numbers", &[1u64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::U64)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_coercion_to_i64() {
|
||||
let columnar1 = make_columnar("numbers", &[-1i64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_missing_column() {
|
||||
let columnar1 = make_columnar("numbers", &[-1i64]);
|
||||
let columnar2 = make_columnar("numbers2", &[2u64]);
|
||||
let column_map: BTreeMap<(String, ColumnType), Vec<Option<DynamicColumn>>> =
|
||||
group_columns_for_merge(&[&columnar1, &columnar2]).unwrap();
|
||||
assert_eq!(column_map.len(), 2);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnType::I64)));
|
||||
{
|
||||
let columns = column_map
|
||||
.get(&("numbers".to_string(), ColumnType::I64))
|
||||
.unwrap();
|
||||
assert!(columns[0].is_some());
|
||||
assert!(columns[1].is_none());
|
||||
}
|
||||
{
|
||||
let columns = column_map
|
||||
.get(&("numbers2".to_string(), ColumnType::U64))
|
||||
.unwrap();
|
||||
assert!(columns[0].is_none());
|
||||
assert!(columns[1].is_some());
|
||||
}
|
||||
}
|
||||
|
||||
fn make_numerical_columnar_multiple_columns(
|
||||
columns: &[(&str, &[&[NumericalValue]])],
|
||||
) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
for (column_name, column_values) in columns {
|
||||
for (row_id, vals) in column_values.iter().enumerate() {
|
||||
for val in vals.iter() {
|
||||
dataframe_writer.record_numerical(row_id as u32, column_name, *val);
|
||||
}
|
||||
}
|
||||
}
|
||||
let num_rows = columns
|
||||
.iter()
|
||||
.map(|(_, val_rows)| val_rows.len() as RowId)
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(num_rows, None, &mut buffer)
|
||||
.unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
fn make_byte_columnar_multiple_columns(columns: &[(&str, &[&[&[u8]]])]) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
for (column_name, column_values) in columns {
|
||||
for (row_id, vals) in column_values.iter().enumerate() {
|
||||
for val in vals.iter() {
|
||||
dataframe_writer.record_bytes(row_id as u32, column_name, *val);
|
||||
}
|
||||
}
|
||||
}
|
||||
let num_rows = columns
|
||||
.iter()
|
||||
.map(|(_, val_rows)| val_rows.len() as RowId)
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(num_rows, None, &mut buffer)
|
||||
.unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
fn make_text_columnar_multiple_columns(columns: &[(&str, &[&[&str]])]) -> ColumnarReader {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
for (column_name, column_values) in columns {
|
||||
for (row_id, vals) in column_values.iter().enumerate() {
|
||||
for val in vals.iter() {
|
||||
dataframe_writer.record_str(row_id as u32, column_name, *val);
|
||||
}
|
||||
}
|
||||
}
|
||||
let num_rows = columns
|
||||
.iter()
|
||||
.map(|(_, val_rows)| val_rows.len() as RowId)
|
||||
.max()
|
||||
.unwrap_or(0u32);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer
|
||||
.serialize(num_rows, None, &mut buffer)
|
||||
.unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_columnar_numbers() {
|
||||
let columnar1 =
|
||||
make_numerical_columnar_multiple_columns(&[("numbers", &[&[NumericalValue::from(-1f64)]])]);
|
||||
let columnar2 = make_numerical_columnar_multiple_columns(&[(
|
||||
"numbers",
|
||||
&[&[], &[NumericalValue::from(-3f64)]],
|
||||
)]);
|
||||
let mut buffer = Vec::new();
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let stack_merge_order = StackMergeOrder::stack(columnars);
|
||||
crate::columnar::merge_columnar(
|
||||
columnars,
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 3);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("numbers").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::F64(vals) = dynamic_column else { panic!() };
|
||||
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(vals.first(0u32), Some(-1f64));
|
||||
assert_eq!(vals.first(1u32), None);
|
||||
assert_eq!(vals.first(2u32), Some(-3f64));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_columnar_texts() {
|
||||
let columnar1 = make_text_columnar_multiple_columns(&[("texts", &[&["a"]])]);
|
||||
let columnar2 = make_text_columnar_multiple_columns(&[("texts", &[&[], &["b"]])]);
|
||||
let mut buffer = Vec::new();
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let stack_merge_order = StackMergeOrder::stack(columnars);
|
||||
crate::columnar::merge_columnar(
|
||||
columnars,
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 3);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("texts").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
|
||||
let get_str_for_ord = |ord| {
|
||||
let mut out = String::new();
|
||||
vals.ord_to_str(ord, &mut out).unwrap();
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(vals.dictionary.num_terms(), 2);
|
||||
assert_eq!(get_str_for_ord(0), "a");
|
||||
assert_eq!(get_str_for_ord(1), "b");
|
||||
|
||||
let get_str_for_row = |row_id| {
|
||||
let term_ords: Vec<u64> = vals.term_ords(row_id).collect();
|
||||
assert!(term_ords.len() <= 1);
|
||||
let mut out = String::new();
|
||||
if term_ords.len() == 1 {
|
||||
vals.ord_to_str(term_ords[0], &mut out).unwrap();
|
||||
}
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(get_str_for_row(0), "a");
|
||||
assert_eq!(get_str_for_row(1), "");
|
||||
assert_eq!(get_str_for_row(2), "b");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_columnar_byte() {
|
||||
let columnar1 = make_byte_columnar_multiple_columns(&[("bytes", &[&[b"bbbb"], &[b"baaa"]])]);
|
||||
let columnar2 = make_byte_columnar_multiple_columns(&[("bytes", &[&[], &[b"a"]])]);
|
||||
let mut buffer = Vec::new();
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let stack_merge_order = StackMergeOrder::stack(columnars);
|
||||
crate::columnar::merge_columnar(
|
||||
columnars,
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 4);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("bytes").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
|
||||
let get_bytes_for_ord = |ord| {
|
||||
let mut out = Vec::new();
|
||||
vals.ord_to_bytes(ord, &mut out).unwrap();
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(vals.dictionary.num_terms(), 3);
|
||||
assert_eq!(get_bytes_for_ord(0), b"a");
|
||||
assert_eq!(get_bytes_for_ord(1), b"baaa");
|
||||
assert_eq!(get_bytes_for_ord(2), b"bbbb");
|
||||
|
||||
let get_bytes_for_row = |row_id| {
|
||||
let term_ords: Vec<u64> = vals.term_ords(row_id).collect();
|
||||
assert!(term_ords.len() <= 1);
|
||||
let mut out = Vec::new();
|
||||
if term_ords.len() == 1 {
|
||||
vals.ord_to_bytes(term_ords[0], &mut out).unwrap();
|
||||
}
|
||||
out
|
||||
};
|
||||
|
||||
assert_eq!(get_bytes_for_row(0), b"bbbb");
|
||||
assert_eq!(get_bytes_for_row(1), b"baaa");
|
||||
assert_eq!(get_bytes_for_row(2), b"");
|
||||
assert_eq!(get_bytes_for_row(3), b"a");
|
||||
}
|
||||
columnar/src/columnar/merge_index.rs (new file, 1 line)
@@ -0,0 +1 @@
@@ -1,10 +1,11 @@
|
||||
mod column_type;
|
||||
mod format_version;
|
||||
mod merge;
|
||||
mod merge_index;
|
||||
mod reader;
|
||||
mod writer;
|
||||
|
||||
pub use column_type::{ColumnType, HasAssociatedColumnType};
|
||||
pub use merge::{merge_columnar, MergeDocOrder};
|
||||
pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
|
||||
pub use reader::ColumnarReader;
|
||||
pub use writer::ColumnarWriter;
|
||||
|
||||
@@ -6,6 +6,7 @@ use sstable::{Dictionary, RangeSSTable};
|
||||
|
||||
use crate::columnar::{format_version, ColumnType};
|
||||
use crate::dynamic_column::DynamicColumnHandle;
|
||||
use crate::RowId;
|
||||
|
||||
fn io_invalid_data(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::InvalidData, msg)
|
||||
@@ -13,9 +14,11 @@ fn io_invalid_data(msg: String) -> io::Error {
|
||||
|
||||
/// The ColumnarReader makes it possible to access a set of columns
|
||||
/// associated to field names.
|
||||
#[derive(Clone)]
|
||||
pub struct ColumnarReader {
|
||||
column_dictionary: Dictionary<RangeSSTable>,
|
||||
column_data: FileSlice,
|
||||
num_rows: RowId,
|
||||
}
|
||||
|
||||
impl ColumnarReader {
|
||||
@@ -27,23 +30,27 @@ impl ColumnarReader {
|
||||
|
||||
fn open_inner(file_slice: FileSlice) -> io::Result<ColumnarReader> {
|
||||
let (file_slice_without_sstable_len, footer_slice) = file_slice
|
||||
.split_from_end(mem::size_of::<u64>() + format_version::VERSION_FOOTER_NUM_BYTES);
|
||||
.split_from_end(mem::size_of::<u64>() + 4 + format_version::VERSION_FOOTER_NUM_BYTES);
|
||||
let footer_bytes = footer_slice.read_bytes()?;
|
||||
let (mut sstable_len_bytes, version_footer_bytes) =
|
||||
footer_bytes.rsplit(format_version::VERSION_FOOTER_NUM_BYTES);
|
||||
let sstable_len = u64::deserialize(&mut &footer_bytes[0..8])?;
|
||||
let num_rows = u32::deserialize(&mut &footer_bytes[8..12])?;
|
||||
let version_footer_bytes: [u8; format_version::VERSION_FOOTER_NUM_BYTES] =
|
||||
version_footer_bytes.as_slice().try_into().unwrap();
|
||||
footer_bytes[12..].try_into().unwrap();
|
||||
let _version = format_version::parse_footer(version_footer_bytes)?;
|
||||
let sstable_len = u64::deserialize(&mut sstable_len_bytes)?;
|
||||
let (column_data, sstable) =
|
||||
file_slice_without_sstable_len.split_from_end(sstable_len as usize);
|
||||
let column_dictionary = Dictionary::open(sstable)?;
|
||||
Ok(ColumnarReader {
|
||||
column_dictionary,
|
||||
column_data,
|
||||
num_rows,
|
||||
})
|
||||
}
|
||||
|
||||
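Aside: the trailer layout read by open_inner above can be sketched as follows. The layout and the little-endian encoding are inferred from this diff (num_rows is appended right after the sstable length); VERSION_FOOTER_NUM_BYTES is given a placeholder value here and parse_trailer is an illustrative name, not the crate's API.

// Sketch of the columnar trailer implied by this diff:
// [.. column data ..][sstable][sstable_len: u64 LE][num_rows: u32 LE][version footer].
// The real VERSION_FOOTER_NUM_BYTES lives in format_version; 8 is only a placeholder.
const VERSION_FOOTER_NUM_BYTES: usize = 8;

fn parse_trailer(trailer: &[u8]) -> (u64, u32) {
    assert_eq!(trailer.len(), 8 + 4 + VERSION_FOOTER_NUM_BYTES);
    let sstable_len = u64::from_le_bytes(trailer[0..8].try_into().unwrap());
    let num_rows = u32::from_le_bytes(trailer[8..12].try_into().unwrap());
    // trailer[12..] holds the version footer, checked by format_version::parse_footer.
    (sstable_len, num_rows)
}

fn main() {
    let mut trailer = Vec::new();
    trailer.extend_from_slice(&42u64.to_le_bytes());
    trailer.extend_from_slice(&3u32.to_le_bytes());
    trailer.extend_from_slice(&[0u8; VERSION_FOOTER_NUM_BYTES]);
    assert_eq!(parse_trailer(&trailer), (42u64, 3u32));
}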
pub fn num_rows(&self) -> RowId {
|
||||
self.num_rows
|
||||
}
|
||||
|
||||
// TODO Add unit tests
|
||||
pub fn list_columns(&self) -> io::Result<Vec<(String, DynamicColumnHandle)>> {
|
||||
let mut stream = self.column_dictionary.stream()?;
|
||||
@@ -130,7 +137,7 @@ mod tests {
|
||||
columnar_writer.record_column_type("col1", ColumnType::Str, false);
|
||||
columnar_writer.record_column_type("col2", ColumnType::U64, false);
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(1, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(1, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
let columns = columnar.list_columns().unwrap();
|
||||
assert_eq!(columns.len(), 2);
|
||||
@@ -146,7 +153,7 @@ mod tests {
|
||||
columnar_writer.record_column_type("count", ColumnType::U64, false);
|
||||
columnar_writer.record_numerical(1, "count", 1u64);
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(2, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(2, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
let columns = columnar.list_columns().unwrap();
|
||||
assert_eq!(columns.len(), 1);
|
||||
|
||||
@@ -41,10 +41,31 @@ impl ColumnWriter {
|
||||
pub(super) fn operation_iterator<'a, V: SymbolValue>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
old_to_new_ids_opt: Option<&[RowId]>,
|
||||
buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
|
||||
buffer.clear();
|
||||
self.values.read_to_end(arena, buffer);
|
||||
if let Some(old_to_new_ids) = old_to_new_ids_opt {
|
||||
// TODO avoid the extra deserialization / serialization.
|
||||
let mut sorted_ops: Vec<(RowId, ColumnOperation<V>)> = Vec::new();
|
||||
let mut new_doc = 0u32;
|
||||
let mut cursor = &buffer[..];
|
||||
for op in std::iter::from_fn(|| ColumnOperation::<V>::deserialize(&mut cursor)) {
|
||||
if let ColumnOperation::NewDoc(doc) = &op {
|
||||
new_doc = old_to_new_ids[*doc as usize];
|
||||
sorted_ops.push((new_doc, ColumnOperation::NewDoc(new_doc)));
|
||||
} else {
|
||||
sorted_ops.push((new_doc, op));
|
||||
}
|
||||
}
|
||||
// stable sort is crucial here.
|
||||
sorted_ops.sort_by_key(|(new_doc_id, _)| *new_doc_id);
|
||||
buffer.clear();
|
||||
for (_, op) in sorted_ops {
|
||||
buffer.extend_from_slice(op.serialize().as_ref());
|
||||
}
|
||||
}
|
||||
let mut cursor: &[u8] = &buffer[..];
|
||||
std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
|
||||
}
|
||||
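Aside: the comment above notes that the sort must be stable. A tiny sketch of why, using plain (doc id, value) pairs rather than ColumnOperation; all names here are illustrative.

// Operations are keyed only by their (new) doc id, so values recorded for the
// same doc must keep their original order: a stable sort guarantees that.
fn main() {
    let old_to_new = [2u32, 0, 1]; // old doc id -> new doc id
    // (old doc id, value) pairs in recording order.
    let ops = [(0u32, "a"), (0, "b"), (1, "c"), (2, "d")];
    let mut remapped: Vec<(u32, &str)> = ops
        .iter()
        .map(|&(old_doc, val)| (old_to_new[old_doc as usize], val))
        .collect();
    // A stable sort keeps "a" before "b" for the doc that moved to id 2.
    remapped.sort_by_key(|&(new_doc, _)| new_doc);
    assert_eq!(remapped, vec![(0u32, "c"), (1, "d"), (2, "a"), (2, "b")]);
}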
@@ -114,7 +135,7 @@ impl NumericalColumnWriter {
|
||||
/// State used to store what types are still acceptable
|
||||
/// after having seen a set of numerical values.
|
||||
#[derive(Clone, Copy)]
|
||||
enum CompatibleNumericalTypes {
|
||||
pub(crate) enum CompatibleNumericalTypes {
|
||||
Dynamic {
|
||||
all_values_within_i64_range: bool,
|
||||
all_values_within_u64_range: bool,
|
||||
@@ -132,7 +153,7 @@ impl Default for CompatibleNumericalTypes {
|
||||
}
|
||||
|
||||
impl CompatibleNumericalTypes {
|
||||
fn is_type_accepted(&self, numerical_type: NumericalType) -> bool {
|
||||
pub fn is_type_accepted(&self, numerical_type: NumericalType) -> bool {
|
||||
match self {
|
||||
CompatibleNumericalTypes::Dynamic {
|
||||
all_values_within_i64_range,
|
||||
@@ -148,7 +169,7 @@ impl CompatibleNumericalTypes {
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_value(&mut self, numerical_value: NumericalValue) {
|
||||
pub fn accept_value(&mut self, numerical_value: NumericalValue) {
|
||||
match self {
|
||||
CompatibleNumericalTypes::Dynamic {
|
||||
all_values_within_i64_range,
|
||||
@@ -189,10 +210,12 @@ impl CompatibleNumericalTypes {
|
||||
}
|
||||
|
||||
impl NumericalColumnWriter {
|
||||
pub fn column_type_and_cardinality(&self, num_docs: RowId) -> (NumericalType, Cardinality) {
|
||||
let numerical_type = self.compatible_numerical_types.to_numerical_type();
|
||||
let cardinality = self.column_writer.get_cardinality(num_docs);
|
||||
(numerical_type, cardinality)
|
||||
pub fn numerical_type(&self) -> NumericalType {
|
||||
self.compatible_numerical_types.to_numerical_type()
|
||||
}
|
||||
|
||||
pub fn cardinality(&self, num_docs: RowId) -> Cardinality {
|
||||
self.column_writer.get_cardinality(num_docs)
|
||||
}
|
||||
|
||||
pub fn record_numerical_value(
|
||||
@@ -208,9 +231,11 @@ impl NumericalColumnWriter {
|
||||
pub(super) fn operation_iterator<'a>(
|
||||
self,
|
||||
arena: &MemoryArena,
|
||||
old_to_new_ids: Option<&[RowId]>,
|
||||
buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
|
||||
self.column_writer.operation_iterator(arena, buffer)
|
||||
self.column_writer
|
||||
.operation_iterator(arena, old_to_new_ids, buffer)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -251,9 +276,11 @@ impl StrOrBytesColumnWriter {
|
||||
pub(super) fn operation_iterator<'a>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
old_to_new_ids: Option<&[RowId]>,
|
||||
byte_buffer: &'a mut Vec<u8>,
|
||||
) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
|
||||
self.column_writer.operation_iterator(arena, byte_buffer)
|
||||
self.column_writer
|
||||
.operation_iterator(arena, old_to_new_ids, byte_buffer)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,15 +7,16 @@ use std::io;
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
use column_operation::ColumnOperation;
|
||||
pub(crate) use column_writers::CompatibleNumericalTypes;
|
||||
use common::CountingWriter;
|
||||
use serializer::ColumnarSerializer;
|
||||
pub(crate) use serializer::ColumnarSerializer;
|
||||
use stacker::{Addr, ArenaHashMap, MemoryArena};
|
||||
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::column_values::{
|
||||
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
|
||||
};
|
||||
use crate::columnar::column_type::{ColumnType, ColumnTypeCategory};
|
||||
use crate::columnar::column_type::ColumnType;
|
||||
use crate::columnar::writer::column_writers::{
|
||||
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
|
||||
};
|
||||
@@ -44,7 +45,7 @@ struct SpareBuffers {
|
||||
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
|
||||
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh we ended up mixing integer and floats.
|
||||
/// let mut wrt: Vec<u8> = Vec::new();
|
||||
/// columnar_writer.serialize(2u32, &mut wrt).unwrap();
|
||||
/// columnar_writer.serialize(2u32, None, &mut wrt).unwrap();
|
||||
/// ```
|
||||
pub struct ColumnarWriter {
|
||||
numerical_field_hash_map: ArenaHashMap,
|
||||
@@ -103,6 +104,48 @@ impl ColumnarWriter {
|
||||
+ self.datetime_field_hash_map.mem_usage()
|
||||
}
|
||||
|
||||
/// Returns the list of doc ids from 0..num_docs sorted by the `sort_field`
|
||||
/// column.
|
||||
///
|
||||
/// If the column is multivalued, use the first value for scoring.
|
||||
/// If no value is associated to a specific row, the document is assigned
|
||||
/// the lowest possible score.
|
||||
///
|
||||
/// The sort applied is stable.
|
||||
pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> {
|
||||
let Some(numerical_col_writer) =
|
||||
self.numerical_field_hash_map.get::<NumericalColumnWriter>(sort_field.as_bytes()) else {
|
||||
return Vec::new();
|
||||
};
|
||||
let mut symbols_buffer = Vec::new();
|
||||
let mut values = Vec::new();
|
||||
let mut last_doc_opt: Option<RowId> = None;
|
||||
for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) {
|
||||
match op {
|
||||
ColumnOperation::NewDoc(doc) => {
|
||||
last_doc_opt = Some(doc);
|
||||
}
|
||||
ColumnOperation::Value(numerical_value) => {
|
||||
if let Some(last_doc) = last_doc_opt {
|
||||
let score: f32 = f64::coerce(numerical_value) as f32;
|
||||
values.push((score, last_doc));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for doc in values.len() as u32..num_docs {
|
||||
values.push((0.0f32, doc));
|
||||
}
|
||||
values.sort_by(|(left_score, _), (right_score, _)| {
|
||||
if reversed {
|
||||
right_score.partial_cmp(left_score).unwrap()
|
||||
} else {
|
||||
left_score.partial_cmp(right_score).unwrap()
|
||||
}
|
||||
});
|
||||
values.into_iter().map(|(_score, doc)| doc).collect()
|
||||
}
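Aside: a minimal sketch of the ordering contract documented above, assuming a single-valued column: rows without a value take the lowest score and ties keep their original order (stable sort). The helper below is illustrative and does not use the crate's types.

// Illustrative stand-in for the sort_order contract: one Option<f32> per row.
fn sort_order(values: &[Option<f32>], reversed: bool) -> Vec<u32> {
    let mut keyed: Vec<(f32, u32)> = values
        .iter()
        .enumerate()
        .map(|(doc, val)| (val.unwrap_or(f32::MIN), doc as u32))
        .collect();
    // slice::sort_by is stable, so equal scores keep their original doc order.
    keyed.sort_by(|(a, _), (b, _)| {
        if reversed { b.partial_cmp(a).unwrap() } else { a.partial_cmp(b).unwrap() }
    });
    keyed.into_iter().map(|(_, doc)| doc).collect()
}

fn main() {
    // Doc 1 has no value, so it sorts first in ascending order.
    assert_eq!(sort_order(&[Some(2.0), None, Some(1.0)], false), vec![1u32, 2, 0]);
}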
|
||||
|
||||
/// Records a column type. This is useful to bypass the coercion process,
/// to make sure the column is present in the resulting columnar even if it is
/// empty, or to set `sort_values_within_row`.
|
||||
@@ -277,37 +320,47 @@ impl ColumnarWriter {
|
||||
},
|
||||
);
|
||||
}
|
||||
pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
|
||||
pub fn serialize(
|
||||
&mut self,
|
||||
num_docs: RowId,
|
||||
old_to_new_row_ids: Option<&[RowId]>,
|
||||
wrt: &mut dyn io::Write,
|
||||
) -> io::Result<()> {
|
||||
let mut serializer = ColumnarSerializer::new(wrt);
|
||||
let mut columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
|
||||
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
|
||||
.numerical_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Numerical, addr))
|
||||
.map(|(column_name, addr, _)| {
|
||||
let numerical_column_writer: NumericalColumnWriter =
|
||||
self.numerical_field_hash_map.read(addr);
|
||||
let column_type = numerical_column_writer.numerical_type().into();
|
||||
(column_name, column_type, addr)
|
||||
})
|
||||
.collect();
|
||||
columns.extend(
|
||||
self.bytes_field_hash_map
|
||||
.iter()
|
||||
.map(|(term, addr, _)| (term, ColumnTypeCategory::Bytes, addr)),
|
||||
.map(|(term, addr, _)| (term, ColumnType::Bytes, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.str_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Str, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.bool_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Bool, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.ip_addr_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::IpAddr, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.datetime_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::DateTime, addr)),
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)),
|
||||
);
|
||||
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
|
||||
|
||||
@@ -315,20 +368,24 @@ impl ColumnarWriter {
|
||||
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
|
||||
for (column_name, column_type, addr) in columns {
|
||||
match column_type {
|
||||
ColumnTypeCategory::Bool => {
|
||||
ColumnType::Bool => {
|
||||
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
|
||||
let cardinality = column_writer.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, ColumnType::Bool);
|
||||
serializer.serialize_column(column_name, column_type);
|
||||
serialize_bool_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::IpAddr => {
|
||||
ColumnType::IpAddr => {
|
||||
let column_writer: ColumnWriter = self.ip_addr_field_hash_map.read(addr);
|
||||
let cardinality = column_writer.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
@@ -336,50 +393,64 @@ impl ColumnarWriter {
|
||||
serialize_ip_addr_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::Bytes | ColumnTypeCategory::Str => {
|
||||
let (column_type, str_column_writer): (ColumnType, StrOrBytesColumnWriter) =
|
||||
if column_type == ColumnTypeCategory::Bytes {
|
||||
(ColumnType::Bytes, self.bytes_field_hash_map.read(addr))
|
||||
ColumnType::Bytes | ColumnType::Str => {
|
||||
let str_or_bytes_column_writer: StrOrBytesColumnWriter =
|
||||
if column_type == ColumnType::Bytes {
|
||||
self.bytes_field_hash_map.read(addr)
|
||||
} else {
|
||||
(ColumnType::Str, self.str_field_hash_map.read(addr))
|
||||
self.str_field_hash_map.read(addr)
|
||||
};
|
||||
let dictionary_builder =
|
||||
&dictionaries[str_column_writer.dictionary_id as usize];
|
||||
let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
|
||||
&dictionaries[str_or_bytes_column_writer.dictionary_id as usize];
|
||||
let cardinality = str_or_bytes_column_writer
|
||||
.column_writer
|
||||
.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, column_type);
|
||||
serialize_bytes_or_str_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
str_column_writer.sort_values_within_row,
|
||||
str_or_bytes_column_writer.sort_values_within_row,
|
||||
dictionary_builder,
|
||||
str_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
str_or_bytes_column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::Numerical => {
|
||||
ColumnType::F64 | ColumnType::I64 | ColumnType::U64 => {
|
||||
let numerical_column_writer: NumericalColumnWriter =
|
||||
self.numerical_field_hash_map.read(addr);
|
||||
let (numerical_type, cardinality) =
|
||||
numerical_column_writer.column_type_and_cardinality(num_docs);
|
||||
let cardinality = numerical_column_writer.cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
serializer.serialize_column(column_name, ColumnType::from(numerical_type));
|
||||
serializer.serialize_column(column_name, column_type);
|
||||
let numerical_type = column_type.numerical_type().unwrap();
|
||||
serialize_numerical_column(
|
||||
cardinality,
|
||||
num_docs,
|
||||
numerical_type,
|
||||
numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
numerical_column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
ColumnTypeCategory::DateTime => {
|
||||
ColumnType::DateTime => {
|
||||
let column_writer: ColumnWriter = self.datetime_field_hash_map.read(addr);
|
||||
let cardinality = column_writer.get_cardinality(num_docs);
|
||||
let mut column_serializer =
|
||||
@@ -388,18 +459,24 @@ impl ColumnarWriter {
|
||||
cardinality,
|
||||
num_docs,
|
||||
NumericalType::I64,
|
||||
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
|
||||
column_writer.operation_iterator(
|
||||
arena,
|
||||
old_to_new_row_ids,
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
buffers,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
}
|
||||
};
|
||||
}
|
||||
serializer.finalize()?;
|
||||
serializer.finalize(num_docs)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
|
||||
// Column: [Column Index, Column Values, column index num bytes U32::LE]
|
||||
fn serialize_bytes_or_str_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
@@ -547,7 +624,7 @@ fn send_to_serialize_column_mappable_to_u128<
|
||||
>(
|
||||
op_iterator: impl Iterator<Item = ColumnOperation<T>>,
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
num_rows: RowId,
|
||||
value_index_builders: &mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<T>,
|
||||
mut wrt: impl io::Write,
|
||||
@@ -569,31 +646,30 @@ where
|
||||
Cardinality::Optional => {
|
||||
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
|
||||
consume_operation_iterator(op_iterator, optional_index_builder, values);
|
||||
let optional_index = optional_index_builder.finish(num_docs);
|
||||
SerializableColumnIndex::Optional(Box::new(optional_index))
|
||||
let optional_index = optional_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Optional {
|
||||
num_rows,
|
||||
non_null_row_ids: Box::new(optional_index),
|
||||
}
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
|
||||
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_docs);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
|
||||
}
|
||||
};
|
||||
crate::column::serialize_column_mappable_to_u128(
|
||||
serializable_column_index,
|
||||
|| values.iter().cloned(),
|
||||
values.len() as u32,
|
||||
&&values[..],
|
||||
&mut wrt,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn sort_values_within_row_in_place(
|
||||
multivalued_index: &impl ColumnValues<RowId>,
|
||||
values: &mut Vec<u64>,
|
||||
) {
|
||||
fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut Vec<u64>) {
|
||||
let mut start_index: usize = 0;
|
||||
for end_index in multivalued_index.iter() {
|
||||
for end_index in multivalued_index.iter().copied() {
|
||||
let end_index = end_index as usize;
|
||||
values[start_index..end_index].sort_unstable();
|
||||
start_index = end_index;
|
||||
@@ -603,7 +679,7 @@ fn sort_values_within_row_in_place(
|
||||
fn send_to_serialize_column_mappable_to_u64(
|
||||
op_iterator: impl Iterator<Item = ColumnOperation<u64>>,
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
num_rows: RowId,
|
||||
sort_values_within_row: bool,
|
||||
value_index_builders: &mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<u64>,
|
||||
@@ -625,22 +701,25 @@ where
|
||||
Cardinality::Optional => {
|
||||
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
|
||||
consume_operation_iterator(op_iterator, optional_index_builder, values);
|
||||
let optional_index = optional_index_builder.finish(num_docs);
|
||||
SerializableColumnIndex::Optional(Box::new(optional_index))
|
||||
let optional_index = optional_index_builder.finish(num_rows);
|
||||
SerializableColumnIndex::Optional {
|
||||
non_null_row_ids: Box::new(optional_index),
|
||||
num_rows,
|
||||
}
|
||||
}
|
||||
Cardinality::Multivalued => {
|
||||
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
|
||||
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_docs);
|
||||
let multivalued_index = multivalued_index_builder.finish(num_rows);
|
||||
if sort_values_within_row {
|
||||
sort_values_within_row_in_place(&multivalued_index, values);
|
||||
sort_values_within_row_in_place(multivalued_index, values);
|
||||
}
|
||||
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
|
||||
}
|
||||
};
|
||||
crate::column::serialize_column_mappable_to_u64(
|
||||
serializable_column_index,
|
||||
&VecColumn::from(&values[..]),
|
||||
&&values[..],
|
||||
&mut wrt,
|
||||
)?;
|
||||
Ok(())
|
||||
@@ -693,7 +772,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 6);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
@@ -722,7 +801,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 4);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
|
||||
@@ -745,7 +824,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 2);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
@@ -764,7 +843,7 @@ mod tests {
|
||||
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
|
||||
let mut buffer = Vec::new();
|
||||
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
|
||||
.operation_iterator(&mut arena, &mut buffer)
|
||||
.operation_iterator(&mut arena, None, &mut buffer)
|
||||
.collect();
|
||||
assert_eq!(symbols.len(), 3);
|
||||
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use common::CountingWriter;
|
||||
use common::{BinarySerializable, CountingWriter};
|
||||
use sstable::value::RangeValueWriter;
|
||||
use sstable::RangeSSTable;
|
||||
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::RowId;
|
||||
|
||||
pub struct ColumnarSerializer<W: io::Write> {
|
||||
wrt: CountingWriter<W>,
|
||||
@@ -46,11 +47,12 @@ impl<W: io::Write> ColumnarSerializer<W> {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn finalize(mut self) -> io::Result<()> {
|
||||
pub(crate) fn finalize(mut self, num_rows: RowId) -> io::Result<()> {
|
||||
let sstable_bytes: Vec<u8> = self.sstable_range.finish()?;
|
||||
let sstable_num_bytes: u64 = sstable_bytes.len() as u64;
|
||||
self.wrt.write_all(&sstable_bytes)?;
|
||||
self.wrt.write_all(&sstable_num_bytes.to_le_bytes()[..])?;
|
||||
num_rows.serialize(&mut self.wrt)?;
|
||||
self.wrt
|
||||
.write_all(&super::super::format_version::footer())?;
|
||||
self.wrt.flush()?;
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use crate::column_index::SerializableOptionalIndex;
|
||||
use crate::column_values::{ColumnValues, VecColumn};
|
||||
use crate::iterable::Iterable;
|
||||
use crate::RowId;
|
||||
|
||||
/// The `IndexBuilder` interprets a sequence of
|
||||
@@ -29,34 +28,15 @@ pub struct OptionalIndexBuilder {
|
||||
docs: Vec<RowId>,
|
||||
}
|
||||
|
||||
struct SingleValueArrayIndex<'a> {
|
||||
// RowIds with a value, in a strictly increasing order
|
||||
row_ids: &'a [RowId],
|
||||
num_rows: RowId,
|
||||
}
|
||||
|
||||
impl<'a> SerializableOptionalIndex<'a> for SingleValueArrayIndex<'a> {
|
||||
fn num_rows(&self) -> RowId {
|
||||
self.num_rows
|
||||
}
|
||||
|
||||
fn non_null_rows(&self) -> Box<dyn Iterator<Item = RowId> + 'a> {
|
||||
Box::new(self.row_ids.iter().copied())
|
||||
}
|
||||
}
|
||||
|
||||
impl OptionalIndexBuilder {
|
||||
pub fn finish<'a>(&'a mut self, num_rows: RowId) -> impl SerializableOptionalIndex + 'a {
|
||||
pub fn finish<'a>(&'a mut self, num_rows: RowId) -> impl Iterable<RowId> + 'a {
|
||||
debug_assert!(self
|
||||
.docs
|
||||
.last()
|
||||
.copied()
|
||||
.map(|last_doc| last_doc < num_rows)
|
||||
.unwrap_or(true));
|
||||
SingleValueArrayIndex {
|
||||
row_ids: &self.docs[..],
|
||||
num_rows,
|
||||
}
|
||||
&self.docs[..]
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
@@ -84,14 +64,10 @@ pub struct MultivaluedIndexBuilder {
|
||||
}
|
||||
|
||||
impl MultivaluedIndexBuilder {
|
||||
pub fn finish(&mut self, num_docs: RowId) -> impl ColumnValues<u32> + '_ {
|
||||
pub fn finish(&mut self, num_docs: RowId) -> &[u32] {
|
||||
self.start_offsets
|
||||
.resize(num_docs as usize + 1, self.total_num_vals_seen);
|
||||
VecColumn {
|
||||
values: &&self.start_offsets[..],
|
||||
min_value: 0,
|
||||
max_value: self.start_offsets.last().copied().unwrap_or(0),
|
||||
}
|
||||
&self.start_offsets[..]
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
@@ -149,7 +125,7 @@ mod tests {
|
||||
assert_eq!(
|
||||
&opt_value_index_builder
|
||||
.finish(1u32)
|
||||
.non_null_rows()
|
||||
.boxed_iter()
|
||||
.collect::<Vec<u32>>(),
|
||||
&[0]
|
||||
);
|
||||
@@ -159,7 +135,7 @@ mod tests {
|
||||
assert_eq!(
|
||||
&opt_value_index_builder
|
||||
.finish(2u32)
|
||||
.non_null_rows()
|
||||
.boxed_iter()
|
||||
.collect::<Vec<u32>>(),
|
||||
&[1]
|
||||
);
|
||||
@@ -177,6 +153,7 @@ mod tests {
|
||||
multivalued_value_index_builder
|
||||
.finish(4u32)
|
||||
.iter()
|
||||
.copied()
|
||||
.collect::<Vec<u32>>(),
|
||||
vec![0, 0, 2, 3, 3]
|
||||
);
|
||||
@@ -188,6 +165,7 @@ mod tests {
|
||||
multivalued_value_index_builder
|
||||
.finish(4u32)
|
||||
.iter()
|
||||
.copied()
|
||||
.collect::<Vec<u32>>(),
|
||||
vec![0, 0, 0, 2, 2]
|
||||
);
|
||||
|
||||
@@ -8,7 +8,7 @@ use common::{HasLen, OwnedBytes};
|
||||
use crate::column::{BytesColumn, Column, StrColumn};
|
||||
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::{DateTime, NumericalType};
|
||||
use crate::{Cardinality, DateTime, NumericalType};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DynamicColumn {
|
||||
@@ -23,6 +23,18 @@ pub enum DynamicColumn {
|
||||
}
|
||||
|
||||
impl DynamicColumn {
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
DynamicColumn::Bool(c) => c.get_cardinality(),
|
||||
DynamicColumn::I64(c) => c.get_cardinality(),
|
||||
DynamicColumn::U64(c) => c.get_cardinality(),
|
||||
DynamicColumn::F64(c) => c.get_cardinality(),
|
||||
DynamicColumn::IpAddr(c) => c.get_cardinality(),
|
||||
DynamicColumn::DateTime(c) => c.get_cardinality(),
|
||||
DynamicColumn::Bytes(c) => c.ords().get_cardinality(),
|
||||
DynamicColumn::Str(c) => c.ords().get_cardinality(),
|
||||
}
|
||||
}
|
||||
pub fn column_type(&self) -> ColumnType {
|
||||
match self {
|
||||
DynamicColumn::Bool(_) => ColumnType::Bool,
|
||||
@@ -36,6 +48,14 @@ impl DynamicColumn {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn coerce_numerical(self, target_numerical_type: NumericalType) -> Option<Self> {
|
||||
match target_numerical_type {
|
||||
NumericalType::I64 => self.coerce_to_i64(),
|
||||
NumericalType::U64 => self.coerce_to_u64(),
|
||||
NumericalType::F64 => self.coerce_to_f64(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_numerical(&self) -> bool {
|
||||
self.column_type().numerical_type().is_some()
|
||||
}
|
||||
@@ -50,7 +70,7 @@ impl DynamicColumn {
|
||||
self.column_type().numerical_type() == Some(NumericalType::U64)
|
||||
}
|
||||
|
||||
pub fn coerce_to_f64(self) -> Option<DynamicColumn> {
|
||||
fn coerce_to_f64(self) -> Option<DynamicColumn> {
|
||||
match self {
|
||||
DynamicColumn::I64(column) => Some(DynamicColumn::F64(Column {
|
||||
idx: column.idx,
|
||||
@@ -64,7 +84,7 @@ impl DynamicColumn {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub fn coerce_to_i64(self) -> Option<DynamicColumn> {
|
||||
fn coerce_to_i64(self) -> Option<DynamicColumn> {
|
||||
match self {
|
||||
DynamicColumn::U64(column) => {
|
||||
if column.max_value() > i64::MAX as u64 {
|
||||
@@ -79,7 +99,7 @@ impl DynamicColumn {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub fn coerce_to_u64(self) -> Option<DynamicColumn> {
|
||||
fn coerce_to_u64(self) -> Option<DynamicColumn> {
|
||||
match self {
|
||||
DynamicColumn::I64(column) => {
|
||||
if column.min_value() < 0 {
|
||||
@@ -215,10 +235,8 @@ impl DynamicColumnHandle {
|
||||
|
||||
fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
|
||||
let dynamic_column: DynamicColumn = match self.column_type {
|
||||
ColumnType::Bytes => {
|
||||
crate::column::open_column_bytes::<BytesColumn>(column_bytes)?.into()
|
||||
}
|
||||
ColumnType::Str => crate::column::open_column_bytes::<StrColumn>(column_bytes)?.into(),
|
||||
ColumnType::Bytes => crate::column::open_column_bytes(column_bytes)?.into(),
|
||||
ColumnType::Str => crate::column::open_column_str(column_bytes)?.into(),
|
||||
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
|
||||
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
|
||||
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),
|
||||
|
||||
columnar/src/iterable.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
use std::ops::Range;
|
||||
|
||||
pub trait Iterable<T = u64> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_>;
|
||||
}
|
||||
|
||||
impl<'a, T: Copy> Iterable<T> for &'a [T] {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
Box::new(self.iter().copied())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy> Iterable<T> for Range<T>
|
||||
where Range<T>: Iterator<Item = T>
|
||||
{
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
@@ -11,10 +11,11 @@ use std::io;
|
||||
|
||||
mod column;
|
||||
mod column_index;
|
||||
mod column_values;
|
||||
pub mod column_values;
|
||||
mod columnar;
|
||||
mod dictionary;
|
||||
mod dynamic_column;
|
||||
mod iterable;
|
||||
pub(crate) mod utils;
|
||||
mod value;
|
||||
|
||||
@@ -23,7 +24,7 @@ pub use column_index::ColumnIndex;
|
||||
pub use column_values::{ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64};
|
||||
pub use columnar::{
|
||||
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
|
||||
MergeDocOrder,
|
||||
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder,
|
||||
};
|
||||
use sstable::VoidSSTable;
|
||||
pub use value::{NumericalType, NumericalValue};
|
||||
@@ -31,6 +32,13 @@ pub use value::{NumericalType, NumericalValue};
|
||||
pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle};
|
||||
|
||||
pub type RowId = u32;
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct RowAddr {
|
||||
pub segment_ord: u32,
|
||||
pub row_id: RowId,
|
||||
}
|
||||
|
||||
pub use sstable::Dictionary;
|
||||
pub type Streamer<'a> = sstable::Streamer<'a, VoidSSTable>;
|
||||
|
||||
@@ -39,6 +47,12 @@ pub struct DateTime {
|
||||
pub timestamp_micros: i64,
|
||||
}
|
||||
|
||||
impl DateTime {
|
||||
pub fn into_timestamp_micros(self) -> i64 {
|
||||
self.timestamp_micros
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct InvalidData;
|
||||
|
||||
@@ -66,6 +80,12 @@ pub enum Cardinality {
|
||||
}
|
||||
|
||||
impl Cardinality {
|
||||
pub fn is_optional(&self) -> bool {
|
||||
matches!(self, Cardinality::Optional)
|
||||
}
|
||||
pub fn is_multivalue(&self) -> bool {
|
||||
matches!(self, Cardinality::Multivalued)
|
||||
}
|
||||
pub(crate) fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ fn test_dataframe_writer_str() {
|
||||
dataframe_writer.record_str(1u32, "my_string", "hello");
|
||||
dataframe_writer.record_str(3u32, "my_string", "helloeee");
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
@@ -26,7 +26,7 @@ fn test_dataframe_writer_bytes() {
|
||||
dataframe_writer.record_bytes(1u32, "my_string", b"hello");
|
||||
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
@@ -40,7 +40,7 @@ fn test_dataframe_writer_bool() {
|
||||
dataframe_writer.record_bool(1u32, "bool.value", false);
|
||||
dataframe_writer.record_bool(3u32, "bool.value", true);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
|
||||
@@ -63,7 +63,7 @@ fn test_dataframe_writer_u64_multivalued() {
|
||||
dataframe_writer.record_numerical(6u32, "divisor", 2u64);
|
||||
dataframe_writer.record_numerical(6u32, "divisor", 3u64);
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(7, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(7, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
|
||||
@@ -84,7 +84,7 @@ fn test_dataframe_writer_ip_addr() {
|
||||
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
|
||||
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(5, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
|
||||
@@ -113,7 +113,7 @@ fn test_dataframe_writer_numerical() {
|
||||
dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
|
||||
dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
|
||||
let mut buffer: Vec<u8> = Vec::new();
|
||||
dataframe_writer.serialize(6, &mut buffer).unwrap();
|
||||
dataframe_writer.serialize(6, None, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("srical.value").unwrap();
|
||||
@@ -144,7 +144,7 @@ fn test_dictionary_encoded_str() {
|
||||
columnar_writer.record_str(3, "my.column", "c");
|
||||
columnar_writer.record_str(3, "my.column2", "different_column!");
|
||||
columnar_writer.record_str(4, "my.column", "b");
|
||||
columnar_writer.serialize(5, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let col_handles = columnar_reader.read_columns("my.column").unwrap();
|
||||
@@ -176,7 +176,7 @@ fn test_dictionary_encoded_bytes() {
|
||||
columnar_writer.record_bytes(3, "my.column", b"c");
|
||||
columnar_writer.record_bytes(3, "my.column2", b"different_column!");
|
||||
columnar_writer.record_bytes(4, "my.column", b"b");
|
||||
columnar_writer.serialize(5, &mut buffer).unwrap();
|
||||
columnar_writer.serialize(5, None, &mut buffer).unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let col_handles = columnar_reader.read_columns("my.column").unwrap();
|
||||
|
||||
@@ -5,12 +5,37 @@ use byteorder::{ReadBytesExt, WriteBytesExt};
|
||||
|
||||
use crate::{Endianness, VInt};
|
||||
|
||||
#[derive(Default)]
|
||||
struct Counter(u64);
|
||||
|
||||
impl io::Write for Counter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.0 += buf.len() as u64;
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
self.0 += buf.len() as u64;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for a simple binary serialization.
|
||||
pub trait BinarySerializable: fmt::Debug + Sized {
|
||||
/// Serialize
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>;
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()>;
|
||||
/// Deserialize
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self>;
|
||||
|
||||
fn num_bytes(&self) -> u64 {
|
||||
let mut counter = Counter::default();
|
||||
self.serialize(&mut counter).unwrap();
|
||||
counter.0
|
||||
}
|
||||
}
|
||||
|
||||
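Two things are worth noting in this hunk: the byte-counting `Counter` sink gives `num_bytes` a default implementation that never allocates, and the new `W: Write + ?Sized` bound lets every impl be called through a `&mut dyn io::Write` trait object. A hedged sketch combining both (the helper is illustrative, not part of the diff):

```rust
use std::io::{self, Write};

// Illustrative helper: serialize into a dynamically dispatched writer
// and report how many bytes were written.
fn write_with_len<T: BinarySerializable>(value: &T, sink: &mut dyn Write) -> io::Result<u64> {
    // num_bytes() serializes into the Counter sink defined above, so the
    // length is known without buffering the payload.
    let len = value.num_bytes();
    // With `W: Write + ?Sized`, `W` can be the unsized `dyn Write` here.
    value.serialize(sink)?;
    Ok(len)
}
```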
pub trait DeserializeFrom<T: BinarySerializable> {
|
||||
@@ -34,7 +59,7 @@ pub trait FixedSize: BinarySerializable {
|
||||
}
|
||||
|
||||
impl BinarySerializable for () {
|
||||
fn serialize<W: Write>(&self, _: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, _: &mut W) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
fn deserialize<R: Read>(_: &mut R) -> io::Result<Self> {
|
||||
@@ -47,7 +72,7 @@ impl FixedSize for () {
|
||||
}
|
||||
|
||||
impl<T: BinarySerializable> BinarySerializable for Vec<T> {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
VInt(self.len() as u64).serialize(writer)?;
|
||||
for it in self {
|
||||
it.serialize(writer)?;
|
||||
@@ -66,7 +91,7 @@ impl<T: BinarySerializable> BinarySerializable for Vec<T> {
|
||||
}
|
||||
|
||||
impl<Left: BinarySerializable, Right: BinarySerializable> BinarySerializable for (Left, Right) {
|
||||
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, write: &mut W) -> io::Result<()> {
|
||||
self.0.serialize(write)?;
|
||||
self.1.serialize(write)
|
||||
}
|
||||
@@ -81,7 +106,7 @@ impl<Left: BinarySerializable + FixedSize, Right: BinarySerializable + FixedSize
|
||||
}
|
||||
|
||||
impl BinarySerializable for u32 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u32::<Endianness>(*self)
|
||||
}
|
||||
|
||||
@@ -95,7 +120,7 @@ impl FixedSize for u32 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for u16 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u16::<Endianness>(*self)
|
||||
}
|
||||
|
||||
@@ -109,7 +134,7 @@ impl FixedSize for u16 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for u64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u64::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
@@ -122,7 +147,7 @@ impl FixedSize for u64 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for u128 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u128::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
@@ -135,7 +160,7 @@ impl FixedSize for u128 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for f32 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_f32::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
@@ -148,7 +173,7 @@ impl FixedSize for f32 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for i64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_i64::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
@@ -161,7 +186,7 @@ impl FixedSize for i64 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for f64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_f64::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
@@ -174,7 +199,7 @@ impl FixedSize for f64 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for u8 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u8(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<u8> {
|
||||
@@ -187,7 +212,7 @@ impl FixedSize for u8 {
|
||||
}
|
||||
|
||||
impl BinarySerializable for bool {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u8(u8::from(*self))
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
|
||||
@@ -208,7 +233,7 @@ impl FixedSize for bool {
|
||||
}
|
||||
|
||||
impl BinarySerializable for String {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let data: &[u8] = self.as_bytes();
|
||||
VInt(data.len() as u64).serialize(writer)?;
|
||||
writer.write_all(data)
|
||||
|
||||
@@ -44,7 +44,7 @@ pub fn deserialize_vint_u128(data: &[u8]) -> io::Result<(u128, &[u8])> {
|
||||
pub struct VIntU128(pub u128);
|
||||
|
||||
impl BinarySerializable for VIntU128 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let mut buffer = vec![];
|
||||
serialize_vint_u128(self.0, &mut buffer);
|
||||
writer.write_all(&buffer)
|
||||
@@ -211,7 +211,7 @@ impl VInt {
|
||||
}
|
||||
|
||||
impl BinarySerializable for VInt {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let mut buffer = [0u8; 10];
|
||||
let num_bytes = self.serialize_into(&mut buffer);
|
||||
writer.write_all(&buffer[0..num_bytes])
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
[package]
|
||||
name = "fastfield_codecs"
|
||||
version = "0.3.0"
|
||||
authors = ["Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2021"
|
||||
description = "Fast field codecs used by tantivy"
|
||||
documentation = "https://docs.rs/fastfield_codecs/"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
repository = "https://github.com/quickwit-oss/tantivy"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
common = { version = "0.5", path = "../common/", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
|
||||
columnar = { version= "0.1", path="../columnar", package="tantivy-columnar" }
|
||||
prettytable-rs = {version="0.10.0", optional= true}
|
||||
rand = {version="0.8.3", optional= true}
|
||||
fastdivide = "0.4"
|
||||
log = "0.4"
|
||||
itertools = { version = "0.10.3" }
|
||||
measure_time = { version="0.8.2", optional=true}
|
||||
|
||||
[dev-dependencies]
|
||||
more-asserts = "0.3.0"
|
||||
proptest = "1.0.0"
|
||||
rand = "0.8.3"
|
||||
|
||||
[features]
|
||||
bin = ["prettytable-rs", "rand", "measure_time"]
|
||||
default = ["bin"]
|
||||
unstable = []
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
|
||||
|
||||
# Fast Field Codecs
|
||||
|
||||
This crate contains various fast field codecs, used to compress/decompress fast field data in tantivy.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributing is pretty straightforward. Since the bitpacking is the simplest compressor, you can check it for reference.
|
||||
|
||||
A codec needs to implement 2 traits:
|
||||
|
||||
- A reader implementing `FastFieldCodecReader` to read the codec.
|
||||
- A serializer implementing `FastFieldCodecSerializer` for compression estimation and codec name + id.
|
||||
|
||||
### Tests
|
||||
|
||||
Once the traits are implemented test and benchmark integration is pretty easy (see `test_with_codec_data_sets` and `bench.rs`).
|
||||
|
||||
Make sure to add the codec to the main.rs, which tests the compression ratio and estimation against different data sets. You can run it with:
|
||||
```
|
||||
cargo run --features bin
|
||||
```
|
||||
|
||||
### TODO
|
||||
- Add real world data sets in comparison
|
||||
- Add codec to cover sparse data sets
|
||||
|
||||
|
||||
### Codec Comparison
|
||||
```
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| | Compression Ratio | Compression Estimation |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Autoincrement | | |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| LinearInterpol | 0.000039572664 | 0.000004396963 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| MultiLinearInterpol | 0.1477348 | 0.17275847 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Bitpacked | 0.28126493 | 0.28125 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Monotonically increasing concave | | |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| LinearInterpol | 0.25003937 | 0.26562938 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| MultiLinearInterpol | 0.190665 | 0.1883836 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Bitpacked | 0.31251436 | 0.3125 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Monotonically increasing convex | | |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| LinearInterpol | 0.25003937 | 0.28125438 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| MultiLinearInterpol | 0.18676 | 0.2040086 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Bitpacked | 0.31251436 | 0.3125 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Almost monotonically increasing | | |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| LinearInterpol | 0.14066513 | 0.1562544 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| MultiLinearInterpol | 0.16335973 | 0.17275847 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
| Bitpacked | 0.28126493 | 0.28125 |
|
||||
+----------------------------------+-------------------+------------------------+
|
||||
|
||||
```
|
||||
@@ -1,311 +0,0 @@
|
||||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ops::RangeInclusive;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::OwnedBytes;
|
||||
use fastfield_codecs::*;
|
||||
use rand::prelude::*;
|
||||
use test::Bencher;
|
||||
|
||||
use super::*;
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
|
||||
column: &[T],
|
||||
) -> Arc<dyn Column<T>> {
|
||||
let mut buffer = Vec::new();
|
||||
serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
|
||||
open(OwnedBytes::new(buffer)).unwrap()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
|
||||
const SINGLE_ITEM: u64 = 90;
|
||||
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
|
||||
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
|
||||
fn get_data_50percent_item() -> Vec<u128> {
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
|
||||
let mut data = vec![];
|
||||
for _ in 0..300_000 {
|
||||
let val = rng.gen_range(1..=100);
|
||||
data.push(val);
|
||||
}
|
||||
data.push(SINGLE_ITEM);
|
||||
|
||||
data.shuffle(&mut rng);
|
||||
let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
data
|
||||
}
|
||||
fn get_u128_column_random() -> Arc<dyn Column<u128>> {
|
||||
let permutation = generate_random();
|
||||
let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
get_u128_column_from_data(&permutation)
|
||||
}
|
||||
|
||||
fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn Column<u128>> {
|
||||
let mut out = vec![];
|
||||
let iter_gen = || data.iter().cloned();
|
||||
serialize_u128(iter_gen, data.len() as u32, &mut out).unwrap();
|
||||
let out = OwnedBytes::new(out);
|
||||
open_u128::<u128>(out).unwrap()
|
||||
}
|
||||
|
||||
// U64 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
FIFTY_PERCENT_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
ONE_PERCENT_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
SINGLE_ITEM_RANGE,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U64 RANGE END
|
||||
|
||||
// U128 RANGE START
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(
|
||||
*SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
|
||||
0..data.len() as u32,
|
||||
&mut positions,
|
||||
);
|
||||
positions
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
|
||||
let data = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| {
|
||||
let mut positions = Vec::new();
|
||||
column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
|
||||
positions
|
||||
});
|
||||
}
|
||||
// U128 RANGE END
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u128;
|
||||
for i in 0u64..column.num_vals() as u64 {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let n = column.num_vals();
|
||||
let mut a = 0u128;
|
||||
for i in (0..n / 5).map(|val| val * 5) {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += permutation[i as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u32..n as u32 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..n {
|
||||
a += column.get_val(i as u32);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
#![warn(missing_docs)]
|
||||
#![cfg_attr(all(feature = "unstable", test), feature(test))]
|
||||
|
||||
//! # `fastfield_codecs`
|
||||
//!
|
||||
//! - Columnar storage of data for tantivy [`Column`].
|
||||
//! - Encode data in different codecs.
|
||||
//! - Monotonically map values to u64/u128
|
||||
|
||||
pub use columnar::ColumnValues as Column;
|
||||
@@ -2,9 +2,8 @@
|
||||
|
||||
use std::rc::Rc;
|
||||
use std::sync::atomic::AtomicU32;
|
||||
use std::sync::Arc;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use columnar::{Column, StrColumn};
|
||||
|
||||
use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
|
||||
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
|
||||
@@ -14,9 +13,8 @@ use super::metric::{
|
||||
};
|
||||
use super::segment_agg_result::BucketCount;
|
||||
use super::VecWithNames;
|
||||
use crate::fastfield::{type_and_cardinality, MultiValuedFastFieldReader};
|
||||
use crate::schema::Type;
|
||||
use crate::{InvertedIndexReader, SegmentReader, TantivyError};
|
||||
use crate::{SegmentReader, TantivyError};
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct AggregationsWithAccessor {
|
||||
@@ -37,38 +35,12 @@ impl AggregationsWithAccessor {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) enum FastFieldAccessor {
|
||||
Multi(MultiValuedFastFieldReader<u64>),
|
||||
Single(Arc<dyn Column<u64>>),
|
||||
}
|
||||
impl FastFieldAccessor {
|
||||
pub fn as_single(&self) -> Option<&dyn Column<u64>> {
|
||||
match self {
|
||||
FastFieldAccessor::Multi(_) => None,
|
||||
FastFieldAccessor::Single(reader) => Some(&**reader),
|
||||
}
|
||||
}
|
||||
pub fn into_single(self) -> Option<Arc<dyn Column<u64>>> {
|
||||
match self {
|
||||
FastFieldAccessor::Multi(_) => None,
|
||||
FastFieldAccessor::Single(reader) => Some(reader),
|
||||
}
|
||||
}
|
||||
pub fn as_multi(&self) -> Option<&MultiValuedFastFieldReader<u64>> {
|
||||
match self {
|
||||
FastFieldAccessor::Multi(reader) => Some(reader),
|
||||
FastFieldAccessor::Single(_) => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BucketAggregationWithAccessor {
|
||||
/// In general there can be buckets without fast field access, e.g. buckets that are created
|
||||
/// based on search terms. So eventually this needs to be Option or moved.
|
||||
pub(crate) accessor: FastFieldAccessor,
|
||||
pub(crate) inverted_index: Option<Arc<InvertedIndexReader>>,
|
||||
pub(crate) accessor: Column<u64>,
|
||||
pub(crate) str_dict_column: Option<StrColumn>,
|
||||
pub(crate) field_type: Type,
|
||||
pub(crate) bucket_agg: BucketAggregationType,
|
||||
pub(crate) sub_aggregation: AggregationsWithAccessor,
|
||||
@@ -83,20 +55,19 @@ impl BucketAggregationWithAccessor {
|
||||
bucket_count: Rc<AtomicU32>,
|
||||
max_bucket_count: u32,
|
||||
) -> crate::Result<BucketAggregationWithAccessor> {
|
||||
let mut inverted_index = None;
|
||||
let mut str_dict_column = None;
|
||||
let (accessor, field_type) = match &bucket {
|
||||
BucketAggregationType::Range(RangeAggregation {
|
||||
field: field_name, ..
|
||||
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
|
||||
}) => get_ff_reader_and_validate(reader, field_name)?,
|
||||
BucketAggregationType::Histogram(HistogramAggregation {
|
||||
field: field_name, ..
|
||||
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
|
||||
}) => get_ff_reader_and_validate(reader, field_name)?,
|
||||
BucketAggregationType::Terms(TermsAggregation {
|
||||
field: field_name, ..
|
||||
}) => {
|
||||
let field = reader.schema().get_field(field_name)?;
|
||||
inverted_index = Some(reader.inverted_index(field)?);
|
||||
get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
|
||||
str_dict_column = reader.fast_fields().str(&field_name)?;
|
||||
get_ff_reader_and_validate(reader, field_name)?
|
||||
}
|
||||
};
|
||||
let sub_aggregation = sub_aggregation.clone();
|
||||
@@ -110,7 +81,7 @@ impl BucketAggregationWithAccessor {
|
||||
max_bucket_count,
|
||||
)?,
|
||||
bucket_agg: bucket.clone(),
|
||||
inverted_index,
|
||||
str_dict_column,
|
||||
bucket_count: BucketCount {
|
||||
bucket_count,
|
||||
max_bucket_count,
|
||||
@@ -124,7 +95,7 @@ impl BucketAggregationWithAccessor {
|
||||
pub struct MetricAggregationWithAccessor {
|
||||
pub metric: MetricAggregation,
|
||||
pub field_type: Type,
|
||||
pub accessor: Arc<dyn Column>,
|
||||
pub accessor: Column<u64>,
|
||||
}
|
||||
|
||||
impl MetricAggregationWithAccessor {
|
||||
@@ -139,13 +110,10 @@ impl MetricAggregationWithAccessor {
|
||||
| MetricAggregation::Min(MinAggregation { field: field_name })
|
||||
| MetricAggregation::Stats(StatsAggregation { field: field_name })
|
||||
| MetricAggregation::Sum(SumAggregation { field: field_name }) => {
|
||||
let (accessor, field_type) =
|
||||
get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?;
|
||||
let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?;
|
||||
|
||||
Ok(MetricAggregationWithAccessor {
|
||||
accessor: accessor
|
||||
.into_single()
|
||||
.expect("unexpected fast field cardinality"),
|
||||
accessor,
|
||||
field_type,
|
||||
metric: metric.clone(),
|
||||
})
|
||||
@@ -190,32 +158,22 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
|
||||
fn get_ff_reader_and_validate(
|
||||
reader: &SegmentReader,
|
||||
field_name: &str,
|
||||
cardinality: Cardinality,
|
||||
) -> crate::Result<(FastFieldAccessor, Type)> {
|
||||
) -> crate::Result<(columnar::Column<u64>, Type)> {
|
||||
let field = reader.schema().get_field(field_name)?;
|
||||
let field_type = reader.schema().get_field_entry(field).field_type();
|
||||
|
||||
if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
|
||||
if cardinality != field_cardinality {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Invalid field cardinality on field {} expected {:?}, but got {:?}",
|
||||
field_name, cardinality, field_cardinality
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Only fast fields of type f64, u64, i64 are supported, but got {:?} ",
|
||||
field_type.value_type()
|
||||
)));
|
||||
};
|
||||
// TODO we should get type metadata from columnar
|
||||
let field_type = reader
|
||||
.schema()
|
||||
.get_field_entry(field)
|
||||
.field_type()
|
||||
.value_type();
|
||||
// TODO Do validation
|
||||
|
||||
let ff_fields = reader.fast_fields();
|
||||
match cardinality {
|
||||
Cardinality::SingleValue => ff_fields
|
||||
.u64_lenient(field_name)
|
||||
.map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
|
||||
Cardinality::MultiValues => ff_fields
|
||||
.u64s_lenient(field_name)
|
||||
.map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
|
||||
}
|
||||
let ff_field = ff_fields.u64_lenient(field_name)?.ok_or_else(|| {
|
||||
TantivyError::InvalidArgument(format!(
|
||||
"No numerical fast field found for field: {}",
|
||||
field_name
|
||||
))
|
||||
})?;
|
||||
Ok((ff_field, field_type))
|
||||
}
|
||||
|
||||
src/aggregation/bucket/histogram/date_histogram.rs (new file, 126 lines)
@@ -0,0 +1,126 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// DateHistogramAggregation is similar to `HistogramAggregation`, but it can only be used with date
|
||||
/// type.
|
||||
///
|
||||
/// Currently only **fixed time** intervals are supported. Calendar-aware time intervals are not
|
||||
/// supported.
|
||||
///
|
||||
/// Like the histogram, values are rounded down into the closest bucket.
|
||||
///
|
||||
/// For this calculation all fastfield values are converted to f64.
|
||||
///
|
||||
/// # Limitations/Compatibility
|
||||
/// Only fixed time intervals are supported.
|
||||
///
|
||||
/// # JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
/// "prices": {
|
||||
/// "date_histogram": {
|
||||
/// "field": "price",
|
||||
/// "fixed_interval": "30d"
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Response
|
||||
/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub struct DateHistogramAggregationReq {
|
||||
/// The field to aggregate on.
|
||||
pub field: String,
|
||||
/// The interval to chunk your data range. Each bucket spans a value range of
|
||||
/// [0..fixed_interval). Accepted values
|
||||
///
|
||||
/// Fixed intervals are configured with the `fixed_interval` parameter.
|
||||
/// In contrast to calendar-aware intervals, fixed intervals are a fixed number of SI units and
|
||||
/// never deviate, regardless of where they fall on the calendar. One second is always
|
||||
/// composed of 1000ms. This allows fixed intervals to be specified in any multiple of the
|
||||
/// supported units. However, it means fixed intervals cannot express other units such as
|
||||
/// months, since the duration of a month is not a fixed quantity. Attempting to specify a
|
||||
/// calendar interval like month or quarter will return an Error.
|
||||
///
|
||||
/// The accepted units for fixed intervals are:
|
||||
/// * `ms`: milliseconds
|
||||
/// * `s`: seconds. Defined as 1000 milliseconds each.
|
||||
/// * `m`: minutes. Defined as 60 seconds each (60_000 milliseconds).
|
||||
/// * `h`: hours. Defined as 60 minutes each (3_600_000 milliseconds).
|
||||
/// * `d`: days. Defined as 24 hours (86_400_000 milliseconds).
|
||||
///
|
||||
/// Fractional time values are not supported, but you can address this by shifting to another
|
||||
/// time unit (e.g., `1.5h` could instead be specified as `90m`).
|
||||
pub fixed_interval: String,
|
||||
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
|
||||
/// 1))`.
|
||||
pub offset: Option<String>,
|
||||
/// Whether to return the buckets as a hash map
|
||||
#[serde(default)]
|
||||
pub keyed: bool,
|
||||
}
|
||||
|
||||
impl DateHistogramAggregationReq {
|
||||
fn validate(&self) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
/// Errors when parsing the fixed interval for `DateHistogramAggregationReq`.
|
||||
pub enum DateHistogramParseError {
|
||||
/// Unit not recognized in passed String
|
||||
UnitNotRecognized(String),
|
||||
/// Number not found in passed String
|
||||
NumberMissing(String),
|
||||
/// Unit not found in passed String
|
||||
UnitMissing(String),
|
||||
}
|
||||
|
||||
fn parse_into_milliseconds(input: &str) -> Result<u64, DateHistogramParseError> {
|
||||
let split_boundary = input
|
||||
.char_indices()
|
||||
.take_while(|(pos, el)| el.is_numeric())
|
||||
.count();
|
||||
let (number, unit) = input.split_at(split_boundary);
|
||||
if number.is_empty() {
|
||||
return Err(DateHistogramParseError::NumberMissing(input.to_string()));
|
||||
}
|
||||
if unit.is_empty() {
|
||||
return Err(DateHistogramParseError::UnitMissing(input.to_string()));
|
||||
}
|
||||
let number: u64 = number.parse().unwrap();
|
||||
let multiplier_from_unit = match unit {
|
||||
"ms" => 1,
|
||||
"s" => 1000,
|
||||
"m" => 60 * 1000,
|
||||
"h" => 60 * 60 * 1000,
|
||||
"d" => 24 * 60 * 60 * 1000,
|
||||
_ => return Err(DateHistogramParseError::UnitNotRecognized(unit.to_string())),
|
||||
};
|
||||
|
||||
Ok(number * multiplier_from_unit)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parser_test() {
|
||||
assert_eq!(parse_into_milliseconds("1m").unwrap(), 60_000);
|
||||
assert_eq!(parse_into_milliseconds("2m").unwrap(), 120_000);
|
||||
assert_eq!(
|
||||
parse_into_milliseconds("2y").unwrap_err(),
|
||||
DateHistogramParseError::UnitNotRecognized("y".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
parse_into_milliseconds("2000").unwrap_err(),
|
||||
DateHistogramParseError::UnitMissing("2000".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
parse_into_milliseconds("ms").unwrap_err(),
|
||||
DateHistogramParseError::NumberMissing("ms".to_string())
|
||||
);
|
||||
}
|
||||
}
|
||||
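Putting the multipliers together, the `"30d"` interval from the JSON example above resolves to `30 * 86_400_000` ms. A small check in the same style as the tests, not part of the diff:

```rust
#[test]
fn parse_fixed_interval_30d() {
    // 30 days * 86_400_000 ms per day
    assert_eq!(parse_into_milliseconds("30d").unwrap(), 2_592_000_000);
}
```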
@@ -1,7 +1,7 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::fmt::Display;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use columnar::Column;
|
||||
use itertools::Itertools;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -13,7 +13,9 @@ use crate::aggregation::agg_result::BucketEntry;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::{f64_from_fastfield_u64, format_date};
|
||||
use crate::schema::{Schema, Type};
|
||||
use crate::{DocId, TantivyError};
|
||||
@@ -62,7 +64,6 @@ use crate::{DocId, TantivyError};
|
||||
///
|
||||
/// Response
|
||||
/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub struct HistogramAggregation {
|
||||
/// The field to aggregate on.
|
||||
@@ -184,7 +185,7 @@ pub(crate) struct SegmentHistogramBucketEntry {
|
||||
impl SegmentHistogramBucketEntry {
|
||||
pub(crate) fn into_intermediate_bucket_entry(
|
||||
self,
|
||||
sub_aggregation: SegmentAggregationResultsCollector,
|
||||
sub_aggregation: GenericSegmentAggregationResultsCollector,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<IntermediateHistogramBucketEntry> {
|
||||
Ok(IntermediateHistogramBucketEntry {
|
||||
@@ -198,11 +199,11 @@ impl SegmentHistogramBucketEntry {
|
||||
|
||||
/// The collector puts values from the fast field into the correct buckets and does a conversion to
|
||||
/// the correct datatype.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SegmentHistogramCollector {
|
||||
/// The buckets containing the aggregation data.
|
||||
buckets: Vec<SegmentHistogramBucketEntry>,
|
||||
sub_aggregations: Option<Vec<SegmentAggregationResultsCollector>>,
|
||||
sub_aggregations: Option<Vec<GenericSegmentAggregationResultsCollector>>,
|
||||
field_type: Type,
|
||||
interval: f64,
|
||||
offset: f64,
|
||||
@@ -283,7 +284,7 @@ impl SegmentHistogramCollector {
|
||||
req: &HistogramAggregation,
|
||||
sub_aggregation: &AggregationsWithAccessor,
|
||||
field_type: Type,
|
||||
accessor: &dyn Column<u64>,
|
||||
accessor: &Column<u64>,
|
||||
) -> crate::Result<Self> {
|
||||
req.validate()?;
|
||||
let min = f64_from_fastfield_u64(accessor.min_value(), &field_type);
|
||||
@@ -300,7 +301,7 @@ impl SegmentHistogramCollector {
|
||||
None
|
||||
} else {
|
||||
let sub_aggregation =
|
||||
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
|
||||
GenericSegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
|
||||
Some(buckets.iter().map(|_| sub_aggregation.clone()).collect())
|
||||
};
|
||||
|
||||
@@ -335,7 +336,7 @@ impl SegmentHistogramCollector {
|
||||
#[inline]
|
||||
pub(crate) fn collect_block(
|
||||
&mut self,
|
||||
doc: &[DocId],
|
||||
docs: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) -> crate::Result<()> {
|
||||
@@ -346,64 +347,20 @@ impl SegmentHistogramCollector {
|
||||
let get_bucket_num =
|
||||
|val| (get_bucket_num_f64(val, interval, offset) as i64 - first_bucket_num) as usize;
|
||||
|
||||
let accessor = bucket_with_accessor
|
||||
.accessor
|
||||
.as_single()
|
||||
.expect("unexpected fast field cardinatility");
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
for docs in iter.by_ref() {
|
||||
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0]));
|
||||
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1]));
|
||||
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2]));
|
||||
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3]));
|
||||
let accessor = &bucket_with_accessor.accessor;
|
||||
for doc in docs {
|
||||
for val in accessor.values(*doc) {
|
||||
let val = self.f64_from_fastfield_u64(val);
|
||||
|
||||
let bucket_pos0 = get_bucket_num(val0);
|
||||
let bucket_pos1 = get_bucket_num(val1);
|
||||
let bucket_pos2 = get_bucket_num(val2);
|
||||
let bucket_pos3 = get_bucket_num(val3);
|
||||
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val0,
|
||||
&bounds,
|
||||
bucket_pos0,
|
||||
docs[0],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
)?;
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val1,
|
||||
&bounds,
|
||||
bucket_pos1,
|
||||
docs[1],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
)?;
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val2,
|
||||
&bounds,
|
||||
bucket_pos2,
|
||||
docs[2],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
)?;
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val3,
|
||||
&bounds,
|
||||
bucket_pos3,
|
||||
docs[3],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
)?;
|
||||
}
|
||||
for &doc in iter.remainder() {
|
||||
let val = f64_from_fastfield_u64(accessor.get_val(doc), &self.field_type);
|
||||
if !bounds.contains(val) {
|
||||
continue;
|
||||
let bucket_pos = get_bucket_num(val);
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val,
|
||||
&bounds,
|
||||
bucket_pos,
|
||||
*doc,
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
)?;
|
||||
}
|
||||
let bucket_pos = (get_bucket_num_f64(val, self.interval, self.offset) as i64
|
||||
- self.first_bucket_num) as usize;
|
||||
|
||||
debug_assert_eq!(
|
||||
self.buckets[bucket_pos].key,
|
||||
get_bucket_val(val, self.interval, self.offset)
|
||||
);
|
||||
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
if force_flush {
|
||||
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {
|
||||
|
||||
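For reference, the bucket position computed above reduces to flooring the value to its interval boundary relative to the offset. A hedged sketch of the arithmetic behind `get_bucket_num_f64`/`get_bucket_val` as used in this hunk:

```rust
// Sketch of the histogram bucketing arithmetic used by collect_block.
fn bucket_num(val: f64, interval: f64, offset: f64) -> f64 {
    ((val - offset) / interval).floor()
}

fn bucket_key(val: f64, interval: f64, offset: f64) -> f64 {
    // Lower bound of the bucket containing `val`,
    // e.g. val = 17.0, interval = 10.0, offset = 0.0 -> key 10.0.
    bucket_num(val, interval, offset) * interval + offset
}
```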
@@ -1,2 +1,4 @@
|
||||
mod date_histogram;
|
||||
mod histogram;
|
||||
pub use date_histogram::*;
|
||||
pub use histogram::*;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Range;
|
||||
|
||||
use fastfield_codecs::MonotonicallyMappableToU64;
|
||||
use columnar::MonotonicallyMappableToU64;
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -11,7 +11,9 @@ use crate::aggregation::agg_req_with_accessor::{
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
BucketCount, GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::{
|
||||
f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
|
||||
};
|
||||
@@ -114,7 +116,7 @@ impl From<Range<u64>> for InternalRangeAggregationRange {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct SegmentRangeAndBucketEntry {
|
||||
range: Range<u64>,
|
||||
bucket: SegmentRangeBucketEntry,
|
||||
@@ -122,18 +124,18 @@ pub(crate) struct SegmentRangeAndBucketEntry {
|
||||
|
||||
/// The collector puts values from the fast field into the correct buckets and does a conversion to
|
||||
/// the correct datatype.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SegmentRangeCollector {
|
||||
/// The buckets containing the aggregation data.
|
||||
buckets: Vec<SegmentRangeAndBucketEntry>,
|
||||
field_type: Type,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq)]
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct SegmentRangeBucketEntry {
|
||||
pub key: Key,
|
||||
pub doc_count: u64,
|
||||
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
|
||||
pub sub_aggregation: Option<GenericSegmentAggregationResultsCollector>,
|
||||
/// The from range of the bucket. Equals `f64::MIN` when `None`.
|
||||
pub from: Option<f64>,
|
||||
/// The to range of the bucket. Equals `f64::MAX` when `None`. Open interval, `to` is not
|
||||
@@ -227,9 +229,11 @@ impl SegmentRangeCollector {
|
||||
let sub_aggregation = if sub_aggregation.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(SegmentAggregationResultsCollector::from_req_and_validate(
|
||||
sub_aggregation,
|
||||
)?)
|
||||
Some(
|
||||
GenericSegmentAggregationResultsCollector::from_req_and_validate(
|
||||
sub_aggregation,
|
||||
)?,
|
||||
)
|
||||
};
|
||||
|
||||
Ok(SegmentRangeAndBucketEntry {
|
||||
@@ -257,35 +261,18 @@ impl SegmentRangeCollector {
|
||||
#[inline]
|
||||
pub(crate) fn collect_block(
|
||||
&mut self,
|
||||
doc: &[DocId],
|
||||
docs: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) -> crate::Result<()> {
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
let accessor = bucket_with_accessor
|
||||
.accessor
|
||||
.as_single()
|
||||
.expect("unexpected fast field cardinality");
|
||||
for docs in iter.by_ref() {
|
||||
let val1 = accessor.get_val(docs[0]);
|
||||
let val2 = accessor.get_val(docs[1]);
|
||||
let val3 = accessor.get_val(docs[2]);
|
||||
let val4 = accessor.get_val(docs[3]);
|
||||
let bucket_pos1 = self.get_bucket_pos(val1);
|
||||
let bucket_pos2 = self.get_bucket_pos(val2);
|
||||
let bucket_pos3 = self.get_bucket_pos(val3);
|
||||
let bucket_pos4 = self.get_bucket_pos(val4);
|
||||
let accessor = &bucket_with_accessor.accessor;
|
||||
for doc in docs {
|
||||
for val in accessor.values(*doc) {
|
||||
let bucket_pos = self.get_bucket_pos(val);
|
||||
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
}
|
||||
|
||||
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
for &doc in iter.remainder() {
|
||||
let val = accessor.get_val(doc);
|
||||
let bucket_pos = self.get_bucket_pos(val);
|
||||
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
if force_flush {
|
||||
for bucket in &mut self.buckets {
|
||||
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
|
||||
@@ -434,7 +421,7 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Resu
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use fastfield_codecs::MonotonicallyMappableToU64;
|
||||
use columnar::MonotonicallyMappableToU64;
|
||||
use serde_json::Value;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use std::fmt::Debug;
|
||||
|
||||
use columnar::Column;
|
||||
use itertools::Itertools;
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -11,9 +12,11 @@ use crate::aggregation::agg_req_with_accessor::{
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateBucketResult, IntermediateTermBucketEntry, IntermediateTermBucketResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, GenericSegmentAggregationResultsCollector,
|
||||
SegmentAggregationCollector,
|
||||
};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::fastfield::MultiValuedFastFieldReader;
|
||||
use crate::schema::Type;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
@@ -196,17 +199,16 @@ impl TermsAggregationInternal {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug, Default)]
|
||||
/// Container to store term_ids and their buckets.
|
||||
struct TermBuckets {
|
||||
pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
|
||||
blueprint: Option<SegmentAggregationResultsCollector>,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Default)]
|
||||
#[derive(Clone, Default)]
|
||||
struct TermBucketEntry {
|
||||
doc_count: u64,
|
||||
sub_aggregations: Option<SegmentAggregationResultsCollector>,
|
||||
sub_aggregations: Option<Box<dyn SegmentAggregationCollector>>,
|
||||
}
|
||||
|
||||
impl Debug for TermBucketEntry {
|
||||
@@ -218,7 +220,7 @@ impl Debug for TermBucketEntry {
|
||||
}
|
||||
|
||||
impl TermBucketEntry {
|
||||
fn from_blueprint(blueprint: &Option<SegmentAggregationResultsCollector>) -> Self {
|
||||
fn from_blueprint(blueprint: &Option<Box<dyn SegmentAggregationCollector>>) -> Self {
|
||||
Self {
|
||||
doc_count: 0,
|
||||
sub_aggregations: blueprint.clone(),
|
||||
@@ -247,46 +249,11 @@ impl TermBuckets {
|
||||
sub_aggregation: &AggregationsWithAccessor,
|
||||
_max_term_id: usize,
|
||||
) -> crate::Result<Self> {
|
||||
let has_sub_aggregations = sub_aggregation.is_empty();
|
||||
|
||||
let blueprint = if has_sub_aggregations {
|
||||
let sub_aggregation =
|
||||
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
|
||||
Some(sub_aggregation)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(TermBuckets {
|
||||
blueprint,
|
||||
entries: Default::default(),
|
||||
})
|
||||
}
|
||||
|
||||
fn increment_bucket(
|
||||
&mut self,
|
||||
term_ids: &[u64],
|
||||
doc: DocId,
|
||||
sub_aggregation: &AggregationsWithAccessor,
|
||||
bucket_count: &BucketCount,
|
||||
blueprint: &Option<SegmentAggregationResultsCollector>,
|
||||
) -> crate::Result<()> {
|
||||
for &term_id in term_ids {
|
||||
let entry = self.entries.entry(term_id as u32).or_insert_with(|| {
|
||||
bucket_count.add_count(1);
|
||||
|
||||
TermBucketEntry::from_blueprint(blueprint)
|
||||
});
|
||||
entry.doc_count += 1;
|
||||
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
|
||||
sub_aggregations.collect(doc, sub_aggregation)?;
|
||||
}
|
||||
}
|
||||
bucket_count.validate_bucket_count()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
|
||||
for entry in &mut self.entries.values_mut() {
|
||||
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
|
||||
@@ -299,13 +266,12 @@ impl TermBuckets {
|
||||
|
||||
/// The collector puts values from the fast field into the correct buckets and does a conversion to
|
||||
/// the correct datatype.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SegmentTermCollector {
|
||||
/// The buckets containing the aggregation data.
|
||||
term_buckets: TermBuckets,
|
||||
req: TermsAggregationInternal,
|
||||
field_type: Type,
|
||||
blueprint: Option<SegmentAggregationResultsCollector>,
|
||||
blueprint: Option<Box<dyn SegmentAggregationCollector>>,
|
||||
}
|
||||
|
||||
pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) {
|
||||
@@ -317,12 +283,8 @@ impl SegmentTermCollector {
|
||||
pub(crate) fn from_req_and_validate(
|
||||
req: &TermsAggregation,
|
||||
sub_aggregations: &AggregationsWithAccessor,
|
||||
field_type: Type,
|
||||
accessor: &MultiValuedFastFieldReader<u64>,
|
||||
) -> crate::Result<Self> {
|
||||
let max_term_id = accessor.max_value();
|
||||
let term_buckets =
|
||||
TermBuckets::from_req_and_validate(sub_aggregations, max_term_id as usize)?;
|
||||
let term_buckets = TermBuckets::default();
|
||||
|
||||
if let Some(custom_order) = req.order.as_ref() {
|
||||
// Validate sub aggregation exists
|
||||
@@ -340,8 +302,7 @@ impl SegmentTermCollector {
|
||||
|
||||
let has_sub_aggregations = !sub_aggregations.is_empty();
|
||||
let blueprint = if has_sub_aggregations {
|
||||
let sub_aggregation =
|
||||
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregations)?;
|
||||
let sub_aggregation = build_segment_agg_collector(sub_aggregations)?;
|
||||
Some(sub_aggregation)
|
||||
} else {
|
||||
None
|
||||
@@ -350,7 +311,6 @@ impl SegmentTermCollector {
|
||||
Ok(SegmentTermCollector {
|
||||
req: TermsAggregationInternal::from_req(req),
|
||||
term_buckets,
|
||||
field_type,
|
||||
blueprint,
|
||||
})
|
||||
}
|
||||
@@ -368,7 +328,14 @@ impl SegmentTermCollector {
|
||||
|
||||
match self.req.order.target {
|
||||
OrderTarget::Key => {
|
||||
// defer order and cut_off after loading the texts from the dictionary
|
||||
// We rely on the fact that term ordinals match the order of the strings
|
||||
// TODO: We could have a special collector, that keeps only TOP n results at any
|
||||
// time.
|
||||
if self.req.order.order == Order::Desc {
|
||||
entries.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.0));
|
||||
} else {
|
||||
entries.sort_unstable_by_key(|bucket| bucket.0);
|
||||
}
|
||||
}
|
||||
OrderTarget::SubAggregation(_name) => {
|
||||
// don't sort and cut off since it's hard to make assumptions on the quality of the
|
||||
@@ -384,34 +351,40 @@ impl SegmentTermCollector {
|
||||
}
|
||||
}
|
||||
|
||||
let (term_doc_count_before_cutoff, mut sum_other_doc_count) =
|
||||
if order_by_key || order_by_sub_aggregation {
|
||||
(0, 0)
|
||||
} else {
|
||||
cut_off_buckets(&mut entries, self.req.segment_size as usize)
|
||||
};
|
||||
let (term_doc_count_before_cutoff, mut sum_other_doc_count) = if order_by_sub_aggregation {
|
||||
(0, 0)
|
||||
} else {
|
||||
cut_off_buckets(&mut entries, self.req.segment_size as usize)
|
||||
};
|
||||
|
||||
let inverted_index = agg_with_accessor
|
||||
.inverted_index
|
||||
.str_dict_column
|
||||
.as_ref()
|
||||
.expect("internal error: inverted index not loaded for term aggregation");
|
||||
let term_dict = inverted_index.terms();
|
||||
let term_dict = inverted_index;
|
||||
|
||||
let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
|
||||
let mut buffer = vec![];
|
||||
let mut buffer = String::new();
|
||||
for (term_id, entry) in entries {
|
||||
term_dict
|
||||
.ord_to_term(term_id as u64, &mut buffer)
|
||||
.expect("could not find term");
|
||||
if !term_dict.ord_to_str(term_id as u64, &mut buffer)? {
|
||||
return Err(TantivyError::InternalError(format!(
|
||||
"Couldn't find term_id {} in dict",
|
||||
term_id
|
||||
)));
|
||||
}
|
||||
dict.insert(
|
||||
String::from_utf8(buffer.to_vec())
|
||||
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?,
|
||||
buffer.to_string(),
|
||||
entry.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
|
||||
);
|
||||
}
|
||||
if self.req.min_doc_count == 0 {
|
||||
let mut stream = term_dict.stream()?;
|
||||
// TODO: Handle rev streaming for descending sorting by keys
|
||||
let mut stream = term_dict.dictionary().stream()?;
|
||||
while let Some((key, _ord)) = stream.next() {
|
||||
if dict.len() >= self.req.segment_size as usize {
|
||||
break;
|
||||
}
|
||||
|
||||
let key = std::str::from_utf8(key)
|
||||
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
|
||||
if !dict.contains_key(key) {
|
||||
@@ -446,65 +419,26 @@ impl SegmentTermCollector {
|
||||
#[inline]
|
||||
pub(crate) fn collect_block(
|
||||
&mut self,
|
||||
doc: &[DocId],
|
||||
docs: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) -> crate::Result<()> {
|
||||
let accessor = bucket_with_accessor
|
||||
.accessor
|
||||
.as_multi()
|
||||
.expect("unexpected fast field cardinality");
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
let mut vals1 = vec![];
|
||||
let mut vals2 = vec![];
|
||||
let mut vals3 = vec![];
|
||||
let mut vals4 = vec![];
|
||||
for docs in iter.by_ref() {
|
||||
accessor.get_vals(docs[0], &mut vals1);
|
||||
accessor.get_vals(docs[1], &mut vals2);
|
||||
accessor.get_vals(docs[2], &mut vals3);
|
||||
accessor.get_vals(docs[3], &mut vals4);
|
||||
let accessor = &bucket_with_accessor.accessor;
|
||||
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals1,
|
||||
docs[0],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals2,
|
||||
docs[1],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals3,
|
||||
docs[2],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals4,
|
||||
docs[3],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
for doc in docs {
|
||||
for term_id in accessor.values(*doc) {
|
||||
let entry = self
|
||||
.term_buckets
|
||||
.entries
|
||||
.entry(term_id as u32)
|
||||
.or_insert_with(|| TermBucketEntry::from_blueprint(&self.blueprint));
|
||||
entry.doc_count += 1;
|
||||
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
|
||||
sub_aggregations.collect(*doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
for &doc in iter.remainder() {
|
||||
accessor.get_vals(doc, &mut vals1);
|
||||
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals1,
|
||||
doc,
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
)?;
|
||||
}
|
||||
if force_flush {
|
||||
self.term_buckets
|
||||
.force_flush(&bucket_with_accessor.sub_aggregation)?;
|
||||
@@ -1207,36 +1141,37 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
|
||||
let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect();
|
||||
let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
|
||||
// TODO: re-enable once a memory limit is in place
|
||||
//#[test]
|
||||
// fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
|
||||
// let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect();
|
||||
// let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
|
||||
|
||||
let index = get_test_index_from_terms(true, &terms_per_segment)?;
|
||||
// let index = get_test_index_from_terms(true, &terms_per_segment)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"my_texts".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
|
||||
field: "string_id".to_string(),
|
||||
min_doc_count: Some(0),
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
// let agg_req: Aggregations = vec![(
|
||||
//"my_texts".to_string(),
|
||||
// Aggregation::Bucket(BucketAggregation {
|
||||
// bucket_agg: BucketAggregationType::Terms(TermsAggregation {
|
||||
// field: "string_id".to_string(),
|
||||
// min_doc_count: Some(0),
|
||||
//..Default::default()
|
||||
//}),
|
||||
// sub_aggregation: Default::default(),
|
||||
//}),
|
||||
//)]
|
||||
//.into_iter()
|
||||
//.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None);
|
||||
// let res = exec_request_with_query(agg_req, &index, None);
|
||||
|
||||
assert!(res.is_err());
|
||||
// assert!(res.is_err());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Ok(())
|
||||
//}
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_multi_token_per_doc() -> crate::Result<()> {
|
||||
fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> {
|
||||
let terms = vec!["Hello Hello", "Hallo Hallo"];
|
||||
|
||||
let index = get_test_index_from_terms(true, &[terms])?;
|
||||
@@ -1256,12 +1191,13 @@ mod tests {
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None).unwrap();
|
||||
println!("{}", serde_json::to_string_pretty(&res).unwrap());
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["key"], "hello");
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["key"], "Hallo Hallo");
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 1);
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "hallo");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 2);
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "Hello Hello");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1352,68 +1288,3 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use itertools::Itertools;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::thread_rng;
|
||||
|
||||
use super::*;
|
||||
|
||||
fn get_collector_with_buckets(num_docs: u64) -> TermBuckets {
|
||||
TermBuckets::from_req_and_validate(&Default::default(), num_docs as usize).unwrap()
|
||||
}
|
||||
|
||||
fn get_rand_terms(total_terms: u64, num_terms_returned: u64) -> Vec<u64> {
|
||||
let mut rng = thread_rng();
|
||||
|
||||
let all_terms = (0..total_terms - 1).collect_vec();
|
||||
|
||||
let mut vals = vec![];
|
||||
for _ in 0..num_terms_returned {
|
||||
let val = all_terms.as_slice().choose(&mut rng).unwrap();
|
||||
vals.push(*val);
|
||||
}
|
||||
|
||||
vals
|
||||
}
|
||||
|
||||
fn bench_term_buckets(b: &mut test::Bencher, num_terms: u64, total_terms: u64) {
|
||||
let mut collector = get_collector_with_buckets(total_terms);
|
||||
let vals = get_rand_terms(total_terms, num_terms);
|
||||
let aggregations_with_accessor: AggregationsWithAccessor = Default::default();
|
||||
let bucket_count: BucketCount = BucketCount {
|
||||
bucket_count: Default::default(),
|
||||
max_bucket_count: 1_000_001u32,
|
||||
};
|
||||
b.iter(|| {
|
||||
for &val in &vals {
|
||||
collector
|
||||
.increment_bucket(&[val], 0, &aggregations_with_accessor, &bucket_count, &None)
|
||||
.unwrap();
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_term_buckets_500_of_1_000_000(b: &mut test::Bencher) {
|
||||
bench_term_buckets(b, 500u64, 1_000_000u64)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_term_buckets_1_000_000_of_50_000(b: &mut test::Bencher) {
|
||||
bench_term_buckets(b, 1_000_000u64, 50_000u64)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_term_buckets_1_000_000_of_50(b: &mut test::Bencher) {
|
||||
bench_term_buckets(b, 1_000_000u64, 50u64)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_term_buckets_1_000_000_of_1_000_000(b: &mut test::Bencher) {
|
||||
bench_term_buckets(b, 1_000_000u64, 1_000_000u64)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,10 @@ use super::agg_req::Aggregations;
|
||||
use super::agg_req_with_accessor::AggregationsWithAccessor;
|
||||
use super::agg_result::AggregationResults;
|
||||
use super::intermediate_agg_result::IntermediateAggregationResults;
|
||||
use super::segment_agg_result::SegmentAggregationResultsCollector;
|
||||
use super::segment_agg_result::{
|
||||
build_segment_agg_collector, GenericSegmentAggregationResultsCollector,
|
||||
SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::schema::Schema;
|
||||
@@ -137,7 +140,7 @@ fn merge_fruits(
|
||||
/// `AggregationSegmentCollector` does the aggregation collection on a segment.
|
||||
pub struct AggregationSegmentCollector {
|
||||
aggs_with_accessor: AggregationsWithAccessor,
|
||||
result: SegmentAggregationResultsCollector,
|
||||
result: Box<dyn SegmentAggregationCollector>,
|
||||
error: Option<TantivyError>,
|
||||
}
|
||||
|
||||
@@ -151,8 +154,7 @@ impl AggregationSegmentCollector {
|
||||
) -> crate::Result<Self> {
|
||||
let aggs_with_accessor =
|
||||
get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
|
||||
let result =
|
||||
SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
|
||||
let result = build_segment_agg_collector(&aggs_with_accessor)?;
|
||||
Ok(AggregationSegmentCollector {
|
||||
aggs_with_accessor,
|
||||
result,
|
||||
|
||||
@@ -222,24 +222,23 @@ pub enum IntermediateMetricResult {
|
||||
|
||||
impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
|
||||
fn from(tree: SegmentMetricResultCollector) -> Self {
|
||||
use super::metric::SegmentStatsType;
|
||||
match tree {
|
||||
SegmentMetricResultCollector::Stats(collector) => match collector.collecting_for {
|
||||
super::metric::SegmentStatsType::Average => IntermediateMetricResult::Average(
|
||||
SegmentStatsType::Average => IntermediateMetricResult::Average(
|
||||
IntermediateAverage::from_collector(collector),
|
||||
),
|
||||
super::metric::SegmentStatsType::Count => {
|
||||
SegmentStatsType::Count => {
|
||||
IntermediateMetricResult::Count(IntermediateCount::from_collector(collector))
|
||||
}
|
||||
super::metric::SegmentStatsType::Max => {
|
||||
SegmentStatsType::Max => {
|
||||
IntermediateMetricResult::Max(IntermediateMax::from_collector(collector))
|
||||
}
|
||||
super::metric::SegmentStatsType::Min => {
|
||||
SegmentStatsType::Min => {
|
||||
IntermediateMetricResult::Min(IntermediateMin::from_collector(collector))
|
||||
}
|
||||
super::metric::SegmentStatsType::Stats => {
|
||||
IntermediateMetricResult::Stats(collector.stats)
|
||||
}
|
||||
super::metric::SegmentStatsType::Sum => {
|
||||
SegmentStatsType::Stats => IntermediateMetricResult::Stats(collector.stats),
|
||||
SegmentStatsType::Sum => {
|
||||
IntermediateMetricResult::Sum(IntermediateSum::from_collector(collector))
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
use fastfield_codecs::Column;
|
||||
use columnar::Column;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::aggregation::f64_from_fastfield_u64;
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::{f64_from_fastfield_u64, VecWithNames};
|
||||
use crate::schema::Type;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
@@ -160,27 +166,74 @@ impl SegmentStatsCollector {
|
||||
stats: IntermediateStats::default(),
|
||||
}
|
||||
}
|
||||
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
for docs in iter.by_ref() {
|
||||
let val1 = field.get_val(docs[0]);
|
||||
let val2 = field.get_val(docs[1]);
|
||||
let val3 = field.get_val(docs[2]);
|
||||
let val4 = field.get_val(docs[3]);
|
||||
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
|
||||
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
|
||||
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
|
||||
let val4 = f64_from_fastfield_u64(val4, &self.field_type);
|
||||
pub(crate) fn collect_block(&mut self, docs: &[DocId], field: &Column<u64>) {
|
||||
// TODO special case for Required, Optional column type
|
||||
for doc in docs {
|
||||
for val in field.values(*doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.stats.collect(val1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentAggregationCollector for SegmentStatsCollector {
|
||||
fn into_intermediate_aggregations_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<IntermediateAggregationResults> {
|
||||
let name = agg_with_accessor.metrics.keys[0].to_string();
|
||||
|
||||
let intermediate_metric_result = match self.collecting_for {
|
||||
SegmentStatsType::Average => {
|
||||
IntermediateMetricResult::Average(IntermediateAverage::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Count => {
|
||||
IntermediateMetricResult::Count(IntermediateCount::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Max => {
|
||||
IntermediateMetricResult::Max(IntermediateMax::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Min => {
|
||||
IntermediateMetricResult::Min(IntermediateMin::from_collector(*self))
|
||||
}
|
||||
SegmentStatsType::Stats => IntermediateMetricResult::Stats(self.stats),
|
||||
SegmentStatsType::Sum => {
|
||||
IntermediateMetricResult::Sum(IntermediateSum::from_collector(*self))
|
||||
}
|
||||
};
|
||||
|
||||
let metrics = Some(VecWithNames::from_entries(vec![(
|
||||
name,
|
||||
intermediate_metric_result,
|
||||
)]));
|
||||
|
||||
Ok(IntermediateAggregationResults {
|
||||
metrics,
|
||||
buckets: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<()> {
|
||||
let accessor = &agg_with_accessor.metrics.values[0].accessor;
|
||||
for val in accessor.values(doc) {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.stats.collect(val1);
|
||||
self.stats.collect(val2);
|
||||
self.stats.collect(val3);
|
||||
self.stats.collect(val4);
|
||||
}
|
||||
for &doc in iter.remainder() {
|
||||
let val = field.get_val(doc);
|
||||
let val = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.stats.collect(val);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush_staged_docs(
|
||||
&mut self,
|
||||
_agg_with_accessor: &AggregationsWithAccessor,
|
||||
_force_flush: bool,
|
||||
) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -172,8 +172,8 @@ pub use collector::{
|
||||
AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
|
||||
MAX_BUCKET_COUNT,
|
||||
};
|
||||
use columnar::MonotonicallyMappableToU64;
|
||||
pub(crate) use date::format_date;
|
||||
use fastfield_codecs::MonotonicallyMappableToU64;
|
||||
use itertools::Itertools;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -182,7 +182,7 @@ use crate::schema::Type;
|
||||
/// Represents an associative array `(key => values)` in a very efficient manner.
|
||||
#[derive(Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub(crate) struct VecWithNames<T: Clone> {
|
||||
values: Vec<T>,
|
||||
pub(crate) values: Vec<T>,
|
||||
keys: Vec<String>,
|
||||
}
|
||||
|
||||
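The doc comment above is terse, so here is a minimal sketch of the idea, assuming nothing beyond what the struct shows: keys and values live in two parallel vectors kept in matching order, which trades hash-map lookups for cheap, cache-friendly iteration. The sketch is illustrative only; the real type offers more methods.

```rust
// Simplified sketch of the parallel-vector layout behind `VecWithNames`.
struct VecWithNames<T> {
    keys: Vec<String>,
    values: Vec<T>, // values[i] belongs to keys[i]
}

impl<T> VecWithNames<T> {
    fn from_entries(mut entries: Vec<(String, T)>) -> Self {
        // Sorting by key keeps iteration order deterministic.
        entries.sort_by(|left, right| left.0.cmp(&right.0));
        let (keys, values) = entries.into_iter().unzip();
        VecWithNames { keys, values }
    }

    fn entries(&self) -> impl Iterator<Item = (&str, &T)> + '_ {
        self.keys.iter().map(String::as_str).zip(self.values.iter())
    }
}

fn main() {
    let stats = VecWithNames::from_entries(vec![
        ("max_price".to_string(), 99.0f64),
        ("avg_price".to_string(), 12.5f64),
    ]);
    for (key, value) in stats.entries() {
        println!("{key} => {value}");
    }
}
```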
@@ -248,9 +248,6 @@ impl<T: Clone> VecWithNames<T> {
|
||||
fn values_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ {
|
||||
self.values.iter_mut()
|
||||
}
|
||||
fn entries(&self) -> impl Iterator<Item = (&str, &T)> + '_ {
|
||||
self.keys().zip(self.values.iter())
|
||||
}
|
||||
fn is_empty(&self) -> bool {
|
||||
self.keys.is_empty()
|
||||
}
|
||||
@@ -336,8 +333,9 @@ mod tests {
|
||||
use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
|
||||
use crate::aggregation::segment_agg_result::DOC_BLOCK_SIZE;
|
||||
use crate::aggregation::DistributedAggregationCollector;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::{AllQuery, TermQuery};
|
||||
use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
|
||||
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
|
||||
use crate::{DateTime, Index, Term};
|
||||
|
||||
fn get_avg_req(field_name: &str) -> Aggregation {
|
||||
@@ -432,8 +430,7 @@ mod tests {
|
||||
let text_field = schema_builder.add_text_field("text", text_fieldtype.clone());
|
||||
let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype);
|
||||
let string_field_id = schema_builder.add_text_field("string_id", STRING | FAST);
|
||||
let score_fieldtype =
|
||||
crate::schema::NumericOptions::default().set_fast();
|
||||
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
|
||||
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
|
||||
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
|
||||
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
|
||||
@@ -445,6 +442,7 @@ mod tests {
|
||||
{
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
for values in segment_and_values {
|
||||
for (i, term) in values {
|
||||
let i = *i;
|
||||
@@ -656,13 +654,11 @@ mod tests {
|
||||
let text_field = schema_builder.add_text_field("text", text_fieldtype);
|
||||
let date_field = schema_builder.add_date_field("date", FAST);
|
||||
schema_builder.add_text_field("dummy_text", STRING);
|
||||
let score_fieldtype =
|
||||
crate::schema::NumericOptions::default().set_fast();
|
||||
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
|
||||
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
|
||||
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
|
||||
|
||||
let multivalue =
|
||||
crate::schema::NumericOptions::default().set_fast();
|
||||
let multivalue = crate::schema::NumericOptions::default().set_fast();
|
||||
let scores_field_i64 = schema_builder.add_i64_field("scores_i64", multivalue);
|
||||
|
||||
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
|
||||
@@ -1147,7 +1143,7 @@ mod tests {
|
||||
let agg_res = avg_on_field("dummy_text");
|
||||
assert_eq!(
|
||||
format!("{:?}", agg_res),
|
||||
r#"InvalidArgument("Only fast fields of type f64, u64, i64 are supported, but got Str ")"#
|
||||
r#"InvalidArgument("No numerical fast field found for field: dummy_text")"#
|
||||
);
|
||||
|
||||
let agg_res = avg_on_field("not_exist_field");
|
||||
@@ -1173,11 +1169,14 @@ mod tests {
|
||||
use test::{self, Bencher};
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::bucket::{HistogramAggregation, HistogramBounds, TermsAggregation};
|
||||
use crate::aggregation::bucket::{
|
||||
CustomOrder, HistogramAggregation, HistogramBounds, Order, OrderTarget,
|
||||
TermsAggregation,
|
||||
};
|
||||
use crate::aggregation::metric::StatsAggregation;
|
||||
use crate::query::AllQuery;
|
||||
|
||||
fn get_test_index_bench(merge_segments: bool) -> crate::Result<Index> {
|
||||
fn get_test_index_bench(_merge_segments: bool) -> crate::Result<Index> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_fieldtype = crate::schema::TextOptions::default()
|
||||
.set_indexing_options(
|
||||
@@ -1189,20 +1188,19 @@ mod tests {
|
||||
schema_builder.add_text_field("text_many_terms", STRING | FAST);
|
||||
let text_field_few_terms =
|
||||
schema_builder.add_text_field("text_few_terms", STRING | FAST);
|
||||
let score_fieldtype =
|
||||
crate::schema::NumericOptions::default().set_fast();
|
||||
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
|
||||
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
|
||||
let score_field_f64 =
|
||||
schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
|
||||
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
|
||||
let index = Index::create_from_tempdir(schema_builder.build())?;
|
||||
let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
|
||||
let many_terms_data = (0..15_000)
|
||||
let many_terms_data = (0..150_000)
|
||||
.map(|num| format!("author{}", num))
|
||||
.collect::<Vec<_>>();
|
||||
{
|
||||
let mut rng = thread_rng();
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000)?;
|
||||
// writing the segment
|
||||
for _ in 0..1_000_000 {
|
||||
let val: f64 = rng.gen_range(0.0..1_000_000.0);
|
||||
@@ -1217,14 +1215,6 @@ mod tests {
|
||||
}
|
||||
index_writer.commit()?;
|
||||
}
|
||||
if merge_segments {
|
||||
let segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
|
||||
Ok(index)
|
||||
}
|
||||
@@ -1376,7 +1366,42 @@ mod tests {
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_aggregation_terms_many(b: &mut Bencher) {
|
||||
fn bench_aggregation_terms_many_with_sub_agg(b: &mut Bencher) {
|
||||
let index = get_test_index_bench(false).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let sub_agg_req: Aggregations = vec![(
|
||||
"average_f64".to_string(),
|
||||
Aggregation::Metric(MetricAggregation::Average(
|
||||
AverageAggregation::from_field_name("score_f64".to_string()),
|
||||
)),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"my_texts".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
|
||||
field: "text_many_terms".to_string(),
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: sub_agg_req,
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_aggregation_terms_many2(b: &mut Bencher) {
|
||||
let index = get_test_index_bench(false).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
@@ -1401,6 +1426,36 @@ mod tests {
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_aggregation_terms_many_order_by_term(b: &mut Bencher) {
|
||||
let index = get_test_index_bench(false).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let agg_req: Aggregations = vec![(
|
||||
"my_texts".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
|
||||
field: "text_many_terms".to_string(),
|
||||
order: Some(CustomOrder {
|
||||
order: Order::Desc,
|
||||
target: OrderTarget::Key,
|
||||
}),
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_aggregation_range_only(b: &mut Bencher) {
|
||||
let index = get_test_index_bench(false).unwrap();
|
||||
|
||||
@@ -25,15 +25,89 @@ use crate::{DocId, TantivyError};
|
||||
pub(crate) const DOC_BLOCK_SIZE: usize = 64;
|
||||
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
|
||||
|
||||
#[derive(Clone, PartialEq)]
|
||||
pub(crate) struct SegmentAggregationResultsCollector {
|
||||
pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
|
||||
fn into_intermediate_aggregations_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<IntermediateAggregationResults>;
|
||||
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<()>;
|
||||
|
||||
fn flush_staged_docs(
|
||||
&mut self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
force_flush: bool,
|
||||
) -> crate::Result<()>;
|
||||
}
|
||||
|
||||
pub(crate) trait CollectorClone {
|
||||
fn clone_box(&self) -> Box<dyn SegmentAggregationCollector>;
|
||||
}
|
||||
|
||||
impl<T> CollectorClone for T
|
||||
where T: 'static + SegmentAggregationCollector + Clone
|
||||
{
|
||||
fn clone_box(&self) -> Box<dyn SegmentAggregationCollector> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Box<dyn SegmentAggregationCollector> {
|
||||
fn clone(&self) -> Box<dyn SegmentAggregationCollector> {
|
||||
self.clone_box()
|
||||
}
|
||||
}
|
||||
|
||||
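`Clone` is not object-safe, so the boxed `SegmentAggregationCollector` trait objects above get cloning through the `CollectorClone` helper trait. A self-contained sketch of the same pattern, with invented names unrelated to tantivy:

```rust
// Minimal clone-box pattern: clone a boxed trait object without making the
// trait itself depend on `Clone` (which would make it non-object-safe).
trait Collector: CollectorClone {
    fn collect(&mut self, doc: u32);
}

trait CollectorClone {
    fn clone_box(&self) -> Box<dyn Collector>;
}

// Blanket impl: every `Clone` collector knows how to clone itself into a box.
impl<T> CollectorClone for T
where T: 'static + Collector + Clone
{
    fn clone_box(&self) -> Box<dyn Collector> {
        Box::new(self.clone())
    }
}

impl Clone for Box<dyn Collector> {
    fn clone(&self) -> Box<dyn Collector> {
        self.clone_box()
    }
}

#[derive(Clone)]
struct CountCollector {
    count: u64,
}

impl Collector for CountCollector {
    fn collect(&mut self, _doc: u32) {
        self.count += 1;
    }
}

fn main() {
    let collector: Box<dyn Collector> = Box::new(CountCollector { count: 0 });
    let _cloned = collector.clone(); // resolved through `clone_box`
}
```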
pub(crate) fn build_segment_agg_collector(
|
||||
req: &AggregationsWithAccessor,
|
||||
) -> crate::Result<Box<dyn SegmentAggregationCollector>> {
|
||||
// Single metric special case
|
||||
if req.buckets.is_empty() && req.metrics.len() == 1 {
|
||||
let req = &req.metrics.values[0];
|
||||
let stats_collector = match &req.metric {
|
||||
MetricAggregation::Average(AverageAggregation { .. }) => {
|
||||
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Average)
|
||||
}
|
||||
MetricAggregation::Count(CountAggregation { .. }) => {
|
||||
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Count)
|
||||
}
|
||||
MetricAggregation::Max(MaxAggregation { .. }) => {
|
||||
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Max)
|
||||
}
|
||||
MetricAggregation::Min(MinAggregation { .. }) => {
|
||||
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Min)
|
||||
}
|
||||
MetricAggregation::Stats(StatsAggregation { .. }) => {
|
||||
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Stats)
|
||||
}
|
||||
MetricAggregation::Sum(SumAggregation { .. }) => {
|
||||
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Sum)
|
||||
}
|
||||
};
|
||||
|
||||
return Ok(Box::new(stats_collector));
|
||||
}
|
||||
|
||||
let agg = GenericSegmentAggregationResultsCollector::from_req_and_validate(req)?;
|
||||
Ok(Box::new(agg))
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// The GenericSegmentAggregationResultsCollector is the generic version of the collector, which
|
||||
/// can handle arbitrary complexity of sub-aggregations. Ideally we never have to pick this one
|
||||
/// and can instead provide specialized versions that remove some of its overhead.
|
||||
pub(crate) struct GenericSegmentAggregationResultsCollector {
|
||||
pub(crate) metrics: Option<VecWithNames<SegmentMetricResultCollector>>,
|
||||
pub(crate) buckets: Option<VecWithNames<SegmentBucketResultCollector>>,
|
||||
staged_docs: DocBlock,
|
||||
num_staged_docs: usize,
|
||||
}
|
||||
|
||||
impl Default for SegmentAggregationResultsCollector {
|
||||
impl Default for GenericSegmentAggregationResultsCollector {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
metrics: Default::default(),
|
||||
@@ -44,7 +118,7 @@ impl Default for SegmentAggregationResultsCollector {
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for SegmentAggregationResultsCollector {
|
||||
impl Debug for GenericSegmentAggregationResultsCollector {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("SegmentAggregationResultsCollector")
|
||||
.field("metrics", &self.metrics)
|
||||
@@ -55,9 +129,9 @@ impl Debug for SegmentAggregationResultsCollector {
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentAggregationResultsCollector {
|
||||
pub fn into_intermediate_aggregations_result(
|
||||
self,
|
||||
impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
|
||||
fn into_intermediate_aggregations_result(
|
||||
self: Box<Self>,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<IntermediateAggregationResults> {
|
||||
let buckets = if let Some(buckets) = self.buckets {
|
||||
@@ -75,47 +149,7 @@ impl SegmentAggregationResultsCollector {
|
||||
Ok(IntermediateAggregationResults { metrics, buckets })
|
||||
}
|
||||
|
||||
pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
|
||||
let buckets = req
|
||||
.buckets
|
||||
.entries()
|
||||
.map(|(key, req)| {
|
||||
Ok((
|
||||
key.to_string(),
|
||||
SegmentBucketResultCollector::from_req_and_validate(req)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
let metrics = req
|
||||
.metrics
|
||||
.entries()
|
||||
.map(|(key, req)| {
|
||||
Ok((
|
||||
key.to_string(),
|
||||
SegmentMetricResultCollector::from_req_and_validate(req)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
let metrics = if metrics.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(VecWithNames::from_entries(metrics))
|
||||
};
|
||||
let buckets = if buckets.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(VecWithNames::from_entries(buckets))
|
||||
};
|
||||
Ok(SegmentAggregationResultsCollector {
|
||||
metrics,
|
||||
buckets,
|
||||
staged_docs: [0; DOC_BLOCK_SIZE],
|
||||
num_staged_docs: 0,
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn collect(
|
||||
fn collect(
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
@@ -128,7 +162,7 @@ impl SegmentAggregationResultsCollector {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn flush_staged_docs(
|
||||
fn flush_staged_docs(
|
||||
&mut self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
force_flush: bool,
|
||||
@@ -162,6 +196,66 @@ impl SegmentAggregationResultsCollector {
|
||||
}
|
||||
}
|
||||
|
||||
impl GenericSegmentAggregationResultsCollector {
|
||||
pub fn into_intermediate_aggregations_result(
|
||||
self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<IntermediateAggregationResults> {
|
||||
let buckets = if let Some(buckets) = self.buckets {
|
||||
let entries = buckets
|
||||
.into_iter()
|
||||
.zip(agg_with_accessor.buckets.values())
|
||||
.map(|((key, bucket), acc)| Ok((key, bucket.into_intermediate_bucket_result(acc)?)))
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
Some(VecWithNames::from_entries(entries))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let metrics = self.metrics.map(VecWithNames::from_other);
|
||||
|
||||
Ok(IntermediateAggregationResults { metrics, buckets })
|
||||
}
|
||||
|
||||
pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
|
||||
let buckets = req
|
||||
.buckets
|
||||
.iter()
|
||||
.map(|(key, req)| {
|
||||
Ok((
|
||||
key.to_string(),
|
||||
SegmentBucketResultCollector::from_req_and_validate(req)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
let metrics = req
|
||||
.metrics
|
||||
.iter()
|
||||
.map(|(key, req)| {
|
||||
Ok((
|
||||
key.to_string(),
|
||||
SegmentMetricResultCollector::from_req_and_validate(req)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
let metrics = if metrics.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(VecWithNames::from_entries(metrics))
|
||||
};
|
||||
let buckets = if buckets.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(VecWithNames::from_entries(buckets))
|
||||
};
|
||||
Ok(GenericSegmentAggregationResultsCollector {
|
||||
metrics,
|
||||
buckets,
|
||||
staged_docs: [0; DOC_BLOCK_SIZE],
|
||||
num_staged_docs: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub(crate) enum SegmentMetricResultCollector {
|
||||
Stats(SegmentStatsCollector),
|
||||
@@ -205,7 +299,7 @@ impl SegmentMetricResultCollector {
|
||||
pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) {
|
||||
match self {
|
||||
SegmentMetricResultCollector::Stats(stats_collector) => {
|
||||
stats_collector.collect_block(doc, &*metric.accessor);
|
||||
stats_collector.collect_block(doc, &metric.accessor);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -215,7 +309,7 @@ impl SegmentMetricResultCollector {
|
||||
/// segments.
|
||||
/// The typical structure of Map<Key, Bucket> is not suitable during collection for performance
|
||||
/// reasons.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) enum SegmentBucketResultCollector {
|
||||
Range(SegmentRangeCollector),
|
||||
Histogram(Box<SegmentHistogramCollector>),
|
||||
@@ -243,14 +337,7 @@ impl SegmentBucketResultCollector {
|
||||
pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
|
||||
match &req.bucket_agg {
|
||||
BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new(
|
||||
SegmentTermCollector::from_req_and_validate(
|
||||
terms_req,
|
||||
&req.sub_aggregation,
|
||||
req.field_type,
|
||||
req.accessor
|
||||
.as_multi()
|
||||
.expect("unexpected fast field cardinality"),
|
||||
)?,
|
||||
SegmentTermCollector::from_req_and_validate(terms_req, &req.sub_aggregation)?,
|
||||
))),
|
||||
BucketAggregationType::Range(range_req) => {
|
||||
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
|
||||
@@ -265,9 +352,7 @@ impl SegmentBucketResultCollector {
|
||||
histogram,
|
||||
&req.sub_aggregation,
|
||||
req.field_type,
|
||||
req.accessor
|
||||
.as_single()
|
||||
.expect("unexpected fast field cardinality"),
|
||||
&req.accessor,
|
||||
)?,
|
||||
))),
|
||||
}
|
||||
|
||||
@@ -150,7 +150,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
|
||||
/// }
|
||||
///
|
||||
/// {
|
||||
/// let mut facet_collector = FacetCollector::for_field(facet);
|
||||
/// let mut facet_collector = FacetCollector::for_field("facet");
|
||||
/// facet_collector.add_facet("/category/fiction");
|
||||
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
|
||||
///
|
||||
@@ -829,7 +829,7 @@ mod bench {
|
||||
let reader = index.reader().unwrap();
|
||||
b.iter(|| {
|
||||
let searcher = reader.searcher();
|
||||
let facet_collector = FacetCollector::for_field(facet_field);
|
||||
let facet_collector = FacetCollector::for_field("facet");
|
||||
searcher.search(&AllQuery, &facet_collector).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
@@ -12,8 +12,7 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
|
||||
use columnar::{DynamicColumn, HasAssociatedColumnType};
|
||||
use fastfield_codecs::Column;
|
||||
use columnar::{ColumnValues, DynamicColumn, HasAssociatedColumnType};
|
||||
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::schema::Field;
|
||||
@@ -121,7 +120,7 @@ where
|
||||
|
||||
let fast_field_reader = segment_reader
|
||||
.fast_fields()
|
||||
.typed_column_first_or_default(schema.get_field_name(self.field))?;
|
||||
.column_first_or_default(schema.get_field_name(self.field))?;
|
||||
|
||||
let segment_collector = self
|
||||
.collector
|
||||
@@ -152,7 +151,7 @@ where
|
||||
TPredicate: 'static,
|
||||
DynamicColumn: Into<Option<columnar::Column<TPredicateValue>>>,
|
||||
{
|
||||
fast_field_reader: Arc<dyn Column<TPredicateValue>>,
|
||||
fast_field_reader: Arc<dyn ColumnValues<TPredicateValue>>,
|
||||
segment_collector: TSegmentCollector,
|
||||
predicate: TPredicate,
|
||||
t_predicate_value: PhantomData<TPredicateValue>,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use columnar::ColumnValues;
|
||||
use fastdivide::DividerU64;
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::fastfield::{FastFieldNotAvailableError, FastValue};
|
||||
@@ -87,7 +87,7 @@ impl HistogramComputer {
|
||||
}
|
||||
pub struct SegmentHistogramCollector {
|
||||
histogram_computer: HistogramComputer,
|
||||
column_u64: Arc<dyn Column<u64>>,
|
||||
column_u64: Arc<dyn ColumnValues<u64>>,
|
||||
}
|
||||
|
||||
impl SegmentCollector for SegmentHistogramCollector {
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use columnar::{BytesColumn, ColumnValues};
|
||||
|
||||
use super::*;
|
||||
use crate::collector::{Count, FilterCollector, TopDocs};
|
||||
use crate::core::SegmentReader;
|
||||
use crate::query::{AllQuery, QueryParser};
|
||||
use crate::schema::{Field, Schema, FAST, TEXT};
|
||||
use crate::schema::{Schema, FAST, TEXT};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::{doc, DateTime, DocAddress, DocId, Document, Index, Score, Searcher, SegmentOrdinal};
|
||||
@@ -160,7 +160,7 @@ pub struct FastFieldTestCollector {
|
||||
|
||||
pub struct FastFieldSegmentCollector {
|
||||
vals: Vec<u64>,
|
||||
reader: Arc<dyn Column<u64>>,
|
||||
reader: Arc<dyn columnar::ColumnValues>,
|
||||
}
|
||||
|
||||
impl FastFieldTestCollector {
|
||||
@@ -212,62 +212,73 @@ impl SegmentCollector for FastFieldSegmentCollector {
|
||||
}
|
||||
}
|
||||
|
||||
// /// Collects in order all of the fast field bytes for all of the
|
||||
// /// docs in the `DocSet`
|
||||
// ///
|
||||
// /// This collector is mainly useful for tests.
|
||||
// pub struct BytesFastFieldTestCollector {
|
||||
// field: Field,
|
||||
// }
|
||||
/// Collects in order all of the fast field bytes for all of the
|
||||
/// docs in the `DocSet`
|
||||
///
|
||||
/// This collector is mainly useful for tests.
|
||||
/// It is very slow.
|
||||
pub struct BytesFastFieldTestCollector {
|
||||
field: String,
|
||||
}
|
||||
|
||||
// pub struct BytesFastFieldSegmentCollector {
|
||||
// vals: Vec<u8>,
|
||||
// reader: BytesFastFieldReader,
|
||||
// }
|
||||
pub struct BytesFastFieldSegmentCollector {
|
||||
vals: Vec<u8>,
|
||||
column_opt: Option<BytesColumn>,
|
||||
buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
// impl BytesFastFieldTestCollector {
|
||||
// pub fn for_field(field: Field) -> BytesFastFieldTestCollector {
|
||||
// BytesFastFieldTestCollector { field }
|
||||
// }
|
||||
// }
|
||||
impl BytesFastFieldTestCollector {
|
||||
pub fn for_field(field: impl ToString) -> BytesFastFieldTestCollector {
|
||||
BytesFastFieldTestCollector {
|
||||
field: field.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// impl Collector for BytesFastFieldTestCollector {
|
||||
// type Fruit = Vec<u8>;
|
||||
// type Child = BytesFastFieldSegmentCollector;
|
||||
impl Collector for BytesFastFieldTestCollector {
|
||||
type Fruit = Vec<u8>;
|
||||
type Child = BytesFastFieldSegmentCollector;
|
||||
|
||||
// fn for_segment(
|
||||
// &self,
|
||||
// _segment_local_id: u32,
|
||||
// segment_reader: &SegmentReader,
|
||||
// ) -> crate::Result<BytesFastFieldSegmentCollector> {
|
||||
// let reader = segment_reader.fast_fields().bytes(self.field)?;
|
||||
// Ok(BytesFastFieldSegmentCollector {
|
||||
// vals: Vec::new(),
|
||||
// reader,
|
||||
// })
|
||||
// }
|
||||
fn for_segment(
|
||||
&self,
|
||||
_segment_local_id: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<BytesFastFieldSegmentCollector> {
|
||||
let column_opt = segment_reader.fast_fields().bytes(&self.field)?;
|
||||
Ok(BytesFastFieldSegmentCollector {
|
||||
vals: Vec::new(),
|
||||
column_opt,
|
||||
buffer: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
// fn requires_scoring(&self) -> bool {
|
||||
// false
|
||||
// }
|
||||
fn requires_scoring(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
// fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> {
|
||||
// Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
|
||||
// }
|
||||
// }
|
||||
fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> {
|
||||
Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
|
||||
}
|
||||
}
|
||||
|
||||
// impl SegmentCollector for BytesFastFieldSegmentCollector {
|
||||
// type Fruit = Vec<u8>;
|
||||
impl SegmentCollector for BytesFastFieldSegmentCollector {
|
||||
type Fruit = Vec<u8>;
|
||||
|
||||
// fn collect(&mut self, doc: u32, _score: Score) {
|
||||
// let data = self.reader.get_bytes(doc);
|
||||
// self.vals.extend(data);
|
||||
// }
|
||||
fn collect(&mut self, doc: DocId, _score: Score) {
|
||||
if let Some(column) = self.column_opt.as_ref() {
|
||||
for term_ord in column.term_ords(doc) {
|
||||
let (vals, buffer) = (&mut self.vals, &mut self.buffer);
|
||||
if column.ord_to_bytes(term_ord, buffer).unwrap() {
|
||||
vals.extend(&buffer[..]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fn harvest(self) -> <Self as SegmentCollector>::Fruit {
|
||||
// self.vals
|
||||
// }
|
||||
// }
|
||||
fn harvest(self) -> <Self as SegmentCollector>::Fruit {
|
||||
self.vals
|
||||
}
|
||||
}
|
||||
|
||||
fn make_test_searcher() -> crate::Result<Searcher> {
|
||||
let schema = Schema::builder().build();
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use columnar::ColumnValues;
|
||||
|
||||
use super::Collector;
|
||||
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
|
||||
@@ -14,7 +14,6 @@ use crate::collector::{
|
||||
};
|
||||
use crate::fastfield::{FastFieldNotAvailableError, FastValue};
|
||||
use crate::query::Weight;
|
||||
use crate::schema::Field;
|
||||
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
|
||||
|
||||
struct FastFieldConvertCollector<
|
||||
@@ -133,7 +132,7 @@ impl fmt::Debug for TopDocs {
|
||||
}
|
||||
|
||||
struct ScorerByFastFieldReader {
|
||||
sort_column: Arc<dyn Column<u64>>,
|
||||
sort_column: Arc<dyn ColumnValues<u64>>,
|
||||
}
|
||||
|
||||
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
|
||||
@@ -236,7 +235,6 @@ impl TopDocs {
|
||||
/// # use tantivy::query::{Query, QueryParser};
|
||||
/// use tantivy::Searcher;
|
||||
/// use tantivy::collector::TopDocs;
|
||||
/// use tantivy::schema::Field;
|
||||
///
|
||||
/// # fn main() -> tantivy::Result<()> {
|
||||
/// # let mut schema_builder = Schema::builder();
|
||||
@@ -253,7 +251,7 @@ impl TopDocs {
|
||||
/// # index_writer.commit()?;
|
||||
/// # let reader = index.reader()?;
|
||||
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
|
||||
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
|
||||
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query)?;
|
||||
/// # assert_eq!(top_docs,
|
||||
/// # vec![(97u64, DocAddress::new(0u32, 1)),
|
||||
/// # (80u64, DocAddress::new(0u32, 3))]);
|
||||
@@ -263,8 +261,7 @@ impl TopDocs {
|
||||
/// /// collects the top 10 documents, order by the u64-`field`
|
||||
/// /// given in argument.
|
||||
/// fn docs_sorted_by_rating(searcher: &Searcher,
|
||||
/// query: &dyn Query,
|
||||
/// rating_field: Field)
|
||||
/// query: &dyn Query)
|
||||
/// -> tantivy::Result<Vec<(u64, DocAddress)>> {
|
||||
///
|
||||
/// // This is where we build our topdocs collector
|
||||
@@ -272,7 +269,7 @@ impl TopDocs {
|
||||
/// // Note the `rating_field` needs to be a FAST field here.
|
||||
/// let top_books_by_rating = TopDocs
|
||||
/// ::with_limit(10)
|
||||
/// .order_by_u64_field(rating_field);
|
||||
/// .order_by_u64_field("rating");
|
||||
///
|
||||
/// // ... and here are our documents. Note this is a simple vec.
|
||||
/// // The `u64` in the pair is the value of our fast field for
|
||||
@@ -324,22 +321,21 @@ impl TopDocs {
|
||||
/// # use tantivy::query::{Query, AllQuery};
|
||||
/// use tantivy::Searcher;
|
||||
/// use tantivy::collector::TopDocs;
|
||||
/// use tantivy::schema::Field;
|
||||
///
|
||||
/// # fn main() -> tantivy::Result<()> {
|
||||
/// # let mut schema_builder = Schema::builder();
|
||||
/// # let title = schema_builder.add_text_field("company", TEXT);
|
||||
/// # let rating = schema_builder.add_i64_field("revenue", FAST);
|
||||
/// # let revenue = schema_builder.add_i64_field("revenue", FAST);
|
||||
/// # let schema = schema_builder.build();
|
||||
/// #
|
||||
/// # let index = Index::create_in_ram(schema);
|
||||
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||
/// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64))?;
|
||||
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64))?;
|
||||
/// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64))?;
|
||||
/// # index_writer.add_document(doc!(title => "MadCow Inc.", revenue => 92_000_000i64))?;
|
||||
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", revenue => 119_000_000i64))?;
|
||||
/// # index_writer.add_document(doc!(title => "Declining Cow", revenue => -63_000_000i64))?;
|
||||
/// # assert!(index_writer.commit().is_ok());
|
||||
/// # let reader = index.reader()?;
|
||||
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
|
||||
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, "revenue")?;
|
||||
/// # assert_eq!(top_docs,
|
||||
/// # vec![(119_000_000i64, DocAddress::new(0, 1)),
|
||||
/// # (92_000_000i64, DocAddress::new(0, 0))]);
|
||||
@@ -350,7 +346,7 @@ impl TopDocs {
|
||||
/// /// given in argument.
|
||||
/// fn docs_sorted_by_revenue(searcher: &Searcher,
|
||||
/// query: &dyn Query,
|
||||
/// revenue_field: Field)
|
||||
/// revenue_field: &str)
|
||||
/// -> tantivy::Result<Vec<(i64, DocAddress)>> {
|
||||
///
|
||||
/// // This is where we build our topdocs collector
|
||||
@@ -359,7 +355,7 @@ impl TopDocs {
|
||||
/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
|
||||
/// let top_company_by_revenue = TopDocs
|
||||
/// ::with_limit(2)
|
||||
/// .order_by_fast_field(revenue_field);
|
||||
/// .order_by_fast_field("revenue");
|
||||
///
|
||||
/// // ... and here are our documents. Note this is a simple vec.
|
||||
/// // The `i64` in the pair is the value of our fast field for
|
||||
|
||||
@@ -905,12 +905,14 @@ mod tests {
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let index = Index::create(directory.clone(), schema, IndexSettings::default())?;
|
||||
|
||||
let mut writer = index.writer_with_num_threads(8, 24_000_000).unwrap();
|
||||
for i in 0u64..8_000u64 {
|
||||
writer.add_document(doc!(field => i))?;
|
||||
let mut writer = index.writer_with_num_threads(1, 32_000_000).unwrap();
|
||||
for _seg in 0..8 {
|
||||
for i in 0u64..1_000u64 {
|
||||
writer.add_document(doc!(field => i))?;
|
||||
}
|
||||
writer.commit()?;
|
||||
}
|
||||
|
||||
writer.commit()?;
|
||||
let mem_right_after_commit = directory.total_mem_usage();
|
||||
|
||||
let reader = index
|
||||
|
||||
@@ -249,7 +249,7 @@ impl SearcherInner {
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
doc_store_cache_size: usize,
|
||||
doc_store_cache_num_blocks: usize,
|
||||
) -> io::Result<SearcherInner> {
|
||||
assert_eq!(
|
||||
&segment_readers
|
||||
@@ -261,7 +261,7 @@ impl SearcherInner {
|
||||
);
|
||||
let store_readers: Vec<StoreReader> = segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
|
||||
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_num_blocks))
|
||||
.collect::<io::Result<Vec<_>>>()?;
|
||||
|
||||
Ok(SearcherInner {
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::directory::{CompositeFile, FileSlice};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders};
|
||||
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
|
||||
use crate::schema::{Field, FieldType, IndexRecordOption, Schema, Type};
|
||||
use crate::schema::{Field, IndexRecordOption, Schema, Type};
|
||||
use crate::space_usage::SegmentSpaceUsage;
|
||||
use crate::store::StoreReader;
|
||||
use crate::termdict::TermDictionary;
|
||||
@@ -99,7 +99,7 @@ impl SegmentReader {
|
||||
"`{field_name}` is not a facet field.`"
|
||||
)));
|
||||
}
|
||||
let Some(facet_column) = self.fast_fields().str_column_opt(field_name)? else {
|
||||
let Some(facet_column) = self.fast_fields().str(field_name)? else {
|
||||
panic!("Facet Field `{field_name}` is missing. This should not happen");
|
||||
};
|
||||
Ok(FacetReader::new(facet_column))
|
||||
@@ -128,9 +128,12 @@ impl SegmentReader {
|
||||
&self.fieldnorm_readers
|
||||
}
|
||||
|
||||
/// Accessor to the segment's `StoreReader`.
|
||||
pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone(), cache_size)
|
||||
/// Accessor to the segment's [`StoreReader`](crate::store::StoreReader).
|
||||
///
|
||||
/// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
|
||||
/// The size of blocks is configurable, this should be reflexted in the
|
||||
pub fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone(), cache_num_blocks)
|
||||
}
|
||||
|
||||
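For context, a hedged usage sketch of the renamed parameter; only `get_store_reader(cache_num_blocks)` comes from the change above, the surrounding `Searcher`/`DocAddress` plumbing is an assumption about the calling code:

```rust
use tantivy::{DocAddress, Document, Searcher};

// Illustrative only: fetch one stored document, keeping up to 32 decompressed
// doc-store blocks in this segment reader's LRU cache.
fn fetch_doc(searcher: &Searcher, doc_address: DocAddress) -> tantivy::Result<Document> {
    let segment_reader = searcher.segment_reader(doc_address.segment_ord);
    let store_reader = segment_reader.get_store_reader(32)?;
    let doc = store_reader.get(doc_address.doc_id)?;
    Ok(doc)
}
```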
/// Open a new segment for reading.
|
||||
|
||||
@@ -22,7 +22,7 @@ impl FileAddr {
|
||||
}
|
||||
|
||||
impl BinarySerializable for FileAddr {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
|
||||
self.field.serialize(writer)?;
|
||||
VInt(self.idx as u64).serialize(writer)?;
|
||||
Ok(())
|
||||
|
||||
@@ -21,15 +21,15 @@
|
||||
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
pub use columnar::Column;
|
||||
use columnar::MonotonicallyMappableToU64;
|
||||
pub use fastfield_codecs::Column;
|
||||
|
||||
pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
|
||||
pub use self::error::{FastFieldNotAvailableError, Result};
|
||||
pub use self::facet_reader::FacetReader;
|
||||
pub use self::readers::FastFieldReaders;
|
||||
pub use self::writer::FastFieldsWriter;
|
||||
use crate::schema::{Type, Value};
|
||||
use crate::schema::Type;
|
||||
use crate::DateTime;
|
||||
|
||||
mod alive_bitset;
|
||||
@@ -115,33 +115,13 @@ impl columnar::MonotonicallyMappableToU64 for DateTime {
|
||||
}
|
||||
}
|
||||
|
||||
fn unexpected_value(expected: &str, actual: &Value) -> crate::TantivyError {
|
||||
crate::TantivyError::SchemaError(format!(
|
||||
"Expected a {:?} in fast field, but got {:?}",
|
||||
expected, actual
|
||||
))
|
||||
}
|
||||
|
||||
fn value_to_u64(value: &Value) -> crate::Result<u64> {
|
||||
let value = match value {
|
||||
Value::U64(val) => val.to_u64(),
|
||||
Value::I64(val) => val.to_u64(),
|
||||
Value::F64(val) => val.to_u64(),
|
||||
Value::Bool(val) => val.to_u64(),
|
||||
Value::Date(val) => val.to_u64(),
|
||||
_ => return Err(unexpected_value("u64/i64/f64/bool/date", value)),
|
||||
};
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use columnar::Column;
|
||||
use common::{HasLen, TerminatingWrite};
|
||||
use once_cell::sync::Lazy;
|
||||
use rand::prelude::SliceRandom;
|
||||
@@ -149,7 +129,7 @@ mod tests {
|
||||
use rand::{Rng, SeedableRng};
|
||||
|
||||
use super::*;
|
||||
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
|
||||
use crate::directory::{Directory, RamDirectory, WritePtr};
|
||||
use crate::merge_policy::NoMergePolicy;
|
||||
use crate::schema::{
|
||||
Document, Facet, FacetOptions, Field, Schema, SchemaBuilder, FAST, INDEXED, STRING, TEXT,
|
||||
@@ -191,7 +171,7 @@ mod tests {
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
|
||||
assert_eq!(file.len(), 157);
|
||||
assert_eq!(file.len(), 161);
|
||||
let fast_field_readers = FastFieldReaders::open(file).unwrap();
|
||||
let column = fast_field_readers.u64("field").unwrap();
|
||||
assert_eq!(column.get_val(0), 13u64);
|
||||
@@ -238,7 +218,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 185);
|
||||
assert_eq!(file.len(), 189);
|
||||
let fast_field_readers = FastFieldReaders::open(file).unwrap();
|
||||
let col = fast_field_readers.u64("field").unwrap();
|
||||
assert_eq!(col.get_val(0), 4u64);
|
||||
@@ -268,7 +248,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 158);
|
||||
assert_eq!(file.len(), 162);
|
||||
let fast_field_readers = FastFieldReaders::open(file).unwrap();
|
||||
let fast_field_reader = fast_field_readers.u64("field").unwrap();
|
||||
for doc in 0..10_000 {
|
||||
@@ -297,7 +277,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 80166);
|
||||
assert_eq!(file.len(), 4557);
|
||||
{
|
||||
let fast_field_readers = FastFieldReaders::open(file).unwrap();
|
||||
let col = fast_field_readers.u64("field").unwrap();
|
||||
@@ -327,7 +307,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 329_usize);
|
||||
assert_eq!(file.len(), 333_usize);
|
||||
|
||||
{
|
||||
let fast_field_readers = FastFieldReaders::open(file).unwrap();
|
||||
@@ -438,39 +418,38 @@ mod tests {
|
||||
test_intfastfield_permutation_with_data(permutation);
|
||||
}
|
||||
|
||||
// TODO: re-enable once segment merging is available again.
|
||||
// #[test]
|
||||
// fn test_merge_missing_date_fast_field() {
|
||||
// let mut schema_builder = Schema::builder();
|
||||
// let date_field = schema_builder.add_date_field("date", FAST);
|
||||
// let schema = schema_builder.build();
|
||||
// let index = Index::create_in_ram(schema);
|
||||
// let mut index_writer = index.writer_for_tests().unwrap();
|
||||
// index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
// index_writer
|
||||
// .add_document(doc!(date_field =>DateTime::from_utc(OffsetDateTime::now_utc())))
|
||||
// .unwrap();
|
||||
// index_writer.commit().unwrap();
|
||||
// index_writer.add_document(doc!()).unwrap();
|
||||
// index_writer.commit().unwrap();
|
||||
// let reader = index.reader().unwrap();
|
||||
// let segment_ids: Vec<SegmentId> = reader
|
||||
// .searcher()
|
||||
// .segment_readers()
|
||||
// .iter()
|
||||
// .map(SegmentReader::segment_id)
|
||||
// .collect();
|
||||
// assert_eq!(segment_ids.len(), 2);
|
||||
// index_writer.merge(&segment_ids[..]).wait().unwrap();
|
||||
// reader.reload().unwrap();
|
||||
// assert_eq!(reader.searcher().segment_readers().len(), 1);
|
||||
// }
|
||||
#[test]
|
||||
fn test_merge_missing_date_fast_field() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let date_field = schema_builder.add_date_field("date", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer
|
||||
.add_document(doc!(date_field =>DateTime::from_utc(OffsetDateTime::now_utc())))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let segment_ids: Vec<SegmentId> = reader
|
||||
.searcher()
|
||||
.segment_readers()
|
||||
.iter()
|
||||
.map(SegmentReader::segment_id)
|
||||
.collect();
|
||||
assert_eq!(segment_ids.len(), 2);
|
||||
index_writer.merge(&segment_ids[..]).wait().unwrap();
|
||||
reader.reload().unwrap();
|
||||
assert_eq!(reader.searcher().segment_readers().len(), 1);
|
||||
}
|
||||
|
||||
// fn get_vals_for_docs(column: &columnar::Column<u64>, docs: Range<u32>) -> Vec<u64> {
|
||||
// docs.into_iter()
|
||||
// .flat_map(|doc| column.values(doc))
|
||||
// .collect()
|
||||
// }
|
||||
fn get_vals_for_docs(column: &Column<u64>, docs: Range<u32>) -> Vec<u64> {
|
||||
docs.into_iter()
|
||||
.flat_map(|doc| column.values(doc))
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_fastfield() {
|
||||
@@ -526,57 +505,61 @@ mod tests {
|
||||
assert!(str_column.ord_to_str(0, &mut str_term).unwrap());
|
||||
assert_eq!("AAAAA", &str_term);
|
||||
|
||||
// let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
// assert_eq!(inverted_index.terms().num_terms(), 3);
|
||||
// let mut bytes = vec![];
|
||||
// assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
|
||||
// assert_eq!(bytes, "aaaaa".as_bytes());
|
||||
// }
|
||||
let inverted_index = segment_reader.inverted_index(text_field).unwrap();
|
||||
assert_eq!(inverted_index.terms().num_terms(), 3);
|
||||
let mut bytes = vec![];
|
||||
assert!(inverted_index.terms().ord_to_term(0, &mut bytes).unwrap());
|
||||
assert_eq!(bytes, "aaaaa".as_bytes());
|
||||
}
|
||||
|
||||
// {
|
||||
// // second segment
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
{
|
||||
// second segment
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "AAAAA", // term_ord 0
|
||||
// ))?;
|
||||
index_writer
|
||||
.add_document(doc!(
|
||||
text_field => "AAAAA", // term_ord 0
|
||||
))
|
||||
.unwrap();
|
||||
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "CCCCC AAAAA", // term_ord 1, after merge 2
|
||||
// ))?;
|
||||
index_writer
|
||||
.add_document(doc!(
|
||||
text_field => "CCCCC AAAAA", // term_ord 1, after merge 2
|
||||
))
|
||||
.unwrap();
|
||||
|
||||
// index_writer.add_document(doc!())?;
|
||||
// index_writer.commit()?;
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
// let reader = index.reader()?;
|
||||
// let searcher = reader.searcher();
|
||||
// assert_eq!(searcher.segment_readers().len(), 2);
|
||||
// let segment_reader = searcher.segment_reader(1);
|
||||
// let fast_fields = segment_reader.fast_fields();
|
||||
// let text_fast_field = fast_fields.u64s("text").unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 2);
|
||||
let segment_reader = searcher.segment_reader(1);
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let text_fast_field = fast_fields.str("text").unwrap().unwrap();
|
||||
|
||||
// assert_eq!(get_vals_for_docs(&text_fast_field, 0..3), vec![0, 1, 0]);
|
||||
assert_eq!(&get_vals_for_docs(&text_fast_field.ords(), 0..2), &[0, 1]);
|
||||
}
|
||||
|
||||
// TODO uncomment once merging is available
|
||||
// Merging the segments
|
||||
// {
|
||||
// let segment_ids = index.searchable_segment_ids()?;
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
// index_writer.merge(&segment_ids).wait()?;
|
||||
// index_writer.wait_merging_threads()?;
|
||||
// }
|
||||
//
|
||||
// let reader = index.reader()?;
|
||||
// let searcher = reader.searcher();
|
||||
// let segment_reader = searcher.segment_reader(0);
|
||||
// let fast_fields = segment_reader.fast_fields();
|
||||
// let text_fast_field = fast_fields.u64s("text").unwrap();
|
||||
//
|
||||
// assert_eq!(
|
||||
// get_vals_for_docs(&text_fast_field, 0..8),
|
||||
// vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2, 0]
|
||||
// );
|
||||
{
|
||||
let segment_ids = index.searchable_segment_ids().unwrap();
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
index_writer.merge(&segment_ids).wait().unwrap();
|
||||
index_writer.wait_merging_threads().unwrap();
|
||||
}
|
||||
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let text_column = fast_fields.str("text").unwrap().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
get_vals_for_docs(text_column.ords(), 0..8),
|
||||
vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -590,11 +573,7 @@ mod tests {
|
||||
writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let str_column = segment_reader
|
||||
.fast_fields()
|
||||
.str_column_opt("text")
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let str_column = segment_reader.fast_fields().str("text").unwrap().unwrap();
|
||||
// The string values are not sorted here.
|
||||
let term_ords: Vec<u64> = str_column.term_ords(0u32).collect();
|
||||
assert_eq!(&term_ords, &[1, 0]);
|
||||
@@ -620,155 +599,162 @@ mod tests {
|
||||
assert_eq!(&facet_ords, &[0, 1]);
|
||||
}
|
||||
|
||||
// #[test]
|
||||
// fn test_string_fastfield() -> crate::Result<()> {
|
||||
// let mut schema_builder = Schema::builder();
|
||||
// let text_field = schema_builder.add_text_field("text", STRING | FAST);
|
||||
// let schema = schema_builder.build();
|
||||
// let index = Index::create_in_ram(schema);
|
||||
#[test]
|
||||
fn test_string_fastfield() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", STRING | FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
// {
|
||||
// // first segment
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
// index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "BBBBB", // term_ord 1
|
||||
// ))?;
|
||||
// index_writer.add_document(doc!())?;
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "AAAAA", // term_ord 0
|
||||
// ))?;
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "AAAAA", // term_ord 0
|
||||
// ))?;
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "zumberthree", // term_ord 2, after merge term_ord 3
|
||||
// ))?;
|
||||
{
|
||||
// first segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "BBBBB", // term_ord 1
|
||||
))?;
|
||||
index_writer.add_document(doc!())?;
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "AAAAA", // term_ord 0
|
||||
))?;
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "AAAAA", // term_ord 0
|
||||
))?;
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "zumberthree", // term_ord 2, after merge term_ord 3
|
||||
))?;
|
||||
|
||||
// index_writer.add_document(doc!())?;
|
||||
// index_writer.commit()?;
|
||||
index_writer.add_document(doc!())?;
|
||||
index_writer.commit()?;
|
||||
|
||||
// let reader = index.reader()?;
|
||||
// let searcher = reader.searcher();
|
||||
// assert_eq!(searcher.segment_readers().len(), 1);
|
||||
// let segment_reader = searcher.segment_reader(0);
|
||||
// let fast_fields = segment_reader.fast_fields();
|
||||
// let text_fast_field = fast_fields.u64s(text_field).unwrap();
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let text_col = fast_fields.str("text").unwrap().unwrap();
|
||||
|
||||
// assert_eq!(get_vals_for_docs(&text_fast_field, 0..6), vec![1, 0, 0, 2]);
|
||||
assert_eq!(get_vals_for_docs(&text_col.ords(), 0..6), vec![1, 0, 0, 2]);
|
||||
|
||||
// let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
// assert_eq!(inverted_index.terms().num_terms(), 3);
|
||||
// let mut bytes = vec![];
|
||||
// assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
|
||||
// assert_eq!(bytes, "AAAAA".as_bytes());
|
||||
// }
|
||||
let inverted_index = segment_reader.inverted_index(text_field)?;
|
||||
assert_eq!(inverted_index.terms().num_terms(), 3);
|
||||
let mut bytes = vec![];
|
||||
assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
|
||||
assert_eq!(bytes, "AAAAA".as_bytes());
|
||||
}
|
||||
|
||||
// {
|
||||
// // second segment
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
{
|
||||
// second segment
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "AAAAA", // term_ord 0
|
||||
// ))?;
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "AAAAA", // term_ord 0
|
||||
))?;
|
||||
|
||||
// index_writer.add_document(doc!(
|
||||
// text_field => "CCCCC", // term_ord 1, after merge 2
|
||||
// ))?;
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "CCCCC", // term_ord 1, after merge 2
|
||||
))?;
|
||||
|
||||
// index_writer.add_document(doc!())?;
|
||||
// index_writer.commit()?;
|
||||
index_writer.add_document(doc!())?;
|
||||
index_writer.commit()?;
|
||||
|
||||
// let reader = index.reader()?;
|
||||
// let searcher = reader.searcher();
|
||||
// assert_eq!(searcher.segment_readers().len(), 2);
|
||||
// let segment_reader = searcher.segment_reader(1);
|
||||
// let fast_fields = segment_reader.fast_fields();
|
||||
// let text_fast_field = fast_fields.u64s(text_field).unwrap();
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 2);
|
||||
let segment_reader = searcher.segment_reader(1);
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let text_fast_field = fast_fields.str("text").unwrap().unwrap();
|
||||
|
||||
// assert_eq!(get_vals_for_docs(&text_fast_field, 0..2), vec![0, 1]);
|
||||
// }
|
||||
// // Merging the segments
|
||||
// {
|
||||
// let segment_ids = index.searchable_segment_ids()?;
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
// index_writer.merge(&segment_ids).wait()?;
|
||||
// index_writer.wait_merging_threads()?;
|
||||
// }
|
||||
assert_eq!(&get_vals_for_docs(text_fast_field.ords(), 0..2), &[0, 1]);
|
||||
}
|
||||
// Merging the segments
|
||||
{
|
||||
let segment_ids = index.searchable_segment_ids()?;
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
|
||||
// let reader = index.reader()?;
|
||||
// let searcher = reader.searcher();
|
||||
// let segment_reader = searcher.segment_reader(0);
|
||||
// let fast_fields = segment_reader.fast_fields();
|
||||
// let text_fast_field = fast_fields.u64s(text_field).unwrap();
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let text_fast_field = fast_fields.str("text").unwrap().unwrap();
|
||||
|
||||
// assert_eq!(
|
||||
// get_vals_for_docs(&text_fast_field, 0..9),
|
||||
// vec![1, 0, 0, 3 /* next segment */, 0, 2]
|
||||
// );
|
||||
assert_eq!(
|
||||
get_vals_for_docs(&text_fast_field.ords(), 0..9),
|
||||
vec![1, 0, 0, 3 /* next segment */, 0, 2]
|
||||
);
|
||||
|
||||
// Ok(())
|
||||
// }
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// #[test]
|
||||
// fn test_datefastfield() -> crate::Result<()> {
|
||||
// let mut schema_builder = Schema::builder();
|
||||
// let date_field = schema_builder.add_date_field(
|
||||
// "date",
|
||||
// DateOptions::from(FAST).set_precision(DatePrecision::Microseconds),
|
||||
// );
|
||||
// let multi_date_field = schema_builder.add_date_field(
|
||||
// "multi_date",
|
||||
// DateOptions::default()
|
||||
// .set_precision(DatePrecision::Microseconds)
|
||||
// .set_fast(),
|
||||
// );
|
||||
// let schema = schema_builder.build();
|
||||
// let index = Index::create_in_ram(schema);
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
// index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
// index_writer.add_document(doc!(
|
||||
// date_field => DateTime::from_u64(1i64.to_u64()),
|
||||
// multi_date_field => DateTime::from_u64(2i64.to_u64()),
|
||||
// multi_date_field => DateTime::from_u64(3i64.to_u64())
|
||||
// ))?;
|
||||
// index_writer.add_document(doc!(
|
||||
// date_field => DateTime::from_u64(4i64.to_u64())
|
||||
// ))?;
|
||||
// index_writer.add_document(doc!(
|
||||
// multi_date_field => DateTime::from_u64(5i64.to_u64()),
|
||||
// multi_date_field => DateTime::from_u64(6i64.to_u64())
|
||||
// ))?;
|
||||
// index_writer.commit()?;
|
||||
// let reader = index.reader()?;
|
||||
// let searcher = reader.searcher();
|
||||
// assert_eq!(searcher.segment_readers().len(), 1);
|
||||
// let segment_reader = searcher.segment_reader(0);
|
||||
// let fast_fields = segment_reader.fast_fields();
|
||||
// let date_fast_field = fast_fields.date(date_field).unwrap();
|
||||
// let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
|
||||
// let mut dates = vec![];
|
||||
// {
|
||||
// assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
|
||||
// dates_fast_field.get_vals(0u32, &mut dates);
|
||||
// assert_eq!(dates.len(), 2);
|
||||
// assert_eq!(dates[0].into_timestamp_micros(), 2i64);
|
||||
// assert_eq!(dates[1].into_timestamp_micros(), 3i64);
|
||||
// }
|
||||
// {
|
||||
// assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
|
||||
// dates_fast_field.get_vals(1u32, &mut dates);
|
||||
// assert!(dates.is_empty());
|
||||
// }
|
||||
// {
|
||||
// assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
|
||||
// dates_fast_field.get_vals(2u32, &mut dates);
|
||||
// assert_eq!(dates.len(), 2);
|
||||
// assert_eq!(dates[0].into_timestamp_micros(), 5i64);
|
||||
// assert_eq!(dates[1].into_timestamp_micros(), 6i64);
|
||||
// }
|
||||
// Ok(())
|
||||
// }
|
||||
#[test]
|
||||
fn test_datefastfield() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let date_field = schema_builder.add_date_field(
|
||||
"date",
|
||||
DateOptions::from(FAST).set_precision(DatePrecision::Microseconds),
|
||||
);
|
||||
let multi_date_field = schema_builder.add_date_field(
|
||||
"multi_date",
|
||||
DateOptions::default()
|
||||
.set_precision(DatePrecision::Microseconds)
|
||||
.set_fast(),
|
||||
);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer.add_document(doc!(
|
||||
date_field => DateTime::from_u64(1i64.to_u64()),
|
||||
multi_date_field => DateTime::from_u64(2i64.to_u64()),
|
||||
multi_date_field => DateTime::from_u64(3i64.to_u64())
|
||||
))?;
|
||||
index_writer.add_document(doc!(
|
||||
date_field => DateTime::from_u64(4i64.to_u64())
|
||||
))?;
|
||||
index_writer.add_document(doc!(
|
||||
multi_date_field => DateTime::from_u64(5i64.to_u64()),
|
||||
multi_date_field => DateTime::from_u64(6i64.to_u64())
|
||||
))?;
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let date_fast_field = fast_fields
|
||||
.column_opt::<columnar::DateTime>("date")
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.first_or_default_col(Default::default());
|
||||
let dates_fast_field = fast_fields
|
||||
.column_opt::<columnar::DateTime>("multi_date")
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let mut dates = vec![];
|
||||
{
|
||||
assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
|
||||
dates_fast_field.fill_vals(0u32, &mut dates);
|
||||
assert_eq!(dates.len(), 2);
|
||||
assert_eq!(dates[0].into_timestamp_micros(), 2i64);
|
||||
assert_eq!(dates[1].into_timestamp_micros(), 3i64);
|
||||
}
|
||||
{
|
||||
assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
|
||||
dates_fast_field.fill_vals(1u32, &mut dates);
|
||||
assert!(dates.is_empty());
|
||||
}
|
||||
{
|
||||
assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
|
||||
dates_fast_field.fill_vals(2u32, &mut dates);
|
||||
assert_eq!(dates.len(), 2);
|
||||
assert_eq!(dates[0].into_timestamp_micros(), 5i64);
|
||||
assert_eq!(dates[1].into_timestamp_micros(), 6i64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield_bool_small() {
|
||||
@@ -795,7 +781,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 171);
|
||||
assert_eq!(file.len(), 175);
|
||||
let fast_field_readers = FastFieldReaders::open(file).unwrap();
|
||||
let bool_col = fast_field_readers.bool("field_bool").unwrap();
|
||||
assert_eq!(bool_col.get_val(0), true);
|
||||
@@ -827,7 +813,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 183);
|
||||
assert_eq!(file.len(), 187);
|
||||
let readers = FastFieldReaders::open(file).unwrap();
|
||||
let bool_col = readers.bool("field_bool").unwrap();
|
||||
for i in 0..25 {
|
||||
@@ -852,7 +838,7 @@ mod tests {
|
||||
write.terminate().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 173);
|
||||
assert_eq!(file.len(), 177);
|
||||
let fastfield_readers = FastFieldReaders::open(file).unwrap();
|
||||
let col = fastfield_readers.bool("field_bool").unwrap();
|
||||
assert_eq!(col.get_val(0), false);
|
||||
@@ -978,8 +964,7 @@ mod tests {
|
||||
index_writer.commit().unwrap();
|
||||
let searcher = index.reader().unwrap().searcher();
|
||||
let fastfields = searcher.segment_reader(0u32).fast_fields();
|
||||
let column: columnar::Column<Ipv6Addr> =
|
||||
fastfields.typed_column_opt("ip").unwrap().unwrap();
|
||||
let column: Column<Ipv6Addr> = fastfields.column_opt("ip").unwrap().unwrap();
|
||||
assert_eq!(column.num_rows(), 3);
|
||||
assert_eq!(column.first(0), None);
|
||||
assert_eq!(column.first(1), Some(ip_addr));
|
||||
|
||||
@@ -1,16 +1,14 @@
use std::collections::HashMap;
use std::io;
use std::net::Ipv6Addr;
use std::sync::Arc;

use columnar::{
    BytesColumn, ColumnType, ColumnValues, ColumnarReader, DynamicColumn, DynamicColumnHandle,
    HasAssociatedColumnType, StrColumn,
    BytesColumn, Column, ColumnType, ColumnValues, ColumnarReader, DynamicColumn,
    DynamicColumnHandle, HasAssociatedColumnType, StrColumn,
};
use fastfield_codecs::Column;

use crate::directory::FileSlice;
use crate::schema::{Field, Schema};
use crate::schema::Schema;
use crate::space_usage::{FieldUsage, PerFieldSpaceUsage};

/// Provides access to all of the BitpackedFastFieldReader.
@@ -28,6 +26,10 @@ impl FastFieldReaders {
        Ok(FastFieldReaders { columnar })
    }

    pub(crate) fn columnar(&self) -> &ColumnarReader {
        self.columnar.as_ref()
    }

pub(crate) fn space_usage(&self, schema: &Schema) -> io::Result<PerFieldSpaceUsage> {
|
||||
let mut per_field_usages: Vec<FieldUsage> = Default::default();
|
||||
for (field, field_entry) in schema.fields() {
|
||||
@@ -44,33 +46,18 @@ impl FastFieldReaders {
|
||||
Ok(PerFieldSpaceUsage::new(per_field_usages))
|
||||
}
|
||||
|
||||
    pub fn typed_column_opt<T>(
        &self,
        field_name: &str,
    ) -> crate::Result<Option<columnar::Column<T>>>
    /// Returns a typed column associated to a given field name.
    ///
    /// If no column associated with that field_name exists,
    /// or existing columns do not have the required type,
    /// returns `None`.
    pub fn column_opt<T>(&self, field_name: &str) -> crate::Result<Option<Column<T>>>
    where
        T: PartialOrd + Copy + HasAssociatedColumnType + Send + Sync + 'static,
        DynamicColumn: Into<Option<columnar::Column<T>>>,
        DynamicColumn: Into<Option<Column<T>>>,
    {
        let column_type = T::column_type();
        let Some(dynamic_column_handle) = self.column_handle(field_name, column_type)?
        else {
            return Ok(None);
        };
        let dynamic_column = dynamic_column_handle.open()?;
        Ok(dynamic_column.into())
    }

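A minimal usage sketch of the `column_opt` accessor introduced above (not part of the diff). The helper name `read_price_column` and the field name "price" are hypothetical; the calls assume the signatures shown in this file (`column_opt`, `first_or_default_col`) and the `get_val` accessor exercised by the tests.

use tantivy::{Result, SegmentReader};

// Hypothetical helper: reads the u64 fast field column named "price".
fn read_price_column(segment_reader: &SegmentReader) -> Result<()> {
    let fast_fields = segment_reader.fast_fields();
    // `column_opt` yields Ok(None) when no column of the requested type exists.
    if let Some(price_col) = fast_fields.column_opt::<u64>("price")? {
        // Collapse multi-valued rows to one value per doc, defaulting missing rows to 0.
        let first_vals = price_col.first_or_default_col(0u64);
        println!("doc 0 price: {}", first_vals.get_val(0));
    }
    Ok(())
}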
pub fn bytes_column_opt(&self, field_name: &str) -> crate::Result<Option<BytesColumn>> {
|
||||
let Some(dynamic_column_handle) = self.column_handle(field_name, ColumnType::Bytes)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
let dynamic_column = dynamic_column_handle.open()?;
|
||||
Ok(dynamic_column.into())
|
||||
}
|
||||
pub fn str_column_opt(&self, field_name: &str) -> crate::Result<Option<StrColumn>> {
|
||||
let Some(dynamic_column_handle) = self.column_handle(field_name, ColumnType::Str)?
|
||||
let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, column_type)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
@@ -78,6 +65,7 @@ impl FastFieldReaders {
|
||||
Ok(dynamic_column.into())
|
||||
}
|
||||
|
||||
/// Returns the number of `bytes` associated with a column.
|
||||
pub fn column_num_bytes(&self, field: &str) -> crate::Result<usize> {
|
||||
Ok(self
|
||||
.columnar
|
||||
@@ -87,12 +75,17 @@ impl FastFieldReaders {
|
||||
.sum())
|
||||
}
|
||||
|
||||
pub fn typed_column_first_or_default<T>(&self, field: &str) -> crate::Result<Arc<dyn Column<T>>>
|
||||
/// Returns a typed column value object.
|
||||
///
|
||||
/// In that column value:
|
||||
/// - Rows with no value are associated with the default value.
|
||||
/// - Rows with several values are associated with the first value.
|
||||
pub fn column_first_or_default<T>(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<T>>>
|
||||
where
|
||||
T: PartialOrd + Copy + HasAssociatedColumnType + Send + Sync + 'static,
|
||||
DynamicColumn: Into<Option<columnar::Column<T>>>,
|
||||
DynamicColumn: Into<Option<Column<T>>>,
|
||||
{
|
||||
let col_opt: Option<columnar::Column<T>> = self.typed_column_opt(field)?;
|
||||
let col_opt: Option<Column<T>> = self.column_opt(field)?;
|
||||
if let Some(col) = col_opt {
|
||||
Ok(col.first_or_default_col(T::default_value()))
|
||||
} else {
|
||||
@@ -106,32 +99,45 @@ impl FastFieldReaders {
|
||||
///
|
||||
/// If `field` is not a u64 fast field, this method returns an Error.
|
||||
pub fn u64(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<u64>>> {
|
||||
self.typed_column_first_or_default(field)
|
||||
self.column_first_or_default(field)
|
||||
}
|
||||
|
||||
/// Returns the `date` fast field reader reader associated with `field`.
|
||||
///
|
||||
/// If `field` is not a date fast field, this method returns an Error.
|
||||
pub fn date(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<columnar::DateTime>>> {
|
||||
self.typed_column_first_or_default(field)
|
||||
self.column_first_or_default(field)
|
||||
}
|
||||
|
||||
/// Returns the `ip` fast field reader reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a u128 fast field, this method returns an Error.
|
||||
pub fn ip_addr(&self, field: &str) -> crate::Result<Arc<dyn Column<Ipv6Addr>>> {
|
||||
self.typed_column_first_or_default(field)
|
||||
pub fn ip_addr(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<Ipv6Addr>>> {
|
||||
self.column_first_or_default(field)
|
||||
}
|
||||
|
||||
    pub fn str(&self, field: &str) -> crate::Result<Option<columnar::StrColumn>> {
        self.str_column_opt(field)
    /// Returns a `str` column.
    pub fn str(&self, field_name: &str) -> crate::Result<Option<StrColumn>> {
        let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Str)?
        else {
            return Ok(None);
        };
        let dynamic_column = dynamic_column_handle.open()?;
        Ok(dynamic_column.into())
    }

    pub fn bytes(&self, field: &str) -> crate::Result<Option<columnar::BytesColumn>> {
        self.bytes_column_opt(field)
    /// Returns a `bytes` column.
    pub fn bytes(&self, field_name: &str) -> crate::Result<Option<BytesColumn>> {
        let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Bytes)?
        else {
            return Ok(None);
        };
        let dynamic_column = dynamic_column_handle.open()?;
        Ok(dynamic_column.into())
    }

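An illustrative sketch of reading the new `str` fast field column (not from the diff). The helper `read_title_terms` and the field name "title" are made up; the calls mirror the ones exercised in the tests earlier in this diff (`str`, `term_ords`, `ord_to_str`).

use tantivy::SegmentReader;

// Hypothetical helper: prints the first term ordinal of doc 0 and its string value.
fn read_title_terms(segment_reader: &SegmentReader) {
    let str_column = segment_reader
        .fast_fields()
        .str("title")
        .unwrap()
        .expect("no str fast field for 'title'");
    // Term ordinals stored for doc 0, in indexing order.
    let ords: Vec<u64> = str_column.term_ords(0u32).collect();
    if let Some(&first_ord) = ords.first() {
        let mut term = String::new();
        // Resolve the ordinal back to its string through the column dictionary.
        assert!(str_column.ord_to_str(first_ord, &mut term).unwrap());
        println!("doc 0 first term ord {first_ord}: {term}");
    }
}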
pub fn column_handle(
|
||||
/// Returning a `dynamic_column_handle`.
|
||||
pub fn dynamic_column_handle(
|
||||
&self,
|
||||
field_name: &str,
|
||||
column_type: ColumnType,
|
||||
@@ -145,7 +151,9 @@ impl FastFieldReaders {
|
||||
Ok(dynamic_column_handle_opt)
|
||||
}
|
||||
|
||||
pub fn u64_lenient(&self, field_name: &str) -> crate::Result<Option<columnar::Column<u64>>> {
|
||||
/// Returns the `u64` column used to represent any `u64`-mapped typed (i64, u64, f64, DateTime).
|
||||
#[doc(hidden)]
|
||||
pub fn u64_lenient(&self, field_name: &str) -> crate::Result<Option<Column<u64>>> {
|
||||
for col in self.columnar.read_columns(field_name)? {
|
||||
if let Some(col_u64) = col.open_u64_lenient()? {
|
||||
return Ok(Some(col_u64));
|
||||
@@ -157,21 +165,21 @@ impl FastFieldReaders {
|
||||
/// Returns the `i64` fast field reader reader associated with `field`.
|
||||
///
|
||||
/// If `field` is not a i64 fast field, this method returns an Error.
|
||||
pub fn i64(&self, field_name: &str) -> crate::Result<Arc<dyn Column<i64>>> {
|
||||
self.typed_column_first_or_default(field_name)
|
||||
pub fn i64(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<i64>>> {
|
||||
self.column_first_or_default(field_name)
|
||||
}
|
||||
|
||||
/// Returns the `f64` fast field reader reader associated with `field`.
|
||||
///
|
||||
/// If `field` is not a f64 fast field, this method returns an Error.
|
||||
pub fn f64(&self, field_name: &str) -> crate::Result<Arc<dyn Column<f64>>> {
|
||||
self.typed_column_first_or_default(field_name)
|
||||
pub fn f64(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<f64>>> {
|
||||
self.column_first_or_default(field_name)
|
||||
}
|
||||
|
||||
/// Returns the `bool` fast field reader reader associated with `field`.
|
||||
///
|
||||
/// If `field` is not a bool fast field, this method returns an Error.
|
||||
pub fn bool(&self, field_name: &str) -> crate::Result<Arc<dyn Column<bool>>> {
|
||||
self.typed_column_first_or_default(field_name)
|
||||
pub fn bool(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<bool>>> {
|
||||
self.column_first_or_default(field_name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,6 +67,16 @@ impl FastFieldsWriter {
        self.columnar_writer.mem_usage()
    }

    pub(crate) fn sort_order(
        &self,
        sort_field: &str,
        num_docs: DocId,
        reversed: bool,
    ) -> Vec<DocId> {
        self.columnar_writer
            .sort_order(sort_field, num_docs, reversed)
    }

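A standalone illustration of the contract assumed for the returned `Vec<DocId>` (position = new doc id, value = old doc id), as it is later used to build a `DocIdMapping`. `sort_order_by_value` is not tantivy or columnar code, just a sketch of sorting doc ids by their fast field value.

// Standalone sketch: sort doc ids by their fast field value and
// return new_doc_id -> old_doc_id.
fn sort_order_by_value(values: &[u64], reversed: bool) -> Vec<u32> {
    let mut doc_ids: Vec<u32> = (0..values.len() as u32).collect();
    if reversed {
        doc_ids.sort_by_key(|&doc| std::cmp::Reverse(values[doc as usize]));
    } else {
        doc_ids.sort_by_key(|&doc| values[doc as usize]);
    }
    doc_ids
}

fn main() {
    // Doc 1 has the smallest value, so it becomes the first document when ascending.
    assert_eq!(sort_order_by_value(&[10, 3, 7], false), vec![1, 2, 0]);
    assert_eq!(sort_order_by_value(&[10, 3, 7], true), vec![0, 2, 1]);
}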
/// Indexes all of the fastfields of a new document.
|
||||
pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
|
||||
let doc_id = self.num_docs;
|
||||
@@ -143,11 +153,13 @@ impl FastFieldsWriter {
|
||||
pub fn serialize(
|
||||
mut self,
|
||||
wrt: &mut dyn io::Write,
|
||||
doc_id_map: Option<&DocIdMapping>,
|
||||
doc_id_map_opt: Option<&DocIdMapping>,
|
||||
) -> io::Result<()> {
|
||||
assert!(doc_id_map.is_none()); // TODO handle doc id map
|
||||
let num_docs = self.num_docs;
|
||||
self.columnar_writer.serialize(num_docs, wrt)?;
|
||||
let old_to_new_row_ids =
|
||||
doc_id_map_opt.map(|doc_id_mapping| doc_id_mapping.old_to_new_ids());
|
||||
self.columnar_writer
|
||||
.serialize(num_docs, old_to_new_row_ids, wrt)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,27 +1,44 @@
//! This module is used when sorting the index by a property, e.g.
//! to get mappings from old doc_id to new doc_id and vice versa, after sorting

use std::cmp::Reverse;
use common::ReadOnlyBitSet;

use super::SegmentWriter;
use crate::schema::{Field, Schema};
use crate::{DocAddress, DocId, IndexSortByField, Order, TantivyError};
use crate::{DocAddress, DocId, IndexSortByField, TantivyError};

#[derive(Copy, Clone, Eq, PartialEq)]
pub enum MappingType {
    Stacked,
    StackedWithDeletes,
    Shuffled,
}

/// Struct to provide mapping from new doc_id to old doc_id and segment.
#[derive(Clone)]
pub(crate) struct SegmentDocIdMapping {
    new_doc_id_to_old_doc_addr: Vec<DocAddress>,
    is_trivial: bool,
    pub(crate) new_doc_id_to_old_doc_addr: Vec<DocAddress>,
    pub(crate) alive_bitsets: Vec<Option<ReadOnlyBitSet>>,
    mapping_type: MappingType,
}

impl SegmentDocIdMapping {
    pub(crate) fn new(new_doc_id_to_old_and_segment: Vec<DocAddress>, is_trivial: bool) -> Self {
    pub(crate) fn new(
        new_doc_id_to_old_doc_addr: Vec<DocAddress>,
        mapping_type: MappingType,
        alive_bitsets: Vec<Option<ReadOnlyBitSet>>,
    ) -> Self {
        Self {
            new_doc_id_to_old_doc_addr: new_doc_id_to_old_and_segment,
            is_trivial,
            new_doc_id_to_old_doc_addr,
            mapping_type,
            alive_bitsets,
        }
    }

    pub fn mapping_type(&self) -> MappingType {
        self.mapping_type
    }

    /// Returns an iterator over the old document addresses, ordered by the new document ids.
    ///
    /// In the returned `DocAddress`, the `segment_ord` is the ordinal of the targeted segment
@@ -30,16 +47,20 @@ impl SegmentDocIdMapping {
        self.new_doc_id_to_old_doc_addr.iter().copied()
    }

    pub(crate) fn len(&self) -> usize {
        self.new_doc_id_to_old_doc_addr.len()
    }

    /// This flag means the segments are simply stacked in the order of their ordinal.
    /// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
    ///
    /// The different segments may present some deletes, in which case it is expressed by skipping a
    /// `DocId`. [(0, 1), (0, 3)] <--- here doc_id=0 and doc_id=1 have been deleted
    ///
    /// Being trivial is equivalent to having the `new_doc_id_to_old_doc_addr` array sorted.
    ///
    /// This allows for some optimization.
    pub(crate) fn is_trivial(&self) -> bool {
        self.is_trivial
        match self.mapping_type {
            MappingType::Stacked | MappingType::StackedWithDeletes => true,
            MappingType::Shuffled => false,
        }
    }
}

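A standalone sketch (not from the diff) of what the `is_trivial` doc above means: a Stacked mapping enumerates (segment_ord, doc_id) pairs in order, so the vector is sorted, while a Shuffled mapping is any other order. The helper `stacked_mapping` and the literal vectors are made up for this note.

// Standalone illustration of Stacked vs Shuffled doc id mappings.
fn stacked_mapping(num_docs_per_segment: &[u32]) -> Vec<(u32, u32)> {
    // (segment_ord, old_doc_id) pairs, segment by segment: the Stacked case.
    num_docs_per_segment
        .iter()
        .enumerate()
        .flat_map(|(seg, &n)| (0..n).map(move |doc| (seg as u32, doc)))
        .collect()
}

fn main() {
    let stacked = stacked_mapping(&[3, 2]);
    assert_eq!(stacked, vec![(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]);
    // Stacked (and stacked-with-deletes) mappings stay sorted, hence "trivial".
    assert!(stacked.windows(2).all(|w| w[0] <= w[1]));
    // A Shuffled mapping (e.g. docs reordered by a sort field) is not sorted.
    let shuffled = vec![(1u32, 0u32), (0, 2), (0, 0), (1, 1), (0, 1)];
    assert!(!shuffled.windows(2).all(|w| w[0] <= w[1]));
}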
@@ -80,6 +101,11 @@ impl DocIdMapping {
|
||||
pub fn iter_old_doc_ids(&self) -> impl Iterator<Item = DocId> + Clone + '_ {
|
||||
self.new_doc_id_to_old.iter().cloned()
|
||||
}
|
||||
|
||||
pub fn old_to_new_ids(&self) -> &[DocId] {
|
||||
&self.old_doc_id_to_new[..]
|
||||
}
|
||||
|
||||
/// Remaps a given array to the new doc ids.
|
||||
pub fn remap<T: Copy>(&self, els: &[T]) -> Vec<T> {
|
||||
self.new_doc_id_to_old
|
||||
@@ -113,36 +139,15 @@ pub(crate) fn get_doc_id_mapping_from_field(
|
||||
sort_by_field: IndexSortByField,
|
||||
segment_writer: &SegmentWriter,
|
||||
) -> crate::Result<DocIdMapping> {
|
||||
todo!()
|
||||
// let schema = segment_writer.segment_serializer.segment().schema();
|
||||
// let field_id = expect_field_id_for_sort_field(&schema, &sort_by_field)?; // for now expect
|
||||
// fastfield, but not strictly required
|
||||
// let fast_field = segment_writer
|
||||
// .fast_field_writers
|
||||
// .get_field_writer(field_id)
|
||||
// .ok_or_else(|| {
|
||||
// TantivyError::InvalidArgument(format!(
|
||||
// "sort index by field is required to be a fast field {:?}",
|
||||
// sort_by_field.field
|
||||
// ))
|
||||
// })?;
|
||||
|
||||
// // create new doc_id to old doc_id index (used in fast_field_writers)
|
||||
// let mut doc_id_and_data = fast_field
|
||||
// .iter()
|
||||
// .enumerate()
|
||||
// .map(|el| (el.0 as DocId, el.1))
|
||||
// .collect::<Vec<_>>();
|
||||
// if sort_by_field.order == Order::Desc {
|
||||
// doc_id_and_data.sort_by_key(|k| Reverse(k.1));
|
||||
// } else {
|
||||
// doc_id_and_data.sort_by_key(|k| k.1);
|
||||
// }
|
||||
// let new_doc_id_to_old = doc_id_and_data
|
||||
// .into_iter()
|
||||
// .map(|el| el.0)
|
||||
// .collect::<Vec<_>>();
|
||||
// Ok(DocIdMapping::from_new_id_to_old_id(new_doc_id_to_old))
|
||||
let schema = segment_writer.segment_serializer.segment().schema();
|
||||
expect_field_id_for_sort_field(&schema, &sort_by_field)?; // for now expect
|
||||
let new_doc_id_to_old = segment_writer.fast_field_writers.sort_order(
|
||||
sort_by_field.field.as_str(),
|
||||
segment_writer.max_doc(),
|
||||
sort_by_field.order.is_desc(),
|
||||
);
|
||||
// create new doc_id to old doc_id index (used in fast_field_writers)
|
||||
Ok(DocIdMapping::from_new_id_to_old_id(new_doc_id_to_old))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,18 +1,19 @@
use std::collections::HashMap;
use std::sync::Arc;

use columnar::{
    ColumnValues, ColumnarReader, MergeRowOrder, RowAddr, ShuffleMergeOrder, StackMergeOrder,
};
use common::ReadOnlyBitSet;
use itertools::Itertools;
use measure_time::debug_time;

// use super::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueIndexColumn;
use crate::core::{Segment, SegmentReader};
use crate::directory::WritePtr;
use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{AliveBitSet, Column, FastFieldNotAvailableError};
use crate::fastfield::{AliveBitSet, FastFieldNotAvailableError};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
// use crate::indexer::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueColumn;
use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping};
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{Field, FieldType, Schema};
@@ -85,29 +86,6 @@ pub struct IndexMerger {
max_doc: u32,
|
||||
}
|
||||
|
||||
struct TermOrdinalMapping {
|
||||
per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>,
|
||||
}
|
||||
|
||||
impl TermOrdinalMapping {
|
||||
fn new(max_term_ords: Vec<TermOrdinal>) -> TermOrdinalMapping {
|
||||
TermOrdinalMapping {
|
||||
per_segment_new_term_ordinals: max_term_ords
|
||||
.into_iter()
|
||||
.map(|max_term_ord| vec![TermOrdinal::default(); max_term_ord as usize])
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
fn register_from_to(&mut self, segment_ord: usize, from_ord: TermOrdinal, to_ord: TermOrdinal) {
|
||||
self.per_segment_new_term_ordinals[segment_ord][from_ord as usize] = to_ord;
|
||||
}
|
||||
|
||||
fn get_segment(&self, segment_ord: usize) -> &[TermOrdinal] {
|
||||
&(self.per_segment_new_term_ordinals[segment_ord])[..]
|
||||
}
|
||||
}
|
||||
|
||||
struct DeltaComputer {
|
||||
buffer: Vec<u32>,
|
||||
}
|
||||
@@ -132,6 +110,31 @@ impl DeltaComputer {
|
||||
}
|
||||
}
|
||||
|
||||
fn convert_to_merge_order<'a>(
    columnars: &[&'a ColumnarReader],
    doc_id_mapping: SegmentDocIdMapping,
) -> MergeRowOrder {
    match doc_id_mapping.mapping_type() {
        MappingType::Stacked => MergeRowOrder::Stack(StackMergeOrder::stack(columnars)),
        MappingType::StackedWithDeletes | MappingType::Shuffled => {
            // RUST/LLVM is amazing. The following conversion is actually a no-op:
            // no allocation, no copy.
            let new_row_id_to_old_row_id: Vec<RowAddr> = doc_id_mapping
                .new_doc_id_to_old_doc_addr
                .into_iter()
                .map(|doc_addr| RowAddr {
                    segment_ord: doc_addr.segment_ord,
                    row_id: doc_addr.doc_id,
                })
                .collect();
            MergeRowOrder::Shuffled(ShuffleMergeOrder {
                new_row_id_to_old_row_id,
                alive_bitsets: doc_id_mapping.alive_bitsets,
            })
        }
    }
}

impl IndexMerger {
|
||||
pub fn open(
|
||||
schema: Schema,
|
||||
@@ -223,7 +226,6 @@ impl IndexMerger {
|
||||
let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
|
||||
for field in fields {
|
||||
fieldnorms_data.clear();
|
||||
|
||||
let fieldnorms_readers: Vec<FieldNormReader> = self
|
||||
.readers
|
||||
.iter()
|
||||
@@ -234,7 +236,6 @@ impl IndexMerger {
|
||||
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(old_doc_addr.doc_id);
|
||||
fieldnorms_data.push(fieldnorm_id);
|
||||
}
|
||||
|
||||
fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?;
|
||||
}
|
||||
fieldnorms_serializer.close()?;
|
||||
@@ -244,67 +245,16 @@ impl IndexMerger {
|
||||
fn write_fast_fields(
|
||||
&self,
|
||||
fast_field_wrt: &mut WritePtr,
|
||||
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
|
||||
doc_id_mapping: &SegmentDocIdMapping,
|
||||
doc_id_mapping: SegmentDocIdMapping,
|
||||
) -> crate::Result<()> {
|
||||
debug_time!("wrie-fast-fields");
|
||||
for (_field, field_entry) in self.schema.fields() {
|
||||
if field_entry.is_fast() {
|
||||
todo!();
|
||||
}
|
||||
}
|
||||
|
||||
// for (field, field_entry) in self.schema.fields() {
|
||||
// let field_type = field_entry.field_type();
|
||||
// match field_type {
|
||||
// FieldType::Facet(_) | FieldType::Str(_) if field_type.is_fast() => {
|
||||
// let term_ordinal_mapping = term_ord_mappings.remove(&field).expect(
|
||||
// "Logic Error in Tantivy (Please report). Facet field should have required \
|
||||
// a`term_ordinal_mapping`.",
|
||||
// );
|
||||
// self.write_term_id_fast_field(
|
||||
// field,
|
||||
// &term_ordinal_mapping,
|
||||
// fast_field_serializer,
|
||||
// doc_id_mapping,
|
||||
// )?;
|
||||
// }
|
||||
// FieldType::U64(ref options)
|
||||
// | FieldType::I64(ref options)
|
||||
// | FieldType::F64(ref options)
|
||||
// | FieldType::Bool(ref options) => {
|
||||
// todo!()
|
||||
// }
|
||||
// FieldType::Date(ref options) => {
|
||||
// if options.is_fast() {
|
||||
// todo!();
|
||||
// }
|
||||
// Some(Cardinality::SingleValue) => {
|
||||
// self.write_single_fast_field(field, fast_field_serializer, doc_id_mapping)?;
|
||||
// }
|
||||
// Some(Cardinality::MultiValues) => {
|
||||
// self.write_multi_fast_field(field, fast_field_serializer, doc_id_mapping)?;
|
||||
// }
|
||||
// None => {}
|
||||
// },
|
||||
// FieldType::Bytes(byte_options) => {
|
||||
// if byte_options.is_fast() {
|
||||
// self.write_bytes_fast_field(field, fast_field_serializer, doc_id_mapping)?;
|
||||
// }
|
||||
// }
|
||||
// FieldType::IpAddr(options) => {
|
||||
// if options.is_fast() {
|
||||
// todo!();
|
||||
// }
|
||||
// },
|
||||
//
|
||||
// FieldType::JsonObject(_) | FieldType::Facet(_) | FieldType::Str(_) => {
|
||||
// We don't handle json fast field for the moment
|
||||
// They can be implemented using what is done
|
||||
// for facets in the future
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
debug_time!("write-fast-fields");
|
||||
let columnars: Vec<&ColumnarReader> = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|reader| reader.fast_fields().columnar())
|
||||
.collect();
|
||||
let merge_row_order = convert_to_merge_order(&columnars[..], doc_id_mapping);
|
||||
columnar::merge_columnar(&columnars[..], merge_row_order, fast_field_wrt)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -334,7 +284,7 @@ impl IndexMerger {
|
||||
pub(crate) fn get_sort_field_accessor(
|
||||
reader: &SegmentReader,
|
||||
sort_by_field: &IndexSortByField,
|
||||
) -> crate::Result<Arc<dyn Column>> {
|
||||
) -> crate::Result<Arc<dyn ColumnValues>> {
|
||||
reader.schema().get_field(&sort_by_field.field)?;
|
||||
let value_accessor = reader
|
||||
.fast_fields()
|
||||
@@ -348,7 +298,7 @@ impl IndexMerger {
|
||||
pub(crate) fn get_reader_with_sort_field_accessor(
|
||||
&self,
|
||||
sort_by_field: &IndexSortByField,
|
||||
) -> crate::Result<Vec<(SegmentOrdinal, Arc<dyn Column>)>> {
|
||||
) -> crate::Result<Vec<(SegmentOrdinal, Arc<dyn ColumnValues>)>> {
|
||||
let reader_ordinal_and_field_accessors = self
|
||||
.readers
|
||||
.iter()
|
||||
@@ -369,7 +319,7 @@ impl IndexMerger {
|
||||
/// doc_id.
|
||||
/// ReaderWithOrdinal will include the ordinal position of the
|
||||
/// reader in self.readers.
|
||||
pub(crate) fn generate_doc_id_mapping(
|
||||
pub(crate) fn generate_doc_id_mapping_with_sort_by_field(
|
||||
&self,
|
||||
sort_by_field: &IndexSortByField,
|
||||
) -> crate::Result<SegmentDocIdMapping> {
|
||||
@@ -414,7 +364,23 @@ impl IndexMerger {
|
||||
segment_ord,
|
||||
}),
|
||||
);
|
||||
Ok(SegmentDocIdMapping::new(sorted_doc_ids, false))
|
||||
|
||||
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|segment_reader| {
|
||||
if let Some(alive_bitset) = segment_reader.alive_bitset() {
|
||||
Some(alive_bitset.bitset().clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(SegmentDocIdMapping::new(
|
||||
sorted_doc_ids,
|
||||
MappingType::Shuffled,
|
||||
alive_bitsets,
|
||||
))
|
||||
}
|
||||
|
||||
/// Creates a mapping if the segments are stacked. this is helpful to merge codelines between
|
||||
@@ -439,17 +405,39 @@ impl IndexMerger {
|
||||
})
|
||||
}),
|
||||
);
|
||||
Ok(SegmentDocIdMapping::new(mapping, true))
|
||||
|
||||
let has_deletes: bool = self.readers.iter().any(SegmentReader::has_deletes);
|
||||
let mapping_type = if has_deletes {
|
||||
MappingType::StackedWithDeletes
|
||||
} else {
|
||||
MappingType::Stacked
|
||||
};
|
||||
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|reader| {
|
||||
if let Some(bitset) = reader.alive_bitset() {
|
||||
Some(bitset.bitset().clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(SegmentDocIdMapping::new(
|
||||
mapping,
|
||||
mapping_type,
|
||||
alive_bitsets,
|
||||
))
|
||||
}
|
||||
|
||||
fn write_postings_for_field(
|
||||
&self,
|
||||
indexed_field: Field,
|
||||
field_type: &FieldType,
|
||||
_field_type: &FieldType,
|
||||
serializer: &mut InvertedIndexSerializer,
|
||||
fieldnorm_reader: Option<FieldNormReader>,
|
||||
doc_id_mapping: &SegmentDocIdMapping,
|
||||
) -> crate::Result<Option<TermOrdinalMapping>> {
|
||||
) -> crate::Result<()> {
|
||||
debug_time!("write-postings-for-field");
|
||||
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
|
||||
let mut delta_computer = DeltaComputer::new();
|
||||
@@ -469,14 +457,6 @@ impl IndexMerger {
|
||||
max_term_ords.push(terms.num_terms() as u64);
|
||||
}
|
||||
|
||||
let mut term_ord_mapping_opt = match field_type {
|
||||
FieldType::Facet(_) => Some(TermOrdinalMapping::new(max_term_ords)),
|
||||
FieldType::Str(options) if options.is_fast() => {
|
||||
Some(TermOrdinalMapping::new(max_term_ords))
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let mut merged_terms = TermMerger::new(field_term_streams);
|
||||
|
||||
// map from segment doc ids to the resulting merged segment doc id.
|
||||
@@ -559,13 +539,7 @@ impl IndexMerger {
|
||||
continue;
|
||||
}
|
||||
|
||||
let to_term_ord = field_serializer.new_term(term_bytes, total_doc_freq)?;
|
||||
|
||||
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
|
||||
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
|
||||
term_ord_mapping.register_from_to(segment_ord, from_term_ord, to_term_ord);
|
||||
}
|
||||
}
|
||||
field_serializer.new_term(term_bytes, total_doc_freq)?;
|
||||
|
||||
// We can now serialize this postings, by pushing each document to the
|
||||
// postings serializer.
|
||||
@@ -617,7 +591,7 @@ impl IndexMerger {
|
||||
field_serializer.close_term()?;
|
||||
}
|
||||
field_serializer.close()?;
|
||||
Ok(term_ord_mapping_opt)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_postings(
|
||||
@@ -625,23 +599,20 @@ impl IndexMerger {
|
||||
serializer: &mut InvertedIndexSerializer,
|
||||
fieldnorm_readers: FieldNormReaders,
|
||||
doc_id_mapping: &SegmentDocIdMapping,
|
||||
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
|
||||
let mut term_ordinal_mappings = HashMap::new();
|
||||
) -> crate::Result<()> {
|
||||
for (field, field_entry) in self.schema.fields() {
|
||||
let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
|
||||
if field_entry.is_indexed() {
|
||||
if let Some(term_ordinal_mapping) = self.write_postings_for_field(
|
||||
self.write_postings_for_field(
|
||||
field,
|
||||
field_entry.field_type(),
|
||||
serializer,
|
||||
fieldnorm_reader,
|
||||
doc_id_mapping,
|
||||
)? {
|
||||
term_ordinal_mappings.insert(field, term_ordinal_mapping);
|
||||
}
|
||||
)?;
|
||||
}
|
||||
}
|
||||
Ok(term_ordinal_mappings)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_storable_fields(
|
||||
@@ -726,7 +697,7 @@ impl IndexMerger {
|
||||
if self.is_disjunct_and_sorted_on_sort_property(sort_by_field)? {
|
||||
self.get_doc_id_from_concatenated_data()?
|
||||
} else {
|
||||
self.generate_doc_id_mapping(sort_by_field)?
|
||||
self.generate_doc_id_mapping_with_sort_by_field(sort_by_field)?
|
||||
}
|
||||
} else {
|
||||
self.get_doc_id_from_concatenated_data()?
|
||||
@@ -740,19 +711,17 @@ impl IndexMerger {
|
||||
.segment()
|
||||
.open_read(SegmentComponent::FieldNorms)?;
|
||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
||||
let term_ord_mappings = self.write_postings(
|
||||
self.write_postings(
|
||||
serializer.get_postings_serializer(),
|
||||
fieldnorm_readers,
|
||||
&doc_id_mapping,
|
||||
)?;
|
||||
debug!("write-fastfields");
|
||||
self.write_fast_fields(
|
||||
serializer.get_fast_field_write(),
|
||||
term_ord_mappings,
|
||||
&doc_id_mapping,
|
||||
)?;
|
||||
|
||||
debug!("write-storagefields");
|
||||
self.write_storable_fields(serializer.get_store_writer(), &doc_id_mapping)?;
|
||||
debug!("write-fastfields");
|
||||
self.write_fast_fields(serializer.get_fast_field_write(), doc_id_mapping)?;
|
||||
|
||||
debug!("close-serializer");
|
||||
serializer.close()?;
|
||||
Ok(self.max_doc)
|
||||
@@ -761,11 +730,14 @@ impl IndexMerger {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use byteorder::{BigEndian, ReadBytesExt};
|
||||
|
||||
use columnar::Column;
|
||||
use schema::FAST;
|
||||
|
||||
use crate::collector::tests::{FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE};
|
||||
use crate::collector::Count;
|
||||
use crate::collector::tests::{
|
||||
BytesFastFieldTestCollector, FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE,
|
||||
};
|
||||
use crate::collector::{Count, FacetCollector};
|
||||
use crate::core::Index;
|
||||
use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery};
|
||||
use crate::schema::{
|
||||
@@ -774,7 +746,7 @@ mod tests {
|
||||
};
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::{
|
||||
assert_nearly_equals, schema, DateTime, DocAddress, DocSet, IndexSettings,
|
||||
assert_nearly_equals, schema, DateTime, DocAddress, DocId, DocSet, IndexSettings,
|
||||
IndexSortByField, IndexWriter, Order, Searcher, SegmentId,
|
||||
};
|
||||
|
||||
@@ -907,27 +879,27 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
// {
|
||||
// let get_fast_vals = |terms: Vec<Term>| {
|
||||
// let query = BooleanQuery::new_multiterms_query(terms);
|
||||
// searcher.search(&query, &FastFieldTestCollector::for_field(score_field))
|
||||
// };
|
||||
// let get_fast_vals_bytes = |terms: Vec<Term>| {
|
||||
// let query = BooleanQuery::new_multiterms_query(terms);
|
||||
// searcher.search(
|
||||
// &query,
|
||||
// &BytesFastFieldTestCollector::for_field(bytes_score_field),
|
||||
// )
|
||||
// };
|
||||
// assert_eq!(
|
||||
// get_fast_vals(vec![Term::from_field_text(text_field, "a")])?,
|
||||
// vec![5, 7, 13]
|
||||
// );
|
||||
// assert_eq!(
|
||||
// get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?,
|
||||
// vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13]
|
||||
// );
|
||||
// }
|
||||
{
|
||||
let get_fast_vals = |terms: Vec<Term>| {
|
||||
let query = BooleanQuery::new_multiterms_query(terms);
|
||||
searcher.search(&query, &FastFieldTestCollector::for_field("score"))
|
||||
};
|
||||
let get_fast_vals_bytes = |terms: Vec<Term>| {
|
||||
let query = BooleanQuery::new_multiterms_query(terms);
|
||||
searcher.search(
|
||||
&query,
|
||||
&BytesFastFieldTestCollector::for_field("score_bytes"),
|
||||
)
|
||||
};
|
||||
assert_eq!(
|
||||
get_fast_vals(vec![Term::from_field_text(text_field, "a")])?,
|
||||
vec![5, 7, 13]
|
||||
);
|
||||
assert_eq!(
|
||||
get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?,
|
||||
vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13]
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -1240,209 +1212,206 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// TODO re-enable
|
||||
// #[test]
|
||||
// fn test_merge_facets_sort_none() {
|
||||
// test_merge_facets(None, true)
|
||||
// }
|
||||
#[test]
|
||||
fn test_merge_facets_sort_none() {
|
||||
test_merge_facets(None, true)
|
||||
}
|
||||
|
||||
// #[test]
|
||||
// fn test_merge_facets_sort_asc() {
|
||||
// // In the merge case this will go through the doc_id mapping code
|
||||
// test_merge_facets(
|
||||
// Some(IndexSettings {
|
||||
// sort_by_field: Some(IndexSortByField {
|
||||
// field: "intval".to_string(),
|
||||
// order: Order::Desc,
|
||||
// }),
|
||||
// ..Default::default()
|
||||
// }),
|
||||
// true,
|
||||
// );
|
||||
// // In the merge case this will not go through the doc_id mapping code, because the data
|
||||
// is // sorted and disjunct
|
||||
// test_merge_facets(
|
||||
// Some(IndexSettings {
|
||||
// sort_by_field: Some(IndexSortByField {
|
||||
// field: "intval".to_string(),
|
||||
// order: Order::Desc,
|
||||
// }),
|
||||
// ..Default::default()
|
||||
// }),
|
||||
// false,
|
||||
// );
|
||||
// }
|
||||
#[test]
|
||||
fn test_merge_facets_sort_asc() {
|
||||
// In the merge case this will go through the doc_id mapping code
|
||||
test_merge_facets(
|
||||
Some(IndexSettings {
|
||||
sort_by_field: Some(IndexSortByField {
|
||||
field: "intval".to_string(),
|
||||
order: Order::Desc,
|
||||
}),
|
||||
..Default::default()
|
||||
}),
|
||||
true,
|
||||
);
|
||||
// In the merge case this will not go through the doc_id mapping code, because the data
|
||||
// sorted and disjunct
|
||||
test_merge_facets(
|
||||
Some(IndexSettings {
|
||||
sort_by_field: Some(IndexSortByField {
|
||||
field: "intval".to_string(),
|
||||
order: Order::Desc,
|
||||
}),
|
||||
..Default::default()
|
||||
}),
|
||||
false,
|
||||
);
|
||||
}
|
||||
|
||||
// #[test]
|
||||
// fn test_merge_facets_sort_desc() {
|
||||
// // In the merge case this will go through the doc_id mapping code
|
||||
// test_merge_facets(
|
||||
// Some(IndexSettings {
|
||||
// sort_by_field: Some(IndexSortByField {
|
||||
// field: "intval".to_string(),
|
||||
// order: Order::Desc,
|
||||
// }),
|
||||
// ..Default::default()
|
||||
// }),
|
||||
// true,
|
||||
// );
|
||||
// // In the merge case this will not go through the doc_id mapping code, because the data
|
||||
// is // sorted and disjunct
|
||||
// test_merge_facets(
|
||||
// Some(IndexSettings {
|
||||
// sort_by_field: Some(IndexSortByField {
|
||||
// field: "intval".to_string(),
|
||||
// order: Order::Desc,
|
// }),
// ..Default::default()
// }),
// false,
// );
// }
#[test]
fn test_merge_facets_sort_desc() {
// In the merge case this will go through the doc_id mapping code
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
true,
);
// In the merge case this will not go through the doc_id mapping code, because the data
// sorted and disjunct
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
false,
);
}

// force_segment_value_overlap forces the int value for sorting to have overlapping min and max
// ranges between segments so that merge algorithm can't apply certain optimizations
// fn test_merge_facets(index_settings: Option<IndexSettings>, force_segment_value_overlap:
// bool) { let mut schema_builder = schema::Schema::builder();
// let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
// let int_options = NumericOptions::default()
// .set_fast()
// .set_indexed();
// let int_field = schema_builder.add_u64_field("intval", int_options);
// let mut index_builder = Index::builder().schema(schema_builder.build());
// if let Some(settings) = index_settings {
// index_builder = index_builder.settings(settings);
// }
// let index = index_builder.create_in_ram().unwrap();
// // let index = Index::create_in_ram(schema_builder.build());
// let reader = index.reader().unwrap();
// let mut int_val = 0;
// {
// let mut index_writer = index.writer_for_tests().unwrap();
// let index_doc =
// |index_writer: &mut IndexWriter, doc_facets: &[&str], int_val: &mut u64| {
// let mut doc = Document::default();
// for facet in doc_facets {
// doc.add_facet(facet_field, Facet::from(facet));
// }
// doc.add_u64(int_field, *int_val);
// *int_val += 1;
// index_writer.add_document(doc).unwrap();
// };
fn test_merge_facets(index_settings: Option<IndexSettings>, force_segment_value_overlap: bool) {
let mut schema_builder = schema::Schema::builder();
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let int_options = NumericOptions::default().set_fast().set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
let mut index_builder = Index::builder().schema(schema_builder.build());
if let Some(settings) = index_settings {
index_builder = index_builder.settings(settings);
}
let index = index_builder.create_in_ram().unwrap();
// let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap();
let mut int_val = 0;
{
let mut index_writer = index.writer_for_tests().unwrap();
let index_doc =
|index_writer: &mut IndexWriter, doc_facets: &[&str], int_val: &mut u64| {
let mut doc = Document::default();
for facet in doc_facets {
doc.add_facet(facet_field, Facet::from(facet));
}
doc.add_u64(int_field, *int_val);
*int_val += 1;
index_writer.add_document(doc).unwrap();
};

// index_doc(
// &mut index_writer,
// &["/top/a/firstdoc", "/top/b"],
// &mut int_val,
// );
// index_doc(
// &mut index_writer,
// &["/top/a/firstdoc", "/top/b", "/top/c"],
// &mut int_val,
// );
// index_doc(&mut index_writer, &["/top/a", "/top/b"], &mut int_val);
// index_doc(&mut index_writer, &["/top/a"], &mut int_val);
index_doc(
&mut index_writer,
&["/top/a/firstdoc", "/top/b"],
&mut int_val,
);
index_doc(
&mut index_writer,
&["/top/a/firstdoc", "/top/b", "/top/c"],
&mut int_val,
);
index_doc(&mut index_writer, &["/top/a", "/top/b"], &mut int_val);
index_doc(&mut index_writer, &["/top/a"], &mut int_val);

// index_doc(&mut index_writer, &["/top/b", "/top/d"], &mut int_val);
// if force_segment_value_overlap {
// index_doc(&mut index_writer, &["/top/d"], &mut 0);
// index_doc(&mut index_writer, &["/top/e"], &mut 10);
// index_writer.commit().expect("committed");
// index_doc(&mut index_writer, &["/top/a"], &mut 5); // 5 is between 0 - 10 so the
// // segments don' have disjunct
// // ranges
// } else {
// index_doc(&mut index_writer, &["/top/d"], &mut int_val);
// index_doc(&mut index_writer, &["/top/e"], &mut int_val);
// index_writer.commit().expect("committed");
// index_doc(&mut index_writer, &["/top/a"], &mut int_val);
// }
// index_doc(&mut index_writer, &["/top/b"], &mut int_val);
// index_doc(&mut index_writer, &["/top/c"], &mut int_val);
// index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/b", "/top/d"], &mut int_val);
if force_segment_value_overlap {
index_doc(&mut index_writer, &["/top/d"], &mut 0);
index_doc(&mut index_writer, &["/top/e"], &mut 10);
index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/a"], &mut 5); // 5 is between 0 - 10 so the
// segments don' have disjunct
// ranges
} else {
index_doc(&mut index_writer, &["/top/d"], &mut int_val);
index_doc(&mut index_writer, &["/top/e"], &mut int_val);
index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/a"], &mut int_val);
}
index_doc(&mut index_writer, &["/top/b"], &mut int_val);
index_doc(&mut index_writer, &["/top/c"], &mut int_val);
index_writer.commit().expect("committed");

// index_doc(&mut index_writer, &["/top/e", "/top/f"], &mut int_val);
// index_writer.commit().expect("committed");
// }
index_doc(&mut index_writer, &["/top/e", "/top/f"], &mut int_val);
index_writer.commit().expect("committed");
}

// reader.reload().unwrap();
// let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
// let searcher = reader.searcher();
// let mut facet_collector = FacetCollector::for_field(facet_field);
// facet_collector.add_facet(Facet::from("/top"));
// let (count, facet_counts) = searcher
// .search(&AllQuery, &(Count, facet_collector))
// .unwrap();
// assert_eq!(count, expected_num_docs);
// let facets: Vec<(String, u64)> = facet_counts
// .get("/top")
// .map(|(facet, count)| (facet.to_string(), count))
// .collect();
// assert_eq!(
// facets,
// expected
// .iter()
// .map(|&(facet_str, count)| (String::from(facet_str), count))
// .collect::<Vec<_>>()
// );
// };
// test_searcher(
// 11,
// &[
// ("/top/a", 5),
// ("/top/b", 5),
// ("/top/c", 2),
// ("/top/d", 2),
// ("/top/e", 2),
// ("/top/f", 1),
// ],
// );
// // Merging the segments
// {
// let segment_ids = index
// .searchable_segment_ids()
// .expect("Searchable segments failed.");
// let mut index_writer = index.writer_for_tests().unwrap();
// index_writer
// .merge(&segment_ids)
// .wait()
// .expect("Merging failed");
// index_writer.wait_merging_threads().unwrap();
// reader.reload().unwrap();
// test_searcher(
// 11,
// &[
// ("/top/a", 5),
// ("/top/b", 5),
// ("/top/c", 2),
// ("/top/d", 2),
// ("/top/e", 2),
// ("/top/f", 1),
// ],
// );
// }
reader.reload().unwrap();
let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field("facet");
facet_collector.add_facet(Facet::from("/top"));
let (count, facet_counts) = searcher
.search(&AllQuery, &(Count, facet_collector))
.unwrap();
assert_eq!(count, expected_num_docs);
let facets: Vec<(String, u64)> = facet_counts
.get("/top")
.map(|(facet, count)| (facet.to_string(), count))
.collect();
assert_eq!(
facets,
expected
.iter()
.map(|&(facet_str, count)| (String::from(facet_str), count))
.collect::<Vec<_>>()
);
};
test_searcher(
11,
&[
("/top/a", 5),
("/top/b", 5),
("/top/c", 2),
("/top/d", 2),
("/top/e", 2),
("/top/f", 1),
],
);
// Merging the segments
{
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap();
index_writer
.merge(&segment_ids)
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap();
test_searcher(
11,
&[
("/top/a", 5),
("/top/b", 5),
("/top/c", 2),
("/top/d", 2),
("/top/e", 2),
("/top/f", 1),
],
);
}

// // Deleting one term
// {
// let mut index_writer = index.writer_for_tests().unwrap();
// let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
// let facet_term = Term::from_facet(facet_field, &facet);
// index_writer.delete_term(facet_term);
// index_writer.commit().unwrap();
// reader.reload().unwrap();
// test_searcher(
// 9,
// &[
// ("/top/a", 3),
// ("/top/b", 3),
// ("/top/c", 1),
// ("/top/d", 2),
// ("/top/e", 2),
// ("/top/f", 1),
// ],
// );
// }
// }
// Deleting one term
{
let mut index_writer = index.writer_for_tests().unwrap();
let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term);
index_writer.commit().unwrap();
reader.reload().unwrap();
test_searcher(
9,
&[
("/top/a", 3),
("/top/b", 3),
("/top/c", 1),
("/top/d", 2),
("/top/e", 2),
("/top/f", 1),
],
);
}
}

#[test]
fn test_bug_merge() -> crate::Result<()> {
@@ -1511,6 +1480,13 @@ mod tests {
let int_field = schema_builder.add_u64_field("intvals", int_options);
let index = Index::create_in_ram(schema_builder.build());

let mut vals: Vec<u64> = Vec::new();
let mut test_vals = move |col: &Column<u64>, doc: DocId, expected: &[u64]| {
vals.clear();
vals.extend(col.values(doc));
assert_eq!(&vals[..], expected);
};

{
let mut index_writer = index.writer_for_tests()?;
let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
@@ -1536,49 +1512,42 @@ mod tests {
}
let reader = index.reader()?;
let searcher = reader.searcher();
let mut vals: Vec<u64> = Vec::new();

{
let segment = searcher.segment_reader(0u32);
// let ff_reader = segment.fast_fields().u64s(int_field).unwrap();

// ff_reader.get_vals(0, &mut vals);
// assert_eq!(&vals, &[1, 2]);

// ff_reader.get_vals(1, &mut vals);
// assert_eq!(&vals, &[1, 2, 3]);

// ff_reader.get_vals(2, &mut vals);
// assert_eq!(&vals, &[4, 5]);

// ff_reader.get_vals(3, &mut vals);
// assert_eq!(&vals, &[1, 2]);

// ff_reader.get_vals(4, &mut vals);
// assert_eq!(&vals, &[1, 5]);

// ff_reader.get_vals(5, &mut vals);
// assert_eq!(&vals, &[3]);

// ff_reader.get_vals(6, &mut vals);
// assert_eq!(&vals, &[17]);
let column = segment
.fast_fields()
.column_opt::<u64>("intvals")
.unwrap()
.unwrap();
test_vals(&column, 0, &[1, 2]);
test_vals(&column, 1, &[1, 2, 3]);
test_vals(&column, 2, &[4, 5]);
test_vals(&column, 3, &[1, 2]);
test_vals(&column, 4, &[1, 5]);
test_vals(&column, 5, &[3]);
test_vals(&column, 6, &[17]);
}

{
let segment = searcher.segment_reader(1u32);
// let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
// ff_reader.get_vals(0, &mut vals);
// assert_eq!(&vals, &[28, 27]);

// ff_reader.get_vals(1, &mut vals);
// assert_eq!(&vals, &[1_000]);
let col = segment
.fast_fields()
.column_opt::<u64>("intvals")
.unwrap()
.unwrap();
test_vals(&col, 0, &[28, 27]);
test_vals(&col, 1, &[1000]);
}

{
let segment = searcher.segment_reader(2u32);
// let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
// ff_reader.get_vals(0, &mut vals);
// assert_eq!(&vals, &[20]);
let col = segment
.fast_fields()
.column_opt::<u64>("intvals")
.unwrap()
.unwrap();
test_vals(&col, 0, &[20]);
}

// Merging the segments
@@ -1593,37 +1562,21 @@ mod tests {
{
let searcher = reader.searcher();
let segment = searcher.segment_reader(0u32);
// let ff_reader = segment.fast_fields().u64s(int_field).unwrap();

// ff_reader.get_vals(0, &mut vals);
// assert_eq!(&vals, &[1, 2]);

// ff_reader.get_vals(1, &mut vals);
// assert_eq!(&vals, &[1, 2, 3]);

// ff_reader.get_vals(2, &mut vals);
// assert_eq!(&vals, &[4, 5]);

// ff_reader.get_vals(3, &mut vals);
// assert_eq!(&vals, &[1, 2]);

// ff_reader.get_vals(4, &mut vals);
// assert_eq!(&vals, &[1, 5]);

// ff_reader.get_vals(5, &mut vals);
// assert_eq!(&vals, &[3]);

// ff_reader.get_vals(6, &mut vals);
// assert_eq!(&vals, &[17]);

// ff_reader.get_vals(7, &mut vals);
// assert_eq!(&vals, &[28, 27]);

// ff_reader.get_vals(8, &mut vals);
// assert_eq!(&vals, &[1_000]);

// ff_reader.get_vals(9, &mut vals);
// assert_eq!(&vals, &[20]);
let col = segment
.fast_fields()
.column_opt::<u64>("intvals")
.unwrap()
.unwrap();
test_vals(&col, 0, &[1, 2]);
test_vals(&col, 1, &[1, 2, 3]);
test_vals(&col, 2, &[4, 5]);
test_vals(&col, 3, &[1, 2]);
test_vals(&col, 4, &[1, 5]);
test_vals(&col, 5, &[3]);
test_vals(&col, 6, &[17]);
test_vals(&col, 7, &[28, 27]);
test_vals(&col, 8, &[1000]);
test_vals(&col, 9, &[20]);
}
Ok(())
}

@@ -155,6 +155,7 @@ mod tests {
fn test_merge_sorted_index_desc_not_disjunct() {
test_merge_sorted_index_desc_(false);
}

#[test]
fn test_merge_sorted_index_desc_disjunct() {
test_merge_sorted_index_desc_(true);
@@ -476,12 +477,11 @@ mod bench_sorted_index_merge {

use std::sync::Arc;

use fastfield_codecs::Column;
use test::{self, Bencher};

use crate::core::Index;
use crate::indexer::merger::IndexMerger;
use crate::schema::{Cardinality, NumericOptions, Schema};
use crate::schema::{NumericOptions, Schema};
use crate::{IndexSettings, IndexSortByField, IndexWriter, Order};
fn create_index(sort_by_field: Option<IndexSortByField>) -> Index {
let mut schema_builder = Schema::builder();
@@ -512,42 +512,42 @@ mod bench_sorted_index_merge {
index
}

#[bench]
fn create_sorted_index_walk_overkmerge_on_merge_fastfield(
b: &mut Bencher,
) -> crate::Result<()> {
let sort_by_field = IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
};
let index = create_index(Some(sort_by_field.clone()));
let segments = index.searchable_segments().unwrap();
let merger: IndexMerger =
IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
let doc_id_mapping = merger.generate_doc_id_mapping(&sort_by_field).unwrap();
b.iter(|| {
let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
let reader = &merger.readers[doc_addr.segment_ord as usize];
let u64_reader: Arc<dyn Column<u64>> = reader
.fast_fields()
.typed_fast_field_reader("intval")
.expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);
(doc_addr.doc_id, reader, u64_reader)
});
// add values in order of the new doc_ids
let mut val = 0;
for (doc_id, _reader, field_reader) in sorted_doc_ids {
val = field_reader.get_val(doc_id);
}
//#[bench]
// fn create_sorted_index_walk_overkmerge_on_merge_fastfield(
// b: &mut Bencher,
//) -> crate::Result<()> {
// let sort_by_field = IndexSortByField {
// field: "intval".to_string(),
// order: Order::Desc,
//};
// let index = create_index(Some(sort_by_field.clone()));
// let segments = index.searchable_segments().unwrap();
// let merger: IndexMerger =
// IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
// let doc_id_mapping = merger.generate_doc_id_mapping(&sort_by_field).unwrap();
// b.iter(|| {
// let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
// let reader = &merger.readers[doc_addr.segment_ord as usize];
// let u64_reader: Arc<dyn Column<u64>> = reader
//.fast_fields()
//.typed_fast_field_reader("intval")
//.expect(
//"Failed to find a reader for single fast field. This is a tantivy bug and \
// it should never happen.",
//);
//(doc_addr.doc_id, reader, u64_reader)
//});
//// add values in order of the new doc_ids
// let mut val = 0;
// for (doc_id, _reader, field_reader) in sorted_doc_ids {
// val = field_reader.get_val(doc_id);
//}

val
});
// val
//});

Ok(())
}
// Ok(())
//}
#[bench]
fn create_sorted_index_create_doc_id_mapping(b: &mut Bencher) -> crate::Result<()> {
let sort_by_field = IndexSortByField {

@@ -19,8 +19,6 @@ mod segment_register;
pub mod segment_serializer;
pub mod segment_updater;
mod segment_writer;
// mod sorted_doc_id_column;
// mod sorted_doc_id_multivalue_column;
mod stamper;

use crossbeam_channel as channel;
@@ -57,9 +55,9 @@ type AddBatchReceiver = channel::Receiver<AddBatch>;
#[cfg(feature = "mmap")]
#[cfg(test)]
mod tests_mmap {
use crate::collector::Count;

// use crate::query::QueryParser;
use crate::schema::{JsonObjectOptions, Schema, TEXT};
use crate::schema::{Schema, TEXT};
use crate::{Index, Term};

#[test]

@@ -1,169 +0,0 @@
use std::cmp;

use fastfield_codecs::Column;

use super::flat_map_with_buffer::FlatMapWithBufferIter;
use crate::fastfield::{MultiValueIndex, MultiValuedFastFieldReader};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::{DocAddress, SegmentReader};

pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: Vec<MultiValuedFastFieldReader<u64>>,
min_value: u64,
max_value: u64,
num_vals: u32,
}

impl<'a> RemappedDocIdMultiValueColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
field: &str,
) -> Self {
// Our values are bitpacked and we need to know what should be
// our bitwidth and our minimum value before serializing any values.
//
// Computing those is non-trivial if some documents are deleted.
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Serializer.
let mut num_vals = 0;
let mut min_value = u64::MAX;
let mut max_value = u64::MIN;
let mut vals = Vec::new();
let mut fast_field_readers = Vec::with_capacity(readers.len());
for reader in readers {
let ff_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.typed_fast_field_multi_reader::<u64>(field)
.expect(
"Failed to find multivalued fast field reader. This is a bug in tantivy. \
Please report.",
);
for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
for &val in &vals {
min_value = cmp::min(val, min_value);
max_value = cmp::max(val, max_value);
}
num_vals += vals.len();
}
fast_field_readers.push(ff_reader);
// TODO optimize when no deletes
}
if min_value > max_value {
min_value = 0;
max_value = 0;
}
RemappedDocIdMultiValueColumn {
doc_id_mapping,
fast_field_readers,
min_value,
max_value,
num_vals: num_vals as u32,
}
}
}

impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.doc_id_mapping
.iter_old_doc_addrs()
.flat_map_with_buffer(|old_doc_addr: DocAddress, buffer| {
let ff_reader = &self.fast_field_readers[old_doc_addr.segment_ord as usize];
ff_reader.get_vals(old_doc_addr.doc_id, buffer);
}),
)
}
fn min_value(&self) -> u64 {
self.min_value
}

fn max_value(&self) -> u64 {
self.max_value
}

fn num_vals(&self) -> u32 {
self.num_vals
}
}

pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
multi_value_length_readers: Vec<&'a MultiValueIndex>,
min_value: u64,
max_value: u64,
num_vals: u32,
}

impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
pub(crate) fn new(
segment_and_ff_readers: &'a [(&'a SegmentReader, &'a MultiValueIndex)],
doc_id_mapping: &'a SegmentDocIdMapping,
) -> Self {
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Column.
let mut num_vals = 0;
let min_value = 0;
let mut max_value = 0;
let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
for segment_and_ff_reader in segment_and_ff_readers {
let segment_reader = segment_and_ff_reader.0;
let multi_value_length_reader = segment_and_ff_reader.1;
if !segment_reader.has_deletes() {
max_value += multi_value_length_reader.total_num_vals() as u64;
} else {
for doc in segment_reader.doc_ids_alive() {
max_value += multi_value_length_reader.num_vals_for_doc(doc) as u64;
}
}
num_vals += segment_reader.num_docs();
multi_value_length_readers.push(multi_value_length_reader);
}
// The value range is always get_val(doc)..get_val(doc + 1)
num_vals += 1;
Self {
doc_id_mapping,
multi_value_length_readers,
min_value,
max_value,
num_vals,
}
}
}

impl<'a> Column for RemappedDocIdMultiValueIndexColumn<'a> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
let mut offset = 0;
Box::new(
std::iter::once(0).chain(self.doc_id_mapping.iter_old_doc_addrs().map(
move |old_doc_addr| {
let ff_reader =
&self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
offset += ff_reader.num_vals_for_doc(old_doc_addr.doc_id);
offset as u64
},
)),
)
}
fn min_value(&self) -> u64 {
self.min_value
}

fn max_value(&self) -> u64 {
self.max_value
}

fn num_vals(&self) -> u32 {
self.num_vals
}
}
@@ -279,7 +279,7 @@ mod indexer;
pub mod error;
pub mod tokenizer;

// pub mod aggregation;
pub mod aggregation;
pub mod collector;
pub mod directory;
pub mod fastfield;

@@ -29,8 +29,6 @@ pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
pub(crate) use self::skip::{BlockInfo, SkipReader};
pub use self::term_info::TermInfo;

pub(crate) type UnorderedTermId = stacker::UnorderedId;

#[allow(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
pub(crate) enum FreqReadingOption {

@@ -1,9 +1,7 @@
use std::collections::HashMap;
use std::io;
use std::marker::PhantomData;
use std::ops::Range;

use rustc_hash::FxHashMap;
use stacker::Addr;

use crate::fieldnorm::FieldNormReaders;
@@ -11,10 +9,8 @@ use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::recorder::{BufferLender, Recorder};
use crate::postings::{
FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter,
UnorderedTermId,
};
use crate::schema::{Field, FieldType, Schema, Term};
use crate::termdict::TermOrdinal;
use crate::schema::{Field, Term};
use crate::tokenizer::{Token, TokenStream, MAX_TOKEN_LEN};
use crate::DocId;

@@ -13,7 +13,7 @@ use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_
use crate::postings::skip::SkipSerializer;
use crate::query::Bm25Weight;
use crate::schema::{Field, FieldEntry, FieldType, IndexRecordOption, Schema};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::termdict::TermDictionaryBuilder;
use crate::{DocId, Score};

/// `InvertedIndexSerializer` is in charge of serializing
@@ -109,7 +109,6 @@ pub struct FieldSerializer<'a> {
positions_serializer_opt: Option<PositionSerializer<&'a mut CountingWriter<WritePtr>>>,
current_term_info: TermInfo,
term_open: bool,
num_terms: TermOrdinal,
}

impl<'a> FieldSerializer<'a> {
@@ -148,7 +147,6 @@ impl<'a> FieldSerializer<'a> {
positions_serializer_opt,
current_term_info: TermInfo::default(),
term_open: false,
num_terms: TermOrdinal::default(),
})
}

@@ -171,20 +169,17 @@ impl<'a> FieldSerializer<'a> {
/// * term - the term. It needs to come after the previous term according to the lexicographical
/// order.
/// * term_doc_freq - return the number of document containing the term.
pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<()> {
assert!(
!self.term_open,
"Called new_term, while the previous term was not closed."
);

self.term_open = true;
self.postings_serializer.clear();
self.current_term_info = self.current_term_info();
self.term_dictionary_builder.insert_key(term)?;
let term_ordinal = self.num_terms;
self.num_terms += 1;
self.postings_serializer.new_term(term_doc_freq);
Ok(term_ordinal)
Ok(())
}

/// Serialize the information that a document contains for the current term:
Some files were not shown because too many files have changed in this diff