Compare commits

..

10 Commits

Author SHA1 Message Date
Paul Masurel
e9af721e60 Fixing compile 2023-02-05 18:05:29 +01:00
Paul Masurel
88ed3d8b48 Switching back to iterable. 2023-02-02 14:02:10 +01:00
Paul Masurel
aa8408a979 Updated TODO 2023-02-02 08:30:14 +01:00
Paul Masurel
4319d8c1bd Reenabling bench 2023-02-02 13:09:19 +09:00
Paul Masurel
33d18d0424 Plugged fastfield merge
Fixing unit tests.
Fixing gcd
stats isolation
2023-02-02 12:07:56 +09:00
Pascal Seitz
3de018c49f add merge for bytes/str column 2023-02-01 12:13:25 +08:00
Paul Masurel
96485f21d6 Added merge code for trivial mapping u64 & u128
Added rank
2023-01-30 10:05:40 +09:00
Pascal Seitz
1330e6f10d prepare for merge 2023-01-25 11:32:08 +09:00
Paul Masurel
5086914304 Integration of columnar 2023-01-24 18:18:05 +09:00
Paul Masurel
d7a8053cc2 Introduced a select cursor. 2023-01-20 23:27:39 +09:00
86 changed files with 4006 additions and 5029 deletions

View File

@@ -23,7 +23,7 @@ regex = { version = "1.5.5", default-features = false, features = ["std", "unico
aho-corasick = "0.7" aho-corasick = "0.7"
tantivy-fst = "0.4.0" tantivy-fst = "0.4.0"
memmap2 = { version = "0.5.3", optional = true } memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true } lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true } brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.12", optional = true, default-features = false } zstd = { version = "0.12", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true } snap = { version = "1.0.5", optional = true }
@@ -55,12 +55,12 @@ measure_time = "0.8.2"
async-trait = "0.1.53" async-trait = "0.1.53"
arc-swap = "1.5.0" arc-swap = "1.5.0"
columnar = { version="0.1", path="./columnar", package ="tantivy-columnar" }
sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true } sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" } stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" } tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
tantivy-bitpacker = { version= "0.3", path="./bitpacker" } tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" } columnar = { version= "0.1", path="./columnar", package="tantivy-columnar" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version="0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" } tokenizer-api = { version="0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" }
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]

View File

@@ -5,28 +5,24 @@ edition = "2021"
license = "MIT" license = "MIT"
[dependencies] [dependencies]
itertools = "0.10.5"
log = "0.4.17"
fnv = "1.0.7"
fastdivide = "0.4.0"
rand = { version = "0.8.5", optional = true }
measure_time = { version = "0.8.2", optional = true }
prettytable-rs = { version = "0.10.0", optional = true }
stacker = { path = "../stacker", package="tantivy-stacker"} stacker = { path = "../stacker", package="tantivy-stacker"}
serde_json = "1"
thiserror = "1"
fnv = "1"
sstable = { path = "../sstable", package = "tantivy-sstable" } sstable = { path = "../sstable", package = "tantivy-sstable" }
common = { path = "../common", package = "tantivy-common" } common = { path = "../common", package = "tantivy-common" }
itertools = "0.10"
log = "0.4"
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" } tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
prettytable-rs = {version="0.10.0", optional= true}
rand = {version="0.8.3", optional= true}
fastdivide = "0.4"
measure_time = { version="0.8.2", optional=true}
[dev-dependencies] [dev-dependencies]
proptest = "1" proptest = "1"
more-asserts = "0.3.1" more-asserts = "0.3.0"
rand = "0.8.5" rand = "0.8.3"
criterion = "0.4"
[features] [features]
unstable = [] unstable = []
[[bench]]
name = "bench_index"
harness = false

columnar/Makefile Normal file
View File

@@ -0,0 +1,6 @@
test:
	echo "Run test only... No examples."
	cargo test --tests --lib
fmt:
	cargo +nightly fmt --all

View File

@@ -1,91 +0,0 @@
use std::ops::Range;
use criterion::*;
use rand::prelude::*;
use tantivy_columnar::column_index::MultiValueIndex;
use tantivy_columnar::RowId;
const WINDOW: usize = 40;
fn bench_multi_value_index_util(
len_range: Range<u32>,
num_rows: RowId,
select_value_ratio: f64,
b: &mut criterion::Bencher,
) {
let mut start_index: Vec<RowId> = vec![0u32];
let mut cursor: u32 = 0u32;
let mut rng = StdRng::from_seed([16u8; 32]);
for i in 0..num_rows {
let num_vals = rng.gen_range(len_range.clone());
cursor += num_vals;
start_index.push(cursor);
}
let select_rows: Vec<RowId> = (0u32..cursor)
.filter(|i| rng.gen_bool(select_value_ratio))
.collect();
let mv_index = MultiValueIndex::for_test(&start_index);
// mv_index.select_batch_in_place(0, &mut select_rows[..]);
let mut buffer = Vec::new();
b.iter(|| {
let mut start_row = 0u32;
let mut len = 0;
for chunk in select_rows.chunks(WINDOW) {
buffer.clear();
buffer.extend_from_slice(chunk);
mv_index.select_batch_in_place(start_row, &mut buffer);
start_row = buffer.last().copied().unwrap();
len += buffer.len()
}
assert_eq!(len, 4303);
len
});
}
fn bench_multi_value_index_util2(
len_range: Range<u32>,
num_rows: RowId,
select_value_ratio: f64,
b: &mut criterion::Bencher,
) {
let mut start_index: Vec<RowId> = vec![0u32];
let mut cursor: u32 = 0u32;
let mut rng = StdRng::from_seed([16u8; 32]);
for i in 0..num_rows {
let num_vals = rng.gen_range(len_range.clone());
cursor += num_vals;
start_index.push(cursor);
}
let select_rows: Vec<RowId> = (0u32..cursor)
.filter(|i| rng.gen_bool(select_value_ratio))
.collect();
let mv_index = MultiValueIndex::for_test(&start_index);
// mv_index.select_batch_in_place(0, &mut select_rows[..]);
let mut buffer = Vec::new();
b.iter(|| {
let mut mv_index_cursor = mv_index.select_cursor();
let mut len = 0;
for chunk in select_rows.chunks(WINDOW) {
buffer.clear();
buffer.extend_from_slice(chunk);
mv_index_cursor.select_batch_in_place(&mut buffer);
len += buffer.len();
}
assert_eq!(len, 4303);
len
});
}
fn select_benchmark(c: &mut criterion::Criterion) {
c.bench_function("bench_multi_value_index_10_100", |b| {
bench_multi_value_index_util(0..10, 100_000, 0.01f64, b)
});
c.bench_function("bench_multi_value_cursor_index_10_100", |b| {
bench_multi_value_index_util2(0..10, 100_000, 0.01f64, b)
});
}
criterion_group!(benches, select_benchmark);
criterion_main!(benches);

View File

@@ -28,7 +28,12 @@ fn get_u128_column_random() -> Arc<dyn ColumnValues<u128>> {
fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn ColumnValues<u128>> { fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn ColumnValues<u128>> {
let mut out = vec![]; let mut out = vec![];
tantivy_columnar::column_values::serialize_column_values_u128(&data, &mut out).unwrap(); tantivy_columnar::column_values::serialize_column_values_u128(
&(|| data.iter().copied()),
data.len() as u32,
&mut out,
)
.unwrap();
let out = OwnedBytes::new(out); let out = OwnedBytes::new(out);
tantivy_columnar::column_values::open_u128_mapped::<u128>(out).unwrap() tantivy_columnar::column_values::open_u128_mapped::<u128>(out).unwrap()
} }
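The hunk above reflects an API change: `serialize_column_values_u128` now takes a factory closure yielding the value iterator plus an explicit value count, instead of a plain slice. A minimal sketch of the new call shape, assuming the paths and signature exactly as they appear in this diff:

fn serialize_u128_values(data: &[u128]) -> std::io::Result<Vec<u8>> {
    let mut out = Vec::new();
    // Factory closure + explicit count, mirroring the bench code above.
    tantivy_columnar::column_values::serialize_column_values_u128(
        &(|| data.iter().copied()),
        data.len() as u32,
        &mut out,
    )?;
    Ok(out)
}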
@@ -36,7 +41,7 @@ fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn ColumnValues<u128>> {
const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50; const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
const SINGLE_ITEM: u64 = 90; const SINGLE_ITEM: u64 = 90;
const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90; const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
fn get_data_50percent_item() -> Vec<u128> { fn get_data_50percent_item() -> Vec<u128> {
let mut rng = StdRng::from_seed([1u8; 32]); let mut rng = StdRng::from_seed([1u8; 32]);

View File

@@ -5,7 +5,9 @@ use std::ops::RangeInclusive;
use std::sync::Arc; use std::sync::Arc;
use rand::prelude::*; use rand::prelude::*;
use tantivy_columnar::column_values::{serialize_and_load_u64_based_column_values, CodecType}; use tantivy_columnar::column_values::{
serialize_and_load_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
};
use tantivy_columnar::*; use tantivy_columnar::*;
use test::Bencher; use test::Bencher;
@@ -178,7 +180,7 @@ fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
b.iter(|| { b.iter(|| {
let mut a = 0u64; let mut a = 0u64;
for i in 0u32..n as u32 { for i in 0u32..n as u32 {
a += column_ref.get_val(i); a += column.get_val(i);
} }
a a
}); });

View File

@@ -1,17 +0,0 @@
[package]
name = "tantivy-columnar-cli"
version = "0.1.0"
edition = "2021"
license = "MIT"
[dependencies]
columnar = {path="../", package="tantivy-columnar"}
serde_json = "1"
serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
serde = "1"
[workspace]
members = []
[profile.release]
debug = true

View File

@@ -1,134 +0,0 @@
use columnar::ColumnarWriter;
use columnar::NumericalValue;
use serde_json_borrow;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::io::BufReader;
use std::time::Instant;
#[derive(Default)]
struct JsonStack {
path: String,
stack: Vec<usize>,
}
impl JsonStack {
fn push(&mut self, seg: &str) {
let len = self.path.len();
self.stack.push(len);
self.path.push('.');
self.path.push_str(seg);
}
fn pop(&mut self) {
if let Some(len) = self.stack.pop() {
self.path.truncate(len);
}
}
fn path(&self) -> &str {
&self.path[1..]
}
}
fn append_json_to_columnar(
doc: u32,
json_value: &serde_json_borrow::Value,
columnar: &mut ColumnarWriter,
stack: &mut JsonStack,
) -> usize {
let mut count = 0;
match json_value {
serde_json_borrow::Value::Null => {}
serde_json_borrow::Value::Bool(val) => {
columnar.record_numerical(
doc,
stack.path(),
NumericalValue::from(if *val { 1u64 } else { 0u64 }),
);
count += 1;
}
serde_json_borrow::Value::Number(num) => {
let numerical_value: NumericalValue = if let Some(num_i64) = num.as_i64() {
num_i64.into()
} else if let Some(num_u64) = num.as_u64() {
num_u64.into()
} else if let Some(num_f64) = num.as_f64() {
num_f64.into()
} else {
panic!();
};
count += 1;
columnar.record_numerical(
doc,
stack.path(),
numerical_value,
);
}
serde_json_borrow::Value::Str(msg) => {
columnar.record_str(
doc,
stack.path(),
msg,
);
count += 1;
},
serde_json_borrow::Value::Array(vals) => {
for val in vals {
count += append_json_to_columnar(doc, val, columnar, stack);
}
},
serde_json_borrow::Value::Object(json_map) => {
for (child_key, child_val) in json_map {
stack.push(child_key);
count += append_json_to_columnar(doc, child_val, columnar, stack);
stack.pop();
}
},
}
count
}
fn main() -> io::Result<()> {
let file = File::open("gh_small.json")?;
let mut reader = BufReader::new(file);
let mut line = String::with_capacity(100);
let mut columnar = columnar::ColumnarWriter::default();
let mut doc = 0;
let start = Instant::now();
let mut stack = JsonStack::default();
let mut total_count = 0;
let start_build = Instant::now();
loop {
line.clear();
let len = reader.read_line(&mut line)?;
if len == 0 {
break;
}
let Ok(json_value) = serde_json::from_str::<serde_json_borrow::Value>(&line) else { continue; };
total_count += append_json_to_columnar(doc, &json_value, &mut columnar, &mut stack);
doc += 1;
}
println!("Build in {:?}", start_build.elapsed());
println!("value count {total_count}");
let mut buffer = Vec::new();
let start_serialize = Instant::now();
columnar.serialize(doc, None, &mut buffer)?;
println!("Serialized in {:?}", start_serialize.elapsed());
println!("num docs: {doc}, {:?}", start.elapsed());
println!("buffer len {} MB", buffer.len() / 1_000_000);
let columnar = columnar::ColumnarReader::open(buffer)?;
for (column_name, dynamic_column) in columnar.list_columns()? {
let num_bytes = dynamic_column.num_bytes();
let typ = dynamic_column.column_type();
if num_bytes > 1_000_000 {
println!("{column_name} {typ:?} {} KB", num_bytes / 1_000);
}
}
println!("{} columns", columnar.num_columns());
Ok(())
}
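For reference, a standalone sketch (std only, no serde) of the flattening that the removed `append_json_to_columnar` performs: nested object keys become dot-separated column names, and arrays recurse without adding a path segment. The `Json` enum and names here are illustrative stand-ins, not the crate's types.

enum Json {
    Num(i64),
    Array(Vec<Json>),
    Object(Vec<(&'static str, Json)>),
}

fn flatten(value: &Json, path: &mut Vec<&'static str>, out: &mut Vec<(String, i64)>) {
    match value {
        // A leaf value is recorded under the current dotted path.
        Json::Num(val) => out.push((path.join("."), *val)),
        // Arrays recurse without pushing a new path segment.
        Json::Array(vals) => {
            for val in vals {
                flatten(val, path, out);
            }
        }
        // Object keys extend the path for their children.
        Json::Object(entries) => {
            for (key, child) in entries {
                path.push(*key);
                flatten(child, path, out);
                path.pop();
            }
        }
    }
}

fn main() {
    // {"a": {"b": 1, "c": [2, 3]}}
    let doc = Json::Object(vec![(
        "a",
        Json::Object(vec![
            ("b", Json::Num(1)),
            ("c", Json::Array(vec![Json::Num(2), Json::Num(3)])),
        ]),
    )]);
    let mut out = Vec::new();
    flatten(&doc, &mut Vec::new(), &mut out);
    assert_eq!(
        out,
        vec![
            ("a.b".to_string(), 1),
            ("a.c".to_string(), 2),
            ("a.c".to_string(), 3)
        ]
    );
}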

View File

@@ -1,16 +1,14 @@
# zero to one # zero to one
* merges with non trivial mapping (deletes / sort)
* emission of the sort mapping.
+ multivalued range queries restart from the beginning all of the time.
* revisit line codec * revisit line codec
* removal of all rows of a column in the schema due to deletes * removal of all rows of a column in the schema due to deletes
* add columns from schema on merge
* Plugging JSON * Plugging JSON
* replug examples replug examples
* move datetime to quickwit common
* switch to nanos
* reintroduce the gcd map.
# Perf and Size # Perf and Size
* remove alloc in `ord_to_term`
* re-add ZSTD compression for dictionaries * re-add ZSTD compression for dictionaries
no systematic monotonic mapping no systematic monotonic mapping
consider removing multilinear consider removing multilinear

View File

@@ -56,6 +56,12 @@ impl BytesColumn {
#[derive(Clone)] #[derive(Clone)]
pub struct StrColumn(BytesColumn); pub struct StrColumn(BytesColumn);
impl From<BytesColumn> for StrColumn {
fn from(bytes_col: BytesColumn) -> Self {
StrColumn(bytes_col)
}
}
impl From<StrColumn> for BytesColumn { impl From<StrColumn> for BytesColumn {
fn from(str_column: StrColumn) -> BytesColumn { fn from(str_column: StrColumn) -> BytesColumn {
str_column.0 str_column.0
@@ -63,10 +69,6 @@ impl From<StrColumn> for BytesColumn {
} }
impl StrColumn { impl StrColumn {
pub(crate) fn wrap(bytes_column: BytesColumn) -> StrColumn {
StrColumn(bytes_column)
}
pub fn dictionary(&self) -> &Dictionary<VoidSSTable> { pub fn dictionary(&self) -> &Dictionary<VoidSSTable> {
self.0.dictionary.as_ref() self.0.dictionary.as_ref()
} }

View File

@@ -9,8 +9,8 @@ use std::sync::Arc;
use common::BinarySerializable; use common::BinarySerializable;
pub use dictionary_encoded::{BytesColumn, StrColumn}; pub use dictionary_encoded::{BytesColumn, StrColumn};
pub use serialize::{ pub use serialize::{
open_column_bytes, open_column_str, open_column_u128, open_column_u64, open_column_bytes, open_column_u128, open_column_u64, serialize_column_mappable_to_u128,
serialize_column_mappable_to_u128, serialize_column_mappable_to_u64, serialize_column_mappable_to_u64,
}; };
use crate::column_index::ColumnIndex; use crate::column_index::ColumnIndex;
@@ -41,7 +41,6 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
pub fn get_cardinality(&self) -> Cardinality { pub fn get_cardinality(&self) -> Cardinality {
self.idx.get_cardinality() self.idx.get_cardinality()
} }
pub fn num_rows(&self) -> RowId { pub fn num_rows(&self) -> RowId {
match &self.idx { match &self.idx {
ColumnIndex::Full => self.values.num_vals() as u32, ColumnIndex::Full => self.values.num_vals() as u32,
@@ -71,15 +70,6 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
.map(|value_row_id: RowId| self.values.get_val(value_row_id)) .map(|value_row_id: RowId| self.values.get_val(value_row_id))
} }
/// Fills the output vector with the (possibly multiple) values that are associated with
/// `row_id`.
///
/// This method clears the `output` vector.
pub fn fill_vals(&self, row_id: RowId, output: &mut Vec<T>) {
output.clear();
output.extend(self.values(row_id));
}
pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> { pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> {
Arc::new(FirstValueWithDefault { Arc::new(FirstValueWithDefault {
column: self, column: self,
@@ -133,7 +123,7 @@ impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
match &self.column.idx { match &self.column.idx {
ColumnIndex::Full => self.column.values.num_vals(), ColumnIndex::Full => self.column.values.num_vals(),
ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(), ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(),
ColumnIndex::Multivalued(multivalue_idx) => multivalue_idx.num_rows(), ColumnIndex::Multivalued(_) => todo!(),
} }
} }
} }

View File

@@ -1,3 +1,4 @@
use std::fmt::Debug;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
use std::sync::Arc; use std::sync::Arc;
@@ -11,20 +12,20 @@ use crate::column_values::serialize::serialize_column_values_u128;
use crate::column_values::u64_based::{serialize_u64_based_column_values, CodecType}; use crate::column_values::u64_based::{serialize_u64_based_column_values, CodecType};
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64}; use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
use crate::iterable::Iterable; use crate::iterable::Iterable;
use crate::StrColumn;
pub fn serialize_column_mappable_to_u128<T: MonotonicallyMappableToU128>( pub fn serialize_column_mappable_to_u128<T: MonotonicallyMappableToU128>(
column_index: SerializableColumnIndex<'_>, column_index: SerializableColumnIndex<'_>,
iterable: &dyn Iterable<T>, iterable: &dyn Iterable<T>,
num_vals: u32,
output: &mut impl Write, output: &mut impl Write,
) -> io::Result<()> { ) -> io::Result<()> {
let column_index_num_bytes = serialize_column_index(column_index, output)?; let column_index_num_bytes = serialize_column_index(column_index, output)?;
serialize_column_values_u128(iterable, output)?; serialize_column_values_u128(iterable, num_vals, output)?;
output.write_all(&column_index_num_bytes.to_le_bytes())?; output.write_all(&column_index_num_bytes.to_le_bytes())?;
Ok(()) Ok(())
} }
pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64>( pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64 + Debug>(
column_index: SerializableColumnIndex<'_>, column_index: SerializableColumnIndex<'_>,
column_values: &impl Iterable<T>, column_values: &impl Iterable<T>,
output: &mut impl Write, output: &mut impl Write,
@@ -76,19 +77,15 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
}) })
} }
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> { pub fn open_column_bytes<T: From<BytesColumn>>(data: OwnedBytes) -> io::Result<T> {
let (body, dictionary_len_bytes) = data.rsplit(4); let (body, dictionary_len_bytes) = data.rsplit(4);
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap()); let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize); let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?); let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?; let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
Ok(BytesColumn { let bytes_column = BytesColumn {
dictionary, dictionary,
term_ord_column, term_ord_column,
}) };
} Ok(bytes_column.into())
pub fn open_column_str(data: OwnedBytes) -> io::Result<StrColumn> {
let bytes_column = open_column_bytes(data)?;
Ok(StrColumn::wrap(bytes_column))
} }
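The change above replaces the dedicated `open_column_str` with a single `open_column_bytes` generic over `T: From<BytesColumn>`, with the new `From<BytesColumn> for StrColumn` impl doing the wrapping. A standalone sketch of that pattern with illustrative types, not the crate's:

struct BytesCol(Vec<u8>);
struct StrCol(BytesCol);

impl From<BytesCol> for StrCol {
    fn from(inner: BytesCol) -> Self {
        StrCol(inner)
    }
}

// One constructor; the caller picks the concrete type via the type parameter.
fn open<T: From<BytesCol>>(data: Vec<u8>) -> T {
    BytesCol(data).into()
}

fn main() {
    let _bytes: BytesCol = open(vec![1, 2, 3]);
    let _strs: StrCol = open(vec![4, 5, 6]);
}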

View File

@@ -1,19 +1,29 @@
use std::iter; use std::iter;
use crate::column_index::{SerializableColumnIndex, Set}; use crate::column_index::{
multivalued_index, serialize_column_index, SerializableColumnIndex, Set,
};
use crate::iterable::Iterable; use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder}; use crate::{Cardinality, ColumnIndex, MergeRowOrder, RowId, StackMergeOrder};
/// Simple case: fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
/// The new mapping just consists in stacking the different column indexes. columns
/// .iter()
/// There are no sort nor deletes involved. .flatten()
pub fn merge_column_index_stacked<'a>( .map(ColumnIndex::get_cardinality)
.max()
.unwrap_or(Cardinality::Full)
}
pub fn stack_column_index<'a>(
columns: &'a [Option<ColumnIndex>], columns: &'a [Option<ColumnIndex>],
cardinality_after_merge: Cardinality, merge_row_order: &'a MergeRowOrder,
stack_merge_order: &'a StackMergeOrder,
) -> SerializableColumnIndex<'a> { ) -> SerializableColumnIndex<'a> {
match cardinality_after_merge { let MergeRowOrder::Stack(stack_merge_order) = merge_row_order else {
panic!("only supporting stacking at the moment.");
};
let cardinality = detect_cardinality(columns);
match cardinality {
Cardinality::Full => SerializableColumnIndex::Full, Cardinality::Full => SerializableColumnIndex::Full,
Cardinality::Optional => SerializableColumnIndex::Optional { Cardinality::Optional => SerializableColumnIndex::Optional {
non_null_row_ids: Box::new(StackedOptionalIndex { non_null_row_ids: Box::new(StackedOptionalIndex {
@@ -50,7 +60,7 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
Some(ColumnIndex::Optional(optional_index)) => Box::new( Some(ColumnIndex::Optional(optional_index)) => Box::new(
optional_index optional_index
.iter_rows() .iter_rows()
.map(move |row_id: RowId| columnar_row_range.start + row_id), .map(move |row_id: RowId| row_id + columnar_row_range.start),
), ),
Some(ColumnIndex::Multivalued(_)) => { Some(ColumnIndex::Multivalued(_)) => {
panic!("No multivalued index is allowed when stacking column index"); panic!("No multivalued index is allowed when stacking column index");
@@ -131,6 +141,13 @@ fn stack_multivalued_indexes<'a>(
})) }))
} }
fn stack_multivalued_index<'a>(
columns: &'a [Option<ColumnIndex>],
stack_merge_order: &StackMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
todo!()
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::RowId; use crate::RowId;
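A standalone sketch of the stacking performed by `StackedOptionalIndex` above: each segment contributes its non-null row ids shifted by the segment's starting row in the stacked column (the `row_id + columnar_row_range.start` in the hunk). The helper below is illustrative, not crate code.

fn stack_non_null_rows(segments: &[(u32, Vec<u32>)]) -> Vec<u32> {
    // Each segment is (num_rows, non-null row ids within that segment).
    let mut stacked = Vec::new();
    let mut row_offset = 0u32;
    for (num_rows, non_null_rows) in segments {
        stacked.extend(non_null_rows.iter().map(|row_id| row_id + row_offset));
        row_offset += num_rows;
    }
    stacked
}

fn main() {
    // Segment 0 has 3 rows with values at rows 0 and 2;
    // segment 1 has 2 rows with a value at row 1.
    assert_eq!(
        stack_non_null_rows(&[(3, vec![0, 2]), (2, vec![1])]),
        vec![0, 2, 4]
    );
}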

View File

@@ -1,136 +0,0 @@
mod shuffled;
mod stacked;
use shuffled::merge_column_index_shuffled;
use stacked::merge_column_index_stacked;
use crate::column_index::SerializableColumnIndex;
use crate::{Cardinality, ColumnIndex, MergeRowOrder};
// For simplification, we never have cardinality go down due to deletes.
fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
columns
.iter()
.flatten()
.map(ColumnIndex::get_cardinality)
.max()
.unwrap_or(Cardinality::Full)
}
pub fn merge_column_index<'a>(
columns: &'a [Option<ColumnIndex>],
merge_row_order: &'a MergeRowOrder,
) -> SerializableColumnIndex<'a> {
// For simplification, we do not try to detect whether the cardinality could be
// downgraded thanks to deletes.
let cardinality_after_merge = detect_cardinality(columns);
match merge_row_order {
MergeRowOrder::Stack(stack_merge_order) => {
merge_column_index_stacked(columns, cardinality_after_merge, stack_merge_order)
}
MergeRowOrder::Shuffled(complex_merge_order) => {
merge_column_index_shuffled(columns, cardinality_after_merge, complex_merge_order)
}
}
}
// TODO actually, the shuffled code path is a bit too general.
// In practice, we do not really shuffle everything.
// The merge order restricted to a specific column keeps the original row order.
//
// This may offer some optimization that we have not explored yet.
#[cfg(test)]
mod tests {
use crate::column_index::merge::detect_cardinality;
use crate::column_index::multivalued_index::MultiValueIndex;
use crate::column_index::{merge_column_index, OptionalIndex, SerializableColumnIndex};
use crate::{Cardinality, ColumnIndex, MergeRowOrder, RowAddr, RowId, ShuffleMergeOrder};
#[test]
fn test_detect_cardinality() {
assert_eq!(detect_cardinality(&[]), Cardinality::Full);
let optional_index: ColumnIndex = OptionalIndex::for_test(1, &[]).into();
let multivalued_index: ColumnIndex = MultiValueIndex::for_test(&[0, 1]).into();
assert_eq!(
detect_cardinality(&[Some(optional_index.clone()), None]),
Cardinality::Optional
);
assert_eq!(
detect_cardinality(&[Some(optional_index.clone()), Some(ColumnIndex::Full)]),
Cardinality::Optional
);
assert_eq!(
detect_cardinality(&[Some(multivalued_index.clone()), None]),
Cardinality::Multivalued
);
assert_eq!(
detect_cardinality(&[
Some(multivalued_index.clone()),
Some(optional_index.clone())
]),
Cardinality::Multivalued
);
assert_eq!(
detect_cardinality(&[Some(optional_index), Some(multivalued_index)]),
Cardinality::Multivalued
);
}
#[test]
fn test_merge_index_multivalued_sorted() {
let column_indexes: Vec<Option<ColumnIndex>> =
vec![Some(MultiValueIndex::for_test(&[0, 2, 5]).into())];
let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
&[2],
vec![
RowAddr {
segment_ord: 0u32,
row_id: 1u32,
},
RowAddr {
segment_ord: 0u32,
row_id: 0u32,
},
],
)
.into();
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
else { panic!("Excpected a multivalued index") };
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5]);
}
#[test]
fn test_merge_index_multivalued_sorted_several_segment() {
let column_indexes: Vec<Option<ColumnIndex>> = vec![
Some(MultiValueIndex::for_test(&[0, 2, 5]).into()),
None,
Some(MultiValueIndex::for_test(&[0, 1, 4]).into()),
];
let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
&[2, 0, 2],
vec![
RowAddr {
segment_ord: 2u32,
row_id: 1u32,
},
RowAddr {
segment_ord: 0u32,
row_id: 0u32,
},
RowAddr {
segment_ord: 2u32,
row_id: 0u32,
},
],
)
.into();
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index
else { panic!("Excpected a multivalued index") };
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5, 6]);
}
}

View File

@@ -1,171 +0,0 @@
use std::iter;
use crate::column_index::{SerializableColumnIndex, Set};
use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, ShuffleMergeOrder};
pub fn merge_column_index_shuffled<'a>(
column_indexes: &'a [Option<ColumnIndex>],
cardinality_after_merge: Cardinality,
shuffle_merge_order: &'a ShuffleMergeOrder,
) -> SerializableColumnIndex<'a> {
match cardinality_after_merge {
Cardinality::Full => SerializableColumnIndex::Full,
Cardinality::Optional => {
let non_null_row_ids =
merge_column_index_shuffled_optional(column_indexes, shuffle_merge_order);
SerializableColumnIndex::Optional {
non_null_row_ids,
num_rows: shuffle_merge_order.num_rows(),
}
}
Cardinality::Multivalued => {
let multivalue_start_index =
merge_column_index_shuffled_multivalued(column_indexes, shuffle_merge_order);
SerializableColumnIndex::Multivalued(multivalue_start_index)
}
}
}
/// Merge several column indexes into one, ordering rows according to the merge_order passed as
/// argument. While it is true that the `merge_order` may imply deletes and hence could in theory turn a
/// multivalued index into an optional one, this is not supported today for simplification.
///
/// In other words the column_indexes passed as argument may NOT be multivalued.
fn merge_column_index_shuffled_optional<'a>(
column_indexes: &'a [Option<ColumnIndex>],
merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
Box::new(ShuffledOptionalIndex {
column_indexes,
merge_order,
})
}
struct ShuffledOptionalIndex<'a> {
column_indexes: &'a [Option<ColumnIndex>],
merge_order: &'a ShuffleMergeOrder,
}
impl<'a> Iterable<u32> for ShuffledOptionalIndex<'a> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
Box::new(self.merge_order
.iter_new_to_old_row_addrs()
.enumerate()
.filter_map(|(new_row_id, old_row_addr)| {
let Some(column_index) = &self.column_indexes[old_row_addr.segment_ord as usize] else {
return None;
};
let row_id = new_row_id as u32;
if column_index.has_value(old_row_addr.row_id) {
Some(row_id)
} else {
None
}
}))
}
}
fn merge_column_index_shuffled_multivalued<'a>(
column_indexes: &'a [Option<ColumnIndex>],
merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
Box::new(ShuffledMultivaluedIndex {
column_indexes,
merge_order,
})
}
struct ShuffledMultivaluedIndex<'a> {
column_indexes: &'a [Option<ColumnIndex>],
merge_order: &'a ShuffleMergeOrder,
}
fn iter_num_values<'a>(
column_indexes: &'a [Option<ColumnIndex>],
merge_order: &'a ShuffleMergeOrder,
) -> impl Iterator<Item = u32> + 'a {
merge_order.iter_new_to_old_row_addrs().map(|row_addr| {
let Some(column_index) = &column_indexes[row_addr.segment_ord as usize] else {
// No values in the entire column. It surely means there are 0 values associated to this row.
return 0u32;
};
match column_index {
ColumnIndex::Full => 1,
ColumnIndex::Optional(optional_index) => {
if optional_index.contains(row_addr.row_id) {
1u32
} else {
0u32
}
}
ColumnIndex::Multivalued(multivalued_index) => {
multivalued_index.range(row_addr.row_id).len() as u32
}
}
})
}
/// Transforms an iterator containing the number of vals per row (with `num_rows` elements)
/// into a `start_offset` iterator starting at 0 (with `num_rows + 1` elements)
fn integrate_num_vals(num_vals: impl Iterator<Item = u32>) -> impl Iterator<Item = RowId> {
iter::once(0u32).chain(num_vals.scan(0, |state, num_vals| {
*state += num_vals;
Some(*state)
}))
}
impl<'a> Iterable<u32> for ShuffledMultivaluedIndex<'a> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
let num_vals_per_row = iter_num_values(self.column_indexes, self.merge_order);
Box::new(integrate_num_vals(num_vals_per_row))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::column_index::OptionalIndex;
use crate::RowAddr;
#[test]
fn test_integrate_num_vals_empty() {
assert!(integrate_num_vals(iter::empty()).eq(iter::once(0)));
}
#[test]
fn test_integrate_num_vals_one_el() {
assert!(integrate_num_vals(iter::once(10)).eq([0, 10].into_iter()));
}
#[test]
fn test_integrate_num_vals_several() {
assert!(integrate_num_vals([3, 0, 10, 20].into_iter()).eq([0, 3, 3, 13, 33].into_iter()));
}
#[test]
fn test_merge_column_index_optional_shuffle() {
let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
let column_indexes = vec![Some(optional_index), Some(ColumnIndex::Full)];
let row_addrs = vec![
RowAddr {
segment_ord: 0u32,
row_id: 1u32,
},
RowAddr {
segment_ord: 1u32,
row_id: 0u32,
},
];
let shuffle_merge_order = ShuffleMergeOrder::for_test(&[2, 1], row_addrs);
let serializable_index = merge_column_index_shuffled(
&column_indexes[..],
Cardinality::Optional,
&shuffle_merge_order,
);
let SerializableColumnIndex::Optional { non_null_row_ids, num_rows } = serializable_index else { panic!() };
assert_eq!(num_rows, 2);
let non_null_rows: Vec<RowId> = non_null_row_ids.boxed_iter().collect();
assert_eq!(&non_null_rows, &[1]);
}
}

View File

@@ -5,94 +5,13 @@ mod serialize;
use std::ops::Range; use std::ops::Range;
pub use merge::merge_column_index; pub use merge::stack_column_index;
pub use optional_index::{OptionalIndex, Set}; pub use optional_index::{OptionalIndex, Set};
pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex}; pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex};
pub use crate::column_index::multivalued_index::{MultiValueIndex, MultiValueIndexCursor}; use crate::column_index::multivalued_index::MultiValueIndex;
use crate::column_index::optional_index::OptionalIndexSelectCursor;
use crate::{Cardinality, RowId}; use crate::{Cardinality, RowId};
pub struct ColumnIndexSelectCursor {
last_rank: Option<RowId>,
cardinality_specific_impl: CardinalitySpecificSelectCursor,
}
impl From<CardinalitySpecificSelectCursor> for ColumnIndexSelectCursor {
fn from(cardinality_specific_impl: CardinalitySpecificSelectCursor) -> Self {
ColumnIndexSelectCursor {
last_rank: None,
cardinality_specific_impl,
}
}
}
enum CardinalitySpecificSelectCursor {
Full,
Optional(OptionalIndexSelectCursor),
Multivalued(MultiValueIndexCursor),
}
/// The point of this cursor object is to compute batches of `select` operations.
///
/// Regardless of cardinality, a column index can always be seen as a mapping
/// from row_id -> start_value_row_id. By definition, it is increasing.
/// If `left <= right`, `column_index[left] <= column_index[right]`.
///
/// The select operation then identifies, given a value row id, which row it
/// belongs to: it is the inverse mapping.
///
/// As a more formal definition, `select(rank)` is defined as the only `i` such that
/// mapping[i] <= rank and mapping[i+1] > rank.
/// Another way to define it is to say that it is the last i such that
/// mapping[i] <= rank.
/// Finally it can be defined as the number of `row_id` such that
/// mapping[i] <= rank.
///
/// `select_batch_in_place` is a complex function that computes
/// select operations in batches and in place.
///
/// For optimization reasons, it only supports supplying strictly increasing
/// values of `rank_ids`, even across calls.
///
/// It is also required from the caller to only supply rank_ids lower than max(mapping).
/// Within those conditions, the returned `row_ids` are guaranteed to be unique.
///
/// # Panics
///
/// Panics if the supplied rank_ids are not increasing from one call to another.
/// We only check that the `rank_ids` Vec is increasing in debug mode for
/// performance reasons.
impl ColumnIndexSelectCursor {
/// Returns a list of
pub fn select_batch_in_place(&mut self, rank_ids: &mut Vec<RowId>) {
// `rank_ids` has to be sorted.
debug_assert!(rank_ids.windows(2).all(|window| window[0] < window[1]));
// Two consecutive calls must pass strictly increasing `rank_ids`.
let (Some(first_rank), Some(new_last_rank)) = (rank_ids.first().copied(), rank_ids.last().copied()) else {
// rank_ids is empty, there is nothing to do.
return;
};
if let Some(last_rank) = self.last_rank {
assert!(last_rank < first_rank);
}
self.last_rank = Some(new_last_rank);
match &mut self.cardinality_specific_impl {
CardinalitySpecificSelectCursor::Full => {
// No need to do anything:
// `value_idx` and `row_idx` are the same.
}
CardinalitySpecificSelectCursor::Optional(optional_index) => {
optional_index.select_batch_in_place(&mut rank_ids[..]);
}
CardinalitySpecificSelectCursor::Multivalued(multivalued_index) => {
// TODO important: avoid using 0u32, and restart from the beginning all of the time.
multivalued_index.select_batch_in_place(rank_ids)
}
}
}
}
#[derive(Clone)] #[derive(Clone)]
pub enum ColumnIndex { pub enum ColumnIndex {
Full, Full,
@@ -102,18 +21,6 @@ pub enum ColumnIndex {
Multivalued(MultiValueIndex), Multivalued(MultiValueIndex),
} }
impl From<OptionalIndex> for ColumnIndex {
fn from(optional_index: OptionalIndex) -> ColumnIndex {
ColumnIndex::Optional(optional_index)
}
}
impl From<MultiValueIndex> for ColumnIndex {
fn from(multi_value_index: MultiValueIndex) -> ColumnIndex {
ColumnIndex::Multivalued(multi_value_index)
}
}
impl ColumnIndex { impl ColumnIndex {
pub fn get_cardinality(&self) -> Cardinality { pub fn get_cardinality(&self) -> Cardinality {
match self { match self {
@@ -123,17 +30,6 @@ impl ColumnIndex {
} }
} }
/// Returns true if and only if there is at least one value associated with the row.
pub fn has_value(&self, row_id: RowId) -> bool {
match self {
ColumnIndex::Full => true,
ColumnIndex::Optional(optional_index) => optional_index.contains(row_id),
ColumnIndex::Multivalued(multivalued_index) => {
multivalued_index.range(row_id).len() > 0
}
}
}
pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> { pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
match self { match self {
ColumnIndex::Full => row_id..row_id + 1, ColumnIndex::Full => row_id..row_id + 1,
@@ -148,15 +44,18 @@ impl ColumnIndex {
} }
} }
pub fn select_cursor(&self) -> ColumnIndexSelectCursor { pub fn select_batch_in_place(&self, rank_ids: &mut Vec<RowId>) {
match self { match self {
ColumnIndex::Full => CardinalitySpecificSelectCursor::Full.into(), ColumnIndex::Full => {
// No need to do anything:
// value_idx and row_idx are the same.
}
ColumnIndex::Optional(optional_index) => { ColumnIndex::Optional(optional_index) => {
CardinalitySpecificSelectCursor::Optional(optional_index.select_cursor()).into() optional_index.select_batch(&mut rank_ids[..]);
} }
ColumnIndex::Multivalued(multivalued_index) => { ColumnIndex::Multivalued(multivalued_index) => {
CardinalitySpecificSelectCursor::Multivalued(multivalued_index.select_cursor()) // TODO important: avoid using 0u32, and restart from the beginning all of the time.
.into() multivalued_index.select_batch_in_place(0u32, rank_ids)
} }
} }
} }
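The removed doc comment above defines `select(rank)` as the last row id whose start offset is <= rank. A minimal standalone sketch of that definition over a plain start-offset slice (a linear scan for clarity; the crate keeps a cursor across calls):

fn select(mapping: &[u32], rank: u32) -> u32 {
    // Caller contract from the doc comment: rank must stay below max(mapping).
    debug_assert!(rank < *mapping.last().unwrap());
    let mut row_id = 0u32;
    while mapping[row_id as usize + 1] <= rank {
        row_id += 1;
    }
    row_id
}

fn main() {
    // Rows own the value ranges [0..10), [10..12), [12..15).
    let mapping = &[0u32, 10, 12, 15];
    assert_eq!(select(mapping, 9), 0);
    assert_eq!(select(mapping, 10), 1);
    assert_eq!(select(mapping, 11), 1);
    assert_eq!(select(mapping, 12), 2);
}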

View File

@@ -35,14 +35,13 @@ pub struct MultiValueIndex {
pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>, pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
} }
impl MultiValueIndex { impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex { fn from(start_index_column: Arc<dyn ColumnValues<RowId>>) -> Self {
let mut buffer = Vec::new(); MultiValueIndex { start_index_column }
serialize_multivalued_index(&start_offsets, &mut buffer).unwrap();
let bytes = OwnedBytes::new(buffer);
open_multivalued_index(bytes).unwrap()
} }
}
impl MultiValueIndex {
/// Returns `[start, end)`, such that the values associated with /// Returns `[start, end)`, such that the values associated with
/// the given document are `start..end`. /// the given document are `start..end`.
#[inline] #[inline]
@@ -58,302 +57,78 @@ impl MultiValueIndex {
self.start_index_column.num_vals() - 1 self.start_index_column.num_vals() - 1
} }
pub fn select_cursor(&self) -> MultiValueIndexCursor { /// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
MultiValueIndexCursor { /// row_ids. Positions are converted inplace to docids.
multivalued_index: self.clone(),
row_cursor: 0u32,
}
}
}
pub struct MultiValueIndexCursor {
multivalued_index: MultiValueIndex,
row_cursor: RowId,
}
impl MultiValueIndexCursor {
/// See contract in `ColumnIndexSelectCursor`.
/// ///
/// Multi valued cardinality is special for two different /// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the
/// ranks `rank_left` and `rank_right`, we can end up with /// index.
/// the same `select(rank_left)` and `select(rank_right)`.
/// ///
/// For this reason, this function includes extra complexity /// Correctness: positions needs to be sorted. idx_reader needs to contain monotonically
/// to prevent the cursor from emitting the same row_id. /// increasing positions.
/// - From a last call, by skipping ranks mapping to ///
/// the same row_id /// TODO: Instead of a linear scan we can employ a exponential search into binary search to
/// - With the batch, by simply deduplicating the output. /// match a docid to its value position.
pub fn select_batch_in_place(&mut self, ranks: &mut Vec<RowId>) { #[allow(clippy::bool_to_int_with_if)]
pub(crate) fn select_batch_in_place(&self, row_start: RowId, ranks: &mut Vec<u32>) {
if ranks.is_empty() { if ranks.is_empty() {
return; return;
} }
let mut row_cursor = self.row_cursor; let mut cur_doc = row_start;
let mut last_doc = None;
let mut write_cursor_id = usize::MAX; assert!(self.start_index_column.get_val(row_start) as u32 <= ranks[0]);
let mut last_written_row_id = u32::MAX;
// We skip all of the ranks that we already passed. let mut write_doc_pos = 0;
// for i in 0..ranks.len() {
// It is possible in the case of multivalued, for the first let pos = ranks[i];
// few ranks to belong to the same row_id as the last rank loop {
// of the previous call. let end = self.start_index_column.get_val(cur_doc + 1) as u32;
let start_bound = self if end > pos {
.multivalued_index ranks[write_doc_pos] = cur_doc;
.start_index_column write_doc_pos += if last_doc == Some(cur_doc) { 0 } else { 1 };
.get_val(row_cursor); last_doc = Some(cur_doc);
break;
let mut skip = 0; }
while ranks[skip] < start_bound { cur_doc += 1;
skip += 1;
if skip == ranks.len() {
ranks.clear();
return;
} }
} }
ranks.truncate(write_doc_pos);
for i in skip..ranks.len() {
let rank = ranks[i];
let row_id = loop {
// TODO See if we can find a way to introduce a function in
// ColumnValue to remove dynamic dispatch.
// This is tricky however... because it only applies to T=u32.
//
// TODO consider using exponential search.
let end = self
.multivalued_index
.start_index_column
.get_val(row_cursor + 1) as u32;
if end > rank {
break row_cursor;
}
row_cursor += 1;
};
// We remove duplicates in a branchless fashion: we only advance
// the write cursor when we are writing a value different from
// the last written value.
write_cursor_id =
write_cursor_id.wrapping_add(if row_id == last_written_row_id { 0 } else { 1 });
ranks[write_cursor_id] = row_id;
last_written_row_id = row_id;
}
self.row_cursor = row_cursor + 1;
ranks.truncate(write_cursor_id + 1);
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::ops::Range;
use std::sync::Arc; use std::sync::Arc;
use super::MultiValueIndex; use super::MultiValueIndex;
use crate::column_values::IterColumn; use crate::column_values::IterColumn;
use crate::{ColumnValues, RowId}; use crate::{ColumnValues, RowId};
use proptest::prelude::*;
fn index_to_pos_helper(index: &MultiValueIndex, positions: &[u32]) -> Vec<u32> { fn index_to_pos_helper(
index: &MultiValueIndex,
doc_id_range: Range<u32>,
positions: &[u32],
) -> Vec<u32> {
let mut positions = positions.to_vec(); let mut positions = positions.to_vec();
let mut cursor = index.select_cursor(); index.select_batch_in_place(doc_id_range.start, &mut positions);
cursor.select_batch_in_place(&mut positions);
positions positions
} }
// Value row id ranges are [0..10, 10..12, 12..15, etc.]
const START_OFFSETS: &[RowId] = &[0, 10, 12, 15, 22, 23];
#[track_caller]
fn test_multivalue_select_cursor_aux(
start_offsets: &'static [RowId],
ranks: &[RowId],
expected: &[RowId],
) {
let column: Arc<dyn ColumnValues<RowId>> =
Arc::new(IterColumn::from(start_offsets.iter().copied()));
let index = MultiValueIndex {
start_index_column: column,
};
assert_eq!(&index_to_pos_helper(&index, &ranks), expected);
}
#[test] #[test]
fn test_multivalue_select_cursor_empty() { fn test_positions_to_docid() {
test_multivalue_select_cursor_aux(START_OFFSETS, &[], &[]); let offsets: Vec<RowId> = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
} let column: Arc<dyn ColumnValues<RowId>> = Arc::new(IterColumn::from(offsets.into_iter()));
let index = MultiValueIndex::from(column);
#[test] assert_eq!(index.num_rows(), 5);
fn test_multivalue_select_cursor_single() { let positions = &[10u32, 11, 15, 20, 21, 22];
test_multivalue_select_cursor_aux(START_OFFSETS, &[9], &[0]); assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);
test_multivalue_select_cursor_aux(START_OFFSETS, &[10], &[1]); assert_eq!(index_to_pos_helper(&index, 1..5, positions), vec![1, 3, 4]);
test_multivalue_select_cursor_aux(START_OFFSETS, &[11], &[1]); assert_eq!(index_to_pos_helper(&index, 0..5, &[9]), vec![0]);
test_multivalue_select_cursor_aux(START_OFFSETS, &[11], &[1]); assert_eq!(index_to_pos_helper(&index, 1..5, &[10]), vec![1]);
test_multivalue_select_cursor_aux(START_OFFSETS, &[12], &[2]); assert_eq!(index_to_pos_helper(&index, 1..5, &[11]), vec![1]);
} assert_eq!(index_to_pos_helper(&index, 2..5, &[12]), vec![2]);
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14]), vec![2]);
#[test] assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14, 15]), vec![2, 3]);
fn test_multivalue_select_cursor_duplicates() {
test_multivalue_select_cursor_aux(START_OFFSETS, &[12, 14], &[2]);
}
#[test]
fn test_multivalue_select_cursor_complex() {
test_multivalue_select_cursor_aux(START_OFFSETS, &[10, 11, 15, 20, 21, 22], &[1, 3, 4])
}
#[test]
fn test_multivalue_select_corner_case_skip_all() {
let column: Arc<dyn ColumnValues<RowId>> =
Arc::new(IterColumn::from([0, 10].into_iter()));
let index = MultiValueIndex {
start_index_column: column,
};
let mut cursor = index.select_cursor();
{
let mut ranks = vec![0];
cursor.select_batch_in_place(&mut ranks);
assert_eq!(ranks, &[0]);
}
{
let mut ranks = vec![5];
cursor.select_batch_in_place(&mut ranks);
assert_eq!(ranks, &[]);
}
}
#[test]
fn test_multi_value_index_cursor_bug() {
let column: Arc<dyn ColumnValues<RowId>> =
Arc::new(IterColumn::from([0, 10].into_iter()));
let index = MultiValueIndex {
start_index_column: column,
};
let mut cursor = index.select_cursor();
{
let mut ranks = vec![0];
cursor.select_batch_in_place(&mut ranks);
assert_eq!(ranks, &[0]);
}
{
let mut ranks = vec![4];
cursor.select_batch_in_place(&mut ranks);
assert_eq!(ranks, &[]);
}
{
let mut ranks = vec![9];
cursor.select_batch_in_place(&mut ranks);
assert_eq!(ranks, &[]);
}
}
#[test]
fn test_multivalue_select_cursor_skip_already_emitted() {
let column: Arc<dyn ColumnValues<RowId>> =
Arc::new(IterColumn::from(START_OFFSETS.iter().copied()));
let index = MultiValueIndex {
start_index_column: column,
};
let mut cursor = index.select_cursor();
{
let mut ranks = vec![1, 10];
cursor.select_batch_in_place(&mut ranks);
assert_eq!(ranks, &[0, 1]);
}
{
// Here we skip row_id = 1.
let mut ranks = vec![11, 12];
cursor.select_batch_in_place(&mut ranks);
assert_eq!(ranks, &[2]);
}
}
fn start_index_strategy() -> impl Strategy<Value = Vec<RowId>> {
proptest::collection::vec(0u32..3u32, 1..6)
.prop_map(|deltas: Vec<u32>| {
let mut start_offsets: Vec<RowId> = Vec::with_capacity(deltas.len() + 1);
let mut cumul = 0u32;
start_offsets.push(cumul);
for delta in deltas {
cumul += delta;
if cumul >= 10 {
break;
}
start_offsets.push(cumul);
}
start_offsets.push(10);
start_offsets
})
}
fn query_strategy() -> impl Strategy<Value = Vec<Vec<RowId>> > {
proptest::collection::btree_set(0u32..10u32, 1..=10)
.prop_flat_map(|els| {
let els: Vec<RowId> = els.into_iter().collect();
proptest::collection::btree_set(0..els.len(), 0..els.len())
.prop_map(move |mut split_positions| {
split_positions.insert(els.len());
let mut queries: Vec<Vec<RowId>> = Vec::with_capacity(split_positions.len() + 1);
let mut cursor = 0;
for split_position in split_positions {
queries.push(els[cursor..split_position].to_vec());
cursor = split_position;
}
queries
})
})
}
/// Simple inefficient implementation used for reference.
struct SimpleSelectCursor {
start_indexes: Vec<RowId>,
last_emitted_row_id: Option<RowId>,
}
impl SimpleSelectCursor {
fn select(&self, rank: u32) -> RowId {
for i in 0..self.start_indexes.len() - 1 {
if self.start_indexes[i] <= rank && self.start_indexes[i + 1] > rank {
return i as u32;
}
}
panic!();
}
fn select_batch_in_place(&mut self, ranks: &mut Vec<RowId>) {
if ranks.is_empty() {
return;
}
for rank in ranks.iter_mut() {
*rank = self.select(*rank);
}
ranks.dedup();
if ranks.first().copied() == self.last_emitted_row_id {
ranks.remove(0);
}
if let Some(last_emitted) = ranks.last().copied() {
self.last_emitted_row_id = Some(last_emitted);
}
}
}
proptest! {
#[test]
fn test_multi_value_index_cursor_proptest(start_indexes in start_index_strategy(), mut queries in query_strategy()) {
let mut simple_select_cursor = SimpleSelectCursor {
start_indexes: start_indexes.clone(),
last_emitted_row_id: None
};
let column: Arc<dyn ColumnValues<RowId>> =
Arc::new(IterColumn::from(start_indexes.into_iter()));
let index = MultiValueIndex { start_index_column: column };
let mut select_cursor = index.select_cursor();
for query in queries.iter_mut() {
let mut query_clone = query.clone();
select_cursor.select_batch_in_place(query);
simple_select_cursor.select_batch_in_place(&mut query_clone);
assert_eq!(&query[..], &query_clone[..]);
}
}
} }
} }
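As a reference for the two variants of `select_batch_in_place` diffed above, here is a minimal standalone sketch of the underlying operation: sorted ranks (value row ids) are rewritten in place to the row ids that own them, with duplicates removed. Plain slices stand in for the crate's `ColumnValues`.

fn select_batch_in_place(start_offsets: &[u32], ranks: &mut Vec<u32>) {
    let mut row_id = 0u32;
    for rank in ranks.iter_mut() {
        // Advance the row cursor until this row's value range covers `rank`.
        while start_offsets[row_id as usize + 1] <= *rank {
            row_id += 1;
        }
        *rank = row_id;
    }
    // Several ranks can fall in the same row; keep each row id once.
    ranks.dedup();
}

fn main() {
    // Value row id ranges per row: [0..10), [10..12), [12..15), [15..22), [22..23).
    let start_offsets = &[0u32, 10, 12, 15, 22, 23];
    let mut ranks = vec![10, 11, 15, 20, 21, 22];
    select_batch_in_place(start_offsets, &mut ranks);
    assert_eq!(ranks, vec![1, 3, 4]);
}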

View File

@@ -88,6 +88,22 @@ pub struct OptionalIndex {
block_metas: Arc<[BlockMeta]>, block_metas: Arc<[BlockMeta]>,
} }
impl OptionalIndex {
pub fn num_rows(&self) -> RowId {
self.num_rows
}
pub fn num_non_nulls(&self) -> RowId {
self.num_non_null_rows
}
pub fn iter_rows<'a>(&'a self) -> impl Iterator<Item = RowId> + 'a {
// TODO optimize
let mut select_batch = self.select_cursor();
(0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
}
}
/// Splits a value address into lower and upper 16bits. /// Splits a value address into lower and upper 16bits.
/// The lower 16 bits are the value in the block /// The lower 16 bits are the value in the block
/// The upper 16 bits are the block index /// The upper 16 bits are the block index
@@ -118,17 +134,17 @@ impl<'a> BlockSelectCursor<'a> {
} }
} }
} }
pub struct OptionalIndexSelectCursor { pub struct OptionalIndexSelectCursor<'a> {
current_block_cursor: BlockSelectCursor<'static>, current_block_cursor: BlockSelectCursor<'a>,
current_block_id: u16, current_block_id: u16,
// The current block is guaranteed to contain ranks < end_rank. // The current block is guaranteed to contain ranks < end_rank.
current_block_end_rank: RowId, current_block_end_rank: RowId,
optional_index: OptionalIndex, optional_index: &'a OptionalIndex,
block_doc_idx_start: RowId, block_doc_idx_start: RowId,
num_null_rows_before_block: RowId, num_null_rows_before_block: RowId,
} }
impl OptionalIndexSelectCursor { impl<'a> OptionalIndexSelectCursor<'a> {
fn search_and_load_block(&mut self, rank: RowId) { fn search_and_load_block(&mut self, rank: RowId) {
if rank < self.current_block_end_rank { if rank < self.current_block_end_rank {
// we are already in the right block // we are already in the right block
@@ -145,23 +161,14 @@ impl OptionalIndexSelectCursor {
let block_meta = self.optional_index.block_metas[self.current_block_id as usize]; let block_meta = self.optional_index.block_metas[self.current_block_id as usize];
self.num_null_rows_before_block = block_meta.non_null_rows_before_block; self.num_null_rows_before_block = block_meta.non_null_rows_before_block;
let block: Block<'_> = self.optional_index.block(block_meta); let block: Block<'_> = self.optional_index.block(block_meta);
let current_block_cursor = match block { self.current_block_cursor = match block {
Block::Dense(dense_block) => BlockSelectCursor::Dense(dense_block.select_cursor()), Block::Dense(dense_block) => BlockSelectCursor::Dense(dense_block.select_cursor()),
Block::Sparse(sparse_block) => BlockSelectCursor::Sparse(sparse_block.select_cursor()), Block::Sparse(sparse_block) => BlockSelectCursor::Sparse(sparse_block.select_cursor()),
}; };
// We are building a self-owned `OptionalIndexSelectCursor`.
self.current_block_cursor = unsafe { std::mem::transmute(current_block_cursor) };
}
pub fn select_batch_in_place(&mut self, ranks: &mut [RowId]) {
// TODO see if we can batch at the block level as well for optimization purposes.
for rank in ranks {
*rank = self.select(*rank);
}
} }
} }
impl<'a> SelectCursor<RowId> for OptionalIndexSelectCursor { impl<'a> SelectCursor<RowId> for OptionalIndexSelectCursor<'a> {
fn select(&mut self, rank: RowId) -> RowId { fn select(&mut self, rank: RowId) -> RowId {
self.search_and_load_block(rank); self.search_and_load_block(rank);
let index_in_block = (rank - self.num_null_rows_before_block) as u16; let index_in_block = (rank - self.num_null_rows_before_block) as u16;
@@ -170,7 +177,7 @@ impl<'a> SelectCursor<RowId> for OptionalIndexSelectCursor {
} }
impl Set<RowId> for OptionalIndex { impl Set<RowId> for OptionalIndex {
type SelectCursor<'a> = OptionalIndexSelectCursor; type SelectCursor<'b> = OptionalIndexSelectCursor<'b> where Self: 'b;
// Check if value at position is not null. // Check if value at position is not null.
#[inline] #[inline]
fn contains(&self, row_id: RowId) -> bool { fn contains(&self, row_id: RowId) -> bool {
@@ -229,14 +236,14 @@ impl Set<RowId> for OptionalIndex {
block_doc_idx_start + in_block_rank as u32 block_doc_idx_start + in_block_rank as u32
} }
fn select_cursor(&self) -> OptionalIndexSelectCursor { fn select_cursor<'b>(&'b self) -> OptionalIndexSelectCursor<'b> {
OptionalIndexSelectCursor { OptionalIndexSelectCursor {
current_block_cursor: BlockSelectCursor::Sparse( current_block_cursor: BlockSelectCursor::Sparse(
SparseBlockCodec::open(b"").select_cursor(), SparseBlockCodec::open(b"").select_cursor(),
), ),
current_block_id: 0u16, current_block_id: 0u16,
current_block_end_rank: 0u32, //< this is sufficient to force the first load current_block_end_rank: 0u32, //< this is sufficient to force the first load
optional_index: self.clone(), optional_index: self,
block_doc_idx_start: 0u32, block_doc_idx_start: 0u32,
num_null_rows_before_block: 0u32, num_null_rows_before_block: 0u32,
} }
@@ -244,31 +251,6 @@ impl Set<RowId> for OptionalIndex {
} }
impl OptionalIndex { impl OptionalIndex {
pub fn for_test(num_rows: RowId, row_ids: &[RowId]) -> OptionalIndex {
assert!(row_ids
.last()
.copied()
.map(|last_row_id| last_row_id < num_rows)
.unwrap_or(true));
let mut buffer = Vec::new();
serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
let bytes = OwnedBytes::new(buffer);
open_optional_index(bytes).unwrap()
}
pub fn num_rows(&self) -> RowId {
self.num_rows
}
pub fn num_non_nulls(&self) -> RowId {
self.num_non_null_rows
}
pub fn iter_rows<'a>(&'a self) -> impl Iterator<Item = RowId> + 'a {
// TODO optimize
let mut select_batch = self.select_cursor();
(0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
}
pub fn select_batch(&self, ranks: &mut [RowId]) { pub fn select_batch(&self, ranks: &mut [RowId]) {
let mut select_cursor = self.select_cursor(); let mut select_cursor = self.select_cursor();
for rank in ranks.iter_mut() { for rank in ranks.iter_mut() {
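The hunk above swaps the self-owned cursor (which cloned the index and used an unsafe transmute to fake a 'static block cursor) for a cursor that simply borrows the index through a lifetime parameter. A standalone sketch of that borrowing pattern with illustrative types:

struct Index {
    non_null_rows: Vec<u32>,
}

// The cursor borrows its parent index instead of owning a clone.
struct SelectCursor<'a> {
    index: &'a Index,
}

impl Index {
    fn select_cursor(&self) -> SelectCursor<'_> {
        SelectCursor { index: self }
    }
}

impl<'a> SelectCursor<'a> {
    fn select(&mut self, rank: u32) -> u32 {
        self.index.non_null_rows[rank as usize]
    }
}

fn main() {
    let index = Index { non_null_rows: vec![0, 2, 5] };
    let mut cursor = index.select_cursor();
    assert_eq!(cursor.select(1), 2);
}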

View File

@@ -107,43 +107,59 @@ fn test_null_index(data: &[bool]) {
#[test] #[test]
fn test_optional_index_test_translation() { fn test_optional_index_test_translation() {
let optional_index = OptionalIndex::for_test(4, &[0, 2]); let mut out = vec![];
let mut select_cursor = optional_index.select_cursor(); let iter = &[true, false, true, false];
serialize_optional_index(&&iter[..], iter.len() as u32, &mut out).unwrap();
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
let mut select_cursor = null_index.select_cursor();
assert_eq!(select_cursor.select(0), 0); assert_eq!(select_cursor.select(0), 0);
assert_eq!(select_cursor.select(1), 2); assert_eq!(select_cursor.select(1), 2);
} }
#[test] #[test]
fn test_optional_index_translate() { fn test_optional_index_translate() {
let optional_index = OptionalIndex::for_test(4, &[0, 2]); let mut out = vec![];
assert_eq!(optional_index.rank_if_exists(0), Some(0)); let iter = &[true, false, true, false];
assert_eq!(optional_index.rank_if_exists(2), Some(1)); serialize_optional_index(&&iter[..], iter.len() as RowId, &mut out).unwrap();
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
assert_eq!(null_index.rank_if_exists(0), Some(0));
assert_eq!(null_index.rank_if_exists(2), Some(1));
} }
#[test] #[test]
fn test_optional_index_small() { fn test_optional_index_small() {
let optional_index = OptionalIndex::for_test(4, &[0, 2]); let mut out = vec![];
assert!(optional_index.contains(0)); let iter = &[true, false, true, false];
assert!(!optional_index.contains(1)); serialize_optional_index(&&iter[..], iter.len() as RowId, &mut out).unwrap();
assert!(optional_index.contains(2)); let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
assert!(!optional_index.contains(3)); assert!(null_index.contains(0));
assert!(!null_index.contains(1));
assert!(null_index.contains(2));
assert!(!null_index.contains(3));
} }
#[test] #[test]
fn test_optional_index_large() { fn test_optional_index_large() {
let row_ids = &[ELEMENTS_PER_BLOCK, ELEMENTS_PER_BLOCK + 1]; let mut docs = vec![];
let optional_index = OptionalIndex::for_test(ELEMENTS_PER_BLOCK + 2, row_ids); docs.extend((0..ELEMENTS_PER_BLOCK).map(|_idx| false));
assert!(!optional_index.contains(0)); docs.extend((0..=1).map(|_idx| true));
assert!(!optional_index.contains(100));
assert!(!optional_index.contains(ELEMENTS_PER_BLOCK - 1)); let mut out = vec![];
assert!(optional_index.contains(ELEMENTS_PER_BLOCK)); serialize_optional_index(&&docs[..], docs.len() as RowId, &mut out).unwrap();
assert!(optional_index.contains(ELEMENTS_PER_BLOCK + 1)); let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
assert!(!null_index.contains(0));
assert!(!null_index.contains(100));
assert!(!null_index.contains(ELEMENTS_PER_BLOCK - 1));
assert!(null_index.contains(ELEMENTS_PER_BLOCK));
assert!(null_index.contains(ELEMENTS_PER_BLOCK + 1));
} }
fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) { fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
let optional_index = OptionalIndex::for_test(num_rows, row_ids); let mut buffer: Vec<u8> = Vec::new();
assert_eq!(optional_index.num_rows(), num_rows); serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
assert!(optional_index.iter_rows().eq(row_ids.iter().copied())); let null_index = open_optional_index(OwnedBytes::new(buffer)).unwrap();
assert_eq!(null_index.num_rows(), num_rows);
assert!(null_index.iter_rows().eq(row_ids.iter().copied()));
} }
#[test] #[test]
@@ -152,8 +168,10 @@ fn test_optional_index_iter_empty() {
} }
fn test_optional_index_rank_aux(row_ids: &[RowId]) { fn test_optional_index_rank_aux(row_ids: &[RowId]) {
let mut buffer: Vec<u8> = Vec::new();
let num_rows = row_ids.last().copied().unwrap_or(0u32) + 1; let num_rows = row_ids.last().copied().unwrap_or(0u32) + 1;
let null_index = OptionalIndex::for_test(num_rows, row_ids); serialize_optional_index(&row_ids, num_rows, &mut buffer).unwrap();
let null_index = open_optional_index(OwnedBytes::new(buffer)).unwrap();
assert_eq!(null_index.num_rows(), num_rows); assert_eq!(null_index.num_rows(), num_rows);
for (row_id, row_val) in row_ids.iter().copied().enumerate() { for (row_id, row_val) in row_ids.iter().copied().enumerate() {
assert_eq!(null_index.rank(row_val), row_id as u32); assert_eq!(null_index.rank(row_val), row_id as u32);
@@ -189,16 +207,6 @@ fn test_optional_index_iter_dense_block() {
test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE); test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
} }
#[test]
fn test_optional_index_for_tests() {
let optional_index = OptionalIndex::for_test(4, &[1, 2]);
assert!(!optional_index.contains(0));
assert!(optional_index.contains(1));
assert!(optional_index.contains(2));
assert!(!optional_index.contains(3));
assert_eq!(optional_index.num_rows(), 4);
}
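
The tests above pin down the two core operations of the optional index: rank_if_exists maps a row id to its dense ordinal among the non-null rows, and select is the inverse lookup. A naive reference model over a sorted Vec<u32> makes that contract explicit; this is a sketch for illustration only, not the codec-backed OptionalIndex.

// Naive reference semantics for rank/select over a sorted list of non-null
// row ids, matching the contract the tests above exercise.
struct NaiveOptionalIndex {
    non_null_rows: Vec<u32>, // sorted, deduplicated
}

impl NaiveOptionalIndex {
    /// Dense ordinal of `row_id` among the non-null rows, if present.
    fn rank_if_exists(&self, row_id: u32) -> Option<u32> {
        self.non_null_rows
            .binary_search(&row_id)
            .ok()
            .map(|pos| pos as u32)
    }

    /// Inverse operation: the `rank`-th non-null row id.
    fn select(&self, rank: u32) -> u32 {
        self.non_null_rows[rank as usize]
    }
}

fn main() {
    let idx = NaiveOptionalIndex { non_null_rows: vec![0, 2] };
    assert_eq!(idx.rank_if_exists(0), Some(0));
    assert_eq!(idx.rank_if_exists(1), None);
    assert_eq!(idx.rank_if_exists(2), Some(1));
    assert_eq!(idx.select(1), 2);
}
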
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {

View File

@@ -6,6 +6,7 @@ use std::sync::Arc;
use tantivy_bitpacker::minmax; use tantivy_bitpacker::minmax;
use crate::column_values::monotonic_mapping::StrictlyMonotonicFn; use crate::column_values::monotonic_mapping::StrictlyMonotonicFn;
use crate::iterable::Iterable;
/// `ColumnValues` provides access to a dense field column. /// `ColumnValues` provides access to a dense field column.
/// ///
@@ -79,6 +80,12 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
} }
} }
impl<'a, T: PartialOrd> Iterable<T> for &'a [Arc<dyn ColumnValues<T>>] {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
Box::new(self.iter().flat_map(|column_value| column_value.iter()))
}
}
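
The Iterable impl in this hunk chains the per-column iterators so that a slice of column-value readers can be drained as one long stream when segments are simply stacked. The same pattern on plain slices, as a self-contained sketch (stacked_iter is a name made up for the illustration):

// Stacking pattern: the values of segment 0, then segment 1, and so on.
// Plain Vecs stand in for the ColumnValues readers used in the real code.
fn stacked_iter<'a, T: Copy + 'a>(
    segments: &'a [Vec<T>],
) -> impl Iterator<Item = T> + 'a {
    segments.iter().flat_map(|segment| segment.iter().copied())
}

fn main() {
    let segments = vec![vec![1u64, 2, 3], vec![10, 20]];
    let stacked: Vec<u64> = stacked_iter(&segments).collect();
    assert_eq!(stacked, vec![1, 2, 3, 10, 20]);
}
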
impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> { impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
#[inline(always)] #[inline(always)]
fn get_val(&self, idx: u32) -> T { fn get_val(&self, idx: u32) -> T {

View File

@@ -22,7 +22,6 @@ use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
use crate::column_values::compact_space::build_compact_space::get_compact_space; use crate::column_values::compact_space::build_compact_space::get_compact_space;
use crate::column_values::ColumnValues; use crate::column_values::ColumnValues;
use crate::RowId;
mod blank_range; mod blank_range;
mod build_compact_space; mod build_compact_space;
@@ -159,30 +158,23 @@ impl CompactSpace {
pub struct CompactSpaceCompressor { pub struct CompactSpaceCompressor {
params: IPCodecParams, params: IPCodecParams,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct IPCodecParams { pub struct IPCodecParams {
compact_space: CompactSpace, compact_space: CompactSpace,
bit_unpacker: BitUnpacker, bit_unpacker: BitUnpacker,
min_value: u128, min_value: u128,
max_value: u128, max_value: u128,
num_vals: RowId, num_vals: u32,
num_bits: u8, num_bits: u8,
} }
impl CompactSpaceCompressor { impl CompactSpaceCompressor {
pub fn num_vals(&self) -> RowId {
self.params.num_vals
}
/// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals. /// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals.
pub fn train_from(iter: impl Iterator<Item = u128>) -> Self { pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u32) -> Self {
let mut values_sorted = BTreeSet::new(); let mut values_sorted = BTreeSet::new();
let mut total_num_values = 0u32; values_sorted.extend(iter);
for val in iter { let total_num_values = num_vals;
total_num_values += 1u32;
values_sorted.insert(val);
}
let compact_space = let compact_space =
get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS); get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
let amplitude_compact_space = compact_space.amplitude_compact_space(); let amplitude_compact_space = compact_space.amplitude_compact_space();
@@ -458,352 +450,364 @@ impl CompactSpaceDecompressor {
} }
} }
#[cfg(test)] // TODO reenable what can be reenabled.
mod tests { // #[cfg(test)]
// mod tests {
use itertools::Itertools; //
// use super::*;
use super::*; // use crate::column::format_version::read_format_version;
use crate::column_values::serialize::U128Header; // use crate::column::column_footer::read_null_index_footer;
use crate::column_values::{open_u128_mapped, serialize_column_values_u128}; // use crate::column::serialize::U128Header;
// use crate::column::{open_u128, serialize_u128};
#[test] //
fn compact_space_test() { // #[test]
let ips = &[ // fn compact_space_test() {
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260, // let ips = &[
] // 2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
.into_iter() // ]
.collect(); // .into_iter()
let compact_space = get_compact_space(ips, ips.len() as u32, 11); // .collect();
let amplitude = compact_space.amplitude_compact_space(); // let compact_space = get_compact_space(ips, ips.len() as u32, 11);
assert_eq!(amplitude, 17); // let amplitude = compact_space.amplitude_compact_space();
assert_eq!(1, compact_space.u128_to_compact(2).unwrap()); // assert_eq!(amplitude, 17);
assert_eq!(2, compact_space.u128_to_compact(3).unwrap()); // assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1); // assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
// assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);
for (num1, num2) in (0..3).tuple_windows() { //
assert_eq!( // for (num1, num2) in (0..3).tuple_windows() {
compact_space.get_range_mapping(num1).compact_end() + 1, // assert_eq!(
compact_space.get_range_mapping(num2).compact_start // compact_space.get_range_mapping(num1).compact_end() + 1,
); // compact_space.get_range_mapping(num2).compact_start
} // );
// }
let mut output: Vec<u8> = Vec::new(); //
compact_space.serialize(&mut output).unwrap(); // let mut output: Vec<u8> = Vec::new();
// compact_space.serialize(&mut output).unwrap();
assert_eq!( //
compact_space, // assert_eq!(
CompactSpace::deserialize(&mut &output[..]).unwrap() // compact_space,
); // CompactSpace::deserialize(&mut &output[..]).unwrap()
// );
for ip in ips { //
let compact = compact_space.u128_to_compact(*ip).unwrap(); // for ip in ips {
assert_eq!(compact_space.compact_to_u128(compact), *ip); // let compact = compact_space.u128_to_compact(*ip).unwrap();
} // assert_eq!(compact_space.compact_to_u128(compact), *ip);
} // }
// }
#[test] //
fn compact_space_amplitude_test() { // #[test]
let ips = &[100000u128, 1000000].into_iter().collect(); // fn compact_space_amplitude_test() {
let compact_space = get_compact_space(ips, ips.len() as u32, 1); // let ips = &[100000u128, 1000000].into_iter().collect();
let amplitude = compact_space.amplitude_compact_space(); // let compact_space = get_compact_space(ips, ips.len() as u32, 1);
assert_eq!(amplitude, 2); // let amplitude = compact_space.amplitude_compact_space();
} // assert_eq!(amplitude, 2);
// }
fn test_all(mut data: OwnedBytes, expected: &[u128]) { //
let _header = U128Header::deserialize(&mut data); // fn test_all(mut data: OwnedBytes, expected: &[u128]) {
let decompressor = CompactSpaceDecompressor::open(data).unwrap(); // let _header = U128Header::deserialize(&mut data);
for (idx, expected_val) in expected.iter().cloned().enumerate() { // let decompressor = CompactSpaceDecompressor::open(data).unwrap();
let val = decompressor.get(idx as u32); // for (idx, expected_val) in expected.iter().cloned().enumerate() {
assert_eq!(val, expected_val); // let val = decompressor.get(idx as u32);
// assert_eq!(val, expected_val);
let test_range = |range: RangeInclusive<u128>| { //
let expected_positions = expected // let test_range = |range: RangeInclusive<u128>| {
.iter() // let expected_positions = expected
.positions(|val| range.contains(val)) // .iter()
.map(|pos| pos as u32) // .positions(|val| range.contains(val))
.collect::<Vec<_>>(); // .map(|pos| pos as u32)
let mut positions = Vec::new(); // .collect::<Vec<_>>();
decompressor.get_positions_for_value_range( // let mut positions = Vec::new();
range, // decompressor.get_positions_for_value_range(
0..decompressor.num_vals(), // range,
&mut positions, // 0..decompressor.num_vals(),
); // &mut positions,
assert_eq!(positions, expected_positions); // );
}; // assert_eq!(positions, expected_positions);
// };
test_range(expected_val.saturating_sub(1)..=expected_val); //
test_range(expected_val..=expected_val); // test_range(expected_val.saturating_sub(1)..=expected_val);
test_range(expected_val..=expected_val.saturating_add(1)); // test_range(expected_val..=expected_val);
test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1)); // test_range(expected_val..=expected_val.saturating_add(1));
} // test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
} // }
// }
fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes { //
let mut out = Vec::new(); // fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
serialize_column_values_u128(&u128_vals, &mut out).unwrap(); // let mut out = Vec::new();
let data = OwnedBytes::new(out); // serialize_u128(
test_all(data.clone(), u128_vals); // || u128_vals.iter().cloned(),
data // u128_vals.len() as u32,
} // &mut out,
// )
#[test] // .unwrap();
fn test_range_1() { //
let vals = &[ // let data = OwnedBytes::new(out);
1u128, // let (data, _format_version) = read_format_version(data).unwrap();
100u128, // let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
3u128, // test_all(data.clone(), u128_vals);
99999u128, //
100000u128, // data
100001u128, // }
4_000_211_221u128, //
4_000_211_222u128, // #[test]
333u128, // fn test_range_1() {
]; // let vals = &[
let mut data = test_aux_vals(vals); // 1u128,
// 100u128,
let _header = U128Header::deserialize(&mut data); // 3u128,
let decomp = CompactSpaceDecompressor::open(data).unwrap(); // 99999u128,
let complete_range = 0..vals.len() as u32; // 100000u128,
for (pos, val) in vals.iter().enumerate() { // 100001u128,
let val = *val; // 4_000_211_221u128,
let pos = pos as u32; // 4_000_211_222u128,
let mut positions = Vec::new(); // 333u128,
decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions); // ];
assert_eq!(positions, vec![pos]); // let mut data = test_aux_vals(vals);
} //
// let _header = U128Header::deserialize(&mut data);
// handle docid range out of bounds // let decomp = CompactSpaceDecompressor::open(data).unwrap();
let positions: Vec<u32> = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX); // let complete_range = 0..vals.len() as u32;
assert!(positions.is_empty()); // for (pos, val) in vals.iter().enumerate() {
// let val = *val;
let positions = // let pos = pos as u32;
get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone()); // let mut positions = Vec::new();
assert_eq!(positions, vec![0]); // decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
let positions = // assert_eq!(positions, vec![pos]);
get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone()); // }
assert_eq!(positions, vec![0]); //
let positions = // handle docid range out of bounds
get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone()); // let positions: Vec<u32> = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
assert_eq!(positions, vec![0, 2]); // assert!(positions.is_empty());
assert_eq!( //
get_positions_for_value_range_helper( // let positions =
&decomp, // get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
99999u128..=99999u128, // assert_eq!(positions, vec![0]);
complete_range.clone() // let positions =
), // get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
vec![3] // assert_eq!(positions, vec![0]);
); // let positions =
assert_eq!( // get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
get_positions_for_value_range_helper( // assert_eq!(positions, vec![0, 2]);
&decomp, // assert_eq!(
99999u128..=100000u128, // get_positions_for_value_range_helper(
complete_range.clone() // &decomp,
), // 99999u128..=99999u128,
vec![3, 4] // complete_range.clone()
); // ),
assert_eq!( // vec![3]
get_positions_for_value_range_helper( // );
&decomp, // assert_eq!(
99998u128..=100000u128, // get_positions_for_value_range_helper(
complete_range.clone() // &decomp,
), // 99999u128..=100000u128,
vec![3, 4] // complete_range.clone()
); // ),
assert_eq!( // vec![3, 4]
&get_positions_for_value_range_helper( // );
&decomp, // assert_eq!(
99998u128..=99999u128, // get_positions_for_value_range_helper(
complete_range.clone() // &decomp,
), // 99998u128..=100000u128,
&[3] // complete_range.clone()
); // ),
assert!(get_positions_for_value_range_helper( // vec![3, 4]
&decomp, // );
99998u128..=99998u128, // assert_eq!(
complete_range.clone() // &get_positions_for_value_range_helper(
) // &decomp,
.is_empty()); // 99998u128..=99999u128,
assert_eq!( // complete_range.clone()
&get_positions_for_value_range_helper( // ),
&decomp, // &[3]
333u128..=333u128, // );
complete_range.clone() // assert!(get_positions_for_value_range_helper(
), // &decomp,
&[8] // 99998u128..=99998u128,
); // complete_range.clone()
assert_eq!( // )
&get_positions_for_value_range_helper( // .is_empty());
&decomp, // assert_eq!(
332u128..=333u128, // &get_positions_for_value_range_helper(
complete_range.clone() // &decomp,
), // 333u128..=333u128,
&[8] // complete_range.clone()
); // ),
assert_eq!( // &[8]
&get_positions_for_value_range_helper( // );
&decomp, // assert_eq!(
332u128..=334u128, // &get_positions_for_value_range_helper(
complete_range.clone() // &decomp,
), // 332u128..=333u128,
&[8] // complete_range.clone()
); // ),
assert_eq!( // &[8]
&get_positions_for_value_range_helper( // );
&decomp, // assert_eq!(
333u128..=334u128, // &get_positions_for_value_range_helper(
complete_range.clone() // &decomp,
), // 332u128..=334u128,
&[8] // complete_range.clone()
); // ),
// &[8]
assert_eq!( // );
&get_positions_for_value_range_helper( // assert_eq!(
&decomp, // &get_positions_for_value_range_helper(
4_000_211_221u128..=5_000_000_000u128, // &decomp,
complete_range // 333u128..=334u128,
), // complete_range.clone()
&[6, 7] // ),
); // &[8]
} // );
//
#[test] // assert_eq!(
fn test_empty() { // &get_positions_for_value_range_helper(
let vals = &[]; // &decomp,
let data = test_aux_vals(vals); // 4_000_211_221u128..=5_000_000_000u128,
let _decomp = CompactSpaceDecompressor::open(data).unwrap(); // complete_range
} // ),
// &[6, 7]
#[test] // );
fn test_range_2() { // }
let vals = &[ //
100u128, // #[test]
99999u128, // fn test_empty() {
100000u128, // let vals = &[];
100001u128, // let data = test_aux_vals(vals);
4_000_211_221u128, // let _decomp = CompactSpaceDecompressor::open(data).unwrap();
4_000_211_222u128, // }
333u128, //
]; // #[test]
let mut data = test_aux_vals(vals); // fn test_range_2() {
let _header = U128Header::deserialize(&mut data); // let vals = &[
let decomp = CompactSpaceDecompressor::open(data).unwrap(); // 100u128,
let complete_range = 0..vals.len() as u32; // 99999u128,
assert!( // 100000u128,
&get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone()) // 100001u128,
.is_empty(), // 4_000_211_221u128,
); // 4_000_211_222u128,
assert_eq!( // 333u128,
&get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()), // ];
&[0] // let mut data = test_aux_vals(vals);
); // let _header = U128Header::deserialize(&mut data);
assert_eq!( // let decomp = CompactSpaceDecompressor::open(data).unwrap();
&get_positions_for_value_range_helper(&decomp, 0..=105, complete_range), // let complete_range = 0..vals.len() as u32;
&[0] // assert!(
); // &get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone())
} // .is_empty(),
// );
fn get_positions_for_value_range_helper<C: ColumnValues<T> + ?Sized, T: PartialOrd>( // assert_eq!(
column: &C, // &get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
value_range: RangeInclusive<T>, // &[0]
doc_id_range: Range<u32>, // );
) -> Vec<u32> { // assert_eq!(
let mut positions = Vec::new(); // &get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
column.get_docids_for_value_range(value_range, doc_id_range, &mut positions); // &[0]
positions // );
} // }
//
#[test] // fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
fn test_range_3() { // column: &C,
let vals = &[ // value_range: RangeInclusive<T>,
200u128, // doc_id_range: Range<u32>,
201, // ) -> Vec<u32> {
202, // let mut positions = Vec::new();
203, // column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
204, // positions
204, // }
206, //
207, // #[test]
208, // fn test_range_3() {
209, // let vals = &[
210, // 200u128,
1_000_000, // 201,
5_000_000_000, // 202,
]; // 203,
let mut out = Vec::new(); // 204,
serialize_column_values_u128(&&vals[..], &mut out).unwrap(); // 204,
let decomp = open_u128_mapped(OwnedBytes::new(out)).unwrap(); // 206,
let complete_range = 0..vals.len() as u32; // 207,
// 208,
assert_eq!( // 209,
get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()), // 210,
vec![0] // 1_000_000,
); // 5_000_000_000,
// ];
assert_eq!( // let mut out = Vec::new();
get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()), // serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
vec![0, 1] // let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
); // let complete_range = 0..vals.len() as u32;
//
assert_eq!( // assert_eq!(
get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()), // get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
vec![0] // vec![0]
); // );
//
assert_eq!( // assert_eq!(
get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range), // get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
vec![11] // vec![0, 1]
); // );
} //
// assert_eq!(
#[test] // get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
fn test_bug1() { // vec![0]
let vals = &[9223372036854775806]; // );
let _data = test_aux_vals(vals); //
} // assert_eq!(
// get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
#[test] // vec![11]
fn test_bug2() { // );
let vals = &[340282366920938463463374607431768211455u128]; // }
let _data = test_aux_vals(vals); //
} // #[test]
// fn test_bug1() {
#[test] // let vals = &[9223372036854775806];
fn test_bug3() { // let _data = test_aux_vals(vals);
let vals = &[340282366920938463463374607431768211454]; // }
let _data = test_aux_vals(vals); //
} // #[test]
// fn test_bug2() {
#[test] // let vals = &[340282366920938463463374607431768211455u128];
fn test_bug4() { // let _data = test_aux_vals(vals);
let vals = &[340282366920938463463374607431768211455, 0]; // }
let _data = test_aux_vals(vals); //
} // #[test]
// fn test_bug3() {
#[test] // let vals = &[340282366920938463463374607431768211454];
fn test_first_large_gaps() { // let _data = test_aux_vals(vals);
let vals = &[1_000_000_000u128; 100]; // }
let _data = test_aux_vals(vals); //
} // #[test]
// fn test_bug4() {
use proptest::prelude::*; // let vals = &[340282366920938463463374607431768211455, 0];
// let _data = test_aux_vals(vals);
fn num_strategy() -> impl Strategy<Value = u128> { // }
prop_oneof![ //
1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ), // #[test]
1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ), // fn test_first_large_gaps() {
1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ), // let vals = &[1_000_000_000u128; 100];
1 => prop::num::u128::ANY.prop_map(|num| num % 10 ), // let _data = test_aux_vals(vals);
20 => prop::num::u128::ANY, // }
] // use itertools::Itertools;
} // use proptest::prelude::*;
//
proptest! { // fn num_strategy() -> impl Strategy<Value = u128> {
#![proptest_config(ProptestConfig::with_cases(10))] // prop_oneof![
// 1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
#[test] // 1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
fn compress_decompress_random(vals in proptest::collection::vec(num_strategy() , 1..1000)) { // 1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
let _data = test_aux_vals(&vals); // 1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
} // 20 => prop::num::u128::ANY,
} // ]
} // }
//
// proptest! {
// #![proptest_config(ProptestConfig::with_cases(10))]
//
// #[test]
// fn compress_decompress_random(vals in proptest::collection::vec(num_strategy()
// , 1..1000)) {
// let _data = test_aux_vals(&vals);
// }
// }
// }
//
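
The tests in this hunk exercise the compact-space mapping: the sorted u128 values are covered by a small set of ranges, and each value is addressed by its offset inside the concatenation of those ranges, which keeps the bitpacked codes small. Below is a minimal self-contained sketch of that idea; GAP, build_ranges and to_compact are simplifications invented for the example and are not tantivy's CompactSpace implementation.

// Cover the sorted values with a few inclusive ranges, then address each
// value by its offset inside the concatenation of those ranges.
const GAP: u128 = 8;

fn build_ranges(sorted_vals: &[u128]) -> Vec<std::ops::RangeInclusive<u128>> {
    let mut ranges = Vec::new();
    let mut iter = sorted_vals.iter().copied();
    let Some(first) = iter.next() else { return ranges };
    let (mut start, mut end) = (first, first);
    for val in iter {
        if val - end > GAP {
            ranges.push(start..=end);
            start = val;
        }
        end = val;
    }
    ranges.push(start..=end);
    ranges
}

fn to_compact(ranges: &[std::ops::RangeInclusive<u128>], val: u128) -> Option<u128> {
    let mut offset = 0u128;
    for range in ranges {
        if range.contains(&val) {
            return Some(offset + (val - range.start()));
        }
        offset += range.end() - range.start() + 1;
    }
    None
}

fn main() {
    let vals = [2u128, 4, 1000, 1001, 1012, 1260];
    let ranges = build_ranges(&vals);
    // The amplitude of the compact space is the sum of the range lengths,
    // which is much smaller than max - min for sparse data.
    let amplitude: u128 = ranges.iter().map(|r| r.end() - r.start() + 1).sum();
    assert!(amplitude <= 1260 - 2 + 1);
    assert_eq!(to_compact(&ranges, 4), Some(2));
}
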

View File

@@ -0,0 +1,222 @@
#[macro_use]
extern crate prettytable;
use std::collections::HashSet;
use std::env;
use std::io::BufRead;
use std::net::{IpAddr, Ipv6Addr};
use std::str::FromStr;
use common::OwnedBytes;
use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
use itertools::Itertools;
use measure_time::print_time;
use prettytable::{Cell, Row, Table};
fn print_set_stats(ip_addrs: &[u128]) {
println!("NumIps\t{}", ip_addrs.len());
let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
println!("NumUniqueIps\t{}", ip_addr_set.len());
let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
println!("RatioUniqueOverTotal\t{ratio_unique:.4}");
// histogram
let mut ip_addrs = ip_addrs.to_vec();
ip_addrs.sort();
let mut cnts: Vec<usize> = ip_addrs
.into_iter()
.dedup_with_count()
.map(|(cnt, _)| cnt)
.collect();
cnts.sort();
let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
let total: usize = cnts.iter().sum();
println!("{}", total);
println!("{}", top_256_cnt);
println!("{}", top_128_cnt);
println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);
let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
cnts.sort_by(|a, b| {
if a.1 == b.1 {
a.0.cmp(&b.0)
} else {
b.1.cmp(&a.1)
}
});
}
fn ip_dataset() -> Vec<u128> {
let mut ip_addr_v4 = 0;
let stdin = std::io::stdin();
let ip_addrs: Vec<u128> = stdin
.lock()
.lines()
.flat_map(|line| {
let line = line.unwrap();
let line = line.trim();
let ip_addr = IpAddr::from_str(line.trim()).ok()?;
if ip_addr.is_ipv4() {
ip_addr_v4 += 1;
}
let ip_addr_v6: Ipv6Addr = match ip_addr {
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
IpAddr::V6(v6) => v6,
};
Some(ip_addr_v6)
})
.map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
.collect();
println!("IpAddrsAny\t{}", ip_addrs.len());
println!("IpAddrsV4\t{}", ip_addr_v4);
ip_addrs
}
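
ip_dataset folds both address families into a single u128 value space by going through the IPv6-mapped form. The conversion isolated as a small self-contained function (ip_to_u128 is a name made up for this sketch):

use std::net::{IpAddr, Ipv6Addr};
use std::str::FromStr;

// Normalize any textual IP address to a u128: IPv4 addresses are first
// mapped into IPv6 (`::ffff:a.b.c.d`), then the 16 octets are read as a
// big-endian u128.
fn ip_to_u128(s: &str) -> Option<u128> {
    let ip_addr = IpAddr::from_str(s.trim()).ok()?;
    let ip_v6: Ipv6Addr = match ip_addr {
        IpAddr::V4(v4) => v4.to_ipv6_mapped(),
        IpAddr::V6(v6) => v6,
    };
    Some(u128::from_be_bytes(ip_v6.octets()))
}

fn main() {
    assert_eq!(ip_to_u128("::1"), Some(1));
    assert_eq!(ip_to_u128("127.0.0.1"), Some(0xffff_7f00_0001));
}
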
fn bench_ip() {
let dataset = ip_dataset();
print_set_stats(&dataset);
// Chunks
{
let mut data = vec![];
for dataset in dataset.chunks(500_000) {
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
}
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
println!("Compression 50_000 chunks {:.4}", compression);
println!(
"Num Bits per elem {:.2}",
(data.len() * 8) as f32 / dataset.len() as f32
);
}
let mut data = vec![];
{
print_time!("creation");
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
}
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
println!("Compression {:.2}", compression);
println!(
"Num Bits per elem {:.2}",
(data.len() * 8) as f32 / dataset.len() as f32
);
let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
// Sample some ranges
let mut doc_values = Vec::new();
for value in dataset.iter().take(1110).skip(1100).cloned() {
doc_values.clear();
print_time!("get range");
decompressor.get_docids_for_value_range(
value..=value,
0..decompressor.num_vals(),
&mut doc_values,
);
println!("{:?}", doc_values.len());
}
}
fn main() {
if env::args().nth(1).unwrap() == "bench_ip" {
bench_ip();
return;
}
let mut table = Table::new();
// Add a row per time
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
for (data, data_set_name) in get_codec_test_data_sets() {
let results: Vec<(f32, f32, FastFieldCodecType)> = [
serialize_with_codec(&data, FastFieldCodecType::Bitpacked),
serialize_with_codec(&data, FastFieldCodecType::Linear),
serialize_with_codec(&data, FastFieldCodecType::BlockwiseLinear),
]
.into_iter()
.flatten()
.collect();
let best_compression_ratio_codec = results
.iter()
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
.cloned()
.unwrap();
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
for (est, comp, codec_type) in results {
let est_cell = est.to_string();
let ratio_cell = comp.to_string();
let style = if comp == best_compression_ratio_codec.1 {
"Fb"
} else {
""
};
table.add_row(Row::new(vec![
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""),
]));
}
}
table.printstd();
}
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![];
let data = (1000..=200_000_u64).collect::<Vec<_>>();
data_and_names.push((data, "Autoincrement"));
let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (num as f32 + num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
// let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing concave"));
let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (200_000.0 - num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing convex"));
let data = (1000..=200_000_u64)
.map(|num| num + rand::random::<u8>() as u64)
.collect::<Vec<_>>();
data_and_names.push((data, "Almost monotonically increasing"));
data_and_names
}
pub fn serialize_with_codec(
data: &[u64],
codec_type: FastFieldCodecType,
) -> Option<(f32, f32, FastFieldCodecType)> {
let col = VecColumn::from(data);
let estimation = fastfield_codecs::estimate(&col, codec_type)?;
let mut out = Vec::new();
fastfield_codecs::serialize(&col, &mut out, &[codec_type]).ok()?;
let actual_compression = out.len() as f32 / (col.num_vals() * 8) as f32;
Some((estimation, actual_compression, codec_type))
}

View File

@@ -36,48 +36,6 @@ pub use u64_based::{
}; };
pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn}; pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn};
use crate::iterable::Iterable;
use crate::{ColumnIndex, MergeRowOrder};
pub(crate) struct MergedColumnValues<'a, T> {
pub(crate) column_indexes: &'a [Option<ColumnIndex>],
pub(crate) column_values: &'a [Option<Arc<dyn ColumnValues<T>>>],
pub(crate) merge_row_order: &'a MergeRowOrder,
}
impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
match self.merge_row_order {
MergeRowOrder::Stack(_) => {
Box::new(self
.column_values
.iter()
.flatten()
.flat_map(|column_value| column_value.iter()))
},
MergeRowOrder::Shuffled(shuffle_merge_order) => {
Box::new(shuffle_merge_order
.iter_new_to_old_row_addrs()
.flat_map(|row_addr| {
let Some(column_index) = self.column_indexes[row_addr.segment_ord as usize].as_ref() else {
return None;
};
let Some(column_values) = self.column_values[row_addr.segment_ord as usize].as_ref() else {
return None;
};
let value_range = column_index.value_row_ids(row_addr.row_id);
Some((value_range, column_values))
})
.flat_map(|(value_range, column_values)| {
value_range
.into_iter()
.map(|val| column_values.get_val(val))
})
)
},
}
}
}
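
MergedColumnValues in this hunk either chains the segments in order (Stack) or walks an explicit new-to-old row mapping (Shuffled). A simplified self-contained model of those two orders, with one value per row and plain Vecs standing in for the column readers (MergeOrder and merged_values are invented for the illustration):

enum MergeOrder {
    /// Segments are concatenated as-is.
    Stack,
    /// Explicit mapping: new row -> (segment ordinal, old row id).
    Shuffled(Vec<(usize, usize)>),
}

fn merged_values(segments: &[Vec<u64>], order: &MergeOrder) -> Vec<u64> {
    match order {
        MergeOrder::Stack => segments.iter().flatten().copied().collect(),
        MergeOrder::Shuffled(new_to_old) => new_to_old
            .iter()
            .map(|&(segment_ord, row_id)| segments[segment_ord][row_id])
            .collect(),
    }
}

fn main() {
    let segments = vec![vec![10u64, 11], vec![20, 21]];
    assert_eq!(merged_values(&segments, &MergeOrder::Stack), vec![10, 11, 20, 21]);
    // A shuffled order may interleave rows and drop some of them.
    let shuffled = MergeOrder::Shuffled(vec![(1, 0), (0, 1)]);
    assert_eq!(merged_values(&segments, &shuffled), vec![20, 11]);
}
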
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)] #[repr(u8)]

View File

@@ -47,18 +47,20 @@ impl BinarySerializable for U128Header {
/// Serializes u128 values with the compact space codec. /// Serializes u128 values with the compact space codec.
pub fn serialize_column_values_u128<T: MonotonicallyMappableToU128>( pub fn serialize_column_values_u128<T: MonotonicallyMappableToU128>(
iterable: &dyn Iterable<T>, iterable: &dyn Iterable<T>,
num_vals: u32,
output: &mut impl io::Write, output: &mut impl io::Write,
) -> io::Result<()> { ) -> io::Result<()> {
let header = U128Header {
num_vals,
codec_type: U128FastFieldCodecType::CompactSpace,
};
header.serialize(output)?;
let compressor = CompactSpaceCompressor::train_from( let compressor = CompactSpaceCompressor::train_from(
iterable iterable
.boxed_iter() .boxed_iter()
.map(MonotonicallyMappableToU128::to_u128), .map(MonotonicallyMappableToU128::to_u128),
num_vals,
); );
let header = U128Header {
num_vals: compressor.num_vals(),
codec_type: U128FastFieldCodecType::CompactSpace,
};
header.serialize(output)?;
compressor.compress_into( compressor.compress_into(
iterable iterable
.boxed_iter() .boxed_iter()
@@ -72,7 +74,7 @@ pub fn serialize_column_values_u128<T: MonotonicallyMappableToU128>(
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::column_values::u64_based::{ use crate::column_values::u64_based::{
serialize_and_load_u64_based_column_values, serialize_u64_based_column_values, self, serialize_and_load_u64_based_column_values, serialize_u64_based_column_values,
ALL_U64_CODEC_TYPES, ALL_U64_CODEC_TYPES,
}; };
use crate::column_values::CodecType; use crate::column_values::CodecType;
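
One side of this hunk derives num_vals inside the compressor instead of taking it as a parameter; either way, the value count must be known before U128Header is written, because the header precedes the payload. A self-contained toy sketch of that ordering constraint (serialize_values is invented for the example and is not the actual codec):

use std::io::{self, Write};

// "Training" first (a single buffering pass here) so the count is available
// when the header is written; the payload follows. The real codec writes a
// compact-space-compressed payload instead of raw little-endian values.
fn serialize_values(values: impl Iterator<Item = u128>, output: &mut impl Write) -> io::Result<()> {
    let values: Vec<u128> = values.collect();
    let num_vals = values.len() as u32;
    // Header first...
    output.write_all(&num_vals.to_le_bytes())?;
    // ...then the payload.
    for val in values {
        output.write_all(&val.to_le_bytes())?;
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let mut out: Vec<u8> = Vec::new();
    serialize_values([2u128, 4, 1000].into_iter(), &mut out)?;
    assert_eq!(out.len(), 4 + 3 * 16);
    Ok(())
}
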

View File

@@ -67,6 +67,19 @@ impl Line {
self.intercept.wrapping_add(linear_part) self.intercept.wrapping_add(linear_part)
} }
// Same as train, but the intercept is only estimated from provided sample positions
pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
let first_val = sample_positions_and_values[0].1;
let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
Self::train_from(
first_val,
last_val,
num_vals as u32,
sample_positions_and_values.iter().cloned(),
)
}
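
Line::estimate fits the line from a handful of sampled (position, value) pairs; the codec then only stores each value's residual against the line's prediction. A simplified float-based sketch of the estimation, assuming non-decreasing samples (estimate_line is invented for the example; the real code works in u64 wrapping arithmetic):

// Slope from the first and last sample, residual = value - prediction.
fn estimate_line(samples: &[(u64, u64)]) -> (f64, f64) {
    let (first_pos, first_val) = samples[0];
    let (last_pos, last_val) = samples[samples.len() - 1];
    let slope = (last_val - first_val) as f64 / (last_pos - first_pos) as f64;
    let intercept = first_val as f64 - slope * first_pos as f64;
    (slope, intercept)
}

fn main() {
    // Almost-linear data: predictions are close, so residuals stay small.
    let samples: Vec<(u64, u64)> = (0..512u64).map(|pos| (pos, 1000 + 3 * pos)).collect();
    let (slope, intercept) = estimate_line(&samples);
    let max_residual = samples
        .iter()
        .map(|&(pos, val)| (val as f64 - (intercept + slope * pos as f64)).abs())
        .fold(0.0f64, f64::max);
    assert!(max_residual < 1.0);
}
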
// Intercept is only computed from provided positions // Intercept is only computed from provided positions
pub fn train_from( pub fn train_from(
first_val: u64, first_val: u64,

View File

@@ -7,7 +7,7 @@ use super::line::Line;
use super::ColumnValues; use super::ColumnValues;
use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats}; use crate::column_values::u64_based::{ColumnCodec, ColumnCodecEstimator, Stats};
use crate::column_values::VecColumn; use crate::column_values::VecColumn;
use crate::RowId; use crate::{MonotonicallyMappableToU64, RowId};
const HALF_SPACE: u64 = u64::MAX / 2; const HALF_SPACE: u64 = u64::MAX / 2;
const LINE_ESTIMATION_BLOCK_LEN: usize = 512; const LINE_ESTIMATION_BLOCK_LEN: usize = 512;

View File

@@ -10,7 +10,6 @@ use std::sync::Arc;
use common::{BinarySerializable, OwnedBytes}; use common::{BinarySerializable, OwnedBytes};
use crate::column_index::MultiValueIndex;
use crate::column_values::monotonic_mapping::{ use crate::column_values::monotonic_mapping::{
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal, StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
}; };

View File

@@ -4,22 +4,24 @@ use std::net::Ipv6Addr;
use crate::value::NumericalType; use crate::value::NumericalType;
use crate::InvalidData; use crate::InvalidData;
/// The column type represents the column type. /// The column type represents the column type and can fit on 6-bits.
/// Any changes need to be propagated to `COLUMN_TYPES`. ///
/// - bits[0..3]: Column category type.
/// - bits[3..6]: Numerical type if necessary.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd)] #[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd)]
#[repr(u8)] #[repr(u8)]
pub enum ColumnType { pub enum ColumnType {
I64 = 0u8, I64 = 0u8,
U64 = 1u8, U64 = 1u8,
F64 = 2u8, F64 = 2u8,
Bytes = 3u8, Bytes = 10u8,
Str = 4u8, Str = 14u8,
Bool = 5u8, Bool = 18u8,
IpAddr = 6u8, IpAddr = 22u8,
DateTime = 7u8, DateTime = 26u8,
} }
// The order needs to match _exactly_ the order in the enum #[cfg(test)]
const COLUMN_TYPES: [ColumnType; 8] = [ const COLUMN_TYPES: [ColumnType; 8] = [
ColumnType::I64, ColumnType::I64,
ColumnType::U64, ColumnType::U64,
@@ -37,7 +39,18 @@ impl ColumnType {
} }
pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> { pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
COLUMN_TYPES.get(code as usize).copied().ok_or(InvalidData) use ColumnType::*;
match code {
0u8 => Ok(I64),
1u8 => Ok(U64),
2u8 => Ok(F64),
10u8 => Ok(Bytes),
14u8 => Ok(Str),
18u8 => Ok(Bool),
22u8 => Ok(IpAddr),
26u8 => Ok(Self::DateTime),
_ => Err(InvalidData),
}
} }
} }
@@ -130,20 +143,70 @@ impl HasAssociatedColumnType for Ipv6Addr {
} }
} }
/// Column types are grouped into different categories that
/// correspond to the different `JsonValue` types.
///
/// The columnar writer will apply coercion rules to make sure that
/// at most one column exists per `ColumnTypeCategory`.
///
/// See also [README.md].
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
#[repr(u8)]
pub enum ColumnTypeCategory {
Bool,
Str,
Numerical,
DateTime,
Bytes,
IpAddr,
}
impl From<ColumnType> for ColumnTypeCategory {
fn from(column_type: ColumnType) -> Self {
match column_type {
ColumnType::I64 => ColumnTypeCategory::Numerical,
ColumnType::U64 => ColumnTypeCategory::Numerical,
ColumnType::F64 => ColumnTypeCategory::Numerical,
ColumnType::Bytes => ColumnTypeCategory::Bytes,
ColumnType::Str => ColumnTypeCategory::Str,
ColumnType::Bool => ColumnTypeCategory::Bool,
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
ColumnType::DateTime => ColumnTypeCategory::DateTime,
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashSet;
use super::*; use super::*;
use crate::Cardinality; use crate::Cardinality;
#[test] #[test]
fn test_column_type_to_code() { fn test_column_type_to_code() {
for (code, expected_column_type) in super::COLUMN_TYPES.iter().copied().enumerate() { let mut column_type_set: HashSet<ColumnType> = HashSet::new();
if let Ok(column_type) = ColumnType::try_from_code(code as u8) { for code in u8::MIN..=u8::MAX {
assert_eq!(column_type, expected_column_type); if let Ok(column_type) = ColumnType::try_from_code(code) {
assert_eq!(column_type.to_code(), code);
assert!(column_type_set.insert(column_type));
} }
} }
for code in COLUMN_TYPES.len() as u8..=u8::MAX { assert_eq!(column_type_set.len(), super::COLUMN_TYPES.len());
assert!(ColumnType::try_from_code(code as u8).is_err()); }
#[test]
fn test_column_category_sort_consistent_with_column_type_sort() {
// This is a very important property because we
// need to serialize columns in the right order.
let mut column_types: Vec<ColumnType> = super::COLUMN_TYPES.iter().copied().collect();
column_types.sort_by_key(|col| col.to_code());
let column_categories: Vec<ColumnTypeCategory> = column_types
.into_iter()
.map(ColumnTypeCategory::from)
.collect();
for (prev, next) in column_categories.iter().zip(column_categories.iter().skip(1)) {
assert!(prev <= next);
} }
} }

View File

@@ -1,130 +1,68 @@
use std::io::{self, Write}; use std::io::{self, Write};
use common::{BitSet, CountingWriter, ReadOnlyBitSet}; use common::CountingWriter;
use sstable::{SSTable, TermOrdinal}; use sstable::{SSTable, TermOrdinal};
use super::term_merger::TermMerger; use super::term_merger::TermMerger;
use crate::column::serialize_column_mappable_to_u64; use crate::column::serialize_column_mappable_to_u64;
use crate::column_index::SerializableColumnIndex; use crate::column_index::SerializableColumnIndex;
use crate::iterable::Iterable; use crate::iterable::Iterable;
use crate::{BytesColumn, MergeRowOrder, ShuffleMergeOrder}; use crate::BytesColumn;
// Serialize [Dictionary, Column, dictionary num bytes U32::LE] // Serialize [Dictionary, Column, dictionary num bytes U32::LE]
// Column: [Column Index, Column Values, column index num bytes U32::LE] // Column: [Column Index, Column Values, column index num bytes U32::LE]
pub fn merge_bytes_or_str_column( pub fn merge_bytes_or_str_column(
column_index: SerializableColumnIndex<'_>, column_index: SerializableColumnIndex<'_>,
bytes_columns: &[Option<BytesColumn>], bytes_columns: &[BytesColumn],
merge_row_order: &MergeRowOrder,
output: &mut impl Write, output: &mut impl Write,
) -> io::Result<()> { ) -> io::Result<()> {
// Serialize dict and generate mapping for values // Serialize dict and generate mapping for values
let mut output = CountingWriter::wrap(output); let mut output = CountingWriter::wrap(output);
// TODO !!! Remove useless terms. let term_ord_mapping = serialize_merged_dict(bytes_columns, &mut output)?;
let term_ord_mapping = serialize_merged_dict(bytes_columns, merge_row_order, &mut output)?;
let dictionary_num_bytes: u32 = output.written_bytes() as u32; let dictionary_num_bytes: u32 = output.written_bytes() as u32;
let output = output.finish(); let output = output.finish();
let remapped_term_ordinals_values = RemappedTermOrdinalsValues { let remapped_term_ordinals_values = RemappedTermOrdinalsValues {
bytes_columns, bytes_columns,
term_ord_mapping: &term_ord_mapping, term_ord_mapping: &term_ord_mapping,
merge_row_order,
}; };
serialize_column_mappable_to_u64(column_index, &remapped_term_ordinals_values, output)?; serialize_column_mappable_to_u64(column_index, &remapped_term_ordinals_values, output)?;
// serialize_bytes_or_str_column(column_index, bytes_columns, &term_ord_mapping, output)?;
output.write_all(&dictionary_num_bytes.to_le_bytes())?; output.write_all(&dictionary_num_bytes.to_le_bytes())?;
Ok(()) Ok(())
} }
struct RemappedTermOrdinalsValues<'a> { struct RemappedTermOrdinalsValues<'a> {
bytes_columns: &'a [Option<BytesColumn>], bytes_columns: &'a [BytesColumn],
term_ord_mapping: &'a TermOrdinalMapping, term_ord_mapping: &'a TermOrdinalMapping,
merge_row_order: &'a MergeRowOrder,
} }
impl<'a> Iterable for RemappedTermOrdinalsValues<'a> { impl<'a> Iterable for RemappedTermOrdinalsValues<'a> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> { fn boxed_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
match self.merge_row_order {
MergeRowOrder::Stack(_) => self.boxed_iter_stacked(),
MergeRowOrder::Shuffled(shuffle_merge_order) => {
self.boxed_iter_shuffled(shuffle_merge_order)
}
}
}
}
impl<'a> RemappedTermOrdinalsValues<'a> {
fn boxed_iter_stacked(&self) -> Box<dyn Iterator<Item = u64> + '_> {
let iter = self let iter = self
.bytes_columns .bytes_columns
.iter() .iter()
.enumerate() .enumerate()
.flat_map(|(segment_ord, byte_column)| { .flat_map(|(segment_ord, byte_column)| {
let segment_ord = self.term_ord_mapping.get_segment(segment_ord as u32); let segment_ord = self.term_ord_mapping.get_segment(segment_ord);
byte_column.into_iter().flat_map(move |bytes_column| { byte_column
bytes_column .ords()
.ords() .values
.values .iter()
.iter() .map(move |term_ord| segment_ord[term_ord as usize])
.map(move |term_ord| segment_ord[term_ord as usize])
})
}); });
// TODO see if we can better decompose the mapping / and the stacking // TODO see if we can better decompose the mapping / and the stacking
Box::new(iter) Box::new(iter)
} }
fn boxed_iter_shuffled<'b>(
&'b self,
shuffle_merge_order: &'b ShuffleMergeOrder,
) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(
shuffle_merge_order
.iter_new_to_old_row_addrs()
.flat_map(move |old_addr| {
let segment_ord = self.term_ord_mapping.get_segment(old_addr.segment_ord);
self.bytes_columns[old_addr.segment_ord as usize]
.as_ref()
.into_iter()
.flat_map(move |bytes_column| {
bytes_column
.term_ords(old_addr.row_id)
.map(|old_term_ord: u64| segment_ord[old_term_ord as usize])
})
}),
)
}
}
fn compute_term_bitset(column: &BytesColumn, row_bitset: &ReadOnlyBitSet) -> BitSet {
let num_terms = column.dictionary().num_terms();
let mut term_bitset = BitSet::with_max_value(num_terms as u32);
for row_id in row_bitset.iter() {
for term_ord in column.term_ord_column.values(row_id) {
term_bitset.insert(term_ord as u32);
}
}
term_bitset
}
fn is_term_present(bitsets: &[Option<BitSet>], term_merger: &TermMerger) -> bool {
for (segment_ord, from_term_ord) in term_merger.matching_segments() {
if let Some(bitset) = bitsets[segment_ord].as_ref() {
if bitset.contains(from_term_ord as u32) {
return true;
}
} else {
return true;
}
}
false
} }
fn serialize_merged_dict( fn serialize_merged_dict(
bytes_columns: &[Option<BytesColumn>], bytes_columns: &[BytesColumn],
merge_row_order: &MergeRowOrder,
output: &mut impl Write, output: &mut impl Write,
) -> io::Result<TermOrdinalMapping> { ) -> io::Result<TermOrdinalMapping> {
let mut term_ord_mapping = TermOrdinalMapping::default(); let mut term_ord_mapping = TermOrdinalMapping::default();
let mut field_term_streams = Vec::new(); let mut field_term_streams = Vec::new();
for column in bytes_columns.iter().flatten() { for column in bytes_columns {
term_ord_mapping.add_segment(column.dictionary.num_terms()); term_ord_mapping.add_segment(column.dictionary.num_terms());
let terms = column.dictionary.stream()?; let terms = column.dictionary.stream()?;
field_term_streams.push(terms); field_term_streams.push(terms);
@@ -133,57 +71,21 @@ fn serialize_merged_dict(
let mut merged_terms = TermMerger::new(field_term_streams); let mut merged_terms = TermMerger::new(field_term_streams);
let mut sstable_builder = sstable::VoidSSTable::writer(output); let mut sstable_builder = sstable::VoidSSTable::writer(output);
// TODO support complex `merge_row_order`. let mut current_term_ord = 0;
match merge_row_order { while merged_terms.advance() {
MergeRowOrder::Stack(_) => { let term_bytes: &[u8] = merged_terms.key();
let mut current_term_ord = 0;
while merged_terms.advance() { sstable_builder.insert(term_bytes, &())?;
let term_bytes: &[u8] = merged_terms.key(); for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
sstable_builder.insert(term_bytes, &())?; term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
}
current_term_ord += 1;
}
sstable_builder.finish()?;
}
MergeRowOrder::Shuffled(shuffle_merge_order) => {
assert_eq!(shuffle_merge_order.alive_bitsets.len(), bytes_columns.len());
let mut term_bitsets: Vec<Option<BitSet>> = Vec::with_capacity(bytes_columns.len());
for (alive_bitset_opt, bytes_column_opt) in shuffle_merge_order
.alive_bitsets
.iter()
.zip(bytes_columns.iter())
{
match (alive_bitset_opt, bytes_column_opt) {
(Some(alive_bitset), Some(bytes_column)) => {
let term_bitset = compute_term_bitset(bytes_column, alive_bitset);
term_bitsets.push(Some(term_bitset));
}
_ => {
term_bitsets.push(None);
}
}
}
let mut current_term_ord = 0;
while merged_terms.advance() {
let term_bytes: &[u8] = merged_terms.key();
if !is_term_present(&term_bitsets[..], &merged_terms) {
continue;
}
sstable_builder.insert(term_bytes, &())?;
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, current_term_ord);
}
current_term_ord += 1;
}
sstable_builder.finish()?;
} }
current_term_ord += 1;
} }
sstable_builder.finish()?;
Ok(term_ord_mapping) Ok(term_ord_mapping)
} }
#[derive(Default, Debug)] #[derive(Default)]
struct TermOrdinalMapping { struct TermOrdinalMapping {
per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>, per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>,
} }
@@ -198,7 +100,7 @@ impl TermOrdinalMapping {
self.per_segment_new_term_ordinals[segment_ord][from_ord as usize] = to_ord; self.per_segment_new_term_ordinals[segment_ord][from_ord as usize] = to_ord;
} }
fn get_segment(&self, segment_ord: u32) -> &[TermOrdinal] { fn get_segment(&self, segment_ord: usize) -> &[TermOrdinal] {
&(self.per_segment_new_term_ordinals[segment_ord as usize])[..] &(self.per_segment_new_term_ordinals[segment_ord])[..]
} }
} }
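
serialize_merged_dict streams the sorted terms of every segment through TermMerger, assigns new ordinals in merged order, and records a per-segment old-to-new ordinal table so the term-ordinal columns can be rewritten afterwards. A self-contained sketch of that remapping over in-memory dictionaries (merge_dictionaries and new_ordinal are names invented for this example; the real code streams sstables instead of collecting into a BTreeSet):

use std::collections::BTreeSet;

// Ordinal of `term` inside the merged, sorted dictionary.
fn new_ordinal(merged: &[String], term: &str) -> u64 {
    merged
        .binary_search_by(|probe| probe.as_str().cmp(term))
        .expect("term must be present in the merged dictionary") as u64
}

// Merge per-segment sorted dictionaries and build, for each segment,
// the mapping old term ordinal -> new term ordinal.
fn merge_dictionaries(segments: &[Vec<&str>]) -> (Vec<String>, Vec<Vec<u64>>) {
    let merged: Vec<String> = segments
        .iter()
        .flat_map(|terms| terms.iter().map(|term| term.to_string()))
        .collect::<BTreeSet<String>>()
        .into_iter()
        .collect();
    let mappings: Vec<Vec<u64>> = segments
        .iter()
        .map(|terms| terms.iter().map(|&term| new_ordinal(&merged, term)).collect())
        .collect();
    (merged, mappings)
}

fn main() {
    let segments = vec![vec!["apple", "cherry"], vec!["banana", "cherry"]];
    let (merged, mappings) = merge_dictionaries(&segments);
    assert_eq!(merged, vec!["apple", "banana", "cherry"]);
    // Segment 0: old ordinals 0, 1 ("apple", "cherry") -> new ordinals 0, 2.
    assert_eq!(mappings[0], vec![0, 2]);
    assert_eq!(mappings[1], vec![1, 2]);
}
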

View File

@@ -1,8 +1,6 @@
use std::ops::Range; use std::ops::Range;
use common::{BitSet, OwnedBytes, ReadOnlyBitSet}; use crate::{column, ColumnarReader, RowId};
use crate::{ColumnarReader, RowAddr, RowId};
pub struct StackMergeOrder { pub struct StackMergeOrder {
// This does not start at 0. The first row is the number of // This does not start at 0. The first row is the number of
@@ -44,75 +42,19 @@ pub enum MergeRowOrder {
/// rows [r0..n_row_0) contains the rows of columnar_readers[0], in order /// rows [r0..n_row_0) contains the rows of columnar_readers[0], in order
/// rows [n_row_0..n_row_0 + n_row_1) contains the rows of columnar_readers[1], in order. /// rows [n_row_0..n_row_0 + n_row_1) contains the rows of columnar_readers[1], in order.
/// .. /// ..
/// No document is deleted.
Stack(StackMergeOrder), Stack(StackMergeOrder),
/// Some more complex mapping, that may interleave rows from the different readers and /// Some more complex mapping, that can interleave rows from the different readers and
/// drop rows, or do both. /// possibly drop rows.
Shuffled(ShuffleMergeOrder), Complex(()),
}
impl From<StackMergeOrder> for MergeRowOrder {
fn from(stack_merge_order: StackMergeOrder) -> MergeRowOrder {
MergeRowOrder::Stack(stack_merge_order)
}
}
impl From<ShuffleMergeOrder> for MergeRowOrder {
fn from(shuffle_merge_order: ShuffleMergeOrder) -> MergeRowOrder {
MergeRowOrder::Shuffled(shuffle_merge_order)
}
} }
impl MergeRowOrder { impl MergeRowOrder {
pub fn num_rows(&self) -> RowId { pub fn num_rows(&self) -> RowId {
match self { match self {
MergeRowOrder::Stack(stack_row_order) => stack_row_order.num_rows(), MergeRowOrder::Stack(stack_row_order) => stack_row_order.num_rows(),
MergeRowOrder::Shuffled(complex_mapping) => complex_mapping.num_rows(), MergeRowOrder::Complex(_) => {
todo!()
}
} }
} }
} }
pub struct ShuffleMergeOrder {
pub new_row_id_to_old_row_id: Vec<RowAddr>,
pub alive_bitsets: Vec<Option<ReadOnlyBitSet>>,
}
impl ShuffleMergeOrder {
pub fn for_test(
segment_num_rows: &[RowId],
new_row_id_to_old_row_id: Vec<RowAddr>,
) -> ShuffleMergeOrder {
let mut alive_bitsets: Vec<BitSet> = segment_num_rows
.iter()
.map(|&num_rows| BitSet::with_max_value(num_rows))
.collect();
for &RowAddr {
segment_ord,
row_id,
} in &new_row_id_to_old_row_id
{
alive_bitsets[segment_ord as usize].insert(row_id);
}
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = alive_bitsets
.into_iter()
.map(|alive_bitset| {
let mut buffer = Vec::new();
alive_bitset.serialize(&mut buffer).unwrap();
let data = OwnedBytes::new(buffer);
Some(ReadOnlyBitSet::open(data))
})
.collect();
ShuffleMergeOrder {
new_row_id_to_old_row_id,
alive_bitsets,
}
}
pub fn num_rows(&self) -> RowId {
self.new_row_id_to_old_row_id.len() as RowId
}
pub fn iter_new_to_old_row_addrs(&self) -> impl Iterator<Item = RowAddr> + '_ {
self.new_row_id_to_old_row_id.iter().copied()
}
}
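
A shuffle merge order is essentially an explicit list of old row addresses in the order the merged segment should contain them; per-segment alive bitsets can be derived from that list, as ShuffleMergeOrder::for_test does above. A minimal model with tuples and Vec<bool> standing in for RowAddr and the bitset types (ShuffleOrder is invented for the illustration):

struct ShuffleOrder {
    /// Merged row i comes from old address new_to_old[i] = (segment ordinal, old row id).
    new_to_old: Vec<(usize, u32)>,
}

impl ShuffleOrder {
    // Mark as alive exactly the old rows that appear in the mapping.
    fn alive_bitsets(&self, segment_num_rows: &[u32]) -> Vec<Vec<bool>> {
        let mut alive: Vec<Vec<bool>> = segment_num_rows
            .iter()
            .map(|&num_rows| vec![false; num_rows as usize])
            .collect();
        for &(segment_ord, row_id) in &self.new_to_old {
            alive[segment_ord][row_id as usize] = true;
        }
        alive
    }

    fn num_rows(&self) -> u32 {
        self.new_to_old.len() as u32
    }
}

fn main() {
    // Interleave two segments and drop row 1 of segment 0.
    let order = ShuffleOrder { new_to_old: vec![(1, 0), (0, 0), (1, 1)] };
    assert_eq!(order.num_rows(), 3);
    let alive = order.alive_bitsets(&[2, 2]);
    assert_eq!(alive[0], vec![true, false]);
    assert_eq!(alive[1], vec![true, true]);
}
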

View File

@@ -9,54 +9,24 @@ use std::io;
use std::net::Ipv6Addr; use std::net::Ipv6Addr;
use std::sync::Arc; use std::sync::Arc;
pub use merge_mapping::{MergeRowOrder, ShuffleMergeOrder, StackMergeOrder}; pub use merge_mapping::{MergeRowOrder, StackMergeOrder};
use super::writer::ColumnarSerializer; use super::writer::ColumnarSerializer;
use crate::column::{serialize_column_mappable_to_u128, serialize_column_mappable_to_u64}; use crate::column::{serialize_column_mappable_to_u128, serialize_column_mappable_to_u64};
use crate::column_values::MergedColumnValues; use crate::column_index::stack_column_index;
use crate::columnar::column_type::ColumnTypeCategory;
use crate::columnar::merge::merge_dict_column::merge_bytes_or_str_column; use crate::columnar::merge::merge_dict_column::merge_bytes_or_str_column;
use crate::columnar::writer::CompatibleNumericalTypes; use crate::columnar::writer::CompatibleNumericalTypes;
use crate::columnar::ColumnarReader; use crate::columnar::ColumnarReader;
use crate::dynamic_column::DynamicColumn; use crate::dynamic_column::DynamicColumn;
use crate::{ use crate::{
BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues, NumericalType, NumericalValue, BytesColumn, Column, ColumnIndex, ColumnType, ColumnValues, MonotonicallyMappableToU128,
NumericalType, NumericalValue,
}; };
/// Column types are grouped into different categories.
/// After merge, all columns belonging to the same category are coerced to
/// the same column type.
///
/// In practice, only Numerical columns are coerced into one type today.
///
/// See also [README.md].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
enum ColumnTypeCategory {
Bool,
Str,
Numerical,
DateTime,
Bytes,
IpAddr,
}
impl From<ColumnType> for ColumnTypeCategory {
fn from(column_type: ColumnType) -> Self {
match column_type {
ColumnType::I64 => ColumnTypeCategory::Numerical,
ColumnType::U64 => ColumnTypeCategory::Numerical,
ColumnType::F64 => ColumnTypeCategory::Numerical,
ColumnType::Bytes => ColumnTypeCategory::Bytes,
ColumnType::Str => ColumnTypeCategory::Str,
ColumnType::Bool => ColumnTypeCategory::Bool,
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
ColumnType::DateTime => ColumnTypeCategory::DateTime,
}
}
}
pub fn merge_columnar( pub fn merge_columnar(
columnar_readers: &[&ColumnarReader], columnar_readers: &[&ColumnarReader],
merge_row_order: MergeRowOrder, mapping: MergeRowOrder,
output: &mut impl io::Write, output: &mut impl io::Write,
) -> io::Result<()> { ) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(output); let mut serializer = ColumnarSerializer::new(output);
@@ -65,14 +35,9 @@ pub fn merge_columnar(
for ((column_name, column_type), columns) in columns_to_merge { for ((column_name, column_type), columns) in columns_to_merge {
let mut column_serializer = let mut column_serializer =
serializer.serialize_column(column_name.as_bytes(), column_type); serializer.serialize_column(column_name.as_bytes(), column_type);
merge_column( merge_column(column_type, columns, &mapping, &mut column_serializer)?;
column_type,
columns,
&merge_row_order,
&mut column_serializer,
)?;
} }
serializer.finalize(merge_row_order.num_rows())?; serializer.finalize(mapping.num_rows())?;
Ok(()) Ok(())
} }
@@ -88,7 +53,7 @@ fn dynamic_column_to_u64_monotonic(dynamic_column: DynamicColumn) -> Option<Colu
} }
} }
fn merge_column( pub fn merge_column(
column_type: ColumnType, column_type: ColumnType,
columns: Vec<Option<DynamicColumn>>, columns: Vec<Option<DynamicColumn>>,
merge_row_order: &MergeRowOrder, merge_row_order: &MergeRowOrder,
@@ -101,74 +66,63 @@ fn merge_column(
| ColumnType::DateTime | ColumnType::DateTime
| ColumnType::Bool => { | ColumnType::Bool => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len()); let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Option<Arc<dyn ColumnValues>>> = let mut column_values: Vec<Arc<dyn ColumnValues>> = Vec::with_capacity(columns.len());
Vec::with_capacity(columns.len());
for dynamic_column_opt in columns { for dynamic_column_opt in columns {
if let Some(Column { idx, values }) = if let Some(Column { idx, values }) =
dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic) dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic)
{ {
column_indexes.push(Some(idx)); column_indexes.push(Some(idx));
column_values.push(Some(values)); column_values.push(values);
} else { } else {
column_indexes.push(None); column_indexes.push(None);
column_values.push(None);
} }
} }
let merged_column_index = let merged_column_index =
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order); crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
let merge_column_values = MergedColumnValues { serialize_column_mappable_to_u64(merged_column_index, &&column_values[..], wrt)?;
column_indexes: &column_indexes[..],
column_values: &column_values[..],
merge_row_order,
};
serialize_column_mappable_to_u64(merged_column_index, &merge_column_values, wrt)?;
} }
ColumnType::IpAddr => { ColumnType::IpAddr => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len()); let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Option<Arc<dyn ColumnValues<Ipv6Addr>>>> = let mut column_values: Vec<Arc<dyn ColumnValues<Ipv6Addr>>> =
Vec::with_capacity(columns.len()); Vec::with_capacity(columns.len());
let mut num_values = 0;
for dynamic_column_opt in columns { for dynamic_column_opt in columns {
if let Some(DynamicColumn::IpAddr(Column { idx, values })) = dynamic_column_opt { if let Some(DynamicColumn::IpAddr(Column { idx, values })) = dynamic_column_opt {
num_values += values.num_vals();
column_indexes.push(Some(idx)); column_indexes.push(Some(idx));
column_values.push(Some(values)); column_values.push(values);
} else { } else {
column_indexes.push(None); column_indexes.push(None);
column_values.push(None);
} }
} }
let merged_column_index = let merged_column_index =
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order); crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
let merge_column_values = MergedColumnValues { serialize_column_mappable_to_u128(
column_indexes: &column_indexes[..], merged_column_index,
column_values: &column_values, &&column_values[..],
merge_row_order, num_values,
}; wrt,
)?;
serialize_column_mappable_to_u128(merged_column_index, &merge_column_values, wrt)?;
} }
ColumnType::Bytes | ColumnType::Str => { ColumnType::Bytes | ColumnType::Str => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len()); let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut bytes_columns: Vec<Option<BytesColumn>> = Vec::with_capacity(columns.len()); let mut bytes_columns: Vec<BytesColumn> = Vec::with_capacity(columns.len());
for dynamic_column_opt in columns { for dynamic_column_opt in columns {
match dynamic_column_opt { match dynamic_column_opt {
Some(DynamicColumn::Str(str_column)) => { Some(DynamicColumn::Str(str_column)) => {
column_indexes.push(Some(str_column.term_ord_column.idx.clone())); column_indexes.push(Some(str_column.term_ord_column.idx.clone()));
bytes_columns.push(Some(str_column.into())); bytes_columns.push(str_column.into());
} }
Some(DynamicColumn::Bytes(bytes_column)) => { Some(DynamicColumn::Bytes(bytes_column)) => {
column_indexes.push(Some(bytes_column.term_ord_column.idx.clone())); column_indexes.push(Some(bytes_column.term_ord_column.idx.clone()));
bytes_columns.push(Some(bytes_column)); bytes_columns.push(bytes_column);
}
_ => {
column_indexes.push(None);
bytes_columns.push(None);
} }
_ => column_indexes.push(None),
} }
} }
let merged_column_index = let merged_column_index =
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order); crate::column_index::stack_column_index(&column_indexes[..], merge_row_order);
merge_bytes_or_str_column(merged_column_index, &bytes_columns, merge_row_order, wrt)?; merge_bytes_or_str_column(merged_column_index, &bytes_columns, wrt)?;
} }
} }
Ok(()) Ok(())

View File

@@ -0,0 +1,169 @@
use std::cmp;
use fastfield_codecs::Column;
use super::flat_map_with_buffer::FlatMapWithBufferIter;
use crate::fastfield::{MultiValueIndex, MultiValuedFastFieldReader};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::{DocAddress, SegmentReader};
pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: Vec<MultiValuedFastFieldReader<u64>>,
min_value: u64,
max_value: u64,
num_vals: u32,
}
impl<'a> RemappedDocIdMultiValueColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
field: &str,
) -> Self {
// Our values are bitpacked and we need to know what should be
// our bitwidth and our minimum value before serializing any values.
//
// Computing those is non-trivial if some documents are deleted.
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Serializer.
let mut num_vals = 0;
let mut min_value = u64::MAX;
let mut max_value = u64::MIN;
let mut vals = Vec::new();
let mut fast_field_readers = Vec::with_capacity(readers.len());
for reader in readers {
let ff_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.typed_fast_field_multi_reader::<u64>(field)
.expect(
"Failed to find multivalued fast field reader. This is a bug in tantivy. \
Please report.",
);
for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
for &val in &vals {
min_value = cmp::min(val, min_value);
max_value = cmp::max(val, max_value);
}
num_vals += vals.len();
}
fast_field_readers.push(ff_reader);
// TODO optimize when no deletes
}
if min_value > max_value {
min_value = 0;
max_value = 0;
}
RemappedDocIdMultiValueColumn {
doc_id_mapping,
fast_field_readers,
min_value,
max_value,
num_vals: num_vals as u32,
}
}
}
impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.doc_id_mapping
.iter_old_doc_addrs()
.flat_map_with_buffer(|old_doc_addr: DocAddress, buffer| {
let ff_reader = &self.fast_field_readers[old_doc_addr.segment_ord as usize];
ff_reader.get_vals(old_doc_addr.doc_id, buffer);
}),
)
}
fn min_value(&self) -> u64 {
self.min_value
}
fn max_value(&self) -> u64 {
self.max_value
}
fn num_vals(&self) -> u32 {
self.num_vals
}
}
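The iterator above visits documents in their new order, asks the owning segment's reader for each old document's values, and flattens the result, reusing one buffer to avoid per-document allocation. A self-contained sketch of the same flow, with a plain closure standing in for the fast field reader:

// `get_vals` is a stand-in closure, not the tantivy fast field reader.
fn remapped_values(
    new_doc_order: &[(usize, u32)], // (segment_ord, old_doc_id), in the new order
    get_vals: impl Fn(usize, u32, &mut Vec<u64>),
) -> Vec<u64> {
    let mut buffer = Vec::new();
    let mut out = Vec::new();
    for &(segment_ord, old_doc) in new_doc_order {
        buffer.clear();
        get_vals(segment_ord, old_doc, &mut buffer); // fill the buffer with this doc's values
        out.extend_from_slice(&buffer);
    }
    out
}

fn main() {
    // Two "segments" with per-document multi-values.
    let segments: Vec<Vec<Vec<u64>>> = vec![vec![vec![1, 2], vec![]], vec![vec![9]]];
    let new_order = [(1usize, 0u32), (0, 0), (0, 1)];
    let flat = remapped_values(&new_order, |seg, doc, buf| {
        buf.extend_from_slice(&segments[seg][doc as usize])
    });
    assert_eq!(flat, vec![9, 1, 2]);
}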
pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
multi_value_length_readers: Vec<&'a MultiValueIndex>,
min_value: u64,
max_value: u64,
num_vals: u32,
}
impl<'a> RemappedDocIdMultiValueIndexColumn<'a> {
pub(crate) fn new(
segment_and_ff_readers: &'a [(&'a SegmentReader, &'a MultiValueIndex)],
doc_id_mapping: &'a SegmentDocIdMapping,
) -> Self {
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Column.
let mut num_vals = 0;
let min_value = 0;
let mut max_value = 0;
let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
for segment_and_ff_reader in segment_and_ff_readers {
let segment_reader = segment_and_ff_reader.0;
let multi_value_length_reader = segment_and_ff_reader.1;
if !segment_reader.has_deletes() {
max_value += multi_value_length_reader.total_num_vals() as u64;
} else {
for doc in segment_reader.doc_ids_alive() {
max_value += multi_value_length_reader.num_vals_for_doc(doc) as u64;
}
}
num_vals += segment_reader.num_docs();
multi_value_length_readers.push(multi_value_length_reader);
}
// The value range is always get_val(doc)..get_val(doc + 1)
num_vals += 1;
Self {
doc_id_mapping,
multi_value_length_readers,
min_value,
max_value,
num_vals,
}
}
}
impl<'a> Column for RemappedDocIdMultiValueIndexColumn<'a> {
fn get_val(&self, _pos: u32) -> u64 {
unimplemented!()
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
let mut offset = 0;
Box::new(
std::iter::once(0).chain(self.doc_id_mapping.iter_old_doc_addrs().map(
move |old_doc_addr| {
let ff_reader =
&self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
offset += ff_reader.num_vals_for_doc(old_doc_addr.doc_id);
offset as u64
},
)),
)
}
fn min_value(&self) -> u64 {
self.min_value
}
fn max_value(&self) -> u64 {
self.max_value
}
fn num_vals(&self) -> u32 {
self.num_vals
}
}
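The index column produced here is a prefix sum: a leading 0 followed by the running total of per-document value counts, so a document's values occupy offsets[doc]..offsets[doc + 1] (which is why num_vals gets the extra + 1). A std-only sketch:

// One leading 0, then the running sum of per-document value counts.
fn value_offsets(num_vals_per_doc: &[u32]) -> Vec<u64> {
    let mut offset = 0u64;
    std::iter::once(0u64)
        .chain(num_vals_per_doc.iter().map(|&n| {
            offset += n as u64;
            offset
        }))
        .collect()
}

fn main() {
    let offsets = value_offsets(&[2, 0, 3]);
    assert_eq!(offsets, vec![0, 2, 2, 5]);
    // Doc 2's values live at positions offsets[2]..offsets[3], i.e. 2..5.
}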

View File

@@ -12,7 +12,7 @@ fn make_columnar<T: Into<NumericalValue> + HasAssociatedColumnType + Copy>(
} }
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer dataframe_writer
.serialize(vals.len() as RowId, None, &mut buffer) .serialize(vals.len() as RowId, &mut buffer)
.unwrap(); .unwrap();
ColumnarReader::open(buffer).unwrap() ColumnarReader::open(buffer).unwrap()
} }
@@ -90,9 +90,7 @@ fn make_numerical_columnar_multiple_columns(
.max() .max()
.unwrap_or(0u32); .unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
.serialize(num_rows, None, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap() ColumnarReader::open(buffer).unwrap()
} }
@@ -111,9 +109,7 @@ fn make_byte_columnar_multiple_columns(columns: &[(&str, &[&[&[u8]]])]) -> Colum
.max() .max()
.unwrap_or(0u32); .unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
.serialize(num_rows, None, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap() ColumnarReader::open(buffer).unwrap()
} }
@@ -132,9 +128,7 @@ fn make_text_columnar_multiple_columns(columns: &[(&str, &[&[&str]])]) -> Column
.max() .max()
.unwrap_or(0u32); .unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
.serialize(num_rows, None, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap() ColumnarReader::open(buffer).unwrap()
} }

View File

@@ -6,6 +6,6 @@ mod reader;
mod writer; mod writer;
pub use column_type::{ColumnType, HasAssociatedColumnType}; pub use column_type::{ColumnType, HasAssociatedColumnType};
pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder}; pub use merge::{merge_columnar, MergeRowOrder, StackMergeOrder};
pub use reader::ColumnarReader; pub use reader::ColumnarReader;
pub use writer::ColumnarWriter; pub use writer::ColumnarWriter;

View File

@@ -137,7 +137,7 @@ mod tests {
columnar_writer.record_column_type("col1", ColumnType::Str, false); columnar_writer.record_column_type("col1", ColumnType::Str, false);
columnar_writer.record_column_type("col2", ColumnType::U64, false); columnar_writer.record_column_type("col2", ColumnType::U64, false);
let mut buffer = Vec::new(); let mut buffer = Vec::new();
columnar_writer.serialize(1, None, &mut buffer).unwrap(); columnar_writer.serialize(1, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
let columns = columnar.list_columns().unwrap(); let columns = columnar.list_columns().unwrap();
assert_eq!(columns.len(), 2); assert_eq!(columns.len(), 2);
@@ -153,7 +153,7 @@ mod tests {
columnar_writer.record_column_type("count", ColumnType::U64, false); columnar_writer.record_column_type("count", ColumnType::U64, false);
columnar_writer.record_numerical(1, "count", 1u64); columnar_writer.record_numerical(1, "count", 1u64);
let mut buffer = Vec::new(); let mut buffer = Vec::new();
columnar_writer.serialize(2, None, &mut buffer).unwrap(); columnar_writer.serialize(2, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
let columns = columnar.list_columns().unwrap(); let columns = columnar.list_columns().unwrap();
assert_eq!(columns.len(), 1); assert_eq!(columns.len(), 1);

View File

@@ -41,31 +41,10 @@ impl ColumnWriter {
pub(super) fn operation_iterator<'a, V: SymbolValue>( pub(super) fn operation_iterator<'a, V: SymbolValue>(
&self, &self,
arena: &MemoryArena, arena: &MemoryArena,
old_to_new_ids_opt: Option<&[RowId]>,
buffer: &'a mut Vec<u8>, buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<V>> + 'a { ) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
buffer.clear(); buffer.clear();
self.values.read_to_end(arena, buffer); self.values.read_to_end(arena, buffer);
if let Some(old_to_new_ids) = old_to_new_ids_opt {
// TODO avoid the extra deserialization / serialization.
let mut sorted_ops: Vec<(RowId, ColumnOperation<V>)> = Vec::new();
let mut new_doc = 0u32;
let mut cursor = &buffer[..];
for op in std::iter::from_fn(|| ColumnOperation::<V>::deserialize(&mut cursor)) {
if let ColumnOperation::NewDoc(doc) = &op {
new_doc = old_to_new_ids[*doc as usize];
sorted_ops.push((new_doc, ColumnOperation::NewDoc(new_doc)));
} else {
sorted_ops.push((new_doc, op));
}
}
// stable sort is crucial here.
sorted_ops.sort_by_key(|(new_doc_id, _)| *new_doc_id);
buffer.clear();
for (_, op) in sorted_ops {
buffer.extend_from_slice(op.serialize().as_ref());
}
}
let mut cursor: &[u8] = &buffer[..]; let mut cursor: &[u8] = &buffer[..];
std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor)) std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
} }
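The remaining body of operation_iterator is the usual cursor-plus-std::iter::from_fn pattern: a deserializer consumes bytes from a shrinking slice and returns None when the slice is exhausted. A self-contained sketch using a toy fixed-width record format rather than the real ColumnOperation encoding:

// Toy record format: one little-endian u32 per record.
fn read_u32(cursor: &mut &[u8]) -> Option<u32> {
    if cursor.len() < 4 {
        return None; // end of the buffer, iteration stops here
    }
    let (head, rest) = cursor.split_at(4);
    *cursor = rest; // advance the cursor
    Some(u32::from_le_bytes(head.try_into().unwrap()))
}

fn main() {
    let buffer: Vec<u8> = [1u32, 2, 3].iter().flat_map(|v| v.to_le_bytes()).collect();
    let mut cursor: &[u8] = &buffer[..];
    let decoded: Vec<u32> = std::iter::from_fn(|| read_u32(&mut cursor)).collect();
    assert_eq!(decoded, vec![1, 2, 3]);
}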
@@ -210,12 +189,10 @@ impl CompatibleNumericalTypes {
} }
impl NumericalColumnWriter { impl NumericalColumnWriter {
pub fn numerical_type(&self) -> NumericalType { pub fn column_type_and_cardinality(&self, num_docs: RowId) -> (NumericalType, Cardinality) {
self.compatible_numerical_types.to_numerical_type() let numerical_type = self.compatible_numerical_types.to_numerical_type();
} let cardinality = self.column_writer.get_cardinality(num_docs);
(numerical_type, cardinality)
pub fn cardinality(&self, num_docs: RowId) -> Cardinality {
self.column_writer.get_cardinality(num_docs)
} }
pub fn record_numerical_value( pub fn record_numerical_value(
@@ -231,11 +208,9 @@ impl NumericalColumnWriter {
pub(super) fn operation_iterator<'a>( pub(super) fn operation_iterator<'a>(
self, self,
arena: &MemoryArena, arena: &MemoryArena,
old_to_new_ids: Option<&[RowId]>,
buffer: &'a mut Vec<u8>, buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a { ) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
self.column_writer self.column_writer.operation_iterator(arena, buffer)
.operation_iterator(arena, old_to_new_ids, buffer)
} }
} }
@@ -276,11 +251,9 @@ impl StrOrBytesColumnWriter {
pub(super) fn operation_iterator<'a>( pub(super) fn operation_iterator<'a>(
&self, &self,
arena: &MemoryArena, arena: &MemoryArena,
old_to_new_ids: Option<&[RowId]>,
byte_buffer: &'a mut Vec<u8>, byte_buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a { ) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
self.column_writer self.column_writer.operation_iterator(arena, byte_buffer)
.operation_iterator(arena, old_to_new_ids, byte_buffer)
} }
} }

View File

@@ -16,7 +16,7 @@ use crate::column_index::SerializableColumnIndex;
use crate::column_values::{ use crate::column_values::{
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn, ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
}; };
use crate::columnar::column_type::ColumnType; use crate::columnar::column_type::{ColumnType, ColumnTypeCategory};
use crate::columnar::writer::column_writers::{ use crate::columnar::writer::column_writers::{
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter, ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
}; };
@@ -45,7 +45,7 @@ struct SpareBuffers {
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple"); /// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh we ended up mixing integers and floats. /// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh we ended up mixing integers and floats.
/// let mut wrt: Vec<u8> = Vec::new(); /// let mut wrt: Vec<u8> = Vec::new();
/// columnar_writer.serialize(2u32, None, &mut wrt).unwrap(); /// columnar_writer.serialize(2u32, &mut wrt).unwrap();
/// ``` /// ```
pub struct ColumnarWriter { pub struct ColumnarWriter {
numerical_field_hash_map: ArenaHashMap, numerical_field_hash_map: ArenaHashMap,
@@ -104,48 +104,6 @@ impl ColumnarWriter {
+ self.datetime_field_hash_map.mem_usage() + self.datetime_field_hash_map.mem_usage()
} }
/// Returns the list of doc ids from 0..num_docs sorted by the `sort_field`
/// column.
///
/// If the column is multivalued, use the first value for scoring.
/// If no value is associated to a specific row, the document is assigned
/// the lowest possible score.
///
/// The sort applied is stable.
pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> {
let Some(numerical_col_writer) =
self.numerical_field_hash_map.get::<NumericalColumnWriter>(sort_field.as_bytes()) else {
return Vec::new();
};
let mut symbols_buffer = Vec::new();
let mut values = Vec::new();
let mut last_doc_opt: Option<RowId> = None;
for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) {
match op {
ColumnOperation::NewDoc(doc) => {
last_doc_opt = Some(doc);
}
ColumnOperation::Value(numerical_value) => {
if let Some(last_doc) = last_doc_opt {
let score: f32 = f64::coerce(numerical_value) as f32;
values.push((score, last_doc));
}
}
}
}
for doc in values.len() as u32..num_docs {
values.push((0.0f32, doc));
}
values.sort_by(|(left_score, _), (right_score, _)| {
if reversed {
right_score.partial_cmp(left_score).unwrap()
} else {
left_score.partial_cmp(right_score).unwrap()
}
});
values.into_iter().map(|(_score, doc)| doc).collect()
}
/// Records a column type. This is useful to bypass the coercion process, /// Records a column type. This is useful to bypass the coercion process,
/// makes sure the column is present in the resulting columnar even when empty, or set /// makes sure the column is present in the resulting columnar even when empty, or set
/// the `sort_values_within_row`. /// the `sort_values_within_row`.
@@ -320,47 +278,37 @@ impl ColumnarWriter {
}, },
); );
} }
pub fn serialize( pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
&mut self,
num_docs: RowId,
old_to_new_row_ids: Option<&[RowId]>,
wrt: &mut dyn io::Write,
) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(wrt); let mut serializer = ColumnarSerializer::new(wrt);
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self let mut columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
.numerical_field_hash_map .numerical_field_hash_map
.iter() .iter()
.map(|(column_name, addr, _)| { .map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Numerical, addr))
let numerical_column_writer: NumericalColumnWriter =
self.numerical_field_hash_map.read(addr);
let column_type = numerical_column_writer.numerical_type().into();
(column_name, column_type, addr)
})
.collect(); .collect();
columns.extend( columns.extend(
self.bytes_field_hash_map self.bytes_field_hash_map
.iter() .iter()
.map(|(term, addr, _)| (term, ColumnType::Bytes, addr)), .map(|(term, addr, _)| (term, ColumnTypeCategory::Bytes, addr)),
); );
columns.extend( columns.extend(
self.str_field_hash_map self.str_field_hash_map
.iter() .iter()
.map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)), .map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Str, addr)),
); );
columns.extend( columns.extend(
self.bool_field_hash_map self.bool_field_hash_map
.iter() .iter()
.map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)), .map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Bool, addr)),
); );
columns.extend( columns.extend(
self.ip_addr_field_hash_map self.ip_addr_field_hash_map
.iter() .iter()
.map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)), .map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::IpAddr, addr)),
); );
columns.extend( columns.extend(
self.datetime_field_hash_map self.datetime_field_hash_map
.iter() .iter()
.map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)), .map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::DateTime, addr)),
); );
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type)); columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
@@ -368,24 +316,20 @@ impl ColumnarWriter {
let mut symbol_byte_buffer: Vec<u8> = Vec::new(); let mut symbol_byte_buffer: Vec<u8> = Vec::new();
for (column_name, column_type, addr) in columns { for (column_name, column_type, addr) in columns {
match column_type { match column_type {
ColumnType::Bool => { ColumnTypeCategory::Bool => {
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr); let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs); let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer = let mut column_serializer =
serializer.serialize_column(column_name, column_type); serializer.serialize_column(column_name, ColumnType::Bool);
serialize_bool_column( serialize_bool_column(
cardinality, cardinality,
num_docs, num_docs,
column_writer.operation_iterator( column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers, buffers,
&mut column_serializer, &mut column_serializer,
)?; )?;
} }
ColumnType::IpAddr => { ColumnTypeCategory::IpAddr => {
let column_writer: ColumnWriter = self.ip_addr_field_hash_map.read(addr); let column_writer: ColumnWriter = self.ip_addr_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs); let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer = let mut column_serializer =
@@ -393,64 +337,50 @@ impl ColumnarWriter {
serialize_ip_addr_column( serialize_ip_addr_column(
cardinality, cardinality,
num_docs, num_docs,
column_writer.operation_iterator( column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers, buffers,
&mut column_serializer, &mut column_serializer,
)?; )?;
} }
ColumnType::Bytes | ColumnType::Str => { ColumnTypeCategory::Bytes | ColumnTypeCategory::Str => {
let str_or_bytes_column_writer: StrOrBytesColumnWriter = let (column_type, str_column_writer): (ColumnType, StrOrBytesColumnWriter) =
if column_type == ColumnType::Bytes { if column_type == ColumnTypeCategory::Bytes {
self.bytes_field_hash_map.read(addr) (ColumnType::Bytes, self.bytes_field_hash_map.read(addr))
} else { } else {
self.str_field_hash_map.read(addr) (ColumnType::Str, self.str_field_hash_map.read(addr))
}; };
let dictionary_builder = let dictionary_builder =
&dictionaries[str_or_bytes_column_writer.dictionary_id as usize]; &dictionaries[str_column_writer.dictionary_id as usize];
let cardinality = str_or_bytes_column_writer let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
.column_writer
.get_cardinality(num_docs);
let mut column_serializer = let mut column_serializer =
serializer.serialize_column(column_name, column_type); serializer.serialize_column(column_name, column_type);
serialize_bytes_or_str_column( serialize_bytes_or_str_column(
cardinality, cardinality,
num_docs, num_docs,
str_or_bytes_column_writer.sort_values_within_row, str_column_writer.sort_values_within_row,
dictionary_builder, dictionary_builder,
str_or_bytes_column_writer.operation_iterator( str_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers, buffers,
&mut column_serializer, &mut column_serializer,
)?; )?;
} }
ColumnType::F64 | ColumnType::I64 | ColumnType::U64 => { ColumnTypeCategory::Numerical => {
let numerical_column_writer: NumericalColumnWriter = let numerical_column_writer: NumericalColumnWriter =
self.numerical_field_hash_map.read(addr); self.numerical_field_hash_map.read(addr);
let cardinality = numerical_column_writer.cardinality(num_docs); let (numerical_type, cardinality) =
numerical_column_writer.column_type_and_cardinality(num_docs);
let mut column_serializer = let mut column_serializer =
serializer.serialize_column(column_name, column_type); serializer.serialize_column(column_name, ColumnType::from(numerical_type));
let numerical_type = column_type.numerical_type().unwrap();
serialize_numerical_column( serialize_numerical_column(
cardinality, cardinality,
num_docs, num_docs,
numerical_type, numerical_type,
numerical_column_writer.operation_iterator( numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers, buffers,
&mut column_serializer, &mut column_serializer,
)?; )?;
} }
ColumnType::DateTime => { ColumnTypeCategory::DateTime => {
let column_writer: ColumnWriter = self.datetime_field_hash_map.read(addr); let column_writer: ColumnWriter = self.datetime_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs); let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer = let mut column_serializer =
@@ -459,11 +389,7 @@ impl ColumnarWriter {
cardinality, cardinality,
num_docs, num_docs,
NumericalType::I64, NumericalType::I64,
column_writer.operation_iterator( column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
buffers, buffers,
&mut column_serializer, &mut column_serializer,
)?; )?;
@@ -662,6 +588,7 @@ where
crate::column::serialize_column_mappable_to_u128( crate::column::serialize_column_mappable_to_u128(
serializable_column_index, serializable_column_index,
&&values[..], &&values[..],
values.len() as u32,
&mut wrt, &mut wrt,
)?; )?;
Ok(()) Ok(())
@@ -772,7 +699,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full); assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
let mut buffer = Vec::new(); let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, None, &mut buffer) .operation_iterator(&mut arena, &mut buffer)
.collect(); .collect();
assert_eq!(symbols.len(), 6); assert_eq!(symbols.len(), 6);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32))); assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
@@ -801,7 +728,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional); assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
let mut buffer = Vec::new(); let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, None, &mut buffer) .operation_iterator(&mut arena, &mut buffer)
.collect(); .collect();
assert_eq!(symbols.len(), 4); assert_eq!(symbols.len(), 4);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32))); assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
@@ -824,7 +751,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional); assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
let mut buffer = Vec::new(); let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, None, &mut buffer) .operation_iterator(&mut arena, &mut buffer)
.collect(); .collect();
assert_eq!(symbols.len(), 2); assert_eq!(symbols.len(), 2);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32))); assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
@@ -843,7 +770,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued); assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
let mut buffer = Vec::new(); let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, None, &mut buffer) .operation_iterator(&mut arena, &mut buffer)
.collect(); .collect();
assert_eq!(symbols.len(), 3); assert_eq!(symbols.len(), 3);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32))); assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));

View File

@@ -235,8 +235,10 @@ impl DynamicColumnHandle {
fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> { fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
let dynamic_column: DynamicColumn = match self.column_type { let dynamic_column: DynamicColumn = match self.column_type {
ColumnType::Bytes => crate::column::open_column_bytes(column_bytes)?.into(), ColumnType::Bytes => {
ColumnType::Str => crate::column::open_column_str(column_bytes)?.into(), crate::column::open_column_bytes::<BytesColumn>(column_bytes)?.into()
}
ColumnType::Str => crate::column::open_column_bytes::<StrColumn>(column_bytes)?.into(),
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(), ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(), ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(), ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),

View File

@@ -10,7 +10,7 @@ extern crate test;
use std::io; use std::io;
mod column; mod column;
pub mod column_index; mod column_index;
pub mod column_values; pub mod column_values;
mod columnar; mod columnar;
mod dictionary; mod dictionary;
@@ -24,7 +24,7 @@ pub use column_index::ColumnIndex;
pub use column_values::{ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64}; pub use column_values::{ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64};
pub use columnar::{ pub use columnar::{
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType, merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder, MergeRowOrder, StackMergeOrder,
}; };
use sstable::VoidSSTable; use sstable::VoidSSTable;
pub use value::{NumericalType, NumericalValue}; pub use value::{NumericalType, NumericalValue};
@@ -32,13 +32,6 @@ pub use value::{NumericalType, NumericalValue};
pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle}; pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle};
pub type RowId = u32; pub type RowId = u32;
#[derive(Clone, Copy)]
pub struct RowAddr {
pub segment_ord: u32,
pub row_id: RowId,
}
pub use sstable::Dictionary; pub use sstable::Dictionary;
pub type Streamer<'a> = sstable::Streamer<'a, VoidSSTable>; pub type Streamer<'a> = sstable::Streamer<'a, VoidSSTable>;
@@ -47,12 +40,6 @@ pub struct DateTime {
pub timestamp_micros: i64, pub timestamp_micros: i64,
} }
impl DateTime {
pub fn into_timestamp_micros(self) -> i64 {
self.timestamp_micros
}
}
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct InvalidData; pub struct InvalidData;

View File

@@ -12,7 +12,7 @@ fn test_dataframe_writer_str() {
dataframe_writer.record_str(1u32, "my_string", "hello"); dataframe_writer.record_str(1u32, "my_string", "hello");
dataframe_writer.record_str(3u32, "my_string", "helloeee"); dataframe_writer.record_str(3u32, "my_string", "helloeee");
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap(); dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
@@ -26,7 +26,7 @@ fn test_dataframe_writer_bytes() {
dataframe_writer.record_bytes(1u32, "my_string", b"hello"); dataframe_writer.record_bytes(1u32, "my_string", b"hello");
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee"); dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap(); dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
@@ -40,7 +40,7 @@ fn test_dataframe_writer_bool() {
dataframe_writer.record_bool(1u32, "bool.value", false); dataframe_writer.record_bool(1u32, "bool.value", false);
dataframe_writer.record_bool(3u32, "bool.value", true); dataframe_writer.record_bool(3u32, "bool.value", true);
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap(); dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
@@ -63,7 +63,7 @@ fn test_dataframe_writer_u64_multivalued() {
dataframe_writer.record_numerical(6u32, "divisor", 2u64); dataframe_writer.record_numerical(6u32, "divisor", 2u64);
dataframe_writer.record_numerical(6u32, "divisor", 3u64); dataframe_writer.record_numerical(6u32, "divisor", 3u64);
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(7, None, &mut buffer).unwrap(); dataframe_writer.serialize(7, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
@@ -84,7 +84,7 @@ fn test_dataframe_writer_ip_addr() {
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001)); dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050)); dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap(); dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
@@ -113,7 +113,7 @@ fn test_dataframe_writer_numerical() {
dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64)); dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64)); dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(6, None, &mut buffer).unwrap(); dataframe_writer.serialize(6, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap(); let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("srical.value").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("srical.value").unwrap();
@@ -144,7 +144,7 @@ fn test_dictionary_encoded_str() {
columnar_writer.record_str(3, "my.column", "c"); columnar_writer.record_str(3, "my.column", "c");
columnar_writer.record_str(3, "my.column2", "different_column!"); columnar_writer.record_str(3, "my.column2", "different_column!");
columnar_writer.record_str(4, "my.column", "b"); columnar_writer.record_str(4, "my.column", "b");
columnar_writer.serialize(5, None, &mut buffer).unwrap(); columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap(); let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2); assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap(); let col_handles = columnar_reader.read_columns("my.column").unwrap();
@@ -176,7 +176,7 @@ fn test_dictionary_encoded_bytes() {
columnar_writer.record_bytes(3, "my.column", b"c"); columnar_writer.record_bytes(3, "my.column", b"c");
columnar_writer.record_bytes(3, "my.column2", b"different_column!"); columnar_writer.record_bytes(3, "my.column2", b"different_column!");
columnar_writer.record_bytes(4, "my.column", b"b"); columnar_writer.record_bytes(4, "my.column", b"b");
columnar_writer.serialize(5, None, &mut buffer).unwrap(); columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap(); let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2); assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap(); let col_handles = columnar_reader.read_columns("my.column").unwrap();
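Putting the calls from these tests together gives a small write-then-read roundtrip. Constructing the writer via Default is an assumption (the tests only show the record/serialize/open calls), and the two sides of this diff disagree on the serialize signature; the sketch follows the two-argument form:

use columnar::{ColumnarReader, ColumnarWriter, DynamicColumnHandle};

fn main() {
    // Writer construction via Default is an assumption.
    let mut columnar_writer = ColumnarWriter::default();
    columnar_writer.record_str(1u32, "my_string", "hello");
    columnar_writer.record_str(3u32, "my_string", "helloeee");
    let mut buffer: Vec<u8> = Vec::new();
    // Two-argument form; the other side of the diff adds an optional
    // doc-id mapping between num_docs and the writer.
    columnar_writer.serialize(5, &mut buffer).unwrap();
    let columnar_reader = ColumnarReader::open(buffer).unwrap();
    assert_eq!(columnar_reader.num_columns(), 1);
    let cols: Vec<DynamicColumnHandle> = columnar_reader.read_columns("my_string").unwrap();
    assert_eq!(cols.len(), 1);
}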

View File

@@ -2,8 +2,9 @@
use std::rc::Rc; use std::rc::Rc;
use std::sync::atomic::AtomicU32; use std::sync::atomic::AtomicU32;
use std::sync::Arc;
use columnar::{Column, StrColumn}; use fastfield_codecs::Column;
use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation}; use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation}; use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
@@ -13,8 +14,9 @@ use super::metric::{
}; };
use super::segment_agg_result::BucketCount; use super::segment_agg_result::BucketCount;
use super::VecWithNames; use super::VecWithNames;
use crate::fastfield::{type_and_cardinality, MultiValuedFastFieldReader};
use crate::schema::Type; use crate::schema::Type;
use crate::{SegmentReader, TantivyError}; use crate::{InvertedIndexReader, SegmentReader, TantivyError};
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub(crate) struct AggregationsWithAccessor { pub(crate) struct AggregationsWithAccessor {
@@ -35,12 +37,38 @@ impl AggregationsWithAccessor {
} }
} }
#[derive(Clone)]
pub(crate) enum FastFieldAccessor {
Multi(MultiValuedFastFieldReader<u64>),
Single(Arc<dyn Column<u64>>),
}
impl FastFieldAccessor {
pub fn as_single(&self) -> Option<&dyn Column<u64>> {
match self {
FastFieldAccessor::Multi(_) => None,
FastFieldAccessor::Single(reader) => Some(&**reader),
}
}
pub fn into_single(self) -> Option<Arc<dyn Column<u64>>> {
match self {
FastFieldAccessor::Multi(_) => None,
FastFieldAccessor::Single(reader) => Some(reader),
}
}
pub fn as_multi(&self) -> Option<&MultiValuedFastFieldReader<u64>> {
match self {
FastFieldAccessor::Multi(reader) => Some(reader),
FastFieldAccessor::Single(_) => None,
}
}
}
#[derive(Clone)] #[derive(Clone)]
pub struct BucketAggregationWithAccessor { pub struct BucketAggregationWithAccessor {
/// In general there can be buckets without fast field access, e.g. buckets that are created /// In general there can be buckets without fast field access, e.g. buckets that are created
/// based on search terms. So eventually this needs to be Option or moved. /// based on search terms. So eventually this needs to be Option or moved.
pub(crate) accessor: Column<u64>, pub(crate) accessor: FastFieldAccessor,
pub(crate) str_dict_column: Option<StrColumn>, pub(crate) inverted_index: Option<Arc<InvertedIndexReader>>,
pub(crate) field_type: Type, pub(crate) field_type: Type,
pub(crate) bucket_agg: BucketAggregationType, pub(crate) bucket_agg: BucketAggregationType,
pub(crate) sub_aggregation: AggregationsWithAccessor, pub(crate) sub_aggregation: AggregationsWithAccessor,
@@ -55,19 +83,20 @@ impl BucketAggregationWithAccessor {
bucket_count: Rc<AtomicU32>, bucket_count: Rc<AtomicU32>,
max_bucket_count: u32, max_bucket_count: u32,
) -> crate::Result<BucketAggregationWithAccessor> { ) -> crate::Result<BucketAggregationWithAccessor> {
let mut str_dict_column = None; let mut inverted_index = None;
let (accessor, field_type) = match &bucket { let (accessor, field_type) = match &bucket {
BucketAggregationType::Range(RangeAggregation { BucketAggregationType::Range(RangeAggregation {
field: field_name, .. field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name)?, }) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
BucketAggregationType::Histogram(HistogramAggregation { BucketAggregationType::Histogram(HistogramAggregation {
field: field_name, .. field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name)?, }) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
BucketAggregationType::Terms(TermsAggregation { BucketAggregationType::Terms(TermsAggregation {
field: field_name, .. field: field_name, ..
}) => { }) => {
str_dict_column = reader.fast_fields().str(&field_name)?; let field = reader.schema().get_field(field_name)?;
get_ff_reader_and_validate(reader, field_name)? inverted_index = Some(reader.inverted_index(field)?);
get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
} }
}; };
let sub_aggregation = sub_aggregation.clone(); let sub_aggregation = sub_aggregation.clone();
@@ -81,7 +110,7 @@ impl BucketAggregationWithAccessor {
max_bucket_count, max_bucket_count,
)?, )?,
bucket_agg: bucket.clone(), bucket_agg: bucket.clone(),
str_dict_column, inverted_index,
bucket_count: BucketCount { bucket_count: BucketCount {
bucket_count, bucket_count,
max_bucket_count, max_bucket_count,
@@ -95,7 +124,7 @@ impl BucketAggregationWithAccessor {
pub struct MetricAggregationWithAccessor { pub struct MetricAggregationWithAccessor {
pub metric: MetricAggregation, pub metric: MetricAggregation,
pub field_type: Type, pub field_type: Type,
pub accessor: Column<u64>, pub accessor: Arc<dyn Column>,
} }
impl MetricAggregationWithAccessor { impl MetricAggregationWithAccessor {
@@ -110,10 +139,13 @@ impl MetricAggregationWithAccessor {
| MetricAggregation::Min(MinAggregation { field: field_name }) | MetricAggregation::Min(MinAggregation { field: field_name })
| MetricAggregation::Stats(StatsAggregation { field: field_name }) | MetricAggregation::Stats(StatsAggregation { field: field_name })
| MetricAggregation::Sum(SumAggregation { field: field_name }) => { | MetricAggregation::Sum(SumAggregation { field: field_name }) => {
let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?; let (accessor, field_type) =
get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?;
Ok(MetricAggregationWithAccessor { Ok(MetricAggregationWithAccessor {
accessor, accessor: accessor
.into_single()
.expect("unexpected fast field cardinality"),
field_type, field_type,
metric: metric.clone(), metric: metric.clone(),
}) })
@@ -158,22 +190,32 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
fn get_ff_reader_and_validate( fn get_ff_reader_and_validate(
reader: &SegmentReader, reader: &SegmentReader,
field_name: &str, field_name: &str,
) -> crate::Result<(columnar::Column<u64>, Type)> { cardinality: Cardinality,
) -> crate::Result<(FastFieldAccessor, Type)> {
let field = reader.schema().get_field(field_name)?; let field = reader.schema().get_field(field_name)?;
// TODO we should get type metadata from columnar let field_type = reader.schema().get_field_entry(field).field_type();
let field_type = reader
.schema() if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
.get_field_entry(field) if cardinality != field_cardinality {
.field_type() return Err(TantivyError::InvalidArgument(format!(
.value_type(); "Invalid field cardinality on field {} expected {:?}, but got {:?}",
// TODO Do validation field_name, cardinality, field_cardinality
)));
}
} else {
return Err(TantivyError::InvalidArgument(format!(
"Only fast fields of type f64, u64, i64 are supported, but got {:?} ",
field_type.value_type()
)));
};
let ff_fields = reader.fast_fields(); let ff_fields = reader.fast_fields();
let ff_field = ff_fields.u64_lenient(field_name)?.ok_or_else(|| { match cardinality {
TantivyError::InvalidArgument(format!( Cardinality::SingleValue => ff_fields
"No numerical fast field found for field: {}", .u64_lenient(field_name)
field_name .map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
)) Cardinality::MultiValues => ff_fields
})?; .u64s_lenient(field_name)
Ok((ff_field, field_type)) .map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
}
} }
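In the variant that takes a Cardinality, get_ff_reader_and_validate rejects a field whose declared cardinality differs from what the aggregation requested. A std-only sketch of that check with local stand-in types:

#[derive(Debug, Clone, Copy, PartialEq)]
enum Cardinality {
    SingleValue,
    MultiValues,
}

fn validate_cardinality(
    field_name: &str,
    requested: Cardinality,
    declared: Option<Cardinality>,
) -> Result<(), String> {
    match declared {
        Some(actual) if actual == requested => Ok(()),
        Some(actual) => Err(format!(
            "Invalid field cardinality on field {field_name} expected {requested:?}, but got {actual:?}"
        )),
        // The field is not a supported numerical fast field at all.
        None => Err("Only fast fields of type f64, u64, i64 are supported".to_string()),
    }
}

fn main() {
    assert!(validate_cardinality("price", Cardinality::SingleValue, Some(Cardinality::SingleValue)).is_ok());
    assert!(validate_cardinality("tags", Cardinality::SingleValue, Some(Cardinality::MultiValues)).is_err());
}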

View File

@@ -1,126 +0,0 @@
use serde::{Deserialize, Serialize};
/// DateHistogramAggregation is similar to `HistogramAggregation`, but it can only be used with date
/// type.
///
/// Currently only **fixed time** intervals are supported. Calendar-aware time intervals are not
/// supported.
///
/// Like the histogram, values are rounded down into the closest bucket.
///
/// For this calculation all fastfield values are converted to f64.
///
/// # Limitations/Compatibility
/// Only fixed time intervals are supported.
///
/// # JSON Format
/// ```json
/// {
/// "prices": {
/// "date_histogram": {
/// "field": "price",
/// "fixed_interval": "30d"
/// }
/// }
/// }
/// ```
///
/// Response
/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct DateHistogramAggregationReq {
/// The field to aggregate on.
pub field: String,
/// The interval to chunk your data range. Each bucket spans a value range of
/// [0..fixed_interval). Accepted values
///
/// Fixed intervals are configured with the `fixed_interval` parameter.
/// In contrast to calendar-aware intervals, fixed intervals are a fixed number of SI units and
/// never deviate, regardless of where they fall on the calendar. One second is always
/// composed of 1000ms. This allows fixed intervals to be specified in any multiple of the
/// supported units. However, it means fixed intervals cannot express other units such as
/// months, since the duration of a month is not a fixed quantity. Attempting to specify a
/// calendar interval like month or quarter will return an Error.
///
/// The accepted units for fixed intervals are:
/// * `ms`: milliseconds
/// * `s`: seconds. Defined as 1000 milliseconds each.
/// * `m`: minutes. Defined as 60 seconds each (60_000 milliseconds).
/// * `h`: hours. Defined as 60 minutes each (3_600_000 milliseconds).
/// * `d`: days. Defined as 24 hours (86_400_000 milliseconds).
///
/// Fractional time values are not supported, but you can address this by shifting to another
/// time unit (e.g., `1.5h` could instead be specified as `90m`).
pub fixed_interval: String,
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
/// 1))`.
pub offset: Option<String>,
/// Whether to return the buckets as a hash map
#[serde(default)]
pub keyed: bool,
}
impl DateHistogramAggregationReq {
fn validate(&self) -> crate::Result<()> {
Ok(())
}
}
#[derive(Debug, PartialEq, Eq)]
/// Errors when parsing the fixed interval for `DateHistogramAggregationReq`.
pub enum DateHistogramParseError {
/// Unit not recognized in passed String
UnitNotRecognized(String),
/// Number not found in passed String
NumberMissing(String),
/// Unit not found in passed String
UnitMissing(String),
}
fn parse_into_milliseconds(input: &str) -> Result<u64, DateHistogramParseError> {
let split_boundary = input
.char_indices()
.take_while(|(_pos, el)| el.is_numeric())
.count();
let (number, unit) = input.split_at(split_boundary);
if number.is_empty() {
return Err(DateHistogramParseError::NumberMissing(input.to_string()));
}
if unit.is_empty() {
return Err(DateHistogramParseError::UnitMissing(input.to_string()));
}
let number: u64 = number.parse().unwrap();
let multiplier_from_unit = match unit {
"ms" => 1,
"s" => 1000,
"m" => 60 * 1000,
"h" => 60 * 60 * 1000,
"d" => 24 * 60 * 60 * 1000,
_ => return Err(DateHistogramParseError::UnitNotRecognized(unit.to_string())),
};
Ok(number * multiplier_from_unit)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parser_test() {
assert_eq!(parse_into_milliseconds("1m").unwrap(), 60_000);
assert_eq!(parse_into_milliseconds("2m").unwrap(), 120_000);
assert_eq!(
parse_into_milliseconds("2y").unwrap_err(),
DateHistogramParseError::UnitNotRecognized("y".to_string())
);
assert_eq!(
parse_into_milliseconds("2000").unwrap_err(),
DateHistogramParseError::UnitMissing("2000".to_string())
);
assert_eq!(
parse_into_milliseconds("ms").unwrap_err(),
DateHistogramParseError::NumberMissing("ms".to_string())
);
}
}
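For a concrete data point, the "30d" fixed interval from the JSON example above resolves to 30 * 86_400_000 ms. A short usage sketch, assuming parse_into_milliseconds from the file above is in scope:

fn main() {
    assert_eq!(parse_into_milliseconds("30d").unwrap(), 2_592_000_000);
    assert_eq!(parse_into_milliseconds("90m").unwrap(), 5_400_000);
}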

View File

@@ -1,7 +1,7 @@
use std::cmp::Ordering; use std::cmp::Ordering;
use std::fmt::Display; use std::fmt::Display;
use columnar::Column; use fastfield_codecs::Column;
use itertools::Itertools; use itertools::Itertools;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -13,9 +13,7 @@ use crate::aggregation::agg_result::BucketEntry;
use crate::aggregation::intermediate_agg_result::{ use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry, IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
}; };
use crate::aggregation::segment_agg_result::{ use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
};
use crate::aggregation::{f64_from_fastfield_u64, format_date}; use crate::aggregation::{f64_from_fastfield_u64, format_date};
use crate::schema::{Schema, Type}; use crate::schema::{Schema, Type};
use crate::{DocId, TantivyError}; use crate::{DocId, TantivyError};
@@ -64,6 +62,7 @@ use crate::{DocId, TantivyError};
/// ///
/// Response /// Response
/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry) /// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct HistogramAggregation { pub struct HistogramAggregation {
/// The field to aggregate on. /// The field to aggregate on.
@@ -185,7 +184,7 @@ pub(crate) struct SegmentHistogramBucketEntry {
impl SegmentHistogramBucketEntry { impl SegmentHistogramBucketEntry {
pub(crate) fn into_intermediate_bucket_entry( pub(crate) fn into_intermediate_bucket_entry(
self, self,
sub_aggregation: GenericSegmentAggregationResultsCollector, sub_aggregation: SegmentAggregationResultsCollector,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateHistogramBucketEntry> { ) -> crate::Result<IntermediateHistogramBucketEntry> {
Ok(IntermediateHistogramBucketEntry { Ok(IntermediateHistogramBucketEntry {
@@ -199,11 +198,11 @@ impl SegmentHistogramBucketEntry {
/// The collector puts values from the fast field into the correct buckets and does a conversion to /// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype. /// the correct datatype.
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
pub struct SegmentHistogramCollector { pub struct SegmentHistogramCollector {
/// The buckets containing the aggregation data. /// The buckets containing the aggregation data.
buckets: Vec<SegmentHistogramBucketEntry>, buckets: Vec<SegmentHistogramBucketEntry>,
sub_aggregations: Option<Vec<GenericSegmentAggregationResultsCollector>>, sub_aggregations: Option<Vec<SegmentAggregationResultsCollector>>,
field_type: Type, field_type: Type,
interval: f64, interval: f64,
offset: f64, offset: f64,
@@ -284,7 +283,7 @@ impl SegmentHistogramCollector {
req: &HistogramAggregation, req: &HistogramAggregation,
sub_aggregation: &AggregationsWithAccessor, sub_aggregation: &AggregationsWithAccessor,
field_type: Type, field_type: Type,
accessor: &Column<u64>, accessor: &dyn Column<u64>,
) -> crate::Result<Self> { ) -> crate::Result<Self> {
req.validate()?; req.validate()?;
let min = f64_from_fastfield_u64(accessor.min_value(), &field_type); let min = f64_from_fastfield_u64(accessor.min_value(), &field_type);
@@ -301,7 +300,7 @@ impl SegmentHistogramCollector {
None None
} else { } else {
let sub_aggregation = let sub_aggregation =
GenericSegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?; SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
Some(buckets.iter().map(|_| sub_aggregation.clone()).collect()) Some(buckets.iter().map(|_| sub_aggregation.clone()).collect())
}; };
@@ -336,7 +335,7 @@ impl SegmentHistogramCollector {
#[inline] #[inline]
pub(crate) fn collect_block( pub(crate) fn collect_block(
&mut self, &mut self,
docs: &[DocId], doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor, bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool, force_flush: bool,
) -> crate::Result<()> { ) -> crate::Result<()> {
@@ -347,20 +346,64 @@ impl SegmentHistogramCollector {
let get_bucket_num = let get_bucket_num =
|val| (get_bucket_num_f64(val, interval, offset) as i64 - first_bucket_num) as usize; |val| (get_bucket_num_f64(val, interval, offset) as i64 - first_bucket_num) as usize;
let accessor = &bucket_with_accessor.accessor; let accessor = bucket_with_accessor
for doc in docs { .accessor
for val in accessor.values(*doc) { .as_single()
let val = self.f64_from_fastfield_u64(val); .expect("unexpected fast field cardinality");
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0]));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1]));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2]));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3]));
let bucket_pos = get_bucket_num(val); let bucket_pos0 = get_bucket_num(val0);
self.increment_bucket_if_in_bounds( let bucket_pos1 = get_bucket_num(val1);
val, let bucket_pos2 = get_bucket_num(val2);
&bounds, let bucket_pos3 = get_bucket_num(val3);
bucket_pos,
*doc, self.increment_bucket_if_in_bounds(
&bucket_with_accessor.sub_aggregation, val0,
)?; &bounds,
bucket_pos0,
docs[0],
&bucket_with_accessor.sub_aggregation,
)?;
self.increment_bucket_if_in_bounds(
val1,
&bounds,
bucket_pos1,
docs[1],
&bucket_with_accessor.sub_aggregation,
)?;
self.increment_bucket_if_in_bounds(
val2,
&bounds,
bucket_pos2,
docs[2],
&bucket_with_accessor.sub_aggregation,
)?;
self.increment_bucket_if_in_bounds(
val3,
&bounds,
bucket_pos3,
docs[3],
&bucket_with_accessor.sub_aggregation,
)?;
}
for &doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get_val(doc), &self.field_type);
if !bounds.contains(val) {
continue;
} }
let bucket_pos = (get_bucket_num_f64(val, self.interval, self.offset) as i64
- self.first_bucket_num) as usize;
debug_assert_eq!(
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset)
);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
} }
if force_flush { if force_flush {
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() { if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {
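Both sides of this hunk put each document's value into a bucket, but the right-hand version manually unrolls the loop with chunks_exact(4), issuing four fast field lookups back to back before bucketing and handling the tail one document at a time. A std-only sketch of that pattern:

// `get_val` stands in for the fast field lookup, `bucket_for` for the
// increment_bucket_if_in_bounds call.
fn collect_docs(docs: &[u32], get_val: impl Fn(u32) -> u64, mut bucket_for: impl FnMut(u64)) {
    let mut chunks = docs.chunks_exact(4);
    for chunk in chunks.by_ref() {
        // Issue the four lookups before doing any bucketing work.
        let v0 = get_val(chunk[0]);
        let v1 = get_val(chunk[1]);
        let v2 = get_val(chunk[2]);
        let v3 = get_val(chunk[3]);
        bucket_for(v0);
        bucket_for(v1);
        bucket_for(v2);
        bucket_for(v3);
    }
    // Scalar tail for the last 0..=3 documents.
    for &doc in chunks.remainder() {
        bucket_for(get_val(doc));
    }
}

fn main() {
    let vals = [10u64, 20, 30, 40, 50, 60];
    let mut collected = Vec::new();
    collect_docs(&[0, 1, 2, 3, 4, 5], |doc| vals[doc as usize], |v| collected.push(v));
    assert_eq!(collected, vals);
}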

View File

@@ -1,4 +1,2 @@
mod date_histogram;
mod histogram; mod histogram;
pub use date_histogram::*;
pub use histogram::*; pub use histogram::*;

View File

@@ -1,7 +1,7 @@
use std::fmt::Debug; use std::fmt::Debug;
use std::ops::Range; use std::ops::Range;
use columnar::MonotonicallyMappableToU64; use fastfield_codecs::MonotonicallyMappableToU64;
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -11,9 +11,7 @@ use crate::aggregation::agg_req_with_accessor::{
use crate::aggregation::intermediate_agg_result::{ use crate::aggregation::intermediate_agg_result::{
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult, IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
}; };
use crate::aggregation::segment_agg_result::{ use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
BucketCount, GenericSegmentAggregationResultsCollector, SegmentAggregationCollector,
};
use crate::aggregation::{ use crate::aggregation::{
f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey, f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
}; };
@@ -116,7 +114,7 @@ impl From<Range<u64>> for InternalRangeAggregationRange {
} }
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
pub(crate) struct SegmentRangeAndBucketEntry { pub(crate) struct SegmentRangeAndBucketEntry {
range: Range<u64>, range: Range<u64>,
bucket: SegmentRangeBucketEntry, bucket: SegmentRangeBucketEntry,
@@ -124,18 +122,18 @@ pub(crate) struct SegmentRangeAndBucketEntry {
/// The collector puts values from the fast field into the correct buckets and does a conversion to /// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype. /// the correct datatype.
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
pub struct SegmentRangeCollector { pub struct SegmentRangeCollector {
/// The buckets containing the aggregation data. /// The buckets containing the aggregation data.
buckets: Vec<SegmentRangeAndBucketEntry>, buckets: Vec<SegmentRangeAndBucketEntry>,
field_type: Type, field_type: Type,
} }
#[derive(Clone)] #[derive(Clone, PartialEq)]
pub(crate) struct SegmentRangeBucketEntry { pub(crate) struct SegmentRangeBucketEntry {
pub key: Key, pub key: Key,
pub doc_count: u64, pub doc_count: u64,
pub sub_aggregation: Option<GenericSegmentAggregationResultsCollector>, pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
/// The from range of the bucket. Equals `f64::MIN` when `None`. /// The from range of the bucket. Equals `f64::MIN` when `None`.
pub from: Option<f64>, pub from: Option<f64>,
/// The to range of the bucket. Equals `f64::MAX` when `None`. Open interval, `to` is not /// The to range of the bucket. Equals `f64::MAX` when `None`. Open interval, `to` is not
@@ -229,11 +227,9 @@ impl SegmentRangeCollector {
let sub_aggregation = if sub_aggregation.is_empty() { let sub_aggregation = if sub_aggregation.is_empty() {
None None
} else { } else {
Some( Some(SegmentAggregationResultsCollector::from_req_and_validate(
GenericSegmentAggregationResultsCollector::from_req_and_validate( sub_aggregation,
sub_aggregation, )?)
)?,
)
}; };
Ok(SegmentRangeAndBucketEntry { Ok(SegmentRangeAndBucketEntry {
@@ -261,18 +257,35 @@ impl SegmentRangeCollector {
#[inline] #[inline]
pub(crate) fn collect_block( pub(crate) fn collect_block(
&mut self, &mut self,
docs: &[DocId], doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor, bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool, force_flush: bool,
) -> crate::Result<()> { ) -> crate::Result<()> {
let accessor = &bucket_with_accessor.accessor; let mut iter = doc.chunks_exact(4);
for doc in docs { let accessor = bucket_with_accessor
for val in accessor.values(*doc) { .accessor
let bucket_pos = self.get_bucket_pos(val); .as_single()
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?; .expect("unexpected fast field cardinality");
} for docs in iter.by_ref() {
} let val1 = accessor.get_val(docs[0]);
let val2 = accessor.get_val(docs[1]);
let val3 = accessor.get_val(docs[2]);
let val4 = accessor.get_val(docs[3]);
let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3);
let bucket_pos4 = self.get_bucket_pos(val4);
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
}
for &doc in iter.remainder() {
let val = accessor.get_val(doc);
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush { if force_flush {
for bucket in &mut self.buckets { for bucket in &mut self.buckets {
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation { if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
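Editor's note: the collect_block hunk above walks the doc ids in blocks of four with a scalar tail. A minimal, self-contained sketch of that chunks_exact(4) pattern, with placeholder lookup/on_val closures standing in for the fast-field accessor and the bucket update (they are not tantivy APIs):

fn process_in_blocks(
    docs: &[u32],
    lookup: impl Fn(u32) -> u64,
    mut on_val: impl FnMut(u32, u64),
) {
    let mut iter = docs.chunks_exact(4);
    for quad in iter.by_ref() {
        // Issue the four fast-field lookups before consuming the results so
        // the memory accesses can overlap.
        let v0 = lookup(quad[0]);
        let v1 = lookup(quad[1]);
        let v2 = lookup(quad[2]);
        let v3 = lookup(quad[3]);
        on_val(quad[0], v0);
        on_val(quad[1], v1);
        on_val(quad[2], v2);
        on_val(quad[3], v3);
    }
    // The last `docs.len() % 4` documents are handled one by one.
    for &doc in iter.remainder() {
        let v = lookup(doc);
        on_val(doc, v);
    }
}

fn main() {
    let docs: Vec<u32> = (0..10).collect();
    let mut sum = 0u64;
    process_in_blocks(&docs, |doc| u64::from(doc) * 2, |_doc, val| sum += val);
    assert_eq!(sum, 90);
}

The fixed block width is a throughput trade-off; the remainder loop keeps the function correct for any slice length.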
@@ -421,7 +434,7 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Resu
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use columnar::MonotonicallyMappableToU64; use fastfield_codecs::MonotonicallyMappableToU64;
use serde_json::Value; use serde_json::Value;
use super::*; use super::*;
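Editor's note: the range collector in the file above maps each fast-field value to a bucket index via get_bucket_pos, whose body is not shown in these hunks. Conceptually it is a search over bucket boundaries; a hedged, stand-alone sketch (the contiguous Range<u64> buckets below are illustrative, not tantivy's internal representation):

use std::ops::Range;

// Returns the index of the bucket containing `val`, assuming the buckets are
// contiguous and sorted by their upper bound.
fn get_bucket_pos(buckets: &[Range<u64>], val: u64) -> usize {
    // Count the buckets that end at or before `val`; that count is the index
    // of the first bucket whose range still contains the value.
    buckets.partition_point(|bucket| bucket.end <= val)
}

fn main() {
    let buckets = [0..10, 10..20, 20..u64::MAX];
    assert_eq!(get_bucket_pos(&buckets, 3), 0);
    assert_eq!(get_bucket_pos(&buckets, 10), 1);
    assert_eq!(get_bucket_pos(&buckets, 12_345), 2);
}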

View File

@@ -1,6 +1,5 @@
use std::fmt::Debug; use std::fmt::Debug;
use columnar::Column;
use itertools::Itertools; use itertools::Itertools;
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -12,11 +11,9 @@ use crate::aggregation::agg_req_with_accessor::{
use crate::aggregation::intermediate_agg_result::{ use crate::aggregation::intermediate_agg_result::{
IntermediateBucketResult, IntermediateTermBucketEntry, IntermediateTermBucketResult, IntermediateBucketResult, IntermediateTermBucketEntry, IntermediateTermBucketResult,
}; };
use crate::aggregation::segment_agg_result::{ use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
build_segment_agg_collector, GenericSegmentAggregationResultsCollector,
SegmentAggregationCollector,
};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::fastfield::MultiValuedFastFieldReader;
use crate::schema::Type; use crate::schema::Type;
use crate::{DocId, TantivyError}; use crate::{DocId, TantivyError};
@@ -199,16 +196,17 @@ impl TermsAggregationInternal {
} }
} }
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug, PartialEq)]
/// Container to store term_ids and their buckets. /// Container to store term_ids and their buckets.
struct TermBuckets { struct TermBuckets {
pub(crate) entries: FxHashMap<u32, TermBucketEntry>, pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
blueprint: Option<SegmentAggregationResultsCollector>,
} }
#[derive(Clone, Default)] #[derive(Clone, PartialEq, Default)]
struct TermBucketEntry { struct TermBucketEntry {
doc_count: u64, doc_count: u64,
sub_aggregations: Option<Box<dyn SegmentAggregationCollector>>, sub_aggregations: Option<SegmentAggregationResultsCollector>,
} }
impl Debug for TermBucketEntry { impl Debug for TermBucketEntry {
@@ -220,7 +218,7 @@ impl Debug for TermBucketEntry {
} }
impl TermBucketEntry { impl TermBucketEntry {
fn from_blueprint(blueprint: &Option<Box<dyn SegmentAggregationCollector>>) -> Self { fn from_blueprint(blueprint: &Option<SegmentAggregationResultsCollector>) -> Self {
Self { Self {
doc_count: 0, doc_count: 0,
sub_aggregations: blueprint.clone(), sub_aggregations: blueprint.clone(),
@@ -249,11 +247,46 @@ impl TermBuckets {
sub_aggregation: &AggregationsWithAccessor, sub_aggregation: &AggregationsWithAccessor,
_max_term_id: usize, _max_term_id: usize,
) -> crate::Result<Self> { ) -> crate::Result<Self> {
let has_sub_aggregations = sub_aggregation.is_empty();
let blueprint = if has_sub_aggregations {
let sub_aggregation =
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregation)?;
Some(sub_aggregation)
} else {
None
};
Ok(TermBuckets { Ok(TermBuckets {
blueprint,
entries: Default::default(), entries: Default::default(),
}) })
} }
fn increment_bucket(
&mut self,
term_ids: &[u64],
doc: DocId,
sub_aggregation: &AggregationsWithAccessor,
bucket_count: &BucketCount,
blueprint: &Option<SegmentAggregationResultsCollector>,
) -> crate::Result<()> {
for &term_id in term_ids {
let entry = self.entries.entry(term_id as u32).or_insert_with(|| {
bucket_count.add_count(1);
TermBucketEntry::from_blueprint(blueprint)
});
entry.doc_count += 1;
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
sub_aggregations.collect(doc, sub_aggregation)?;
}
}
bucket_count.validate_bucket_count()?;
Ok(())
}
fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> { fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
for entry in &mut self.entries.values_mut() { for entry in &mut self.entries.values_mut() {
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() { if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
@@ -266,12 +299,13 @@ impl TermBuckets {
/// The collector puts values from the fast field into the correct buckets and does a conversion to /// The collector puts values from the fast field into the correct buckets and does a conversion to
/// the correct datatype. /// the correct datatype.
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
pub struct SegmentTermCollector { pub struct SegmentTermCollector {
/// The buckets containing the aggregation data. /// The buckets containing the aggregation data.
term_buckets: TermBuckets, term_buckets: TermBuckets,
req: TermsAggregationInternal, req: TermsAggregationInternal,
blueprint: Option<Box<dyn SegmentAggregationCollector>>, field_type: Type,
blueprint: Option<SegmentAggregationResultsCollector>,
} }
pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) { pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) {
@@ -283,8 +317,12 @@ impl SegmentTermCollector {
pub(crate) fn from_req_and_validate( pub(crate) fn from_req_and_validate(
req: &TermsAggregation, req: &TermsAggregation,
sub_aggregations: &AggregationsWithAccessor, sub_aggregations: &AggregationsWithAccessor,
field_type: Type,
accessor: &MultiValuedFastFieldReader<u64>,
) -> crate::Result<Self> { ) -> crate::Result<Self> {
let term_buckets = TermBuckets::default(); let max_term_id = accessor.max_value();
let term_buckets =
TermBuckets::from_req_and_validate(sub_aggregations, max_term_id as usize)?;
if let Some(custom_order) = req.order.as_ref() { if let Some(custom_order) = req.order.as_ref() {
// Validate sub aggregation exists // Validate sub aggregation exists
@@ -302,7 +340,8 @@ impl SegmentTermCollector {
let has_sub_aggregations = !sub_aggregations.is_empty(); let has_sub_aggregations = !sub_aggregations.is_empty();
let blueprint = if has_sub_aggregations { let blueprint = if has_sub_aggregations {
let sub_aggregation = build_segment_agg_collector(sub_aggregations)?; let sub_aggregation =
SegmentAggregationResultsCollector::from_req_and_validate(sub_aggregations)?;
Some(sub_aggregation) Some(sub_aggregation)
} else { } else {
None None
@@ -311,6 +350,7 @@ impl SegmentTermCollector {
Ok(SegmentTermCollector { Ok(SegmentTermCollector {
req: TermsAggregationInternal::from_req(req), req: TermsAggregationInternal::from_req(req),
term_buckets, term_buckets,
field_type,
blueprint, blueprint,
}) })
} }
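Editor's note: the blueprint field above is worth calling out. The sub-aggregation request is validated once, the resulting collector is kept as a template, and it is cloned whenever a new term bucket opens. A hedged sketch of that idea with placeholder types (SubCollector/Bucket/TermBuckets below are illustrative, not tantivy's):

use std::collections::HashMap;

#[derive(Clone, Default)]
struct SubCollector { count: u64 }

struct Bucket { doc_count: u64, sub: Option<SubCollector> }

struct TermBuckets {
    // Validated once, up front; `None` when there are no sub-aggregations.
    blueprint: Option<SubCollector>,
    entries: HashMap<u32, Bucket>,
}

impl TermBuckets {
    fn increment_bucket(&mut self, term_id: u32) {
        let blueprint = &self.blueprint;
        let entry = self.entries.entry(term_id).or_insert_with(|| Bucket {
            doc_count: 0,
            // Cloning the blueprint avoids re-validating the aggregation
            // request for every newly seen term.
            sub: blueprint.clone(),
        });
        entry.doc_count += 1;
    }
}

fn main() {
    let mut buckets = TermBuckets {
        blueprint: Some(SubCollector::default()),
        entries: HashMap::new(),
    };
    buckets.increment_bucket(7);
    buckets.increment_bucket(7);
    assert_eq!(buckets.entries[&7].doc_count, 2);
}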
@@ -322,19 +362,13 @@ impl SegmentTermCollector {
let mut entries: Vec<(u32, TermBucketEntry)> = let mut entries: Vec<(u32, TermBucketEntry)> =
self.term_buckets.entries.into_iter().collect(); self.term_buckets.entries.into_iter().collect();
let order_by_key = self.req.order.target == OrderTarget::Key;
let order_by_sub_aggregation = let order_by_sub_aggregation =
matches!(self.req.order.target, OrderTarget::SubAggregation(_)); matches!(self.req.order.target, OrderTarget::SubAggregation(_));
match self.req.order.target { match self.req.order.target {
OrderTarget::Key => { OrderTarget::Key => {
// We rely on the fact that term ordinals match the order of the strings // defer order and cut_off after loading the texts from the dictionary
// TODO: We could have a special collector, that keeps only TOP n results at any
// time.
if self.req.order.order == Order::Desc {
entries.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.0));
} else {
entries.sort_unstable_by_key(|bucket| bucket.0);
}
} }
OrderTarget::SubAggregation(_name) => { OrderTarget::SubAggregation(_name) => {
// don't sort and cut off since it's hard to make assumptions on the quality of the // don't sort and cut off since it's hard to make assumptions on the quality of the
@@ -350,40 +384,34 @@ impl SegmentTermCollector {
} }
} }
let (term_doc_count_before_cutoff, sum_other_doc_count) = if order_by_sub_aggregation { let (term_doc_count_before_cutoff, mut sum_other_doc_count) =
(0, 0) if order_by_key || order_by_sub_aggregation {
} else { (0, 0)
cut_off_buckets(&mut entries, self.req.segment_size as usize) } else {
}; cut_off_buckets(&mut entries, self.req.segment_size as usize)
};
let inverted_index = agg_with_accessor let inverted_index = agg_with_accessor
.str_dict_column .inverted_index
.as_ref() .as_ref()
.expect("internal error: inverted index not loaded for term aggregation"); .expect("internal error: inverted index not loaded for term aggregation");
let term_dict = inverted_index; let term_dict = inverted_index.terms();
let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default(); let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
let mut buffer = String::new(); let mut buffer = vec![];
for (term_id, entry) in entries { for (term_id, entry) in entries {
if !term_dict.ord_to_str(term_id as u64, &mut buffer)? { term_dict
return Err(TantivyError::InternalError(format!( .ord_to_term(term_id as u64, &mut buffer)
"Couldn't find term_id {} in dict", .expect("could not find term");
term_id
)));
}
dict.insert( dict.insert(
buffer.to_string(), String::from_utf8(buffer.to_vec())
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?,
entry.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?, entry.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
); );
} }
if self.req.min_doc_count == 0 { if self.req.min_doc_count == 0 {
// TODO: Handle rev streaming for descending sorting by keys let mut stream = term_dict.stream()?;
let mut stream = term_dict.dictionary().stream()?;
while let Some((key, _ord)) = stream.next() { while let Some((key, _ord)) = stream.next() {
if dict.len() >= self.req.segment_size as usize {
break;
}
let key = std::str::from_utf8(key) let key = std::str::from_utf8(key)
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?; .map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
if !dict.contains_key(key) { if !dict.contains_key(key) {
@@ -392,6 +420,20 @@ impl SegmentTermCollector {
} }
} }
if order_by_key {
let mut dict_entries = dict.into_iter().collect_vec();
if self.req.order.order == Order::Desc {
dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key1.cmp(key2));
} else {
dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key2.cmp(key1));
}
let (_, sum_other_docs) =
cut_off_buckets(&mut dict_entries, self.req.segment_size as usize);
sum_other_doc_count += sum_other_docs;
dict = dict_entries.into_iter().collect();
}
Ok(IntermediateBucketResult::Terms( Ok(IntermediateBucketResult::Terms(
IntermediateTermBucketResult { IntermediateTermBucketResult {
entries: dict, entries: dict,
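Editor's note: both ordering branches above funnel into a cut-off step. The gist of a cut_off_buckets-style helper (this is a simplified sketch, not the actual tantivy function) is to keep the first segment_size buckets and fold the doc counts of the rest into sum_other_doc_count:

// `entries` is assumed to be already sorted in the desired order; doc counts
// of everything past `limit` are summed up and reported back.
fn cut_off(entries: &mut Vec<(String, u64)>, limit: usize) -> u64 {
    let mut sum_other_doc_count = 0;
    if entries.len() > limit {
        for (_key, doc_count) in entries.drain(limit..) {
            sum_other_doc_count += doc_count;
        }
    }
    sum_other_doc_count
}

fn main() {
    let mut entries = vec![
        ("rust".to_string(), 10),
        ("search".to_string(), 7),
        ("index".to_string(), 2),
    ];
    let other = cut_off(&mut entries, 2);
    assert_eq!(entries.len(), 2);
    assert_eq!(other, 2);
}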
@@ -404,26 +446,65 @@ impl SegmentTermCollector {
#[inline] #[inline]
pub(crate) fn collect_block( pub(crate) fn collect_block(
&mut self, &mut self,
docs: &[DocId], doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor, bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool, force_flush: bool,
) -> crate::Result<()> { ) -> crate::Result<()> {
let accessor = &bucket_with_accessor.accessor; let accessor = bucket_with_accessor
.accessor
.as_multi()
.expect("unexpected fast field cardinatility");
let mut iter = doc.chunks_exact(4);
let mut vals1 = vec![];
let mut vals2 = vec![];
let mut vals3 = vec![];
let mut vals4 = vec![];
for docs in iter.by_ref() {
accessor.get_vals(docs[0], &mut vals1);
accessor.get_vals(docs[1], &mut vals2);
accessor.get_vals(docs[2], &mut vals3);
accessor.get_vals(docs[3], &mut vals4);
for doc in docs { self.term_buckets.increment_bucket(
for term_id in accessor.values(*doc) { &vals1,
let entry = self docs[0],
.term_buckets &bucket_with_accessor.sub_aggregation,
.entries &bucket_with_accessor.bucket_count,
.entry(term_id as u32) &self.blueprint,
.or_insert_with(|| TermBucketEntry::from_blueprint(&self.blueprint)); )?;
entry.doc_count += 1; self.term_buckets.increment_bucket(
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() { &vals2,
sub_aggregations.collect(*doc, &bucket_with_accessor.sub_aggregation)?; docs[1],
} &bucket_with_accessor.sub_aggregation,
} &bucket_with_accessor.bucket_count,
&self.blueprint,
)?;
self.term_buckets.increment_bucket(
&vals3,
docs[2],
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
)?;
self.term_buckets.increment_bucket(
&vals4,
docs[3],
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
)?;
} }
for &doc in iter.remainder() {
accessor.get_vals(doc, &mut vals1);
self.term_buckets.increment_bucket(
&vals1,
doc,
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
)?;
}
if force_flush { if force_flush {
self.term_buckets self.term_buckets
.force_flush(&bucket_with_accessor.sub_aggregation)?; .force_flush(&bucket_with_accessor.sub_aggregation)?;
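Editor's note on the multi-valued path above: the term ids for each document are read into caller-owned Vecs so the allocations are reused across the whole block. A reduced sketch of that shape, with read_vals standing in for the accessor's get_vals (not a tantivy API):

fn collect_multi(
    docs: &[u32],
    read_vals: impl Fn(u32, &mut Vec<u64>),
    mut on_terms: impl FnMut(u32, &[u64]),
) {
    // One reusable buffer instead of a fresh allocation per document.
    let mut vals = Vec::new();
    for &doc in docs {
        vals.clear();
        read_vals(doc, &mut vals);
        on_terms(doc, &vals);
    }
}

fn main() {
    let mut seen = Vec::new();
    collect_multi(
        &[1, 2],
        |doc, out| out.extend([u64::from(doc), u64::from(doc) + 10]),
        |doc, terms| seen.push((doc, terms.to_vec())),
    );
    assert_eq!(seen, vec![(1, vec![1, 11]), (2, vec![2, 12])]);
}

The diff above keeps four buffers so it can stay in the block-of-four rhythm shown earlier; a single buffer keeps the sketch short.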
@@ -842,14 +923,14 @@ mod tests {
]; ];
let index = get_test_index_from_values_and_terms(merge_segments, &segment_and_terms)?; let index = get_test_index_from_values_and_terms(merge_segments, &segment_and_terms)?;
// key asc // key desc
let agg_req: Aggregations = vec![( let agg_req: Aggregations = vec![(
"my_texts".to_string(), "my_texts".to_string(),
Aggregation::Bucket(BucketAggregation { Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(), field: "string_id".to_string(),
order: Some(CustomOrder { order: Some(CustomOrder {
order: Order::Asc, order: Order::Desc,
target: OrderTarget::Key, target: OrderTarget::Key,
}), }),
..Default::default() ..Default::default()
@@ -876,7 +957,7 @@ mod tests {
bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(), field: "string_id".to_string(),
order: Some(CustomOrder { order: Some(CustomOrder {
order: Order::Asc, order: Order::Desc,
target: OrderTarget::Key, target: OrderTarget::Key,
}), }),
size: Some(2), size: Some(2),
@@ -900,14 +981,14 @@ mod tests {
assert_eq!(res["my_texts"]["sum_other_doc_count"], 3); assert_eq!(res["my_texts"]["sum_other_doc_count"], 3);
// key asc and segment_size cut_off // key desc and segment_size cut_off
let agg_req: Aggregations = vec![( let agg_req: Aggregations = vec![(
"my_texts".to_string(), "my_texts".to_string(),
Aggregation::Bucket(BucketAggregation { Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(), field: "string_id".to_string(),
order: Some(CustomOrder { order: Some(CustomOrder {
order: Order::Asc, order: Order::Desc,
target: OrderTarget::Key, target: OrderTarget::Key,
}), }),
size: Some(2), size: Some(2),
@@ -930,14 +1011,14 @@ mod tests {
serde_json::Value::Null serde_json::Value::Null
); );
// key desc // key asc
let agg_req: Aggregations = vec![( let agg_req: Aggregations = vec![(
"my_texts".to_string(), "my_texts".to_string(),
Aggregation::Bucket(BucketAggregation { Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(), field: "string_id".to_string(),
order: Some(CustomOrder { order: Some(CustomOrder {
order: Order::Desc, order: Order::Asc,
target: OrderTarget::Key, target: OrderTarget::Key,
}), }),
..Default::default() ..Default::default()
@@ -957,14 +1038,14 @@ mod tests {
assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 5); assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 5);
assert_eq!(res["my_texts"]["sum_other_doc_count"], 0); assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
// key desc, size cut_off // key asc, size cut_off
let agg_req: Aggregations = vec![( let agg_req: Aggregations = vec![(
"my_texts".to_string(), "my_texts".to_string(),
Aggregation::Bucket(BucketAggregation { Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(), field: "string_id".to_string(),
order: Some(CustomOrder { order: Some(CustomOrder {
order: Order::Desc, order: Order::Asc,
target: OrderTarget::Key, target: OrderTarget::Key,
}), }),
size: Some(2), size: Some(2),
@@ -987,14 +1068,14 @@ mod tests {
); );
assert_eq!(res["my_texts"]["sum_other_doc_count"], 5); assert_eq!(res["my_texts"]["sum_other_doc_count"], 5);
// key desc, segment_size cut_off // key asc, segment_size cut_off
let agg_req: Aggregations = vec![( let agg_req: Aggregations = vec![(
"my_texts".to_string(), "my_texts".to_string(),
Aggregation::Bucket(BucketAggregation { Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(), field: "string_id".to_string(),
order: Some(CustomOrder { order: Some(CustomOrder {
order: Order::Desc, order: Order::Asc,
target: OrderTarget::Key, target: OrderTarget::Key,
}), }),
size: Some(2), size: Some(2),
@@ -1126,37 +1207,36 @@ mod tests {
Ok(()) Ok(())
} }
// TODO reenable with memory limit #[test]
//#[test] fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
// fn terms_aggregation_term_bucket_limit() -> crate::Result<()> { let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect();
// let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect(); let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
// let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
// let index = get_test_index_from_terms(true, &terms_per_segment)?; let index = get_test_index_from_terms(true, &terms_per_segment)?;
// let agg_req: Aggregations = vec![( let agg_req: Aggregations = vec![(
//"my_texts".to_string(), "my_texts".to_string(),
// Aggregation::Bucket(BucketAggregation { Aggregation::Bucket(BucketAggregation {
// bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
// field: "string_id".to_string(), field: "string_id".to_string(),
// min_doc_count: Some(0), min_doc_count: Some(0),
//..Default::default() ..Default::default()
//}), }),
// sub_aggregation: Default::default(), sub_aggregation: Default::default(),
//}), }),
//)] )]
//.into_iter() .into_iter()
//.collect(); .collect();
// let res = exec_request_with_query(agg_req, &index, None); let res = exec_request_with_query(agg_req, &index, None);
// assert!(res.is_err()); assert!(res.is_err());
// Ok(()) Ok(())
//} }
#[test] #[test]
fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> { fn terms_aggregation_multi_token_per_doc() -> crate::Result<()> {
let terms = vec!["Hello Hello", "Hallo Hallo"]; let terms = vec!["Hello Hello", "Hallo Hallo"];
let index = get_test_index_from_terms(true, &[terms])?; let index = get_test_index_from_terms(true, &[terms])?;
@@ -1176,13 +1256,12 @@ mod tests {
.collect(); .collect();
let res = exec_request_with_query(agg_req, &index, None).unwrap(); let res = exec_request_with_query(agg_req, &index, None).unwrap();
println!("{}", serde_json::to_string_pretty(&res).unwrap());
assert_eq!(res["my_texts"]["buckets"][0]["key"], "Hallo Hallo"); assert_eq!(res["my_texts"]["buckets"][0]["key"], "hello");
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 1); assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
assert_eq!(res["my_texts"]["buckets"][1]["key"], "Hello Hello"); assert_eq!(res["my_texts"]["buckets"][1]["key"], "hallo");
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 1); assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 2);
Ok(()) Ok(())
} }
@@ -1273,3 +1352,68 @@ mod tests {
Ok(()) Ok(())
} }
} }
#[cfg(all(test, feature = "unstable"))]
mod bench {
use itertools::Itertools;
use rand::seq::SliceRandom;
use rand::thread_rng;
use super::*;
fn get_collector_with_buckets(num_docs: u64) -> TermBuckets {
TermBuckets::from_req_and_validate(&Default::default(), num_docs as usize).unwrap()
}
fn get_rand_terms(total_terms: u64, num_terms_returned: u64) -> Vec<u64> {
let mut rng = thread_rng();
let all_terms = (0..total_terms - 1).collect_vec();
let mut vals = vec![];
for _ in 0..num_terms_returned {
let val = all_terms.as_slice().choose(&mut rng).unwrap();
vals.push(*val);
}
vals
}
fn bench_term_buckets(b: &mut test::Bencher, num_terms: u64, total_terms: u64) {
let mut collector = get_collector_with_buckets(total_terms);
let vals = get_rand_terms(total_terms, num_terms);
let aggregations_with_accessor: AggregationsWithAccessor = Default::default();
let bucket_count: BucketCount = BucketCount {
bucket_count: Default::default(),
max_bucket_count: 1_000_001u32,
};
b.iter(|| {
for &val in &vals {
collector
.increment_bucket(&[val], 0, &aggregations_with_accessor, &bucket_count, &None)
.unwrap();
}
})
}
#[bench]
fn bench_term_buckets_500_of_1_000_000(b: &mut test::Bencher) {
bench_term_buckets(b, 500u64, 1_000_000u64)
}
#[bench]
fn bench_term_buckets_1_000_000_of_50_000(b: &mut test::Bencher) {
bench_term_buckets(b, 1_000_000u64, 50_000u64)
}
#[bench]
fn bench_term_buckets_1_000_000_of_50(b: &mut test::Bencher) {
bench_term_buckets(b, 1_000_000u64, 50u64)
}
#[bench]
fn bench_term_buckets_1_000_000_of_1_000_000(b: &mut test::Bencher) {
bench_term_buckets(b, 1_000_000u64, 1_000_000u64)
}
}

View File

@@ -4,10 +4,7 @@ use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor; use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults; use super::agg_result::AggregationResults;
use super::intermediate_agg_result::IntermediateAggregationResults; use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::{ use super::segment_agg_result::SegmentAggregationResultsCollector;
build_segment_agg_collector, GenericSegmentAggregationResultsCollector,
SegmentAggregationCollector,
};
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate; use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::schema::Schema; use crate::schema::Schema;
@@ -140,7 +137,7 @@ fn merge_fruits(
/// `AggregationSegmentCollector` does the aggregation collection on a segment. /// `AggregationSegmentCollector` does the aggregation collection on a segment.
pub struct AggregationSegmentCollector { pub struct AggregationSegmentCollector {
aggs_with_accessor: AggregationsWithAccessor, aggs_with_accessor: AggregationsWithAccessor,
result: Box<dyn SegmentAggregationCollector>, result: SegmentAggregationResultsCollector,
error: Option<TantivyError>, error: Option<TantivyError>,
} }
@@ -154,7 +151,8 @@ impl AggregationSegmentCollector {
) -> crate::Result<Self> { ) -> crate::Result<Self> {
let aggs_with_accessor = let aggs_with_accessor =
get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?; get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
let result = build_segment_agg_collector(&aggs_with_accessor)?; let result =
SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
Ok(AggregationSegmentCollector { Ok(AggregationSegmentCollector {
aggs_with_accessor, aggs_with_accessor,
result, result,

View File

@@ -222,23 +222,24 @@ pub enum IntermediateMetricResult {
impl From<SegmentMetricResultCollector> for IntermediateMetricResult { impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
fn from(tree: SegmentMetricResultCollector) -> Self { fn from(tree: SegmentMetricResultCollector) -> Self {
use super::metric::SegmentStatsType;
match tree { match tree {
SegmentMetricResultCollector::Stats(collector) => match collector.collecting_for { SegmentMetricResultCollector::Stats(collector) => match collector.collecting_for {
SegmentStatsType::Average => IntermediateMetricResult::Average( super::metric::SegmentStatsType::Average => IntermediateMetricResult::Average(
IntermediateAverage::from_collector(collector), IntermediateAverage::from_collector(collector),
), ),
SegmentStatsType::Count => { super::metric::SegmentStatsType::Count => {
IntermediateMetricResult::Count(IntermediateCount::from_collector(collector)) IntermediateMetricResult::Count(IntermediateCount::from_collector(collector))
} }
SegmentStatsType::Max => { super::metric::SegmentStatsType::Max => {
IntermediateMetricResult::Max(IntermediateMax::from_collector(collector)) IntermediateMetricResult::Max(IntermediateMax::from_collector(collector))
} }
SegmentStatsType::Min => { super::metric::SegmentStatsType::Min => {
IntermediateMetricResult::Min(IntermediateMin::from_collector(collector)) IntermediateMetricResult::Min(IntermediateMin::from_collector(collector))
} }
SegmentStatsType::Stats => IntermediateMetricResult::Stats(collector.stats), super::metric::SegmentStatsType::Stats => {
SegmentStatsType::Sum => { IntermediateMetricResult::Stats(collector.stats)
}
super::metric::SegmentStatsType::Sum => {
IntermediateMetricResult::Sum(IntermediateSum::from_collector(collector)) IntermediateMetricResult::Sum(IntermediateSum::from_collector(collector))
} }
}, },
@@ -498,7 +499,7 @@ impl IntermediateTermBucketResult {
match req.order.target { match req.order.target {
OrderTarget::Key => { OrderTarget::Key => {
buckets.sort_by(|left, right| { buckets.sort_by(|left, right| {
if req.order.order == Order::Asc { if req.order.order == Order::Desc {
left.key.partial_cmp(&right.key) left.key.partial_cmp(&right.key)
} else { } else {
right.key.partial_cmp(&left.key) right.key.partial_cmp(&left.key)

View File

@@ -1,13 +1,7 @@
use columnar::Column; use fastfield_codecs::Column;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::*; use crate::aggregation::f64_from_fastfield_u64;
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateMetricResult,
};
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
use crate::aggregation::{f64_from_fastfield_u64, VecWithNames};
use crate::schema::Type; use crate::schema::Type;
use crate::{DocId, TantivyError}; use crate::{DocId, TantivyError};
@@ -166,74 +160,27 @@ impl SegmentStatsCollector {
stats: IntermediateStats::default(), stats: IntermediateStats::default(),
} }
} }
pub(crate) fn collect_block(&mut self, docs: &[DocId], field: &Column<u64>) { pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
// TODO special case for Required, Optional column type let mut iter = doc.chunks_exact(4);
for doc in docs { for docs in iter.by_ref() {
for val in field.values(*doc) { let val1 = field.get_val(docs[0]);
let val1 = f64_from_fastfield_u64(val, &self.field_type); let val2 = field.get_val(docs[1]);
self.stats.collect(val1); let val3 = field.get_val(docs[2]);
} let val4 = field.get_val(docs[3]);
} let val1 = f64_from_fastfield_u64(val1, &self.field_type);
} let val2 = f64_from_fastfield_u64(val2, &self.field_type);
} let val3 = f64_from_fastfield_u64(val3, &self.field_type);
let val4 = f64_from_fastfield_u64(val4, &self.field_type);
impl SegmentAggregationCollector for SegmentStatsCollector {
fn into_intermediate_aggregations_result(
self: Box<Self>,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateAggregationResults> {
let name = agg_with_accessor.metrics.keys[0].to_string();
let intermediate_metric_result = match self.collecting_for {
SegmentStatsType::Average => {
IntermediateMetricResult::Average(IntermediateAverage::from_collector(*self))
}
SegmentStatsType::Count => {
IntermediateMetricResult::Count(IntermediateCount::from_collector(*self))
}
SegmentStatsType::Max => {
IntermediateMetricResult::Max(IntermediateMax::from_collector(*self))
}
SegmentStatsType::Min => {
IntermediateMetricResult::Min(IntermediateMin::from_collector(*self))
}
SegmentStatsType::Stats => IntermediateMetricResult::Stats(self.stats),
SegmentStatsType::Sum => {
IntermediateMetricResult::Sum(IntermediateSum::from_collector(*self))
}
};
let metrics = Some(VecWithNames::from_entries(vec![(
name,
intermediate_metric_result,
)]));
Ok(IntermediateAggregationResults {
metrics,
buckets: None,
})
}
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
let accessor = &agg_with_accessor.metrics.values[0].accessor;
for val in accessor.values(doc) {
let val1 = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val1); self.stats.collect(val1);
self.stats.collect(val2);
self.stats.collect(val3);
self.stats.collect(val4);
}
for &doc in iter.remainder() {
let val = field.get_val(doc);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val);
} }
Ok(())
}
fn flush_staged_docs(
&mut self,
_agg_with_accessor: &AggregationsWithAccessor,
_force_flush: bool,
) -> crate::Result<()> {
Ok(())
} }
} }
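Editor's note: the stats hunks above feed every converted f64 into a running aggregate. A self-contained sketch of that kind of accumulator (illustrative only, not tantivy's IntermediateStats):

#[derive(Clone, Copy, Debug)]
struct Stats { count: u64, sum: f64, min: f64, max: f64 }

impl Stats {
    fn new() -> Self {
        Stats { count: 0, sum: 0.0, min: f64::INFINITY, max: f64::NEG_INFINITY }
    }
    fn collect(&mut self, val: f64) {
        self.count += 1;
        self.sum += val;
        self.min = self.min.min(val);
        self.max = self.max.max(val);
    }
    fn avg(&self) -> Option<f64> {
        if self.count == 0 { None } else { Some(self.sum / self.count as f64) }
    }
    // Per-segment stats merge into the index-level result.
    fn merge(&mut self, other: &Stats) {
        self.count += other.count;
        self.sum += other.sum;
        self.min = self.min.min(other.min);
        self.max = self.max.max(other.max);
    }
}

fn main() {
    let mut stats = Stats::new();
    for val in [1.0, 2.0, 6.0] {
        stats.collect(val);
    }
    assert_eq!(stats.avg(), Some(3.0));
    assert_eq!((stats.min, stats.max), (1.0, 6.0));
}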

View File

@@ -172,8 +172,8 @@ pub use collector::{
AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector, AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
MAX_BUCKET_COUNT, MAX_BUCKET_COUNT,
}; };
use columnar::MonotonicallyMappableToU64;
pub(crate) use date::format_date; pub(crate) use date::format_date;
use fastfield_codecs::MonotonicallyMappableToU64;
use itertools::Itertools; use itertools::Itertools;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -182,7 +182,7 @@ use crate::schema::Type;
/// Represents an associative array `(key => values)` in a very efficient manner. /// Represents an associative array `(key => values)` in a very efficient manner.
#[derive(Clone, PartialEq, Serialize, Deserialize)] #[derive(Clone, PartialEq, Serialize, Deserialize)]
pub(crate) struct VecWithNames<T: Clone> { pub(crate) struct VecWithNames<T: Clone> {
pub(crate) values: Vec<T>, values: Vec<T>,
keys: Vec<String>, keys: Vec<String>,
} }
@@ -248,6 +248,9 @@ impl<T: Clone> VecWithNames<T> {
fn values_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ { fn values_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ {
self.values.iter_mut() self.values.iter_mut()
} }
fn entries(&self) -> impl Iterator<Item = (&str, &T)> + '_ {
self.keys().zip(self.values.iter())
}
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
self.keys.is_empty() self.keys.is_empty()
} }
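Editor's note on entries() above: VecWithNames keeps keys and values in two parallel Vecs, so iteration order stays stable and collection-time access stays cache-friendly. A stripped-down illustration (not the full tantivy type):

struct VecWithNames<T> {
    keys: Vec<String>,
    values: Vec<T>,
}

impl<T> VecWithNames<T> {
    fn from_entries(entries: Vec<(String, T)>) -> Self {
        let (keys, values) = entries.into_iter().unzip();
        VecWithNames { keys, values }
    }
    // Zipping the two parallel vectors recovers the (key, value) pairs.
    fn entries(&self) -> impl Iterator<Item = (&str, &T)> + '_ {
        self.keys.iter().map(String::as_str).zip(self.values.iter())
    }
}

fn main() {
    let v = VecWithNames::from_entries(vec![("avg".to_string(), 1u32), ("max".to_string(), 2)]);
    let keys: Vec<&str> = v.entries().map(|(key, _)| key).collect();
    assert_eq!(keys, vec!["avg", "max"]);
}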
@@ -333,9 +336,8 @@ mod tests {
use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults; use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
use crate::aggregation::segment_agg_result::DOC_BLOCK_SIZE; use crate::aggregation::segment_agg_result::DOC_BLOCK_SIZE;
use crate::aggregation::DistributedAggregationCollector; use crate::aggregation::DistributedAggregationCollector;
use crate::indexer::NoMergePolicy;
use crate::query::{AllQuery, TermQuery}; use crate::query::{AllQuery, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING}; use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use crate::{DateTime, Index, Term}; use crate::{DateTime, Index, Term};
fn get_avg_req(field_name: &str) -> Aggregation { fn get_avg_req(field_name: &str) -> Aggregation {
@@ -430,7 +432,8 @@ mod tests {
let text_field = schema_builder.add_text_field("text", text_fieldtype.clone()); let text_field = schema_builder.add_text_field("text", text_fieldtype.clone());
let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype); let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype);
let string_field_id = schema_builder.add_text_field("string_id", STRING | FAST); let string_field_id = schema_builder.add_text_field("string_id", STRING | FAST);
let score_fieldtype = crate::schema::NumericOptions::default().set_fast(); let score_fieldtype =
crate::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone()); let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone()); let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype); let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
@@ -442,7 +445,6 @@ mod tests {
{ {
// let mut index_writer = index.writer_for_tests()?; // let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?; let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
for values in segment_and_values { for values in segment_and_values {
for (i, term) in values { for (i, term) in values {
let i = *i; let i = *i;
@@ -654,11 +656,13 @@ mod tests {
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let date_field = schema_builder.add_date_field("date", FAST); let date_field = schema_builder.add_date_field("date", FAST);
schema_builder.add_text_field("dummy_text", STRING); schema_builder.add_text_field("dummy_text", STRING);
let score_fieldtype = crate::schema::NumericOptions::default().set_fast(); let score_fieldtype =
crate::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone()); let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone()); let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let multivalue = crate::schema::NumericOptions::default().set_fast(); let multivalue =
crate::schema::NumericOptions::default().set_fast();
let scores_field_i64 = schema_builder.add_i64_field("scores_i64", multivalue); let scores_field_i64 = schema_builder.add_i64_field("scores_i64", multivalue);
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype); let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
@@ -1143,7 +1147,7 @@ mod tests {
let agg_res = avg_on_field("dummy_text"); let agg_res = avg_on_field("dummy_text");
assert_eq!( assert_eq!(
format!("{:?}", agg_res), format!("{:?}", agg_res),
r#"InvalidArgument("No numerical fast field found for field: dummy_text")"# r#"InvalidArgument("Only fast fields of type f64, u64, i64 are supported, but got Str ")"#
); );
let agg_res = avg_on_field("not_exist_field"); let agg_res = avg_on_field("not_exist_field");
@@ -1152,6 +1156,12 @@ mod tests {
r#"FieldNotFound("not_exist_field")"# r#"FieldNotFound("not_exist_field")"#
); );
let agg_res = avg_on_field("scores_i64");
assert_eq!(
format!("{:?}", agg_res),
r#"InvalidArgument("Invalid field cardinality on field scores_i64 expected SingleValue, but got MultiValues")"#
);
Ok(()) Ok(())
} }
@@ -1163,14 +1173,11 @@ mod tests {
use test::{self, Bencher}; use test::{self, Bencher};
use super::*; use super::*;
use crate::aggregation::bucket::{ use crate::aggregation::bucket::{HistogramAggregation, HistogramBounds, TermsAggregation};
CustomOrder, HistogramAggregation, HistogramBounds, Order, OrderTarget,
TermsAggregation,
};
use crate::aggregation::metric::StatsAggregation; use crate::aggregation::metric::StatsAggregation;
use crate::query::AllQuery; use crate::query::AllQuery;
fn get_test_index_bench(_merge_segments: bool) -> crate::Result<Index> { fn get_test_index_bench(merge_segments: bool) -> crate::Result<Index> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_fieldtype = crate::schema::TextOptions::default() let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
@@ -1182,19 +1189,20 @@ mod tests {
schema_builder.add_text_field("text_many_terms", STRING | FAST); schema_builder.add_text_field("text_many_terms", STRING | FAST);
let text_field_few_terms = let text_field_few_terms =
schema_builder.add_text_field("text_few_terms", STRING | FAST); schema_builder.add_text_field("text_few_terms", STRING | FAST);
let score_fieldtype = crate::schema::NumericOptions::default().set_fast(); let score_fieldtype =
crate::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone()); let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = let score_field_f64 =
schema_builder.add_f64_field("score_f64", score_fieldtype.clone()); schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype); let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
let index = Index::create_from_tempdir(schema_builder.build())?; let index = Index::create_from_tempdir(schema_builder.build())?;
let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"]; let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
let many_terms_data = (0..150_000) let many_terms_data = (0..15_000)
.map(|num| format!("author{}", num)) .map(|num| format!("author{}", num))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
{ {
let mut rng = thread_rng(); let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000)?; let mut index_writer = index.writer_for_tests()?;
// writing the segment // writing the segment
for _ in 0..1_000_000 { for _ in 0..1_000_000 {
let val: f64 = rng.gen_range(0.0..1_000_000.0); let val: f64 = rng.gen_range(0.0..1_000_000.0);
@@ -1209,6 +1217,14 @@ mod tests {
} }
index_writer.commit()?; index_writer.commit()?;
} }
if merge_segments {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
Ok(index) Ok(index)
} }
@@ -1360,42 +1376,7 @@ mod tests {
} }
#[bench] #[bench]
fn bench_aggregation_terms_many_with_sub_agg(b: &mut Bencher) { fn bench_aggregation_terms_many(b: &mut Bencher) {
let index = get_test_index_bench(false).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let sub_agg_req: Aggregations = vec![(
"average_f64".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score_f64".to_string()),
)),
)]
.into_iter()
.collect();
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_many_terms".to_string(),
..Default::default()
}),
sub_aggregation: sub_agg_req,
}),
)]
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
#[bench]
fn bench_aggregation_terms_many2(b: &mut Bencher) {
let index = get_test_index_bench(false).unwrap(); let index = get_test_index_bench(false).unwrap();
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
@@ -1420,36 +1401,6 @@ mod tests {
}); });
} }
#[bench]
fn bench_aggregation_terms_many_order_by_term(b: &mut Bencher) {
let index = get_test_index_bench(false).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_many_terms".to_string(),
order: Some(CustomOrder {
order: Order::Desc,
target: OrderTarget::Key,
}),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
#[bench] #[bench]
fn bench_aggregation_range_only(b: &mut Bencher) { fn bench_aggregation_range_only(b: &mut Bencher) {
let index = get_test_index_bench(false).unwrap(); let index = get_test_index_bench(false).unwrap();

View File

@@ -25,89 +25,15 @@ use crate::{DocId, TantivyError};
pub(crate) const DOC_BLOCK_SIZE: usize = 64; pub(crate) const DOC_BLOCK_SIZE: usize = 64;
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE]; pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug { #[derive(Clone, PartialEq)]
fn into_intermediate_aggregations_result( pub(crate) struct SegmentAggregationResultsCollector {
self: Box<Self>,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateAggregationResults>;
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()>;
fn flush_staged_docs(
&mut self,
agg_with_accessor: &AggregationsWithAccessor,
force_flush: bool,
) -> crate::Result<()>;
}
pub(crate) trait CollectorClone {
fn clone_box(&self) -> Box<dyn SegmentAggregationCollector>;
}
impl<T> CollectorClone for T
where T: 'static + SegmentAggregationCollector + Clone
{
fn clone_box(&self) -> Box<dyn SegmentAggregationCollector> {
Box::new(self.clone())
}
}
impl Clone for Box<dyn SegmentAggregationCollector> {
fn clone(&self) -> Box<dyn SegmentAggregationCollector> {
self.clone_box()
}
}
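Editor's note: the CollectorClone plumbing above is the standard Rust recipe for making a boxed trait object cloneable. The pattern in isolation, with a placeholder Collector trait rather than the tantivy one:

trait CollectorClone {
    fn clone_box(&self) -> Box<dyn Collector>;
}

trait Collector: CollectorClone {
    fn collect(&mut self, doc: u32);
}

impl<T> CollectorClone for T
where T: 'static + Collector + Clone
{
    fn clone_box(&self) -> Box<dyn Collector> {
        Box::new(self.clone())
    }
}

impl Clone for Box<dyn Collector> {
    fn clone(&self) -> Box<dyn Collector> {
        self.clone_box()
    }
}

#[derive(Clone)]
struct CountCollector { count: u64 }

impl Collector for CountCollector {
    fn collect(&mut self, _doc: u32) { self.count += 1; }
}

fn main() {
    let boxed: Box<dyn Collector> = Box::new(CountCollector { count: 0 });
    // Works because Box<dyn Collector> now implements Clone.
    let _copy = boxed.clone();
}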
pub(crate) fn build_segment_agg_collector(
req: &AggregationsWithAccessor,
) -> crate::Result<Box<dyn SegmentAggregationCollector>> {
// Single metric special case
if req.buckets.is_empty() && req.metrics.len() == 1 {
let req = &req.metrics.values[0];
let stats_collector = match &req.metric {
MetricAggregation::Average(AverageAggregation { .. }) => {
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Average)
}
MetricAggregation::Count(CountAggregation { .. }) => {
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Count)
}
MetricAggregation::Max(MaxAggregation { .. }) => {
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Max)
}
MetricAggregation::Min(MinAggregation { .. }) => {
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Min)
}
MetricAggregation::Stats(StatsAggregation { .. }) => {
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Stats)
}
MetricAggregation::Sum(SumAggregation { .. }) => {
SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Sum)
}
};
return Ok(Box::new(stats_collector));
}
let agg = GenericSegmentAggregationResultsCollector::from_req_and_validate(req)?;
Ok(Box::new(agg))
}
#[derive(Clone)]
/// The GenericSegmentAggregationResultsCollector is the generic version of the collector, which
/// can handle arbitrary complexity of sub-aggregations. Ideally we never have to pick this one
/// and can provide specialized versions instead that remove some of its overhead.
pub(crate) struct GenericSegmentAggregationResultsCollector {
pub(crate) metrics: Option<VecWithNames<SegmentMetricResultCollector>>, pub(crate) metrics: Option<VecWithNames<SegmentMetricResultCollector>>,
pub(crate) buckets: Option<VecWithNames<SegmentBucketResultCollector>>, pub(crate) buckets: Option<VecWithNames<SegmentBucketResultCollector>>,
staged_docs: DocBlock, staged_docs: DocBlock,
num_staged_docs: usize, num_staged_docs: usize,
} }
impl Default for GenericSegmentAggregationResultsCollector { impl Default for SegmentAggregationResultsCollector {
fn default() -> Self { fn default() -> Self {
Self { Self {
metrics: Default::default(), metrics: Default::default(),
@@ -118,7 +44,7 @@ impl Default for GenericSegmentAggregationResultsCollector {
} }
} }
impl Debug for GenericSegmentAggregationResultsCollector { impl Debug for SegmentAggregationResultsCollector {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SegmentAggregationResultsCollector") f.debug_struct("SegmentAggregationResultsCollector")
.field("metrics", &self.metrics) .field("metrics", &self.metrics)
@@ -129,9 +55,9 @@ impl Debug for GenericSegmentAggregationResultsCollector {
} }
} }
impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector { impl SegmentAggregationResultsCollector {
fn into_intermediate_aggregations_result( pub fn into_intermediate_aggregations_result(
self: Box<Self>, self,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateAggregationResults> { ) -> crate::Result<IntermediateAggregationResults> {
let buckets = if let Some(buckets) = self.buckets { let buckets = if let Some(buckets) = self.buckets {
@@ -149,7 +75,47 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
Ok(IntermediateAggregationResults { metrics, buckets }) Ok(IntermediateAggregationResults { metrics, buckets })
} }
fn collect( pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
let buckets = req
.buckets
.entries()
.map(|(key, req)| {
Ok((
key.to_string(),
SegmentBucketResultCollector::from_req_and_validate(req)?,
))
})
.collect::<crate::Result<Vec<(String, _)>>>()?;
let metrics = req
.metrics
.entries()
.map(|(key, req)| {
Ok((
key.to_string(),
SegmentMetricResultCollector::from_req_and_validate(req)?,
))
})
.collect::<crate::Result<Vec<(String, _)>>>()?;
let metrics = if metrics.is_empty() {
None
} else {
Some(VecWithNames::from_entries(metrics))
};
let buckets = if buckets.is_empty() {
None
} else {
Some(VecWithNames::from_entries(buckets))
};
Ok(SegmentAggregationResultsCollector {
metrics,
buckets,
staged_docs: [0; DOC_BLOCK_SIZE],
num_staged_docs: 0,
})
}
#[inline]
pub(crate) fn collect(
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &AggregationsWithAccessor,
@@ -162,7 +128,7 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
Ok(()) Ok(())
} }
fn flush_staged_docs( pub(crate) fn flush_staged_docs(
&mut self, &mut self,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &AggregationsWithAccessor,
force_flush: bool, force_flush: bool,
@@ -196,66 +162,6 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
} }
} }
impl GenericSegmentAggregationResultsCollector {
pub fn into_intermediate_aggregations_result(
self,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<IntermediateAggregationResults> {
let buckets = if let Some(buckets) = self.buckets {
let entries = buckets
.into_iter()
.zip(agg_with_accessor.buckets.values())
.map(|((key, bucket), acc)| Ok((key, bucket.into_intermediate_bucket_result(acc)?)))
.collect::<crate::Result<Vec<(String, _)>>>()?;
Some(VecWithNames::from_entries(entries))
} else {
None
};
let metrics = self.metrics.map(VecWithNames::from_other);
Ok(IntermediateAggregationResults { metrics, buckets })
}
pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
let buckets = req
.buckets
.iter()
.map(|(key, req)| {
Ok((
key.to_string(),
SegmentBucketResultCollector::from_req_and_validate(req)?,
))
})
.collect::<crate::Result<Vec<(String, _)>>>()?;
let metrics = req
.metrics
.iter()
.map(|(key, req)| {
Ok((
key.to_string(),
SegmentMetricResultCollector::from_req_and_validate(req)?,
))
})
.collect::<crate::Result<Vec<(String, _)>>>()?;
let metrics = if metrics.is_empty() {
None
} else {
Some(VecWithNames::from_entries(metrics))
};
let buckets = if buckets.is_empty() {
None
} else {
Some(VecWithNames::from_entries(buckets))
};
Ok(GenericSegmentAggregationResultsCollector {
metrics,
buckets,
staged_docs: [0; DOC_BLOCK_SIZE],
num_staged_docs: 0,
})
}
}
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub(crate) enum SegmentMetricResultCollector { pub(crate) enum SegmentMetricResultCollector {
Stats(SegmentStatsCollector), Stats(SegmentStatsCollector),
@@ -299,7 +205,7 @@ impl SegmentMetricResultCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) { pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) {
match self { match self {
SegmentMetricResultCollector::Stats(stats_collector) => { SegmentMetricResultCollector::Stats(stats_collector) => {
stats_collector.collect_block(doc, &metric.accessor); stats_collector.collect_block(doc, &*metric.accessor);
} }
} }
} }
@@ -309,7 +215,7 @@ impl SegmentMetricResultCollector {
/// segments. /// segments.
/// The typical structure of Map<Key, Bucket> is not suitable during collection for performance /// The typical structure of Map<Key, Bucket> is not suitable during collection for performance
/// reasons. /// reasons.
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
pub(crate) enum SegmentBucketResultCollector { pub(crate) enum SegmentBucketResultCollector {
Range(SegmentRangeCollector), Range(SegmentRangeCollector),
Histogram(Box<SegmentHistogramCollector>), Histogram(Box<SegmentHistogramCollector>),
@@ -337,7 +243,14 @@ impl SegmentBucketResultCollector {
pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> { pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
match &req.bucket_agg { match &req.bucket_agg {
BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new( BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new(
SegmentTermCollector::from_req_and_validate(terms_req, &req.sub_aggregation)?, SegmentTermCollector::from_req_and_validate(
terms_req,
&req.sub_aggregation,
req.field_type,
req.accessor
.as_multi()
.expect("unexpected fast field cardinality"),
)?,
))), ))),
BucketAggregationType::Range(range_req) => { BucketAggregationType::Range(range_req) => {
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate( Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
@@ -352,7 +265,9 @@ impl SegmentBucketResultCollector {
histogram, histogram,
&req.sub_aggregation, &req.sub_aggregation,
req.field_type, req.field_type,
&req.accessor, req.accessor
.as_single()
.expect("unexpected fast field cardinality"),
)?, )?,
))), ))),
} }

View File

@@ -150,7 +150,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// } /// }
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field("facet"); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
@@ -829,7 +829,7 @@ mod bench {
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
b.iter(|| { b.iter(|| {
let searcher = reader.searcher(); let searcher = reader.searcher();
let facet_collector = FacetCollector::for_field("facet"); let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap(); searcher.search(&AllQuery, &facet_collector).unwrap();
}); });
} }

View File

@@ -120,7 +120,7 @@ where
let fast_field_reader = segment_reader let fast_field_reader = segment_reader
.fast_fields() .fast_fields()
.column_first_or_default(schema.get_field_name(self.field))?; .typed_column_first_or_default(schema.get_field_name(self.field))?;
let segment_collector = self let segment_collector = self
.collector .collector

View File

@@ -1,6 +1,6 @@
use std::sync::Arc; use std::sync::Arc;
use columnar::{BytesColumn, ColumnValues}; use columnar::ColumnValues;
use super::*; use super::*;
use crate::collector::{Count, FilterCollector, TopDocs}; use crate::collector::{Count, FilterCollector, TopDocs};
@@ -212,73 +212,62 @@ impl SegmentCollector for FastFieldSegmentCollector {
} }
} }
/// Collects in order all of the fast field bytes for all of the // /// Collects in order all of the fast field bytes for all of the
/// docs in the `DocSet` // /// docs in the `DocSet`
/// // ///
/// This collector is mainly useful for tests. // /// This collector is mainly useful for tests.
/// It is very slow. // pub struct BytesFastFieldTestCollector {
pub struct BytesFastFieldTestCollector { // field: Field,
field: String, // }
}
pub struct BytesFastFieldSegmentCollector { // pub struct BytesFastFieldSegmentCollector {
vals: Vec<u8>, // vals: Vec<u8>,
column_opt: Option<BytesColumn>, // reader: BytesFastFieldReader,
buffer: Vec<u8>, // }
}
impl BytesFastFieldTestCollector { // impl BytesFastFieldTestCollector {
pub fn for_field(field: impl ToString) -> BytesFastFieldTestCollector { // pub fn for_field(field: Field) -> BytesFastFieldTestCollector {
BytesFastFieldTestCollector { // BytesFastFieldTestCollector { field }
field: field.to_string(), // }
} // }
}
}
impl Collector for BytesFastFieldTestCollector { // impl Collector for BytesFastFieldTestCollector {
type Fruit = Vec<u8>; // type Fruit = Vec<u8>;
type Child = BytesFastFieldSegmentCollector; // type Child = BytesFastFieldSegmentCollector;
fn for_segment( // fn for_segment(
&self, // &self,
_segment_local_id: u32, // _segment_local_id: u32,
segment_reader: &SegmentReader, // segment_reader: &SegmentReader,
) -> crate::Result<BytesFastFieldSegmentCollector> { // ) -> crate::Result<BytesFastFieldSegmentCollector> {
let column_opt = segment_reader.fast_fields().bytes(&self.field)?; // let reader = segment_reader.fast_fields().bytes(self.field)?;
Ok(BytesFastFieldSegmentCollector { // Ok(BytesFastFieldSegmentCollector {
vals: Vec::new(), // vals: Vec::new(),
column_opt, // reader,
buffer: Vec::new(), // })
}) // }
}
fn requires_scoring(&self) -> bool { // fn requires_scoring(&self) -> bool {
false // false
} // }
fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> { // fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> {
Ok(children.into_iter().flat_map(|c| c.into_iter()).collect()) // Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
} // }
} // }
impl SegmentCollector for BytesFastFieldSegmentCollector { // impl SegmentCollector for BytesFastFieldSegmentCollector {
type Fruit = Vec<u8>; // type Fruit = Vec<u8>;
fn collect(&mut self, doc: DocId, _score: Score) { // fn collect(&mut self, doc: u32, _score: Score) {
if let Some(column) = self.column_opt.as_ref() { // let data = self.reader.get_bytes(doc);
for term_ord in column.term_ords(doc) { // self.vals.extend(data);
let (vals, buffer) = (&mut self.vals, &mut self.buffer); // }
if column.ord_to_bytes(term_ord, buffer).unwrap() {
vals.extend(&buffer[..]);
}
}
}
}
fn harvest(self) -> <Self as SegmentCollector>::Fruit { // fn harvest(self) -> <Self as SegmentCollector>::Fruit {
self.vals // self.vals
} // }
} // }
fn make_test_searcher() -> crate::Result<Searcher> { fn make_test_searcher() -> crate::Result<Searcher> {
let schema = Schema::builder().build(); let schema = Schema::builder().build();

View File

@@ -235,6 +235,7 @@ impl TopDocs {
/// # use tantivy::query::{Query, QueryParser}; /// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher; /// use tantivy::Searcher;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
/// ///
/// # fn main() -> tantivy::Result<()> { /// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder(); /// # let mut schema_builder = Schema::builder();
@@ -251,7 +252,7 @@ impl TopDocs {
/// # index_writer.commit()?; /// # index_writer.commit()?;
/// # let reader = index.reader()?; /// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?; /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query)?; /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
/// # vec![(97u64, DocAddress::new(0u32, 1)), /// # vec![(97u64, DocAddress::new(0u32, 1)),
/// # (80u64, DocAddress::new(0u32, 3))]); /// # (80u64, DocAddress::new(0u32, 3))]);
@@ -261,7 +262,8 @@ impl TopDocs {
/// /// collects the top 10 documents, order by the u64-`field` /// /// collects the top 10 documents, order by the u64-`field`
/// /// given in argument. /// /// given in argument.
/// fn docs_sorted_by_rating(searcher: &Searcher, /// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &dyn Query) /// query: &dyn Query,
/// rating_field: Field)
/// -> tantivy::Result<Vec<(u64, DocAddress)>> { /// -> tantivy::Result<Vec<(u64, DocAddress)>> {
/// ///
/// // This is where we build our topdocs collector /// // This is where we build our topdocs collector
@@ -269,7 +271,7 @@ impl TopDocs {
/// // Note the `rating_field` needs to be a FAST field here. /// // Note the `rating_field` needs to be a FAST field here.
/// let top_books_by_rating = TopDocs /// let top_books_by_rating = TopDocs
/// ::with_limit(10) /// ::with_limit(10)
/// .order_by_u64_field("rating"); /// .order_by_u64_field(rating_field);
/// ///
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for /// // The `u64` in the pair is the value of our fast field for
@@ -321,21 +323,22 @@ impl TopDocs {
/// # use tantivy::query::{Query, AllQuery}; /// # use tantivy::query::{Query, AllQuery};
/// use tantivy::Searcher; /// use tantivy::Searcher;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
/// ///
/// # fn main() -> tantivy::Result<()> { /// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder(); /// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("company", TEXT); /// # let title = schema_builder.add_text_field("company", TEXT);
/// # let revenue = schema_builder.add_i64_field("revenue", FAST); /// # let rating = schema_builder.add_i64_field("revenue", FAST);
/// # let schema = schema_builder.build(); /// # let schema = schema_builder.build();
/// # /// #
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// # index_writer.add_document(doc!(title => "MadCow Inc.", revenue => 92_000_000i64))?; /// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64))?;
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", revenue => 119_000_000i64))?; /// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64))?;
/// # index_writer.add_document(doc!(title => "Declining Cow", revenue => -63_000_000i64))?; /// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64))?;
/// # assert!(index_writer.commit().is_ok()); /// # assert!(index_writer.commit().is_ok());
/// # let reader = index.reader()?; /// # let reader = index.reader()?;
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, "revenue")?; /// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
/// # vec![(119_000_000i64, DocAddress::new(0, 1)), /// # vec![(119_000_000i64, DocAddress::new(0, 1)),
/// # (92_000_000i64, DocAddress::new(0, 0))]); /// # (92_000_000i64, DocAddress::new(0, 0))]);
@@ -346,7 +349,7 @@ impl TopDocs {
/// /// given in argument. /// /// given in argument.
/// fn docs_sorted_by_revenue(searcher: &Searcher, /// fn docs_sorted_by_revenue(searcher: &Searcher,
/// query: &dyn Query, /// query: &dyn Query,
/// revenue_field: &str) /// revenue_field: Field)
/// -> tantivy::Result<Vec<(i64, DocAddress)>> { /// -> tantivy::Result<Vec<(i64, DocAddress)>> {
/// ///
/// // This is where we build our topdocs collector /// // This is where we build our topdocs collector
@@ -355,7 +358,7 @@ impl TopDocs {
/// // type `sort_by_field`. revenue_field here is a FAST i64 field. /// // type `sort_by_field`. revenue_field here is a FAST i64 field.
/// let top_company_by_revenue = TopDocs /// let top_company_by_revenue = TopDocs
/// ::with_limit(2) /// ::with_limit(2)
/// .order_by_fast_field("revenue"); /// .order_by_fast_field(revenue_field);
/// ///
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `i64` in the pair is the value of our fast field for /// // The `i64` in the pair is the value of our fast field for
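Both doc examples in this hunk contrast the two calling conventions of the sorting collectors: one keyed by fast-field name, the other by a schema `Field` handle. A minimal sketch of the name-based form, assuming an existing `searcher`, a parsed `query`, and a FAST u64 field called "rating":

// Hypothetical sketch, not part of the diff above.
let top_books_by_rating = TopDocs::with_limit(10).order_by_u64_field("rating");
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_books_by_rating)?;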


@@ -905,14 +905,12 @@ mod tests {
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let index = Index::create(directory.clone(), schema, IndexSettings::default())?; let index = Index::create(directory.clone(), schema, IndexSettings::default())?;
let mut writer = index.writer_with_num_threads(1, 32_000_000).unwrap(); let mut writer = index.writer_with_num_threads(8, 24_000_000).unwrap();
for _seg in 0..8 { for i in 0u64..8_000u64 {
for i in 0u64..1_000u64 { writer.add_document(doc!(field => i))?;
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
} }
writer.commit()?;
let mem_right_after_commit = directory.total_mem_usage(); let mem_right_after_commit = directory.total_mem_usage();
let reader = index let reader = index


@@ -135,8 +135,6 @@ impl InvertedIndexReader {
term_info: &TermInfo, term_info: &TermInfo,
option: IndexRecordOption, option: IndexRecordOption,
) -> io::Result<SegmentPostings> { ) -> io::Result<SegmentPostings> {
let option = option.downgrade(self.record_option);
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?; let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
let position_reader = { let position_reader = {
if option.has_positions() { if option.has_positions() {


@@ -249,7 +249,7 @@ impl SearcherInner {
index: Index, index: Index,
segment_readers: Vec<SegmentReader>, segment_readers: Vec<SegmentReader>,
generation: TrackedObject<SearcherGeneration>, generation: TrackedObject<SearcherGeneration>,
doc_store_cache_num_blocks: usize, doc_store_cache_size: usize,
) -> io::Result<SearcherInner> { ) -> io::Result<SearcherInner> {
assert_eq!( assert_eq!(
&segment_readers &segment_readers
@@ -261,7 +261,7 @@ impl SearcherInner {
); );
let store_readers: Vec<StoreReader> = segment_readers let store_readers: Vec<StoreReader> = segment_readers
.iter() .iter()
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_num_blocks)) .map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
.collect::<io::Result<Vec<_>>>()?; .collect::<io::Result<Vec<_>>>()?;
Ok(SearcherInner { Ok(SearcherInner {


@@ -9,7 +9,7 @@ use crate::directory::{CompositeFile, FileSlice};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders}; use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders}; use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::{Field, IndexRecordOption, Schema, Type}; use crate::schema::{Field, FieldType, IndexRecordOption, Schema, Type};
use crate::space_usage::SegmentSpaceUsage; use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
@@ -99,7 +99,7 @@ impl SegmentReader {
"`{field_name}` is not a facet field.`" "`{field_name}` is not a facet field.`"
))); )));
} }
let Some(facet_column) = self.fast_fields().str(field_name)? else { let Some(facet_column) = self.fast_fields().str_column_opt(field_name)? else {
panic!("Facet Field `{field_name}` is missing. This should not happen"); panic!("Facet Field `{field_name}` is missing. This should not happen");
}; };
Ok(FacetReader::new(facet_column)) Ok(FacetReader::new(facet_column))
@@ -128,12 +128,9 @@ impl SegmentReader {
&self.fieldnorm_readers &self.fieldnorm_readers
} }
/// Accessor to the segment's [`StoreReader`](crate::store::StoreReader). /// Accessor to the segment's `StoreReader`.
/// pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
/// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU. StoreReader::open(self.store_file.clone(), cache_size)
/// The size of blocks is configurable, this should be reflected in the
pub fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader> {
StoreReader::open(self.store_file.clone(), cache_num_blocks)
} }
/// Open a new segment for reading. /// Open a new segment for reading.


@@ -29,7 +29,7 @@ pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader; pub use self::facet_reader::FacetReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::writer::FastFieldsWriter; pub use self::writer::FastFieldsWriter;
use crate::schema::Type; use crate::schema::{Type, Value};
use crate::DateTime; use crate::DateTime;
mod alive_bitset; mod alive_bitset;
@@ -115,13 +115,31 @@ impl columnar::MonotonicallyMappableToU64 for DateTime {
} }
} }
fn unexpected_value(expected: &str, actual: &Value) -> crate::TantivyError {
crate::TantivyError::SchemaError(format!(
"Expected a {:?} in fast field, but got {:?}",
expected, actual
))
}
fn value_to_u64(value: &Value) -> crate::Result<u64> {
let value = match value {
Value::U64(val) => val.to_u64(),
Value::I64(val) => val.to_u64(),
Value::F64(val) => val.to_u64(),
Value::Bool(val) => val.to_u64(),
Value::Date(val) => val.to_u64(),
_ => return Err(unexpected_value("u64/i64/f64/bool/date", value)),
};
Ok(value)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::ops::{Range, RangeInclusive}; use std::ops::RangeInclusive;
use std::path::Path; use std::path::Path;
use columnar::Column;
use common::{HasLen, TerminatingWrite}; use common::{HasLen, TerminatingWrite};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
@@ -129,7 +147,7 @@ mod tests {
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use super::*; use super::*;
use crate::directory::{Directory, RamDirectory, WritePtr}; use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
use crate::merge_policy::NoMergePolicy; use crate::merge_policy::NoMergePolicy;
use crate::schema::{ use crate::schema::{
Document, Facet, FacetOptions, Field, Schema, SchemaBuilder, FAST, INDEXED, STRING, TEXT, Document, Facet, FacetOptions, Field, Schema, SchemaBuilder, FAST, INDEXED, STRING, TEXT,
@@ -418,38 +436,39 @@ mod tests {
test_intfastfield_permutation_with_data(permutation); test_intfastfield_permutation_with_data(permutation);
} }
#[test] // TODO reenable when merge is here.
fn test_merge_missing_date_fast_field() { // #[test]
let mut schema_builder = Schema::builder(); // fn test_merge_missing_date_fast_field() {
let date_field = schema_builder.add_date_field("date", FAST); // let mut schema_builder = Schema::builder();
let schema = schema_builder.build(); // let date_field = schema_builder.add_date_field("date", FAST);
let index = Index::create_in_ram(schema); // let schema = schema_builder.build();
let mut index_writer = index.writer_for_tests().unwrap(); // let index = Index::create_in_ram(schema);
index_writer.set_merge_policy(Box::new(NoMergePolicy)); // let mut index_writer = index.writer_for_tests().unwrap();
index_writer // index_writer.set_merge_policy(Box::new(NoMergePolicy));
.add_document(doc!(date_field =>DateTime::from_utc(OffsetDateTime::now_utc()))) // index_writer
.unwrap(); // .add_document(doc!(date_field =>DateTime::from_utc(OffsetDateTime::now_utc())))
index_writer.commit().unwrap(); // .unwrap();
index_writer.add_document(doc!()).unwrap(); // index_writer.commit().unwrap();
index_writer.commit().unwrap(); // index_writer.add_document(doc!()).unwrap();
let reader = index.reader().unwrap(); // index_writer.commit().unwrap();
let segment_ids: Vec<SegmentId> = reader // let reader = index.reader().unwrap();
.searcher() // let segment_ids: Vec<SegmentId> = reader
.segment_readers() // .searcher()
.iter() // .segment_readers()
.map(SegmentReader::segment_id) // .iter()
.collect(); // .map(SegmentReader::segment_id)
assert_eq!(segment_ids.len(), 2); // .collect();
index_writer.merge(&segment_ids[..]).wait().unwrap(); // assert_eq!(segment_ids.len(), 2);
reader.reload().unwrap(); // index_writer.merge(&segment_ids[..]).wait().unwrap();
assert_eq!(reader.searcher().segment_readers().len(), 1); // reader.reload().unwrap();
} // assert_eq!(reader.searcher().segment_readers().len(), 1);
// }
fn get_vals_for_docs(column: &Column<u64>, docs: Range<u32>) -> Vec<u64> { // fn get_vals_for_docs(column: &columnar::Column<u64>, docs: Range<u32>) -> Vec<u64> {
docs.into_iter() // docs.into_iter()
.flat_map(|doc| column.values(doc)) // .flat_map(|doc| column.values(doc))
.collect() // .collect()
} // }
#[test] #[test]
fn test_text_fastfield() { fn test_text_fastfield() {
@@ -505,61 +524,57 @@ mod tests {
assert!(str_column.ord_to_str(0, &mut str_term).unwrap()); assert!(str_column.ord_to_str(0, &mut str_term).unwrap());
assert_eq!("AAAAA", &str_term); assert_eq!("AAAAA", &str_term);
let inverted_index = segment_reader.inverted_index(text_field).unwrap(); // let inverted_index = segment_reader.inverted_index(text_field)?;
assert_eq!(inverted_index.terms().num_terms(), 3); // assert_eq!(inverted_index.terms().num_terms(), 3);
let mut bytes = vec![]; // let mut bytes = vec![];
assert!(inverted_index.terms().ord_to_term(0, &mut bytes).unwrap()); // assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
assert_eq!(bytes, "aaaaa".as_bytes()); // assert_eq!(bytes, "aaaaa".as_bytes());
} // }
{ // {
// second segment // // second segment
let mut index_writer = index.writer_for_tests().unwrap(); // let mut index_writer = index.writer_for_tests()?;
index_writer // index_writer.add_document(doc!(
.add_document(doc!( // text_field => "AAAAA", // term_ord 0
text_field => "AAAAA", // term_ord 0 // ))?;
))
.unwrap();
index_writer // index_writer.add_document(doc!(
.add_document(doc!( // text_field => "CCCCC AAAAA", // term_ord 1, after merge 2
text_field => "CCCCC AAAAA", // term_ord 1, after merge 2 // ))?;
))
.unwrap();
index_writer.add_document(doc!()).unwrap(); // index_writer.add_document(doc!())?;
index_writer.commit().unwrap(); // index_writer.commit()?;
let reader = index.reader().unwrap(); // let reader = index.reader()?;
let searcher = reader.searcher(); // let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2); // assert_eq!(searcher.segment_readers().len(), 2);
let segment_reader = searcher.segment_reader(1); // let segment_reader = searcher.segment_reader(1);
let fast_fields = segment_reader.fast_fields(); // let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.str("text").unwrap().unwrap(); // let text_fast_field = fast_fields.u64s("text").unwrap();
assert_eq!(&get_vals_for_docs(&text_fast_field.ords(), 0..2), &[0, 1]); // assert_eq!(get_vals_for_docs(&text_fast_field, 0..3), vec![0, 1, 0]);
} }
// TODO uncomment once merging is available // TODO uncomment once merging is available
// Merging the segments // Merging the segments
{ // {
let segment_ids = index.searchable_segment_ids().unwrap(); // let segment_ids = index.searchable_segment_ids()?;
let mut index_writer = index.writer_for_tests().unwrap(); // let mut index_writer = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait().unwrap(); // index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads().unwrap(); // index_writer.wait_merging_threads()?;
} // }
//
let reader = index.reader().unwrap(); // let reader = index.reader()?;
let searcher = reader.searcher(); // let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); // let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields(); // let fast_fields = segment_reader.fast_fields();
let text_column = fast_fields.str("text").unwrap().unwrap(); // let text_fast_field = fast_fields.u64s("text").unwrap();
//
assert_eq!( // assert_eq!(
get_vals_for_docs(text_column.ords(), 0..8), // get_vals_for_docs(&text_fast_field, 0..8),
vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2] // vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2, 0]
); // );
} }
#[test] #[test]
@@ -573,7 +588,11 @@ mod tests {
writer.commit().unwrap(); writer.commit().unwrap();
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let str_column = segment_reader.fast_fields().str("text").unwrap().unwrap(); let str_column = segment_reader
.fast_fields()
.str_column_opt("text")
.unwrap()
.unwrap();
// The string values are not sorted here. // The string values are not sorted here.
let term_ords: Vec<u64> = str_column.term_ords(0u32).collect(); let term_ords: Vec<u64> = str_column.term_ords(0u32).collect();
assert_eq!(&term_ords, &[1, 0]); assert_eq!(&term_ords, &[1, 0]);
@@ -599,162 +618,155 @@ mod tests {
assert_eq!(&facet_ords, &[0, 1]); assert_eq!(&facet_ords, &[0, 1]);
} }
#[test] // #[test]
fn test_string_fastfield() -> crate::Result<()> { // fn test_string_fastfield() -> crate::Result<()> {
let mut schema_builder = Schema::builder(); // let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING | FAST); // let text_field = schema_builder.add_text_field("text", STRING | FAST);
let schema = schema_builder.build(); // let schema = schema_builder.build();
let index = Index::create_in_ram(schema); // let index = Index::create_in_ram(schema);
{ // {
// first segment // // first segment
let mut index_writer = index.writer_for_tests()?; // let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy)); // index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!( // index_writer.add_document(doc!(
text_field => "BBBBB", // term_ord 1 // text_field => "BBBBB", // term_ord 1
))?; // ))?;
index_writer.add_document(doc!())?; // index_writer.add_document(doc!())?;
index_writer.add_document(doc!( // index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0 // text_field => "AAAAA", // term_ord 0
))?; // ))?;
index_writer.add_document(doc!( // index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0 // text_field => "AAAAA", // term_ord 0
))?; // ))?;
index_writer.add_document(doc!( // index_writer.add_document(doc!(
text_field => "zumberthree", // term_ord 2, after merge term_ord 3 // text_field => "zumberthree", // term_ord 2, after merge term_ord 3
))?; // ))?;
index_writer.add_document(doc!())?; // index_writer.add_document(doc!())?;
index_writer.commit()?; // index_writer.commit()?;
let reader = index.reader()?; // let reader = index.reader()?;
let searcher = reader.searcher(); // let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); // assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0); // let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields(); // let fast_fields = segment_reader.fast_fields();
let text_col = fast_fields.str("text").unwrap().unwrap(); // let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(get_vals_for_docs(&text_col.ords(), 0..6), vec![1, 0, 0, 2]); // assert_eq!(get_vals_for_docs(&text_fast_field, 0..6), vec![1, 0, 0, 2]);
let inverted_index = segment_reader.inverted_index(text_field)?; // let inverted_index = segment_reader.inverted_index(text_field)?;
assert_eq!(inverted_index.terms().num_terms(), 3); // assert_eq!(inverted_index.terms().num_terms(), 3);
let mut bytes = vec![]; // let mut bytes = vec![];
assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?); // assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
assert_eq!(bytes, "AAAAA".as_bytes()); // assert_eq!(bytes, "AAAAA".as_bytes());
} // }
{ // {
// second segment // // second segment
let mut index_writer = index.writer_for_tests()?; // let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!( // index_writer.add_document(doc!(
text_field => "AAAAA", // term_ord 0 // text_field => "AAAAA", // term_ord 0
))?; // ))?;
index_writer.add_document(doc!( // index_writer.add_document(doc!(
text_field => "CCCCC", // term_ord 1, after merge 2 // text_field => "CCCCC", // term_ord 1, after merge 2
))?; // ))?;
index_writer.add_document(doc!())?; // index_writer.add_document(doc!())?;
index_writer.commit()?; // index_writer.commit()?;
let reader = index.reader()?; // let reader = index.reader()?;
let searcher = reader.searcher(); // let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2); // assert_eq!(searcher.segment_readers().len(), 2);
let segment_reader = searcher.segment_reader(1); // let segment_reader = searcher.segment_reader(1);
let fast_fields = segment_reader.fast_fields(); // let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.str("text").unwrap().unwrap(); // let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!(&get_vals_for_docs(text_fast_field.ords(), 0..2), &[0, 1]); // assert_eq!(get_vals_for_docs(&text_fast_field, 0..2), vec![0, 1]);
} // }
// Merging the segments // // Merging the segments
{ // {
let segment_ids = index.searchable_segment_ids()?; // let segment_ids = index.searchable_segment_ids()?;
let mut index_writer = index.writer_for_tests()?; // let mut index_writer = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?; // index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?; // index_writer.wait_merging_threads()?;
} // }
let reader = index.reader()?; // let reader = index.reader()?;
let searcher = reader.searcher(); // let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); // let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields(); // let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.str("text").unwrap().unwrap(); // let text_fast_field = fast_fields.u64s(text_field).unwrap();
assert_eq!( // assert_eq!(
get_vals_for_docs(&text_fast_field.ords(), 0..9), // get_vals_for_docs(&text_fast_field, 0..9),
vec![1, 0, 0, 3 /* next segment */, 0, 2] // vec![1, 0, 0, 3 /* next segment */, 0, 2]
); // );
Ok(()) // Ok(())
} // }
#[test] // #[test]
fn test_datefastfield() -> crate::Result<()> { // fn test_datefastfield() -> crate::Result<()> {
let mut schema_builder = Schema::builder(); // let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field( // let date_field = schema_builder.add_date_field(
"date", // "date",
DateOptions::from(FAST).set_precision(DatePrecision::Microseconds), // DateOptions::from(FAST).set_precision(DatePrecision::Microseconds),
); // );
let multi_date_field = schema_builder.add_date_field( // let multi_date_field = schema_builder.add_date_field(
"multi_date", // "multi_date",
DateOptions::default() // DateOptions::default()
.set_precision(DatePrecision::Microseconds) // .set_precision(DatePrecision::Microseconds)
.set_fast(), // .set_fast(),
); // );
let schema = schema_builder.build(); // let schema = schema_builder.build();
let index = Index::create_in_ram(schema); // let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; // let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy)); // index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!( // index_writer.add_document(doc!(
date_field => DateTime::from_u64(1i64.to_u64()), // date_field => DateTime::from_u64(1i64.to_u64()),
multi_date_field => DateTime::from_u64(2i64.to_u64()), // multi_date_field => DateTime::from_u64(2i64.to_u64()),
multi_date_field => DateTime::from_u64(3i64.to_u64()) // multi_date_field => DateTime::from_u64(3i64.to_u64())
))?; // ))?;
index_writer.add_document(doc!( // index_writer.add_document(doc!(
date_field => DateTime::from_u64(4i64.to_u64()) // date_field => DateTime::from_u64(4i64.to_u64())
))?; // ))?;
index_writer.add_document(doc!( // index_writer.add_document(doc!(
multi_date_field => DateTime::from_u64(5i64.to_u64()), // multi_date_field => DateTime::from_u64(5i64.to_u64()),
multi_date_field => DateTime::from_u64(6i64.to_u64()) // multi_date_field => DateTime::from_u64(6i64.to_u64())
))?; // ))?;
index_writer.commit()?; // index_writer.commit()?;
let reader = index.reader()?; // let reader = index.reader()?;
let searcher = reader.searcher(); // let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); // assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0); // let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields(); // let fast_fields = segment_reader.fast_fields();
let date_fast_field = fast_fields // let date_fast_field = fast_fields.date(date_field).unwrap();
.column_opt::<columnar::DateTime>("date") // let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
.unwrap() // let mut dates = vec![];
.unwrap() // {
.first_or_default_col(Default::default()); // assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
let dates_fast_field = fast_fields // dates_fast_field.get_vals(0u32, &mut dates);
.column_opt::<columnar::DateTime>("multi_date") // assert_eq!(dates.len(), 2);
.unwrap() // assert_eq!(dates[0].into_timestamp_micros(), 2i64);
.unwrap(); // assert_eq!(dates[1].into_timestamp_micros(), 3i64);
let mut dates = vec![]; // }
{ // {
assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64); // assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
dates_fast_field.fill_vals(0u32, &mut dates); // dates_fast_field.get_vals(1u32, &mut dates);
assert_eq!(dates.len(), 2); // assert!(dates.is_empty());
assert_eq!(dates[0].into_timestamp_micros(), 2i64); // }
assert_eq!(dates[1].into_timestamp_micros(), 3i64); // {
} // assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
{ // dates_fast_field.get_vals(2u32, &mut dates);
assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64); // assert_eq!(dates.len(), 2);
dates_fast_field.fill_vals(1u32, &mut dates); // assert_eq!(dates[0].into_timestamp_micros(), 5i64);
assert!(dates.is_empty()); // assert_eq!(dates[1].into_timestamp_micros(), 6i64);
} // }
{ // Ok(())
assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64); // }
dates_fast_field.fill_vals(2u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].into_timestamp_micros(), 5i64);
assert_eq!(dates[1].into_timestamp_micros(), 6i64);
}
Ok(())
}
#[test] #[test]
pub fn test_fastfield_bool_small() { pub fn test_fastfield_bool_small() {
@@ -964,7 +976,8 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let fastfields = searcher.segment_reader(0u32).fast_fields(); let fastfields = searcher.segment_reader(0u32).fast_fields();
let column: Column<Ipv6Addr> = fastfields.column_opt("ip").unwrap().unwrap(); let column: columnar::Column<Ipv6Addr> =
fastfields.typed_column_opt("ip").unwrap().unwrap();
assert_eq!(column.num_rows(), 3); assert_eq!(column.num_rows(), 3);
assert_eq!(column.first(0), None); assert_eq!(column.first(0), None);
assert_eq!(column.first(1), Some(ip_addr)); assert_eq!(column.first(1), Some(ip_addr));


@@ -3,8 +3,8 @@ use std::net::Ipv6Addr;
use std::sync::Arc; use std::sync::Arc;
use columnar::{ use columnar::{
BytesColumn, Column, ColumnType, ColumnValues, ColumnarReader, DynamicColumn, BytesColumn, ColumnType, ColumnValues, ColumnarReader, DynamicColumn, DynamicColumnHandle,
DynamicColumnHandle, HasAssociatedColumnType, StrColumn, HasAssociatedColumnType, StrColumn,
}; };
use crate::directory::FileSlice; use crate::directory::FileSlice;
@@ -46,18 +46,33 @@ impl FastFieldReaders {
Ok(PerFieldSpaceUsage::new(per_field_usages)) Ok(PerFieldSpaceUsage::new(per_field_usages))
} }
/// Returns a typed column associated to a given field name. pub fn typed_column_opt<T>(
/// &self,
/// If no column associated with that field_name exists, field_name: &str,
/// or existing columns do not have the required type, ) -> crate::Result<Option<columnar::Column<T>>>
/// returns `None`.
pub fn column_opt<T>(&self, field_name: &str) -> crate::Result<Option<Column<T>>>
where where
T: PartialOrd + Copy + HasAssociatedColumnType + Send + Sync + 'static, T: PartialOrd + Copy + HasAssociatedColumnType + Send + Sync + 'static,
DynamicColumn: Into<Option<Column<T>>>, DynamicColumn: Into<Option<columnar::Column<T>>>,
{ {
let column_type = T::column_type(); let column_type = T::column_type();
let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, column_type)? let Some(dynamic_column_handle) = self.column_handle(field_name, column_type)?
else {
return Ok(None);
};
let dynamic_column = dynamic_column_handle.open()?;
Ok(dynamic_column.into())
}
pub fn bytes_column_opt(&self, field_name: &str) -> crate::Result<Option<BytesColumn>> {
let Some(dynamic_column_handle) = self.column_handle(field_name, ColumnType::Bytes)?
else {
return Ok(None);
};
let dynamic_column = dynamic_column_handle.open()?;
Ok(dynamic_column.into())
}
pub fn str_column_opt(&self, field_name: &str) -> crate::Result<Option<StrColumn>> {
let Some(dynamic_column_handle) = self.column_handle(field_name, ColumnType::Str)?
else { else {
return Ok(None); return Ok(None);
}; };
@@ -65,7 +80,6 @@ impl FastFieldReaders {
Ok(dynamic_column.into()) Ok(dynamic_column.into())
} }
/// Returns the number of `bytes` associated with a column.
pub fn column_num_bytes(&self, field: &str) -> crate::Result<usize> { pub fn column_num_bytes(&self, field: &str) -> crate::Result<usize> {
Ok(self Ok(self
.columnar .columnar
@@ -75,17 +89,15 @@ impl FastFieldReaders {
.sum()) .sum())
} }
/// Returns a typed column value object. pub fn typed_column_first_or_default<T>(
/// &self,
/// In that column value: field: &str,
/// - Rows with no value are associated with the default value. ) -> crate::Result<Arc<dyn ColumnValues<T>>>
/// - Rows with several values are associated with the first value.
pub fn column_first_or_default<T>(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<T>>>
where where
T: PartialOrd + Copy + HasAssociatedColumnType + Send + Sync + 'static, T: PartialOrd + Copy + HasAssociatedColumnType + Send + Sync + 'static,
DynamicColumn: Into<Option<Column<T>>>, DynamicColumn: Into<Option<columnar::Column<T>>>,
{ {
let col_opt: Option<Column<T>> = self.column_opt(field)?; let col_opt: Option<columnar::Column<T>> = self.typed_column_opt(field)?;
if let Some(col) = col_opt { if let Some(col) = col_opt {
Ok(col.first_or_default_col(T::default_value())) Ok(col.first_or_default_col(T::default_value()))
} else { } else {
@@ -99,45 +111,32 @@ impl FastFieldReaders {
/// ///
/// If `field` is not a u64 fast field, this method returns an Error. /// If `field` is not a u64 fast field, this method returns an Error.
pub fn u64(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<u64>>> { pub fn u64(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<u64>>> {
self.column_first_or_default(field) self.typed_column_first_or_default(field)
} }
/// Returns the `date` fast field reader associated with `field`. /// Returns the `date` fast field reader associated with `field`.
/// ///
/// If `field` is not a date fast field, this method returns an Error. /// If `field` is not a date fast field, this method returns an Error.
pub fn date(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<columnar::DateTime>>> { pub fn date(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<columnar::DateTime>>> {
self.column_first_or_default(field) self.typed_column_first_or_default(field)
} }
/// Returns the `ip` fast field reader associated to `field`. /// Returns the `ip` fast field reader associated to `field`.
/// ///
/// If `field` is not a u128 fast field, this method returns an Error. /// If `field` is not a u128 fast field, this method returns an Error.
pub fn ip_addr(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<Ipv6Addr>>> { pub fn ip_addr(&self, field: &str) -> crate::Result<Arc<dyn ColumnValues<Ipv6Addr>>> {
self.column_first_or_default(field) self.typed_column_first_or_default(field)
} }
/// Returns a `str` column. pub fn str(&self, field: &str) -> crate::Result<Option<columnar::StrColumn>> {
pub fn str(&self, field_name: &str) -> crate::Result<Option<StrColumn>> { self.str_column_opt(field)
let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Str)?
else {
return Ok(None);
};
let dynamic_column = dynamic_column_handle.open()?;
Ok(dynamic_column.into())
} }
/// Returns a `bytes` column. pub fn bytes(&self, field: &str) -> crate::Result<Option<columnar::BytesColumn>> {
pub fn bytes(&self, field_name: &str) -> crate::Result<Option<BytesColumn>> { self.bytes_column_opt(field)
let Some(dynamic_column_handle) = self.dynamic_column_handle(field_name, ColumnType::Bytes)?
else {
return Ok(None);
};
let dynamic_column = dynamic_column_handle.open()?;
Ok(dynamic_column.into())
} }
/// Returning a `dynamic_column_handle`. pub fn column_handle(
pub fn dynamic_column_handle(
&self, &self,
field_name: &str, field_name: &str,
column_type: ColumnType, column_type: ColumnType,
@@ -151,9 +150,7 @@ impl FastFieldReaders {
Ok(dynamic_column_handle_opt) Ok(dynamic_column_handle_opt)
} }
/// Returns the `u64` column used to represent any `u64`-mapped typed (i64, u64, f64, DateTime). pub fn u64_lenient(&self, field_name: &str) -> crate::Result<Option<columnar::Column<u64>>> {
#[doc(hidden)]
pub fn u64_lenient(&self, field_name: &str) -> crate::Result<Option<Column<u64>>> {
for col in self.columnar.read_columns(field_name)? { for col in self.columnar.read_columns(field_name)? {
if let Some(col_u64) = col.open_u64_lenient()? { if let Some(col_u64) = col.open_u64_lenient()? {
return Ok(Some(col_u64)); return Ok(Some(col_u64));
@@ -166,20 +163,20 @@ impl FastFieldReaders {
/// ///
/// If `field` is not a i64 fast field, this method returns an Error. /// If `field` is not a i64 fast field, this method returns an Error.
pub fn i64(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<i64>>> { pub fn i64(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<i64>>> {
self.column_first_or_default(field_name) self.typed_column_first_or_default(field_name)
} }
/// Returns the `f64` fast field reader associated with `field`. /// Returns the `f64` fast field reader associated with `field`.
/// ///
/// If `field` is not a f64 fast field, this method returns an Error. /// If `field` is not a f64 fast field, this method returns an Error.
pub fn f64(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<f64>>> { pub fn f64(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<f64>>> {
self.column_first_or_default(field_name) self.typed_column_first_or_default(field_name)
} }
/// Returns the `bool` fast field reader associated with `field`. /// Returns the `bool` fast field reader associated with `field`.
/// ///
/// If `field` is not a bool fast field, this method returns an Error. /// If `field` is not a bool fast field, this method returns an Error.
pub fn bool(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<bool>>> { pub fn bool(&self, field_name: &str) -> crate::Result<Arc<dyn ColumnValues<bool>>> {
self.column_first_or_default(field_name) self.typed_column_first_or_default(field_name)
} }
} }
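Taken together, the accessor surface shown in this hunk is addressed purely by field name. A condensed, hypothetical usage sketch; the field names are assumptions and `segment_reader` is assumed to exist:

// Hypothetical sketch, not part of the diff above.
let fast_fields = segment_reader.fast_fields();
// Default-filled, single-valued view over a u64 fast field.
let rating = fast_fields.u64("rating")?;
// Optional typed column: `None` when no column of that type exists under the name.
let price_opt = fast_fields.column_opt::<f64>("price")?;
// Dictionary-encoded string and bytes columns, if present.
let title_opt = fast_fields.str("title")?;
let payload_opt = fast_fields.bytes("payload")?;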


@@ -67,16 +67,6 @@ impl FastFieldsWriter {
self.columnar_writer.mem_usage() self.columnar_writer.mem_usage()
} }
pub(crate) fn sort_order(
&self,
sort_field: &str,
num_docs: DocId,
reversed: bool,
) -> Vec<DocId> {
self.columnar_writer
.sort_order(sort_field, num_docs, reversed)
}
/// Indexes all of the fastfields of a new document. /// Indexes all of the fastfields of a new document.
pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> { pub fn add_document(&mut self, doc: &Document) -> crate::Result<()> {
let doc_id = self.num_docs; let doc_id = self.num_docs;
@@ -153,13 +143,11 @@ impl FastFieldsWriter {
pub fn serialize( pub fn serialize(
mut self, mut self,
wrt: &mut dyn io::Write, wrt: &mut dyn io::Write,
doc_id_map_opt: Option<&DocIdMapping>, doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> { ) -> io::Result<()> {
assert!(doc_id_map.is_none()); // TODO handle doc id map
let num_docs = self.num_docs; let num_docs = self.num_docs;
let old_to_new_row_ids = self.columnar_writer.serialize(num_docs, wrt)?;
doc_id_map_opt.map(|doc_id_mapping| doc_id_mapping.old_to_new_ids());
self.columnar_writer
.serialize(num_docs, old_to_new_row_ids, wrt)?;
Ok(()) Ok(())
} }
} }


@@ -1,44 +1,27 @@
//! This module is used when sorting the index by a property, e.g. //! This module is used when sorting the index by a property, e.g.
//! to get mappings from old doc_id to new doc_id and vice versa, after sorting //! to get mappings from old doc_id to new doc_id and vice versa, after sorting
use common::ReadOnlyBitSet; use std::cmp::Reverse;
use super::SegmentWriter; use super::SegmentWriter;
use crate::schema::{Field, Schema}; use crate::schema::{Field, Schema};
use crate::{DocAddress, DocId, IndexSortByField, TantivyError}; use crate::{DocAddress, DocId, IndexSortByField, Order, TantivyError};
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum MappingType {
Stacked,
StackedWithDeletes,
Shuffled,
}
/// Struct to provide mapping from new doc_id to old doc_id and segment. /// Struct to provide mapping from new doc_id to old doc_id and segment.
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct SegmentDocIdMapping { pub(crate) struct SegmentDocIdMapping {
pub(crate) new_doc_id_to_old_doc_addr: Vec<DocAddress>, new_doc_id_to_old_doc_addr: Vec<DocAddress>,
pub(crate) alive_bitsets: Vec<Option<ReadOnlyBitSet>>, is_trivial: bool,
mapping_type: MappingType,
} }
impl SegmentDocIdMapping { impl SegmentDocIdMapping {
pub(crate) fn new( pub(crate) fn new(new_doc_id_to_old_and_segment: Vec<DocAddress>, is_trivial: bool) -> Self {
new_doc_id_to_old_doc_addr: Vec<DocAddress>,
mapping_type: MappingType,
alive_bitsets: Vec<Option<ReadOnlyBitSet>>,
) -> Self {
Self { Self {
new_doc_id_to_old_doc_addr, new_doc_id_to_old_doc_addr: new_doc_id_to_old_and_segment,
mapping_type, is_trivial,
alive_bitsets,
} }
} }
pub fn mapping_type(&self) -> MappingType {
self.mapping_type
}
/// Returns an iterator over the old document addresses, ordered by the new document ids. /// Returns an iterator over the old document addresses, ordered by the new document ids.
/// ///
/// In the returned `DocAddress`, the `segment_ord` is the ordinal of targeted segment /// In the returned `DocAddress`, the `segment_ord` is the ordinal of targeted segment
@@ -47,6 +30,10 @@ impl SegmentDocIdMapping {
self.new_doc_id_to_old_doc_addr.iter().copied() self.new_doc_id_to_old_doc_addr.iter().copied()
} }
pub(crate) fn len(&self) -> usize {
self.new_doc_id_to_old_doc_addr.len()
}
/// This flag means the segments are simply stacked in the order of their ordinal. /// This flag means the segments are simply stacked in the order of their ordinal.
/// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)] /// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
/// ///
@@ -57,10 +44,7 @@ impl SegmentDocIdMapping {
/// ///
/// This allows for some optimization. /// This allows for some optimization.
pub(crate) fn is_trivial(&self) -> bool { pub(crate) fn is_trivial(&self) -> bool {
match self.mapping_type { self.is_trivial
MappingType::Stacked | MappingType::StackedWithDeletes => true,
MappingType::Shuffled => false,
}
} }
} }
@@ -101,11 +85,6 @@ impl DocIdMapping {
pub fn iter_old_doc_ids(&self) -> impl Iterator<Item = DocId> + Clone + '_ { pub fn iter_old_doc_ids(&self) -> impl Iterator<Item = DocId> + Clone + '_ {
self.new_doc_id_to_old.iter().cloned() self.new_doc_id_to_old.iter().cloned()
} }
pub fn old_to_new_ids(&self) -> &[DocId] {
&self.old_doc_id_to_new[..]
}
/// Remaps a given array to the new doc ids. /// Remaps a given array to the new doc ids.
pub fn remap<T: Copy>(&self, els: &[T]) -> Vec<T> { pub fn remap<T: Copy>(&self, els: &[T]) -> Vec<T> {
self.new_doc_id_to_old self.new_doc_id_to_old
@@ -139,15 +118,36 @@ pub(crate) fn get_doc_id_mapping_from_field(
sort_by_field: IndexSortByField, sort_by_field: IndexSortByField,
segment_writer: &SegmentWriter, segment_writer: &SegmentWriter,
) -> crate::Result<DocIdMapping> { ) -> crate::Result<DocIdMapping> {
let schema = segment_writer.segment_serializer.segment().schema(); todo!()
expect_field_id_for_sort_field(&schema, &sort_by_field)?; // for now expect // let schema = segment_writer.segment_serializer.segment().schema();
let new_doc_id_to_old = segment_writer.fast_field_writers.sort_order( // let field_id = expect_field_id_for_sort_field(&schema, &sort_by_field)?; // for now expect
sort_by_field.field.as_str(), // fastfield, but not strictly required
segment_writer.max_doc(), // let fast_field = segment_writer
sort_by_field.order.is_desc(), // .fast_field_writers
); // .get_field_writer(field_id)
// create new doc_id to old doc_id index (used in fast_field_writers) // .ok_or_else(|| {
Ok(DocIdMapping::from_new_id_to_old_id(new_doc_id_to_old)) // TantivyError::InvalidArgument(format!(
// "sort index by field is required to be a fast field {:?}",
// sort_by_field.field
// ))
// })?;
// // create new doc_id to old doc_id index (used in fast_field_writers)
// let mut doc_id_and_data = fast_field
// .iter()
// .enumerate()
// .map(|el| (el.0 as DocId, el.1))
// .collect::<Vec<_>>();
// if sort_by_field.order == Order::Desc {
// doc_id_and_data.sort_by_key(|k| Reverse(k.1));
// } else {
// doc_id_and_data.sort_by_key(|k| k.1);
// }
// let new_doc_id_to_old = doc_id_and_data
// .into_iter()
// .map(|el| el.0)
// .collect::<Vec<_>>();
// Ok(DocIdMapping::from_new_id_to_old_id(new_doc_id_to_old))
} }
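For context, a hypothetical index configuration that would exercise the `sort_order`-based doc-id mapping shown above, mirroring the settings used by the merge tests further down; the field name "intval" and the `schema` value are illustrative assumptions:

// Hypothetical sketch, not part of the diff above.
let settings = IndexSettings {
    sort_by_field: Some(IndexSortByField {
        field: "intval".to_string(),
        order: Order::Desc,
    }),
    ..Default::default()
};
let index = Index::builder()
    .schema(schema)
    .settings(settings)
    .create_in_ram()?;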
#[cfg(test)] #[cfg(test)]

File diff suppressed because it is too large


@@ -1,19 +1,19 @@
use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use columnar::{ use columnar::{ColumnValues, ColumnarReader, MergeRowOrder, StackMergeOrder};
ColumnValues, ColumnarReader, MergeRowOrder, RowAddr, ShuffleMergeOrder, StackMergeOrder,
};
use common::ReadOnlyBitSet;
use itertools::Itertools; use itertools::Itertools;
use measure_time::debug_time; use measure_time::debug_time;
// use super::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueIndexColumn;
use crate::core::{Segment, SegmentReader}; use crate::core::{Segment, SegmentReader};
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::docset::{DocSet, TERMINATED}; use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::fastfield::{AliveBitSet, FastFieldNotAvailableError}; use crate::fastfield::{AliveBitSet, FastFieldNotAvailableError};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter}; use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping}; use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
// use crate::indexer::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueColumn;
use crate::indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings}; use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{Field, FieldType, Schema}; use crate::schema::{Field, FieldType, Schema};
@@ -110,31 +110,6 @@ impl DeltaComputer {
} }
} }
fn convert_to_merge_order<'a>(
columnars: &[&'a ColumnarReader],
doc_id_mapping: SegmentDocIdMapping,
) -> MergeRowOrder {
match doc_id_mapping.mapping_type() {
MappingType::Stacked => MergeRowOrder::Stack(StackMergeOrder::stack(columnars)),
MappingType::StackedWithDeletes | MappingType::Shuffled => {
// RUST/LLVM is amazing. The following conversion is actually a no-op:
// no allocation, no copy.
let new_row_id_to_old_row_id: Vec<RowAddr> = doc_id_mapping
.new_doc_id_to_old_doc_addr
.into_iter()
.map(|doc_addr| RowAddr {
segment_ord: doc_addr.segment_ord,
row_id: doc_addr.doc_id,
})
.collect();
MergeRowOrder::Shuffled(ShuffleMergeOrder {
new_row_id_to_old_row_id,
alive_bitsets: doc_id_mapping.alive_bitsets,
})
}
}
}
impl IndexMerger { impl IndexMerger {
pub fn open( pub fn open(
schema: Schema, schema: Schema,
@@ -226,6 +201,7 @@ impl IndexMerger {
let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize); let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
for field in fields { for field in fields {
fieldnorms_data.clear(); fieldnorms_data.clear();
let fieldnorms_readers: Vec<FieldNormReader> = self let fieldnorms_readers: Vec<FieldNormReader> = self
.readers .readers
.iter() .iter()
@@ -236,6 +212,7 @@ impl IndexMerger {
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(old_doc_addr.doc_id); let fieldnorm_id = fieldnorms_reader.fieldnorm_id(old_doc_addr.doc_id);
fieldnorms_data.push(fieldnorm_id); fieldnorms_data.push(fieldnorm_id);
} }
fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?; fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?;
} }
fieldnorms_serializer.close()?; fieldnorms_serializer.close()?;
@@ -245,7 +222,7 @@ impl IndexMerger {
fn write_fast_fields( fn write_fast_fields(
&self, &self,
fast_field_wrt: &mut WritePtr, fast_field_wrt: &mut WritePtr,
doc_id_mapping: SegmentDocIdMapping, doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> { ) -> crate::Result<()> {
debug_time!("write-fast-fields"); debug_time!("write-fast-fields");
let columnars: Vec<&ColumnarReader> = self let columnars: Vec<&ColumnarReader> = self
@@ -253,7 +230,10 @@ impl IndexMerger {
.iter() .iter()
.map(|reader| reader.fast_fields().columnar()) .map(|reader| reader.fast_fields().columnar())
.collect(); .collect();
let merge_row_order = convert_to_merge_order(&columnars[..], doc_id_mapping); if !doc_id_mapping.is_trivial() {
todo!()
}
let merge_row_order = MergeRowOrder::Stack(StackMergeOrder::stack(&columnars[..]));
columnar::merge_columnar(&columnars[..], merge_row_order, fast_field_wrt)?; columnar::merge_columnar(&columnars[..], merge_row_order, fast_field_wrt)?;
Ok(()) Ok(())
} }
@@ -364,23 +344,7 @@ impl IndexMerger {
segment_ord, segment_ord,
}), }),
); );
Ok(SegmentDocIdMapping::new(sorted_doc_ids, false))
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = self
.readers
.iter()
.map(|segment_reader| {
if let Some(alive_bitset) = segment_reader.alive_bitset() {
Some(alive_bitset.bitset().clone())
} else {
None
}
})
.collect();
Ok(SegmentDocIdMapping::new(
sorted_doc_ids,
MappingType::Shuffled,
alive_bitsets,
))
} }
/// Creates a mapping if the segments are stacked. this is helpful to merge codelines between /// Creates a mapping if the segments are stacked. this is helpful to merge codelines between
@@ -405,35 +369,13 @@ impl IndexMerger {
}) })
}), }),
); );
Ok(SegmentDocIdMapping::new(mapping, true))
let has_deletes: bool = self.readers.iter().any(SegmentReader::has_deletes);
let mapping_type = if has_deletes {
MappingType::StackedWithDeletes
} else {
MappingType::Stacked
};
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = self
.readers
.iter()
.map(|reader| {
if let Some(bitset) = reader.alive_bitset() {
Some(bitset.bitset().clone())
} else {
None
}
})
.collect();
Ok(SegmentDocIdMapping::new(
mapping,
mapping_type,
alive_bitsets,
))
} }
fn write_postings_for_field( fn write_postings_for_field(
&self, &self,
indexed_field: Field, indexed_field: Field,
_field_type: &FieldType, field_type: &FieldType,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
fieldnorm_reader: Option<FieldNormReader>, fieldnorm_reader: Option<FieldNormReader>,
doc_id_mapping: &SegmentDocIdMapping, doc_id_mapping: &SegmentDocIdMapping,
@@ -539,7 +481,7 @@ impl IndexMerger {
continue; continue;
} }
field_serializer.new_term(term_bytes, total_doc_freq)?; let to_term_ord = field_serializer.new_term(term_bytes, total_doc_freq)?;
// We can now serialize this postings, by pushing each document to the // We can now serialize this postings, by pushing each document to the
// postings serializer. // postings serializer.
@@ -609,7 +551,7 @@ impl IndexMerger {
serializer, serializer,
fieldnorm_reader, fieldnorm_reader,
doc_id_mapping, doc_id_mapping,
)?; );
} }
} }
Ok(()) Ok(())
@@ -716,12 +658,10 @@ impl IndexMerger {
fieldnorm_readers, fieldnorm_readers,
&doc_id_mapping, &doc_id_mapping,
)?; )?;
debug!("write-fastfields");
self.write_fast_fields(serializer.get_fast_field_write(), &doc_id_mapping)?;
debug!("write-storagefields"); debug!("write-storagefields");
self.write_storable_fields(serializer.get_store_writer(), &doc_id_mapping)?; self.write_storable_fields(serializer.get_store_writer(), &doc_id_mapping)?;
debug!("write-fastfields");
self.write_fast_fields(serializer.get_fast_field_write(), doc_id_mapping)?;
debug!("close-serializer"); debug!("close-serializer");
serializer.close()?; serializer.close()?;
Ok(self.max_doc) Ok(self.max_doc)
@@ -730,14 +670,11 @@ impl IndexMerger {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use byteorder::{BigEndian, ReadBytesExt};
use columnar::Column;
use schema::FAST; use schema::FAST;
use crate::collector::tests::{ use crate::collector::tests::{FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE};
BytesFastFieldTestCollector, FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE, use crate::collector::Count;
};
use crate::collector::{Count, FacetCollector};
use crate::core::Index; use crate::core::Index;
use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery}; use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery};
use crate::schema::{ use crate::schema::{
@@ -746,7 +683,7 @@ mod tests {
}; };
use crate::time::OffsetDateTime; use crate::time::OffsetDateTime;
use crate::{ use crate::{
assert_nearly_equals, schema, DateTime, DocAddress, DocId, DocSet, IndexSettings, assert_nearly_equals, schema, DateTime, DocAddress, DocSet, IndexSettings,
IndexSortByField, IndexWriter, Order, Searcher, SegmentId, IndexSortByField, IndexWriter, Order, Searcher, SegmentId,
}; };
@@ -879,27 +816,27 @@ mod tests {
); );
} }
{ // {
let get_fast_vals = |terms: Vec<Term>| { // let get_fast_vals = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); // let query = BooleanQuery::new_multiterms_query(terms);
searcher.search(&query, &FastFieldTestCollector::for_field("score")) // searcher.search(&query, &FastFieldTestCollector::for_field(score_field))
}; // };
let get_fast_vals_bytes = |terms: Vec<Term>| { // let get_fast_vals_bytes = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); // let query = BooleanQuery::new_multiterms_query(terms);
searcher.search( // searcher.search(
&query, // &query,
&BytesFastFieldTestCollector::for_field("score_bytes"), // &BytesFastFieldTestCollector::for_field(bytes_score_field),
) // )
}; // };
assert_eq!( // assert_eq!(
get_fast_vals(vec![Term::from_field_text(text_field, "a")])?, // get_fast_vals(vec![Term::from_field_text(text_field, "a")])?,
vec![5, 7, 13] // vec![5, 7, 13]
); // );
assert_eq!( // assert_eq!(
get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?, // get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?,
vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13] // vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13]
); // );
} // }
} }
Ok(()) Ok(())
} }
@@ -1212,206 +1149,209 @@ mod tests {
Ok(()) Ok(())
} }
#[test] // TODO re-enable
fn test_merge_facets_sort_none() { // #[test]
test_merge_facets(None, true) // fn test_merge_facets_sort_none() {
} // test_merge_facets(None, true)
// }
#[test] // #[test]
fn test_merge_facets_sort_asc() { // fn test_merge_facets_sort_asc() {
// In the merge case this will go through the doc_id mapping code // // In the merge case this will go through the doc_id mapping code
test_merge_facets( // test_merge_facets(
Some(IndexSettings { // Some(IndexSettings {
sort_by_field: Some(IndexSortByField { // sort_by_field: Some(IndexSortByField {
field: "intval".to_string(), // field: "intval".to_string(),
order: Order::Desc, // order: Order::Desc,
}), // }),
..Default::default() // ..Default::default()
}), // }),
true, // true,
); // );
// In the merge case this will not go through the doc_id mapping code, because the data // // In the merge case this will not go through the doc_id mapping code, because the data
// sorted and disjunct // is // sorted and disjunct
test_merge_facets( // test_merge_facets(
Some(IndexSettings { // Some(IndexSettings {
sort_by_field: Some(IndexSortByField { // sort_by_field: Some(IndexSortByField {
field: "intval".to_string(), // field: "intval".to_string(),
order: Order::Desc, // order: Order::Desc,
}), // }),
..Default::default() // ..Default::default()
}), // }),
false, // false,
); // );
} // }
#[test] // #[test]
fn test_merge_facets_sort_desc() { // fn test_merge_facets_sort_desc() {
// In the merge case this will go through the doc_id mapping code // // In the merge case this will go through the doc_id mapping code
test_merge_facets( // test_merge_facets(
Some(IndexSettings { // Some(IndexSettings {
sort_by_field: Some(IndexSortByField { // sort_by_field: Some(IndexSortByField {
field: "intval".to_string(), // field: "intval".to_string(),
order: Order::Desc, // order: Order::Desc,
}), // }),
..Default::default() // ..Default::default()
}), // }),
true, // true,
); // );
// In the merge case this will not go through the doc_id mapping code, because the data // // In the merge case this will not go through the doc_id mapping code, because the data
// sorted and disjunct // is // sorted and disjunct
test_merge_facets( // test_merge_facets(
Some(IndexSettings { // Some(IndexSettings {
sort_by_field: Some(IndexSortByField { // sort_by_field: Some(IndexSortByField {
field: "intval".to_string(), // field: "intval".to_string(),
order: Order::Desc, // order: Order::Desc,
}), // }),
..Default::default() // ..Default::default()
}), // }),
false, // false,
); // );
} // }
// force_segment_value_overlap forces the int value for sorting to have overlapping min and max // force_segment_value_overlap forces the int value for sorting to have overlapping min and max
// ranges between segments so that merge algorithm can't apply certain optimizations // ranges between segments so that merge algorithm can't apply certain optimizations
fn test_merge_facets(index_settings: Option<IndexSettings>, force_segment_value_overlap: bool) { // fn test_merge_facets(index_settings: Option<IndexSettings>, force_segment_value_overlap:
let mut schema_builder = schema::Schema::builder(); // bool) { let mut schema_builder = schema::Schema::builder();
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default()); // let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let int_options = NumericOptions::default().set_fast().set_indexed(); // let int_options = NumericOptions::default()
let int_field = schema_builder.add_u64_field("intval", int_options); // .set_fast()
let mut index_builder = Index::builder().schema(schema_builder.build()); // .set_indexed();
if let Some(settings) = index_settings { // let int_field = schema_builder.add_u64_field("intval", int_options);
index_builder = index_builder.settings(settings); // let mut index_builder = Index::builder().schema(schema_builder.build());
} // if let Some(settings) = index_settings {
let index = index_builder.create_in_ram().unwrap(); // index_builder = index_builder.settings(settings);
// let index = Index::create_in_ram(schema_builder.build()); // }
let reader = index.reader().unwrap(); // let index = index_builder.create_in_ram().unwrap();
let mut int_val = 0; // // let index = Index::create_in_ram(schema_builder.build());
{ // let reader = index.reader().unwrap();
let mut index_writer = index.writer_for_tests().unwrap(); // let mut int_val = 0;
let index_doc = // {
|index_writer: &mut IndexWriter, doc_facets: &[&str], int_val: &mut u64| { // let mut index_writer = index.writer_for_tests().unwrap();
let mut doc = Document::default(); // let index_doc =
for facet in doc_facets { // |index_writer: &mut IndexWriter, doc_facets: &[&str], int_val: &mut u64| {
doc.add_facet(facet_field, Facet::from(facet)); // let mut doc = Document::default();
} // for facet in doc_facets {
doc.add_u64(int_field, *int_val); // doc.add_facet(facet_field, Facet::from(facet));
*int_val += 1; // }
index_writer.add_document(doc).unwrap(); // doc.add_u64(int_field, *int_val);
}; // *int_val += 1;
// index_writer.add_document(doc).unwrap();
// };
index_doc( // index_doc(
&mut index_writer, // &mut index_writer,
&["/top/a/firstdoc", "/top/b"], // &["/top/a/firstdoc", "/top/b"],
&mut int_val, // &mut int_val,
); // );
index_doc( // index_doc(
&mut index_writer, // &mut index_writer,
&["/top/a/firstdoc", "/top/b", "/top/c"], // &["/top/a/firstdoc", "/top/b", "/top/c"],
&mut int_val, // &mut int_val,
); // );
index_doc(&mut index_writer, &["/top/a", "/top/b"], &mut int_val); // index_doc(&mut index_writer, &["/top/a", "/top/b"], &mut int_val);
index_doc(&mut index_writer, &["/top/a"], &mut int_val); // index_doc(&mut index_writer, &["/top/a"], &mut int_val);
index_doc(&mut index_writer, &["/top/b", "/top/d"], &mut int_val); // index_doc(&mut index_writer, &["/top/b", "/top/d"], &mut int_val);
if force_segment_value_overlap { // if force_segment_value_overlap {
index_doc(&mut index_writer, &["/top/d"], &mut 0); // index_doc(&mut index_writer, &["/top/d"], &mut 0);
index_doc(&mut index_writer, &["/top/e"], &mut 10); // index_doc(&mut index_writer, &["/top/e"], &mut 10);
index_writer.commit().expect("committed"); // index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/a"], &mut 5); // 5 is between 0 - 10 so the // index_doc(&mut index_writer, &["/top/a"], &mut 5); // 5 is between 0 - 10 so the
// segments don' have disjunct // // segments don' have disjunct
// ranges // // ranges
} else { // } else {
index_doc(&mut index_writer, &["/top/d"], &mut int_val); // index_doc(&mut index_writer, &["/top/d"], &mut int_val);
index_doc(&mut index_writer, &["/top/e"], &mut int_val); // index_doc(&mut index_writer, &["/top/e"], &mut int_val);
index_writer.commit().expect("committed"); // index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/a"], &mut int_val); // index_doc(&mut index_writer, &["/top/a"], &mut int_val);
} // }
index_doc(&mut index_writer, &["/top/b"], &mut int_val); // index_doc(&mut index_writer, &["/top/b"], &mut int_val);
index_doc(&mut index_writer, &["/top/c"], &mut int_val); // index_doc(&mut index_writer, &["/top/c"], &mut int_val);
index_writer.commit().expect("committed"); // index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/e", "/top/f"], &mut int_val); // index_doc(&mut index_writer, &["/top/e", "/top/f"], &mut int_val);
index_writer.commit().expect("committed"); // index_writer.commit().expect("committed");
} // }
reader.reload().unwrap(); // reader.reload().unwrap();
let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| { // let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
let searcher = reader.searcher(); // let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field("facet"); // let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top")); // facet_collector.add_facet(Facet::from("/top"));
let (count, facet_counts) = searcher // let (count, facet_counts) = searcher
.search(&AllQuery, &(Count, facet_collector)) // .search(&AllQuery, &(Count, facet_collector))
.unwrap(); // .unwrap();
assert_eq!(count, expected_num_docs); // assert_eq!(count, expected_num_docs);
let facets: Vec<(String, u64)> = facet_counts // let facets: Vec<(String, u64)> = facet_counts
.get("/top") // .get("/top")
.map(|(facet, count)| (facet.to_string(), count)) // .map(|(facet, count)| (facet.to_string(), count))
.collect(); // .collect();
assert_eq!( // assert_eq!(
facets, // facets,
expected // expected
.iter() // .iter()
.map(|&(facet_str, count)| (String::from(facet_str), count)) // .map(|&(facet_str, count)| (String::from(facet_str), count))
.collect::<Vec<_>>() // .collect::<Vec<_>>()
); // );
}; // };
test_searcher( // test_searcher(
11, // 11,
&[ // &[
("/top/a", 5), // ("/top/a", 5),
("/top/b", 5), // ("/top/b", 5),
("/top/c", 2), // ("/top/c", 2),
("/top/d", 2), // ("/top/d", 2),
("/top/e", 2), // ("/top/e", 2),
("/top/f", 1), // ("/top/f", 1),
], // ],
); // );
// Merging the segments // // Merging the segments
{ // {
let segment_ids = index // let segment_ids = index
.searchable_segment_ids() // .searchable_segment_ids()
.expect("Searchable segments failed."); // .expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap(); // let mut index_writer = index.writer_for_tests().unwrap();
index_writer // index_writer
.merge(&segment_ids) // .merge(&segment_ids)
.wait() // .wait()
.expect("Merging failed"); // .expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); // index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); // reader.reload().unwrap();
test_searcher( // test_searcher(
11, // 11,
&[ // &[
("/top/a", 5), // ("/top/a", 5),
("/top/b", 5), // ("/top/b", 5),
("/top/c", 2), // ("/top/c", 2),
("/top/d", 2), // ("/top/d", 2),
("/top/e", 2), // ("/top/e", 2),
("/top/f", 1), // ("/top/f", 1),
], // ],
); // );
} // }
// Deleting one term // // Deleting one term
{ // {
let mut index_writer = index.writer_for_tests().unwrap(); // let mut index_writer = index.writer_for_tests().unwrap();
let facet = Facet::from_path(vec!["top", "a", "firstdoc"]); // let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
let facet_term = Term::from_facet(facet_field, &facet); // let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term); // index_writer.delete_term(facet_term);
index_writer.commit().unwrap(); // index_writer.commit().unwrap();
reader.reload().unwrap(); // reader.reload().unwrap();
test_searcher( // test_searcher(
9, // 9,
&[ // &[
("/top/a", 3), // ("/top/a", 3),
("/top/b", 3), // ("/top/b", 3),
("/top/c", 1), // ("/top/c", 1),
("/top/d", 2), // ("/top/d", 2),
("/top/e", 2), // ("/top/e", 2),
("/top/f", 1), // ("/top/f", 1),
], // ],
); // );
} // }
} // }
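
A note on the comment at the top of test_merge_facets: when the index is sorted by a field and the per-segment value ranges are disjoint, the merger can keep the existing doc order and simply stack the segments; overlapping ranges force it down the doc_id mapping path. The sketch below is purely illustrative, `ranges_are_disjoint` is a made-up helper, not a tantivy API.

// Illustrative sketch only: `ranges_are_disjoint` is a hypothetical helper, not a tantivy API.
fn ranges_are_disjoint(a: std::ops::RangeInclusive<u64>, b: std::ops::RangeInclusive<u64>) -> bool {
    a.end() < b.start() || b.end() < a.start()
}

fn main() {
    assert!(ranges_are_disjoint(0..=4, 5..=9));   // disjoint: merged output is one segment after the other
    assert!(!ranges_are_disjoint(0..=10, 5..=9)); // overlap (what the test forces): a doc_id mapping is needed
}
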
#[test] #[test]
fn test_bug_merge() -> crate::Result<()> { fn test_bug_merge() -> crate::Result<()> {
@@ -1480,13 +1420,6 @@ mod tests {
let int_field = schema_builder.add_u64_field("intvals", int_options); let int_field = schema_builder.add_u64_field("intvals", int_options);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut vals: Vec<u64> = Vec::new();
let mut test_vals = move |col: &Column<u64>, doc: DocId, expected: &[u64]| {
vals.clear();
vals.extend(col.values(doc));
assert_eq!(&vals[..], expected);
};
{ {
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_for_tests()?;
let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| { let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
@@ -1512,42 +1445,49 @@ mod tests {
} }
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
let mut vals: Vec<u64> = Vec::new();
{ {
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let column = segment // let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
.fast_fields()
.column_opt::<u64>("intvals") // ff_reader.get_vals(0, &mut vals);
.unwrap() // assert_eq!(&vals, &[1, 2]);
.unwrap();
test_vals(&column, 0, &[1, 2]); // ff_reader.get_vals(1, &mut vals);
test_vals(&column, 1, &[1, 2, 3]); // assert_eq!(&vals, &[1, 2, 3]);
test_vals(&column, 2, &[4, 5]);
test_vals(&column, 3, &[1, 2]); // ff_reader.get_vals(2, &mut vals);
test_vals(&column, 4, &[1, 5]); // assert_eq!(&vals, &[4, 5]);
test_vals(&column, 5, &[3]);
test_vals(&column, 6, &[17]); // ff_reader.get_vals(3, &mut vals);
// assert_eq!(&vals, &[1, 2]);
// ff_reader.get_vals(4, &mut vals);
// assert_eq!(&vals, &[1, 5]);
// ff_reader.get_vals(5, &mut vals);
// assert_eq!(&vals, &[3]);
// ff_reader.get_vals(6, &mut vals);
// assert_eq!(&vals, &[17]);
} }
{ {
let segment = searcher.segment_reader(1u32); let segment = searcher.segment_reader(1u32);
let col = segment // let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
.fast_fields() // ff_reader.get_vals(0, &mut vals);
.column_opt::<u64>("intvals") // assert_eq!(&vals, &[28, 27]);
.unwrap()
.unwrap(); // ff_reader.get_vals(1, &mut vals);
test_vals(&col, 0, &[28, 27]); // assert_eq!(&vals, &[1_000]);
test_vals(&col, 1, &[1000]);
} }
{ {
let segment = searcher.segment_reader(2u32); let segment = searcher.segment_reader(2u32);
let col = segment // let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
.fast_fields() // ff_reader.get_vals(0, &mut vals);
.column_opt::<u64>("intvals") // assert_eq!(&vals, &[20]);
.unwrap()
.unwrap();
test_vals(&col, 0, &[20]);
} }
// Merging the segments // Merging the segments
@@ -1562,21 +1502,37 @@ mod tests {
{ {
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let col = segment // let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
.fast_fields()
.column_opt::<u64>("intvals") // ff_reader.get_vals(0, &mut vals);
.unwrap() // assert_eq!(&vals, &[1, 2]);
.unwrap();
test_vals(&col, 0, &[1, 2]); // ff_reader.get_vals(1, &mut vals);
test_vals(&col, 1, &[1, 2, 3]); // assert_eq!(&vals, &[1, 2, 3]);
test_vals(&col, 2, &[4, 5]);
test_vals(&col, 3, &[1, 2]); // ff_reader.get_vals(2, &mut vals);
test_vals(&col, 4, &[1, 5]); // assert_eq!(&vals, &[4, 5]);
test_vals(&col, 5, &[3]);
test_vals(&col, 6, &[17]); // ff_reader.get_vals(3, &mut vals);
test_vals(&col, 7, &[28, 27]); // assert_eq!(&vals, &[1, 2]);
test_vals(&col, 8, &[1000]);
test_vals(&col, 9, &[20]); // ff_reader.get_vals(4, &mut vals);
// assert_eq!(&vals, &[1, 5]);
// ff_reader.get_vals(5, &mut vals);
// assert_eq!(&vals, &[3]);
// ff_reader.get_vals(6, &mut vals);
// assert_eq!(&vals, &[17]);
// ff_reader.get_vals(7, &mut vals);
// assert_eq!(&vals, &[28, 27]);
// ff_reader.get_vals(8, &mut vals);
// assert_eq!(&vals, &[1_000]);
// ff_reader.get_vals(9, &mut vals);
// assert_eq!(&vals, &[20]);
} }
Ok(()) Ok(())
} }
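
For readers tracking the API change in this test: the old `u64s(field)` / `get_vals(doc, &mut vals)` fast field accessors are replaced by a columnar `Column` handle. A minimal sketch of the new access pattern, assuming a multi-valued u64 fast field named "intvals" exists in the segment:

// Sketch of the columnar access pattern used above; assumes "intvals" is a u64 fast field.
use tantivy::{DocId, SegmentReader};

fn intvals_for_doc(segment: &SegmentReader, doc: DocId) -> Vec<u64> {
    let column = segment
        .fast_fields()
        .column_opt::<u64>("intvals") // Result<Option<Column<u64>>>
        .expect("fast field lookup failed")
        .expect("no such column");
    let mut vals = Vec::new();
    vals.extend(column.values(doc)); // the values stored for this doc, in insertion order
    vals
}
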

View File

@@ -155,7 +155,6 @@ mod tests {
fn test_merge_sorted_index_desc_not_disjunct() { fn test_merge_sorted_index_desc_not_disjunct() {
test_merge_sorted_index_desc_(false); test_merge_sorted_index_desc_(false);
} }
#[test] #[test]
fn test_merge_sorted_index_desc_disjunct() { fn test_merge_sorted_index_desc_disjunct() {
test_merge_sorted_index_desc_(true); test_merge_sorted_index_desc_(true);
@@ -477,11 +476,12 @@ mod bench_sorted_index_merge {
use std::sync::Arc; use std::sync::Arc;
use fastfield_codecs::Column;
use test::{self, Bencher}; use test::{self, Bencher};
use crate::core::Index; use crate::core::Index;
use crate::indexer::merger::IndexMerger; use crate::indexer::merger::IndexMerger;
use crate::schema::{NumericOptions, Schema}; use crate::schema::{Cardinality, NumericOptions, Schema};
use crate::{IndexSettings, IndexSortByField, IndexWriter, Order}; use crate::{IndexSettings, IndexSortByField, IndexWriter, Order};
fn create_index(sort_by_field: Option<IndexSortByField>) -> Index { fn create_index(sort_by_field: Option<IndexSortByField>) -> Index {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -512,42 +512,42 @@ mod bench_sorted_index_merge {
index index
} }
//#[bench] #[bench]
// fn create_sorted_index_walk_overkmerge_on_merge_fastfield( fn create_sorted_index_walk_overkmerge_on_merge_fastfield(
// b: &mut Bencher, b: &mut Bencher,
//) -> crate::Result<()> { ) -> crate::Result<()> {
// let sort_by_field = IndexSortByField { let sort_by_field = IndexSortByField {
// field: "intval".to_string(), field: "intval".to_string(),
// order: Order::Desc, order: Order::Desc,
//}; };
// let index = create_index(Some(sort_by_field.clone())); let index = create_index(Some(sort_by_field.clone()));
// let segments = index.searchable_segments().unwrap(); let segments = index.searchable_segments().unwrap();
// let merger: IndexMerger = let merger: IndexMerger =
// IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?; IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
// let doc_id_mapping = merger.generate_doc_id_mapping(&sort_by_field).unwrap(); let doc_id_mapping = merger.generate_doc_id_mapping(&sort_by_field).unwrap();
// b.iter(|| { b.iter(|| {
// let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| { let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
// let reader = &merger.readers[doc_addr.segment_ord as usize]; let reader = &merger.readers[doc_addr.segment_ord as usize];
// let u64_reader: Arc<dyn Column<u64>> = reader let u64_reader: Arc<dyn Column<u64>> = reader
//.fast_fields() .fast_fields()
//.typed_fast_field_reader("intval") .typed_fast_field_reader("intval")
//.expect( .expect(
//"Failed to find a reader for single fast field. This is a tantivy bug and \ "Failed to find a reader for single fast field. This is a tantivy bug and \
// it should never happen.", it should never happen.",
//); );
//(doc_addr.doc_id, reader, u64_reader) (doc_addr.doc_id, reader, u64_reader)
//}); });
//// add values in order of the new doc_ids // add values in order of the new doc_ids
// let mut val = 0; let mut val = 0;
// for (doc_id, _reader, field_reader) in sorted_doc_ids { for (doc_id, _reader, field_reader) in sorted_doc_ids {
// val = field_reader.get_val(doc_id); val = field_reader.get_val(doc_id);
//} }
// val val
//}); });
// Ok(()) Ok(())
//} }
#[bench] #[bench]
fn create_sorted_index_create_doc_id_mapping(b: &mut Bencher) -> crate::Result<()> { fn create_sorted_index_create_doc_id_mapping(b: &mut Bencher) -> crate::Result<()> {
let sort_by_field = IndexSortByField { let sort_by_field = IndexSortByField {

View File

@@ -55,9 +55,9 @@ type AddBatchReceiver = channel::Receiver<AddBatch>;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
#[cfg(test)] #[cfg(test)]
mod tests_mmap { mod tests_mmap {
use crate::collector::Count;
// use crate::query::QueryParser; // use crate::query::QueryParser;
use crate::schema::{Schema, TEXT}; use crate::schema::{JsonObjectOptions, Schema, TEXT};
use crate::{Index, Term}; use crate::{Index, Term};
#[test] #[test]

View File

@@ -819,23 +819,20 @@ mod tests {
// This is a bit of a contrived example. // This is a bit of a contrived example.
let tokens = PreTokenizedString { let tokens = PreTokenizedString {
text: "contrived-example".to_string(), //< I can't think of a use case where this corner case happens in real life. text: "contrived-example".to_string(), //< I can't think of a use case where this corner case happens in real life.
tokens: vec![ tokens: vec![Token { // Not the last token, yet ends after the last token.
Token { offset_from: 0,
// Not the last token, yet ends after the last token. offset_to: 14,
offset_from: 0, position: 0,
offset_to: 14, text: "long_token".to_string(),
position: 0, position_length: 3,
text: "long_token".to_string(), },
position_length: 3, Token {
}, offset_from: 0,
Token { offset_to: 14,
offset_from: 0, position: 1,
offset_to: 14, text: "short".to_string(),
position: 1, position_length: 1,
text: "short".to_string(), }],
position_length: 1,
},
],
}; };
doc.add_pre_tokenized_text(text, tokens); doc.add_pre_tokenized_text(text, tokens);
doc.add_text(text, "hello"); doc.add_text(text, "hello");

View File

@@ -279,7 +279,7 @@ mod indexer;
pub mod error; pub mod error;
pub mod tokenizer; pub mod tokenizer;
pub mod aggregation; // pub mod aggregation;
pub mod collector; pub mod collector;
pub mod directory; pub mod directory;
pub mod fastfield; pub mod fastfield;

View File

@@ -29,6 +29,8 @@ pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
pub(crate) use self::skip::{BlockInfo, SkipReader}; pub(crate) use self::skip::{BlockInfo, SkipReader};
pub use self::term_info::TermInfo; pub use self::term_info::TermInfo;
pub(crate) type UnorderedTermId = stacker::UnorderedId;
#[allow(clippy::enum_variant_names)] #[allow(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Clone, Copy, Eq)] #[derive(Debug, PartialEq, Clone, Copy, Eq)]
pub(crate) enum FreqReadingOption { pub(crate) enum FreqReadingOption {

View File

@@ -1,7 +1,9 @@
use std::collections::HashMap;
use std::io; use std::io;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::ops::Range; use std::ops::Range;
use rustc_hash::FxHashMap;
use stacker::Addr; use stacker::Addr;
use crate::fieldnorm::FieldNormReaders; use crate::fieldnorm::FieldNormReaders;
@@ -9,8 +11,10 @@ use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::recorder::{BufferLender, Recorder}; use crate::postings::recorder::{BufferLender, Recorder};
use crate::postings::{ use crate::postings::{
FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter, FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter,
UnorderedTermId,
}; };
use crate::schema::{Field, Term}; use crate::schema::{Field, FieldType, Schema, Term};
use crate::termdict::TermOrdinal;
use crate::tokenizer::{Token, TokenStream, MAX_TOKEN_LEN}; use crate::tokenizer::{Token, TokenStream, MAX_TOKEN_LEN};
use crate::DocId; use crate::DocId;

View File

@@ -13,7 +13,7 @@ use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_
use crate::postings::skip::SkipSerializer; use crate::postings::skip::SkipSerializer;
use crate::query::Bm25Weight; use crate::query::Bm25Weight;
use crate::schema::{Field, FieldEntry, FieldType, IndexRecordOption, Schema}; use crate::schema::{Field, FieldEntry, FieldType, IndexRecordOption, Schema};
use crate::termdict::TermDictionaryBuilder; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::{DocId, Score}; use crate::{DocId, Score};
/// `InvertedIndexSerializer` is in charge of serializing /// `InvertedIndexSerializer` is in charge of serializing
@@ -109,6 +109,7 @@ pub struct FieldSerializer<'a> {
positions_serializer_opt: Option<PositionSerializer<&'a mut CountingWriter<WritePtr>>>, positions_serializer_opt: Option<PositionSerializer<&'a mut CountingWriter<WritePtr>>>,
current_term_info: TermInfo, current_term_info: TermInfo,
term_open: bool, term_open: bool,
num_terms: TermOrdinal,
} }
impl<'a> FieldSerializer<'a> { impl<'a> FieldSerializer<'a> {
@@ -147,6 +148,7 @@ impl<'a> FieldSerializer<'a> {
positions_serializer_opt, positions_serializer_opt,
current_term_info: TermInfo::default(), current_term_info: TermInfo::default(),
term_open: false, term_open: false,
num_terms: TermOrdinal::default(),
}) })
} }
@@ -169,17 +171,20 @@ impl<'a> FieldSerializer<'a> {
/// * term - the term. It needs to come after the previous term according to the lexicographical /// * term - the term. It needs to come after the previous term according to the lexicographical
/// order. /// order.
/// * term_doc_freq - the number of documents containing the term. /// * term_doc_freq - the number of documents containing the term.
pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<()> { pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
assert!( assert!(
!self.term_open, !self.term_open,
"Called new_term, while the previous term was not closed." "Called new_term, while the previous term was not closed."
); );
self.term_open = true; self.term_open = true;
self.postings_serializer.clear(); self.postings_serializer.clear();
self.current_term_info = self.current_term_info(); self.current_term_info = self.current_term_info();
self.term_dictionary_builder.insert_key(term)?; self.term_dictionary_builder.insert_key(term)?;
let term_ordinal = self.num_terms;
self.num_terms += 1;
self.postings_serializer.new_term(term_doc_freq); self.postings_serializer.new_term(term_doc_freq);
Ok(()) Ok(term_ordinal)
} }
/// Serialize the information that a document contains for the current term: /// Serialize the information that a document contains for the current term:
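
To spell out the new contract of `new_term` above: terms must still be fed in lexicographic byte order, and the method now returns the `TermOrdinal` of the inserted term (0, 1, 2, ... in that order). A hedged fragment, assuming an already-open `FieldSerializer` inside a function returning `io::Result`:

// Hedged fragment: `field_serializer` is assumed to be an open FieldSerializer.
let ord_apple = field_serializer.new_term(b"apple", /* term_doc_freq */ 2)?;
// ... write_doc(..) calls for "apple", then field_serializer.close_term()? ...
let ord_banana = field_serializer.new_term(b"banana", 1)?;
assert_eq!((ord_apple, ord_banana), (0, 1)); // ordinals simply count terms in insertion order
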

View File

@@ -50,7 +50,7 @@ pub use self::more_like_this::{MoreLikeThisQuery, MoreLikeThisQueryBuilder};
pub use self::phrase_query::PhraseQuery; pub use self::phrase_query::PhraseQuery;
pub use self::query::{EnableScoring, Query, QueryClone}; pub use self::query::{EnableScoring, Query, QueryClone};
pub use self::query_parser::{QueryParser, QueryParserError}; pub use self::query_parser::{QueryParser, QueryParserError};
pub use self::range_query::RangeQuery; // pub use self::range_query::RangeQuery;
pub use self::regex_query::RegexQuery; pub use self::regex_query::RegexQuery;
pub use self::reqopt_scorer::RequiredOptionalScorer; pub use self::reqopt_scorer::RequiredOptionalScorer;
pub use self::score_combiner::{ pub use self::score_combiner::{

View File

@@ -1,9 +1,7 @@
use core::fmt::Debug; use core::fmt::Debug;
use std::ops::RangeInclusive; use std::ops::RangeInclusive;
use std::sync::Arc;
use columnar::column_index::ColumnIndexSelectCursor; use columnar::Column;
use columnar::{Column, ColumnValues};
use crate::fastfield::MakeZero; use crate::fastfield::MakeZero;
use crate::{DocId, DocSet, TERMINATED}; use crate::{DocId, DocSet, TERMINATED};
@@ -45,9 +43,7 @@ impl VecCursor {
pub(crate) struct RangeDocSet<T: MakeZero> { pub(crate) struct RangeDocSet<T: MakeZero> {
/// The range filter on the values. /// The range filter on the values.
value_range: RangeInclusive<T>, value_range: RangeInclusive<T>,
column_index_select_cursor: ColumnIndexSelectCursor, column: Column<T>,
column_values: Arc<dyn ColumnValues<T>>,
/// The next docid start range to fetch (inclusive). /// The next docid start range to fetch (inclusive).
next_fetch_start: u32, next_fetch_start: u32,
/// Number of docs range checked in a batch. /// Number of docs range checked in a batch.
@@ -67,15 +63,13 @@ pub(crate) struct RangeDocSet<T: MakeZero> {
const DEFAULT_FETCH_HORIZON: u32 = 128; const DEFAULT_FETCH_HORIZON: u32 = 128;
impl<T: MakeZero + Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> { impl<T: MakeZero + Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
pub(crate) fn new(value_range: RangeInclusive<T>, column: Column<T>) -> Self { pub(crate) fn new(value_range: RangeInclusive<T>, column: Column<T>) -> Self {
let column_index_select_cursor = column.select_cursor();
let mut range_docset = Self { let mut range_docset = Self {
value_range, value_range,
column_values: column.values, column,
loaded_docs: VecCursor::new(), loaded_docs: VecCursor::new(),
next_fetch_start: 0, next_fetch_start: 0,
fetch_horizon: DEFAULT_FETCH_HORIZON, fetch_horizon: DEFAULT_FETCH_HORIZON,
last_seek_pos_opt: None, last_seek_pos_opt: None,
column_index_select_cursor,
}; };
range_docset.reset_fetch_range(); range_docset.reset_fetch_range();
range_docset.fetch_block(); range_docset.fetch_block();
@@ -112,21 +106,26 @@ impl<T: MakeZero + Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSe
fn fetch_horizon(&mut self, horizon: u32) -> bool { fn fetch_horizon(&mut self, horizon: u32) -> bool {
let mut finished_to_end = false; let mut finished_to_end = false;
let limit = self.column_values.num_vals(); let limit = self.column.values.num_vals();
let mut end = self.next_fetch_start + horizon; let mut end = self.next_fetch_start + horizon;
if end >= limit { if end >= limit {
end = limit; end = limit;
finished_to_end = true; finished_to_end = true;
} }
let last_value = self.loaded_docs.last_value();
let doc_buffer: &mut Vec<DocId> = self.loaded_docs.get_cleared_data(); let doc_buffer: &mut Vec<DocId> = self.loaded_docs.get_cleared_data();
self.column_values.get_docids_for_value_range( self.column.values.get_docids_for_value_range(
self.value_range.clone(), self.value_range.clone(),
self.next_fetch_start..end, self.next_fetch_start..end,
doc_buffer, doc_buffer,
); );
self.column_index_select_cursor self.column.idx.select_batch_in_place(doc_buffer);
.select_batch_in_place(doc_buffer); if let Some(last_value) = last_value {
while self.loaded_docs.current() == Some(last_value) {
self.loaded_docs.next();
}
}
self.next_fetch_start = end; self.next_fetch_start = end;
finished_to_end finished_to_end
@@ -139,7 +138,7 @@ impl<T: MakeZero + Send + Sync + PartialOrd + Copy + Debug + 'static> DocSet for
if let Some(docid) = self.loaded_docs.next() { if let Some(docid) = self.loaded_docs.next() {
return docid; return docid;
} }
if self.next_fetch_start >= self.column_values.num_vals() { if self.next_fetch_start >= self.column.values.num_vals() {
return TERMINATED; return TERMINATED;
} }
self.fetch_block(); self.fetch_block();
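
The loop above walks the column in fixed-size horizons rather than scanning everything up front. A self-contained toy version of that batching idea (illustrative only, not the tantivy code):

// Toy version of the horizon-based scan: check `horizon` rows at a time and keep matching row ids.
fn scan_in_horizons(values: &[u64], range: std::ops::RangeInclusive<u64>, horizon: usize) -> Vec<u32> {
    let mut hits = Vec::new();
    let mut start = 0usize;
    while start < values.len() {
        let end = (start + horizon).min(values.len());
        for row in start..end {
            if range.contains(&values[row]) {
                hits.push(row as u32); // in the real docset these ids still go through the column index
            }
        }
        start = end; // next_fetch_start advances by one horizon per iteration
    }
    hits
}
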

View File

@@ -399,8 +399,6 @@ mod tests {
use std::ops::Bound; use std::ops::Bound;
use std::str::FromStr; use std::str::FromStr;
use rand::seq::SliceRandom;
use super::RangeQuery; use super::RangeQuery;
use crate::collector::{Count, TopDocs}; use crate::collector::{Count, TopDocs};
use crate::query::QueryParser; use crate::query::QueryParser;
@@ -508,8 +506,8 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_with_num_threads(1, 60_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(2, 60_000_000).unwrap();
let mut docs = Vec::new();
for i in 1..100 { for i in 1..100 {
let mut doc = Document::new(); let mut doc = Document::new();
for j in 1..100 { for j in 1..100 {
@@ -517,18 +515,9 @@ mod tests {
doc.add_f64(float_field, j as f64); doc.add_f64(float_field, j as f64);
} }
} }
docs.push(doc); index_writer.add_document(doc)?;
} }
docs.shuffle(&mut rand::thread_rng());
let mut docs_it = docs.into_iter();
for doc in (&mut docs_it).take(50) {
index_writer.add_document(doc)?;
}
index_writer.commit()?;
for doc in docs_it {
index_writer.add_document(doc)?;
}
index_writer.commit()?; index_writer.commit()?;
} }
let reader = index.reader()?; let reader = index.reader()?;

View File

@@ -41,7 +41,7 @@ impl IPFastFieldRangeWeight {
impl Weight for IPFastFieldRangeWeight { impl Weight for IPFastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let Some(ip_addr_column): Option<Column<Ipv6Addr>> = reader.fast_fields() let Some(ip_addr_column): Option<Column<Ipv6Addr>> = reader.fast_fields()
.column_opt(&self.field)? else { .typed_column_opt(&self.field)? else {
return Ok(Box::new(EmptyScorer)) return Ok(Box::new(EmptyScorer))
}; };
let value_range = bound_to_value_range( let value_range = bound_to_value_range(
@@ -88,7 +88,7 @@ fn bound_to_value_range(
} }
#[cfg(test)] #[cfg(test)]
pub mod tests { mod tests {
use proptest::prelude::ProptestConfig; use proptest::prelude::ProptestConfig;
use proptest::strategy::Strategy; use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest}; use proptest::{prop_oneof, proptest};
@@ -188,7 +188,7 @@ pub mod tests {
assert_eq!(count, 2); assert_eq!(count, 2);
} }
pub fn create_index_from_docs(docs: &[Doc]) -> Index { fn create_index_from_docs(docs: &[Doc]) -> Index {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let ip_field = schema_builder.add_ip_addr_field("ip", STORED | FAST); let ip_field = schema_builder.add_ip_addr_field("ip", STORED | FAST);
let ips_field = schema_builder.add_ip_addr_field("ips", FAST | INDEXED); let ips_field = schema_builder.add_ip_addr_field("ips", FAST | INDEXED);

View File

@@ -86,10 +86,12 @@ fn bound_to_value_range<T: MonotonicallyMappableToU64>(
} }
#[cfg(test)] #[cfg(test)]
pub mod tests { mod tests {
use std::ops::{Bound, RangeInclusive}; use std::ops::{Bound, RangeInclusive};
use proptest::prelude::*; use proptest::prelude::ProptestConfig;
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
use rand::SeedableRng; use rand::SeedableRng;
@@ -128,13 +130,15 @@ pub mod tests {
} }
} }
proptest! { // TODO re-enable once merge is replugged.
#![proptest_config(ProptestConfig::with_cases(10))] //
#[test] // proptest! {
fn test_range_for_docs_prop(ops in proptest::collection::vec(operation_strategy(), 1..1000)) { // #![proptest_config(ProptestConfig::with_cases(10))]
assert!(test_id_range_for_docs(ops).is_ok()); // #[test]
} // fn test_range_for_docs_prop(ops in proptest::collection::vec(operation_strategy(),
} // 1..1000)) { assert!(test_id_range_for_docs(ops).is_ok());
// }
// }
#[test] #[test]
fn range_regression1_test() { fn range_regression1_test() {
@@ -187,7 +191,7 @@ pub mod tests {
assert!(test_id_range_for_docs(ops).is_ok()); assert!(test_id_range_for_docs(ops).is_ok());
} }
pub fn create_index_from_docs(docs: &[Doc]) -> Index { fn create_index_from_docs(docs: &[Doc]) -> Index {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let id_u64_field = schema_builder.add_u64_field("id", INDEXED | STORED | FAST); let id_u64_field = schema_builder.add_u64_field("id", INDEXED | STORED | FAST);
let ids_u64_field = let ids_u64_field =
@@ -210,7 +214,7 @@ pub mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap(); let mut index_writer = index.writer(3_000_000).unwrap();
for doc in docs.iter() { for doc in docs.iter() {
index_writer index_writer
.add_document(doc!( .add_document(doc!(

View File

@@ -109,7 +109,6 @@ impl TermQuery {
} else { } else {
IndexRecordOption::Basic IndexRecordOption::Basic
}; };
Ok(TermWeight::new( Ok(TermWeight::new(
self.term.clone(), self.term.clone(),
index_record_option, index_record_option,

View File

@@ -44,7 +44,7 @@ pub struct IndexReaderBuilder {
index: Index, index: Index,
warmers: Vec<Weak<dyn Warmer>>, warmers: Vec<Weak<dyn Warmer>>,
num_warming_threads: usize, num_warming_threads: usize,
doc_store_cache_num_blocks: usize, doc_store_cache_size: usize,
} }
impl IndexReaderBuilder { impl IndexReaderBuilder {
@@ -55,7 +55,7 @@ impl IndexReaderBuilder {
index, index,
warmers: Vec::new(), warmers: Vec::new(),
num_warming_threads: 1, num_warming_threads: 1,
doc_store_cache_num_blocks: DOCSTORE_CACHE_CAPACITY, doc_store_cache_size: DOCSTORE_CACHE_CAPACITY,
} }
} }
@@ -72,7 +72,7 @@ impl IndexReaderBuilder {
searcher_generation_inventory.clone(), searcher_generation_inventory.clone(),
)?; )?;
let inner_reader = InnerIndexReader::new( let inner_reader = InnerIndexReader::new(
self.doc_store_cache_num_blocks, self.doc_store_cache_size,
self.index, self.index,
warming_state, warming_state,
searcher_generation_inventory, searcher_generation_inventory,
@@ -119,11 +119,8 @@ impl IndexReaderBuilder {
/// ///
/// By default, the doc store readers cache DOCSTORE_CACHE_CAPACITY (100) decompressed blocks. /// By default, the doc store readers cache DOCSTORE_CACHE_CAPACITY (100) decompressed blocks.
#[must_use] #[must_use]
pub fn doc_store_cache_num_blocks( pub fn doc_store_cache_size(mut self, doc_store_cache_size: usize) -> IndexReaderBuilder {
mut self, self.doc_store_cache_size = doc_store_cache_size;
doc_store_cache_num_blocks: usize,
) -> IndexReaderBuilder {
self.doc_store_cache_num_blocks = doc_store_cache_num_blocks;
self self
} }
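
A hedged usage sketch of the doc store cache knob. This hunk shows the setter under two names on the two sides of the diff (`doc_store_cache_num_blocks` vs `doc_store_cache_size`); the sketch uses the former and an arbitrary value of 32.

// Hedged sketch; assumes the `doc_store_cache_num_blocks` spelling of the setter.
use tantivy::{Index, IndexReader};

fn open_reader(index: &Index) -> tantivy::Result<IndexReader> {
    index
        .reader_builder()
        .doc_store_cache_num_blocks(32) // cache up to 32 decompressed doc store blocks (default 100)
        .try_into()
}
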
@@ -154,7 +151,7 @@ impl TryInto<IndexReader> for IndexReaderBuilder {
} }
struct InnerIndexReader { struct InnerIndexReader {
doc_store_cache_num_blocks: usize, doc_store_cache_size: usize,
index: Index, index: Index,
warming_state: WarmingState, warming_state: WarmingState,
searcher: arc_swap::ArcSwap<SearcherInner>, searcher: arc_swap::ArcSwap<SearcherInner>,
@@ -164,7 +161,7 @@ struct InnerIndexReader {
impl InnerIndexReader { impl InnerIndexReader {
fn new( fn new(
doc_store_cache_num_blocks: usize, doc_store_cache_size: usize,
index: Index, index: Index,
warming_state: WarmingState, warming_state: WarmingState,
// The searcher_generation_inventory is not used as source, but as target to track the // The searcher_generation_inventory is not used as source, but as target to track the
@@ -175,13 +172,13 @@ impl InnerIndexReader {
let searcher = Self::create_searcher( let searcher = Self::create_searcher(
&index, &index,
doc_store_cache_num_blocks, doc_store_cache_size,
&warming_state, &warming_state,
&searcher_generation_counter, &searcher_generation_counter,
&searcher_generation_inventory, &searcher_generation_inventory,
)?; )?;
Ok(InnerIndexReader { Ok(InnerIndexReader {
doc_store_cache_num_blocks, doc_store_cache_size,
index, index,
warming_state, warming_state,
searcher: ArcSwap::from(searcher), searcher: ArcSwap::from(searcher),
@@ -217,7 +214,7 @@ impl InnerIndexReader {
fn create_searcher( fn create_searcher(
index: &Index, index: &Index,
doc_store_cache_num_blocks: usize, doc_store_cache_size: usize,
warming_state: &WarmingState, warming_state: &WarmingState,
searcher_generation_counter: &Arc<AtomicU64>, searcher_generation_counter: &Arc<AtomicU64>,
searcher_generation_inventory: &Inventory<SearcherGeneration>, searcher_generation_inventory: &Inventory<SearcherGeneration>,
@@ -235,7 +232,7 @@ impl InnerIndexReader {
index.clone(), index.clone(),
segment_readers, segment_readers,
searcher_generation, searcher_generation,
doc_store_cache_num_blocks, doc_store_cache_size,
)?); )?);
warming_state.warm_new_searcher_generation(&searcher.clone().into())?; warming_state.warm_new_searcher_generation(&searcher.clone().into())?;
@@ -245,7 +242,7 @@ impl InnerIndexReader {
fn reload(&self) -> crate::Result<()> { fn reload(&self) -> crate::Result<()> {
let searcher = Self::create_searcher( let searcher = Self::create_searcher(
&self.index, &self.index,
self.doc_store_cache_num_blocks, self.doc_store_cache_size,
&self.warming_state, &self.warming_state,
&self.searcher_generation_counter, &self.searcher_generation_counter,
&self.searcher_generation_inventory, &self.searcher_generation_inventory,

View File

@@ -49,17 +49,4 @@ impl IndexRecordOption {
IndexRecordOption::WithFreqsAndPositions => true, IndexRecordOption::WithFreqsAndPositions => true,
} }
} }
/// Downgrades to the next level if provided `IndexRecordOption` is unavailable.
pub fn downgrade(&self, other: IndexRecordOption) -> IndexRecordOption {
use IndexRecordOption::*;
match (other, self) {
(WithFreqsAndPositions, WithFreqsAndPositions) => WithFreqsAndPositions,
(WithFreqs, WithFreqs) => WithFreqs,
(WithFreqsAndPositions, WithFreqs) => WithFreqs,
(WithFreqs, WithFreqsAndPositions) => WithFreqs,
_ => Basic,
}
}
} }
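
For context on the removed `downgrade` above: it returned the weaker of the two record options (positions > freqs > basic), falling back to `Basic` on any other combination. A worked example; this only compiles against a tantivy version that still ships the method:

// Worked example of the removed method's table (self.downgrade(other) keeps the weaker level).
use tantivy::schema::IndexRecordOption::{Basic, WithFreqs, WithFreqsAndPositions};

fn main() {
    assert_eq!(WithFreqsAndPositions.downgrade(WithFreqs), WithFreqs);
    assert_eq!(WithFreqs.downgrade(WithFreqsAndPositions), WithFreqs);
    assert_eq!(WithFreqs.downgrade(Basic), Basic); // any pairing involving Basic collapses to Basic
}
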

View File

@@ -366,7 +366,7 @@ where B: AsRef<[u8]>
/// ///
/// If the term is a string, its value is utf-8 encoded. /// If the term is a string, its value is utf-8 encoded.
/// If the term is a u64, its value is encoded according /// If the term is a u64, its value is encoded according
/// to `byteorder::BigEndian`. /// to `byteorder::LittleEndian`.
pub fn value_bytes(&self) -> &[u8] { pub fn value_bytes(&self) -> &[u8] {
&self.0.as_ref()[TERM_METADATA_LENGTH..] &self.0.as_ref()[TERM_METADATA_LENGTH..]
} }
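
On the BigEndian/LittleEndian doc fix above: term bytes have to sort the same way as the numbers they encode, which is exactly what big-endian gives you. A quick check:

// Why big-endian: the byte order of u64::to_be_bytes preserves numeric ordering lexicographically.
fn main() {
    let (a, b) = (255u64, 256u64);
    assert!(a.to_be_bytes() < b.to_be_bytes()); // lexicographic order == numeric order
    assert!(a.to_le_bytes() > b.to_le_bytes()); // little-endian would scramble term ordering
}
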
@@ -375,7 +375,7 @@ where B: AsRef<[u8]>
/// ///
/// Do NOT rely on this byte representation in the index. /// Do NOT rely on this byte representation in the index.
/// This value is likely to change in the future. /// This value is likely to change in the future.
pub fn as_slice(&self) -> &[u8] { pub(crate) fn as_slice(&self) -> &[u8] {
self.0.as_ref() self.0.as_ref()
} }
} }

View File

@@ -4,8 +4,8 @@
//! order to be handled in the `Store`. //! order to be handled in the `Store`.
//! //!
//! Internally, documents (or rather their stored fields) are serialized to a buffer. //! Internally, documents (or rather their stored fields) are serialized to a buffer.
//! When the buffer exceeds `block_size` (defaults to 16K), the buffer is compressed using `brotli`, //! When the buffer exceeds 16K, the buffer is compressed using `brotli`, `LZ4` or `snappy`
//! `LZ4` or `snappy` and the resulting block is written to disk. //! and the resulting block is written to disk.
//! //!
//! One can then request for a specific `DocId`. //! One can then request for a specific `DocId`.
//! A skip list helps navigating to the right block, //! A skip list helps navigating to the right block,
@@ -28,6 +28,8 @@
//! - at the segment level, the //! - at the segment level, the
//! [`SegmentReader`'s `doc` method](../struct.SegmentReader.html#method.doc) //! [`SegmentReader`'s `doc` method](../struct.SegmentReader.html#method.doc)
//! - at the index level, the [`Searcher::doc()`](crate::Searcher::doc) method //! - at the index level, the [`Searcher::doc()`](crate::Searcher::doc) method
//!
//! !
mod compressors; mod compressors;
mod decompressors; mod decompressors;
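
The module docs above describe the block layout; a rough sketch of the resulting lookup path (names are made up, the real logic lives in `StoreReader` and the skip index):

// Illustrative only. Assumes `blocks` is non-empty, sorted by `first_doc`,
// and that `doc >= blocks[0].first_doc`.
struct Block {
    first_doc: u32,
    compressed: Vec<u8>,
}

fn block_for_doc(blocks: &[Block], doc: u32) -> &Block {
    let idx = match blocks.binary_search_by_key(&doc, |b| b.first_doc) {
        Ok(i) => i,      // `doc` is the first document of block i
        Err(i) => i - 1, // otherwise it lives in the preceding block
    };
    // The caller then decompresses `blocks[idx].compressed` (kept in the LRU) and scans it for `doc`.
    &blocks[idx]
}
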

View File

@@ -114,10 +114,7 @@ impl Sum for CacheStats {
impl StoreReader { impl StoreReader {
/// Opens a store reader /// Opens a store reader
/// pub fn open(store_file: FileSlice, cache_size: usize) -> io::Result<StoreReader> {
/// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
/// The size of blocks is configurable; this should be reflected when choosing `cache_num_blocks`.
pub fn open(store_file: FileSlice, cache_num_blocks: usize) -> io::Result<StoreReader> {
let (footer, data_and_offset) = DocStoreFooter::extract_footer(store_file)?; let (footer, data_and_offset) = DocStoreFooter::extract_footer(store_file)?;
let (data_file, offset_index_file) = data_and_offset.split(footer.offset as usize); let (data_file, offset_index_file) = data_and_offset.split(footer.offset as usize);
@@ -128,8 +125,8 @@ impl StoreReader {
decompressor: footer.decompressor, decompressor: footer.decompressor,
data: data_file, data: data_file,
cache: BlockCache { cache: BlockCache {
cache: NonZeroUsize::new(cache_num_blocks) cache: NonZeroUsize::new(cache_size)
.map(|cache_num_blocks| Mutex::new(LruCache::new(cache_num_blocks))), .map(|cache_size| Mutex::new(LruCache::new(cache_size))),
cache_hits: Default::default(), cache_hits: Default::default(),
cache_misses: Default::default(), cache_misses: Default::default(),
}, },
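
One consequence of the `NonZeroUsize::new(..)` construction above: a block count of 0 simply turns the LRU off. Hedged sketch, assuming `StoreReader::open` is reachable at this path in your tantivy version:

// Hedged sketch: NonZeroUsize::new(0) == None, so no LruCache is allocated at all.
use tantivy::directory::FileSlice;
use tantivy::store::StoreReader;

fn open_uncached(store_file: FileSlice) -> std::io::Result<StoreReader> {
    StoreReader::open(store_file, 0) // 0 cached blocks => every read decompresses its block
}
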

File diff suppressed because it is too large

View File

@@ -58,6 +58,11 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
Ok(TSSTable::writer(wrt)) Ok(TSSTable::writer(wrt))
} }
pub(crate) fn sstable_reader(&self) -> io::Result<Reader<'static, TSSTable::ValueReader>> {
let data = self.sstable_slice.read_bytes()?;
Ok(TSSTable::reader(data))
}
pub(crate) fn sstable_reader_block( pub(crate) fn sstable_reader_block(
&self, &self,
block_addr: BlockAddr, block_addr: BlockAddr,

View File

@@ -209,25 +209,6 @@ impl ArenaHashMap {
} }
} }
/// Get a value associated to a key.
pub fn get<V>(&self, key: &[u8]) -> Option<V>
where V: Copy + 'static {
let hash = murmurhash2(key);
let mut probe = self.probe(hash);
loop {
let bucket = probe.next_probe();
let kv: KeyValue = self.table[bucket];
if kv.is_empty() {
return None;
} else if kv.hash == hash {
if let Some(val_addr) = self.get_value_addr_if_key_match(key, kv.key_value_addr) {
let v = self.memory_arena.read(val_addr);
return Some(v);
}
}
}
}
/// `update` creates a new entry for a given key if it does not exist /// `update` creates a new entry for a given key if it does not exist
/// or updates the existing entry. /// or updates the existing entry.
/// ///
@@ -238,13 +219,14 @@ impl ArenaHashMap {
/// will be in charge of returning a default value. /// will be in charge of returning a default value.
/// If the key already has an associated value, then it will be passed /// If the key already has an associated value, then it will be passed
/// `Some(previous_value)`. /// `Some(previous_value)`.
pub fn mutate_or_create<V>( pub fn mutate_or_create<V, TMutator>(
&mut self, &mut self,
key: &[u8], key: &[u8],
mut updater: impl FnMut(Option<V>) -> V, mut updater: TMutator,
) -> UnorderedId ) -> UnorderedId
where where
V: Copy + 'static, V: Copy + 'static,
TMutator: FnMut(Option<V>) -> V,
{ {
if self.is_saturated() { if self.is_saturated() {
self.resize(); self.resize();
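
To round off the signature change above, a hedged usage sketch of the generic `mutate_or_create`, used here as a byte-key counter; the `tantivy_stacker` paths are assumptions based on the crate names in this repository:

// Hedged sketch: paths assumed from the tantivy-stacker crate in this repo.
use tantivy_stacker::{ArenaHashMap, UnorderedId};

fn count_term(map: &mut ArenaHashMap, term: &[u8]) -> UnorderedId {
    // A new key sees None and starts at 1; an existing key receives its previous count.
    map.mutate_or_create(term, |count: Option<u32>| count.map_or(1, |c| c + 1))
}
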