Compare commits

..

2 Commits

Author        SHA1        Message                                Date
Paul Masurel  cd0af6d6b1  first commit to show how to impl u128  2023-01-17 13:44:34 +09:00
Pascal Seitz  49baa15f0f  start migrate Field to &str            2023-01-17 13:34:21 +09:00
                          (start migrate Field to &str in preparation
                          of columnar; return Result for get_field)
58 changed files with 1037 additions and 2079 deletions

View File

@@ -41,7 +41,7 @@ Your mileage WILL vary depending on the nature of queries and their load.
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields
- Text, i64, u64, f64, dates, ip, bool, and hierarchical facet fields
- Text, i64, u64, f64, dates, and hierarchical facet fields
- Compressed document store (LZ4, Zstd, None, Brotli, Snap)
- Range queries
- Faceted search
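The fast fields mentioned above are tantivy's equivalent of Lucene's doc values. As a minimal sketch against tantivy's public schema API (the field name is made up for illustration), enabling them is a single schema flag:
```rust
use tantivy::schema::{Schema, FAST, STORED};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // `FAST` requests a columnar (fast field) representation of this field.
    let population = schema_builder.add_u64_field("population", FAST | STORED);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(population => 9_000_000u64))?;
    writer.commit()?;
    Ok(())
}
```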
@@ -80,21 +80,56 @@ There are many ways to support this project.
# Contributing code
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
Feel free to update CHANGELOG.md with your contribution.
## Tokenizer
When implementing a tokenizer for tantivy, depend on the `tantivy-tokenizer-api` crate.
## Minimum supported Rust version
Tantivy currently requires Rust 1.62 or later to compile.
## Clone and build locally
Tantivy compiles on stable Rust.
To check out and run tests, you can simply run:
```bash
git clone https://github.com/quickwit-oss/tantivy.git
cd tantivy
cargo test
git clone https://github.com/quickwit-oss/tantivy.git
cd tantivy
cargo build
```
## Run tests
Some tests will not run with just `cargo test` because of `fail-rs`.
To run the tests exhaustively, run `./run-tests.sh`.
## Debug
You might find it useful to step through the programme with a debugger.
### A failing test
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`:
```bash
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
```
Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:
```bash
(gdb) run --test-threads 1 --test $NAME_OF_TEST
```
### An example
By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:
```bash
rust-gdb target/debug/examples/$EXAMPLE_NAME
(gdb) run
```
# Companies Using Tantivy

View File

@@ -34,7 +34,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema.clone());
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split('\n') {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -46,7 +46,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split('\n') {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -59,7 +59,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema_with_store.clone());
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split('\n') {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -71,7 +71,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema_with_store.clone());
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split('\n') {
for doc_json in HDFS_LOGS.trim().split("\n") {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -85,7 +85,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let json_field = dynamic_schema.get_field("json").unwrap();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split('\n') {
for doc_json in HDFS_LOGS.trim().split("\n") {
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
let doc = tantivy::doc!(json_field=>json_val);
@@ -101,7 +101,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let json_field = dynamic_schema.get_field("json").unwrap();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split('\n') {
for doc_json in HDFS_LOGS.trim().split("\n") {
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
let doc = tantivy::doc!(json_field=>json_val);

View File

@@ -15,7 +15,3 @@ homepage = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
[dev-dependencies]
rand = "0.8"
proptest = "1"

View File

@@ -4,39 +4,9 @@ extern crate test;
#[cfg(test)]
mod tests {
use rand::seq::IteratorRandom;
use rand::thread_rng;
use tantivy_bitpacker::{BitPacker, BitUnpacker, BlockedBitpacker};
use tantivy_bitpacker::BlockedBitpacker;
use test::Bencher;
#[inline(never)]
fn create_bitpacked_data(bit_width: u8, num_els: u32) -> Vec<u8> {
let mut bitpacker = BitPacker::new();
let mut buffer = Vec::new();
for _ in 0..num_els {
// the values do not matter.
bitpacker.write(0u64, bit_width, &mut buffer).unwrap();
bitpacker.flush(&mut buffer).unwrap();
}
buffer
}
#[bench]
fn bench_bitpacking_read(b: &mut Bencher) {
let bit_width = 3;
let num_els = 1_000_000u32;
let bit_unpacker = BitUnpacker::new(bit_width);
let data = create_bitpacked_data(bit_width, num_els);
let idxs: Vec<u32> = (0..num_els).choose_multiple(&mut thread_rng(), 100_000);
b.iter(|| {
let mut out = 0u64;
for &idx in &idxs {
out = out.wrapping_add(bit_unpacker.get(idx, &data[..]));
}
out
});
}
#[bench]
fn bench_blockedbitp_read(b: &mut Bencher) {
let mut blocked_bitpacker = BlockedBitpacker::new();
@@ -44,9 +14,9 @@ mod tests {
blocked_bitpacker.add(val * val);
}
b.iter(|| {
let mut out = 0u64;
let mut out = 0;
for val in 0..=21500 {
out = out.wrapping_add(blocked_bitpacker.get(val));
out = blocked_bitpacker.get(val);
}
out
});

View File

@@ -56,31 +56,27 @@ impl BitPacker {
pub fn close<TWrite: io::Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
self.flush(output)?;
// Pad the written file to simplify reads.
output.write_all(&[0u8; 7])?;
Ok(())
}
}
#[derive(Clone, Debug, Default, Copy)]
#[derive(Clone, Debug, Default)]
pub struct BitUnpacker {
num_bits: u32,
num_bits: u64,
mask: u64,
}
impl BitUnpacker {
/// Creates a bit unpacker that assumes the same bit width for all values.
///
/// The bit unpacker works by doing an unaligned read of 8 bytes.
/// For this reason, values of `num_bits` in
/// 57..=63 are forbidden.
pub fn new(num_bits: u8) -> BitUnpacker {
assert!(num_bits <= 7 * 8 || num_bits == 64);
let mask: u64 = if num_bits == 64 {
!0u64
} else {
(1u64 << num_bits) - 1u64
};
BitUnpacker {
num_bits: u32::from(num_bits),
num_bits: u64::from(num_bits),
mask,
}
}
@@ -91,40 +87,28 @@ impl BitUnpacker {
#[inline]
pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
let addr_in_bits = idx * self.num_bits;
let addr = (addr_in_bits >> 3) as usize;
if addr + 8 > data.len() {
if self.num_bits == 0 {
return 0;
}
let bit_shift = addr_in_bits & 7;
return self.get_slow_path(addr, bit_shift, data);
if self.num_bits == 0 {
return 0u64;
}
let addr_in_bits = idx * self.num_bits as u32;
let addr = (addr_in_bits >> 3) as usize;
let bit_shift = addr_in_bits & 7;
debug_assert!(
addr + 8 <= data.len(),
"The fast field field should have been padded with 7 bytes."
);
let bytes: [u8; 8] = (&data[addr..addr + 8]).try_into().unwrap();
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = val_unshifted_unmasked >> bit_shift;
val_shifted & self.mask
}
#[inline(never)]
fn get_slow_path(&self, addr: usize, bit_shift: u32, data: &[u8]) -> u64 {
let mut bytes: [u8; 8] = [0u8; 8];
let available_bytes = data.len() - addr;
// This function is meant to only be called if we did not have 8 bytes to load.
debug_assert!(available_bytes < 8);
bytes[..available_bytes].copy_from_slice(&data[addr..]);
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = val_unshifted_unmasked >> bit_shift;
val_shifted & self.mask
}
}
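To make the fast path above concrete, here is a self-contained sketch of the unaligned-read trick (a hypothetical helper, not the crate's API): since `num_bits <= 56`, a value starts at most 7 bits into its first byte and therefore always fits in one 8-byte little-endian load, provided the writer appended 7 bytes of padding.
```rust
/// Minimal sketch of bit-unpacking via an unaligned 8-byte read.
/// Assumes `num_bits <= 56` (or exactly 64) and 7 trailing padding bytes.
fn unpack(num_bits: u8, idx: u32, data: &[u8]) -> u64 {
    let mask = if num_bits == 64 {
        u64::MAX
    } else {
        (1u64 << num_bits) - 1
    };
    let addr_in_bits = idx as usize * num_bits as usize;
    let addr = addr_in_bits >> 3; // byte containing the value's first bit
    let bit_shift = (addr_in_bits & 7) as u32; // bit offset within that byte
    // bit_shift + num_bits <= 7 + 56 <= 64: one u64 load always suffices.
    let bytes: [u8; 8] = data[addr..addr + 8].try_into().unwrap();
    (u64::from_le_bytes(bytes) >> bit_shift) & mask
}
```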
#[cfg(test)]
mod test {
use super::{BitPacker, BitUnpacker};
fn create_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>, Vec<u8>) {
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>, Vec<u8>) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new();
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
@@ -135,13 +119,13 @@ mod test {
bitpacker.write(val, num_bits, &mut data).unwrap();
}
bitpacker.close(&mut data).unwrap();
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8);
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
let bitunpacker = BitUnpacker::new(num_bits);
(bitunpacker, vals, data)
}
fn test_bitpacker_util(len: usize, num_bits: u8) {
let (bitunpacker, vals, data) = create_bitpacker(len, num_bits);
let (bitunpacker, vals, data) = create_fastfield_bitpacker(len, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i as u32, &data), *val);
}
@@ -155,49 +139,4 @@ mod test {
test_bitpacker_util(6, 14);
test_bitpacker_util(1000, 14);
}
use proptest::prelude::*;
fn num_bits_strategy() -> impl Strategy<Value = u8> {
prop_oneof!(Just(0), Just(1), 2u8..56u8, Just(56), Just(64),)
}
fn vals_strategy() -> impl Strategy<Value = (u8, Vec<u64>)> {
(num_bits_strategy(), 0usize..100usize).prop_flat_map(|(num_bits, len)| {
let max_val = if num_bits == 64 {
u64::MAX
} else {
(1u64 << num_bits as u32) - 1
};
let vals = proptest::collection::vec(0..=max_val, len);
vals.prop_map(move |vals| (num_bits, vals))
})
}
fn test_bitpacker_aux(num_bits: u8, vals: &[u64]) {
let mut buffer: Vec<u8> = Vec::new();
let mut bitpacker = BitPacker::new();
for &val in vals {
bitpacker.write(val, num_bits, &mut buffer).unwrap();
}
bitpacker.flush(&mut buffer).unwrap();
assert_eq!(buffer.len(), (vals.len() * num_bits as usize + 7) / 8);
let bitunpacker = BitUnpacker::new(num_bits);
let max_val = if num_bits == 64 {
u64::MAX
} else {
(1u64 << num_bits) - 1
};
for (i, val) in vals.iter().copied().enumerate() {
assert!(val <= max_val);
assert_eq!(bitunpacker.get(i as u32, &buffer), val);
}
}
proptest::proptest! {
#[test]
fn test_bitpacker_proptest((num_bits, vals) in vals_strategy()) {
test_bitpacker_aux(num_bits, &vals);
}
}
}

View File

@@ -35,7 +35,6 @@ remove all doc_id occurrences -> row_id
use the rank & select naming in unit tests branch.
multi-linear -> blockwise
linear codec -> simply a multiplication for the index column
rename columnar to something more explicit, like column_dictionary or columnar_table
# Other
fix enhance column-cli

View File

@@ -5,16 +5,9 @@ use std::sync::Arc;
use sstable::{Dictionary, VoidSSTable};
use crate::column::Column;
use crate::RowId;
use crate::column_index::ColumnIndex;
/// Dictionary encoded column.
///
/// The column simply gives access to a regular u64 column in
/// which the values are term ordinals.
///
/// These ordinals are ids that uniquely identify the bytes stored in
/// the column. These ordinals are small, and sorted in the same order
/// as the term_ord_column.
#[derive(Clone)]
pub struct BytesColumn {
pub(crate) dictionary: Arc<Dictionary<VoidSSTable>>,
@@ -22,57 +15,26 @@ pub struct BytesColumn {
}
impl BytesColumn {
/// Fills the given `output` buffer with the term associated to the ordinal `ord`.
///
/// Returns `false` if the term does not exist (e.g. `term_ord` is greater than or equal to the
/// overall number of terms).
pub fn ord_to_bytes(&self, ord: u64, output: &mut Vec<u8>) -> io::Result<bool> {
self.dictionary.ord_to_term(ord, output)
pub fn term_ord_to_str(&self, term_ord: u64, output: &mut Vec<u8>) -> io::Result<bool> {
self.dictionary.ord_to_term(term_ord, output)
}
/// Returns the number of rows in the column.
pub fn num_rows(&self) -> RowId {
self.term_ord_column.num_rows()
}
/// Returns the column of ordinals
pub fn ords(&self) -> &Column<u64> {
pub fn term_ords(&self) -> &Column<u64> {
&self.term_ord_column
}
}
#[derive(Clone)]
pub struct StrColumn(BytesColumn);
impl From<BytesColumn> for StrColumn {
fn from(bytes_col: BytesColumn) -> Self {
StrColumn(bytes_col)
}
}
impl StrColumn {
/// Fills the buffer
pub fn ord_to_str(&self, term_ord: u64, output: &mut String) -> io::Result<bool> {
unsafe {
let buf = output.as_mut_vec();
self.0.dictionary.ord_to_term(term_ord, buf)?;
// TODO consider removing checks if it hurts performance.
if std::str::from_utf8(buf.as_slice()).is_err() {
buf.clear();
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Not valid utf-8",
));
}
}
Ok(true)
}
}
impl Deref for StrColumn {
type Target = BytesColumn;
impl Deref for BytesColumn {
type Target = ColumnIndex<'static>;
fn deref(&self) -> &Self::Target {
&self.0
&**self.term_ords()
}
}
#[cfg(test)]
mod tests {
use crate::{ColumnarReader, ColumnarWriter};
}
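As a reference model for the dictionary encoding described above (hypothetical types, not the crate's API): distinct byte strings are stored once in a sorted dictionary, and the column itself holds one small ordinal per row.
```rust
/// Simplified model of a dictionary-encoded bytes column.
struct DictColumn {
    /// Sorted, deduplicated terms; a term's index is its ordinal.
    dictionary: Vec<Vec<u8>>,
    /// One term ordinal per row.
    term_ords: Vec<u64>,
}

impl DictColumn {
    /// Fills `output` with the term for `ord`;
    /// returns false if `ord` is out of range.
    fn ord_to_bytes(&self, ord: u64, output: &mut Vec<u8>) -> bool {
        match self.dictionary.get(ord as usize) {
            Some(term) => {
                output.extend_from_slice(term);
                true
            }
            None => false,
        }
    }
}
```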

View File

@@ -5,11 +5,8 @@ use std::ops::Deref;
use std::sync::Arc;
use common::BinarySerializable;
pub use dictionary_encoded::{BytesColumn, StrColumn};
pub use serialize::{
open_column_bytes, open_column_u128, open_column_u64, serialize_column_mappable_to_u128,
serialize_column_mappable_to_u64,
};
pub use dictionary_encoded::BytesColumn;
pub use serialize::{open_column_bytes, open_column_u64, serialize_column_u64};
use crate::column_index::ColumnIndex;
use crate::column_values::ColumnValues;
@@ -21,43 +18,21 @@ pub struct Column<T> {
pub values: Arc<dyn ColumnValues<T>>,
}
use crate::column_index::Set;
impl<T: PartialOrd> Column<T> {
pub fn num_rows(&self) -> RowId {
pub fn first(&self, row_id: RowId) -> Option<T> {
match &self.idx {
ColumnIndex::Full => self.values.num_vals() as u32,
ColumnIndex::Optional(optional_index) => optional_index.num_rows(),
ColumnIndex::Multivalued(col_index) => {
// The multivalued index contains all value start row_id,
// and one extra value at the end with the overall number of rows.
col_index.num_vals() - 1
ColumnIndex::Full => Some(self.values.get_val(row_id)),
ColumnIndex::Optional(opt_idx) => {
let value_row_idx = opt_idx.rank_if_exists(row_id)?;
Some(self.values.get_val(value_row_idx))
}
ColumnIndex::Multivalued(_multivalued_index) => {
todo!();
}
}
}
pub fn min_value(&self) -> T {
self.values.min_value()
}
pub fn max_value(&self) -> T {
self.values.max_value()
}
}
impl<T: PartialOrd + Copy + Send + Sync + 'static> Column<T> {
pub fn first(&self, row_id: RowId) -> Option<T> {
self.values(row_id).next()
}
pub fn values(&self, row_id: RowId) -> impl Iterator<Item = T> + '_ {
self.value_row_ids(row_id)
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
}
pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> {
Arc::new(FirstValueWithDefault {
column: self,
default_value,
})
}
}
impl<T> Deref for Column<T> {
@@ -79,31 +54,3 @@ impl BinarySerializable for Cardinality {
Ok(cardinality)
}
}
// TODO simplify or optimize
struct FirstValueWithDefault<T: Copy> {
column: Column<T>,
default_value: T,
}
impl<T: PartialOrd + Send + Sync + Copy + 'static> ColumnValues<T> for FirstValueWithDefault<T> {
fn get_val(&self, idx: u32) -> T {
self.column.first(idx).unwrap_or(self.default_value)
}
fn min_value(&self) -> T {
self.column.values.min_value()
}
fn max_value(&self) -> T {
self.column.values.max_value()
}
fn num_vals(&self) -> u32 {
match &self.column.idx {
ColumnIndex::Full => self.column.values.num_vals(),
ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(),
ColumnIndex::Multivalued(_) => todo!(),
}
}
}
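The new `first` above can be read against this simplified model (hypothetical, for illustration only): a full column stores one value per row, while an optional column packs its values densely and uses the rank of a row id among the non-null rows to find the value slot.
```rust
/// Simplified model of a column with a full or optional index.
enum Idx {
    Full,
    /// Sorted row ids that actually hold a value.
    Optional { non_null_rows: Vec<u32> },
}

fn first(idx: &Idx, values: &[u64], row_id: u32) -> Option<u64> {
    match idx {
        // Full: every row has a value, so row_id indexes values directly.
        Idx::Full => values.get(row_id as usize).copied(),
        // Optional: rank_if_exists(row_id) gives the dense value index.
        Idx::Optional { non_null_rows } => {
            let rank = non_null_rows.binary_search(&row_id).ok()?;
            values.get(rank).copied()
        }
    }
}
```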

View File

@@ -2,51 +2,24 @@ use std::io;
use std::io::Write;
use std::sync::Arc;
use common::OwnedBytes;
use common::{CountingWriter, OwnedBytes};
use sstable::Dictionary;
use crate::column::{BytesColumn, Column};
use crate::column_index::{serialize_column_index, SerializableColumnIndex};
use crate::column_values::serialize::serialize_column_values_u128;
use crate::column_values::{
serialize_column_values, ColumnValues, FastFieldCodecType, MonotonicallyMappableToU128,
MonotonicallyMappableToU64,
serialize_column_values, ColumnValues, MonotonicallyMappableToU64, ALL_CODEC_TYPES,
};
pub fn serialize_column_mappable_to_u128<
F: Fn() -> I,
I: Iterator<Item = T>,
T: MonotonicallyMappableToU128,
>(
column_index: SerializableColumnIndex<'_>,
column_values: F,
num_vals: u32,
output: &mut impl Write,
) -> io::Result<()> {
let column_index_num_bytes = serialize_column_index(column_index, output)?;
serialize_column_values_u128(
|| column_values().map(|val| val.to_u128()),
num_vals,
output,
)?;
output.write_all(&column_index_num_bytes.to_le_bytes())?;
Ok(())
}
pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64>(
pub fn serialize_column_u64<T: MonotonicallyMappableToU64>(
column_index: SerializableColumnIndex<'_>,
column_values: &impl ColumnValues<T>,
output: &mut impl Write,
) -> io::Result<()> {
let column_index_num_bytes = serialize_column_index(column_index, output)?;
serialize_column_values(
column_values,
&[
FastFieldCodecType::Bitpacked,
FastFieldCodecType::BlockwiseLinear,
],
output,
)?;
let mut counting_writer = CountingWriter::wrap(output);
serialize_column_index(column_index, &mut counting_writer)?;
let column_index_num_bytes = counting_writer.written_bytes() as u32;
let output = counting_writer.finish();
serialize_column_values(column_values, &ALL_CODEC_TYPES[..], output)?;
output.write_all(&column_index_num_bytes.to_le_bytes())?;
Ok(())
}
@@ -68,34 +41,14 @@ pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::
})
}
pub fn open_column_u128<T: MonotonicallyMappableToU128>(
bytes: OwnedBytes,
) -> io::Result<Column<T>> {
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
let column_index_num_bytes = u32::from_le_bytes(
column_index_num_bytes_payload
.as_slice()
.try_into()
.unwrap(),
);
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
let column_index = crate::column_index::open_column_index(column_index_data)?;
let column_values = crate::column_values::open_u128_mapped(column_values_data)?;
Ok(Column {
idx: column_index,
values: column_values,
})
}
pub fn open_column_bytes<T: From<BytesColumn>>(data: OwnedBytes) -> io::Result<T> {
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
let (body, dictionary_len_bytes) = data.rsplit(4);
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
let bytes_column = BytesColumn {
Ok(BytesColumn {
dictionary,
term_ord_column,
};
Ok(bytes_column.into())
})
}
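Reading the serializer and the `open_*` functions above together suggests this on-disk layout (an assumption inferred from the diff, with a hypothetical helper): column index first, column values next, and the index length appended as a 4-byte little-endian footer.
```rust
/// Splits serialized column bytes into (index_bytes, value_bytes).
/// Assumed layout: [ column index | column values | index_len: u32 LE ]
fn split_column(bytes: &[u8]) -> (&[u8], &[u8]) {
    let (body, len_footer) = bytes.split_at(bytes.len() - 4);
    let index_len = u32::from_le_bytes(len_footer.try_into().unwrap()) as usize;
    body.split_at(index_len)
}
```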

View File

@@ -2,7 +2,6 @@ mod multivalued_index;
mod optional_index;
mod serialize;
use std::ops::Range;
use std::sync::Arc;
pub use optional_index::{OptionalIndex, SerializableOptionalIndex, Set};
@@ -15,12 +14,8 @@ use crate::{Cardinality, RowId};
pub enum ColumnIndex<'a> {
Full,
Optional(OptionalIndex),
// TODO Remove the static by fixing the codec if possible.
/// The enclosed column values contain, for each row_id,
/// the start index of its values.
///
/// In addition, at index num_rows, an extra value is added
/// containing the overall number of values.
// TODO remove the Arc<dyn>; apart from serialization this is not
// dynamic at all.
Multivalued(Arc<dyn ColumnValues<RowId> + 'a>),
}
@@ -33,22 +28,13 @@ impl<'a> ColumnIndex<'a> {
}
}
pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
pub fn num_rows(&self) -> RowId {
match self {
ColumnIndex::Full => row_id..row_id + 1,
ColumnIndex::Optional(optional_index) => {
if let Some(val) = optional_index.rank_if_exists(row_id) {
val..val + 1
} else {
0..0
}
}
ColumnIndex::Multivalued(multivalued_index) => {
let multivalued_index_ref = &**multivalued_index;
let start: u32 = multivalued_index_ref.get_val(row_id);
let end: u32 = multivalued_index_ref.get_val(row_id + 1);
start..end
ColumnIndex::Full => {
todo!()
}
ColumnIndex::Optional(optional_index) => optional_index.num_rows(),
ColumnIndex::Multivalued(multivalued_index) => multivalued_index.num_vals() - 1,
}
}
}
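The `num_vals() - 1` in the new `num_rows` follows from the multivalued convention spelled out in the removed comment: the offsets column stores one start index per row, plus one final entry holding the total number of values. A small worked example with a hypothetical helper:
```rust
use std::ops::Range;

/// For rows [a], [], [b, c] the start offsets are [0, 1, 1, 3]:
/// one entry per row plus the total value count at the end,
/// hence num_rows == offsets.len() - 1.
fn value_range(offsets: &[u32], row_id: usize) -> Range<u32> {
    offsets[row_id]..offsets[row_id + 1] // row 2 spans 1..3, i.e. b and c
}
```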

View File

@@ -11,11 +11,11 @@ use crate::RowId;
pub struct MultivaluedIndex(Arc<dyn ColumnValues<RowId>>);
pub fn serialize_multivalued_index(
multivalued_index: &dyn ColumnValues<RowId>,
multivalued_index: MultivaluedIndex,
output: &mut impl Write,
) -> io::Result<()> {
crate::column_values::serialize_column_values(
&*multivalued_index,
&*multivalued_index.0,
&[FastFieldCodecType::Bitpacked, FastFieldCodecType::Linear],
output,
)?;
@@ -23,7 +23,5 @@ pub fn serialize_multivalued_index(
}
pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<Arc<dyn ColumnValues<RowId>>> {
let start_index_column: Arc<dyn ColumnValues<RowId>> =
crate::column_values::open_u64_mapped(bytes)?;
Ok(start_index_column)
todo!();
}

View File

@@ -5,8 +5,8 @@ use std::sync::Arc;
mod set;
mod set_block;
use common::{BinarySerializable, OwnedBytes, VInt};
pub use set::{Set, SetCodec, SelectCursor};
use common::{BinarySerializable, GroupByIteratorExtended, OwnedBytes, VInt};
pub use set::{Set, SetCodec};
use set_block::{
DenseBlock, DenseBlockCodec, SparseBlock, SparseBlockCodec, DENSE_BLOCK_NUM_BYTES,
};
@@ -115,59 +115,7 @@ fn row_addr_from_row_id(row_id: RowId) -> RowAddr {
}
}
enum BlockSelectCursor<'a> {
Dense(<DenseBlock<'a> as Set<u16>>::SelectCursor<'a>),
Sparse(<SparseBlock<'a> as Set<u16>>::SelectCursor<'a>),
}
impl<'a> BlockSelectCursor<'a> {
fn select(&mut self, rank: u16) -> u16 {
match self {
BlockSelectCursor::Dense(dense_select_cursor) => dense_select_cursor.select(rank),
BlockSelectCursor::Sparse(sparse_select_cursor) => sparse_select_cursor.select(rank),
}
}
}
pub struct OptionalIndexSelectCursor<'a> {
current_block_cursor: BlockSelectCursor<'a>,
current_block_id: u16,
// The current block is guaranteed to contain ranks < end_rank.
current_block_end_rank: RowId,
optional_index: &'a OptionalIndex,
block_doc_idx_start: RowId,
num_null_rows_before_block: RowId,
}
impl<'a> OptionalIndexSelectCursor<'a> {
fn search_and_load_block(&mut self, rank: RowId) {
if rank < self.current_block_end_rank {
// we are already in the right block
return;
}
self.current_block_id = self.optional_index.find_block(rank, self.current_block_id);
self.current_block_end_rank = self.optional_index.block_metas.get(self.current_block_id as usize + 1).map(|block_meta| block_meta.non_null_rows_before_block).unwrap_or(u32::MAX);
self.block_doc_idx_start = (self.current_block_id as u32) * ELEMENTS_PER_BLOCK;
let block_meta = self.optional_index.block_metas[self.current_block_id as usize];
self.num_null_rows_before_block = block_meta.non_null_rows_before_block;
let block: Block<'_> = self.optional_index.block(block_meta);
self.current_block_cursor = match block {
Block::Dense(dense_block) => BlockSelectCursor::Dense(dense_block.select_cursor()),
Block::Sparse(sparse_block) => BlockSelectCursor::Sparse(sparse_block.select_cursor()),
};
}
}
impl<'a> SelectCursor<RowId> for OptionalIndexSelectCursor<'a> {
fn select(&mut self, rank: RowId) -> RowId {
self.search_and_load_block(rank);
let index_in_block = (rank - self.num_null_rows_before_block) as u16;
self.current_block_cursor.select(index_in_block) as RowId + self.block_doc_idx_start
}
}
impl Set<RowId> for OptionalIndex {
type SelectCursor<'b> = OptionalIndexSelectCursor<'b> where Self: 'b;
// Check if value at position is not null.
#[inline]
fn contains(&self, row_id: RowId) -> bool {
@@ -200,7 +148,7 @@ impl Set<RowId> for OptionalIndex {
#[inline]
fn select(&self, rank: RowId) -> RowId {
let block_pos = self.find_block(rank, 0);
let block_doc_idx_start = (block_pos as u32) * ELEMENTS_PER_BLOCK;
let block_doc_idx_start = block_pos * ELEMENTS_PER_BLOCK;
let block_meta = self.block_metas[block_pos as usize];
let block: Block<'_> = self.block(block_meta);
let index_in_block = (rank - block_meta.non_null_rows_before_block) as u16;
@@ -211,27 +159,39 @@ impl Set<RowId> for OptionalIndex {
block_doc_idx_start + in_block_rank as u32
}
fn select_cursor<'b>(&'b self) -> OptionalIndexSelectCursor<'b> {
OptionalIndexSelectCursor {
current_block_cursor: BlockSelectCursor::Sparse(SparseBlockCodec::open(b"").select_cursor()),
current_block_id: 0u16,
current_block_end_rank: 0u32, //< this is sufficient to force the first load
optional_index: self,
block_doc_idx_start: 0u32,
num_null_rows_before_block: 0u32,
fn select_batch(&self, ranks: &[u32], output_idxs: &mut [u32]) {
let mut block_pos = 0u32;
let mut start = 0;
let group_by_it = ranks.iter().copied().group_by(move |codec_idx| {
block_pos = self.find_block(*codec_idx, block_pos);
block_pos
});
for (block_pos, block_iter) in group_by_it {
let block_doc_idx_start = block_pos * ELEMENTS_PER_BLOCK;
let block_meta = self.block_metas[block_pos as usize];
let block: Block<'_> = self.block(block_meta);
let offset = block_meta.non_null_rows_before_block;
let indexes_in_block_iter =
block_iter.map(move |codec_idx| (codec_idx - offset) as u16);
match block {
Block::Dense(dense_block) => {
for in_offset in dense_block.select_iter(indexes_in_block_iter) {
output_idxs[start] = in_offset as u32 + block_doc_idx_start;
start += 1;
}
}
Block::Sparse(sparse_block) => {
for in_offset in sparse_block.select_iter(indexes_in_block_iter) {
output_idxs[start] = in_offset as u32 + block_doc_idx_start;
start += 1;
}
}
};
}
}
}
impl OptionalIndex {
pub fn select_batch(&self, ranks: &mut [RowId]) {
let mut select_cursor = self.select_cursor();
for rank in ranks.iter_mut() {
*rank = select_cursor.select(*rank);
}
}
#[inline]
fn block<'a>(&'a self, block_meta: BlockMeta) -> Block<'a> {
let BlockMeta {
@@ -254,14 +214,14 @@ impl OptionalIndex {
}
#[inline]
fn find_block(&self, dense_idx: u32, start_block_pos: u16) -> u16 {
for block_pos in start_block_pos..self.block_metas.len() as u16 {
fn find_block(&self, dense_idx: u32, start_block_pos: u32) -> u32 {
for block_pos in start_block_pos..self.block_metas.len() as u32 {
let offset = self.block_metas[block_pos as usize].non_null_rows_before_block;
if offset > dense_idx {
return block_pos - 1u16;
return block_pos - 1;
}
}
self.block_metas.len() as u16 - 1u16
self.block_metas.len() as u32 - 1u32
}
// TODO Add a good API for the codec_idx to original_idx translation.
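As a mental model for the machinery above (naive reference implementations, not the crate's API): an optional index is conceptually a sorted set of non-null row ids, `select` maps a rank back to a row id, and `rank_if_exists` is its partial inverse. The blocks only accelerate these two operations: `find_block` narrows a rank to one block via the running `non_null_rows_before_block` counts, and the dense or sparse codec resolves the remainder inside the block.
```rust
/// select(rank): the rank-th (0-based) non-null row id.
fn select_naive(non_null_rows: &[u32], rank: u32) -> u32 {
    non_null_rows[rank as usize]
}

/// rank_if_exists(row_id): Some(rank) iff row_id holds a value.
fn rank_if_exists_naive(non_null_rows: &[u32], row_id: u32) -> Option<u32> {
    non_null_rows.binary_search(&row_id).ok().map(|pos| pos as u32)
}
```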

View File

@@ -13,19 +13,7 @@ pub trait SetCodec {
fn open<'a>(data: &'a [u8]) -> Self::Reader<'a>;
}
/// Stateful object that makes it possible to compute several selects in a row,
/// provided the ranks passed as arguments are increasing.
pub trait SelectCursor<T> {
// May panic if rank is greater than the number of elements in the Set,
// or if rank is lower than the value provided in the previous call.
fn select(&mut self, rank: T) -> T;
}
pub trait Set<T> {
type SelectCursor<'b>: SelectCursor<T> where Self: 'b;
/// Returns true if the element is contained in the Set
fn contains(&self, el: T) -> bool;
@@ -40,6 +28,11 @@ pub trait Set<T> {
/// May panic if rank is greater than the number of elements in the Set.
fn select(&self, rank: T) -> T;
/// Creates a brand new select cursor.
fn select_cursor<'b>(&'b self,) -> Self::SelectCursor<'b>;
/// Batch version of select.
/// `ranks` is assumed to be sorted.
///
/// # Panics
///
/// May panic if rank is greater than the number of elements in the Set.
fn select_batch(&self, ranks: &[T], outputs: &mut [T]);
}

View File

@@ -1,7 +1,7 @@
mod dense;
mod set_block;
mod sparse;
pub use dense::{DenseBlock, DenseBlockCodec, DENSE_BLOCK_NUM_BYTES};
pub use set_block::{DenseBlock, DenseBlockCodec, DENSE_BLOCK_NUM_BYTES};
pub use sparse::{SparseBlock, SparseBlockCodec};
#[cfg(test)]

View File

@@ -3,7 +3,7 @@ use std::io::{self, Write};
use common::BinarySerializable;
use crate::column_index::optional_index::{Set, SetCodec, SelectCursor, ELEMENTS_PER_BLOCK};
use crate::column_index::optional_index::{Set, SetCodec, ELEMENTS_PER_BLOCK};
#[inline(always)]
fn get_bit_at(input: u64, n: u16) -> bool {
@@ -105,24 +105,7 @@ impl DenseMiniBlock {
#[derive(Copy, Clone)]
pub struct DenseBlock<'a>(&'a [u8]);
pub struct DenseBlockSelectCursor<'a> {
block_id: u16,
dense_block: DenseBlock<'a>,
}
impl<'a> SelectCursor<u16> for DenseBlockSelectCursor<'a> {
#[inline]
fn select(&mut self, rank: u16) -> u16 {
self.block_id = self.dense_block.find_miniblock_containing_rank(rank, self.block_id).unwrap();
let index_block = self.dense_block.mini_block(self.block_id);
let in_block_rank = rank - index_block.rank;
self.block_id * ELEMENTS_PER_MINI_BLOCK + select_u64(index_block.bitvec, in_block_rank)
}
}
impl<'a> Set<u16> for DenseBlock<'a> {
type SelectCursor<'b> = DenseBlockSelectCursor<'a> where Self: 'b;
#[inline(always)]
fn contains(&self, el: u16) -> bool {
let mini_block_id = el / ELEMENTS_PER_MINI_BLOCK;
@@ -153,15 +136,37 @@ impl<'a> Set<u16> for DenseBlock<'a> {
block_id * ELEMENTS_PER_MINI_BLOCK + select_u64(index_block.bitvec, in_block_rank)
}
#[inline(always)]
fn select_cursor<'b>(&'b self,) -> Self::SelectCursor<'b> {
DenseBlockSelectCursor {
block_id: 0,
dense_block: *self,
fn select_batch(&self, ranks: &[u16], outputs: &mut [u16]) {
let orig_ids = self.select_iter(ranks.iter().copied());
for (output, original_id) in outputs.iter_mut().zip(orig_ids) {
*output = original_id;
}
}
}
impl<'a> DenseBlock<'a> {
/// Iterator version of select.
///
/// # Panics
/// Panics if one of the ranks is higher than the number of elements in the set.
pub fn select_iter<'b>(
&self,
rank_it: impl Iterator<Item = u16> + 'b,
) -> impl Iterator<Item = u16> + 'b
where
Self: 'b,
{
let mut block_id = 0u16;
let me = *self;
rank_it.map(move |rank| {
block_id = me.find_miniblock_containing_rank(rank, block_id).unwrap();
let index_block = me.mini_block(block_id);
let in_block_rank = rank - index_block.rank;
block_id * ELEMENTS_PER_MINI_BLOCK + select_u64(index_block.bitvec, in_block_rank)
})
}
}
impl<'a> DenseBlock<'a> {
#[inline]
fn mini_block(&self, mini_block_id: u16) -> DenseMiniBlock {

View File

@@ -1,4 +1,4 @@
use crate::column_index::optional_index::{Set, SetCodec, SelectCursor};
use crate::column_index::optional_index::{Set, SetCodec};
pub struct SparseBlockCodec;
@@ -24,17 +24,7 @@ impl SetCodec for SparseBlockCodec {
#[derive(Copy, Clone)]
pub struct SparseBlock<'a>(&'a [u8]);
impl<'a> SelectCursor<u16> for SparseBlock<'a> {
#[inline]
fn select(&mut self, rank: u16) -> u16 {
<SparseBlock<'a> as Set<u16>>::select(self, rank)
}
}
impl<'a> Set<u16> for SparseBlock<'a> {
type SelectCursor<'b> = Self where Self: 'b;
#[inline(always)]
fn contains(&self, el: u16) -> bool {
self.binary_search(el).is_ok()
@@ -51,11 +41,12 @@ impl<'a> Set<u16> for SparseBlock<'a> {
u16::from_le_bytes(self.0[offset..offset + 2].try_into().unwrap())
}
#[inline(always)]
fn select_cursor<'b>(&'b self,) -> Self::SelectCursor<'b> {
*self
fn select_batch(&self, ranks: &[u16], outputs: &mut [u16]) {
let orig_ids = self.select_iter(ranks.iter().copied());
for (output, original_id) in outputs.iter_mut().zip(orig_ids) {
*output = original_id;
}
}
}
#[inline(always)]
@@ -105,4 +96,17 @@ impl<'a> SparseBlock<'a> {
}
Err(left)
}
pub fn select_iter<'b>(
&self,
iter: impl Iterator<Item = u16> + 'b,
) -> impl Iterator<Item = u16> + 'b
where
Self: 'b,
{
iter.map(|codec_id| {
let offset = codec_id as usize * 2;
u16::from_le_bytes(self.0[offset..offset + 2].try_into().unwrap())
})
}
}
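The sparse codec above is just a sorted array of u16 values, two little-endian bytes per element, so `select` boils down to a direct offset lookup. A minimal sketch of the same access pattern (hypothetical helper):
```rust
/// Reads the rank-th element of a sparse block: each element is a u16
/// stored as two little-endian bytes at byte offset rank * 2.
fn sparse_select(block: &[u8], rank: u16) -> u16 {
    let offset = rank as usize * 2;
    u16::from_le_bytes([block[offset], block[offset + 1]])
}
```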

View File

@@ -1,8 +1,8 @@
use std::collections::HashMap;
use crate::column_index::optional_index::set_block::dense::DENSE_BLOCK_NUM_BYTES;
use crate::column_index::optional_index::set_block::set_block::DENSE_BLOCK_NUM_BYTES;
use crate::column_index::optional_index::set_block::{DenseBlockCodec, SparseBlockCodec};
use crate::column_index::optional_index::{Set, SetCodec, SelectCursor};
use crate::column_index::optional_index::{Set, SetCodec};
fn test_set_helper<C: SetCodec<Item = u16>>(vals: &[u16]) -> usize {
let mut buffer = Vec::new();
@@ -51,7 +51,6 @@ fn test_sparse_block_set_u16_max() {
use proptest::prelude::*;
proptest! {
#![proptest_config(ProptestConfig::with_cases(1))]
#[test]
fn test_prop_test_dense(els in proptest::collection::btree_set(0..=u16::MAX, 0..=u16::MAX as usize)) {
let vals: Vec<u16> = els.into_iter().collect();
@@ -74,10 +73,12 @@ fn test_simple_translate_codec_codec_idx_to_original_idx_dense() {
.unwrap();
let tested_set = DenseBlockCodec::open(buffer.as_slice());
assert!(tested_set.contains(1));
let mut select_cursor = tested_set.select_cursor();
assert_eq!(select_cursor.select(0), 1);
assert_eq!(select_cursor.select(1), 3);
assert_eq!(select_cursor.select(2), 17);
assert_eq!(
&tested_set
.select_iter([0, 1, 2, 5].iter().copied())
.collect::<Vec<u16>>(),
&[1, 3, 17, 30_001]
);
}
#[test]
@@ -86,10 +87,12 @@ fn test_simple_translate_codec_idx_to_original_idx_sparse() {
SparseBlockCodec::serialize([1, 3, 17].iter().copied(), &mut buffer).unwrap();
let tested_set = SparseBlockCodec::open(buffer.as_slice());
assert!(tested_set.contains(1));
let mut select_cursor = tested_set.select_cursor();
assert_eq!(SelectCursor::select(&mut select_cursor, 0), 1);
assert_eq!(SelectCursor::select(&mut select_cursor, 1), 3);
assert_eq!(SelectCursor::select(&mut select_cursor, 2), 17);
assert_eq!(
&tested_set
.select_iter([0, 1, 2].iter().copied())
.collect::<Vec<u16>>(),
&[1, 3, 17]
);
}
#[test]
@@ -98,8 +101,10 @@ fn test_simple_translate_codec_idx_to_original_idx_dense() {
DenseBlockCodec::serialize(0u16..150u16, &mut buffer).unwrap();
let tested_set = DenseBlockCodec::open(buffer.as_slice());
assert!(tested_set.contains(1));
let mut select_cursor = tested_set.select_cursor();
for i in 0..150 {
assert_eq!(i, select_cursor.select(i));
}
let rg = 0u16..150u16;
let els: Vec<u16> = rg.clone().collect();
assert_eq!(
&tested_set.select_iter(rg.clone()).collect::<Vec<u16>>(),
&els
);
}

View File

@@ -41,10 +41,9 @@ fn test_with_random_sets_simple() {
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
let ranks: Vec<u32> = (65_472u32..65_473u32).collect();
let els: Vec<u32> = ranks.iter().copied().map(|rank| rank + 10).collect();
let mut select_cursor = null_index.select_cursor();
for (rank, el) in ranks.iter().copied().zip(els.iter().copied()) {
assert_eq!(select_cursor.select(rank), el);
}
let mut output = vec![0u32; ranks.len()];
null_index.select_batch(&ranks[..], &mut output[..]);
assert_eq!(&output, &els);
}
#[test]
@@ -92,10 +91,11 @@ fn test_null_index(data: &[bool]) {
.filter(|(_pos, val)| **val)
.map(|(pos, _val)| pos as u32)
.collect();
let mut select_iter = null_index.select_cursor();
for i in 0..orig_idx_with_value.len() {
assert_eq!(select_iter.select(i as u32), orig_idx_with_value[i]);
}
let ids: Vec<u32> = (0..orig_idx_with_value.len() as u32).collect();
let mut output = vec![0u32; ids.len()];
null_index.select_batch(&ids[..], &mut output);
// assert_eq!(&output[0..100], &orig_idx_with_value[0..100]);
assert_eq!(output, orig_idx_with_value);
let step_size = (orig_idx_with_value.len() / 100).max(1);
for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate().step_by(step_size) {
@@ -115,9 +115,9 @@ fn test_optional_index_test_translation() {
let iter = &[true, false, true, false];
serialize_optional_index(&&iter[..], &mut out).unwrap();
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
let mut select_cursor = null_index.select_cursor();
assert_eq!(select_cursor.select(0), 0);
assert_eq!(select_cursor.select(1), 2);
let mut output = vec![0u32; 2];
null_index.select_batch(&[0, 1], &mut output);
assert_eq!(output, &[0, 2]);
}
#[test]
@@ -175,6 +175,7 @@ mod bench {
.map(|_| rng.gen_bool(fill_ratio))
.collect();
serialize_optional_index(&&vals[..], &mut out).unwrap();
let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
codec
}
@@ -310,8 +311,7 @@ mod bench {
};
let mut output = vec![0u32; idxs.len()];
bench.iter(|| {
output.copy_from_slice(&idxs[..]);
codec.select_batch(&mut output);
codec.select_batch(&idxs[..], &mut output);
});
}

View File

@@ -1,20 +1,19 @@
use std::io;
use std::io::Write;
use common::{CountingWriter, OwnedBytes};
use common::OwnedBytes;
use crate::column_index::multivalued_index::serialize_multivalued_index;
use crate::column_index::multivalued_index::{serialize_multivalued_index, MultivaluedIndex};
use crate::column_index::optional_index::serialize_optional_index;
use crate::column_index::{ColumnIndex, SerializableOptionalIndex};
use crate::column_values::ColumnValues;
use crate::{Cardinality, RowId};
use crate::Cardinality;
pub enum SerializableColumnIndex<'a> {
Full,
Optional(Box<dyn SerializableOptionalIndex<'a> + 'a>),
// TODO remove the Arc<dyn>; apart from serialization this is not
// dynamic at all.
Multivalued(Box<dyn ColumnValues<RowId> + 'a>),
Multivalued(MultivaluedIndex),
}
impl<'a> SerializableColumnIndex<'a> {
@@ -30,21 +29,19 @@ impl<'a> SerializableColumnIndex<'a> {
pub fn serialize_column_index(
column_index: SerializableColumnIndex,
output: &mut impl Write,
) -> io::Result<u32> {
let mut output = CountingWriter::wrap(output);
) -> io::Result<()> {
let cardinality = column_index.get_cardinality().to_code();
output.write_all(&[cardinality])?;
match column_index {
SerializableColumnIndex::Full => {}
SerializableColumnIndex::Optional(optional_index) => {
serialize_optional_index(&*optional_index, &mut output)?
serialize_optional_index(&*optional_index, output)?
}
SerializableColumnIndex::Multivalued(multivalued_index) => {
serialize_multivalued_index(&*multivalued_index, &mut output)?
serialize_multivalued_index(multivalued_index, output)?
}
}
let column_index_num_bytes = output.written_bytes() as u32;
Ok(column_index_num_bytes)
Ok(())
}
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex<'static>> {

View File

@@ -78,32 +78,6 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
}
}
impl<T: Copy + PartialOrd> ColumnValues<T> for std::sync::Arc<dyn ColumnValues<T>> {
fn get_val(&self, idx: u32) -> T {
self.as_ref().get_val(idx)
}
fn min_value(&self) -> T {
self.as_ref().min_value()
}
fn max_value(&self) -> T {
self.as_ref().max_value()
}
fn num_vals(&self) -> u32 {
self.as_ref().num_vals()
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
self.as_ref().iter()
}
fn get_range(&self, start: u64, output: &mut [T]) {
self.as_ref().get_range(start, output)
}
}
impl<'a, C: ColumnValues<T> + ?Sized, T: Copy + PartialOrd> ColumnValues<T> for &'a C {
fn get_val(&self, idx: u32) -> T {
(*self).get_val(idx)

View File

@@ -28,7 +28,7 @@ mod compact_space;
mod line;
mod linear;
pub(crate) mod monotonic_mapping;
pub(crate) mod monotonic_mapping_u128;
// mod monotonic_mapping_u128;
mod column;
mod column_with_cardinality;
@@ -37,10 +37,8 @@ pub mod serialize;
pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn};
pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
#[cfg(test)]
pub use self::serialize::tests::serialize_and_load;
pub use self::serialize::{serialize_column_values, NormalizedHeader};
// pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
pub use self::serialize::{serialize_and_load, serialize_column_values, NormalizedHeader};
use crate::column_values::bitpacked::BitpackedCodec;
use crate::column_values::blockwise_linear::BlockwiseLinearCodec;
use crate::column_values::linear::LinearCodec;
@@ -124,17 +122,19 @@ impl U128FastFieldCodecType {
}
/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open_u128_mapped<T: MonotonicallyMappableToU128>(
mut bytes: OwnedBytes,
) -> io::Result<Arc<dyn ColumnValues<T>>> {
let header = U128Header::deserialize(&mut bytes)?;
assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
let reader = CompactSpaceDecompressor::open(bytes)?;
let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<T>> =
StrictlyMonotonicMappingToInternal::<T>::new().into();
Ok(Arc::new(monotonic_map_column(reader, inverted)))
}
// pub fn open_u128<Item: MonotonicallyMappableToU128>(
// bytes: OwnedBytes,
// ) -> io::Result<Arc<dyn Column<Item>>> {
// todo!();
// // let (bytes, _format_version) = read_format_version(bytes)?;
// // let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
// // let header = U128Header::deserialize(&mut bytes)?;
// // assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
// // let reader = CompactSpaceDecompressor::open(bytes)?;
// // let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<Item>> =
// // StrictlyMonotonicMappingToInternal::<Item>::new().into();
// // Ok(Arc::new(monotonic_map_column(reader, inverted)))
// }
/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open_u64_mapped<T: MonotonicallyMappableToU64>(
@@ -198,6 +198,13 @@ pub(crate) trait FastFieldCodec: 'static {
fn estimate(column: &dyn ColumnValues) -> Option<f32>;
}
/// The list of all available codecs for u64 convertible data.
pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
FastFieldCodecType::Bitpacked,
FastFieldCodecType::BlockwiseLinear,
FastFieldCodecType::Linear,
];
#[cfg(all(test, feature = "unstable"))]
mod bench {
use std::sync::Arc;

View File

@@ -2,7 +2,6 @@ use std::marker::PhantomData;
use fastdivide::DividerU64;
use super::MonotonicallyMappableToU128;
use crate::RowId;
/// Monotonic maps a value to u64 value space.
@@ -81,20 +80,21 @@ impl<T> StrictlyMonotonicMappingToInternal<T> {
}
}
impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU128
{
#[inline(always)]
fn mapping(&self, inp: External) -> u128 {
External::to_u128(inp)
}
// TODO
// impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
// StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
// where T: MonotonicallyMappableToU128
// {
// #[inline(always)]
// fn mapping(&self, inp: External) -> u128 {
// External::to_u128(inp)
// }
#[inline(always)]
fn inverse(&self, out: u128) -> External {
External::from_u128(out)
}
}
// #[inline(always)]
// fn inverse(&self, out: u128) -> External {
// External::from_u128(out)
// }
// }
impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
@@ -194,20 +194,6 @@ impl MonotonicallyMappableToU64 for i64 {
}
}
impl MonotonicallyMappableToU64 for crate::DateTime {
#[inline(always)]
fn to_u64(self) -> u64 {
common::i64_to_u64(self.timestamp_micros)
}
#[inline(always)]
fn from_u64(val: u64) -> Self {
crate::DateTime {
timestamp_micros: common::u64_to_i64(val),
}
}
}
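The removed `DateTime` impl above delegates to `common::i64_to_u64`. That helper's body is not part of this diff, but by assumption it is the standard order-preserving mapping: flipping the sign bit maps the whole i64 range monotonically onto u64.
```rust
/// Order-preserving i64 -> u64 mapping (assumed behaviour of
/// common::i64_to_u64): flipping the sign bit sends i64::MIN to 0
/// and i64::MAX to u64::MAX, so ordering is preserved.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

/// Inverse mapping.
fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}
```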
impl MonotonicallyMappableToU64 for bool {
#[inline(always)]
fn to_u64(self) -> u64 {

View File

@@ -19,8 +19,9 @@
use std::io;
use std::num::NonZeroU64;
use std::sync::Arc;
use common::{BinarySerializable, VInt};
use common::{BinarySerializable, OwnedBytes, VInt};
use log::warn;
use super::bitpacked::BitpackedCodec;
@@ -32,9 +33,8 @@ use super::monotonic_mapping::{
};
use super::{
monotonic_map_column, ColumnValues, FastFieldCodec, FastFieldCodecType,
MonotonicallyMappableToU64, U128FastFieldCodecType,
MonotonicallyMappableToU64, U128FastFieldCodecType, VecColumn, ALL_CODEC_TYPES,
};
use crate::column_values::compact_space::CompactSpaceCompressor;
/// The normalized header gives some parameters after applying the following
/// normalization of the vector:
@@ -160,23 +160,55 @@ impl BinarySerializable for Header {
}
}
/// Serializes u128 values with the compact space codec.
pub fn serialize_column_values_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
iter_gen: F,
num_vals: u32,
output: &mut impl io::Write,
) -> io::Result<()> {
let header = U128Header {
num_vals,
codec_type: U128FastFieldCodecType::CompactSpace,
};
header.serialize(output)?;
let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
compressor.compress_into(iter_gen(), output)?;
Ok(())
/// Returns the estimated compression ratio for the given codec, in the range [0.0..1.0],
/// where 1.0 means no compression.
pub(crate) fn estimate<T: MonotonicallyMappableToU64>(
typed_column: impl ColumnValues<T>,
codec_type: FastFieldCodecType,
) -> Option<f32> {
let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
let min_value = column.min_value();
let gcd = super::gcd::find_gcd(column.iter().map(|val| val - min_value))
.filter(|gcd| gcd.get() > 1u64);
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(
gcd.map(|gcd| gcd.get()).unwrap_or(1u64),
min_value,
);
let normalized_column = monotonic_map_column(&column, mapping);
match codec_type {
FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&normalized_column),
FastFieldCodecType::Linear => LinearCodec::estimate(&normalized_column),
FastFieldCodecType::BlockwiseLinear => BlockwiseLinearCodec::estimate(&normalized_column),
}
}
// TODO
/// Serializes u128 values with the compact space codec.
// pub fn serialize_u128_new<F: Fn() -> I, I: Iterator<Item = u128>>(
// value_index: ColumnIndex,
// iter_gen: F,
// num_vals: u32,
// output: &mut impl io::Write,
// ) -> io::Result<()> {
// let header = U128Header {
// num_vals,
// codec_type: U128FastFieldCodecType::CompactSpace,
// };
// header.serialize(output)?;
// let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
// compressor.compress_into(iter_gen(), output).unwrap();
// let null_index_footer = ColumnFooter {
// cardinality: value_index.get_cardinality(),
// null_index_codec: NullIndexCodec::Full,
// null_index_byte_range: 0..0,
// };
// append_null_index_footer(output, null_index_footer)?;
// append_format_version(output)?;
// Ok(())
// }
/// Serializes the column with the codec with the best estimate on the data.
pub fn serialize_column_values<T: MonotonicallyMappableToU64>(
typed_column: impl ColumnValues<T>,
@@ -247,29 +279,20 @@ pub(crate) fn serialize_given_codec(
Ok(())
}
/// Helper function to serialize a column (autodetect from all codecs) and then open it
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
column: &[T],
) -> Arc<dyn ColumnValues<T>> {
let mut buffer = Vec::new();
super::serialize_column_values(&VecColumn::from(&column), &ALL_CODEC_TYPES, &mut buffer)
.unwrap();
super::open_u64_mapped(OwnedBytes::new(buffer)).unwrap()
}
#[cfg(test)]
pub mod tests {
use std::sync::Arc;
use common::OwnedBytes;
mod tests {
use super::*;
use crate::column_values::{open_u64_mapped, VecColumn};
const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
FastFieldCodecType::Bitpacked,
FastFieldCodecType::Linear,
FastFieldCodecType::BlockwiseLinear,
];
/// Helper function to serialize a column (autodetect from all codecs) and then open it
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
column: &[T],
) -> Arc<dyn ColumnValues<T>> {
let mut buffer = Vec::new();
serialize_column_values(&VecColumn::from(&column), &ALL_CODEC_TYPES, &mut buffer).unwrap();
open_u64_mapped(OwnedBytes::new(buffer)).unwrap()
}
#[test]
fn test_serialize_deserialize_u128_header() {
let original = U128Header {
@@ -296,7 +319,7 @@ pub mod tests {
serialize_column_values(&col, &ALL_CODEC_TYPES, &mut buffer).unwrap();
// TODO put the header as a footer so that it serves as padding.
// 5 bytes of header, 1 byte of value, 7 bytes of padding.
assert_eq!(buffer.len(), 5 + 1);
assert_eq!(buffer.len(), 5 + 1 + 7);
}
#[test]
@@ -305,7 +328,7 @@ pub mod tests {
let col = VecColumn::from(&[true][..]);
serialize_column_values(&col, &ALL_CODEC_TYPES, &mut buffer).unwrap();
// 5 bytes of header, 0 bytes of value, 7 bytes of padding.
assert_eq!(buffer.len(), 5);
assert_eq!(buffer.len(), 5 + 7);
}
#[test]
@@ -315,6 +338,6 @@ pub mod tests {
let col = VecColumn::from(&vals[..]);
serialize_column_values(&col, &[FastFieldCodecType::Bitpacked], &mut buffer).unwrap();
// Values are stored over 3 bits.
assert_eq!(buffer.len(), 7 + (3 * 80 / 8));
assert_eq!(buffer.len(), 7 + (3 * 80 / 8) + 7);
}
}
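The normalization inside `estimate` above (subtract the minimum, then divide by the GCD of the deltas) is what lets small bit widths emerge. A worked sketch with hypothetical helpers:
```rust
/// [10, 30, 50] -> subtract min (10) -> [0, 20, 40]
/// -> divide by gcd (20) -> [0, 1, 2], which bitpacks to 2 bits per value.
fn normalize(vals: &[u64]) -> Vec<u64> {
    let min = vals.iter().copied().min().unwrap_or(0);
    let gcd = vals
        .iter()
        .copied()
        .map(|v| v - min)
        .fold(0u64, gcd_u64)
        .max(1);
    vals.iter().map(|v| (v - min) / gcd).collect()
}

fn gcd_u64(a: u64, b: u64) -> u64 {
    if b == 0 { a } else { gcd_u64(b, a % b) }
}
```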

View File

@@ -1,5 +1,4 @@
use std::net::Ipv6Addr;
use crate::utils::{place_bits, select_bits};
use crate::value::NumericalType;
use crate::InvalidData;
@@ -8,152 +7,63 @@ use crate::InvalidData;
/// - bits[0..3]: Column category type.
/// - bits[3..6]: Numerical type if necessary.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)]
#[repr(u8)]
pub enum ColumnType {
I64 = 0u8,
U64 = 1u8,
F64 = 2u8,
Bytes = 10u8,
Str = 14u8,
Bool = 18u8,
IpAddr = 22u8,
DateTime = 26u8,
Bytes,
Numerical(NumericalType),
Bool,
IpAddr,
}
#[cfg(test)]
const COLUMN_TYPES: [ColumnType; 8] = [
ColumnType::I64,
ColumnType::U64,
ColumnType::F64,
ColumnType::Bytes,
ColumnType::Str,
ColumnType::Bool,
ColumnType::IpAddr,
ColumnType::DateTime,
];
impl ColumnType {
pub fn to_code(self) -> u8 {
self as u8
/// Encoded over 6 bits.
pub(crate) fn to_code(self) -> u8 {
let column_type_category;
let numerical_type_code: u8;
match self {
ColumnType::Bytes => {
column_type_category = ColumnTypeCategory::Str;
numerical_type_code = 0u8;
}
ColumnType::Numerical(numerical_type) => {
column_type_category = ColumnTypeCategory::Numerical;
numerical_type_code = numerical_type.to_code();
}
ColumnType::Bool => {
column_type_category = ColumnTypeCategory::Bool;
numerical_type_code = 0u8;
}
}
place_bits::<0, 3>(column_type_category.to_code()) | place_bits::<3, 6>(numerical_type_code)
}
pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
use ColumnType::*;
match code {
0u8 => Ok(I64),
1u8 => Ok(U64),
2u8 => Ok(F64),
10u8 => Ok(Bytes),
14u8 => Ok(Str),
18u8 => Ok(Bool),
22u8 => Ok(IpAddr),
26u8 => Ok(Self::DateTime),
_ => Err(InvalidData),
if select_bits::<6, 8>(code) != 0u8 {
return Err(InvalidData);
}
}
}
impl From<NumericalType> for ColumnType {
fn from(numerical_type: NumericalType) -> Self {
match numerical_type {
NumericalType::I64 => ColumnType::I64,
NumericalType::U64 => ColumnType::U64,
NumericalType::F64 => ColumnType::F64,
let column_type_category_code = select_bits::<0, 3>(code);
let numerical_type_code = select_bits::<3, 6>(code);
let column_type_category = ColumnTypeCategory::try_from_code(column_type_category_code)?;
match column_type_category {
ColumnTypeCategory::Bool => {
if numerical_type_code != 0u8 {
return Err(InvalidData);
}
Ok(ColumnType::Bool)
}
ColumnTypeCategory::Str => {
if numerical_type_code != 0u8 {
return Err(InvalidData);
}
Ok(ColumnType::Bytes)
}
ColumnTypeCategory::Numerical => {
let numerical_type = NumericalType::try_from_code(numerical_type_code)?;
Ok(ColumnType::Numerical(numerical_type))
}
}
}
}
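`to_code` and `try_from_code` pack the category into bits [0..3) and the numerical type into bits [3..6). The `place_bits`/`select_bits` helpers are not shown in this diff; assuming the natural semantics, they would look like:
```rust
/// Places `val` into bits [FROM..TO) of a byte (assumed semantics).
fn place_bits<const FROM: u8, const TO: u8>(val: u8) -> u8 {
    debug_assert!(val < (1u8 << (TO - FROM)));
    val << FROM
}

/// Extracts bits [FROM..TO) from `code` (assumed semantics).
fn select_bits<const FROM: u8, const TO: u8>(code: u8) -> u8 {
    (code >> FROM) & ((1u8 << (TO - FROM)) - 1)
}
```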
impl ColumnType {
/// Get the column type category.
pub(crate) fn column_type_category(self) -> ColumnTypeCategory {
match self {
ColumnType::I64 | ColumnType::U64 | ColumnType::F64 => ColumnTypeCategory::Numerical,
ColumnType::Bytes => ColumnTypeCategory::Bytes,
ColumnType::Str => ColumnTypeCategory::Str,
ColumnType::Bool => ColumnTypeCategory::Bool,
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
ColumnType::DateTime => ColumnTypeCategory::DateTime,
}
}
pub fn numerical_type(&self) -> Option<NumericalType> {
match self {
ColumnType::I64 => Some(NumericalType::I64),
ColumnType::U64 => Some(NumericalType::U64),
ColumnType::F64 => Some(NumericalType::F64),
ColumnType::Bytes
| ColumnType::Str
| ColumnType::Bool
| ColumnType::IpAddr
| ColumnType::DateTime => None,
}
}
}
// TODO remove if possible
pub trait HasAssociatedColumnType: 'static + Send + Sync + Copy + PartialOrd {
fn column_type() -> ColumnType;
fn default_value() -> Self;
}
impl HasAssociatedColumnType for u64 {
fn column_type() -> ColumnType {
ColumnType::U64
}
fn default_value() -> Self {
0u64
}
}
impl HasAssociatedColumnType for i64 {
fn column_type() -> ColumnType {
ColumnType::I64
}
fn default_value() -> Self {
0i64
}
}
impl HasAssociatedColumnType for f64 {
fn column_type() -> ColumnType {
ColumnType::F64
}
fn default_value() -> Self {
Default::default()
}
}
impl HasAssociatedColumnType for bool {
fn column_type() -> ColumnType {
ColumnType::Bool
}
fn default_value() -> Self {
Default::default()
}
}
impl HasAssociatedColumnType for crate::DateTime {
fn column_type() -> ColumnType {
ColumnType::DateTime
}
fn default_value() -> Self {
Default::default()
}
}
impl HasAssociatedColumnType for Ipv6Addr {
fn column_type() -> ColumnType {
ColumnType::IpAddr
}
fn default_value() -> Self {
Ipv6Addr::from([0u8; 16])
}
}
/// Column types are grouped into different categories that
/// correspond to the different `JsonValue` types.
///
@@ -161,28 +71,25 @@ impl HasAssociatedColumnType for Ipv6Addr {
/// at most one column exists per `ColumnTypeCategory`.
///
/// See also [README.md].
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
#[repr(u8)]
pub enum ColumnTypeCategory {
Bool,
Str,
Numerical,
DateTime,
Bytes,
IpAddr,
pub(crate) enum ColumnTypeCategory {
Bool = 0u8,
Str = 1u8,
Numerical = 2u8,
}
impl From<ColumnType> for ColumnTypeCategory {
fn from(column_type: ColumnType) -> Self {
match column_type {
ColumnType::I64 => ColumnTypeCategory::Numerical,
ColumnType::U64 => ColumnTypeCategory::Numerical,
ColumnType::F64 => ColumnTypeCategory::Numerical,
ColumnType::Bytes => ColumnTypeCategory::Bytes,
ColumnType::Str => ColumnTypeCategory::Str,
ColumnType::Bool => ColumnTypeCategory::Bool,
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
ColumnType::DateTime => ColumnTypeCategory::DateTime,
impl ColumnTypeCategory {
pub fn to_code(self) -> u8 {
self as u8
}
pub fn try_from_code(code: u8) -> Result<Self, InvalidData> {
match code {
0u8 => Ok(Self::Bool),
1u8 => Ok(Self::Str),
2u8 => Ok(Self::Numerical),
_ => Err(InvalidData),
}
}
}
@@ -203,22 +110,7 @@ mod tests {
assert!(column_type_set.insert(column_type));
}
}
assert_eq!(column_type_set.len(), super::COLUMN_TYPES.len());
}
#[test]
fn test_column_category_sort_consistent_with_column_type_sort() {
// This is a very important property because
// we need to serialize columns in the right order.
let mut column_types: Vec<ColumnType> = super::COLUMN_TYPES.iter().copied().collect();
column_types.sort_by_key(|col| col.to_code());
let column_categories: Vec<ColumnTypeCategory> = column_types
.into_iter()
.map(ColumnTypeCategory::from)
.collect();
for (prev, next) in column_categories.iter().zip(column_categories.iter()) {
assert!(prev <= next);
}
assert_eq!(column_type_set.len(), 2 + 3);
}
#[test]

View File

@@ -1,176 +0,0 @@
use std::collections::HashMap;
use std::io;
use super::column_type::ColumnTypeCategory;
use crate::columnar::ColumnarReader;
use crate::dynamic_column::DynamicColumn;
pub enum MergeDocOrder {
/// Columnar tables are simply stacked one above the other.
/// If the i-th of the columnar_readers has n_rows_i rows, then
/// in the resulting columnar,
/// rows [0..n_row_0) contain the rows of columnar_readers[0], in order,
/// rows [n_row_0..n_row_0 + n_row_1) contain the rows of columnar_readers[1], in order,
/// ..
Stack,
/// Some more complex mapping that can interleave rows from the different readers and
/// possibly drop rows.
Complex(()),
}
pub fn merge_columnar(
_columnar_readers: &[ColumnarReader],
mapping: MergeDocOrder,
_output: &mut impl io::Write,
) -> io::Result<()> {
match mapping {
MergeDocOrder::Stack => {
// implement me :)
todo!();
}
MergeDocOrder::Complex(_) => {
// for later
todo!();
}
}
}
pub fn collect_columns(
columnar_readers: &[&ColumnarReader],
) -> io::Result<HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>>> {
// Each column name may have multiple column types associated with it.
// For merging, we group columns by column type category, since columns of the same category can be merged.
let mut field_name_to_group: HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>> =
HashMap::new();
for columnar_reader in columnar_readers {
let column_name_and_handle = columnar_reader.list_columns()?;
for (column_name, handle) in column_name_and_handle {
let column_type_to_handles = field_name_to_group
.entry(column_name.to_string())
.or_default();
let columns = column_type_to_handles
.entry(handle.column_type().column_type_category())
.or_default();
columns.push(handle.open()?);
}
}
normalize_columns(&mut field_name_to_group);
Ok(field_name_to_group)
}
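A hedged usage sketch, assuming two `ColumnarReader`s named `reader_a` and `reader_b` are in scope and that this runs inside a function returning `io::Result<()>`:

```rust
// Group every column of both readers by (column name, type category).
let grouped = collect_columns(&[&reader_a, &reader_b])?;
for (column_name, by_category) in &grouped {
    for (category, columns) in by_category {
        println!("{}: {:?} -> {} mergeable columns", column_name, category, columns.len());
    }
}
```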
/// Casts numerical columns to a common numerical type.
pub(crate) fn normalize_columns(
map: &mut HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>>,
) {
for (_field_name, type_category_to_columns) in map.iter_mut() {
for (type_category, columns) in type_category_to_columns {
if type_category == &ColumnTypeCategory::Numerical {
let casted_columns = cast_to_common_numerical_column(&columns);
*columns = casted_columns;
}
}
}
}
/// Receives a list of columns of numerical types (u64, i64, f64).
///
/// Returns a list of `DynamicColumn`s that all share the same numerical type.
fn cast_to_common_numerical_column(columns: &[DynamicColumn]) -> Vec<DynamicColumn> {
assert!(columns
.iter()
.all(|column| column.column_type().numerical_type().is_some()));
let coerce_to_i64: Vec<_> = columns
.iter()
.map(|column| column.clone().coerce_to_i64())
.collect();
if coerce_to_i64.iter().all(|column| column.is_some()) {
return coerce_to_i64
.into_iter()
.map(|column| column.unwrap())
.collect();
}
let coerce_to_u64: Vec<_> = columns
.iter()
.map(|column| column.clone().coerce_to_u64())
.collect();
if coerce_to_u64.iter().all(|column| column.is_some()) {
return coerce_to_u64
.into_iter()
.map(|column| column.unwrap())
.collect();
}
columns
.iter()
.map(|column| {
column
.clone()
.coerce_to_f64()
.expect("couldn't cast column to f64")
})
.collect()
}
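The coercion priority is i64 first, then u64, with f64 as the lossy fallback. A standalone sketch of that decision, using plain stand-in types rather than the crate's `DynamicColumn` (the range checks mirror the crate's `CompatibleNumericalTypes`):

```rust
#[derive(Clone, Copy)]
enum Num {
    I64(i64),
    U64(u64),
    F64(f64),
}

// Decide the common numerical type the way cast_to_common_numerical_column does:
// try i64 for all values, then u64, then fall back to f64.
fn common_type(values: &[Num]) -> &'static str {
    let all_fit_i64 = values.iter().all(|v| match *v {
        Num::I64(_) => true,
        Num::U64(u) => u < i64::MAX as u64,
        Num::F64(_) => false,
    });
    if all_fit_i64 {
        return "i64";
    }
    let all_fit_u64 = values.iter().all(|v| match *v {
        Num::I64(i) => i >= 0,
        Num::U64(_) => true,
        Num::F64(_) => false,
    });
    if all_fit_u64 { "u64" } else { "f64" }
}

fn main() {
    assert_eq!(common_type(&[Num::I64(1), Num::I64(2)]), "i64");
    assert_eq!(common_type(&[Num::I64(1), Num::U64(u64::MAX - 100)]), "u64");
    assert_eq!(common_type(&[Num::I64(1), Num::F64(30.5)]), "f64");
}
```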
#[cfg(test)]
mod tests {
use super::*;
use crate::ColumnarWriter;
#[test]
fn test_column_coercion() {
// i64 type
let columnar1 = {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "numbers", 1i64);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(2, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
};
// u64 type
let columnar2 = {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "numbers", u64::MAX - 100);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(2, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
};
// f64 type
let columnar3 = {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "numbers", 30.5);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(2, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
};
let column_map = collect_columns(&[&columnar1, &columnar2, &columnar3]).unwrap();
assert_eq!(column_map.len(), 1);
let cat_to_columns = column_map.get("numbers").unwrap();
assert_eq!(cat_to_columns.len(), 1);
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
assert!(numerical.iter().all(|column| column.is_f64()));
let column_map = collect_columns(&[&columnar1, &columnar1]).unwrap();
assert_eq!(column_map.len(), 1);
let cat_to_columns = column_map.get("numbers").unwrap();
assert_eq!(cat_to_columns.len(), 1);
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
assert!(numerical.iter().all(|column| column.is_i64()));
let column_map = collect_columns(&[&columnar2, &columnar2]).unwrap();
assert_eq!(column_map.len(), 1);
let cat_to_columns = column_map.get("numbers").unwrap();
assert_eq!(cat_to_columns.len(), 1);
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
assert!(numerical.iter().all(|column| column.is_u64()));
}
}

View File

@@ -1,10 +1,28 @@
// Copyright (C) 2022 Quickwit, Inc.
//
// Quickwit is offered under the AGPL v3.0 and as commercial software.
// For commercial licensing, contact us at hello@quickwit.io.
//
// AGPL:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
mod column_type;
mod format_version;
mod merge;
mod reader;
mod writer;
pub use column_type::{ColumnType, HasAssociatedColumnType};
pub use merge::{merge_columnar, MergeDocOrder};
pub use column_type::ColumnType;
pub use reader::ColumnarReader;
pub use writer::ColumnarWriter;

View File

@@ -44,7 +44,7 @@ impl ColumnarReader {
})
}
// TODO Add unit tests
// TODO fix ugly API
pub fn list_columns(&self) -> io::Result<Vec<(String, DynamicColumnHandle)>> {
let mut stream = self.column_dictionary.stream()?;
let mut results = Vec::new();
@@ -55,8 +55,7 @@ impl ColumnarReader {
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
let range = stream.value().clone();
let column_name =
// The last two bytes are respectively the 0u8 separator and the column_type.
String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 2]).to_string();
String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 1]).to_string();
let file_slice = self
.column_data
.slice(range.start as usize..range.end as usize);

View File

@@ -27,12 +27,12 @@ struct ColumnOperationMetadata {
impl ColumnOperationMetadata {
fn to_code(self) -> u8 {
place_bits::<0, 6>(self.len) | place_bits::<6, 8>(self.op_type.to_code())
place_bits::<0, 4>(self.len) | place_bits::<4, 8>(self.op_type.to_code())
}
fn try_from_code(code: u8) -> Result<Self, InvalidData> {
let len = select_bits::<0, 6>(code);
let typ_code = select_bits::<6, 8>(code);
let len = select_bits::<0, 4>(code);
let typ_code = select_bits::<4, 8>(code);
let column_type = ColumnOperationType::try_from_code(typ_code)?;
Ok(ColumnOperationMetadata {
op_type: column_type,
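With the new layout the low 4 bits carry the length and the high 4 bits the operation-type code, so an encoded length can no longer exceed 15. A self-contained sketch of the packing arithmetic, with plain shifts standing in for the crate's `place_bits`/`select_bits` helpers:

```rust
fn main() {
    // Pack a 4-bit len into bits [0, 4) and a 4-bit op code into bits [4, 8).
    let len = 9u8; // must be < 16 to fit in 4 bits
    let op_code = 3u8; // must be < 16
    let packed = len | (op_code << 4);
    // Unpack: mask the low nibble for len, shift down for the op code.
    assert_eq!(packed & 0x0f, len);
    assert_eq!(packed >> 4, op_code);
}
```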
@@ -146,19 +146,19 @@ impl SymbolValue for bool {
impl SymbolValue for Ipv6Addr {
fn serialize(self, buffer: &mut [u8]) -> u8 {
buffer[0..16].copy_from_slice(&self.octets());
16
// Maybe not useful to use VInt encoding for the moment since we only use it for IP addresses.
// We could roll our own RLE compression but it is overkill. Let's stick to 8 bytes.
todo!();
}
fn deserialize(bytes: &[u8]) -> Self {
let octets: [u8; 16] = bytes[0..16].try_into().unwrap();
Ipv6Addr::from(octets)
todo!();
}
}
#[derive(Default)]
struct MiniBuffer {
pub bytes: [u8; 17],
pub bytes: [u8; 10],
pub len: u8,
}

View File

@@ -102,29 +102,18 @@ pub(crate) struct NumericalColumnWriter {
column_writer: ColumnWriter,
}
impl NumericalColumnWriter {
pub fn force_numerical_type(&mut self, numerical_type: NumericalType) {
assert!(self
.compatible_numerical_types
.is_type_accepted(numerical_type));
self.compatible_numerical_types = CompatibleNumericalTypes::StaticType(numerical_type);
}
}
/// State used to store what types are still acceptable
/// after having seen a set of numerical values.
#[derive(Clone, Copy)]
enum CompatibleNumericalTypes {
Dynamic {
all_values_within_i64_range: bool,
all_values_within_u64_range: bool,
},
StaticType(NumericalType),
struct CompatibleNumericalTypes {
all_values_within_i64_range: bool,
all_values_within_u64_range: bool,
// f64 is always acceptable.
}
impl Default for CompatibleNumericalTypes {
fn default() -> CompatibleNumericalTypes {
CompatibleNumericalTypes::Dynamic {
CompatibleNumericalTypes {
all_values_within_i64_range: true,
all_values_within_u64_range: true,
}
@@ -132,54 +121,31 @@ impl Default for CompatibleNumericalTypes {
}
impl CompatibleNumericalTypes {
fn is_type_accepted(&self, numerical_type: NumericalType) -> bool {
match self {
CompatibleNumericalTypes::Dynamic {
all_values_within_i64_range,
all_values_within_u64_range,
} => match numerical_type {
NumericalType::I64 => *all_values_within_i64_range,
NumericalType::U64 => *all_values_within_u64_range,
NumericalType::F64 => true,
},
CompatibleNumericalTypes::StaticType(static_numerical_type) => {
*static_numerical_type == numerical_type
}
}
}
fn accept_value(&mut self, numerical_value: NumericalValue) {
match self {
CompatibleNumericalTypes::Dynamic {
all_values_within_i64_range,
all_values_within_u64_range,
} => match numerical_value {
NumericalValue::I64(val_i64) => {
let value_within_u64_range = val_i64 >= 0i64;
*all_values_within_u64_range &= value_within_u64_range;
}
NumericalValue::U64(val_u64) => {
let value_within_i64_range = val_u64 < i64::MAX as u64;
*all_values_within_i64_range &= value_within_i64_range;
}
NumericalValue::F64(_) => {
*all_values_within_i64_range = false;
*all_values_within_u64_range = false;
}
},
CompatibleNumericalTypes::StaticType(typ) => {
assert_eq!(numerical_value.numerical_type(), *typ);
match numerical_value {
NumericalValue::I64(val_i64) => {
let value_within_u64_range = val_i64 >= 0i64;
self.all_values_within_u64_range &= value_within_u64_range;
}
NumericalValue::U64(val_u64) => {
let value_within_i64_range = val_u64 < i64::MAX as u64;
self.all_values_within_i64_range &= value_within_i64_range;
}
NumericalValue::F64(_) => {
self.all_values_within_i64_range = false;
self.all_values_within_u64_range = false;
}
}
}
pub fn to_numerical_type(self) -> NumericalType {
for numerical_type in [NumericalType::I64, NumericalType::U64] {
if self.is_type_accepted(numerical_type) {
return numerical_type;
}
if self.all_values_within_i64_range {
NumericalType::I64
} else if self.all_values_within_u64_range {
NumericalType::U64
} else {
NumericalType::F64
}
NumericalType::F64
}
}
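A hedged sketch of how accepted values narrow the inferred type, written as if from inside the module (both methods are private) and relying on the `Copy` derive above; the value sequence is illustrative:

```rust
fn narrowing_example() {
    let mut compat = CompatibleNumericalTypes::default();
    assert_eq!(compat.to_numerical_type(), NumericalType::I64);
    compat.accept_value(NumericalValue::I64(-1)); // a negative value rules out u64
    assert_eq!(compat.to_numerical_type(), NumericalType::I64);
    compat.accept_value(NumericalValue::U64(u64::MAX)); // too large for i64
    assert_eq!(compat.to_numerical_type(), NumericalType::F64);
}
```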
@@ -209,15 +175,15 @@ impl NumericalColumnWriter {
}
}
#[derive(Copy, Clone)]
pub(crate) struct StrOrBytesColumnWriter {
#[derive(Copy, Clone, Default)]
pub(crate) struct StrColumnWriter {
pub(crate) dictionary_id: u32,
pub(crate) column_writer: ColumnWriter,
}
impl StrOrBytesColumnWriter {
pub(crate) fn with_dictionary_id(dictionary_id: u32) -> StrOrBytesColumnWriter {
StrOrBytesColumnWriter {
impl StrColumnWriter {
pub(crate) fn with_dictionary_id(dictionary_id: u32) -> StrColumnWriter {
StrColumnWriter {
dictionary_id,
column_writer: Default::default(),
}
@@ -296,27 +262,4 @@ mod tests {
test_column_writer_coercion_aux(&[1i64.into(), 1u64.into()], NumericalType::I64);
test_column_writer_coercion_aux(&[u64::MAX.into(), (-1i64).into()], NumericalType::F64);
}
#[test]
#[should_panic]
fn test_compatible_numerical_types_static_incompatible_type() {
let mut compatible_numerical_types =
CompatibleNumericalTypes::StaticType(NumericalType::U64);
compatible_numerical_types.accept_value(NumericalValue::I64(1i64));
}
#[test]
fn test_compatible_numerical_types_static_different_type_forbidden() {
let mut compatible_numerical_types =
CompatibleNumericalTypes::StaticType(NumericalType::U64);
compatible_numerical_types.accept_value(NumericalValue::U64(u64::MAX));
}
#[test]
fn test_compatible_numerical_types_static() {
for typ in [NumericalType::I64, NumericalType::I64, NumericalType::F64] {
let compatible_numerical_types = CompatibleNumericalTypes::StaticType(typ);
assert_eq!(compatible_numerical_types.to_numerical_type(), typ);
}
}
}

View File

@@ -12,12 +12,10 @@ use serializer::ColumnarSerializer;
use stacker::{Addr, ArenaHashMap, MemoryArena};
use crate::column_index::SerializableColumnIndex;
use crate::column_values::{
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
};
use crate::column_values::{ColumnValues, MonotonicallyMappableToU64, VecColumn};
use crate::columnar::column_type::{ColumnType, ColumnTypeCategory};
use crate::columnar::writer::column_writers::{
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
ColumnWriter, NumericalColumnWriter, StrColumnWriter,
};
use crate::columnar::writer::value_index::{IndexBuilder, PreallocatedIndexBuilders};
use crate::dictionary::{DictionaryBuilder, TermIdMapping, UnorderedId};
@@ -33,7 +31,6 @@ struct SpareBuffers {
u64_values: Vec<u64>,
f64_values: Vec<f64>,
bool_values: Vec<bool>,
ip_addr_values: Vec<Ipv6Addr>,
}
/// Makes it possible to create a new columnar.
@@ -51,11 +48,9 @@ struct SpareBuffers {
/// ```
pub struct ColumnarWriter {
numerical_field_hash_map: ArenaHashMap,
datetime_field_hash_map: ArenaHashMap,
bool_field_hash_map: ArenaHashMap,
ip_addr_field_hash_map: ArenaHashMap,
bytes_field_hash_map: ArenaHashMap,
str_field_hash_map: ArenaHashMap,
arena: MemoryArena,
// Dictionaries used to store dictionary-encoded values.
dictionaries: Vec<DictionaryBuilder>,
@@ -67,10 +62,7 @@ impl Default for ColumnarWriter {
ColumnarWriter {
numerical_field_hash_map: ArenaHashMap::new(10_000),
bool_field_hash_map: ArenaHashMap::new(10_000),
ip_addr_field_hash_map: ArenaHashMap::new(10_000),
bytes_field_hash_map: ArenaHashMap::new(10_000),
str_field_hash_map: ArenaHashMap::new(10_000),
datetime_field_hash_map: ArenaHashMap::new(10_000),
dictionaries: Vec::new(),
arena: MemoryArena::default(),
buffers: SpareBuffers::default(),
@@ -78,115 +70,20 @@ impl Default for ColumnarWriter {
}
}
#[inline]
fn mutate_or_create_column<V, TMutator>(
arena_hash_map: &mut ArenaHashMap,
column_name: &str,
updater: TMutator,
) where
V: Copy + 'static,
TMutator: FnMut(Option<V>) -> V,
{
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
}
impl ColumnarWriter {
pub fn mem_usage(&self) -> usize {
// TODO add dictionary builders.
self.arena.mem_usage()
+ self.numerical_field_hash_map.mem_usage()
+ self.bool_field_hash_map.mem_usage()
+ self.bytes_field_hash_map.mem_usage()
+ self.str_field_hash_map.mem_usage()
+ self.ip_addr_field_hash_map.mem_usage()
+ self.datetime_field_hash_map.mem_usage()
}
pub fn record_column_type(&mut self, column_name: &str, column_type: ColumnType) {
match column_type {
ColumnType::Str | ColumnType::Bytes => {
let (hash_map, dictionaries) = (
if column_type == ColumnType::Str {
&mut self.str_field_hash_map
} else {
&mut self.bytes_field_hash_map
},
&mut self.dictionaries,
);
mutate_or_create_column(
hash_map,
column_name,
|column_opt: Option<StrOrBytesColumnWriter>| {
if let Some(column_writer) = column_opt {
column_writer
} else {
let dictionary_id = dictionaries.len() as u32;
dictionaries.push(DictionaryBuilder::default());
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
}
},
);
}
ColumnType::Bool => {
mutate_or_create_column(
&mut self.bool_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::DateTime => {
mutate_or_create_column(
&mut self.datetime_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
let numerical_type = column_type.numerical_type().unwrap();
mutate_or_create_column(
&mut self.numerical_field_hash_map,
column_name,
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.force_numerical_type(numerical_type);
column
},
);
}
ColumnType::IpAddr => mutate_or_create_column(
&mut self.ip_addr_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
),
}
}
pub fn force_numerical_type(&mut self, column_name: &str, numerical_type: NumericalType) {
mutate_or_create_column(
&mut self.numerical_field_hash_map,
column_name,
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.force_numerical_type(numerical_type);
column
},
);
}
pub fn record_numerical<T: Into<NumericalValue> + Copy>(
&mut self,
doc: RowId,
column_name: &str,
numerical_value: T,
) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
mutate_or_create_column(
hash_map,
column_name,
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.record_numerical_value(doc, numerical_value.into(), arena);
@@ -212,45 +109,22 @@ impl ColumnarWriter {
}
pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
});
}
pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: crate::DateTime) {
let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, NumericalValue::I64(datetime.timestamp_micros), arena);
column
});
}
pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
let (hash_map, arena, dictionaries) = (
&mut self.str_field_hash_map,
&mut self.arena,
&mut self.dictionaries,
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<StrOrBytesColumnWriter>| {
let mut column: StrOrBytesColumnWriter = column_opt.unwrap_or_else(|| {
// Each column has its own dictionary
let dictionary_id = dictionaries.len() as u32;
dictionaries.push(DictionaryBuilder::default());
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
});
column.record_bytes(doc, value.as_bytes(), dictionaries, arena);
|column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
},
);
}
pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
@@ -262,56 +136,41 @@ impl ColumnarWriter {
);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<StrOrBytesColumnWriter>| {
let mut column: StrOrBytesColumnWriter = column_opt.unwrap_or_else(|| {
|column_opt: Option<StrColumnWriter>| {
let mut column: StrColumnWriter = column_opt.unwrap_or_else(|| {
// Each column has its own dictionary
let dictionary_id = dictionaries.len() as u32;
dictionaries.push(DictionaryBuilder::default());
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
StrColumnWriter::with_dictionary_id(dictionary_id)
});
column.record_bytes(doc, value, dictionaries, arena);
column.record_bytes(doc, value.as_bytes(), dictionaries, arena);
column
},
);
}
pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(wrt);
let mut columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
let mut field_columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
.numerical_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Numerical, addr))
.map(|(term, addr, _)| (term, ColumnTypeCategory::Numerical, addr))
.collect();
columns.extend(
field_columns.extend(
self.bytes_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Bytes, addr)),
.map(|(term, addr, _)| (term, ColumnTypeCategory::Str, addr)),
);
columns.extend(
self.str_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Str, addr)),
);
columns.extend(
field_columns.extend(
self.bool_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::Bool, addr)),
.map(|(term, addr, _)| (term, ColumnTypeCategory::Bool, addr)),
);
columns.extend(
self.ip_addr_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::IpAddr, addr)),
);
columns.extend(
self.datetime_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnTypeCategory::DateTime, addr)),
);
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
field_columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
for (column_name, column_type, addr) in columns {
match column_type {
for (column_name, bytes_or_numerical, addr) in field_columns {
match bytes_or_numerical {
ColumnTypeCategory::Bool => {
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
@@ -325,32 +184,14 @@ impl ColumnarWriter {
&mut column_serializer,
)?;
}
ColumnTypeCategory::IpAddr => {
let column_writer: ColumnWriter = self.ip_addr_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, ColumnType::IpAddr);
serialize_ip_addr_column(
cardinality,
num_docs,
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
}
ColumnTypeCategory::Bytes | ColumnTypeCategory::Str => {
let (column_type, str_column_writer): (ColumnType, StrOrBytesColumnWriter) =
if column_type == ColumnTypeCategory::Bytes {
(ColumnType::Bytes, self.bytes_field_hash_map.read(addr))
} else {
(ColumnType::Str, self.str_field_hash_map.read(addr))
};
ColumnTypeCategory::Str => {
let str_column_writer: StrColumnWriter = self.bytes_field_hash_map.read(addr);
let dictionary_builder =
&dictionaries[str_column_writer.dictionary_id as usize];
let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, column_type);
serialize_bytes_or_str_column(
serializer.serialize_column(column_name, ColumnType::Bytes);
serialize_bytes_column(
cardinality,
num_docs,
dictionary_builder,
@@ -364,8 +205,8 @@ impl ColumnarWriter {
self.numerical_field_hash_map.read(addr);
let (numerical_type, cardinality) =
numerical_column_writer.column_type_and_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, ColumnType::from(numerical_type));
let mut column_serializer = serializer
.serialize_column(column_name, ColumnType::Numerical(numerical_type));
serialize_numerical_column(
cardinality,
num_docs,
@@ -375,20 +216,6 @@ impl ColumnarWriter {
&mut column_serializer,
)?;
}
ColumnTypeCategory::DateTime => {
let column_writer: ColumnWriter = self.datetime_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, ColumnType::DateTime);
serialize_numerical_column(
cardinality,
num_docs,
NumericalType::I64,
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
}
};
}
serializer.finalize()?;
@@ -396,7 +223,7 @@ impl ColumnarWriter {
}
}
fn serialize_bytes_or_str_column(
fn serialize_bytes_column(
cardinality: Cardinality,
num_docs: RowId,
dictionary_builder: &DictionaryBuilder,
@@ -423,7 +250,7 @@ fn serialize_bytes_or_str_column(
ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
}
});
send_to_serialize_column_mappable_to_u64(
serialize_column(
operation_iterator,
cardinality,
num_docs,
@@ -452,7 +279,7 @@ fn serialize_numerical_column(
} = buffers;
match numerical_type {
NumericalType::I64 => {
send_to_serialize_column_mappable_to_u64(
serialize_column(
coerce_numerical_symbol::<i64>(op_iterator),
cardinality,
num_docs,
@@ -462,7 +289,7 @@ fn serialize_numerical_column(
)?;
}
NumericalType::U64 => {
send_to_serialize_column_mappable_to_u64(
serialize_column(
coerce_numerical_symbol::<u64>(op_iterator),
cardinality,
num_docs,
@@ -472,7 +299,7 @@ fn serialize_numerical_column(
)?;
}
NumericalType::F64 => {
send_to_serialize_column_mappable_to_u64(
serialize_column(
coerce_numerical_symbol::<f64>(op_iterator),
cardinality,
num_docs,
@@ -497,7 +324,7 @@ fn serialize_bool_column(
bool_values,
..
} = buffers;
send_to_serialize_column_mappable_to_u64(
serialize_column(
column_operations_it,
cardinality,
num_docs,
@@ -508,76 +335,7 @@ fn serialize_bool_column(
Ok(())
}
fn serialize_ip_addr_column(
cardinality: Cardinality,
num_docs: RowId,
column_operations_it: impl Iterator<Item = ColumnOperation<Ipv6Addr>>,
buffers: &mut SpareBuffers,
wrt: &mut impl io::Write,
) -> io::Result<()> {
let SpareBuffers {
value_index_builders,
ip_addr_values,
..
} = buffers;
send_to_serialize_column_mappable_to_u128(
column_operations_it,
cardinality,
num_docs,
value_index_builders,
ip_addr_values,
wrt,
)?;
Ok(())
}
fn send_to_serialize_column_mappable_to_u128<
T: Copy + std::fmt::Debug + Send + Sync + MonotonicallyMappableToU128 + PartialOrd,
>(
op_iterator: impl Iterator<Item = ColumnOperation<T>>,
cardinality: Cardinality,
num_docs: RowId,
value_index_builders: &mut PreallocatedIndexBuilders,
values: &mut Vec<T>,
mut wrt: impl io::Write,
) -> io::Result<()>
where
for<'a> VecColumn<'a, T>: ColumnValues<T>,
{
values.clear();
// TODO: split index and values
let serializable_column_index = match cardinality {
Cardinality::Full => {
consume_operation_iterator(
op_iterator,
value_index_builders.borrow_required_index_builder(),
values,
);
SerializableColumnIndex::Full
}
Cardinality::Optional => {
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
consume_operation_iterator(op_iterator, optional_index_builder, values);
let optional_index = optional_index_builder.finish(num_docs);
SerializableColumnIndex::Optional(Box::new(optional_index))
}
Cardinality::Multivalued => {
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
let multivalued_index = multivalued_index_builder.finish(num_docs);
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
}
};
crate::column::serialize_column_mappable_to_u128(
serializable_column_index,
|| values.iter().cloned(),
values.len() as u32,
&mut wrt,
)?;
Ok(())
}
fn send_to_serialize_column_mappable_to_u64<
fn serialize_column<
T: Copy + Default + std::fmt::Debug + Send + Sync + MonotonicallyMappableToU64 + PartialOrd,
>(
op_iterator: impl Iterator<Item = ColumnOperation<T>>,
@@ -610,10 +368,11 @@ where
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
let multivalued_index = multivalued_index_builder.finish(num_docs);
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
todo!();
// SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
}
};
crate::column::serialize_column_mappable_to_u64(
crate::column::serialize_column_u64(
serializable_column_index,
&VecColumn::from(&values[..]),
&mut wrt,
@@ -651,12 +410,59 @@ fn consume_operation_iterator<T: std::fmt::Debug, TIndexBuilder: IndexBuilder>(
}
}
// /// Serializes the column with the codec with the best estimate on the data.
// fn serialize_numerical<T: MonotonicallyMappableToU64>(
// value_index: ValueIndexInfo,
// typed_column: impl Column<T>,
// output: &mut impl io::Write,
// codecs: &[FastFieldCodecType],
// ) -> io::Result<()> {
// let counting_writer = CountingWriter::wrap(output);
// serialize_value_index(value_index, output)?;
// let value_index_len = counting_writer.written_bytes();
// let output = counting_writer.finish();
// serialize_column(value_index, output)?;
// let column = monotonic_map_column(
// typed_column,
// crate::column::monotonic_mapping::StrictlyMonotonicMappingToInternal::<T>::new(),
// );
// let header = Header::compute_header(&column, codecs).ok_or_else(|| {
// io::Error::new(
// io::ErrorKind::InvalidInput,
// format!(
// "Data cannot be serialized with this list of codec. {:?}",
// codecs
// ),
// )
// })?;
// header.serialize(output)?;
// let normalized_column = header.normalize_column(column);
// assert_eq!(normalized_column.min_value(), 0u64);
// serialize_given_codec(normalized_column, header.codec_type, output)?;
// let column_header = ColumnFooter {
// value_index_len: todo!(),
// cardinality: todo!(),
// };
// let null_index_footer = NullIndexFooter {
// cardinality: value_index.get_cardinality(),
// null_index_codec: NullIndexCodec::Full,
// null_index_byte_range: 0..0,
// };
// append_null_index_footer(output, null_index_footer)?;
// Ok(())
// }
#[cfg(test)]
mod tests {
use column_operation::ColumnOperation;
use stacker::MemoryArena;
use crate::columnar::writer::column_operation::ColumnOperation;
use crate::{Cardinality, NumericalValue};
use super::*;
use crate::value::NumericalValue;
#[test]
fn test_column_writer_required_simple() {

View File

@@ -97,10 +97,10 @@ mod tests {
#[test]
fn test_prepare_key_bytes() {
let mut buffer: Vec<u8> = b"somegarbage".to_vec();
prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
prepare_key(b"root\0child", ColumnType::Bytes, &mut buffer);
assert_eq!(buffer.len(), 12);
assert_eq!(&buffer[..10], b"root\0child");
assert_eq!(buffer[10], 0u8);
assert_eq!(buffer[11], ColumnType::Str.to_code());
assert_eq!(buffer[11], ColumnType::Bytes.to_code());
}
}
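The serialized dictionary key is the column name followed by a `0u8` separator and the one-byte column type code. A minimal sketch of that layout (`make_key` is a hypothetical helper, not the crate's `prepare_key`, and the type code in `main` is a made-up value):

```rust
// Builds `column_name ++ 0u8 ++ type_code`, the layout checked in the test above.
fn make_key(column_name: &[u8], type_code: u8) -> Vec<u8> {
    let mut key = Vec::with_capacity(column_name.len() + 2);
    key.extend_from_slice(column_name);
    key.push(0u8); // separator between the name and the type code
    key.push(type_code);
    key
}

fn main() {
    let key = make_key(b"root\0child", 0u8);
    assert_eq!(key.len(), 12);
    assert_eq!(key[10], 0u8);
}
```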

View File

@@ -45,6 +45,16 @@ impl<'a> SerializableOptionalIndex<'a> for SingleValueArrayIndex<'a> {
}
}
impl OptionalIndexBuilder {
fn num_non_nulls(&self) -> u32 {
self.docs.len() as u32
}
fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
Box::new(self.docs.iter().copied())
}
}
impl OptionalIndexBuilder {
pub fn finish<'a>(&'a mut self, num_rows: RowId) -> impl SerializableOptionalIndex + 'a {
debug_assert!(self
@@ -86,7 +96,7 @@ pub struct MultivaluedIndexBuilder {
impl MultivaluedIndexBuilder {
pub fn finish(&mut self, num_docs: RowId) -> impl ColumnValues<u32> + '_ {
self.start_offsets
.resize(num_docs as usize + 1, self.total_num_vals_seen);
.resize(num_docs as usize, self.total_num_vals_seen);
VecColumn {
values: &&self.start_offsets[..],
min_value: 0,
@@ -178,7 +188,7 @@ mod tests {
.finish(4u32)
.iter()
.collect::<Vec<u32>>(),
vec![0, 0, 2, 3, 3]
vec![0, 0, 2, 3]
);
multivalued_value_index_builder.reset();
multivalued_value_index_builder.record_row(2u32);
@@ -189,7 +199,7 @@ mod tests {
.finish(4u32)
.iter()
.collect::<Vec<u32>>(),
vec![0, 0, 0, 2, 2]
vec![0, 0, 0, 2]
);
}
}

View File

@@ -1,14 +1,12 @@
use std::io;
use std::net::Ipv6Addr;
use std::sync::Arc;
use std::net::IpAddr;
use common::file_slice::FileSlice;
use common::{HasLen, OwnedBytes};
use crate::column::{BytesColumn, Column, StrColumn};
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
use crate::column::{BytesColumn, Column};
use crate::columnar::ColumnType;
use crate::{DateTime, NumericalType};
use crate::DateTime;
#[derive(Clone)]
pub enum DynamicColumn {
@@ -16,163 +14,41 @@ pub enum DynamicColumn {
I64(Column<i64>),
U64(Column<u64>),
F64(Column<f64>),
IpAddr(Column<Ipv6Addr>),
IpAddr(Column<IpAddr>),
DateTime(Column<DateTime>),
Bytes(BytesColumn),
Str(StrColumn),
Str(BytesColumn),
}
impl DynamicColumn {
pub fn column_type(&self) -> ColumnType {
match self {
DynamicColumn::Bool(_) => ColumnType::Bool,
DynamicColumn::I64(_) => ColumnType::I64,
DynamicColumn::U64(_) => ColumnType::U64,
DynamicColumn::F64(_) => ColumnType::F64,
DynamicColumn::IpAddr(_) => ColumnType::IpAddr,
DynamicColumn::DateTime(_) => ColumnType::DateTime,
DynamicColumn::Bytes(_) => ColumnType::Bytes,
DynamicColumn::Str(_) => ColumnType::Str,
}
}
pub fn is_numerical(&self) -> bool {
self.column_type().numerical_type().is_some()
}
pub fn is_f64(&self) -> bool {
self.column_type().numerical_type() == Some(NumericalType::F64)
}
pub fn is_i64(&self) -> bool {
self.column_type().numerical_type() == Some(NumericalType::I64)
}
pub fn is_u64(&self) -> bool {
self.column_type().numerical_type() == Some(NumericalType::U64)
}
pub fn coerce_to_f64(self) -> Option<DynamicColumn> {
match self {
DynamicColumn::I64(column) => Some(DynamicColumn::F64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapI64ToF64)),
})),
DynamicColumn::U64(column) => Some(DynamicColumn::F64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapU64ToF64)),
})),
DynamicColumn::F64(_) => Some(self),
_ => None,
}
}
pub fn coerce_to_i64(self) -> Option<DynamicColumn> {
match self {
DynamicColumn::U64(column) => {
if column.max_value() > i64::MAX as u64 {
return None;
}
Some(DynamicColumn::I64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapU64ToI64)),
}))
}
DynamicColumn::I64(_) => Some(self),
_ => None,
}
}
pub fn coerce_to_u64(self) -> Option<DynamicColumn> {
match self {
DynamicColumn::I64(column) => {
if column.min_value() < 0 {
return None;
}
Some(DynamicColumn::U64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapI64ToU64)),
}))
}
DynamicColumn::U64(_) => Some(self),
_ => None,
}
impl From<Column<i64>> for DynamicColumn {
fn from(column_i64: Column<i64>) -> Self {
DynamicColumn::I64(column_i64)
}
}
struct MapI64ToF64;
impl StrictlyMonotonicFn<i64, f64> for MapI64ToF64 {
#[inline(always)]
fn mapping(&self, inp: i64) -> f64 {
inp as f64
}
#[inline(always)]
fn inverse(&self, out: f64) -> i64 {
out as i64
impl From<Column<u64>> for DynamicColumn {
fn from(column_u64: Column<u64>) -> Self {
DynamicColumn::U64(column_u64)
}
}
struct MapU64ToF64;
impl StrictlyMonotonicFn<u64, f64> for MapU64ToF64 {
#[inline(always)]
fn mapping(&self, inp: u64) -> f64 {
inp as f64
}
#[inline(always)]
fn inverse(&self, out: f64) -> u64 {
out as u64
impl From<Column<f64>> for DynamicColumn {
fn from(column_f64: Column<f64>) -> Self {
DynamicColumn::F64(column_f64)
}
}
struct MapU64ToI64;
impl StrictlyMonotonicFn<u64, i64> for MapU64ToI64 {
#[inline(always)]
fn mapping(&self, inp: u64) -> i64 {
inp as i64
}
#[inline(always)]
fn inverse(&self, out: i64) -> u64 {
out as u64
impl From<Column<bool>> for DynamicColumn {
fn from(bool_column: Column<bool>) -> Self {
DynamicColumn::Bool(bool_column)
}
}
struct MapI64ToU64;
impl StrictlyMonotonicFn<i64, u64> for MapI64ToU64 {
#[inline(always)]
fn mapping(&self, inp: i64) -> u64 {
inp as u64
}
#[inline(always)]
fn inverse(&self, out: u64) -> i64 {
out as i64
impl From<BytesColumn> for DynamicColumn {
fn from(dictionary_encoded_col: BytesColumn) -> Self {
DynamicColumn::Str(dictionary_encoded_col)
}
}
macro_rules! static_dynamic_conversions {
($typ:ty, $enum_name:ident) => {
impl Into<Option<$typ>> for DynamicColumn {
fn into(self) -> Option<$typ> {
if let DynamicColumn::$enum_name(col) = self {
Some(col)
} else {
None
}
}
}
impl From<$typ> for DynamicColumn {
fn from(typed_column: $typ) -> Self {
DynamicColumn::$enum_name(typed_column)
}
}
};
}
static_dynamic_conversions!(Column<bool>, Bool);
static_dynamic_conversions!(Column<u64>, U64);
static_dynamic_conversions!(Column<i64>, I64);
static_dynamic_conversions!(Column<f64>, F64);
static_dynamic_conversions!(Column<crate::DateTime>, DateTime);
static_dynamic_conversions!(StrColumn, Str);
static_dynamic_conversions!(BytesColumn, Bytes);
static_dynamic_conversions!(Column<Ipv6Addr>, IpAddr);
#[derive(Clone)]
pub struct DynamicColumnHandle {
pub(crate) file_slice: FileSlice,
@@ -180,53 +56,31 @@ pub struct DynamicColumnHandle {
}
impl DynamicColumnHandle {
// TODO rename load
pub fn open(&self) -> io::Result<DynamicColumn> {
let column_bytes: OwnedBytes = self.file_slice.read_bytes()?;
self.open_internal(column_bytes)
}
// TODO rename load_async
pub async fn open_async(&self) -> io::Result<DynamicColumn> {
let column_bytes: OwnedBytes = self.file_slice.read_bytes_async().await?;
self.open_internal(column_bytes)
}
/// Returns the `u64` fast field reader associated with fields of type
/// Str, u64, i64, f64, or datetime.
///
/// For fields that are not natively u64, the reader returns the u64 value
/// associated with the original FastValue.
pub fn open_u64_lenient(&self) -> io::Result<Option<Column<u64>>> {
let column_bytes = self.file_slice.read_bytes()?;
match self.column_type {
ColumnType::Str | ColumnType::Bytes => {
let column: BytesColumn = crate::column::open_column_bytes(column_bytes)?;
Ok(Some(column.term_ord_column))
}
ColumnType::Bool => Ok(None),
ColumnType::IpAddr => Ok(None),
ColumnType::I64 | ColumnType::U64 | ColumnType::F64 | ColumnType::DateTime => {
let column = crate::column::open_column_u64::<u64>(column_bytes)?;
Ok(Some(column))
}
}
}
fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
let dynamic_column: DynamicColumn = match self.column_type {
ColumnType::Bytes => {
crate::column::open_column_bytes::<BytesColumn>(column_bytes)?.into()
}
ColumnType::Str => crate::column::open_column_bytes::<StrColumn>(column_bytes)?.into(),
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),
ColumnType::Bytes => crate::column::open_column_bytes(column_bytes)?.into(),
ColumnType::Numerical(numerical_type) => match numerical_type {
crate::NumericalType::I64 => {
crate::column::open_column_u64::<i64>(column_bytes)?.into()
}
crate::NumericalType::U64 => {
crate::column::open_column_u64::<u64>(column_bytes)?.into()
}
crate::NumericalType::F64 => {
crate::column::open_column_u64::<f64>(column_bytes)?.into()
}
},
ColumnType::Bool => crate::column::open_column_u64::<bool>(column_bytes)?.into(),
ColumnType::IpAddr => crate::column::open_column_u128::<Ipv6Addr>(column_bytes)?.into(),
ColumnType::DateTime => {
crate::column::open_column_u64::<crate::DateTime>(column_bytes)?.into()
}
};
Ok(dynamic_column)
}

View File

@@ -18,21 +18,16 @@ mod dynamic_column;
pub(crate) mod utils;
mod value;
pub use column::{BytesColumn, Column, StrColumn};
pub use column_values::ColumnValues;
pub use columnar::{
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
MergeDocOrder,
};
pub use columnar::{ColumnarReader, ColumnarWriter};
pub use value::{NumericalType, NumericalValue};
pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle};
// pub use self::dynamic_column::DynamicColumnHandle;
pub type RowId = u32;
#[derive(Clone, Copy, PartialOrd, PartialEq, Default, Debug)]
#[derive(Clone, Copy)]
pub struct DateTime {
pub timestamp_micros: i64,
timestamp_micros: i64,
}
#[derive(Copy, Clone, Debug)]

View File

@@ -1,13 +1,10 @@
use std::net::Ipv6Addr;
use crate::column_values::MonotonicallyMappableToU128;
use crate::columnar::ColumnType;
use crate::dynamic_column::{DynamicColumn, DynamicColumnHandle};
use crate::value::NumericalValue;
use crate::{Cardinality, ColumnarReader, ColumnarWriter};
#[test]
fn test_dataframe_writer_str() {
fn test_dataframe_writer_bytes() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_str(1u32, "my_string", "hello");
dataframe_writer.record_str(3u32, "my_string", "helloeee");
@@ -17,21 +14,7 @@ fn test_dataframe_writer_str() {
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 158);
}
#[test]
fn test_dataframe_writer_bytes() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_bytes(1u32, "my_string", b"hello");
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 158);
assert_eq!(cols[0].num_bytes(), 165);
}
#[test]
@@ -45,7 +28,7 @@ fn test_dataframe_writer_bool() {
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 22);
assert_eq!(cols[0].num_bytes(), 29);
assert_eq!(cols[0].column_type(), ColumnType::Bool);
let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::Bool(bool_col) = dyn_bool_col else { panic!(); };
@@ -53,59 +36,6 @@ fn test_dataframe_writer_bool() {
assert_eq!(&vals, &[None, Some(false), None, Some(true), None,]);
}
#[test]
fn test_dataframe_writer_u64_multivalued() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(2u32, "divisor", 2u64);
dataframe_writer.record_numerical(3u32, "divisor", 3u64);
dataframe_writer.record_numerical(4u32, "divisor", 2u64);
dataframe_writer.record_numerical(5u32, "divisor", 5u64);
dataframe_writer.record_numerical(6u32, "divisor", 2u64);
dataframe_writer.record_numerical(6u32, "divisor", 3u64);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(7, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 29);
let dyn_i64_col = cols[0].open().unwrap();
let DynamicColumn::I64(divisor_col) = dyn_i64_col else { panic!(); };
assert_eq!(
divisor_col.get_cardinality(),
crate::Cardinality::Multivalued
);
assert_eq!(divisor_col.num_rows(), 7);
}
#[test]
fn test_dataframe_writer_ip_addr() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 42);
assert_eq!(cols[0].column_type(), ColumnType::IpAddr);
let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else { panic!(); };
let vals: Vec<Option<Ipv6Addr>> = (0..5).map(|row_id| ip_col.first(row_id)).collect();
assert_eq!(
&vals,
&[
None,
Some(Ipv6Addr::from_u128(1001)),
None,
Some(Ipv6Addr::from_u128(1050)),
None,
]
);
}
#[test]
fn test_dataframe_writer_numerical() {
let mut dataframe_writer = ColumnarWriter::default();
@@ -123,7 +53,7 @@ fn test_dataframe_writer_numerical() {
// - header 14 bytes
// - vals 8 //< due to padding? could have been 1 byte.
// - null footer 6 bytes
assert_eq!(cols[0].num_bytes(), 33);
assert_eq!(cols[0].num_bytes(), 40);
let column = cols[0].open().unwrap();
let DynamicColumn::I64(column_i64) = column else { panic!(); };
assert_eq!(column_i64.idx.get_cardinality(), Cardinality::Optional);
@@ -137,76 +67,18 @@ fn test_dataframe_writer_numerical() {
}
#[test]
fn test_dictionary_encoded_str() {
fn test_dictionary_encoded() {
let mut buffer = Vec::new();
let mut columnar_writer = ColumnarWriter::default();
columnar_writer.record_str(1, "my.column", "a");
columnar_writer.record_str(3, "my.column", "c");
columnar_writer.record_str(1, "my.column", "my.key");
columnar_writer.record_str(3, "my.column", "my.key2");
columnar_writer.record_str(3, "my.column2", "different_column!");
columnar_writer.record_str(4, "my.column", "b");
columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1);
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else { panic!(); };
let index: Vec<Option<u64>> = (0..5).map(|row_id| str_col.ords().first(row_id)).collect();
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
assert_eq!(str_col.num_rows(), 5);
let mut term_buffer = String::new();
let term_ords = str_col.ords();
assert_eq!(term_ords.first(0), None);
assert_eq!(term_ords.first(1), Some(0));
str_col.ord_to_str(0u64, &mut term_buffer).unwrap();
assert_eq!(term_buffer, "a");
assert_eq!(term_ords.first(2), None);
assert_eq!(term_ords.first(3), Some(2));
str_col.ord_to_str(2u64, &mut term_buffer).unwrap();
assert_eq!(term_buffer, "c");
assert_eq!(term_ords.first(4), Some(1));
str_col.ord_to_str(1u64, &mut term_buffer).unwrap();
assert_eq!(term_buffer, "b");
}
#[test]
fn test_dictionary_encoded_bytes() {
let mut buffer = Vec::new();
let mut columnar_writer = ColumnarWriter::default();
columnar_writer.record_bytes(1, "my.column", b"a");
columnar_writer.record_bytes(3, "my.column", b"c");
columnar_writer.record_bytes(3, "my.column2", b"different_column!");
columnar_writer.record_bytes(4, "my.column", b"b");
columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1);
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else { panic!(); };
let index: Vec<Option<u64>> = (0..5)
.map(|row_id| bytes_col.ords().first(row_id))
.collect();
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
assert_eq!(bytes_col.num_rows(), 5);
let mut term_buffer = Vec::new();
let term_ords = bytes_col.ords();
assert_eq!(term_ords.first(0), None);
assert_eq!(term_ords.first(1), Some(0));
bytes_col
.dictionary
.ord_to_term(0u64, &mut term_buffer)
.unwrap();
assert_eq!(term_buffer, b"a");
assert_eq!(term_ords.first(2), None);
assert_eq!(term_ords.first(3), Some(2));
bytes_col
.dictionary
.ord_to_term(2u64, &mut term_buffer)
.unwrap();
assert_eq!(term_buffer, b"c");
assert_eq!(term_ords.first(4), Some(1));
bytes_col
.dictionary
.ord_to_term(1u64, &mut term_buffer)
.unwrap();
assert_eq!(term_buffer, b"b");
// let term_ords = (0..)
}

View File

@@ -1,22 +1,12 @@
use crate::InvalidData;
#[derive(Copy, Clone, PartialEq, Debug)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum NumericalValue {
I64(i64),
U64(u64),
F64(f64),
}
impl NumericalValue {
pub fn numerical_type(&self) -> NumericalType {
match self {
NumericalValue::I64(_) => NumericalType::I64,
NumericalValue::U64(_) => NumericalType::U64,
NumericalValue::F64(_) => NumericalType::F64,
}
}
}
impl From<u64> for NumericalValue {
fn from(val: u64) -> NumericalValue {
NumericalValue::U64(val)
@@ -35,6 +25,18 @@ impl From<f64> for NumericalValue {
}
}
impl NumericalValue {
pub fn numerical_type(&self) -> NumericalType {
match self {
NumericalValue::F64(_) => NumericalType::F64,
NumericalValue::I64(_) => NumericalType::I64,
NumericalValue::U64(_) => NumericalType::U64,
}
}
}
impl Eq for NumericalValue {}
#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]
#[repr(u8)]
pub enum NumericalType {
@@ -104,13 +106,6 @@ impl Coerce for f64 {
}
}
impl Coerce for crate::DateTime {
fn coerce(value: NumericalValue) -> Self {
let timestamp_micros = i64::coerce(value);
crate::DateTime { timestamp_micros }
}
}
#[cfg(test)]
mod tests {
use super::NumericalType;

View File

@@ -27,7 +27,7 @@ fn main() -> tantivy::Result<()> {
let score_fieldtype =
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let highscore_field = schema_builder.add_f64_field("highscore", score_fieldtype.clone());
let price_field = schema_builder.add_f64_field("price", score_fieldtype);
let price_field = schema_builder.add_f64_field("price", score_fieldtype.clone());
let schema = schema_builder.build();
@@ -112,7 +112,7 @@ fn main() -> tantivy::Result<()> {
],
..Default::default()
}),
sub_aggregation: sub_agg_req_1,
sub_aggregation: sub_agg_req_1.clone(),
}),
)]
.into_iter()
@@ -123,7 +123,7 @@ fn main() -> tantivy::Result<()> {
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
let res: Value = serde_json::to_value(agg_res)?;
let res: Value = serde_json::to_value(&agg_res)?;
println!("{}", serde_json::to_string_pretty(&res)?);
Ok(())

View File

@@ -402,8 +402,8 @@ mod tests {
let mut buffer = Vec::new();
let col = VecColumn::from(&[false, true][..]);
serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
// 5 bytes of header, 1 byte of value
assert_eq!(buffer.len(), 3 + 5 + 1 + 4 + 2);
// 5 bytes of header, 1 byte of value, 7 bytes of padding.
assert_eq!(buffer.len(), 3 + 5 + 8 + 4 + 2);
}
#[test]
@@ -411,8 +411,8 @@ mod tests {
let mut buffer = Vec::new();
let col = VecColumn::from(&[true][..]);
serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
// 5 bytes of header, 0 bytes of value
assert_eq!(buffer.len(), 3 + 5 + 4 + 2);
// 5 bytes of header, 0 bytes of value, 7 bytes of padding.
assert_eq!(buffer.len(), 3 + 5 + 7 + 4 + 2);
}
#[test]
@@ -422,6 +422,6 @@ mod tests {
let col = VecColumn::from(&vals[..]);
serialize(col, &mut buffer, &[FastFieldCodecType::Bitpacked]).unwrap();
// Values are stored over 3 bits.
assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 4 + 2);
assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 7 + 4 + 2);
}
}

2
run-tests.sh Executable file
View File

@@ -0,0 +1,2 @@
#!/bin/bash
cargo test

View File

@@ -6,13 +6,14 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that computes the average of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "avg": {
/// "field": "score"
/// "field": "score",
/// }
/// }
/// ```

View File

@@ -6,13 +6,14 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that counts the number of values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "value_count": {
/// "field": "score"
/// "field": "score",
/// }
/// }
/// ```

View File

@@ -6,13 +6,14 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that computes the maximum of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "max": {
/// "field": "score"
/// "field": "score",
/// }
/// }
/// ```

View File

@@ -6,13 +6,14 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that computes the minimum of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "min": {
/// "field": "score"
/// "field": "score",
/// }
/// }
/// ```

View File

@@ -80,12 +80,12 @@ mod tests {
"price_stats": { "stats": { "field": "price" } },
"price_sum": { "sum": { "field": "price" } }
}"#;
let aggregations: Aggregations = serde_json::from_str(aggregations_json).unwrap();
let aggregations: Aggregations = serde_json::from_str(&aggregations_json).unwrap();
let collector = AggregationCollector::from_aggs(aggregations, None, index.schema());
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let aggregations_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
let aggregations_res_json = serde_json::to_value(aggregations_res).unwrap();
let aggregations_res_json = serde_json::to_value(&aggregations_res).unwrap();
assert_eq!(aggregations_res_json["price_avg"]["value"], 2.5);
assert_eq!(aggregations_res_json["price_count"]["value"], 6.0);

View File

@@ -7,13 +7,14 @@ use crate::{DocId, TantivyError};
/// A multi-value metric aggregation that computes a collection of statistics on numeric values that
/// are extracted from the aggregated documents.
/// Supported field types are `u64`, `i64`, and `f64`.
/// See [`Stats`] for returned statistics.
///
/// # JSON Format
/// ```json
/// {
/// "stats": {
/// "field": "score"
/// "field": "score",
/// }
/// }
/// ```

View File

@@ -6,13 +6,14 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that sums up numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "sum": {
/// "field": "score"
/// "field": "score",
/// }
/// }
/// ```

View File

@@ -1,5 +1,6 @@
//! # Aggregations
//!
//!
//! An aggregation summarizes your data as statistics on buckets or metrics.
//!
//! Aggregations can provide answer to questions like:
@@ -40,10 +41,6 @@
//! - [Metric](metric)
//! - [Average](metric::AverageAggregation)
//! - [Stats](metric::StatsAggregation)
//! - [Min](metric::MinAggregation)
//! - [Max](metric::MaxAggregation)
//! - [Sum](metric::SumAggregation)
//! - [Count](metric::CountAggregation)
//!
//! # Example
//! Compute the average metric, by building [`agg_req::Aggregations`], which is built from an
@@ -78,7 +75,7 @@
//! }
//! ```
//! # Example JSON
//! Requests are compatible with the elasticsearch JSON request format.
//! Requests are compatible with the elasticsearch json request format.
//!
//! ```
//! use tantivy::aggregation::agg_req::Aggregations;

View File

@@ -226,7 +226,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 27);
assert_eq!(file.len(), 34);
let composite_file = CompositeFile::open(&file)?;
let fast_field_bytes = composite_file.open_read(*FIELD).unwrap().read_bytes()?;
let fast_field_reader = open::<u64>(fast_field_bytes)?;
@@ -275,7 +275,7 @@ mod tests {
serializer.close()?;
}
let file = directory.open_read(path)?;
assert_eq!(file.len(), 55);
assert_eq!(file.len(), 62);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite
@@ -316,7 +316,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 28);
assert_eq!(file.len(), 35);
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite
@@ -355,7 +355,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 80042);
assert_eq!(file.len(), 80049);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite
@@ -397,7 +397,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 42_usize);
assert_eq!(file.len(), 49_usize);
{
let fast_fields_composite = CompositeFile::open(&file)?;
@@ -836,7 +836,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 26);
assert_eq!(file.len(), 33);
let composite_file = CompositeFile::open(&file)?;
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?;
@@ -874,7 +874,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 38);
assert_eq!(file.len(), 45);
let composite_file = CompositeFile::open(&file)?;
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?;
@@ -906,7 +906,7 @@ mod tests {
}
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
assert_eq!(file.len(), 25);
assert_eq!(file.len(), 32);
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?;
assert_eq!(fast_field_reader.get_val(0), false);
@@ -940,10 +940,10 @@ mod tests {
pub fn test_gcd_date() -> crate::Result<()> {
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
assert_eq!(size_prec_sec, 5 + 4 + 21 + (1_000 * 13) / 8); // 13 bits per val = ceil(log2(number of seconds in 2 hours))
assert_eq!(size_prec_sec, 5 + 4 + 28 + (1_000 * 13) / 8); // 13 bits per val = ceil(log2(number of seconds in 2 hours))
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
assert_eq!(size_prec_micro, 5 + 4 + 19 + (1_000 * 33) / 8); // 33 bits per val = ceil(log2(number of microseconds in 2 hours))
assert_eq!(size_prec_micro, 5 + 4 + 26 + (1_000 * 33) / 8); // 33 bits per val = ceil(log2(number of microseconds in 2 hours))
Ok(())
}
@@ -1014,7 +1014,7 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment = &searcher.segment_readers()[0];
let field = segment.fast_fields().u64("url_norm_hash").unwrap();
let field = segment.fast_fields().u64(num_field).unwrap();
let numbers = vec![100, 200, 300];
let test_range = |range: RangeInclusive<u64>| {
@@ -1063,7 +1063,7 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment = &searcher.segment_readers()[0];
let field = segment.fast_fields().u64("url_norm_hash").unwrap();
let field = segment.fast_fields().u64(num_field).unwrap();
let numbers = vec![1000, 1001, 1003];
let test_range = |range: RangeInclusive<u64>| {

View File

@@ -159,7 +159,7 @@ mod tests {
let searcher = reader.searcher();
let reader = searcher.segment_reader(0);
let date_ff_reader = reader.fast_fields().dates("multi_date_field").unwrap();
let date_ff_reader = reader.fast_fields().dates(date_field).unwrap();
let mut docids = vec![];
date_ff_reader.get_docids_for_value_range(
DateTime::from_utc(first_time_stamp)..=DateTime::from_utc(two_secs_ahead),
@@ -173,7 +173,7 @@ mod tests {
assert_eq!(
count_multiples(RangeQuery::new_date(
"multi_date_field".to_string(),
date_field,
DateTime::from_utc(first_time_stamp)..DateTime::from_utc(two_secs_ahead)
)),
1

322
src/indexer/demuxer.rs Normal file
View File

@@ -0,0 +1,322 @@
use common::BitSet;
use itertools::Itertools;
use crate::fastfield::AliveBitSet;
use crate::{merge_filtered_segments, Directory, Index, IndexSettings, Segment, SegmentOrdinal};
/// DemuxMapping can be used to reorganize data from multiple segments.
///
/// DemuxMapping is useful in a multi-tenant setting, in which each document may
/// belong to a different tenant. It allows reorganizing documents as follows:
///
/// e.g. if you have two tenant ids TENANT_A and TENANT_B and two segments with
/// the documents (simplified)
/// Seg 1 [TENANT_A, TENANT_B]
/// Seg 2 [TENANT_A, TENANT_B]
///
/// You may want to group your documents to
/// Seg 1 [TENANT_A, TENANT_A]
/// Seg 2 [TENANT_B, TENANT_B]
///
/// Demuxing is the tool for that.
/// Semantically you can define a mapping from [old segment ordinal, old doc_id] -> [new segment
/// ordinal].
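///
/// A minimal sketch of building that mapping (illustrative only; `seg1` and
/// `seg2` are hypothetical names):
///
/// ```ignore
/// let mut mapping = DemuxMapping::default();
///
/// // Old segment 1: doc 0 belongs to TENANT_A, doc 1 to TENANT_B.
/// let mut seg1 = DocIdToSegmentOrdinal::with_max_doc(2);
/// seg1.set(0, 0); // TENANT_A -> new segment 0
/// seg1.set(1, 1); // TENANT_B -> new segment 1
/// mapping.add(seg1);
///
/// // Old segment 2 has the same tenant layout.
/// let mut seg2 = DocIdToSegmentOrdinal::with_max_doc(2);
/// seg2.set(0, 0);
/// seg2.set(1, 1);
/// mapping.add(seg2);
/// ```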
#[derive(Debug, Default)]
pub struct DemuxMapping {
/// [index old segment ordinal] -> [index doc_id] = new segment ordinal
mapping: Vec<DocIdToSegmentOrdinal>,
}
/// DocIdToSegmentOrdinal maps from doc_id within a segment to the new segment ordinal for demuxing.
///
/// For every source segment there is a `DocIdToSegmentOrdinal` to distribute its doc_ids.
#[derive(Debug, Default)]
pub struct DocIdToSegmentOrdinal {
doc_id_index_to_segment_ord: Vec<SegmentOrdinal>,
}
impl DocIdToSegmentOrdinal {
    /// Creates a new DocIdToSegmentOrdinal sized for `max_doc` doc_ids.
    /// Initially all doc_ids point to segment ordinal 0 and need to be set
    /// via the `set` method.
pub fn with_max_doc(max_doc: usize) -> Self {
DocIdToSegmentOrdinal {
doc_id_index_to_segment_ord: vec![0; max_doc],
}
}
/// Returns the number of documents in this mapping.
/// It should be equal to the `max_doc` of the segment it targets.
pub fn max_doc(&self) -> u32 {
self.doc_id_index_to_segment_ord.len() as u32
}
/// Associates a doc_id with an output `SegmentOrdinal`.
pub fn set(&mut self, doc_id: u32, segment_ord: SegmentOrdinal) {
self.doc_id_index_to_segment_ord[doc_id as usize] = segment_ord;
}
    /// Iterates over the new SegmentOrdinals in doc_id order.
pub fn iter(&self) -> impl Iterator<Item = SegmentOrdinal> + '_ {
self.doc_id_index_to_segment_ord.iter().cloned()
}
}
impl DemuxMapping {
    /// Adds a DocIdToSegmentOrdinal. The order of the `add` calls
    /// defines the old segment ordinal, e.g. the first call maps to ordinal 0.
pub fn add(&mut self, segment_mapping: DocIdToSegmentOrdinal) {
self.mapping.push(segment_mapping);
}
    /// Returns the number of old segments.
pub fn get_old_num_segments(&self) -> usize {
self.mapping.len()
}
}
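/// Builds the alive bitset for one old segment: a doc is alive iff the
/// mapping sends it to `target_segment_ord`.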
fn docs_for_segment_ord(
doc_id_to_segment_ord: &DocIdToSegmentOrdinal,
target_segment_ord: SegmentOrdinal,
) -> AliveBitSet {
let mut bitset = BitSet::with_max_value(doc_id_to_segment_ord.max_doc());
for doc_id in doc_id_to_segment_ord
.iter()
.enumerate()
.filter(|(_doc_id, new_segment_ord)| *new_segment_ord == target_segment_ord)
.map(|(doc_id, _)| doc_id)
{
// add document if segment ordinal = target segment ordinal
bitset.insert(doc_id as u32);
}
AliveBitSet::from_bitset(&bitset)
}
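/// Collects one alive bitset per old segment, each keeping only the docs
/// demuxed into `target_segment_ord`.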
fn get_alive_bitsets(
demux_mapping: &DemuxMapping,
target_segment_ord: SegmentOrdinal,
) -> Vec<AliveBitSet> {
demux_mapping
.mapping
.iter()
.map(|doc_id_to_segment_ord| {
docs_for_segment_ord(doc_id_to_segment_ord, target_segment_ord)
})
.collect_vec()
}
/// Demux the segments according to `demux_mapping`. See `DemuxMapping`.
/// The number of `output_directories` needs to match the number of target
/// segments, i.e. the maximum new segment ordinal in `demux_mapping` plus one.
///
/// The ordinals of `segments` need to match the ordinals provided in `demux_mapping`.
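///
/// A minimal call sketch (assuming `segments` and `demux_mapping` were built as
/// above, with two target segments written to two `RamDirectory` instances):
///
/// ```ignore
/// let indices = demux(
///     &segments,
///     &demux_mapping,
///     IndexSettings::default(),
///     vec![
///         Box::<RamDirectory>::default(),
///         Box::<RamDirectory>::default(),
///     ],
/// )?;
/// ```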
pub fn demux(
segments: &[Segment],
demux_mapping: &DemuxMapping,
target_settings: IndexSettings,
output_directories: Vec<Box<dyn Directory>>,
) -> crate::Result<Vec<Index>> {
let mut indices = vec![];
for (target_segment_ord, output_directory) in output_directories.into_iter().enumerate() {
let alive_bitset = get_alive_bitsets(demux_mapping, target_segment_ord as u32)
.into_iter()
.map(Some)
.collect_vec();
let index = merge_filtered_segments(
segments,
target_settings.clone(),
alive_bitset,
output_directory,
)?;
indices.push(index);
}
Ok(indices)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::collector::TopDocs;
use crate::directory::RamDirectory;
use crate::query::QueryParser;
use crate::schema::{Schema, TEXT};
use crate::{DocAddress, Term};
#[test]
fn test_demux_map_to_alive_bitset() {
let max_value = 2;
let mut demux_mapping = DemuxMapping::default();
// segment ordinal 0 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 0);
demux_mapping.add(doc_id_to_segment);
// segment ordinal 1 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 1);
demux_mapping.add(doc_id_to_segment);
{
let bit_sets_for_demuxing_to_segment_ord_0 = get_alive_bitsets(&demux_mapping, 0);
            assert!(bit_sets_for_demuxing_to_segment_ord_0[0].is_deleted(0));
            assert!(!bit_sets_for_demuxing_to_segment_ord_0[0].is_deleted(1));
            assert!(bit_sets_for_demuxing_to_segment_ord_0[1].is_deleted(0));
            assert!(bit_sets_for_demuxing_to_segment_ord_0[1].is_deleted(1));
}
{
let bit_sets_for_demuxing_to_segment_ord_1 = get_alive_bitsets(&demux_mapping, 1);
            assert!(!bit_sets_for_demuxing_to_segment_ord_1[0].is_deleted(0));
            assert!(bit_sets_for_demuxing_to_segment_ord_1[0].is_deleted(1));
            assert!(!bit_sets_for_demuxing_to_segment_ord_1[1].is_deleted(0));
            assert!(!bit_sets_for_demuxing_to_segment_ord_1[1].is_deleted(1));
}
}
#[test]
fn test_demux_segments() -> crate::Result<()> {
let first_index = {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"texto1"))?;
index_writer.add_document(doc!(text_field=>"texto2"))?;
index_writer.commit()?;
index
};
let second_index = {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"texto3"))?;
index_writer.add_document(doc!(text_field=>"texto4"))?;
index_writer.delete_term(Term::from_field_text(text_field, "4"));
index_writer.commit()?;
index
};
let mut segments: Vec<Segment> = Vec::new();
segments.extend(first_index.searchable_segments()?);
segments.extend(second_index.searchable_segments()?);
let target_settings = first_index.settings().clone();
let mut demux_mapping = DemuxMapping::default();
{
let max_value = 2;
// segment ordinal 0 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 0);
demux_mapping.add(doc_id_to_segment);
// segment ordinal 1 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 1);
demux_mapping.add(doc_id_to_segment);
}
assert_eq!(demux_mapping.get_old_num_segments(), 2);
let demuxed_indices = demux(
&segments,
&demux_mapping,
target_settings,
vec![
Box::<RamDirectory>::default(),
Box::<RamDirectory>::default(),
],
)?;
{
let index = &demuxed_indices[0];
let segments = index.searchable_segments()?;
assert_eq!(segments.len(), 1);
let segment_metas = segments[0].meta();
assert_eq!(segment_metas.num_deleted_docs(), 0);
assert_eq!(segment_metas.num_docs(), 1);
let searcher = index.reader().unwrap().searcher();
{
let text_field = index.schema().get_field("text").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(index, vec![text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
};
assert_eq!(do_search("texto1"), vec![] as Vec<u32>);
assert_eq!(do_search("texto2"), vec![0]);
}
}
{
let index = &demuxed_indices[1];
let segments = index.searchable_segments()?;
assert_eq!(segments.len(), 1);
let segment_metas = segments[0].meta();
assert_eq!(segment_metas.num_deleted_docs(), 0);
assert_eq!(segment_metas.num_docs(), 3);
let searcher = index.reader().unwrap().searcher();
{
let text_field = index.schema().get_field("text").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(index, vec![text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
};
assert_eq!(do_search("texto1"), vec![0]);
assert_eq!(do_search("texto2"), vec![] as Vec<u32>);
assert_eq!(do_search("texto3"), vec![1]);
assert_eq!(do_search("texto4"), vec![2]);
}
}
Ok(())
}
}

View File

@@ -1,5 +1,6 @@
pub mod delete_queue;
pub mod demuxer;
pub mod doc_id_mapping;
mod doc_opstamp_mapping;
mod flat_map_with_buffer;

View File

@@ -299,6 +299,7 @@ pub use crate::core::{
SegmentReader, SingleSegmentIndexWriter,
};
pub use crate::directory::Directory;
pub use crate::indexer::demuxer::*;
pub use crate::indexer::operation::UserOperation;
pub use crate::indexer::{merge_filtered_segments, merge_indices, IndexWriter, PreparedCommit};
pub use crate::postings::Postings;

View File

@@ -30,7 +30,7 @@ impl TextOptions {
self.stored
}
/// Returns true if and only if the value is a fast field.
/// Returns true iff the value is a fast field.
pub fn is_fast(&self) -> bool {
self.fast
}

View File

@@ -312,7 +312,7 @@ mod tests {
bitpack.write(51, 6, &mut buffer).unwrap();
assert_eq!(compute_num_bits(51), 6);
bitpack.close(&mut buffer).unwrap();
assert_eq!(buffer.len(), 3);
assert_eq!(buffer.len(), 3 + 7);
assert_eq!(extract_bits(&buffer[..], 0, 9), 321u64);
assert_eq!(extract_bits(&buffer[..], 9, 2), 2u64);
assert_eq!(extract_bits(&buffer[..], 11, 6), 51u64);

View File

@@ -69,16 +69,14 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
pub(crate) fn sstable_delta_reader_for_key_range(
&self,
key_range: impl RangeBounds<[u8]>,
limit: Option<u64>,
) -> io::Result<DeltaReader<'static, TSSTable::ValueReader>> {
let slice = self.file_slice_for_range(key_range, limit);
let slice = self.file_slice_for_range(key_range);
let data = slice.read_bytes()?;
Ok(TSSTable::delta_reader(data))
}
/// This function returns a file slice covering a set of sstable blocks
/// that include the key range passed in arguments. Optionally returns
    /// only blocks for up to `limit` matching terms.
/// that include the key range passed in arguments.
///
/// It works by identifying
    /// - `first_block`: the block containing the start boundary key
@@ -94,56 +92,26 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
    /// In the rare edge case where a user asks for `(start_key, end_key]`
    /// and `start_key` happens to be the last key of a block, we return a
    /// slice that includes a first block that was not actually necessary.
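    ///
    /// A usage sketch against the signature without `limit` (assuming a
    /// `dictionary: Dictionary<TSSTable>` at hand):
    ///
    /// ```ignore
    /// use std::ops::Bound;
    /// // Bytes covering every block that may hold keys in ["aa", "cc").
    /// let slice = dictionary.file_slice_for_range((
    ///     Bound::Included(&b"aa"[..]),
    ///     Bound::Excluded(&b"cc"[..]),
    /// ));
    /// ```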
pub fn file_slice_for_range(
&self,
key_range: impl RangeBounds<[u8]>,
limit: Option<u64>,
) -> FileSlice {
let first_block_id = match key_range.start_bound() {
pub fn file_slice_for_range(&self, key_range: impl RangeBounds<[u8]>) -> FileSlice {
let start_bound: Bound<usize> = match key_range.start_bound() {
Bound::Included(key) | Bound::Excluded(key) => {
let Some(first_block_id) = self.sstable_index.locate_with_key(key) else {
let Some(first_block_addr) = self.sstable_index.search_block(key) else {
return FileSlice::empty();
};
Some(first_block_id)
Bound::Included(first_block_addr.byte_range.start)
}
Bound::Unbounded => None,
Bound::Unbounded => Bound::Unbounded,
};
let last_block_id = match key_range.end_bound() {
Bound::Included(key) | Bound::Excluded(key) => self.sstable_index.locate_with_key(key),
Bound::Unbounded => None,
};
let start_bound = if let Some(first_block_id) = first_block_id {
let Some(block_addr) = self.sstable_index.get_block(first_block_id) else {
return FileSlice::empty();
};
Bound::Included(block_addr.byte_range.start)
} else {
Bound::Unbounded
};
let last_block_id = if let Some(limit) = limit {
let second_block_id = first_block_id.map(|id| id + 1).unwrap_or(0);
if let Some(block_addr) = self.sstable_index.get_block(second_block_id) {
let ordinal_limit = block_addr.first_ordinal + limit;
let last_block_limit = self.sstable_index.locate_with_ord(ordinal_limit);
if let Some(last_block_id) = last_block_id {
Some(last_block_id.min(last_block_limit))
let end_bound: Bound<usize> = match key_range.end_bound() {
Bound::Included(key) | Bound::Excluded(key) => {
if let Some(block_addr) = self.sstable_index.search_block(key) {
Bound::Excluded(block_addr.byte_range.end)
} else {
Some(last_block_limit)
Bound::Unbounded
}
} else {
last_block_id
}
} else {
last_block_id
Bound::Unbounded => Bound::Unbounded,
};
let end_bound = last_block_id
.and_then(|block_id| self.sstable_index.get_block(block_id))
.map(|block_addr| Bound::Excluded(block_addr.byte_range.end))
.unwrap_or(Bound::Unbounded);
self.sstable_slice.slice((start_bound, end_bound))
}
@@ -188,15 +156,10 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// Returns the ordinal associated with a given term.
pub fn term_ord<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TermOrdinal>> {
let mut term_ord = 0u64;
let key_bytes = key.as_ref();
let Some(block_addr) = self.sstable_index.get_block_with_key(key_bytes) else {
return Ok(None);
};
let mut term_ord = block_addr.first_ordinal;
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
while sstable_reader.advance()? {
let mut sstable_reader = self.sstable_reader()?;
while sstable_reader.advance().unwrap_or(false) {
if sstable_reader.key() == key_bytes {
return Ok(Some(term_ord));
}
@@ -215,32 +178,22 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// Regardless of whether the term is found or not,
/// the buffer may be modified.
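    ///
    /// A lookup sketch (hypothetical `dict` and ordinal):
    ///
    /// ```ignore
    /// let mut bytes = Vec::new();
    /// if dict.ord_to_term(42, &mut bytes)? {
    ///     // `bytes` now holds the key of term ordinal 42.
    /// }
    /// ```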
pub fn ord_to_term(&self, ord: TermOrdinal, bytes: &mut Vec<u8>) -> io::Result<bool> {
// find block in which the term would be
let block_addr = self.sstable_index.get_block_with_ord(ord);
let first_ordinal = block_addr.first_ordinal;
// then search inside that block only
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
for _ in first_ordinal..=ord {
if !sstable_reader.advance()? {
let mut sstable_reader = self.sstable_reader()?;
bytes.clear();
for _ in 0..(ord + 1) {
if !sstable_reader.advance().unwrap_or(false) {
return Ok(false);
}
}
bytes.clear();
bytes.extend_from_slice(sstable_reader.key());
Ok(true)
}
    /// Returns the value stored for the term with ordinal `term_ord`.
pub fn term_info_from_ord(&self, term_ord: TermOrdinal) -> io::Result<Option<TSSTable::Value>> {
// find block in which the term would be
let block_addr = self.sstable_index.get_block_with_ord(term_ord);
let first_ordinal = block_addr.first_ordinal;
// then search inside that block only
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
for _ in first_ordinal..=term_ord {
if !sstable_reader.advance()? {
let mut sstable_reader = self.sstable_reader()?;
for _ in 0..(term_ord + 1) {
if !sstable_reader.advance().unwrap_or(false) {
return Ok(None);
}
}
@@ -249,10 +202,10 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
    /// Looks up the value corresponding to the key.
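    ///
    /// A lookup sketch (hypothetical `dict` and key):
    ///
    /// ```ignore
    /// if let Some(value) = dict.get(b"hello")? {
    ///     // `value` is the stored value for that key.
    /// }
    /// ```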
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TSSTable::Value>> {
if let Some(block_addr) = self.sstable_index.get_block_with_key(key.as_ref()) {
if let Some(block_addr) = self.sstable_index.search_block(key.as_ref()) {
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
let key_bytes = key.as_ref();
while sstable_reader.advance()? {
while sstable_reader.advance().unwrap_or(false) {
if sstable_reader.key() == key_bytes {
let value = sstable_reader.value().clone();
return Ok(Some(value));
@@ -264,10 +217,10 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
    /// Looks up the value corresponding to the key.
pub async fn get_async<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TSSTable::Value>> {
if let Some(block_addr) = self.sstable_index.get_block_with_key(key.as_ref()) {
if let Some(block_addr) = self.sstable_index.search_block(key.as_ref()) {
let mut sstable_reader = self.sstable_reader_block_async(block_addr).await?;
let key_bytes = key.as_ref();
while sstable_reader.advance()? {
while sstable_reader.advance().unwrap_or(false) {
if sstable_reader.key() == key_bytes {
let value = sstable_reader.value().clone();
return Ok(Some(value));
@@ -306,192 +259,3 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::ops::Range;
use std::sync::{Arc, Mutex};
use common::OwnedBytes;
use super::Dictionary;
use crate::MonotonicU64SSTable;
#[derive(Debug)]
struct PermissionedHandle {
bytes: OwnedBytes,
allowed_range: Mutex<Range<usize>>,
}
impl PermissionedHandle {
fn new(bytes: Vec<u8>) -> Self {
let bytes = OwnedBytes::new(bytes);
PermissionedHandle {
allowed_range: Mutex::new(0..bytes.len()),
bytes,
}
}
fn restrict(&self, range: Range<usize>) {
*self.allowed_range.lock().unwrap() = range;
}
}
impl common::HasLen for PermissionedHandle {
fn len(&self) -> usize {
self.bytes.len()
}
}
impl common::file_slice::FileHandle for PermissionedHandle {
fn read_bytes(&self, range: Range<usize>) -> std::io::Result<OwnedBytes> {
let allowed_range = self.allowed_range.lock().unwrap();
if !allowed_range.contains(&range.start) || !allowed_range.contains(&(range.end - 1)) {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("invalid range, allowed {allowed_range:?}, requested {range:?}"),
));
}
Ok(self.bytes.slice(range))
}
}
fn make_test_sstable() -> (Dictionary<MonotonicU64SSTable>, Arc<PermissionedHandle>) {
let mut builder = Dictionary::<MonotonicU64SSTable>::builder(Vec::new()).unwrap();
// this makes 256k keys, enough to fill multiple blocks.
for elem in 0..0x3ffff {
let key = format!("{elem:05X}").into_bytes();
builder.insert_cannot_fail(&key, &elem);
}
let table = builder.finish().unwrap();
let table = Arc::new(PermissionedHandle::new(table));
let slice = common::file_slice::FileSlice::new(table.clone());
let dictionary = Dictionary::<MonotonicU64SSTable>::open(slice).unwrap();
// if the last block is id 0, tests are meaningless
assert_ne!(dictionary.sstable_index.locate_with_ord(u64::MAX), 0);
assert_eq!(dictionary.num_terms(), 0x3ffff);
(dictionary, table)
}
#[test]
fn test_ord_term_conversion() {
let (dic, slice) = make_test_sstable();
let block = dic.sstable_index.get_block_with_ord(100_000);
slice.restrict(block.byte_range);
let mut res = Vec::new();
// middle of a block
assert!(dic.ord_to_term(100_000, &mut res).unwrap());
assert_eq!(res, format!("{:05X}", 100_000).into_bytes());
assert_eq!(dic.term_info_from_ord(100_000).unwrap().unwrap(), 100_000);
assert_eq!(dic.get(&res).unwrap().unwrap(), 100_000);
assert_eq!(dic.term_ord(&res).unwrap().unwrap(), 100_000);
// start of a block
assert!(dic.ord_to_term(block.first_ordinal, &mut res).unwrap());
assert_eq!(res, format!("{:05X}", block.first_ordinal).into_bytes());
assert_eq!(
dic.term_info_from_ord(block.first_ordinal)
.unwrap()
.unwrap(),
block.first_ordinal
);
assert_eq!(dic.get(&res).unwrap().unwrap(), block.first_ordinal);
assert_eq!(dic.term_ord(&res).unwrap().unwrap(), block.first_ordinal);
// end of a block
let ordinal = block.first_ordinal - 1;
let new_range = dic.sstable_index.get_block_with_ord(ordinal).byte_range;
slice.restrict(new_range);
assert!(dic.ord_to_term(ordinal, &mut res).unwrap());
assert_eq!(res, format!("{:05X}", ordinal).into_bytes());
assert_eq!(dic.term_info_from_ord(ordinal).unwrap().unwrap(), ordinal);
assert_eq!(dic.get(&res).unwrap().unwrap(), ordinal);
assert_eq!(dic.term_ord(&res).unwrap().unwrap(), ordinal);
// before first block
// 1st block must be loaded for key-related operations
let block = dic.sstable_index.get_block_with_ord(0);
slice.restrict(block.byte_range);
assert!(dic.get(&b"$$$").unwrap().is_none());
assert!(dic.term_ord(&b"$$$").unwrap().is_none());
// after last block
// last block must be loaded for ord related operations
let ordinal = 0x40000 + 10;
let new_range = dic.sstable_index.get_block_with_ord(ordinal).byte_range;
slice.restrict(new_range);
assert!(!dic.ord_to_term(ordinal, &mut res).unwrap());
assert!(dic.term_info_from_ord(ordinal).unwrap().is_none());
// last block isn't required to be loaded for key related operations
slice.restrict(0..0);
assert!(dic.get(&b"~~~").unwrap().is_none());
assert!(dic.term_ord(&b"~~~").unwrap().is_none());
}
#[test]
fn test_range() {
let (dic, slice) = make_test_sstable();
let start = dic
.sstable_index
.get_block_with_key(b"10000")
.unwrap()
.byte_range;
let end = dic
.sstable_index
.get_block_with_key(b"18000")
.unwrap()
.byte_range;
slice.restrict(start.start..end.end);
let mut stream = dic.range().ge(b"10000").lt(b"18000").into_stream().unwrap();
for i in 0x10000..0x18000 {
assert!(stream.advance());
assert_eq!(stream.term_ord(), i);
assert_eq!(stream.value(), &i);
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
}
assert!(!stream.advance());
        // verify that limiting the number of results reduces the amount of data read
slice.restrict(start.start..(end.end - 1));
let mut stream = dic
.range()
.ge(b"10000")
.lt(b"18000")
.limit(0xfff)
.into_stream()
.unwrap();
for i in 0x10000..0x10fff {
assert!(stream.advance());
assert_eq!(stream.term_ord(), i);
assert_eq!(stream.value(), &i);
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
}
        // advancing may still succeed for more elements past the limit; how many is undefined
slice.restrict(0..slice.bytes.len());
let mut stream = dic.stream().unwrap();
for i in 0..0x3ffff {
assert!(stream.advance());
assert_eq!(stream.term_ord(), i);
assert_eq!(stream.value(), &i);
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
}
assert!(!stream.advance());
}
}

View File

@@ -3,7 +3,7 @@ use std::ops::Range;
use serde::{Deserialize, Serialize};
use crate::{common_prefix_len, SSTableDataCorruption, TermOrdinal};
use crate::{common_prefix_len, SSTableDataCorruption};
#[derive(Default, Debug, Serialize, Deserialize)]
pub struct SSTableIndex {
@@ -11,61 +11,15 @@ pub struct SSTableIndex {
}
impl SSTableIndex {
/// Load an index from its binary representation
pub fn load(data: &[u8]) -> Result<SSTableIndex, SSTableDataCorruption> {
ciborium::de::from_reader(data).map_err(|_| SSTableDataCorruption)
}
/// Get the [`BlockAddr`] of the requested block.
pub(crate) fn get_block(&self, block_id: usize) -> Option<BlockAddr> {
pub fn search_block(&self, key: &[u8]) -> Option<BlockAddr> {
self.blocks
.get(block_id)
.map(|block_meta| block_meta.block_addr.clone())
}
    /// Get the block id of the block that would contain `key`.
///
/// Returns None if `key` is lexicographically after the last key recorded.
pub(crate) fn locate_with_key(&self, key: &[u8]) -> Option<usize> {
let pos = self
.blocks
.binary_search_by_key(&key, |block| &block.last_key_or_greater);
match pos {
Ok(pos) => Some(pos),
Err(pos) => {
if pos < self.blocks.len() {
Some(pos)
} else {
// after end of last block: no block matches
None
}
}
}
}
/// Get the [`BlockAddr`] of the block that would contain `key`.
///
/// Returns None if `key` is lexicographically after the last key recorded.
pub fn get_block_with_key(&self, key: &[u8]) -> Option<BlockAddr> {
self.locate_with_key(key).and_then(|id| self.get_block(id))
}
pub(crate) fn locate_with_ord(&self, ord: TermOrdinal) -> usize {
let pos = self
.blocks
.binary_search_by_key(&ord, |block| block.block_addr.first_ordinal);
match pos {
Ok(pos) => pos,
// Err(0) can't happen as the sstable starts with ordinal zero
Err(pos) => pos - 1,
}
}
/// Get the [`BlockAddr`] of the block containing the `ord`-th term.
pub(crate) fn get_block_with_ord(&self, ord: TermOrdinal) -> BlockAddr {
// locate_with_ord always returns an index within range
self.get_block(self.locate_with_ord(ord)).unwrap()
.iter()
.find(|block| &block.last_key_or_greater[..] >= key)
.map(|block| block.block_addr.clone())
}
}
@@ -76,7 +30,7 @@ pub struct BlockAddr {
}
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct BlockMeta {
struct BlockMeta {
/// Any byte string that is lexicographically greater or equal to
/// the last key in the block,
/// and yet strictly smaller than the first key in the next block.
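    ///
    /// For example (a sketch): if a block's last key is `b"cat"` and the next
    /// block starts at `b"dog"`, then `b"cat"`, `b"cb"`, or `b"d"` are all
    /// valid choices for `last_key_or_greater`.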
@@ -144,38 +98,26 @@ mod tests {
fn test_sstable_index() {
let mut sstable_builder = SSTableIndexBuilder::default();
sstable_builder.add_block(b"aaa", 10..20, 0u64);
sstable_builder.add_block(b"bbbbbbb", 20..30, 5u64);
sstable_builder.add_block(b"bbbbbbb", 20..30, 564);
sstable_builder.add_block(b"ccc", 30..40, 10u64);
sstable_builder.add_block(b"dddd", 40..50, 15u64);
let mut buffer: Vec<u8> = Vec::new();
sstable_builder.serialize(&mut buffer).unwrap();
let sstable_index = SSTableIndex::load(&buffer[..]).unwrap();
assert_eq!(
sstable_index.get_block_with_key(b"bbbde"),
sstable_index.search_block(b"bbbde"),
Some(BlockAddr {
first_ordinal: 10u64,
byte_range: 30..40
})
);
assert_eq!(sstable_index.locate_with_key(b"aa").unwrap(), 0);
assert_eq!(sstable_index.locate_with_key(b"aaa").unwrap(), 0);
assert_eq!(sstable_index.locate_with_key(b"aab").unwrap(), 1);
assert_eq!(sstable_index.locate_with_key(b"ccc").unwrap(), 2);
assert!(sstable_index.locate_with_key(b"e").is_none());
assert_eq!(sstable_index.locate_with_ord(0), 0);
assert_eq!(sstable_index.locate_with_ord(1), 0);
assert_eq!(sstable_index.locate_with_ord(4), 0);
assert_eq!(sstable_index.locate_with_ord(5), 1);
assert_eq!(sstable_index.locate_with_ord(100), 3);
}
#[test]
fn test_sstable_with_corrupted_data() {
let mut sstable_builder = SSTableIndexBuilder::default();
sstable_builder.add_block(b"aaa", 10..20, 0u64);
sstable_builder.add_block(b"bbbbbbb", 20..30, 5u64);
sstable_builder.add_block(b"bbbbbbb", 20..30, 564);
sstable_builder.add_block(b"ccc", 30..40, 10u64);
sstable_builder.add_block(b"dddd", 40..50, 15u64);
let mut buffer: Vec<u8> = Vec::new();

View File

@@ -19,7 +19,6 @@ where
automaton: A,
lower: Bound<Vec<u8>>,
upper: Bound<Vec<u8>>,
limit: Option<u64>,
}
fn bound_as_byte_slice(bound: &Bound<Vec<u8>>) -> Bound<&[u8]> {
@@ -42,7 +41,6 @@ where
automaton,
lower: Bound::Unbounded,
upper: Bound::Unbounded,
limit: None,
}
}
@@ -70,46 +68,24 @@ where
self
}
    /// Load no more data than what's required to get `limit`
    /// matching entries.
    ///
    /// The resulting [`Streamer`] can still return marginally
    /// more than `limit` elements.
pub fn limit(mut self, limit: u64) -> Self {
self.limit = Some(limit);
self
}
/// Creates the stream corresponding to the range
/// of terms defined using the `StreamerBuilder`.
pub fn into_stream(self) -> io::Result<Streamer<'a, TSSTable, A>> {
// TODO Optimize by skipping to the right first block.
let start_state = self.automaton.start();
let key_range = (
bound_as_byte_slice(&self.lower),
bound_as_byte_slice(&self.upper),
);
let first_term = match &key_range.0 {
Bound::Included(key) | Bound::Excluded(key) => self
.term_dict
.sstable_index
.get_block_with_key(key)
.map(|block| block.first_ordinal)
.unwrap_or(0),
Bound::Unbounded => 0,
};
let delta_reader = self
.term_dict
.sstable_delta_reader_for_key_range(key_range, self.limit)?;
.sstable_delta_reader_for_key_range(key_range)?;
Ok(Streamer {
automaton: self.automaton,
states: vec![start_state],
delta_reader,
key: Vec::new(),
term_ord: first_term.checked_sub(1),
term_ord: None,
lower_bound: self.lower,
upper_bound: self.upper,
})

View File

@@ -16,7 +16,7 @@ pub trait ValueReader: Default {
/// Loads a block.
///
/// Returns the number of bytes that were read.
/// Returns the number of bytes that were written.
fn load(&mut self, data: &[u8]) -> io::Result<usize>;
}