Compare commits

...

20 Commits

Author SHA1 Message Date
Pascal Seitz
806a1e1b1e clarify tokenizer docs 2023-04-03 22:59:38 +08:00
PSeitz
5c4ea6a708 tokenizer option on text fastfield (#1945)
* tokenizer option on text fastfield

allow setting the tokenizer option on text fastfields (fixes #1901); see the usage sketch after this entry
handle PreTokenized strings in fast field

* change visibility

* remove custom de/serialization
2023-03-31 10:03:38 +02:00
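For context, a minimal sketch of what this option looks like at the schema level. This is an assumption about the API shape introduced here (a `set_fast` method taking an optional tokenizer name), not code taken from the change itself; exact names may differ.

    use tantivy::schema::{Schema, TextOptions};

    fn main() {
        let mut schema_builder = Schema::builder();
        // Assumed API: choose the tokenizer used for the text fast field.
        // Some("raw") would keep the whole string as a single token.
        let opts = TextOptions::default().set_fast(Some("raw"));
        schema_builder.add_text_field("category", opts);
        let _schema = schema_builder.build();
    }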
PSeitz
4cf93dab7d fix build (#1973) 2023-03-31 13:54:03 +09:00
PSeitz
5c380b76e7 Better mixed types support in aggs and fix serialization issue (#1971)
* Better mixed types support in aggs and fix serialization issue

- Improve support for mixed types in JSON field aggregations (pick the right field, #1913)
- Resolve the issue with JSON serialization for numeric keys (fixes #1967)
- Add JSON round-trip test for term buckets
- Remove `u64_lenient`, as this is a footgun without the type
- move aggregation benchmarks

* remove shadowing
2023-03-31 05:52:11 +02:00
PSeitz
571735c5f7 Fix index sort by on optional/multicolumn (#1972)
Fix index sort by on optional/multicolumn
add optional columns to proptest
extend proptests for sort
add columnar sort tests
2023-03-31 04:24:11 +02:00
zhouhui
8e92f960d3 Fix comment: change max_merge_size to max_docs_before_merge. (#1970) 2023-03-28 22:49:00 +09:00
Paul Masurel
057211c3d8 Fixing build on arm (#1966) 2023-03-27 22:42:57 +09:00
Paul Masurel
059fc767ea Added ::MIN ::MAX DateTime. (#1965) 2023-03-27 15:32:53 +09:00
Paul Masurel
694a056255 Faster range (#1954)
* Faster range queries

This PR makes several changes:
- ip compact space now uses u32
- the bitunpacker now gets a get_batch function
- we push down range filtering, removing GCD / shift in the bitpacking
  codec.
- we rely on an AVX2 routine to do the filtering (see the sketch after this entry).

* Apply suggestions from code review

* Apply suggestions from code review

* CR comments
2023-03-27 14:56:32 +09:00
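As a side note on the AVX2 filtering in this PR: the comparisons available in AVX2 are signed, so unsigned values are mapped onto i32 by flipping the sign bit, which preserves order. A standalone sketch (not part of the PR) of why this mapping works:

    // Flipping the most significant bit maps unsigned order onto signed order:
    // 0 -> i32::MIN, u32::MAX -> i32::MAX, and a <= b iff map(a) <= map(b).
    fn u32_to_i32(val: u32) -> i32 {
        (val ^ (1u32 << 31)) as i32
    }

    fn main() {
        let vals = [0u32, 1, 7, i32::MAX as u32, i32::MAX as u32 + 1, u32::MAX];
        let mapped: Vec<i32> = vals.iter().map(|&v| u32_to_i32(v)).collect();
        // The mapped values stay sorted because the original values are sorted.
        assert!(mapped.windows(2).all(|w| w[0] <= w[1]));
        println!("{:?}", mapped);
    }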
Paul Masurel
2955e34452 Added proptests for building/merging columnar. (#1963) 2023-03-27 14:56:02 +09:00
Paul Masurel
821208480b Adding Debug/Display impl. Refining the ColumnIndex::get_cardinality 2023-03-26 14:40:37 +09:00
Paul Masurel
a2e3c2ed5b Renaming Column::idx -> Column::index (#1961)
There was some variable name ghosting happening.
2023-03-26 13:58:50 +09:00
PSeitz
835f228bfa fix cardinality when merging empty columns (#1960)
fixes #1958
2023-03-25 15:58:15 +09:00
Paul Masurel
2b6a4da640 Exposing empty column builder. (#1959) 2023-03-24 16:34:41 +09:00
PSeitz
d6a95381ee add memory check for term agg (#1957) 2023-03-24 06:47:45 +01:00
PSeitz
da2804644f fetch blocks of vals in aggregation for all cardinality (#1950)
* fetch blocks of vals in aggregation for all cardinality

* move caching in common accessor
2023-03-23 08:41:11 +01:00
PSeitz
5504cfd012 remove IterColumn (#1955)
fixes #1658
2023-03-23 06:43:17 +01:00
trinity-1686a
482b4155e8 fix bug with new sstable index format (#1953) 2023-03-22 10:22:36 +01:00
Till Wegmüller
1a35f6573d Switch from fs2 to fs4, as fs2 is now unmaintained and does not support illumos (#1944)
Signed-off-by: Till Wegmueller <toasterson@gmail.com>
2023-03-22 13:48:49 +09:00
trinity-1686a
e5e50603a8 new sstable format (#1943)
* document a new sstable format

* add support for changing target block size

* use new format for sstable index

* handle sstable version error

* use very small blocks for proptests

* add a footer structure
2023-03-21 15:03:52 +01:00
70 changed files with 3700 additions and 1277 deletions

View File

@@ -32,7 +32,7 @@ log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
-fs2 = { version = "0.4.3", optional = true }
+fs4 = { version = "0.6.3", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
@@ -94,7 +94,7 @@ overflow-checks = true
[features]
default = ["mmap", "stopwords", "lz4-compression"]
-mmap = ["fs2", "tempfile", "memmap2"]
+mmap = ["fs4", "tempfile", "memmap2"]
stopwords = []
brotli-compression = ["brotli"]

View File

@@ -15,6 +15,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
+bitpacking = {version="0.8", default-features=false, features = ["bitpacker1x"]}
[dev-dependencies]
rand = "0.8"

View File

@@ -1,10 +1,14 @@
use std::convert::TryInto;
use std::io;
+use std::ops::{Range, RangeInclusive};
+use bitpacking::{BitPacker as ExternalBitPackerTrait, BitPacker1x};
pub struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
}
impl Default for BitPacker {
fn default() -> Self {
BitPacker::new()
@@ -118,6 +122,125 @@ impl BitUnpacker {
let val_shifted = val_unshifted_unmasked >> bit_shift;
val_shifted & self.mask
}
// Decodes the range of bitpacked `u32` values with idx
// in [start_idx, start_idx + output.len()).
//
// # Panics
//
// This method panics if `num_bits` is > 32.
fn get_batch_u32s(&self, start_idx: u32, data: &[u8], output: &mut [u32]) {
assert!(
self.bit_width() <= 32,
"Bitwidth must be <= 32 to use this method."
);
let end_idx = start_idx + output.len() as u32;
let end_bit_read = end_idx * self.num_bits;
let end_byte_read = (end_bit_read + 7) / 8;
assert!(
end_byte_read as usize <= data.len(),
"Requested index is out of bounds."
);
// Simple slow implementation of get_batch_u32s, to deal with our ramps.
let get_batch_ramp = |start_idx: u32, output: &mut [u32]| {
for (out, idx) in output.iter_mut().zip(start_idx..) {
*out = self.get(idx, data) as u32;
}
};
// We use an unrolled routine to decode 32 values at once.
// We therefore decompose our range of values to decode into three ranges:
// - Entrance ramp: [start_idx, fast_track_start) (up to 31 values)
// - Highway: [fast_track_start, fast_track_end) (a length multiple of 32s)
// - Exit ramp: [fast_track_end, start_idx + output.len()) (up to 31 values)
// We want the start of the fast track to be aligned with a byte boundary.
// A sufficient condition is to start with an idx that is a multiple of 8,
// so highway start is the closest multiple of 8 that is >= start_idx.
let entrance_ramp_len = 8 - (start_idx % 8) % 8;
let highway_start: u32 = start_idx + entrance_ramp_len;
if highway_start + BitPacker1x::BLOCK_LEN as u32 > end_idx {
// We don't have enough values to have even a single block of highway.
// Let's just supply the values the simple way.
get_batch_ramp(start_idx, output);
return;
}
let num_blocks: u32 = (end_idx - highway_start) / BitPacker1x::BLOCK_LEN as u32;
// Entrance ramp
get_batch_ramp(start_idx, &mut output[..entrance_ramp_len as usize]);
// Highway
let mut offset = (highway_start * self.num_bits) as usize / 8;
let mut output_cursor = (highway_start - start_idx) as usize;
for _ in 0..num_blocks {
offset += BitPacker1x.decompress(
&data[offset..],
&mut output[output_cursor..],
self.num_bits as u8,
);
output_cursor += 32;
}
// Exit ramp
let highway_end = highway_start + num_blocks * BitPacker1x::BLOCK_LEN as u32;
get_batch_ramp(highway_end, &mut output[output_cursor..]);
}
pub fn get_ids_for_value_range(
&self,
range: RangeInclusive<u64>,
id_range: Range<u32>,
data: &[u8],
positions: &mut Vec<u32>,
) {
if self.bit_width() > 32 {
self.get_ids_for_value_range_slow(range, id_range, data, positions)
} else {
if *range.start() > u32::MAX as u64 {
positions.clear();
return;
}
let range_u32 = (*range.start() as u32)..=(*range.end()).min(u32::MAX as u64) as u32;
self.get_ids_for_value_range_fast(range_u32, id_range, data, positions)
}
}
fn get_ids_for_value_range_slow(
&self,
range: RangeInclusive<u64>,
id_range: Range<u32>,
data: &[u8],
positions: &mut Vec<u32>,
) {
positions.clear();
for i in id_range {
// If we cared we could make this branchless, but the slow implementation should rarely
// kick in.
let val = self.get(i, data);
if range.contains(&val) {
positions.push(i);
}
}
}
fn get_ids_for_value_range_fast(
&self,
value_range: RangeInclusive<u32>,
id_range: Range<u32>,
data: &[u8],
positions: &mut Vec<u32>,
) {
positions.resize(id_range.len(), 0u32);
self.get_batch_u32s(id_range.start, data, positions);
crate::filter_vec::filter_vec_in_place(value_range, id_range.start, positions)
}
}
#[cfg(test)]
@@ -200,4 +323,58 @@ mod test {
test_bitpacker_aux(num_bits, &vals);
}
}
#[test]
#[should_panic]
fn test_get_batch_panics_over_32_bits() {
let bitunpacker = BitUnpacker::new(33);
let mut output: [u32; 1] = [0u32];
bitunpacker.get_batch_u32s(0, &[0, 0, 0, 0, 0, 0, 0, 0], &mut output[..]);
}
#[test]
fn test_get_batch_limit() {
let bitunpacker = BitUnpacker::new(1);
let mut output: [u32; 3] = [0u32, 0u32, 0u32];
bitunpacker.get_batch_u32s(8 * 4 - 3, &[0u8, 0u8, 0u8, 0u8], &mut output[..]);
}
#[test]
#[should_panic]
fn test_get_batch_panics_when_off_scope() {
let bitunpacker = BitUnpacker::new(1);
let mut output: [u32; 3] = [0u32, 0u32, 0u32];
// We are missing exactly one bit.
bitunpacker.get_batch_u32s(8 * 4 - 2, &[0u8, 0u8, 0u8, 0u8], &mut output[..]);
}
proptest::proptest! {
#[test]
fn test_get_batch_u32s_proptest(num_bits in 0u8..=32u8) {
let mask =
if num_bits == 32u8 {
u32::MAX
} else {
(1u32 << num_bits) - 1
};
let mut buffer: Vec<u8> = Vec::new();
let mut bitpacker = BitPacker::new();
for val in 0..100 {
bitpacker.write(val & mask as u64, num_bits, &mut buffer).unwrap();
}
bitpacker.flush(&mut buffer).unwrap();
let bitunpacker = BitUnpacker::new(num_bits);
let mut output: Vec<u32> = Vec::new();
for len in [0, 1, 2, 32, 33, 34, 64] {
for start_idx in 0u32..32u32 {
output.resize(len as usize, 0);
bitunpacker.get_batch_u32s(start_idx, &buffer, &mut output);
for i in 0..len {
let expected = (start_idx + i as u32) & mask;
assert_eq!(output[i], expected);
}
}
}
}
}
}

View File

@@ -0,0 +1,365 @@
//! SIMD filtering of a vector as described in the following blog post.
//! https://quickwit.io/blog/filtering%20a%20vector%20with%20simd%20instructions%20avx-2%20and%20avx-512
use std::arch::x86_64::{
__m256i as DataType, _mm256_add_epi32 as op_add, _mm256_cmpgt_epi32 as op_greater,
_mm256_lddqu_si256 as load_unaligned, _mm256_or_si256 as op_or, _mm256_set1_epi32 as set1,
_mm256_storeu_si256 as store_unaligned, _mm256_xor_si256 as op_xor, *,
};
use std::ops::RangeInclusive;
const NUM_LANES: usize = 8;
const HIGHEST_BIT: u32 = 1 << 31;
#[inline]
fn u32_to_i32(val: u32) -> i32 {
(val ^ HIGHEST_BIT) as i32
}
#[inline]
unsafe fn u32_to_i32_avx2(vals_u32x8s: DataType) -> DataType {
const HIGHEST_BIT_MASK: DataType = from_u32x8([HIGHEST_BIT; NUM_LANES]);
op_xor(vals_u32x8s, HIGHEST_BIT_MASK)
}
pub fn filter_vec_in_place(range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
// We use a monotonic mapping from u32 to i32 to make the comparison possible in AVX2.
let range_i32: RangeInclusive<i32> = u32_to_i32(*range.start())..=u32_to_i32(*range.end());
let num_words = output.len() / NUM_LANES;
let mut output_len = unsafe {
filter_vec_avx2_aux(
output.as_ptr() as *const __m256i,
range_i32,
output.as_mut_ptr(),
offset,
num_words,
)
};
let reminder_start = num_words * NUM_LANES;
for i in reminder_start..output.len() {
let val = output[i];
output[output_len] = offset + i as u32;
output_len += if range.contains(&val) { 1 } else { 0 };
}
output.truncate(output_len);
}
#[target_feature(enable = "avx2")]
unsafe fn filter_vec_avx2_aux(
mut input: *const __m256i,
range: RangeInclusive<i32>,
output: *mut u32,
offset: u32,
num_words: usize,
) -> usize {
let mut output_tail = output;
let range_simd = set1(*range.start())..=set1(*range.end());
let mut ids = from_u32x8([
offset,
offset + 1,
offset + 2,
offset + 3,
offset + 4,
offset + 5,
offset + 6,
offset + 7,
]);
const SHIFT: __m256i = from_u32x8([NUM_LANES as u32; NUM_LANES]);
for _ in 0..num_words {
let word = load_unaligned(input);
let word = u32_to_i32_avx2(word);
let keeper_bitset = compute_filter_bitset(word, range_simd.clone());
let added_len = keeper_bitset.count_ones();
let filtered_doc_ids = compact(ids, keeper_bitset);
store_unaligned(output_tail as *mut __m256i, filtered_doc_ids);
output_tail = output_tail.offset(added_len as isize);
ids = op_add(ids, SHIFT);
input = input.offset(1);
}
output_tail.offset_from(output) as usize
}
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn compact(data: DataType, mask: u8) -> DataType {
let vperm_mask = MASK_TO_PERMUTATION[mask as usize];
_mm256_permutevar8x32_epi32(data, vperm_mask)
}
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn compute_filter_bitset(val: __m256i, range: std::ops::RangeInclusive<__m256i>) -> u8 {
let too_low = op_greater(*range.start(), val);
let too_high = op_greater(val, *range.end());
let inside = op_or(too_low, too_high);
255 - std::arch::x86_64::_mm256_movemask_ps(std::mem::transmute::<DataType, __m256>(inside))
as u8
}
union U8x32 {
vector: DataType,
vals: [u32; NUM_LANES],
}
const fn from_u32x8(vals: [u32; NUM_LANES]) -> DataType {
unsafe { U8x32 { vals }.vector }
}
const MASK_TO_PERMUTATION: [DataType; 256] = [
from_u32x8([0, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([1, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 0, 0, 0, 0, 0, 0]),
from_u32x8([2, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 0, 0, 0, 0, 0, 0]),
from_u32x8([1, 2, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 0, 0, 0, 0, 0]),
from_u32x8([3, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 0, 0, 0, 0, 0, 0]),
from_u32x8([1, 3, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 0, 0, 0, 0, 0]),
from_u32x8([2, 3, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 0, 0, 0, 0, 0]),
from_u32x8([1, 2, 3, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 0, 0, 0, 0]),
from_u32x8([4, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 4, 0, 0, 0, 0, 0, 0]),
from_u32x8([1, 4, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 4, 0, 0, 0, 0, 0]),
from_u32x8([2, 4, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 4, 0, 0, 0, 0, 0]),
from_u32x8([1, 2, 4, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 4, 0, 0, 0, 0]),
from_u32x8([3, 4, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 4, 0, 0, 0, 0, 0]),
from_u32x8([1, 3, 4, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 4, 0, 0, 0, 0]),
from_u32x8([2, 3, 4, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 4, 0, 0, 0, 0]),
from_u32x8([1, 2, 3, 4, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 4, 0, 0, 0]),
from_u32x8([5, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 5, 0, 0, 0, 0, 0, 0]),
from_u32x8([1, 5, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 5, 0, 0, 0, 0, 0]),
from_u32x8([2, 5, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 5, 0, 0, 0, 0, 0]),
from_u32x8([1, 2, 5, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 5, 0, 0, 0, 0]),
from_u32x8([3, 5, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 5, 0, 0, 0, 0, 0]),
from_u32x8([1, 3, 5, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 5, 0, 0, 0, 0]),
from_u32x8([2, 3, 5, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 5, 0, 0, 0, 0]),
from_u32x8([1, 2, 3, 5, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 5, 0, 0, 0]),
from_u32x8([4, 5, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 4, 5, 0, 0, 0, 0, 0]),
from_u32x8([1, 4, 5, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 4, 5, 0, 0, 0, 0]),
from_u32x8([2, 4, 5, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 4, 5, 0, 0, 0, 0]),
from_u32x8([1, 2, 4, 5, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 4, 5, 0, 0, 0]),
from_u32x8([3, 4, 5, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 4, 5, 0, 0, 0, 0]),
from_u32x8([1, 3, 4, 5, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 4, 5, 0, 0, 0]),
from_u32x8([2, 3, 4, 5, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 4, 5, 0, 0, 0]),
from_u32x8([1, 2, 3, 4, 5, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 4, 5, 0, 0]),
from_u32x8([6, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 6, 0, 0, 0, 0, 0, 0]),
from_u32x8([1, 6, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 6, 0, 0, 0, 0, 0]),
from_u32x8([2, 6, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 6, 0, 0, 0, 0, 0]),
from_u32x8([1, 2, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 6, 0, 0, 0, 0]),
from_u32x8([3, 6, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 6, 0, 0, 0, 0, 0]),
from_u32x8([1, 3, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 6, 0, 0, 0, 0]),
from_u32x8([2, 3, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 6, 0, 0, 0, 0]),
from_u32x8([1, 2, 3, 6, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 6, 0, 0, 0]),
from_u32x8([4, 6, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 4, 6, 0, 0, 0, 0, 0]),
from_u32x8([1, 4, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 4, 6, 0, 0, 0, 0]),
from_u32x8([2, 4, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 4, 6, 0, 0, 0, 0]),
from_u32x8([1, 2, 4, 6, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 4, 6, 0, 0, 0]),
from_u32x8([3, 4, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 4, 6, 0, 0, 0, 0]),
from_u32x8([1, 3, 4, 6, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 4, 6, 0, 0, 0]),
from_u32x8([2, 3, 4, 6, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 4, 6, 0, 0, 0]),
from_u32x8([1, 2, 3, 4, 6, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 4, 6, 0, 0]),
from_u32x8([5, 6, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 5, 6, 0, 0, 0, 0, 0]),
from_u32x8([1, 5, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 5, 6, 0, 0, 0, 0]),
from_u32x8([2, 5, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 5, 6, 0, 0, 0, 0]),
from_u32x8([1, 2, 5, 6, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 5, 6, 0, 0, 0]),
from_u32x8([3, 5, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 5, 6, 0, 0, 0, 0]),
from_u32x8([1, 3, 5, 6, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 5, 6, 0, 0, 0]),
from_u32x8([2, 3, 5, 6, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 5, 6, 0, 0, 0]),
from_u32x8([1, 2, 3, 5, 6, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 5, 6, 0, 0]),
from_u32x8([4, 5, 6, 0, 0, 0, 0, 0]),
from_u32x8([0, 4, 5, 6, 0, 0, 0, 0]),
from_u32x8([1, 4, 5, 6, 0, 0, 0, 0]),
from_u32x8([0, 1, 4, 5, 6, 0, 0, 0]),
from_u32x8([2, 4, 5, 6, 0, 0, 0, 0]),
from_u32x8([0, 2, 4, 5, 6, 0, 0, 0]),
from_u32x8([1, 2, 4, 5, 6, 0, 0, 0]),
from_u32x8([0, 1, 2, 4, 5, 6, 0, 0]),
from_u32x8([3, 4, 5, 6, 0, 0, 0, 0]),
from_u32x8([0, 3, 4, 5, 6, 0, 0, 0]),
from_u32x8([1, 3, 4, 5, 6, 0, 0, 0]),
from_u32x8([0, 1, 3, 4, 5, 6, 0, 0]),
from_u32x8([2, 3, 4, 5, 6, 0, 0, 0]),
from_u32x8([0, 2, 3, 4, 5, 6, 0, 0]),
from_u32x8([1, 2, 3, 4, 5, 6, 0, 0]),
from_u32x8([0, 1, 2, 3, 4, 5, 6, 0]),
from_u32x8([7, 0, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 7, 0, 0, 0, 0, 0, 0]),
from_u32x8([1, 7, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 7, 0, 0, 0, 0, 0]),
from_u32x8([2, 7, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 7, 0, 0, 0, 0, 0]),
from_u32x8([1, 2, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 7, 0, 0, 0, 0]),
from_u32x8([3, 7, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 7, 0, 0, 0, 0, 0]),
from_u32x8([1, 3, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 7, 0, 0, 0, 0]),
from_u32x8([2, 3, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 7, 0, 0, 0, 0]),
from_u32x8([1, 2, 3, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 7, 0, 0, 0]),
from_u32x8([4, 7, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 4, 7, 0, 0, 0, 0, 0]),
from_u32x8([1, 4, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 4, 7, 0, 0, 0, 0]),
from_u32x8([2, 4, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 4, 7, 0, 0, 0, 0]),
from_u32x8([1, 2, 4, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 4, 7, 0, 0, 0]),
from_u32x8([3, 4, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 4, 7, 0, 0, 0, 0]),
from_u32x8([1, 3, 4, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 4, 7, 0, 0, 0]),
from_u32x8([2, 3, 4, 7, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 4, 7, 0, 0, 0]),
from_u32x8([1, 2, 3, 4, 7, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 4, 7, 0, 0]),
from_u32x8([5, 7, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 5, 7, 0, 0, 0, 0, 0]),
from_u32x8([1, 5, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 5, 7, 0, 0, 0, 0]),
from_u32x8([2, 5, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 5, 7, 0, 0, 0, 0]),
from_u32x8([1, 2, 5, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 5, 7, 0, 0, 0]),
from_u32x8([3, 5, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 5, 7, 0, 0, 0, 0]),
from_u32x8([1, 3, 5, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 5, 7, 0, 0, 0]),
from_u32x8([2, 3, 5, 7, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 5, 7, 0, 0, 0]),
from_u32x8([1, 2, 3, 5, 7, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 5, 7, 0, 0]),
from_u32x8([4, 5, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 4, 5, 7, 0, 0, 0, 0]),
from_u32x8([1, 4, 5, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 4, 5, 7, 0, 0, 0]),
from_u32x8([2, 4, 5, 7, 0, 0, 0, 0]),
from_u32x8([0, 2, 4, 5, 7, 0, 0, 0]),
from_u32x8([1, 2, 4, 5, 7, 0, 0, 0]),
from_u32x8([0, 1, 2, 4, 5, 7, 0, 0]),
from_u32x8([3, 4, 5, 7, 0, 0, 0, 0]),
from_u32x8([0, 3, 4, 5, 7, 0, 0, 0]),
from_u32x8([1, 3, 4, 5, 7, 0, 0, 0]),
from_u32x8([0, 1, 3, 4, 5, 7, 0, 0]),
from_u32x8([2, 3, 4, 5, 7, 0, 0, 0]),
from_u32x8([0, 2, 3, 4, 5, 7, 0, 0]),
from_u32x8([1, 2, 3, 4, 5, 7, 0, 0]),
from_u32x8([0, 1, 2, 3, 4, 5, 7, 0]),
from_u32x8([6, 7, 0, 0, 0, 0, 0, 0]),
from_u32x8([0, 6, 7, 0, 0, 0, 0, 0]),
from_u32x8([1, 6, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 1, 6, 7, 0, 0, 0, 0]),
from_u32x8([2, 6, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 2, 6, 7, 0, 0, 0, 0]),
from_u32x8([1, 2, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 2, 6, 7, 0, 0, 0]),
from_u32x8([3, 6, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 3, 6, 7, 0, 0, 0, 0]),
from_u32x8([1, 3, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 3, 6, 7, 0, 0, 0]),
from_u32x8([2, 3, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 2, 3, 6, 7, 0, 0, 0]),
from_u32x8([1, 2, 3, 6, 7, 0, 0, 0]),
from_u32x8([0, 1, 2, 3, 6, 7, 0, 0]),
from_u32x8([4, 6, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 4, 6, 7, 0, 0, 0, 0]),
from_u32x8([1, 4, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 4, 6, 7, 0, 0, 0]),
from_u32x8([2, 4, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 2, 4, 6, 7, 0, 0, 0]),
from_u32x8([1, 2, 4, 6, 7, 0, 0, 0]),
from_u32x8([0, 1, 2, 4, 6, 7, 0, 0]),
from_u32x8([3, 4, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 3, 4, 6, 7, 0, 0, 0]),
from_u32x8([1, 3, 4, 6, 7, 0, 0, 0]),
from_u32x8([0, 1, 3, 4, 6, 7, 0, 0]),
from_u32x8([2, 3, 4, 6, 7, 0, 0, 0]),
from_u32x8([0, 2, 3, 4, 6, 7, 0, 0]),
from_u32x8([1, 2, 3, 4, 6, 7, 0, 0]),
from_u32x8([0, 1, 2, 3, 4, 6, 7, 0]),
from_u32x8([5, 6, 7, 0, 0, 0, 0, 0]),
from_u32x8([0, 5, 6, 7, 0, 0, 0, 0]),
from_u32x8([1, 5, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 1, 5, 6, 7, 0, 0, 0]),
from_u32x8([2, 5, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 2, 5, 6, 7, 0, 0, 0]),
from_u32x8([1, 2, 5, 6, 7, 0, 0, 0]),
from_u32x8([0, 1, 2, 5, 6, 7, 0, 0]),
from_u32x8([3, 5, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 3, 5, 6, 7, 0, 0, 0]),
from_u32x8([1, 3, 5, 6, 7, 0, 0, 0]),
from_u32x8([0, 1, 3, 5, 6, 7, 0, 0]),
from_u32x8([2, 3, 5, 6, 7, 0, 0, 0]),
from_u32x8([0, 2, 3, 5, 6, 7, 0, 0]),
from_u32x8([1, 2, 3, 5, 6, 7, 0, 0]),
from_u32x8([0, 1, 2, 3, 5, 6, 7, 0]),
from_u32x8([4, 5, 6, 7, 0, 0, 0, 0]),
from_u32x8([0, 4, 5, 6, 7, 0, 0, 0]),
from_u32x8([1, 4, 5, 6, 7, 0, 0, 0]),
from_u32x8([0, 1, 4, 5, 6, 7, 0, 0]),
from_u32x8([2, 4, 5, 6, 7, 0, 0, 0]),
from_u32x8([0, 2, 4, 5, 6, 7, 0, 0]),
from_u32x8([1, 2, 4, 5, 6, 7, 0, 0]),
from_u32x8([0, 1, 2, 4, 5, 6, 7, 0]),
from_u32x8([3, 4, 5, 6, 7, 0, 0, 0]),
from_u32x8([0, 3, 4, 5, 6, 7, 0, 0]),
from_u32x8([1, 3, 4, 5, 6, 7, 0, 0]),
from_u32x8([0, 1, 3, 4, 5, 6, 7, 0]),
from_u32x8([2, 3, 4, 5, 6, 7, 0, 0]),
from_u32x8([0, 2, 3, 4, 5, 6, 7, 0]),
from_u32x8([1, 2, 3, 4, 5, 6, 7, 0]),
from_u32x8([0, 1, 2, 3, 4, 5, 6, 7]),
];
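The table above is mechanical: for each 8-bit keep-mask, the entry lists the lane indices of the set bits in ascending order, padded with zeros. A small standalone sketch (not part of the commit) that reproduces the same 256 entries at runtime:

    // For each possible 8-lane keep-mask, list the indices of the kept lanes
    // in ascending order and pad the remaining slots with 0.
    fn permutation_for_mask(mask: u8) -> [u32; 8] {
        let mut perm = [0u32; 8];
        let mut slot = 0;
        for lane in 0..8u32 {
            if mask & (1 << lane) != 0 {
                perm[slot] = lane;
                slot += 1;
            }
        }
        perm
    }

    fn main() {
        // Matches e.g. the table entry for mask 0b0000_0101 above.
        assert_eq!(permutation_for_mask(0b0000_0101), [0, 2, 0, 0, 0, 0, 0, 0]);
        for mask in 0u16..256 {
            let _entry = permutation_for_mask(mask as u8);
        }
    }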

View File

@@ -0,0 +1,165 @@
use std::ops::RangeInclusive;
#[cfg(any(target_arch = "x86_64"))]
mod avx2;
mod scalar;
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
#[repr(u8)]
enum FilterImplPerInstructionSet {
#[cfg(target_arch = "x86_64")]
AVX2 = 0u8,
Scalar = 1u8,
}
impl FilterImplPerInstructionSet {
#[inline]
pub fn is_available(&self) -> bool {
match *self {
#[cfg(target_arch = "x86_64")]
FilterImplPerInstructionSet::AVX2 => is_x86_feature_detected!("avx2"),
FilterImplPerInstructionSet::Scalar => true,
}
}
}
// List of available implementations in preferred order.
#[cfg(target_arch = "x86_64")]
const IMPLS: [FilterImplPerInstructionSet; 2] = [
FilterImplPerInstructionSet::AVX2,
FilterImplPerInstructionSet::Scalar,
];
#[cfg(not(target_arch = "x86_64"))]
const IMPLS: [FilterImplPerInstructionSet; 1] = [FilterImplPerInstructionSet::Scalar];
impl FilterImplPerInstructionSet {
#[allow(unused_variables)]
#[inline]
fn from(code: u8) -> FilterImplPerInstructionSet {
#[cfg(target_arch = "x86_64")]
if code == FilterImplPerInstructionSet::AVX2 as u8 {
return FilterImplPerInstructionSet::AVX2;
}
FilterImplPerInstructionSet::Scalar
}
#[inline]
fn filter_vec_in_place(self, range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
match self {
#[cfg(target_arch = "x86_64")]
FilterImplPerInstructionSet::AVX2 => avx2::filter_vec_in_place(range, offset, output),
FilterImplPerInstructionSet::Scalar => {
scalar::filter_vec_in_place(range, offset, output)
}
}
}
}
#[inline]
fn get_best_available_instruction_set() -> FilterImplPerInstructionSet {
use std::sync::atomic::{AtomicU8, Ordering};
static INSTRUCTION_SET_BYTE: AtomicU8 = AtomicU8::new(u8::MAX);
let instruction_set_byte: u8 = INSTRUCTION_SET_BYTE.load(Ordering::Relaxed);
if instruction_set_byte == u8::MAX {
// Let's initialize the instruction set and cache it.
let instruction_set = IMPLS
.into_iter()
.find(FilterImplPerInstructionSet::is_available)
.unwrap();
INSTRUCTION_SET_BYTE.store(instruction_set as u8, Ordering::Relaxed);
return instruction_set;
}
FilterImplPerInstructionSet::from(instruction_set_byte)
}
pub fn filter_vec_in_place(range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
get_best_available_instruction_set().filter_vec_in_place(range, offset, output)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_best_available_instruction_set() {
// This does not test much unfortunately.
// We just make sure the function returns without crashing and returns the same result.
let instruction_set = get_best_available_instruction_set();
assert_eq!(get_best_available_instruction_set(), instruction_set);
}
#[cfg(target_arch = "x86_64")]
#[test]
fn test_instruction_set_to_code_from_code() {
for instruction_set in [
FilterImplPerInstructionSet::AVX2,
FilterImplPerInstructionSet::Scalar,
] {
let code = instruction_set as u8;
assert_eq!(instruction_set, FilterImplPerInstructionSet::from(code));
}
}
fn test_filter_impl_empty_aux(filter_impl: FilterImplPerInstructionSet) {
let mut output = vec![];
filter_impl.filter_vec_in_place(0..=u32::MAX, 0, &mut output);
assert_eq!(&output, &[]);
}
fn test_filter_impl_simple_aux(filter_impl: FilterImplPerInstructionSet) {
let mut output = vec![3, 2, 1, 5, 11, 2, 5, 10, 2];
filter_impl.filter_vec_in_place(3..=10, 0, &mut output);
assert_eq!(&output, &[0, 3, 6, 7]);
}
fn test_filter_impl_simple_aux_shifted(filter_impl: FilterImplPerInstructionSet) {
let mut output = vec![3, 2, 1, 5, 11, 2, 5, 10, 2];
filter_impl.filter_vec_in_place(3..=10, 10, &mut output);
assert_eq!(&output, &[10, 13, 16, 17]);
}
fn test_filter_impl_simple_outside_i32_range(filter_impl: FilterImplPerInstructionSet) {
let mut output = vec![u32::MAX, i32::MAX as u32 + 1, 0, 1, 3, 1, 1, 1, 1];
filter_impl.filter_vec_in_place(1..=i32::MAX as u32 + 1u32, 0, &mut output);
assert_eq!(&output, &[1, 3, 4, 5, 6, 7, 8]);
}
fn test_filter_impl_test_suite(filter_impl: FilterImplPerInstructionSet) {
test_filter_impl_empty_aux(filter_impl);
test_filter_impl_simple_aux(filter_impl);
test_filter_impl_simple_aux_shifted(filter_impl);
test_filter_impl_simple_outside_i32_range(filter_impl);
}
#[test]
#[cfg(target_arch = "x86_64")]
fn test_filter_implementation_avx2() {
if FilterImplPerInstructionSet::AVX2.is_available() {
test_filter_impl_test_suite(FilterImplPerInstructionSet::AVX2);
}
}
#[test]
fn test_filter_implementation_scalar() {
test_filter_impl_test_suite(FilterImplPerInstructionSet::Scalar);
}
#[cfg(target_arch = "x86_64")]
proptest::proptest! {
#[test]
fn test_filter_compare_scalar_and_avx2_impl_proptest(
start in proptest::prelude::any::<u32>(),
end in proptest::prelude::any::<u32>(),
offset in 0u32..2u32,
mut vals in proptest::collection::vec(0..u32::MAX, 0..30)) {
if FilterImplPerInstructionSet::AVX2.is_available() {
let mut vals_clone = vals.clone();
FilterImplPerInstructionSet::AVX2.filter_vec_in_place(start..=end, offset, &mut vals);
FilterImplPerInstructionSet::Scalar.filter_vec_in_place(start..=end, offset, &mut vals_clone);
assert_eq!(&vals, &vals_clone);
}
}
}
}

View File

@@ -0,0 +1,13 @@
use std::ops::RangeInclusive;
pub fn filter_vec_in_place(range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
// We restrict the accepted boundary, because unsigned integers & SIMD don't
// play well.
let mut output_cursor = 0;
for i in 0..output.len() {
let val = output[i];
output[output_cursor] = offset + i as u32;
output_cursor += if range.contains(&val) { 1 } else { 0 };
}
output.truncate(output_cursor);
}

View File

@@ -1,5 +1,6 @@
mod bitpacker; mod bitpacker;
mod blocked_bitpacker; mod blocked_bitpacker;
mod filter_vec;
use std::cmp::Ordering; use std::cmp::Ordering;

View File

@@ -0,0 +1,36 @@
use crate::{Column, DocId, RowId};
#[derive(Debug, Default, Clone)]
pub struct ColumnBlockAccessor<T> {
val_cache: Vec<T>,
docid_cache: Vec<DocId>,
row_id_cache: Vec<RowId>,
}
impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
ColumnBlockAccessor<T>
{
#[inline]
pub fn fetch_block(&mut self, docs: &[u32], accessor: &Column<T>) {
self.docid_cache.clear();
self.row_id_cache.clear();
accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
self.val_cache.resize(self.row_id_cache.len(), T::default());
accessor
.values
.get_vals(&self.row_id_cache, &mut self.val_cache);
}
#[inline]
pub fn iter_vals(&self) -> impl Iterator<Item = T> + '_ {
self.val_cache.iter().cloned()
}
#[inline]
pub fn iter_docid_vals(&self) -> impl Iterator<Item = (DocId, T)> + '_ {
self.docid_cache
.iter()
.cloned()
.zip(self.val_cache.iter().cloned())
}
}

View File

@@ -1,6 +1,6 @@
-use std::io;
use std::ops::Deref;
use std::sync::Arc;
+use std::{fmt, io};
use sstable::{Dictionary, VoidSSTable};
@@ -21,6 +21,14 @@ pub struct BytesColumn {
pub(crate) term_ord_column: Column<u64>,
}
+impl fmt::Debug for BytesColumn {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+f.debug_struct("BytesColumn")
+.field("term_ord_column", &self.term_ord_column)
+.finish()
+}
+}
impl BytesColumn {
/// Fills the given `output` buffer with the term associated to the ordinal `ord`.
///
@@ -56,6 +64,12 @@ impl BytesColumn {
#[derive(Clone)]
pub struct StrColumn(BytesColumn);
+impl fmt::Debug for StrColumn {
+fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+write!(f, "{:?}", self.term_ord_column)
+}
+}
impl From<StrColumn> for BytesColumn {
fn from(str_column: StrColumn) -> BytesColumn {
str_column.0

View File

@@ -1,7 +1,7 @@
mod dictionary_encoded;
mod serialize;
-use std::fmt::Debug;
+use std::fmt::{self, Debug};
use std::io::Write;
use std::ops::{Deref, Range, RangeInclusive};
use std::sync::Arc;
@@ -16,14 +16,33 @@ pub use serialize::{
use crate::column_index::ColumnIndex;
use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
use crate::column_values::{monotonic_map_column, ColumnValues};
-use crate::{Cardinality, MonotonicallyMappableToU64, RowId};
+use crate::{Cardinality, DocId, EmptyColumnValues, MonotonicallyMappableToU64, RowId};
#[derive(Clone)]
pub struct Column<T = u64> {
-pub idx: ColumnIndex,
+pub index: ColumnIndex,
pub values: Arc<dyn ColumnValues<T>>,
}
impl<T: Debug + PartialOrd + Send + Sync + Copy + 'static> Debug for Column<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let num_docs = self.num_docs();
let entries = (0..num_docs)
.map(|i| (i, self.values_for_doc(i).collect::<Vec<_>>()))
.filter(|(_, vals)| !vals.is_empty());
f.debug_map().entries(entries).finish()
}
}
impl<T: PartialOrd + Default> Column<T> {
pub fn build_empty_column(num_docs: u32) -> Column<T> {
Column {
index: ColumnIndex::Empty { num_docs },
values: Arc::new(EmptyColumnValues),
}
}
}
impl<T: MonotonicallyMappableToU64> Column<T> {
pub fn to_u64_monotonic(self) -> Column<u64> {
let values = Arc::new(monotonic_map_column(
@@ -31,7 +50,7 @@ impl<T: MonotonicallyMappableToU64> Column<T> {
StrictlyMonotonicMappingToInternal::<T>::new(),
));
Column {
-idx: self.idx,
+index: self.index,
values,
}
}
@@ -40,11 +59,11 @@ impl<T: MonotonicallyMappableToU64> Column<T> {
impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
#[inline]
pub fn get_cardinality(&self) -> Cardinality {
-self.idx.get_cardinality()
+self.index.get_cardinality()
}
pub fn num_docs(&self) -> RowId {
-match &self.idx {
+match &self.index {
ColumnIndex::Empty { num_docs } => *num_docs,
ColumnIndex::Full => self.values.num_vals(),
ColumnIndex::Optional(optional_index) => optional_index.num_docs(),
@@ -68,8 +87,25 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
self.values_for_doc(row_id).next()
}
-pub fn values_for_doc(&self, row_id: RowId) -> impl Iterator<Item = T> + '_ {
-self.value_row_ids(row_id)
+/// Translates a block of docids to row_ids.
+///
+/// returns the row_ids and the matching docids on the same index
+/// e.g.
+/// DocId In: [0, 5, 6]
+/// DocId Out: [0, 0, 6, 6]
+/// RowId Out: [0, 1, 2, 3]
+#[inline]
+pub fn row_ids_for_docs(
+&self,
+doc_ids: &[DocId],
+doc_ids_out: &mut Vec<DocId>,
+row_ids: &mut Vec<RowId>,
+) {
+self.index.docids_to_rowids(doc_ids, doc_ids_out, row_ids)
+}
+pub fn values_for_doc(&self, doc_id: DocId) -> impl Iterator<Item = T> + '_ {
+self.value_row_ids(doc_id)
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
}
@@ -82,13 +118,15 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
doc_ids: &mut Vec<u32>,
) {
// convert passed docid range to row id range
-let rowid_range = self.idx.docid_range_to_rowids(selected_docid_range.clone());
+let rowid_range = self
+.index
+.docid_range_to_rowids(selected_docid_range.clone());
// Load rows
self.values
.get_row_ids_for_value_range(value_range, rowid_range, doc_ids);
// Convert rows to docids
-self.idx
+self.index
.select_batch_in_place(selected_docid_range.start, doc_ids);
}
@@ -113,7 +151,7 @@ impl<T> Deref for Column<T> {
type Target = ColumnIndex;
fn deref(&self) -> &Self::Target {
-&self.idx
+&self.index
}
}
@@ -151,7 +189,7 @@ impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
}
fn num_vals(&self) -> u32 {
-match &self.column.idx {
+match &self.column.index {
ColumnIndex::Empty { .. } => 0u32,
ColumnIndex::Full => self.column.values.num_vals(),
ColumnIndex::Optional(optional_idx) => optional_idx.num_docs(),

View File

@@ -52,7 +52,7 @@ pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::
let column_index = crate::column_index::open_column_index(column_index_data)?;
let column_values = load_u64_based_column_values(column_values_data)?;
Ok(Column {
-idx: column_index,
+index: column_index,
values: column_values,
})
}
@@ -71,7 +71,7 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
let column_index = crate::column_index::open_column_index(column_index_data)?;
let column_values = crate::column_values::open_u128_mapped(column_values_data)?;
Ok(Column {
-idx: column_index,
+index: column_index,
values: column_values,
})
}

View File

@@ -8,17 +8,16 @@ use crate::column_index::SerializableColumnIndex;
use crate::{Cardinality, ColumnIndex, MergeRowOrder};
// For simplification, we never have cardinality go down due to deletes.
-fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
+fn detect_cardinality(columns: &[ColumnIndex]) -> Cardinality {
columns
.iter()
-.flatten()
.map(ColumnIndex::get_cardinality)
.max()
.unwrap_or(Cardinality::Full)
}
pub fn merge_column_index<'a>(
-columns: &'a [Option<ColumnIndex>],
+columns: &'a [ColumnIndex],
merge_row_order: &'a MergeRowOrder,
) -> SerializableColumnIndex<'a> {
// For simplification, we do not try to detect whether the cardinality could be
@@ -53,34 +52,33 @@ mod tests {
let optional_index: ColumnIndex = OptionalIndex::for_test(1, &[]).into();
let multivalued_index: ColumnIndex = MultiValueIndex::for_test(&[0, 1]).into();
assert_eq!(
-detect_cardinality(&[Some(optional_index.clone()), None]),
+detect_cardinality(&[optional_index.clone(), ColumnIndex::Empty { num_docs: 0 }]),
Cardinality::Optional
);
assert_eq!(
-detect_cardinality(&[Some(optional_index.clone()), Some(ColumnIndex::Full)]),
+detect_cardinality(&[optional_index.clone(), ColumnIndex::Full]),
Cardinality::Optional
);
-assert_eq!(
-detect_cardinality(&[Some(multivalued_index.clone()), None]),
-Cardinality::Multivalued
-);
assert_eq!(
detect_cardinality(&[
-Some(multivalued_index.clone()),
-Some(optional_index.clone())
+multivalued_index.clone(),
+ColumnIndex::Empty { num_docs: 0 }
]),
Cardinality::Multivalued
);
assert_eq!(
-detect_cardinality(&[Some(optional_index), Some(multivalued_index)]),
+detect_cardinality(&[multivalued_index.clone(), optional_index.clone()]),
+Cardinality::Multivalued
+);
+assert_eq!(
+detect_cardinality(&[optional_index, multivalued_index]),
Cardinality::Multivalued
);
}
#[test]
fn test_merge_index_multivalued_sorted() {
-let column_indexes: Vec<Option<ColumnIndex>> =
-vec![Some(MultiValueIndex::for_test(&[0, 2, 5]).into())];
+let column_indexes: Vec<ColumnIndex> = vec![MultiValueIndex::for_test(&[0, 2, 5]).into()];
let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
&[2],
vec![
@@ -104,10 +102,10 @@ mod tests {
#[test]
fn test_merge_index_multivalued_sorted_several_segment() {
-let column_indexes: Vec<Option<ColumnIndex>> = vec![
-Some(MultiValueIndex::for_test(&[0, 2, 5]).into()),
-None,
-Some(MultiValueIndex::for_test(&[0, 1, 4]).into()),
+let column_indexes: Vec<ColumnIndex> = vec![
+MultiValueIndex::for_test(&[0, 2, 5]).into(),
+ColumnIndex::Empty { num_docs: 0 },
+MultiValueIndex::for_test(&[0, 1, 4]).into(),
];
let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
&[2, 0, 2],

View File

@@ -5,7 +5,7 @@ use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, ShuffleMergeOrder};
pub fn merge_column_index_shuffled<'a>(
-column_indexes: &'a [Option<ColumnIndex>],
+column_indexes: &'a [ColumnIndex],
cardinality_after_merge: Cardinality,
shuffle_merge_order: &'a ShuffleMergeOrder,
) -> SerializableColumnIndex<'a> {
@@ -33,41 +33,41 @@ pub fn merge_column_index_shuffled<'a>(
///
/// In other words the column_indexes passed as argument may NOT be multivalued.
fn merge_column_index_shuffled_optional<'a>(
-column_indexes: &'a [Option<ColumnIndex>],
+column_indexes: &'a [ColumnIndex],
merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
-Box::new(ShuffledOptionalIndex {
+Box::new(ShuffledIndex {
column_indexes,
merge_order,
})
}
-struct ShuffledOptionalIndex<'a> {
-column_indexes: &'a [Option<ColumnIndex>],
+struct ShuffledIndex<'a> {
+column_indexes: &'a [ColumnIndex],
merge_order: &'a ShuffleMergeOrder,
}
-impl<'a> Iterable<u32> for ShuffledOptionalIndex<'a> {
+impl<'a> Iterable<u32> for ShuffledIndex<'a> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
-Box::new(self.merge_order
-.iter_new_to_old_row_addrs()
-.enumerate()
-.filter_map(|(new_row_id, old_row_addr)| {
-let Some(column_index) = &self.column_indexes[old_row_addr.segment_ord as usize] else {
-return None;
-};
-let row_id = new_row_id as u32;
-if column_index.has_value(old_row_addr.row_id) {
-Some(row_id)
-} else {
-None
-}
-}))
+Box::new(
+self.merge_order
+.iter_new_to_old_row_addrs()
+.enumerate()
+.filter_map(|(new_row_id, old_row_addr)| {
+let column_index = &self.column_indexes[old_row_addr.segment_ord as usize];
+let row_id = new_row_id as u32;
+if column_index.has_value(old_row_addr.row_id) {
+Some(row_id)
+} else {
+None
+}
+}),
+)
}
}
fn merge_column_index_shuffled_multivalued<'a>(
-column_indexes: &'a [Option<ColumnIndex>],
+column_indexes: &'a [ColumnIndex],
merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
Box::new(ShuffledMultivaluedIndex {
@@ -77,19 +77,16 @@ fn merge_column_index_shuffled_multivalued<'a>(
}
struct ShuffledMultivaluedIndex<'a> {
-column_indexes: &'a [Option<ColumnIndex>],
+column_indexes: &'a [ColumnIndex],
merge_order: &'a ShuffleMergeOrder,
}
fn iter_num_values<'a>(
-column_indexes: &'a [Option<ColumnIndex>],
+column_indexes: &'a [ColumnIndex],
merge_order: &'a ShuffleMergeOrder,
) -> impl Iterator<Item = u32> + 'a {
merge_order.iter_new_to_old_row_addrs().map(|row_addr| {
-let Some(column_index) = &column_indexes[row_addr.segment_ord as usize] else {
-// No values in the entire column. It surely means there are 0 values associated to this row.
-return 0u32;
-};
+let column_index = &column_indexes[row_addr.segment_ord as usize];
match column_index {
ColumnIndex::Empty { .. } => 0u32,
ColumnIndex::Full => 1,
@@ -143,7 +140,7 @@ mod tests {
#[test]
fn test_merge_column_index_optional_shuffle() {
let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
-let column_indexes = vec![Some(optional_index), Some(ColumnIndex::Full)];
+let column_indexes = vec![optional_index, ColumnIndex::Full];
let row_addrs = vec![
RowAddr {
segment_ord: 0u32,

View File

@@ -9,7 +9,7 @@ use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder};
///
/// There are no sort nor deletes involved.
pub fn merge_column_index_stacked<'a>(
-columns: &'a [Option<ColumnIndex>],
+columns: &'a [ColumnIndex],
cardinality_after_merge: Cardinality,
stack_merge_order: &'a StackMergeOrder,
) -> SerializableColumnIndex<'a> {
@@ -33,7 +33,7 @@ pub fn merge_column_index_stacked<'a>(
}
struct StackedOptionalIndex<'a> {
-columns: &'a [Option<ColumnIndex>],
+columns: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
}
@@ -46,16 +46,16 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
.flat_map(|(columnar_id, column_index_opt)| {
let columnar_row_range = self.stack_merge_order.columnar_range(columnar_id);
let rows_it: Box<dyn Iterator<Item = RowId>> = match column_index_opt {
-Some(ColumnIndex::Full) => Box::new(columnar_row_range),
-Some(ColumnIndex::Optional(optional_index)) => Box::new(
+ColumnIndex::Full => Box::new(columnar_row_range),
+ColumnIndex::Optional(optional_index) => Box::new(
optional_index
.iter_rows()
.map(move |row_id: RowId| columnar_row_range.start + row_id),
),
-Some(ColumnIndex::Multivalued(_)) => {
+ColumnIndex::Multivalued(_) => {
panic!("No multivalued index is allowed when stacking column index");
}
-None | Some(ColumnIndex::Empty { .. }) => Box::new(std::iter::empty()),
+ColumnIndex::Empty { .. } => Box::new(std::iter::empty()),
};
rows_it
}),
@@ -65,20 +65,18 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
#[derive(Clone, Copy)]
struct StackedMultivaluedIndex<'a> {
-columns: &'a [Option<ColumnIndex>],
+columns: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
}
fn convert_column_opt_to_multivalued_index<'a>(
-column_index_opt: Option<&'a ColumnIndex>,
+column_index_opt: &'a ColumnIndex,
num_rows: RowId,
) -> Box<dyn Iterator<Item = RowId> + 'a> {
match column_index_opt {
-None | Some(ColumnIndex::Empty { .. }) => {
-Box::new(iter::repeat(0u32).take(num_rows as usize + 1))
-}
-Some(ColumnIndex::Full) => Box::new(0..num_rows + 1),
-Some(ColumnIndex::Optional(optional_index)) => {
+ColumnIndex::Empty { .. } => Box::new(iter::repeat(0u32).take(num_rows as usize + 1)),
+ColumnIndex::Full => Box::new(0..num_rows + 1),
+ColumnIndex::Optional(optional_index) => {
Box::new(
(0..num_rows)
// TODO optimize
@@ -86,9 +84,7 @@ fn convert_column_opt_to_multivalued_index<'a>(
.chain(std::iter::once(optional_index.num_non_nulls())),
)
}
-Some(ColumnIndex::Multivalued(multivalued_index)) => {
-multivalued_index.start_index_column.iter()
-}
+ColumnIndex::Multivalued(multivalued_index) => multivalued_index.start_index_column.iter(),
}
}
@@ -97,7 +93,6 @@ impl<'a> Iterable<RowId> for StackedMultivaluedIndex<'a> {
let multivalued_indexes =
self.columns
.iter()
-.map(Option::as_ref)
.enumerate()
.map(|(columnar_id, column_opt)| {
let num_rows =

View File

@@ -12,7 +12,7 @@ pub use serialize::{open_column_index, serialize_column_index, SerializableColum
use crate::column_index::multivalued_index::MultiValueIndex;
use crate::{Cardinality, DocId, RowId};
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub enum ColumnIndex {
Empty {
num_docs: u32,
@@ -37,11 +37,15 @@ impl From<MultiValueIndex> for ColumnIndex {
}
impl ColumnIndex {
// Returns the cardinality of the column index.
//
// By convention, if the column contains no docs, we consider that it is
// full.
#[inline]
pub fn get_cardinality(&self) -> Cardinality {
match self {
+ColumnIndex::Empty { num_docs: 0 } | ColumnIndex::Full => Cardinality::Full,
ColumnIndex::Empty { .. } => Cardinality::Optional,
-ColumnIndex::Full => Cardinality::Full,
ColumnIndex::Optional(_) => Cardinality::Optional,
ColumnIndex::Multivalued(_) => Cardinality::Multivalued,
}
@@ -74,6 +78,45 @@ impl ColumnIndex {
}
}
/// Translates a block of docids to row_ids.
///
/// returns the row_ids and the matching docids on the same index
/// e.g.
/// DocId In: [0, 5, 6]
/// DocId Out: [0, 0, 6, 6]
/// RowId Out: [0, 1, 2, 3]
#[inline]
pub fn docids_to_rowids(
&self,
doc_ids: &[DocId],
doc_ids_out: &mut Vec<DocId>,
row_ids: &mut Vec<RowId>,
) {
match self {
ColumnIndex::Empty { .. } => {}
ColumnIndex::Full => {
doc_ids_out.extend_from_slice(doc_ids);
row_ids.extend_from_slice(doc_ids);
}
ColumnIndex::Optional(optional_index) => {
for doc_id in doc_ids {
if let Some(row_id) = optional_index.rank_if_exists(*doc_id) {
doc_ids_out.push(*doc_id);
row_ids.push(row_id);
}
}
}
ColumnIndex::Multivalued(multivalued_index) => {
for doc_id in doc_ids {
for row_id in multivalued_index.range(*doc_id) {
doc_ids_out.push(*doc_id);
row_ids.push(row_id);
}
}
}
}
}
pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
match self {
ColumnIndex::Empty { .. } => 0..0,
@@ -113,3 +156,21 @@ impl ColumnIndex {
}
}
}
#[cfg(test)]
mod tests {
use crate::{Cardinality, ColumnIndex};
#[test]
fn test_column_index_get_cardinality() {
assert_eq!(
ColumnIndex::Empty { num_docs: 0 }.get_cardinality(),
Cardinality::Full
);
assert_eq!(ColumnIndex::Full.get_cardinality(), Cardinality::Full);
assert_eq!(
ColumnIndex::Empty { num_docs: 1 }.get_cardinality(),
Cardinality::Optional
);
}
}
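To make the `docids_to_rowids` docstring example above concrete, here is a standalone sketch (not taken from the commit) of the translation for a multivalued layout, assuming a simple start-offset representation where `starts[d]..starts[d + 1]` holds the value rows of doc `d`:

    // For each requested doc id, emit one (doc_id, row_id) pair per value row,
    // reproducing the "DocId Out" / "RowId Out" shape of the docstring.
    fn docids_to_rowids(starts: &[u32], doc_ids: &[u32]) -> (Vec<u32>, Vec<u32>) {
        let mut doc_ids_out = Vec::new();
        let mut row_ids = Vec::new();
        for &doc_id in doc_ids {
            let start = starts[doc_id as usize];
            let end = starts[doc_id as usize + 1];
            for row_id in start..end {
                doc_ids_out.push(doc_id);
                row_ids.push(row_id);
            }
        }
        (doc_ids_out, row_ids)
    }

    fn main() {
        // Docs 0..=6 with value counts [2, 0, 0, 0, 0, 0, 2]:
        // doc 0 owns rows 0..2, doc 6 owns rows 2..4.
        let starts = [0u32, 2, 2, 2, 2, 2, 2, 4];
        let (docs_out, rows_out) = docids_to_rowids(&starts, &[0, 5, 6]);
        assert_eq!(docs_out, vec![0, 0, 6, 6]);
        assert_eq!(rows_out, vec![0, 1, 2, 3]);
    }

Doc 5 contributes nothing because it has no value rows, which is why it is absent from the output.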

View File

@@ -35,6 +35,14 @@ pub struct MultiValueIndex {
pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
}
impl std::fmt::Debug for MultiValueIndex {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("MultiValuedIndex")
.field("num_rows", &self.start_index_column.num_vals())
.finish_non_exhaustive()
}
}
impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
fn from(start_index_column: Arc<dyn ColumnValues<RowId>>) -> Self {
MultiValueIndex { start_index_column }
@@ -106,11 +114,8 @@ impl MultiValueIndex {
#[cfg(test)]
mod tests {
use std::ops::Range;
-use std::sync::Arc;
use super::MultiValueIndex;
-use crate::column_values::IterColumn;
-use crate::{ColumnValues, RowId};
fn index_to_pos_helper(
index: &MultiValueIndex,
@@ -124,9 +129,7 @@ mod tests {
#[test]
fn test_positions_to_docid() {
-let offsets: Vec<RowId> = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
-let column: Arc<dyn ColumnValues<RowId>> = Arc::new(IterColumn::from(offsets.into_iter()));
-let index = MultiValueIndex::from(column);
+let index = MultiValueIndex::for_test(&[0, 10, 12, 15, 22, 23]);
assert_eq!(index.num_docs(), 5);
let positions = &[10u32, 11, 15, 20, 21, 22];
assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);

View File

@@ -88,6 +88,15 @@ pub struct OptionalIndex {
block_metas: Arc<[BlockMeta]>,
}
impl std::fmt::Debug for OptionalIndex {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("OptionalIndex")
.field("num_rows", &self.num_rows)
.field("num_non_null_rows", &self.num_non_null_rows)
.finish_non_exhaustive()
}
}
/// Splits a value address into lower and upper 16bits.
/// The lower 16 bits are the value in the block
/// The upper 16 bits are the block index

View File

@@ -5,7 +5,7 @@ use crate::iterable::Iterable;
use crate::{ColumnIndex, ColumnValues, MergeRowOrder};
pub(crate) struct MergedColumnValues<'a, T> {
-pub(crate) column_indexes: &'a [Option<ColumnIndex>],
+pub(crate) column_indexes: &'a [ColumnIndex],
pub(crate) column_values: &'a [Option<Arc<dyn ColumnValues<T>>>],
pub(crate) merge_row_order: &'a MergeRowOrder,
}
@@ -23,8 +23,7 @@ impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T>
shuffle_merge_order
.iter_new_to_old_row_addrs()
.flat_map(|row_addr| {
-let column_index =
-self.column_indexes[row_addr.segment_ord as usize].as_ref()?;
+let column_index = &self.column_indexes[row_addr.segment_ord as usize];
let column_values =
self.column_values[row_addr.segment_ord as usize].as_ref()?;
let value_range = column_index.value_row_ids(row_addr.row_id);

View File

@@ -94,7 +94,6 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
/// Get the row ids of values which are in the provided value range.
///
/// Note that position == docid for single value fast fields
-#[inline(always)]
fn get_row_ids_for_value_range(
&self,
value_range: RangeInclusive<T>,
@@ -110,20 +109,26 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
} }
} }
/// Returns the minimum value for this fast field. /// Returns a lower bound for this column of values.
/// ///
/// This min_value may not be exact. /// All values are guaranteed to be higher than `.min_value()`
/// For instance, the min value does not take in account of possible /// but this value is not necessarily the best boundary value.
/// deleted document. All values are however guaranteed to be higher than ///
/// `.min_value()`. /// We have
/// ∀i < self.num_vals(), self.get_val(i) >= self.min_value()
/// But we don't have necessarily
/// ∃i < self.num_vals(), self.get_val(i) == self.min_value()
fn min_value(&self) -> T; fn min_value(&self) -> T;
/// Returns the maximum value for this fast field. /// Returns an upper bound for this column of values.
/// ///
/// This max_value may not be exact. /// All values are guaranteed to be lower than `.max_value()`
/// For instance, the max value does not take in account of possible /// but this value is not necessarily the best boundary value.
/// deleted document. All values are however guaranteed to be higher than ///
/// `.max_value()`. /// We have
/// ∀i < self.num_vals(), self.get_val(i) <= self.max_value()
/// But we don't have necessarily
/// ∃i < self.num_vals(), self.get_val(i) == self.max_value()
fn max_value(&self) -> T; fn max_value(&self) -> T;
/// The number of values in the column. /// The number of values in the column.
@@ -135,6 +140,27 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
} }
} }
/// Empty column of values.
pub struct EmptyColumnValues;
impl<T: PartialOrd + Default> ColumnValues<T> for EmptyColumnValues {
fn get_val(&self, _idx: u32) -> T {
panic!("Internal Error: Called get_val of empty column.")
}
fn min_value(&self) -> T {
T::default()
}
fn max_value(&self) -> T {
T::default()
}
fn num_vals(&self) -> u32 {
0
}
}
impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> { impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
#[inline(always)] #[inline(always)]
fn get_val(&self, idx: u32) -> T { fn get_val(&self, idx: u32) -> T {
@@ -178,54 +204,5 @@ impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>>
} }
} }
/// Wraps an cloneable iterator into a `Column`.
pub struct IterColumn<T>(T);
impl<T> From<T> for IterColumn<T>
where T: Iterator + Clone + ExactSizeIterator
{
fn from(iter: T) -> Self {
IterColumn(iter)
}
}
impl<T> ColumnValues<T::Item> for IterColumn<T>
where
T: Iterator + Clone + ExactSizeIterator + Send + Sync,
T::Item: PartialOrd + Debug,
{
fn get_val(&self, idx: u32) -> T::Item {
self.0.clone().nth(idx as usize).unwrap()
}
fn min_value(&self) -> T::Item {
self.0.clone().next().unwrap()
}
fn max_value(&self) -> T::Item {
self.0.clone().last().unwrap()
}
fn num_vals(&self) -> u32 {
self.0.len() as u32
}
fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
Box::new(self.0.clone())
}
}
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench; mod bench;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_range_as_col() {
let col = IterColumn::from(10..100);
assert_eq!(col.num_vals(), 90);
assert_eq!(col.max_value(), 99);
}
}
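The revised `min_value()`/`max_value()` documentation above describes loose bounds rather than exact extrema. A minimal sketch of that contract, using a simplified stand-in trait rather than the crate's actual `ColumnValues`:

// Simplified stand-in trait: min_value()/max_value() are bounds that every stored
// value respects, but the bounds themselves need not appear in the column.
trait Values {
    fn get_val(&self, idx: u32) -> u64;
    fn num_vals(&self) -> u32;
    fn min_value(&self) -> u64; // ∀ i < num_vals(): get_val(i) >= min_value()
    fn max_value(&self) -> u64; // ∀ i < num_vals(): get_val(i) <= max_value()
}

struct LooseBounds(Vec<u64>);

impl Values for LooseBounds {
    fn get_val(&self, idx: u32) -> u64 { self.0[idx as usize] }
    fn num_vals(&self) -> u32 { self.0.len() as u32 }
    // A valid (if useless) lower bound, even though the smallest stored value is 42.
    fn min_value(&self) -> u64 { 0 }
    fn max_value(&self) -> u64 { u64::MAX }
}

fn main() {
    let col = LooseBounds(vec![42, 43, 44]);
    for i in 0..col.num_vals() {
        assert!(col.get_val(i) >= col.min_value());
        assert!(col.get_val(i) <= col.max_value());
    }
}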


@@ -10,7 +10,7 @@ use super::{CompactSpace, RangeMapping};
/// Put the blanks for the sorted values into a binary heap /// Put the blanks for the sorted values into a binary heap
fn get_blanks(values_sorted: &BTreeSet<u128>) -> BinaryHeap<BlankRange> { fn get_blanks(values_sorted: &BTreeSet<u128>) -> BinaryHeap<BlankRange> {
let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new(); let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
for (first, second) in values_sorted.iter().tuple_windows() { for (first, second) in values_sorted.iter().copied().tuple_windows() {
// Correctness Overflow: the values are deduped and sorted (BTreeSet property), that means // Correctness Overflow: the values are deduped and sorted (BTreeSet property), that means
// there's always space between two values. // there's always space between two values.
let blank_range = first + 1..=second - 1; let blank_range = first + 1..=second - 1;
@@ -65,12 +65,12 @@ pub fn get_compact_space(
return compact_space_builder.finish(); return compact_space_builder.finish();
} }
let mut blanks: BinaryHeap<BlankRange> = get_blanks(values_deduped_sorted);
// Replace after stabilization of https://github.com/rust-lang/rust/issues/62924
// We start by space that's limited to min_value..=max_value // We start by space that's limited to min_value..=max_value
let min_value = *values_deduped_sorted.iter().next().unwrap_or(&0); // Replace after stabilization of https://github.com/rust-lang/rust/issues/62924
let max_value = *values_deduped_sorted.iter().last().unwrap_or(&0); let min_value = values_deduped_sorted.iter().next().copied().unwrap_or(0);
let max_value = values_deduped_sorted.iter().last().copied().unwrap_or(0);
let mut blanks: BinaryHeap<BlankRange> = get_blanks(values_deduped_sorted);
// +1 for null, in case min and max covers the whole space, we are off by one. // +1 for null, in case min and max covers the whole space, we are off by one.
let mut amplitude_compact_space = (max_value - min_value).saturating_add(1); let mut amplitude_compact_space = (max_value - min_value).saturating_add(1);
@@ -84,6 +84,7 @@ pub fn get_compact_space(
let mut amplitude_bits: u8 = num_bits(amplitude_compact_space); let mut amplitude_bits: u8 = num_bits(amplitude_compact_space);
let mut blank_collector = BlankCollector::new(); let mut blank_collector = BlankCollector::new();
// We will stage blanks until they reduce the compact space by at least 1 bit and then flush // We will stage blanks until they reduce the compact space by at least 1 bit and then flush
// them if the metadata cost is lower than the total number of saved bits. // them if the metadata cost is lower than the total number of saved bits.
// Binary heap to process the gaps by their size // Binary heap to process the gaps by their size
@@ -93,6 +94,7 @@ pub fn get_compact_space(
let staged_spaces_sum: u128 = blank_collector.staged_blanks_sum(); let staged_spaces_sum: u128 = blank_collector.staged_blanks_sum();
let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum; let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum;
let amplitude_new_bits = num_bits(amplitude_new_compact_space); let amplitude_new_bits = num_bits(amplitude_new_compact_space);
if amplitude_bits == amplitude_new_bits { if amplitude_bits == amplitude_new_bits {
continue; continue;
} }
@@ -100,7 +102,16 @@ pub fn get_compact_space(
// TODO: Maybe calculate exact cost of blanks and run this more expensive computation only, // TODO: Maybe calculate exact cost of blanks and run this more expensive computation only,
// when amplitude_new_bits changes // when amplitude_new_bits changes
let cost = blank_collector.num_staged_blanks() * cost_per_blank; let cost = blank_collector.num_staged_blanks() * cost_per_blank;
if cost >= saved_bits {
// We want to end up with a compact space that fits into 32 bits.
// In order to deal with pathological cases, we force the algorithm to keep
// refining the compact space until the amplitude fits in 32 bits.
//
// The worst case scenario happens for a large number of u128s regularly
// spread over the full u128 space.
//
// This change will force the algorithm to degenerate into dictionary encoding.
if amplitude_bits <= 32 && cost >= saved_bits {
// Continue here, since although we walk over the blanks by size, // Continue here, since although we walk over the blanks by size,
// we can potentially save a lot at the last bits, which are smaller blanks // we can potentially save a lot at the last bits, which are smaller blanks
// //
@@ -115,6 +126,8 @@ pub fn get_compact_space(
compact_space_builder.add_blanks(blank_collector.drain().map(|blank| blank.blank_range())); compact_space_builder.add_blanks(blank_collector.drain().map(|blank| blank.blank_range()));
} }
assert!(amplitude_bits <= 32);
// special case, when we didn't collect any blanks because: // special case, when we didn't collect any blanks because:
// * the data is empty (early exit) // * the data is empty (early exit)
// * the algorithm did decide it's not worth the cost, which can be the case for single values // * the algorithm did decide it's not worth the cost, which can be the case for single values
@@ -199,7 +212,7 @@ impl CompactSpaceBuilder {
covered_space.push(0..=0); // empty data case covered_space.push(0..=0); // empty data case
}; };
let mut compact_start: u64 = 1; // 0 is reserved for `null` let mut compact_start: u32 = 1; // 0 is reserved for `null`
let mut ranges_mapping: Vec<RangeMapping> = Vec::with_capacity(covered_space.len()); let mut ranges_mapping: Vec<RangeMapping> = Vec::with_capacity(covered_space.len());
for cov in covered_space { for cov in covered_space {
let range_mapping = super::RangeMapping { let range_mapping = super::RangeMapping {
@@ -218,6 +231,7 @@ impl CompactSpaceBuilder {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::column_values::u128_based::compact_space::COST_PER_BLANK_IN_BITS;
#[test] #[test]
fn test_binary_heap_pop_order() { fn test_binary_heap_pop_order() {
@@ -228,4 +242,11 @@ mod tests {
assert_eq!(blanks.pop().unwrap().blank_size(), 101); assert_eq!(blanks.pop().unwrap().blank_size(), 101);
assert_eq!(blanks.pop().unwrap().blank_size(), 11); assert_eq!(blanks.pop().unwrap().blank_size(), 11);
} }
#[test]
fn test_worst_case_scenario() {
let vals: BTreeSet<u128> = (0..8).map(|i| i * ((1u128 << 34) / 8)).collect();
let compact_space = get_compact_space(&vals, vals.len() as u32, COST_PER_BLANK_IN_BITS);
assert!(compact_space.amplitude_compact_space() < u32::MAX as u128);
}
} }
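The staging logic above only flushes a batch of blanks when the bits they save across all values outweigh their metadata cost, and (with this change) keeps refining regardless of cost while the amplitude still needs more than 32 bits. A rough back-of-the-envelope sketch with purely hypothetical numbers:

fn num_bits(amplitude: u128) -> u32 {
    // Bit width needed to bitpack values spanning `amplitude`.
    128 - amplitude.leading_zeros()
}

fn main() {
    // Hypothetical figures: 1_000 values spread over ~1M, with one huge unused gap.
    let cost_per_blank_in_bits: u64 = 36; // assumed metadata cost per recorded blank
    let num_values: u64 = 1_000;
    let amplitude_before: u128 = 1 << 20;
    let blank_size: u128 = (1 << 20) - (1 << 10);
    let amplitude_after = amplitude_before - blank_size;

    let saved_bits = u64::from(num_bits(amplitude_before) - num_bits(amplitude_after)) * num_values;
    // One staged blank: 36 bits of metadata buy ~10_000 bits of savings, so it gets flushed.
    assert!(cost_per_blank_in_bits < saved_bits);
}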


@@ -42,15 +42,15 @@ pub struct CompactSpace {
#[derive(Debug, Clone, Eq, PartialEq)] #[derive(Debug, Clone, Eq, PartialEq)]
struct RangeMapping { struct RangeMapping {
value_range: RangeInclusive<u128>, value_range: RangeInclusive<u128>,
compact_start: u64, compact_start: u32,
} }
impl RangeMapping { impl RangeMapping {
fn range_length(&self) -> u64 { fn range_length(&self) -> u32 {
(self.value_range.end() - self.value_range.start()) as u64 + 1 (self.value_range.end() - self.value_range.start()) as u32 + 1
} }
// The last value of the compact space in this range // The last value of the compact space in this range
fn compact_end(&self) -> u64 { fn compact_end(&self) -> u32 {
self.compact_start + self.range_length() - 1 self.compact_start + self.range_length() - 1
} }
} }
@@ -81,7 +81,7 @@ impl BinarySerializable for CompactSpace {
let num_ranges = VInt::deserialize(reader)?.0; let num_ranges = VInt::deserialize(reader)?.0;
let mut ranges_mapping: Vec<RangeMapping> = vec![]; let mut ranges_mapping: Vec<RangeMapping> = vec![];
let mut value = 0u128; let mut value = 0u128;
let mut compact_start = 1u64; // 0 is reserved for `null` let mut compact_start = 1u32; // 0 is reserved for `null`
for _ in 0..num_ranges { for _ in 0..num_ranges {
let blank_delta_start = VIntU128::deserialize(reader)?.0; let blank_delta_start = VIntU128::deserialize(reader)?.0;
value += blank_delta_start; value += blank_delta_start;
@@ -122,10 +122,10 @@ impl CompactSpace {
/// Returns either Ok(the value in the compact space) or if it is outside the compact space the /// Returns either Ok(the value in the compact space) or if it is outside the compact space the
/// Err(position where it would be inserted) /// Err(position where it would be inserted)
fn u128_to_compact(&self, value: u128) -> Result<u64, usize> { fn u128_to_compact(&self, value: u128) -> Result<u32, usize> {
self.ranges_mapping self.ranges_mapping
.binary_search_by(|probe| { .binary_search_by(|probe| {
let value_range = &probe.value_range; let value_range: &RangeInclusive<u128> = &probe.value_range;
if value < *value_range.start() { if value < *value_range.start() {
Ordering::Greater Ordering::Greater
} else if value > *value_range.end() { } else if value > *value_range.end() {
@@ -136,13 +136,13 @@ impl CompactSpace {
}) })
.map(|pos| { .map(|pos| {
let range_mapping = &self.ranges_mapping[pos]; let range_mapping = &self.ranges_mapping[pos];
let pos_in_range = (value - range_mapping.value_range.start()) as u64; let pos_in_range: u32 = (value - range_mapping.value_range.start()) as u32;
range_mapping.compact_start + pos_in_range range_mapping.compact_start + pos_in_range
}) })
} }
/// Unpacks a value from compact space u64 to u128 space /// Unpacks a value from compact space u32 to u128 space
fn compact_to_u128(&self, compact: u64) -> u128 { fn compact_to_u128(&self, compact: u32) -> u128 {
let pos = self let pos = self
.ranges_mapping .ranges_mapping
.binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start) .binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
@@ -178,11 +178,15 @@ impl CompactSpaceCompressor {
/// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals. /// Taking the vals as Vec may cost a lot of memory. It is used to sort the vals.
pub fn train_from(iter: impl Iterator<Item = u128>) -> Self { pub fn train_from(iter: impl Iterator<Item = u128>) -> Self {
let mut values_sorted = BTreeSet::new(); let mut values_sorted = BTreeSet::new();
// Total number of values, with their redundancy.
let mut total_num_values = 0u32; let mut total_num_values = 0u32;
for val in iter { for val in iter {
total_num_values += 1u32; total_num_values += 1u32;
values_sorted.insert(val); values_sorted.insert(val);
} }
let min_value = *values_sorted.iter().next().unwrap_or(&0);
let max_value = *values_sorted.iter().last().unwrap_or(&0);
let compact_space = let compact_space =
get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS); get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
let amplitude_compact_space = compact_space.amplitude_compact_space(); let amplitude_compact_space = compact_space.amplitude_compact_space();
@@ -193,13 +197,12 @@ impl CompactSpaceCompressor {
); );
let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64); let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64);
let min_value = *values_sorted.iter().next().unwrap_or(&0);
let max_value = *values_sorted.iter().last().unwrap_or(&0);
assert_eq!( assert_eq!(
compact_space compact_space
.u128_to_compact(max_value) .u128_to_compact(max_value)
.expect("could not convert max value to compact space"), .expect("could not convert max value to compact space"),
amplitude_compact_space as u64 amplitude_compact_space as u32
); );
CompactSpaceCompressor { CompactSpaceCompressor {
params: IPCodecParams { params: IPCodecParams {
@@ -240,7 +243,7 @@ impl CompactSpaceCompressor {
"Could not convert value to compact_space. This is a bug.", "Could not convert value to compact_space. This is a bug.",
) )
})?; })?;
bitpacker.write(compact, self.params.num_bits, write)?; bitpacker.write(compact as u64, self.params.num_bits, write)?;
} }
bitpacker.close(write)?; bitpacker.close(write)?;
self.write_footer(write)?; self.write_footer(write)?;
@@ -314,48 +317,6 @@ impl ColumnValues<u128> for CompactSpaceDecompressor {
#[inline] #[inline]
fn get_row_ids_for_value_range( fn get_row_ids_for_value_range(
&self,
value_range: RangeInclusive<u128>,
positions_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.get_positions_for_value_range(value_range, positions_range, positions)
}
}
impl CompactSpaceDecompressor {
pub fn open(data: OwnedBytes) -> io::Result<CompactSpaceDecompressor> {
let (data_slice, footer_len_bytes) = data.split_at(data.len() - 4);
let footer_len = u32::deserialize(&mut &footer_len_bytes[..])?;
let data_footer = &data_slice[data_slice.len() - footer_len as usize..];
let params = IPCodecParams::deserialize(&mut &data_footer[..])?;
let decompressor = CompactSpaceDecompressor { data, params };
Ok(decompressor)
}
/// Converting to compact space for the decompressor is more complex, since we may get values
/// which are outside the compact space. e.g. if we map
/// 1000 => 5
/// 2000 => 6
///
/// and we want a mapping for 1005, there is no equivalent compact space. We instead return an
/// error with the index of the next range.
fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
self.params.compact_space.u128_to_compact(value)
}
fn compact_to_u128(&self, compact: u64) -> u128 {
self.params.compact_space.compact_to_u128(compact)
}
/// Comparing on compact space: Random dataset 0,24 (50% random hit) - 1.05 GElements/s
/// Comparing on compact space: Real dataset 1.08 GElements/s
///
/// Comparing on original space: Real dataset .06 GElements/s (not completely optimized)
#[inline]
pub fn get_positions_for_value_range(
&self, &self,
value_range: RangeInclusive<u128>, value_range: RangeInclusive<u128>,
position_range: Range<u32>, position_range: Range<u32>,
@@ -395,44 +356,42 @@ impl CompactSpaceDecompressor {
range_mapping.compact_end() range_mapping.compact_end()
}); });
let range = compact_from..=compact_to; let value_range = compact_from..=compact_to;
self.get_positions_for_compact_value_range(value_range, position_range, positions);
}
}
let scan_num_docs = position_range.end - position_range.start; impl CompactSpaceDecompressor {
pub fn open(data: OwnedBytes) -> io::Result<CompactSpaceDecompressor> {
let (data_slice, footer_len_bytes) = data.split_at(data.len() - 4);
let footer_len = u32::deserialize(&mut &footer_len_bytes[..])?;
let step_size = 4; let data_footer = &data_slice[data_slice.len() - footer_len as usize..];
let cutoff = position_range.start + scan_num_docs - scan_num_docs % step_size; let params = IPCodecParams::deserialize(&mut &data_footer[..])?;
let decompressor = CompactSpaceDecompressor { data, params };
let mut push_if_in_range = |idx, val| { Ok(decompressor)
if range.contains(&val) { }
positions.push(idx);
}
};
let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
// unrolled loop
for idx in (position_range.start..cutoff).step_by(step_size as usize) {
let idx1 = idx;
let idx2 = idx + 1;
let idx3 = idx + 2;
let idx4 = idx + 3;
let val1 = get_val(idx1);
let val2 = get_val(idx2);
let val3 = get_val(idx3);
let val4 = get_val(idx4);
push_if_in_range(idx1, val1);
push_if_in_range(idx2, val2);
push_if_in_range(idx3, val3);
push_if_in_range(idx4, val4);
}
// handle rest /// Converting to compact space for the decompressor is more complex, since we may get values
for idx in cutoff..position_range.end { /// which are outside the compact space. e.g. if we map
push_if_in_range(idx, get_val(idx)); /// 1000 => 5
} /// 2000 => 6
///
/// and we want a mapping for 1005, there is no equivalent compact space. We instead return an
/// error with the index of the next range.
fn u128_to_compact(&self, value: u128) -> Result<u32, usize> {
self.params.compact_space.u128_to_compact(value)
}
fn compact_to_u128(&self, compact: u32) -> u128 {
self.params.compact_space.compact_to_u128(compact)
} }
#[inline] #[inline]
fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ { fn iter_compact(&self) -> impl Iterator<Item = u32> + '_ {
(0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data)) (0..self.params.num_vals)
.map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u32)
} }
#[inline] #[inline]
@@ -445,7 +404,7 @@ impl CompactSpaceDecompressor {
#[inline] #[inline]
pub fn get(&self, idx: u32) -> u128 { pub fn get(&self, idx: u32) -> u128 {
let compact = self.params.bit_unpacker.get(idx, &self.data); let compact = self.params.bit_unpacker.get(idx, &self.data) as u32;
self.compact_to_u128(compact) self.compact_to_u128(compact)
} }
@@ -456,6 +415,20 @@ impl CompactSpaceDecompressor {
pub fn max_value(&self) -> u128 { pub fn max_value(&self) -> u128 {
self.params.max_value self.params.max_value
} }
fn get_positions_for_compact_value_range(
&self,
value_range: RangeInclusive<u32>,
position_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.params.bit_unpacker.get_ids_for_value_range(
*value_range.start() as u64..=*value_range.end() as u64,
position_range,
&self.data,
positions,
);
}
} }
#[cfg(test)] #[cfg(test)]
@@ -469,12 +442,12 @@ mod tests {
#[test] #[test]
fn compact_space_test() { fn compact_space_test() {
let ips = &[ let ips: BTreeSet<u128> = [
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260, 2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
] ]
.into_iter() .into_iter()
.collect(); .collect();
let compact_space = get_compact_space(ips, ips.len() as u32, 11); let compact_space = get_compact_space(&ips, ips.len() as u32, 11);
let amplitude = compact_space.amplitude_compact_space(); let amplitude = compact_space.amplitude_compact_space();
assert_eq!(amplitude, 17); assert_eq!(amplitude, 17);
assert_eq!(1, compact_space.u128_to_compact(2).unwrap()); assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
@@ -497,8 +470,8 @@ mod tests {
); );
for ip in ips { for ip in ips {
let compact = compact_space.u128_to_compact(*ip).unwrap(); let compact = compact_space.u128_to_compact(ip).unwrap();
assert_eq!(compact_space.compact_to_u128(compact), *ip); assert_eq!(compact_space.compact_to_u128(compact), ip);
} }
} }
@@ -524,7 +497,7 @@ mod tests {
.map(|pos| pos as u32) .map(|pos| pos as u32)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let mut positions = Vec::new(); let mut positions = Vec::new();
decompressor.get_positions_for_value_range( decompressor.get_row_ids_for_value_range(
range, range,
0..decompressor.num_vals(), 0..decompressor.num_vals(),
&mut positions, &mut positions,
@@ -569,7 +542,7 @@ mod tests {
let val = *val; let val = *val;
let pos = pos as u32; let pos = pos as u32;
let mut positions = Vec::new(); let mut positions = Vec::new();
decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions); decomp.get_row_ids_for_value_range(val..=val, pos..pos + 1, &mut positions);
assert_eq!(positions, vec![pos]); assert_eq!(positions, vec![pos]);
} }
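The `u128_to_compact` doc comment above describes how a value falling into a blank yields an `Err` with the index of the next range. A simplified sketch of that mapping (standalone types, not the crate's `CompactSpace`): each covered value range maps onto a contiguous run of u32 codes.

use std::cmp::Ordering;
use std::ops::RangeInclusive;

struct RangeMapping {
    value_range: RangeInclusive<u128>,
    compact_start: u32,
}

fn u128_to_compact(ranges: &[RangeMapping], value: u128) -> Result<u32, usize> {
    ranges
        .binary_search_by(|probe| {
            if value < *probe.value_range.start() {
                Ordering::Greater
            } else if value > *probe.value_range.end() {
                Ordering::Less
            } else {
                Ordering::Equal
            }
        })
        .map(|pos| ranges[pos].compact_start + (value - *ranges[pos].value_range.start()) as u32)
}

fn main() {
    // Compact code 0 is reserved for null, so the first covered range starts at 1.
    let ranges = vec![
        RangeMapping { value_range: 1000..=1012, compact_start: 1 },
        RangeMapping { value_range: 2000..=2005, compact_start: 14 },
    ];
    assert_eq!(u128_to_compact(&ranges, 1005), Ok(6));
    assert_eq!(u128_to_compact(&ranges, 2000), Ok(14));
    // 1500 sits in the blank between the two ranges.
    assert_eq!(u128_to_compact(&ranges, 1500), Err(1));
}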


@@ -1,4 +1,6 @@
use std::io::{self, Write}; use std::io::{self, Write};
use std::num::NonZeroU64;
use std::ops::{Range, RangeInclusive};
use common::{BinarySerializable, OwnedBytes}; use common::{BinarySerializable, OwnedBytes};
use fastdivide::DividerU64; use fastdivide::DividerU64;
@@ -16,6 +18,46 @@ pub struct BitpackedReader {
stats: ColumnStats, stats: ColumnStats,
} }
#[inline(always)]
const fn div_ceil(n: u64, q: NonZeroU64) -> u64 {
// copied from unstable rust standard library.
let d = n / q.get();
let r = n % q.get();
if r > 0 {
d + 1
} else {
d
}
}
// The bitpacked codec applies a linear transformation `f` over data that are bitpacked.
// f is defined by:
// f: bitpacked -> stats.min_value + stats.gcd * bitpacked
//
// In order to run range queries, we invert the transformation.
// `transform_range_before_linear_transformation` returns the range of values
// [min_bipacked_value..max_bitpacked_value] such that
// f(bitpacked) ∈ [min_value, max_value] <=> bitpacked ∈ [min_bitpacked_value, max_bitpacked_value]
fn transform_range_before_linear_transformation(
stats: &ColumnStats,
range: RangeInclusive<u64>,
) -> Option<RangeInclusive<u64>> {
if range.is_empty() {
return None;
}
if stats.min_value > *range.end() {
return None;
}
if stats.max_value < *range.start() {
return None;
}
let shifted_range =
range.start().saturating_sub(stats.min_value)..=range.end().saturating_sub(stats.min_value);
let start_before_gcd_multiplication: u64 = div_ceil(*shifted_range.start(), stats.gcd);
let end_before_gcd_multiplication: u64 = *shifted_range.end() / stats.gcd;
Some(start_before_gcd_multiplication..=end_before_gcd_multiplication)
}
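A worked example of the inversion described above, with hypothetical stats (min_value = 10, gcd = 5): the value range [17, 32] maps to the bitpacked range [2, 4], since f(2) = 20, f(3) = 25 and f(4) = 30 are exactly the stored values inside [17, 32]. A simplified sketch of the same computation:

use std::ops::RangeInclusive;

// Invert f(bitpacked) = min_value + gcd * bitpacked so a value-space range can be
// filtered directly on the bitpacked representation (sketch, max_value check omitted).
fn invert(min_value: u64, gcd: u64, range: RangeInclusive<u64>) -> Option<RangeInclusive<u64>> {
    if *range.end() < min_value {
        return None;
    }
    let start = (range.start().saturating_sub(min_value) + gcd - 1) / gcd; // div_ceil
    let end = (range.end() - min_value) / gcd;
    if start > end { None } else { Some(start..=end) }
}

fn main() {
    // Stored values are f(b) = 10 + 5 * b, i.e. 10, 15, 20, 25, 30, ...
    assert_eq!(invert(10, 5, 17..=32), Some(2..=4));
    // A range entirely below min_value matches nothing.
    assert_eq!(invert(10, 5, 0..=9), None);
}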
impl ColumnValues for BitpackedReader { impl ColumnValues for BitpackedReader {
#[inline(always)] #[inline(always)]
fn get_val(&self, doc: u32) -> u64 { fn get_val(&self, doc: u32) -> u64 {
@@ -34,6 +76,25 @@ impl ColumnValues for BitpackedReader {
fn num_vals(&self) -> RowId { fn num_vals(&self) -> RowId {
self.stats.num_rows self.stats.num_rows
} }
fn get_row_ids_for_value_range(
&self,
range: RangeInclusive<u64>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
let Some(transformed_range) = transform_range_before_linear_transformation(&self.stats, range)
else {
positions.clear();
return;
};
self.bit_unpacker.get_ids_for_value_range(
transformed_range,
doc_id_range,
&self.data,
positions,
);
}
} }
fn num_bits(stats: &ColumnStats) -> u8 { fn num_bits(stats: &ColumnStats) -> u8 {


@@ -1,3 +1,4 @@
use std::fmt;
use std::fmt::Debug; use std::fmt::Debug;
use std::net::Ipv6Addr; use std::net::Ipv6Addr;
@@ -21,6 +22,22 @@ pub enum ColumnType {
DateTime = 7u8, DateTime = 7u8,
} }
impl fmt::Display for ColumnType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let short_str = match self {
ColumnType::I64 => "i64",
ColumnType::U64 => "u64",
ColumnType::F64 => "f64",
ColumnType::Bytes => "bytes",
ColumnType::Str => "str",
ColumnType::Bool => "bool",
ColumnType::IpAddr => "ip",
ColumnType::DateTime => "datetime",
};
write!(f, "{}", short_str)
}
}
// The order needs to match _exactly_ the order in the enum // The order needs to match _exactly_ the order in the enum
const COLUMN_TYPES: [ColumnType; 8] = [ const COLUMN_TYPES: [ColumnType; 8] = [
ColumnType::I64, ColumnType::I64,


@@ -28,7 +28,7 @@ use crate::{
/// ///
/// See also [README.md]. /// See also [README.md].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
enum ColumnTypeCategory { pub(crate) enum ColumnTypeCategory {
Bool, Bool,
Str, Str,
Numerical, Numerical,
@@ -78,20 +78,23 @@ pub fn merge_columnar(
output: &mut impl io::Write, output: &mut impl io::Write,
) -> io::Result<()> { ) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(output); let mut serializer = ColumnarSerializer::new(output);
let num_rows_per_columnar = columnar_readers
.iter()
.map(|reader| reader.num_rows())
.collect::<Vec<u32>>();
let columns_to_merge = group_columns_for_merge(columnar_readers, required_columns)?; let columns_to_merge = group_columns_for_merge(columnar_readers, required_columns)?;
for ((column_name, column_type), columns) in columns_to_merge { for ((column_name, column_type), columns) in columns_to_merge {
let mut column_serializer = let mut column_serializer =
serializer.serialize_column(column_name.as_bytes(), column_type); serializer.serialize_column(column_name.as_bytes(), column_type);
merge_column( merge_column(
column_type, column_type,
&num_rows_per_columnar,
columns, columns,
&merge_row_order, &merge_row_order,
&mut column_serializer, &mut column_serializer,
)?; )?;
} }
serializer.finalize(merge_row_order.num_rows())?; serializer.finalize(merge_row_order.num_rows())?;
Ok(()) Ok(())
} }
@@ -108,6 +111,7 @@ fn dynamic_column_to_u64_monotonic(dynamic_column: DynamicColumn) -> Option<Colu
fn merge_column( fn merge_column(
column_type: ColumnType, column_type: ColumnType,
num_docs_per_column: &[u32],
columns: Vec<Option<DynamicColumn>>, columns: Vec<Option<DynamicColumn>>,
merge_row_order: &MergeRowOrder, merge_row_order: &MergeRowOrder,
wrt: &mut impl io::Write, wrt: &mut impl io::Write,
@@ -118,17 +122,19 @@ fn merge_column(
| ColumnType::F64 | ColumnType::F64
| ColumnType::DateTime | ColumnType::DateTime
| ColumnType::Bool => { | ColumnType::Bool => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len()); let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Option<Arc<dyn ColumnValues>>> = let mut column_values: Vec<Option<Arc<dyn ColumnValues>>> =
Vec::with_capacity(columns.len()); Vec::with_capacity(columns.len());
for dynamic_column_opt in columns { for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
if let Some(Column { idx, values }) = if let Some(Column { index: idx, values }) =
dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic) dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic)
{ {
column_indexes.push(Some(idx)); column_indexes.push(idx);
column_values.push(Some(values)); column_values.push(Some(values));
} else { } else {
column_indexes.push(None); column_indexes.push(ColumnIndex::Empty {
num_docs: num_docs_per_column[i],
});
column_values.push(None); column_values.push(None);
} }
} }
@@ -142,15 +148,19 @@ fn merge_column(
serialize_column_mappable_to_u64(merged_column_index, &merge_column_values, wrt)?; serialize_column_mappable_to_u64(merged_column_index, &merge_column_values, wrt)?;
} }
ColumnType::IpAddr => { ColumnType::IpAddr => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len()); let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Option<Arc<dyn ColumnValues<Ipv6Addr>>>> = let mut column_values: Vec<Option<Arc<dyn ColumnValues<Ipv6Addr>>>> =
Vec::with_capacity(columns.len()); Vec::with_capacity(columns.len());
for dynamic_column_opt in columns { for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
if let Some(DynamicColumn::IpAddr(Column { idx, values })) = dynamic_column_opt { if let Some(DynamicColumn::IpAddr(Column { index: idx, values })) =
column_indexes.push(Some(idx)); dynamic_column_opt
{
column_indexes.push(idx);
column_values.push(Some(values)); column_values.push(Some(values));
} else { } else {
column_indexes.push(None); column_indexes.push(ColumnIndex::Empty {
num_docs: num_docs_per_column[i],
});
column_values.push(None); column_values.push(None);
} }
} }
@@ -166,20 +176,22 @@ fn merge_column(
serialize_column_mappable_to_u128(merged_column_index, &merge_column_values, wrt)?; serialize_column_mappable_to_u128(merged_column_index, &merge_column_values, wrt)?;
} }
ColumnType::Bytes | ColumnType::Str => { ColumnType::Bytes | ColumnType::Str => {
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len()); let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
let mut bytes_columns: Vec<Option<BytesColumn>> = Vec::with_capacity(columns.len()); let mut bytes_columns: Vec<Option<BytesColumn>> = Vec::with_capacity(columns.len());
for dynamic_column_opt in columns { for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
match dynamic_column_opt { match dynamic_column_opt {
Some(DynamicColumn::Str(str_column)) => { Some(DynamicColumn::Str(str_column)) => {
column_indexes.push(Some(str_column.term_ord_column.idx.clone())); column_indexes.push(str_column.term_ord_column.index.clone());
bytes_columns.push(Some(str_column.into())); bytes_columns.push(Some(str_column.into()));
} }
Some(DynamicColumn::Bytes(bytes_column)) => { Some(DynamicColumn::Bytes(bytes_column)) => {
column_indexes.push(Some(bytes_column.term_ord_column.idx.clone())); column_indexes.push(bytes_column.term_ord_column.index.clone());
bytes_columns.push(Some(bytes_column)); bytes_columns.push(Some(bytes_column));
} }
_ => { _ => {
column_indexes.push(None); column_indexes.push(ColumnIndex::Empty {
num_docs: num_docs_per_column[i],
});
bytes_columns.push(None); bytes_columns.push(None);
} }
} }
@@ -361,8 +373,8 @@ fn coerce_column(column_type: ColumnType, column: DynamicColumn) -> io::Result<D
fn min_max_if_numerical(column: &DynamicColumn) -> Option<(NumericalValue, NumericalValue)> { fn min_max_if_numerical(column: &DynamicColumn) -> Option<(NumericalValue, NumericalValue)> {
match column { match column {
DynamicColumn::I64(column) => Some((column.min_value().into(), column.max_value().into())), DynamicColumn::I64(column) => Some((column.min_value().into(), column.max_value().into())),
DynamicColumn::U64(column) => Some((column.min_value().into(), column.min_value().into())), DynamicColumn::U64(column) => Some((column.min_value().into(), column.max_value().into())),
DynamicColumn::F64(column) => Some((column.min_value().into(), column.min_value().into())), DynamicColumn::F64(column) => Some((column.min_value().into(), column.max_value().into())),
DynamicColumn::Bool(_) DynamicColumn::Bool(_)
| DynamicColumn::IpAddr(_) | DynamicColumn::IpAddr(_)
| DynamicColumn::DateTime(_) | DynamicColumn::DateTime(_)


@@ -1,3 +1,5 @@
use itertools::Itertools;
use super::*; use super::*;
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId}; use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};
@@ -249,6 +251,8 @@ fn test_merge_columnar_texts() {
let cols = columnar_reader.read_columns("texts").unwrap(); let cols = columnar_reader.read_columns("texts").unwrap();
let dynamic_column = cols[0].open().unwrap(); let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { panic!() }; let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
let get_str_for_ord = |ord| { let get_str_for_ord = |ord| {
let mut out = String::new(); let mut out = String::new();
vals.ord_to_str(ord, &mut out).unwrap(); vals.ord_to_str(ord, &mut out).unwrap();
@@ -376,3 +380,93 @@ fn test_merge_columnar_byte_with_missing() {
assert_eq!(get_bytes_for_row(6), vec![b"b".to_vec()]); assert_eq!(get_bytes_for_row(6), vec![b"b".to_vec()]);
assert_eq!(get_bytes_for_row(7), vec![b"a".to_vec(), b"b".to_vec()]); assert_eq!(get_bytes_for_row(7), vec![b"a".to_vec(), b"b".to_vec()]);
} }
#[test]
fn test_merge_columnar_different_types() {
let columnar1 = make_text_columnar_multiple_columns(&[("mixed", &[&["a"]])]);
let columnar2 = make_text_columnar_multiple_columns(&[("mixed", &[&[], &["b"]])]);
let columnar3 = make_columnar("mixed", &[1i64]);
let mut buffer = Vec::new();
let columnars = &[&columnar1, &columnar2, &columnar3];
let stack_merge_order = StackMergeOrder::stack(columnars);
crate::columnar::merge_columnar(
columnars,
&[],
MergeRowOrder::Stack(stack_merge_order),
&mut buffer,
)
.unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_rows(), 4);
assert_eq!(columnar_reader.num_columns(), 2);
let cols = columnar_reader.read_columns("mixed").unwrap();
// numeric column
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::I64(vals) = dynamic_column else { panic!() };
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
assert_eq!(vals.values_for_doc(0).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(1).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(2).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(3).collect_vec(), vec![1]);
assert_eq!(vals.values_for_doc(4).collect_vec(), vec![]);
// text column
let dynamic_column = cols[1].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
let get_str_for_ord = |ord| {
let mut out = String::new();
vals.ord_to_str(ord, &mut out).unwrap();
out
};
assert_eq!(vals.dictionary.num_terms(), 2);
assert_eq!(get_str_for_ord(0), "a");
assert_eq!(get_str_for_ord(1), "b");
let get_str_for_row = |row_id| {
let term_ords: Vec<String> = vals
.term_ords(row_id)
.map(|el| {
let mut out = String::new();
vals.ord_to_str(el, &mut out).unwrap();
out
})
.collect();
term_ords
};
assert_eq!(get_str_for_row(0), vec!["a".to_string()]);
assert_eq!(get_str_for_row(1), Vec::<String>::new());
assert_eq!(get_str_for_row(2), vec!["b".to_string()]);
assert_eq!(get_str_for_row(3), Vec::<String>::new());
}
#[test]
fn test_merge_columnar_different_empty_cardinality() {
let columnar1 = make_text_columnar_multiple_columns(&[("mixed", &[&["a"]])]);
let columnar2 = make_columnar("mixed", &[1i64]);
let mut buffer = Vec::new();
let columnars = &[&columnar1, &columnar2];
let stack_merge_order = StackMergeOrder::stack(columnars);
crate::columnar::merge_columnar(
columnars,
&[],
MergeRowOrder::Stack(stack_merge_order),
&mut buffer,
)
.unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_rows(), 2);
assert_eq!(columnar_reader.num_columns(), 2);
let cols = columnar_reader.read_columns("mixed").unwrap();
// numeric column
let dynamic_column = cols[0].open().unwrap();
assert_eq!(dynamic_column.get_cardinality(), Cardinality::Optional);
// text column
let dynamic_column = cols[1].open().unwrap();
assert_eq!(dynamic_column.get_cardinality(), Cardinality::Optional);
}


@@ -5,6 +5,8 @@ mod reader;
mod writer; mod writer;
pub use column_type::{ColumnType, HasAssociatedColumnType}; pub use column_type::{ColumnType, HasAssociatedColumnType};
#[cfg(test)]
pub(crate) use merge::ColumnTypeCategory;
pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder}; pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
pub use reader::ColumnarReader; pub use reader::ColumnarReader;
pub use writer::ColumnarWriter; pub use writer::ColumnarWriter;


@@ -1,4 +1,4 @@
use std::{io, mem}; use std::{fmt, io, mem};
use common::file_slice::FileSlice; use common::file_slice::FileSlice;
use common::BinarySerializable; use common::BinarySerializable;
@@ -21,6 +21,32 @@ pub struct ColumnarReader {
num_rows: RowId, num_rows: RowId,
} }
impl fmt::Debug for ColumnarReader {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let num_rows = self.num_rows();
let columns = self.list_columns().unwrap();
let num_cols = columns.len();
let mut debug_struct = f.debug_struct("Columnar");
debug_struct
.field("num_rows", &num_rows)
.field("num_cols", &num_cols);
for (col_name, dynamic_column_handle) in columns.into_iter().take(5) {
let col = dynamic_column_handle.open().unwrap();
if col.num_values() > 10 {
debug_struct.field(&col_name, &"..");
} else {
debug_struct.field(&col_name, &col);
}
}
if num_cols > 5 {
debug_struct.finish_non_exhaustive()?;
} else {
debug_struct.finish()?;
}
Ok(())
}
}
/// Function used by both the async and sync code for listing columns. /// Function used by both the async and sync code for listing columns.
/// It takes a stream from the column sstable and returns the list of /// It takes a stream from the column sstable and returns the list of
/// `DynamicColumn` available in it. /// `DynamicColumn` available in it.


@@ -104,16 +104,25 @@ impl ColumnarWriter {
}; };
let mut symbols_buffer = Vec::new(); let mut symbols_buffer = Vec::new();
let mut values = Vec::new(); let mut values = Vec::new();
let mut last_doc_opt: Option<RowId> = None; let mut start_doc_check_fill = 0;
let mut current_doc_opt: Option<RowId> = None;
// Assumption: NewDoc will never call the same doc twice and is strictly increasing between
// calls
for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) { for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) {
match op { match op {
ColumnOperation::NewDoc(doc) => { ColumnOperation::NewDoc(doc) => {
last_doc_opt = Some(doc); current_doc_opt = Some(doc);
} }
ColumnOperation::Value(numerical_value) => { ColumnOperation::Value(numerical_value) => {
if let Some(last_doc) = last_doc_opt { if let Some(current_doc) = current_doc_opt {
// Fill up with 0.0 since last doc
values.extend((start_doc_check_fill..current_doc).map(|doc| (0.0, doc)));
start_doc_check_fill = current_doc + 1;
// handle multi values
current_doc_opt = None;
let score: f32 = f64::coerce(numerical_value) as f32; let score: f32 = f64::coerce(numerical_value) as f32;
values.push((score, last_doc)); values.push((score, current_doc));
} }
} }
} }
@@ -123,9 +132,9 @@ impl ColumnarWriter {
} }
values.sort_by(|(left_score, _), (right_score, _)| { values.sort_by(|(left_score, _), (right_score, _)| {
if reversed { if reversed {
right_score.partial_cmp(left_score).unwrap() right_score.total_cmp(left_score)
} else { } else {
left_score.partial_cmp(right_score).unwrap() left_score.total_cmp(right_score)
} }
}); });
values.into_iter().map(|(_score, doc)| doc).collect() values.into_iter().map(|(_score, doc)| doc).collect()
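The switch from `partial_cmp(..).unwrap()` to `total_cmp` above removes a potential panic: `partial_cmp` returns `None` when a NaN score is involved, while `total_cmp` defines a total order. A small sketch, not tied to the writer's actual data:

fn main() {
    // f32::total_cmp sorts a (positive) NaN after every other value instead of
    // panicking the way partial_cmp(...).unwrap() does when NaN is involved.
    let mut scores = vec![2.0f32, f32::NAN, 1.0];
    // scores.sort_by(|a, b| a.partial_cmp(b).unwrap()); // would panic on the NaN entry
    scores.sort_by(|a, b| a.total_cmp(b));
    assert_eq!(scores[0], 1.0);
    assert_eq!(scores[1], 2.0);
    assert!(scores[2].is_nan());
}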


@@ -1,6 +1,6 @@
use std::io;
use std::net::Ipv6Addr; use std::net::Ipv6Addr;
use std::sync::Arc; use std::sync::Arc;
use std::{fmt, io};
use common::file_slice::FileSlice; use common::file_slice::FileSlice;
use common::{ByteCount, DateTime, HasLen, OwnedBytes}; use common::{ByteCount, DateTime, HasLen, OwnedBytes};
@@ -8,7 +8,7 @@ use common::{ByteCount, DateTime, HasLen, OwnedBytes};
use crate::column::{BytesColumn, Column, StrColumn}; use crate::column::{BytesColumn, Column, StrColumn};
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn}; use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
use crate::columnar::ColumnType; use crate::columnar::ColumnType;
use crate::{Cardinality, NumericalType}; use crate::{Cardinality, ColumnIndex, NumericalType};
#[derive(Clone)] #[derive(Clone)]
pub enum DynamicColumn { pub enum DynamicColumn {
@@ -22,19 +22,54 @@ pub enum DynamicColumn {
Str(StrColumn), Str(StrColumn),
} }
impl DynamicColumn { impl fmt::Debug for DynamicColumn {
pub fn get_cardinality(&self) -> Cardinality { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{} {} |", self.get_cardinality(), self.column_type())?;
match self { match self {
DynamicColumn::Bool(c) => c.get_cardinality(), DynamicColumn::Bool(col) => write!(f, " {:?}", col)?,
DynamicColumn::I64(c) => c.get_cardinality(), DynamicColumn::I64(col) => write!(f, " {:?}", col)?,
DynamicColumn::U64(c) => c.get_cardinality(), DynamicColumn::U64(col) => write!(f, " {:?}", col)?,
DynamicColumn::F64(c) => c.get_cardinality(), DynamicColumn::F64(col) => write!(f, "{:?}", col)?,
DynamicColumn::IpAddr(c) => c.get_cardinality(), DynamicColumn::IpAddr(col) => write!(f, "{:?}", col)?,
DynamicColumn::DateTime(c) => c.get_cardinality(), DynamicColumn::DateTime(col) => write!(f, "{:?}", col)?,
DynamicColumn::Bytes(c) => c.ords().get_cardinality(), DynamicColumn::Bytes(col) => write!(f, "{:?}", col)?,
DynamicColumn::Str(c) => c.ords().get_cardinality(), DynamicColumn::Str(col) => write!(f, "{:?}", col)?,
}
write!(f, "]")
}
}
impl DynamicColumn {
pub fn column_index(&self) -> &ColumnIndex {
match self {
DynamicColumn::Bool(c) => &c.index,
DynamicColumn::I64(c) => &c.index,
DynamicColumn::U64(c) => &c.index,
DynamicColumn::F64(c) => &c.index,
DynamicColumn::IpAddr(c) => &c.index,
DynamicColumn::DateTime(c) => &c.index,
DynamicColumn::Bytes(c) => &c.ords().index,
DynamicColumn::Str(c) => &c.ords().index,
} }
} }
pub fn get_cardinality(&self) -> Cardinality {
self.column_index().get_cardinality()
}
pub fn num_values(&self) -> u32 {
match self {
DynamicColumn::Bool(c) => c.values.num_vals(),
DynamicColumn::I64(c) => c.values.num_vals(),
DynamicColumn::U64(c) => c.values.num_vals(),
DynamicColumn::F64(c) => c.values.num_vals(),
DynamicColumn::IpAddr(c) => c.values.num_vals(),
DynamicColumn::DateTime(c) => c.values.num_vals(),
DynamicColumn::Bytes(c) => c.ords().values.num_vals(),
DynamicColumn::Str(c) => c.ords().values.num_vals(),
}
}
pub fn column_type(&self) -> ColumnType { pub fn column_type(&self) -> ColumnType {
match self { match self {
DynamicColumn::Bool(_) => ColumnType::Bool, DynamicColumn::Bool(_) => ColumnType::Bool,
@@ -73,11 +108,11 @@ impl DynamicColumn {
fn coerce_to_f64(self) -> Option<DynamicColumn> { fn coerce_to_f64(self) -> Option<DynamicColumn> {
match self { match self {
DynamicColumn::I64(column) => Some(DynamicColumn::F64(Column { DynamicColumn::I64(column) => Some(DynamicColumn::F64(Column {
idx: column.idx, index: column.index,
values: Arc::new(monotonic_map_column(column.values, MapI64ToF64)), values: Arc::new(monotonic_map_column(column.values, MapI64ToF64)),
})), })),
DynamicColumn::U64(column) => Some(DynamicColumn::F64(Column { DynamicColumn::U64(column) => Some(DynamicColumn::F64(Column {
idx: column.idx, index: column.index,
values: Arc::new(monotonic_map_column(column.values, MapU64ToF64)), values: Arc::new(monotonic_map_column(column.values, MapU64ToF64)),
})), })),
DynamicColumn::F64(_) => Some(self), DynamicColumn::F64(_) => Some(self),
@@ -91,7 +126,7 @@ impl DynamicColumn {
return None; return None;
} }
Some(DynamicColumn::I64(Column { Some(DynamicColumn::I64(Column {
idx: column.idx, index: column.index,
values: Arc::new(monotonic_map_column(column.values, MapU64ToI64)), values: Arc::new(monotonic_map_column(column.values, MapU64ToI64)),
})) }))
} }
@@ -106,7 +141,7 @@ impl DynamicColumn {
return None; return None;
} }
Some(DynamicColumn::U64(Column { Some(DynamicColumn::U64(Column {
idx: column.idx, index: column.index,
values: Arc::new(monotonic_map_column(column.values, MapI64ToU64)), values: Arc::new(monotonic_map_column(column.values, MapI64ToU64)),
})) }))
} }


@@ -7,8 +7,10 @@ extern crate more_asserts;
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
extern crate test; extern crate test;
use std::fmt::Display;
use std::io; use std::io;
mod block_accessor;
mod column; mod column;
mod column_index; mod column_index;
pub mod column_values; pub mod column_values;
@@ -19,9 +21,12 @@ mod iterable;
pub(crate) mod utils; pub(crate) mod utils;
mod value; mod value;
pub use block_accessor::ColumnBlockAccessor;
pub use column::{BytesColumn, Column, StrColumn}; pub use column::{BytesColumn, Column, StrColumn};
pub use column_index::ColumnIndex; pub use column_index::ColumnIndex;
pub use column_values::{ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64}; pub use column_values::{
ColumnValues, EmptyColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64,
};
pub use columnar::{ pub use columnar::{
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType, merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder,
@@ -71,6 +76,17 @@ pub enum Cardinality {
Multivalued = 2, Multivalued = 2,
} }
impl Display for Cardinality {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let short_str = match self {
Cardinality::Full => "full",
Cardinality::Optional => "opt",
Cardinality::Multivalued => "mult",
};
write!(f, "{short_str}")
}
}
impl Cardinality { impl Cardinality {
pub fn is_optional(&self) -> bool { pub fn is_optional(&self) -> bool {
matches!(self, Cardinality::Optional) matches!(self, Cardinality::Optional)
@@ -81,7 +97,6 @@ impl Cardinality {
pub(crate) fn to_code(self) -> u8 { pub(crate) fn to_code(self) -> u8 {
self as u8 self as u8
} }
pub(crate) fn try_from_code(code: u8) -> Result<Cardinality, InvalidData> { pub(crate) fn try_from_code(code: u8) -> Result<Cardinality, InvalidData> {
match code { match code {
0 => Ok(Cardinality::Full), 0 => Ok(Cardinality::Full),


@@ -1,10 +1,17 @@
use std::collections::HashMap;
use std::fmt::Debug;
use std::net::Ipv6Addr; use std::net::Ipv6Addr;
use common::DateTime;
use proptest::prelude::*;
use crate::column_values::MonotonicallyMappableToU128; use crate::column_values::MonotonicallyMappableToU128;
use crate::columnar::ColumnType; use crate::columnar::{ColumnType, ColumnTypeCategory};
use crate::dynamic_column::{DynamicColumn, DynamicColumnHandle}; use crate::dynamic_column::{DynamicColumn, DynamicColumnHandle};
use crate::value::NumericalValue; use crate::value::{Coerce, NumericalValue};
use crate::{Cardinality, ColumnarReader, ColumnarWriter}; use crate::{
BytesColumn, Cardinality, Column, ColumnarReader, ColumnarWriter, RowId, StackMergeOrder,
};
#[test] #[test]
fn test_dataframe_writer_str() { fn test_dataframe_writer_str() {
@@ -17,7 +24,7 @@ fn test_dataframe_writer_str() {
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
assert_eq!(cols.len(), 1); assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 158); assert_eq!(cols[0].num_bytes(), 89);
} }
#[test] #[test]
@@ -31,7 +38,7 @@ fn test_dataframe_writer_bytes() {
assert_eq!(columnar.num_columns(), 1); assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap(); let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
assert_eq!(cols.len(), 1); assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 158); assert_eq!(cols[0].num_bytes(), 89);
} }
#[test] #[test]
@@ -126,7 +133,7 @@ fn test_dataframe_writer_numerical() {
assert_eq!(cols[0].num_bytes(), 33); assert_eq!(cols[0].num_bytes(), 33);
let column = cols[0].open().unwrap(); let column = cols[0].open().unwrap();
let DynamicColumn::I64(column_i64) = column else { panic!(); }; let DynamicColumn::I64(column_i64) = column else { panic!(); };
assert_eq!(column_i64.idx.get_cardinality(), Cardinality::Optional); assert_eq!(column_i64.index.get_cardinality(), Cardinality::Optional);
assert_eq!(column_i64.first(0), None); assert_eq!(column_i64.first(0), None);
assert_eq!(column_i64.first(1), Some(12i64)); assert_eq!(column_i64.first(1), Some(12i64));
assert_eq!(column_i64.first(2), Some(13i64)); assert_eq!(column_i64.first(2), Some(13i64));
@@ -136,6 +143,46 @@ fn test_dataframe_writer_numerical() {
assert_eq!(column_i64.first(6), None); //< we can change the spec for that one. assert_eq!(column_i64.first(6), None); //< we can change the spec for that one.
} }
#[test]
fn test_dataframe_sort_by_full() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(0u32, "value", NumericalValue::U64(1));
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
let data = dataframe_writer.sort_order("value", 2, false);
assert_eq!(data, vec![0, 1]);
}
#[test]
fn test_dataframe_sort_by_opt() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(3));
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(2));
let data = dataframe_writer.sort_order("value", 5, false);
// docs 0, 2 and 4 have no recorded value, so they sort with the fill value 0.0
assert_eq!(data, vec![0, 2, 4, 3, 1]);
let data = dataframe_writer.sort_order("value", 5, true);
assert_eq!(
data,
vec![4, 2, 0, 3, 1].into_iter().rev().collect::<Vec<_>>()
);
}
#[test]
fn test_dataframe_sort_by_multi() {
let mut dataframe_writer = ColumnarWriter::default();
// valid for sort
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
// those are ignored for sort
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
// valid for sort
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(3));
// ignored, would change sort order
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(1));
let data = dataframe_writer.sort_order("value", 4, false);
assert_eq!(data, vec![0, 2, 1, 3]);
}
#[test] #[test]
fn test_dictionary_encoded_str() { fn test_dictionary_encoded_str() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
@@ -210,3 +257,497 @@ fn test_dictionary_encoded_bytes() {
.unwrap(); .unwrap();
assert_eq!(term_buffer, b"b"); assert_eq!(term_buffer, b"b");
} }
fn num_strategy() -> impl Strategy<Value = NumericalValue> {
prop_oneof![
Just(NumericalValue::U64(0u64)),
Just(NumericalValue::U64(u64::MAX)),
Just(NumericalValue::I64(0i64)),
Just(NumericalValue::I64(i64::MIN)),
Just(NumericalValue::I64(i64::MAX)),
Just(NumericalValue::F64(1.2f64)),
]
}
#[derive(Debug, Clone, Copy)]
enum ColumnValue {
Str(&'static str),
Bytes(&'static [u8]),
Numerical(NumericalValue),
IpAddr(Ipv6Addr),
Bool(bool),
DateTime(DateTime),
}
impl ColumnValue {
pub(crate) fn column_type_category(&self) -> ColumnTypeCategory {
match self {
ColumnValue::Str(_) => ColumnTypeCategory::Str,
ColumnValue::Bytes(_) => ColumnTypeCategory::Bytes,
ColumnValue::Numerical(_) => ColumnTypeCategory::Numerical,
ColumnValue::IpAddr(_) => ColumnTypeCategory::IpAddr,
ColumnValue::Bool(_) => ColumnTypeCategory::Bool,
ColumnValue::DateTime(_) => ColumnTypeCategory::DateTime,
}
}
}
fn column_name_strategy() -> impl Strategy<Value = &'static str> {
prop_oneof![Just("c1"), Just("c2")]
}
fn string_strategy() -> impl Strategy<Value = &'static str> {
prop_oneof![Just("a"), Just("b")]
}
fn bytes_strategy() -> impl Strategy<Value = &'static [u8]> {
prop_oneof![Just(&[0u8][..]), Just(&[1u8][..])]
}
// A random column value
fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
prop_oneof![
10 => string_strategy().prop_map(|s| ColumnValue::Str(s)),
1 => bytes_strategy().prop_map(|b| ColumnValue::Bytes(b)),
40 => num_strategy().prop_map(|n| ColumnValue::Numerical(n)),
1 => (1u16..3u16).prop_map(|ip_addr_byte| ColumnValue::IpAddr(Ipv6Addr::new(
127,
0,
0,
0,
0,
0,
0,
ip_addr_byte
))),
1 => any::<bool>().prop_map(|b| ColumnValue::Bool(b)),
1 => (0_679_723_993i64..1_679_723_995i64)
.prop_map(|val| { ColumnValue::DateTime(DateTime::from_timestamp_secs(val)) })
]
}
// A document contains up to 4 values.
fn doc_strategy() -> impl Strategy<Value = Vec<(&'static str, ColumnValue)>> {
proptest::collection::vec((column_name_strategy(), column_value_strategy()), 0..4)
}
// A columnar contains up to 2 docs.
fn columnar_docs_strategy() -> impl Strategy<Value = Vec<Vec<(&'static str, ColumnValue)>>> {
proptest::collection::vec(doc_strategy(), 0..=2)
}
fn columnar_docs_and_mapping_strategy(
) -> impl Strategy<Value = (Vec<Vec<(&'static str, ColumnValue)>>, Vec<RowId>)> {
columnar_docs_strategy().prop_flat_map(|docs| {
permutation_strategy(docs.len()).prop_map(move |permutation| (docs.clone(), permutation))
})
}
fn permutation_strategy(n: usize) -> impl Strategy<Value = Vec<RowId>> {
Just((0u32..n as RowId).collect()).prop_shuffle()
}
fn build_columnar_with_mapping(
docs: &[Vec<(&'static str, ColumnValue)>],
old_to_new_row_ids_opt: Option<&[RowId]>,
) -> ColumnarReader {
let num_docs = docs.len() as u32;
let mut buffer = Vec::new();
let mut columnar_writer = ColumnarWriter::default();
for (doc_id, vals) in docs.iter().enumerate() {
for (column_name, col_val) in vals {
match *col_val {
ColumnValue::Str(str_val) => {
columnar_writer.record_str(doc_id as u32, column_name, str_val);
}
ColumnValue::Bytes(bytes) => {
columnar_writer.record_bytes(doc_id as u32, column_name, bytes)
}
ColumnValue::Numerical(num) => {
columnar_writer.record_numerical(doc_id as u32, column_name, num);
}
ColumnValue::IpAddr(ip_addr) => {
columnar_writer.record_ip_addr(doc_id as u32, column_name, ip_addr);
}
ColumnValue::Bool(bool_val) => {
columnar_writer.record_bool(doc_id as u32, column_name, bool_val);
}
ColumnValue::DateTime(date_time) => {
columnar_writer.record_datetime(doc_id as u32, column_name, date_time);
}
}
}
}
columnar_writer
.serialize(num_docs, old_to_new_row_ids_opt, &mut buffer)
.unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
columnar_reader
}
fn build_columnar(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
build_columnar_with_mapping(docs, None)
}
fn assert_columnar_eq(left: &ColumnarReader, right: &ColumnarReader) {
assert_eq!(left.num_rows(), right.num_rows());
let left_columns = left.list_columns().unwrap();
let right_columns = right.list_columns().unwrap();
assert_eq!(left_columns.len(), right_columns.len());
for i in 0..left_columns.len() {
assert_eq!(left_columns[i].0, right_columns[i].0);
let left_column = left_columns[i].1.open().unwrap();
let right_column = right_columns[i].1.open().unwrap();
assert_dyn_column_eq(&left_column, &right_column);
}
}
fn assert_column_eq<T: Copy + PartialOrd + Debug + Send + Sync + 'static>(
left: &Column<T>,
right: &Column<T>,
) {
assert_eq!(left.get_cardinality(), right.get_cardinality());
assert_eq!(left.num_docs(), right.num_docs());
let num_docs = left.num_docs();
for doc in 0..num_docs {
assert_eq!(
left.index.value_row_ids(doc),
right.index.value_row_ids(doc)
);
}
assert_eq!(left.values.num_vals(), right.values.num_vals());
let num_vals = left.values.num_vals();
for i in 0..num_vals {
assert_eq!(left.values.get_val(i), right.values.get_val(i));
}
}
fn assert_bytes_column_eq(left: &BytesColumn, right: &BytesColumn) {
assert_eq!(
left.term_ord_column.get_cardinality(),
right.term_ord_column.get_cardinality()
);
assert_eq!(left.num_rows(), right.num_rows());
assert_column_eq(&left.term_ord_column, &right.term_ord_column);
assert_eq!(left.dictionary.num_terms(), right.dictionary.num_terms());
let num_terms = left.dictionary.num_terms();
let mut left_terms = left.dictionary.stream().unwrap();
let mut right_terms = right.dictionary.stream().unwrap();
for _ in 0..num_terms {
assert!(left_terms.advance());
assert!(right_terms.advance());
assert_eq!(left_terms.key(), right_terms.key());
}
assert!(!left_terms.advance());
assert!(!right_terms.advance());
}
fn assert_dyn_column_eq(left_dyn_column: &DynamicColumn, right_dyn_column: &DynamicColumn) {
assert_eq!(
&left_dyn_column.column_type(),
&right_dyn_column.column_type()
);
assert_eq!(
&left_dyn_column.get_cardinality(),
&right_dyn_column.get_cardinality()
);
match &(left_dyn_column, right_dyn_column) {
(DynamicColumn::Bool(left_col), DynamicColumn::Bool(right_col)) => {
assert_column_eq(left_col, right_col);
}
(DynamicColumn::I64(left_col), DynamicColumn::I64(right_col)) => {
assert_column_eq(left_col, right_col);
}
(DynamicColumn::U64(left_col), DynamicColumn::U64(right_col)) => {
assert_column_eq(left_col, right_col);
}
(DynamicColumn::F64(left_col), DynamicColumn::F64(right_col)) => {
assert_column_eq(left_col, right_col);
}
(DynamicColumn::DateTime(left_col), DynamicColumn::DateTime(right_col)) => {
assert_column_eq(left_col, right_col);
}
(DynamicColumn::IpAddr(left_col), DynamicColumn::IpAddr(right_col)) => {
assert_column_eq(left_col, right_col);
}
(DynamicColumn::Bytes(left_col), DynamicColumn::Bytes(right_col)) => {
assert_bytes_column_eq(left_col, right_col);
}
(DynamicColumn::Str(left_col), DynamicColumn::Str(right_col)) => {
assert_bytes_column_eq(left_col, right_col);
}
_ => {
unreachable!()
}
}
}
trait AssertEqualToColumnValue {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue);
}
impl AssertEqualToColumnValue for bool {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::Bool(val) = column_value else { panic!() };
assert_eq!(self, val);
}
}
impl AssertEqualToColumnValue for Ipv6Addr {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::IpAddr(val) = column_value else { panic!() };
assert_eq!(self, val);
}
}
impl<T: Coerce + PartialEq + Debug + Into<NumericalValue>> AssertEqualToColumnValue for T {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::Numerical(num) = column_value else { panic!() };
assert_eq!(self, &T::coerce(*num));
}
}
impl AssertEqualToColumnValue for DateTime {
fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
let ColumnValue::DateTime(dt) = column_value else { panic!() };
assert_eq!(self, dt);
}
}
fn assert_column_values<
T: AssertEqualToColumnValue + PartialEq + Copy + PartialOrd + Debug + Send + Sync + 'static,
>(
col: &Column<T>,
expected: &HashMap<u32, Vec<&ColumnValue>>,
) {
let mut num_non_empty_rows = 0;
for doc in 0..col.num_docs() {
let doc_vals: Vec<T> = col.values_for_doc(doc).collect();
if doc_vals.is_empty() {
continue;
}
num_non_empty_rows += 1;
let expected_vals = expected.get(&doc).unwrap();
assert_eq!(doc_vals.len(), expected_vals.len());
for (val, &expected) in doc_vals.iter().zip(expected_vals.iter()) {
val.assert_equal_to_column_value(expected)
}
}
assert_eq!(num_non_empty_rows, expected.len());
}
fn assert_bytes_column_values(
col: &BytesColumn,
expected: &HashMap<u32, Vec<&ColumnValue>>,
is_str: bool,
) {
let mut num_non_empty_rows = 0;
let mut buffer = Vec::new();
for doc in 0..col.term_ord_column.num_docs() {
let doc_vals: Vec<u64> = col.term_ords(doc).collect();
if doc_vals.is_empty() {
continue;
}
let expected_vals = expected.get(&doc).unwrap();
assert_eq!(doc_vals.len(), expected_vals.len());
for (&expected_col_val, &ord) in expected_vals.iter().zip(&doc_vals) {
col.ord_to_bytes(ord, &mut buffer).unwrap();
match expected_col_val {
ColumnValue::Str(str_val) => {
assert!(is_str);
assert_eq!(str_val.as_bytes(), &buffer);
}
ColumnValue::Bytes(bytes_val) => {
assert!(!is_str);
assert_eq!(bytes_val, &buffer);
}
_ => {
panic!();
}
}
}
num_non_empty_rows += 1;
}
assert_eq!(num_non_empty_rows, expected.len());
}
// This proptest attempts to build a tiny columnar based on up to 3 rows, and checks that the
// resulting columnar matches the row data.
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_single_columnar_builder_proptest(docs in columnar_docs_strategy()) {
let columnar = build_columnar(&docs[..]);
assert_eq!(columnar.num_rows() as usize, docs.len());
let mut expected_columns: HashMap<(&str, ColumnTypeCategory), HashMap<u32, Vec<&ColumnValue>> > = Default::default();
for (doc_id, doc_vals) in docs.iter().enumerate() {
for (col_name, col_val) in doc_vals {
expected_columns
.entry((col_name, col_val.column_type_category()))
.or_default()
.entry(doc_id as u32)
.or_default()
.push(col_val);
}
}
let column_list = columnar.list_columns().unwrap();
assert_eq!(expected_columns.len(), column_list.len());
for (column_name, column) in column_list {
let dynamic_column = column.open().unwrap();
let col_category: ColumnTypeCategory = dynamic_column.column_type().into();
let expected_col_values: &HashMap<u32, Vec<&ColumnValue>> = expected_columns.get(&(column_name.as_str(), col_category)).unwrap();
match &dynamic_column {
DynamicColumn::Bool(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::I64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::U64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::F64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::IpAddr(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::DateTime(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::Bytes(col) =>
assert_bytes_column_values(col, expected_col_values, false),
DynamicColumn::Str(col) =>
assert_bytes_column_values(col, expected_col_values, true),
}
}
}
}
// Same as `test_single_columnar_builder_proptest` but with a shuffling mapping.
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_single_columnar_builder_with_shuffle_proptest((docs, mapping) in columnar_docs_and_mapping_strategy()) {
let columnar = build_columnar_with_mapping(&docs[..], Some(&mapping));
assert_eq!(columnar.num_rows() as usize, docs.len());
let mut expected_columns: HashMap<(&str, ColumnTypeCategory), HashMap<u32, Vec<&ColumnValue>> > = Default::default();
for (doc_id, doc_vals) in docs.iter().enumerate() {
for (col_name, col_val) in doc_vals {
expected_columns
.entry((col_name, col_val.column_type_category()))
.or_default()
.entry(mapping[doc_id])
.or_default()
.push(col_val);
}
}
let column_list = columnar.list_columns().unwrap();
assert_eq!(expected_columns.len(), column_list.len());
for (column_name, column) in column_list {
let dynamic_column = column.open().unwrap();
let col_category: ColumnTypeCategory = dynamic_column.column_type().into();
let expected_col_values: &HashMap<u32, Vec<&ColumnValue>> = expected_columns.get(&(column_name.as_str(), col_category)).unwrap();
for _doc_id in 0..columnar.num_rows() {
match &dynamic_column {
DynamicColumn::Bool(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::I64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::U64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::F64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::IpAddr(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::DateTime(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::Bytes(col) =>
assert_bytes_column_values(col, expected_col_values, false),
DynamicColumn::Str(col) =>
assert_bytes_column_values(col, expected_col_values, true),
}
}
}
}
}
// This test creates 2 or 3 random small columnars and attempts to merge them.
// It compares the resulting merged columnar with what would have been obtained by building the
// columnar from the concatenated rows to begin with.
proptest! {
#![proptest_config(ProptestConfig::with_cases(1000))]
#[test]
fn test_columnar_merge_proptest(columnar_docs in proptest::collection::vec(columnar_docs_strategy(), 2..=3)) {
let columnar_readers: Vec<ColumnarReader> = columnar_docs.iter()
.map(|docs| build_columnar(&docs[..]))
.collect::<Vec<_>>();
let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
let mut output: Vec<u8> = Vec::new();
let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]).into();
crate::merge_columnar(&columnar_readers_arr[..], &[], stack_merge_order, &mut output).unwrap();
let merged_columnar = ColumnarReader::open(output).unwrap();
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().cloned().flatten().collect();
let expected_merged_columnar = build_columnar(&concat_rows[..]);
assert_columnar_eq(&merged_columnar, &expected_merged_columnar);
}
}
#[test]
fn test_columnar_merging_empty_columnar() {
let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> =
vec![vec![], vec![vec![("c1", ColumnValue::Str("a"))]]];
let columnar_readers: Vec<ColumnarReader> = columnar_docs
.iter()
.map(|docs| build_columnar(&docs[..]))
.collect::<Vec<_>>();
let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
let mut output: Vec<u8> = Vec::new();
let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]);
crate::merge_columnar(
&columnar_readers_arr[..],
&[],
crate::MergeRowOrder::Stack(stack_merge_order),
&mut output,
)
.unwrap();
let merged_columnar = ColumnarReader::open(output).unwrap();
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
columnar_docs.iter().cloned().flatten().collect();
let expected_merged_columnar = build_columnar(&concat_rows[..]);
assert_columnar_eq(&merged_columnar, &expected_merged_columnar);
}
#[test]
fn test_columnar_merging_number_columns() {
let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> = vec![
// columnar 1
vec![
// doc 1.1
vec![("c2", ColumnValue::Numerical(0i64.into()))],
],
// columnar2
vec![
// doc 2.1
vec![("c2", ColumnValue::Numerical(0u64.into()))],
// doc 2.2
vec![("c2", ColumnValue::Numerical(u64::MAX.into()))],
],
];
let columnar_readers: Vec<ColumnarReader> = columnar_docs
.iter()
.map(|docs| build_columnar(&docs[..]))
.collect::<Vec<_>>();
let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
let mut output: Vec<u8> = Vec::new();
let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]);
crate::merge_columnar(
&columnar_readers_arr[..],
&[],
crate::MergeRowOrder::Stack(stack_merge_order),
&mut output,
)
.unwrap();
let merged_columnar = ColumnarReader::open(output).unwrap();
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
columnar_docs.iter().cloned().flatten().collect();
let expected_merged_columnar = build_columnar(&concat_rows[..]);
assert_columnar_eq(&merged_columnar, &expected_merged_columnar);
}
// TODO add non trivial remap and merge
// TODO test required_columns
// TODO document edge case: required_columns incompatible with values.

View File

@@ -36,6 +36,16 @@ pub struct DateTime {
}
impl DateTime {
/// Minimum possible `DateTime` value.
pub const MIN: DateTime = DateTime {
timestamp_micros: i64::MIN,
};
/// Maximum possible `DateTime` value.
pub const MAX: DateTime = DateTime {
timestamp_micros: i64::MAX,
};
/// Create new from UNIX timestamp in seconds
pub const fn from_timestamp_secs(seconds: i64) -> Self {
Self {
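// Not part of this diff: a small sketch of the new constants used as a fold sentinel,
// assuming `DateTime` ordering follows its `timestamp_micros`.
fn max_timestamp(dates: &[DateTime]) -> DateTime {
    let mut max_so_far = DateTime::MIN;
    for &dt in dates {
        if dt > max_so_far {
            max_so_far = dt;
        }
    }
    max_so_far
}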

View File

@@ -0,0 +1,63 @@
use std::io::{self, Read, Write};
use crate::BinarySerializable;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
pub enum DictionaryKind {
Fst = 1,
SSTable = 2,
}
#[derive(Debug, Clone, PartialEq)]
pub struct DictionaryFooter {
pub kind: DictionaryKind,
pub version: u32,
}
impl DictionaryFooter {
pub fn verify_equal(&self, other: &DictionaryFooter) -> io::Result<()> {
if self.kind != other.kind {
return Err(io::Error::new(
io::ErrorKind::Other,
format!(
"Invalid dictionary type, expected {:?}, found {:?}",
self.kind, other.kind
),
));
}
if self.version != other.version {
return Err(io::Error::new(
io::ErrorKind::Other,
format!(
"Unsuported dictionary version, expected {}, found {}",
self.version, other.version
),
));
}
Ok(())
}
}
impl BinarySerializable for DictionaryFooter {
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
self.version.serialize(writer)?;
(self.kind as u32).serialize(writer)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let version = u32::deserialize(reader)?;
let kind = u32::deserialize(reader)?;
let kind = match kind {
1 => DictionaryKind::Fst,
2 => DictionaryKind::SSTable,
_ => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("invalid dictionary kind: {kind}"),
))
}
};
Ok(DictionaryFooter { kind, version })
}
}
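// Not part of this diff: a brief round-trip sketch of the footer. The crate path and the
// version number are illustrative assumptions, not values taken from this change.
use std::io;
use common::{BinarySerializable, DictionaryFooter, DictionaryKind};

fn footer_roundtrip() -> io::Result<()> {
    let footer = DictionaryFooter { kind: DictionaryKind::SSTable, version: 2 };
    let mut buf: Vec<u8> = Vec::new();
    footer.serialize(&mut buf)?; // writes version (u32), then kind (u32), little-endian
    let decoded = DictionaryFooter::deserialize(&mut &buf[..])?;
    decoded.verify_equal(&footer)?; // would error on a kind or version mismatch
    Ok(())
}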

View File

@@ -7,6 +7,7 @@ pub use byteorder::LittleEndian as Endianness;
mod bitset;
mod byte_count;
mod datetime;
mod dictionary_footer;
pub mod file_slice;
mod group_by;
mod serialize;
@@ -15,6 +16,7 @@ mod writer;
pub use bitset::*;
pub use byte_count::ByteCount;
pub use datetime::{DatePrecision, DateTime};
pub use dictionary_footer::*;
pub use group_by::GroupByIteratorExtended;
pub use ownedbytes::{OwnedBytes, StableDeref};
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};

View File

@@ -42,7 +42,7 @@ fn main() -> tantivy::Result<()> {
.set_index_option(IndexRecordOption::WithFreqs)
.set_tokenizer("raw"),
)
.set_fast()
.set_fast(None)
.set_stored();
schema_builder.add_text_field("category", text_fieldtype);
schema_builder.add_f64_field("stock", FAST);

View File

@@ -0,0 +1,611 @@
#[cfg(all(test, feature = "unstable"))]
mod bench {
use columnar::Cardinality;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use test::{self, Bencher};
use super::*;
use crate::aggregation::bucket::{
CustomOrder, HistogramAggregation, HistogramBounds, Order, OrderTarget, TermsAggregation,
};
use crate::aggregation::metric::StatsAggregation;
use crate::query::AllQuery;
use crate::schema::{Schema, TextFieldIndexing, FAST, STRING};
use crate::Index;
fn get_test_index_bench(cardinality: Cardinality) -> crate::Result<Index> {
let mut schema_builder = Schema::builder();
let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
let index = Index::create_from_tempdir(schema_builder.build())?;
let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
let many_terms_data = (0..150_000)
.map(|num| format!("author{}", num))
.collect::<Vec<_>>();
{
let mut rng = thread_rng();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000)?;
// To make the different test cases comparable we just change one doc to force the
// cardinality
if cardinality == Cardinality::Optional {
index_writer.add_document(doc!())?;
}
if cardinality == Cardinality::Multivalued {
index_writer.add_document(doc!(
text_field => "cool",
text_field => "cool",
text_field_many_terms => "cool",
text_field_many_terms => "cool",
text_field_few_terms => "cool",
text_field_few_terms => "cool",
score_field => 1u64,
score_field => 1u64,
score_field_f64 => 1.0,
score_field_f64 => 1.0,
score_field_i64 => 1i64,
score_field_i64 => 1i64,
))?;
}
for _ in 0..1_000_000 {
let val: f64 = rng.gen_range(0.0..1_000_000.0);
index_writer.add_document(doc!(
text_field => "cool",
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
score_field => val as u64,
score_field_f64 => val,
score_field_i64 => val as i64,
))?;
}
// writing the segment
index_writer.commit()?;
}
Ok(index)
}
use paste::paste;
#[macro_export]
macro_rules! bench_all_cardinalities {
( $x:ident ) => {
paste! {
#[bench]
fn $x(b: &mut Bencher) {
[<$x _card>](b, Cardinality::Full)
}
#[bench]
fn [<$x _opt>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Optional)
}
#[bench]
fn [<$x _multi>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Multivalued)
}
}
};
}
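// For reference, a sketch of what one invocation expands to, with the names
// produced by the `paste!` concatenation above:
// bench_all_cardinalities!(bench_aggregation_average_u64); expands to roughly:
#[bench]
fn bench_aggregation_average_u64(b: &mut Bencher) {
    bench_aggregation_average_u64_card(b, Cardinality::Full)
}
#[bench]
fn bench_aggregation_average_u64_opt(b: &mut Bencher) {
    bench_aggregation_average_u64_card(b, Cardinality::Optional)
}
#[bench]
fn bench_aggregation_average_u64_multi(b: &mut Bencher) {
    bench_aggregation_average_u64_card(b, Cardinality::Multivalued)
}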
bench_all_cardinalities!(bench_aggregation_average_u64);
fn bench_aggregation_average_u64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = vec![(
"average".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score".to_string()),
)),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_stats_f64);
fn bench_aggregation_stats_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = vec![(
"average_f64".to_string(),
Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
"score_f64".to_string(),
))),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_average_f64);
fn bench_aggregation_average_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = vec![(
"average_f64".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score_f64".to_string()),
)),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_average_u64_and_f64);
fn bench_aggregation_average_u64_and_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = vec![
(
"average_f64".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score_f64".to_string()),
)),
),
(
"average".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score".to_string()),
)),
),
]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_few);
fn bench_aggregation_terms_few_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_few_terms".to_string(),
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);
fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let sub_agg_req: Aggregations = vec![(
"average_f64".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score_f64".to_string()),
)),
)]
.into_iter()
.collect();
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_many_terms".to_string(),
..Default::default()
}),
sub_aggregation: sub_agg_req,
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many2);
fn bench_aggregation_terms_many2_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_many_terms".to_string(),
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_order_by_term);
fn bench_aggregation_terms_many_order_by_term_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_many_terms".to_string(),
order: Some(CustomOrder {
order: Order::Desc,
target: OrderTarget::Key,
}),
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_range_only);
fn bench_aggregation_range_only_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = vec![(
"rangef64".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score_f64".to_string(),
ranges: vec![
(3f64..7000f64).into(),
(7000f64..20000f64).into(),
(20000f64..30000f64).into(),
(30000f64..40000f64).into(),
(40000f64..50000f64).into(),
(50000f64..60000f64).into(),
],
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_range_with_avg);
fn bench_aggregation_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let sub_agg_req: Aggregations = vec![(
"average_f64".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score_f64".to_string()),
)),
)]
.into_iter()
.collect();
let agg_req_1: Aggregations = vec![(
"rangef64".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score_f64".to_string(),
ranges: vec![
(3f64..7000f64).into(),
(7000f64..20000f64).into(),
(20000f64..30000f64).into(),
(30000f64..40000f64).into(),
(40000f64..50000f64).into(),
(50000f64..60000f64).into(),
],
..Default::default()
}),
sub_aggregation: sub_agg_req,
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
// hard_bounds uses a different algorithm, because it actually limits the collection range
bench_all_cardinalities!(bench_aggregation_histogram_only_hard_bounds);
fn bench_aggregation_histogram_only_hard_bounds_card(
b: &mut Bencher,
cardinality: Cardinality,
) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = vec![(
"rangef64".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 100f64,
hard_bounds: Some(HistogramBounds {
min: 1000.0,
max: 300_000.0,
}),
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_histogram_with_avg);
fn bench_aggregation_histogram_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let sub_agg_req: Aggregations = vec![(
"average_f64".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score_f64".to_string()),
)),
)]
.into_iter()
.collect();
let agg_req_1: Aggregations = vec![(
"rangef64".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 100f64, // 1000 buckets
..Default::default()
}),
sub_aggregation: sub_agg_req,
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_histogram_only);
fn bench_aggregation_histogram_only_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = vec![(
"rangef64".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 100f64, // 1000 buckets
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
)]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_avg_and_range_with_avg);
fn bench_aggregation_avg_and_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let sub_agg_req_1: Aggregations = vec![(
"average_in_range".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score".to_string()),
)),
)]
.into_iter()
.collect();
let agg_req_1: Aggregations = vec![
(
"average".to_string(),
Aggregation::Metric(MetricAggregation::Average(
AverageAggregation::from_field_name("score".to_string()),
)),
),
(
"rangef64".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score_f64".to_string(),
ranges: vec![
(3f64..7000f64).into(),
(7000f64..20000f64).into(),
(20000f64..60000f64).into(),
],
..Default::default()
}),
sub_aggregation: sub_agg_req_1,
}
.into(),
),
),
]
.into_iter()
.collect();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
}

View File

@@ -8,7 +8,7 @@ use super::collector::DEFAULT_MEMORY_LIMIT;
use super::{AggregationError, DEFAULT_BUCKET_LIMIT};
use crate::TantivyError;
/// An estimate for memory consumption
/// An estimate for memory consumption. Non recursive
pub trait MemoryConsumption {
fn memory_consumption(&self) -> usize;
}
@@ -83,12 +83,13 @@ impl AggregationLimits {
self.memory_consumption
.fetch_add(num_bytes, std::sync::atomic::Ordering::Relaxed);
}
/// Returns the estimated memory consumed by the aggregations
pub fn get_memory_consumed(&self) -> ByteCount {
self.memory_consumption
.load(std::sync::atomic::Ordering::Relaxed)
.into()
}
pub fn get_bucket_limit(&self) -> u32 {
pub(crate) fn get_bucket_limit(&self) -> u32 {
self.bucket_limit
}
}
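// Not part of this diff: a minimal sketch of the (non-recursive) `MemoryConsumption`
// contract, using a hypothetical collector buffer type for illustration.
struct TermOrdBuffer {
    ords: Vec<u64>,
}
impl MemoryConsumption for TermOrdBuffer {
    fn memory_consumption(&self) -> usize {
        // Report only this collector's own buffer, not its sub-aggregations.
        self.ords.capacity() * std::mem::size_of::<u64>()
    }
}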

View File

@@ -1,8 +1,6 @@
//! This will enhance the request tree with access to the fastfield and metadata.
use std::sync::Arc;
use columnar::{Column, ColumnType, ColumnValues, StrColumn};
use columnar::{Column, ColumnBlockAccessor, ColumnType, StrColumn};
use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{
@@ -45,6 +43,16 @@ pub struct BucketAggregationWithAccessor {
pub(crate) bucket_agg: BucketAggregationType,
pub(crate) sub_aggregation: AggregationsWithAccessor,
pub(crate) limits: AggregationLimits,
pub(crate) column_block_accessor: ColumnBlockAccessor<u64>,
}
fn get_numeric_or_date_column_types() -> &'static [ColumnType] {
&[
ColumnType::F64,
ColumnType::U64,
ColumnType::I64,
ColumnType::DateTime,
]
}
impl BucketAggregationWithAccessor {
@@ -58,19 +66,31 @@ impl BucketAggregationWithAccessor {
let (accessor, field_type) = match &bucket {
BucketAggregationType::Range(RangeAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name)?,
}) => get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?,
BucketAggregationType::Histogram(HistogramAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(reader, field_name)?,
}) => get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?,
BucketAggregationType::DateHistogram(DateHistogramAggregationReq {
field: field_name,
..
}) => get_ff_reader_and_validate(reader, field_name)?,
}) => get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?,
BucketAggregationType::Terms(TermsAggregation {
field: field_name, ..
}) => {
str_dict_column = reader.fast_fields().str(field_name)?;
get_ff_reader_and_validate(reader, field_name)?
get_ff_reader_and_validate(reader, field_name, None)?
}
};
let sub_aggregation = sub_aggregation.clone();
@@ -85,6 +105,7 @@ impl BucketAggregationWithAccessor {
bucket_agg: bucket.clone(),
str_dict_column,
limits,
column_block_accessor: Default::default(),
})
}
}
@@ -95,6 +116,7 @@ pub struct MetricAggregationWithAccessor {
pub metric: MetricAggregation,
pub field_type: ColumnType,
pub accessor: Column<u64>,
pub column_block_accessor: ColumnBlockAccessor<u64>,
}
impl MetricAggregationWithAccessor {
@@ -109,12 +131,17 @@ impl MetricAggregationWithAccessor {
| MetricAggregation::Min(MinAggregation { field: field_name })
| MetricAggregation::Stats(StatsAggregation { field: field_name })
| MetricAggregation::Sum(SumAggregation { field: field_name }) => {
let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?;
let (accessor, field_type) = get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?;
Ok(MetricAggregationWithAccessor {
accessor,
field_type,
metric: metric.clone(),
column_block_accessor: Default::default(),
})
}
}
@@ -155,35 +182,16 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
fn get_ff_reader_and_validate(
reader: &SegmentReader,
field_name: &str,
allowed_column_types: Option<&[ColumnType]>,
) -> crate::Result<(columnar::Column<u64>, ColumnType)> {
let ff_fields = reader.fast_fields();
let ff_field_with_type = ff_fields
.u64_lenient_with_type(field_name)?
.unwrap_or_else(|| (build_empty_column(reader.num_docs()), ColumnType::U64));
.u64_lenient_for_type(allowed_column_types, field_name)?
.unwrap_or_else(|| {
(
Column::build_empty_column(reader.num_docs()),
ColumnType::U64,
)
});
Ok(ff_field_with_type)
}
// Empty Column
fn build_empty_column(num_docs: u32) -> Column {
struct EmptyValues;
impl ColumnValues for EmptyValues {
fn get_val(&self, _idx: u32) -> u64 {
unimplemented!("Internal Error: Called get_val of empty column.")
}
fn min_value(&self) -> u64 {
unimplemented!("Internal Error: Called min_value of empty column.")
}
fn max_value(&self) -> u64 {
unimplemented!("Internal Error: Called max_value of empty column.")
}
fn num_vals(&self) -> u32 {
0
}
}
Column {
idx: columnar::ColumnIndex::Empty { num_docs },
values: Arc::new(EmptyValues),
}
}

View File

@@ -769,614 +769,98 @@ fn test_aggregation_on_json_object_empty_columns() {
);
}
#[test]
fn test_aggregation_on_json_object_mixed_types() {
let mut schema_builder = Schema::builder();
let json = schema_builder.add_json_field("json", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
// => Segment with all values numeric
index_writer
.add_document(doc!(json => json!({"mixed_type": 10.0})))
.unwrap();
index_writer.commit().unwrap();
// => Segment with all values text
index_writer
.add_document(doc!(json => json!({"mixed_type": "blue"})))
.unwrap();
index_writer.commit().unwrap();
// => Segment with all values boolean
index_writer
.add_document(doc!(json => json!({"mixed_type": true})))
.unwrap();
index_writer.commit().unwrap();
// => Segment with mixed values
index_writer
.add_document(doc!(json => json!({"mixed_type": "red"})))
.unwrap();
index_writer
.add_document(doc!(json => json!({"mixed_type": -20.5})))
.unwrap();
index_writer
.add_document(doc!(json => json!({"mixed_type": true})))
.unwrap();
index_writer.commit().unwrap();
// All bucket types
let agg_req_str = r#"
{
"termagg": {
"terms": {
"field": "json.mixed_type",
"order": { "min_price": "desc" }
},
"aggs": {
"min_price": { "min": { "field": "json.mixed_type" } }
}
},
"rangeagg": {
"range": {
"field": "json.mixed_type",
"ranges": [
{ "to": 3.0 },
{ "from": 19.0, "to": 20.0 },
{ "from": 20.0 }
]
},
"aggs": {
"average_in_range": { "avg": { "field": "json.mixed_type" } }
}
}
}
"#;
let agg: Aggregations = serde_json::from_str(agg_req_str).unwrap();
let aggregation_collector = get_collector(agg);
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
assert_eq!(
&aggregation_res_json,
&serde_json::json!({
"rangeagg": {
"buckets": [
{ "average_in_range": { "value": -20.5 }, "doc_count": 1, "key": "*-3", "to": 3.0 },
{ "average_in_range": { "value": 10.0 }, "doc_count": 1, "from": 3.0, "key": "3-19", "to": 19.0 },
{ "average_in_range": { "value": null }, "doc_count": 0, "from": 19.0, "key": "19-20", "to": 20.0 },
{ "average_in_range": { "value": null }, "doc_count": 0, "from": 20.0, "key": "20-*" }
]
},
"termagg": {
"buckets": [
{ "doc_count": 1, "key": 10.0, "min_price": { "value": 10.0 } },
{ "doc_count": 1, "key": -20.5, "min_price": { "value": -20.5 } },
// TODO red is missing since there is no multi aggregation within one
// segment for multiple types
// TODO bool is also not yet handled in aggregation
{ "doc_count": 1, "key": "blue", "min_price": { "value": null } }
],
"sum_other_doc_count": 0
}
})
);
}
(The deleted side of this hunk is the former inline `mod bench`, identical to the new aggregation benchmark file listed above.)

View File

@@ -20,7 +20,7 @@ use crate::aggregation::segment_agg_result::{
build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
};
use crate::aggregation::{f64_from_fastfield_u64, format_date, VecWithNames};
use crate::{DocId, TantivyError};
use crate::TantivyError;
/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
/// Each document value is rounded down to its bucket.
@@ -235,7 +235,7 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor,
agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> {
self.collect_block(&[doc], agg_with_accessor)
}
@@ -244,11 +244,9 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &AggregationsWithAccessor,
agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> {
let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor;
let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
let bucket_agg_accessor = &mut agg_with_accessor.buckets.values[self.accessor_idx];
let mem_pre = self.get_memory_consumption();
@@ -257,20 +255,26 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
let offset = self.offset;
let get_bucket_pos = |val| (get_bucket_pos_f64(val, interval, offset) as i64);
for doc in docs {
for val in accessor.values_for_doc(*doc) {
let val = self.f64_from_fastfield_u64(val);
let bucket_pos = get_bucket_pos(val);
if bounds.contains(val) {
self.increment_bucket(
bucket_pos,
*doc,
sub_aggregation_accessor,
interval,
offset,
)?;
}
}
}
bucket_agg_accessor
.column_block_accessor
.fetch_block(docs, &bucket_agg_accessor.accessor);
for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
let val = self.f64_from_fastfield_u64(val);
let bucket_pos = get_bucket_pos(val);
if bounds.contains(val) {
let bucket = self.buckets.entry(bucket_pos).or_insert_with(|| {
let key = get_bucket_key_from_pos(bucket_pos as f64, interval, offset);
SegmentHistogramBucketEntry { key, doc_count: 0 }
});
bucket.doc_count += 1;
if let Some(sub_aggregation_blueprint) = self.sub_aggregation_blueprint.as_mut() {
self.sub_aggregations
.entry(bucket_pos)
.or_insert_with(|| sub_aggregation_blueprint.clone())
.collect(doc, &mut bucket_agg_accessor.sub_aggregation)?;
}
}
}
@@ -283,9 +287,9 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
Ok(())
}
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
&mut agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
for sub_aggregation in self.sub_aggregations.values_mut() {
sub_aggregation.flush(sub_aggregation_accessor)?;
@@ -360,29 +364,6 @@ impl SegmentHistogramCollector {
}) })
} }
#[inline]
fn increment_bucket(
&mut self,
bucket_pos: i64,
doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor,
interval: f64,
offset: f64,
) -> crate::Result<()> {
let bucket = self.buckets.entry(bucket_pos).or_insert_with(|| {
let key = get_bucket_key_from_pos(bucket_pos as f64, interval, offset);
SegmentHistogramBucketEntry { key, doc_count: 0 }
});
bucket.doc_count += 1;
if let Some(sub_aggregation_blueprint) = self.sub_aggregation_blueprint.as_mut() {
self.sub_aggregations
.entry(bucket_pos)
.or_insert_with(|| sub_aggregation_blueprint.clone())
.collect(doc, bucket_with_accessor)?;
}
Ok(())
}
#[inline] #[inline]
fn f64_from_fastfield_u64(&self, val: u64) -> f64 { fn f64_from_fastfield_u64(&self, val: u64) -> f64 {
f64_from_fastfield_u64(val, &self.column_type) f64_from_fastfield_u64(val, &self.column_type)

View File

@@ -212,7 +212,7 @@ impl SegmentAggregationCollector for SegmentRangeCollector {
fn collect( fn collect(
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
self.collect_block(&[doc], agg_with_accessor) self.collect_block(&[doc], agg_with_accessor)
} }
@@ -221,30 +221,31 @@ impl SegmentAggregationCollector for SegmentRangeCollector {
fn collect_block( fn collect_block(
&mut self, &mut self,
docs: &[crate::DocId], docs: &[crate::DocId],
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor; let bucket_agg_accessor = &mut agg_with_accessor.buckets.values[self.accessor_idx];
let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
for doc in docs {
for val in accessor.values_for_doc(*doc) {
let bucket_pos = self.get_bucket_pos(val);
let bucket = &mut self.buckets[bucket_pos]; bucket_agg_accessor
.column_block_accessor
.fetch_block(docs, &bucket_agg_accessor.accessor);
bucket.bucket.doc_count += 1; for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation { let bucket_pos = self.get_bucket_pos(val);
sub_aggregation.collect(*doc, sub_aggregation_accessor)?;
} let bucket = &mut self.buckets[bucket_pos];
bucket.bucket.doc_count += 1;
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation.collect(doc, &mut bucket_agg_accessor.sub_aggregation)?;
} }
} }
Ok(()) Ok(())
} }
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> { fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
let sub_aggregation_accessor = let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation; &mut agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
for bucket in self.buckets.iter_mut() { for bucket in self.buckets.iter_mut() {
if let Some(sub_agg) = bucket.bucket.sub_aggregation.as_mut() { if let Some(sub_agg) = bucket.bucket.sub_aggregation.as_mut() {

View File

@@ -1,10 +1,11 @@
use std::fmt::Debug; use std::fmt::Debug;
use columnar::{Cardinality, ColumnType}; use columnar::ColumnType;
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::{CustomOrder, Order, OrderTarget}; use super::{CustomOrder, Order, OrderTarget};
use crate::aggregation::agg_limits::MemoryConsumption;
use crate::aggregation::agg_req_with_accessor::{ use crate::aggregation::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor, AggregationsWithAccessor, BucketAggregationWithAccessor,
}; };
@@ -210,7 +211,16 @@ struct TermBuckets {
} }
impl TermBuckets { impl TermBuckets {
fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> { fn get_memory_consumption(&self) -> usize {
let sub_aggs_mem = self.sub_aggs.memory_consumption();
let buckets_mem = self.entries.memory_consumption();
sub_aggs_mem + buckets_mem
}
fn force_flush(
&mut self,
agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> {
for sub_aggregations in &mut self.sub_aggs.values_mut() { for sub_aggregations in &mut self.sub_aggs.values_mut() {
sub_aggregations.as_mut().flush(agg_with_accessor)?; sub_aggregations.as_mut().flush(agg_with_accessor)?;
} }
@@ -228,7 +238,6 @@ pub struct SegmentTermCollector {
blueprint: Option<Box<dyn SegmentAggregationCollector>>, blueprint: Option<Box<dyn SegmentAggregationCollector>>,
field_type: ColumnType, field_type: ColumnType,
accessor_idx: usize, accessor_idx: usize,
val_cache: Vec<u64>,
} }
pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) { pub(crate) fn get_agg_name_and_property(name: &str) -> (&str, &str) {
@@ -257,7 +266,7 @@ impl SegmentAggregationCollector for SegmentTermCollector {
fn collect( fn collect(
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
self.collect_block(&[doc], agg_with_accessor) self.collect_block(&[doc], agg_with_accessor)
} }
@@ -266,53 +275,42 @@ impl SegmentAggregationCollector for SegmentTermCollector {
fn collect_block( fn collect_block(
&mut self, &mut self,
docs: &[crate::DocId], docs: &[crate::DocId],
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor; let bucket_agg_accessor = &mut agg_with_accessor.buckets.values[self.accessor_idx];
let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
if accessor.get_cardinality() == Cardinality::Full { let mem_pre = self.get_memory_consumption();
self.val_cache.resize(docs.len(), 0);
accessor.values.get_vals(docs, &mut self.val_cache); bucket_agg_accessor
for term_id in self.val_cache.iter().cloned() { .column_block_accessor
let entry = self.term_buckets.entries.entry(term_id).or_default(); .fetch_block(docs, &bucket_agg_accessor.accessor);
*entry += 1; for term_id in bucket_agg_accessor.column_block_accessor.iter_vals() {
} let entry = self.term_buckets.entries.entry(term_id).or_default();
// has subagg *entry += 1;
if let Some(blueprint) = self.blueprint.as_ref() { }
for (doc, term_id) in docs.iter().zip(self.val_cache.iter().cloned()) { // has subagg
let sub_aggregations = self if let Some(blueprint) = self.blueprint.as_ref() {
.term_buckets for (doc, term_id) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
.sub_aggs let sub_aggregations = self
.entry(term_id) .term_buckets
.or_insert_with(|| blueprint.clone()); .sub_aggs
sub_aggregations.collect(*doc, sub_aggregation_accessor)?; .entry(term_id)
} .or_insert_with(|| blueprint.clone());
} sub_aggregations.collect(doc, &mut bucket_agg_accessor.sub_aggregation)?;
} else {
for doc in docs {
for term_id in accessor.values_for_doc(*doc) {
let entry = self.term_buckets.entries.entry(term_id).or_default();
*entry += 1;
// TODO: check if separate loop is faster (may depend on the codec)
if let Some(blueprint) = self.blueprint.as_ref() {
let sub_aggregations = self
.term_buckets
.sub_aggs
.entry(term_id)
.or_insert_with(|| blueprint.clone());
sub_aggregations.collect(*doc, sub_aggregation_accessor)?;
}
}
} }
} }
let mem_delta = self.get_memory_consumption() - mem_pre;
let limits = &agg_with_accessor.buckets.values[self.accessor_idx].limits;
limits.add_memory_consumed(mem_delta as u64);
limits.validate_memory_consumption()?;
Ok(()) Ok(())
} }
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> { fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
let sub_aggregation_accessor = let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation; &mut agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
self.term_buckets.force_flush(sub_aggregation_accessor)?; self.term_buckets.force_flush(sub_aggregation_accessor)?;
Ok(()) Ok(())
@@ -320,6 +318,12 @@ impl SegmentAggregationCollector for SegmentTermCollector {
} }
impl SegmentTermCollector { impl SegmentTermCollector {
fn get_memory_consumption(&self) -> usize {
let self_mem = std::mem::size_of::<Self>();
let term_buckets_mem = self.term_buckets.get_memory_consumption();
self_mem + term_buckets_mem
}
pub(crate) fn from_req_and_validate( pub(crate) fn from_req_and_validate(
req: &TermsAggregation, req: &TermsAggregation,
sub_aggregations: &AggregationsWithAccessor, sub_aggregations: &AggregationsWithAccessor,
@@ -356,7 +360,6 @@ impl SegmentTermCollector {
blueprint, blueprint,
field_type, field_type,
accessor_idx, accessor_idx,
val_cache: Default::default(),
}) })
} }
@@ -525,9 +528,10 @@ mod tests {
}; };
use crate::aggregation::metric::{AverageAggregation, StatsAggregation}; use crate::aggregation::metric::{AverageAggregation, StatsAggregation};
use crate::aggregation::tests::{ use crate::aggregation::tests::{
exec_request, exec_request_with_query, get_test_index_from_terms, exec_request, exec_request_with_query, exec_request_with_query_and_memory_limit,
get_test_index_from_values_and_terms, get_test_index_from_terms, get_test_index_from_values_and_terms,
}; };
use crate::aggregation::AggregationLimits;
#[test] #[test]
fn terms_aggregation_test_single_segment() -> crate::Result<()> { fn terms_aggregation_test_single_segment() -> crate::Result<()> {
@@ -1332,34 +1336,40 @@ mod tests {
Ok(()) Ok(())
} }
// TODO reenable with memory limit #[test]
//#[test] fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
// fn terms_aggregation_term_bucket_limit() -> crate::Result<()> { let terms: Vec<String> = (0..20_000).map(|el| el.to_string()).collect();
// let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect(); let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
// let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
// let index = get_test_index_from_terms(true, &terms_per_segment)?; let index = get_test_index_from_terms(true, &terms_per_segment)?;
// let agg_req: Aggregations = vec![( let agg_req: Aggregations = vec![(
//"my_texts".to_string(), "my_texts".to_string(),
// Aggregation::Bucket(BucketAggregation { Aggregation::Bucket(Box::new(BucketAggregation {
// bucket_agg: BucketAggregationType::Terms(TermsAggregation { bucket_agg: BucketAggregationType::Terms(TermsAggregation {
// field: "string_id".to_string(), field: "string_id".to_string(),
// min_doc_count: Some(0), min_doc_count: Some(0),
//..Default::default() ..Default::default()
//}), }),
// sub_aggregation: Default::default(), sub_aggregation: Default::default(),
//}), })),
//)] )]
//.into_iter() .into_iter()
//.collect(); .collect();
// let res = exec_request_with_query(agg_req, &index, None); let res = exec_request_with_query_and_memory_limit(
agg_req,
&index,
None,
AggregationLimits::new(Some(50_000), None),
)
.unwrap_err();
assert!(res
.to_string()
.contains("Aborting aggregation because memory limit was exceeded. Limit: 50.00 KB"));
// assert!(res.is_err()); Ok(())
}
// Ok(())
//}
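The limit used above is built as AggregationLimits::new(memory_limit_bytes, bucket_limit); passing Some(50_000) caps aggregation memory at roughly 50 KB while leaving the bucket limit at its default (parameter roles inferred from the test and the "Limit: 50.00 KB" error message).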
#[test] #[test]
fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> { fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> {

View File

@@ -46,7 +46,7 @@ impl SegmentAggregationCollector for BufAggregationCollector {
fn collect( fn collect(
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
self.staged_docs[self.num_staged_docs] = doc; self.staged_docs[self.num_staged_docs] = doc;
self.num_staged_docs += 1; self.num_staged_docs += 1;
@@ -62,7 +62,7 @@ impl SegmentAggregationCollector for BufAggregationCollector {
fn collect_block( fn collect_block(
&mut self, &mut self,
docs: &[crate::DocId], docs: &[crate::DocId],
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
self.collector.collect_block(docs, agg_with_accessor)?; self.collector.collect_block(docs, agg_with_accessor)?;
@@ -70,7 +70,7 @@ impl SegmentAggregationCollector for BufAggregationCollector {
} }
#[inline] #[inline]
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> { fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
self.collector self.collector
.collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor)?; .collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor)?;
self.num_staged_docs = 0; self.num_staged_docs = 0;

View File

@@ -156,7 +156,10 @@ impl SegmentCollector for AggregationSegmentCollector {
if self.error.is_some() { if self.error.is_some() {
return; return;
} }
if let Err(err) = self.agg_collector.collect(doc, &self.aggs_with_accessor) { if let Err(err) = self
.agg_collector
.collect(doc, &mut self.aggs_with_accessor)
{
self.error = Some(err); self.error = Some(err);
} }
} }
@@ -170,7 +173,7 @@ impl SegmentCollector for AggregationSegmentCollector {
} }
if let Err(err) = self if let Err(err) = self
.agg_collector .agg_collector
.collect_block(docs, &self.aggs_with_accessor) .collect_block(docs, &mut self.aggs_with_accessor)
{ {
self.error = Some(err); self.error = Some(err);
} }
@@ -180,7 +183,7 @@ impl SegmentCollector for AggregationSegmentCollector {
if let Some(err) = self.error { if let Some(err) = self.error {
return Err(err); return Err(err);
} }
self.agg_collector.flush(&self.aggs_with_accessor)?; self.agg_collector.flush(&mut self.aggs_with_accessor)?;
Box::new(self.agg_collector).into_intermediate_aggregations_result(&self.aggs_with_accessor) Box::new(self.agg_collector).into_intermediate_aggregations_result(&self.aggs_with_accessor)
} }
} }

View File

@@ -7,7 +7,8 @@ use std::cmp::Ordering;
use columnar::ColumnType; use columnar::ColumnType;
use itertools::Itertools; use itertools::Itertools;
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize}; use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use super::agg_req::{ use super::agg_req::{
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType, Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
@@ -45,7 +46,6 @@ impl IntermediateAggregationResults {
req: Aggregations, req: Aggregations,
limits: &AggregationLimits, limits: &AggregationLimits,
) -> crate::Result<AggregationResults> { ) -> crate::Result<AggregationResults> {
// TODO count and validate buckets
let res = self.into_final_bucket_result_internal(&(req.into()), limits)?; let res = self.into_final_bucket_result_internal(&(req.into()), limits)?;
let bucket_count = res.get_bucket_count() as u32; let bucket_count = res.get_bucket_count() as u32;
if bucket_count > limits.get_bucket_limit() { if bucket_count > limits.get_bucket_limit() {
@@ -464,11 +464,39 @@ pub struct IntermediateRangeBucketResult {
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)] #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Term aggregation including error counts /// Term aggregation including error counts
pub struct IntermediateTermBucketResult { pub struct IntermediateTermBucketResult {
#[serde(
serialize_with = "serialize_entries",
deserialize_with = "deserialize_entries"
)]
pub(crate) entries: FxHashMap<Key, IntermediateTermBucketEntry>, pub(crate) entries: FxHashMap<Key, IntermediateTermBucketEntry>,
pub(crate) sum_other_doc_count: u64, pub(crate) sum_other_doc_count: u64,
pub(crate) doc_count_error_upper_bound: u64, pub(crate) doc_count_error_upper_bound: u64,
} }
// Serialize into a Vec to circumvent the JSON limitation that keys can't be numbers
fn serialize_entries<S>(
entries: &FxHashMap<Key, IntermediateTermBucketEntry>,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(entries.len()))?;
for (k, v) in entries {
seq.serialize_element(&(k, v))?;
}
seq.end()
}
fn deserialize_entries<'de, D>(
deserializer: D,
) -> Result<FxHashMap<Key, IntermediateTermBucketEntry>, D::Error>
where D: Deserializer<'de> {
let vec_entries: Vec<(Key, IntermediateTermBucketEntry)> =
Deserialize::deserialize(deserializer)?;
Ok(vec_entries.into_iter().collect())
}
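In JSON, object keys must be strings, so a map keyed by numeric `Key` values such as `Key::F64(5.0)` cannot round-trip as a JSON object. Serializing the entries as a sequence of (key, entry) pairs instead produces roughly "entries": [[5.0, {"doc_count": 10, ...}]], which the round-trip test at the end of this file exercises.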
impl IntermediateTermBucketResult { impl IntermediateTermBucketResult {
pub(crate) fn into_final_result( pub(crate) fn into_final_result(
self, self,
@@ -842,4 +870,26 @@ mod tests {
assert_eq!(tree_left, orig); assert_eq!(tree_left, orig);
} }
#[test]
fn test_term_bucket_json_roundtrip() {
let term_buckets = IntermediateTermBucketResult {
entries: vec![(
Key::F64(5.0),
IntermediateTermBucketEntry {
doc_count: 10,
sub_aggregation: Default::default(),
},
)]
.into_iter()
.collect(),
sum_other_doc_count: 0,
doc_count_error_upper_bound: 0,
};
let term_buckets_round: IntermediateTermBucketResult =
serde_json::from_str(&serde_json::to_string(&term_buckets).unwrap()).unwrap();
assert_eq!(term_buckets, term_buckets_round);
}
} }

View File

@@ -1,8 +1,10 @@
use columnar::{Cardinality, Column, ColumnType}; use columnar::ColumnType;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::*; use super::*;
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor; use crate::aggregation::agg_req_with_accessor::{
AggregationsWithAccessor, MetricAggregationWithAccessor,
};
use crate::aggregation::intermediate_agg_result::{ use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateMetricResult, IntermediateAggregationResults, IntermediateMetricResult,
}; };
@@ -174,21 +176,18 @@ impl SegmentStatsCollector {
} }
} }
#[inline] #[inline]
pub(crate) fn collect_block_with_field(&mut self, docs: &[DocId], field: &Column<u64>) { pub(crate) fn collect_block_with_field(
if field.get_cardinality() == Cardinality::Full { &mut self,
self.val_cache.resize(docs.len(), 0); docs: &[DocId],
field.values.get_vals(docs, &mut self.val_cache); agg_accessor: &mut MetricAggregationWithAccessor,
for val in self.val_cache.iter() { ) {
let val1 = f64_from_fastfield_u64(*val, &self.field_type); agg_accessor
self.stats.collect(val1); .column_block_accessor
} .fetch_block(docs, &agg_accessor.accessor);
} else {
for doc in docs { for val in agg_accessor.column_block_accessor.iter_vals() {
for val in field.values_for_doc(*doc) { let val1 = f64_from_fastfield_u64(val, &self.field_type);
let val1 = f64_from_fastfield_u64(val, &self.field_type); self.stats.collect(val1);
self.stats.collect(val1);
}
}
} }
} }
} }
@@ -235,7 +234,7 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
fn collect( fn collect(
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
let field = &agg_with_accessor.metrics.values[self.accessor_idx].accessor; let field = &agg_with_accessor.metrics.values[self.accessor_idx].accessor;
@@ -251,9 +250,9 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
fn collect_block( fn collect_block(
&mut self, &mut self,
docs: &[crate::DocId], docs: &[crate::DocId],
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
let field = &agg_with_accessor.metrics.values[self.accessor_idx].accessor; let field = &mut agg_with_accessor.metrics.values[self.accessor_idx];
self.collect_block_with_field(docs, field); self.collect_block_with_field(docs, field);
Ok(()) Ok(())
} }

View File

@@ -445,7 +445,7 @@ mod tests {
.set_index_option(IndexRecordOption::Basic) .set_index_option(IndexRecordOption::Basic)
.set_fieldnorms(false), .set_fieldnorms(false),
) )
.set_fast() .set_fast(None)
.set_stored(); .set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype.clone()); let text_field = schema_builder.add_text_field("text", text_fieldtype.clone());
let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype); let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype);
@@ -500,7 +500,7 @@ mod tests {
.set_indexing_options( .set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs), TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
) )
.set_fast() .set_fast(None)
.set_stored(); .set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let date_field = schema_builder.add_date_field("date", FAST); let date_field = schema_builder.add_date_field("date", FAST);
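The `set_fast` change threaded through these tests now takes an optional fast-field tokenizer name. Schematically (variable names are illustrative; "default" is a tokenizer registered in the default TokenizerManager):

    let untokenized = TextOptions::default().set_fast(None);            // index the raw string value as a single term
    let tokenized = TextOptions::default().set_fast(Some("default"));   // run the named tokenizer before building the term dictionary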

View File

@@ -28,18 +28,18 @@ pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
fn collect( fn collect(
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()>; ) -> crate::Result<()>;
fn collect_block( fn collect_block(
&mut self, &mut self,
docs: &[crate::DocId], docs: &[crate::DocId],
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()>; ) -> crate::Result<()>;
    /// Finalize method. Some aggregators collect blocks of docs before calling `collect_block`. /// Finalize method. Some aggregators collect blocks of docs before calling `collect_block`.
/// This method ensures those staged docs will be collected. /// This method ensures those staged docs will be collected.
fn flush(&mut self, _agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> { fn flush(&mut self, _agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
Ok(()) Ok(())
} }
} }
@@ -206,7 +206,7 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
fn collect( fn collect(
&mut self, &mut self,
doc: crate::DocId, doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
self.collect_block(&[doc], agg_with_accessor)?; self.collect_block(&[doc], agg_with_accessor)?;
@@ -216,7 +216,7 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
fn collect_block( fn collect_block(
&mut self, &mut self,
docs: &[crate::DocId], docs: &[crate::DocId],
agg_with_accessor: &AggregationsWithAccessor, agg_with_accessor: &mut AggregationsWithAccessor,
) -> crate::Result<()> { ) -> crate::Result<()> {
if let Some(metrics) = self.metrics.as_mut() { if let Some(metrics) = self.metrics.as_mut() {
for collector in metrics { for collector in metrics {
@@ -233,7 +233,7 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
Ok(()) Ok(())
} }
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> { fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
if let Some(metrics) = &mut self.metrics { if let Some(metrics) = &mut self.metrics {
for collector in metrics { for collector in metrics {
collector.flush(agg_with_accessor)?; collector.flush(agg_with_accessor)?;

View File

@@ -113,7 +113,7 @@ impl Collector for HistogramCollector {
segment: &crate::SegmentReader, segment: &crate::SegmentReader,
) -> crate::Result<Self::Child> { ) -> crate::Result<Self::Child> {
let column_opt = segment.fast_fields().u64_lenient(&self.field)?; let column_opt = segment.fast_fields().u64_lenient(&self.field)?;
let column = column_opt.ok_or_else(|| FastFieldNotAvailableError { let (column, _column_type) = column_opt.ok_or_else(|| FastFieldNotAvailableError {
field_name: self.field.clone(), field_name: self.field.clone(),
})?; })?;
let column_u64 = column.first_or_default_col(0u64); let column_u64 = column.first_or_default_col(0u64);

View File

@@ -155,12 +155,13 @@ impl CustomScorer<u64> for ScorerByField {
// //
// The conversion will then happen only on the top-K docs. // The conversion will then happen only on the top-K docs.
let sort_column_opt = segment_reader.fast_fields().u64_lenient(&self.field)?; let sort_column_opt = segment_reader.fast_fields().u64_lenient(&self.field)?;
let sort_column = sort_column_opt let (sort_column, _sort_column_type) =
.ok_or_else(|| FastFieldNotAvailableError { sort_column_opt.ok_or_else(|| FastFieldNotAvailableError {
field_name: self.field.clone(), field_name: self.field.clone(),
})? })?;
.first_or_default_col(0u64); Ok(ScorerByFastFieldReader {
Ok(ScorerByFastFieldReader { sort_column }) sort_column: sort_column.first_or_default_col(0u64),
})
} }
} }

View File

@@ -7,7 +7,7 @@ use std::sync::{Arc, RwLock, Weak};
use std::{fmt, result}; use std::{fmt, result};
use common::StableDeref; use common::StableDeref;
use fs2::FileExt; use fs4::FileExt;
use memmap2::Mmap; use memmap2::Mmap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tempfile::TempDir; use tempfile::TempDir;

View File

@@ -14,8 +14,8 @@
//! Fields have to be declared as `FAST` in the schema. //! Fields have to be declared as `FAST` in the schema.
//! Currently supported fields are: u64, i64, f64, bytes, ip and text. //! Currently supported fields are: u64, i64, f64, bytes, ip and text.
//! //!
//! Fast fields are stored with [different codecs](fastfield_codecs). The best codec is detected //! Fast fields are stored with [different codecs](columnar::column_values). The best codec is
//! automatically when serializing. //! detected automatically when serializing.
//! //!
//! Read access performance is comparable to that of an array lookup. //! Read access performance is comparable to that of an array lookup.
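For orientation, reading a fast field column in the tests below follows this shape (field name and variables mirror the tests and are illustrative):

    let fast_field_readers = FastFieldReaders::open(file, schema).unwrap();
    let column = fast_field_readers.u64("field").unwrap();   // Column<u64>
    let first_value = column.first(0);                        // Option<u64>: first value of doc 0, if any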
@@ -115,7 +115,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create(); let directory: RamDirectory = RamDirectory::create();
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
fast_field_writers fast_field_writers
.add_document(&doc!(*FIELD=>13u64)) .add_document(&doc!(*FIELD=>13u64))
.unwrap(); .unwrap();
@@ -130,7 +130,7 @@ mod tests {
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 161); assert_eq!(file.len(), 95);
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap(); let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let column = fast_field_readers let column = fast_field_readers
.u64("field") .u64("field")
@@ -148,7 +148,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create(); let directory: RamDirectory = RamDirectory::create();
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
fast_field_writers fast_field_writers
.add_document(&doc!(*FIELD=>4u64)) .add_document(&doc!(*FIELD=>4u64))
.unwrap(); .unwrap();
@@ -180,7 +180,7 @@ mod tests {
write.terminate().unwrap(); write.terminate().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 189); assert_eq!(file.len(), 123);
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap(); let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let col = fast_field_readers let col = fast_field_readers
.u64("field") .u64("field")
@@ -203,7 +203,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create(); let directory: RamDirectory = RamDirectory::create();
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
for _ in 0..10_000 { for _ in 0..10_000 {
fast_field_writers fast_field_writers
.add_document(&doc!(*FIELD=>100_000u64)) .add_document(&doc!(*FIELD=>100_000u64))
@@ -213,7 +213,7 @@ mod tests {
write.terminate().unwrap(); write.terminate().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 162); assert_eq!(file.len(), 96);
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap(); let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let fast_field_reader = fast_field_readers let fast_field_reader = fast_field_readers
.u64("field") .u64("field")
@@ -231,7 +231,7 @@ mod tests {
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
// forcing the amplitude to be high // forcing the amplitude to be high
fast_field_writers fast_field_writers
.add_document(&doc!(*FIELD=>0u64)) .add_document(&doc!(*FIELD=>0u64))
@@ -245,7 +245,7 @@ mod tests {
write.terminate().unwrap(); write.terminate().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 4557); assert_eq!(file.len(), 4491);
{ {
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap(); let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let col = fast_field_readers let col = fast_field_readers
@@ -268,7 +268,7 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema); let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
for i in -100i64..10_000i64 { for i in -100i64..10_000i64 {
let mut doc = Document::default(); let mut doc = Document::default();
doc.add_i64(i64_field, i); doc.add_i64(i64_field, i);
@@ -278,7 +278,7 @@ mod tests {
write.terminate().unwrap(); write.terminate().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 333_usize); assert_eq!(file.len(), 267);
{ {
let fast_field_readers = FastFieldReaders::open(file, schema).unwrap(); let fast_field_readers = FastFieldReaders::open(file, schema).unwrap();
@@ -310,7 +310,7 @@ mod tests {
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema); let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let doc = Document::default(); let doc = Document::default();
fast_field_writers.add_document(&doc).unwrap(); fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap(); fast_field_writers.serialize(&mut write, None).unwrap();
@@ -343,7 +343,7 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema); let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let doc = Document::default(); let doc = Document::default();
fast_field_writers.add_document(&doc).unwrap(); fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap(); fast_field_writers.serialize(&mut write, None).unwrap();
@@ -379,7 +379,7 @@ mod tests {
let directory = RamDirectory::create(); let directory = RamDirectory::create();
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
for &x in &permutation { for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x)).unwrap(); fast_field_writers.add_document(&doc!(*FIELD=>x)).unwrap();
} }
@@ -759,7 +759,7 @@ mod tests {
{ {
let mut write: WritePtr = directory.open_write(path).unwrap(); let mut write: WritePtr = directory.open_write(path).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema); let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
fast_field_writers.add_document(&doc!(field=>true)).unwrap(); fast_field_writers.add_document(&doc!(field=>true)).unwrap();
fast_field_writers fast_field_writers
.add_document(&doc!(field=>false)) .add_document(&doc!(field=>false))
@@ -772,7 +772,7 @@ mod tests {
write.terminate().unwrap(); write.terminate().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 175); assert_eq!(file.len(), 104);
let fast_field_readers = FastFieldReaders::open(file, schema).unwrap(); let fast_field_readers = FastFieldReaders::open(file, schema).unwrap();
let bool_col = fast_field_readers.bool("field_bool").unwrap(); let bool_col = fast_field_readers.bool("field_bool").unwrap();
assert_eq!(bool_col.first(0), Some(true)); assert_eq!(bool_col.first(0), Some(true));
@@ -793,7 +793,7 @@ mod tests {
{ {
let mut write: WritePtr = directory.open_write(path).unwrap(); let mut write: WritePtr = directory.open_write(path).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema); let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
for _ in 0..50 { for _ in 0..50 {
fast_field_writers.add_document(&doc!(field=>true)).unwrap(); fast_field_writers.add_document(&doc!(field=>true)).unwrap();
fast_field_writers fast_field_writers
@@ -804,7 +804,7 @@ mod tests {
write.terminate().unwrap(); write.terminate().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 187); assert_eq!(file.len(), 116);
let readers = FastFieldReaders::open(file, schema).unwrap(); let readers = FastFieldReaders::open(file, schema).unwrap();
let bool_col = readers.bool("field_bool").unwrap(); let bool_col = readers.bool("field_bool").unwrap();
for i in 0..25 { for i in 0..25 {
@@ -822,14 +822,14 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
{ {
let mut write: WritePtr = directory.open_write(path).unwrap(); let mut write: WritePtr = directory.open_write(path).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema); let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let doc = Document::default(); let doc = Document::default();
fast_field_writers.add_document(&doc).unwrap(); fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap(); fast_field_writers.serialize(&mut write, None).unwrap();
write.terminate().unwrap(); write.terminate().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 177); assert_eq!(file.len(), 106);
let fastfield_readers = FastFieldReaders::open(file, schema).unwrap(); let fastfield_readers = FastFieldReaders::open(file, schema).unwrap();
let col = fastfield_readers.bool("field_bool").unwrap(); let col = fastfield_readers.bool("field_bool").unwrap();
assert_eq!(col.first(0), None); assert_eq!(col.first(0), None);
@@ -849,7 +849,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create(); let directory: RamDirectory = RamDirectory::create();
{ {
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(schema); let mut fast_field_writers = FastFieldsWriter::from_schema(schema).unwrap();
for doc in docs { for doc in docs {
fast_field_writers.add_document(doc).unwrap(); fast_field_writers.add_document(doc).unwrap();
} }
@@ -1173,6 +1173,45 @@ mod tests {
assert_eq!(&vals, &[33]); assert_eq!(&vals, &[33]);
} }
#[test]
fn test_text_fast_field_tokenizer() {
let mut schema_builder = Schema::builder();
let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options(
crate::schema::TextFieldIndexing::default()
.set_index_option(crate::schema::IndexRecordOption::WithFreqs)
.set_tokenizer("raw"),
)
.set_fast(Some("default"))
.set_stored();
let log_field = schema_builder.add_text_field("log_level", text_fieldtype);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
index_writer
.add_document(doc!(log_field => "info"))
.unwrap();
index_writer
.add_document(doc!(log_field => "INFO"))
.unwrap();
index_writer.commit().unwrap();
let searcher = index.reader().unwrap().searcher();
let fast_field_reader = searcher.segment_reader(0u32).fast_fields();
let text_fast_field = fast_field_reader.str("log_level").unwrap().unwrap();
let mut buffer = String::new();
assert!(text_fast_field.ord_to_str(0, &mut buffer).unwrap());
assert_eq!(buffer, "info");
assert!(!text_fast_field.ord_to_str(1, &mut buffer).unwrap());
assert!(text_fast_field.term_ords(0).eq([0].into_iter()));
assert!(text_fast_field.term_ords(1).eq([0].into_iter()));
assert!(text_fast_field.ords().values_for_doc(0u32).eq([0]));
assert!(text_fast_field.ords().values_for_doc(1u32).eq([0]));
}
#[test] #[test]
fn test_shadowing_fast_field_with_expand_dots() { fn test_shadowing_fast_field_with_expand_dots() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();

View File

@@ -252,26 +252,25 @@ impl FastFieldReaders {
Ok(columns) Ok(columns)
} }
    /// Returns the `u64` column used to represent any `u64`-mapped type (i64, u64, f64, DateTime). /// Returns the `u64` column used to represent any `u64`-mapped type (String/Bytes term ids,
#[doc(hidden)] /// i64, u64, f64, DateTime).
pub fn u64_lenient(&self, field_name: &str) -> crate::Result<Option<Column<u64>>> {
Ok(self
.u64_lenient_with_type(field_name)?
.map(|(u64_column, _)| u64_column))
}
    /// Returns the `u64` column used to represent any `u64`-mapped type (i64, u64, f64, DateTime).
/// ///
/// Returns Ok(None) for empty columns /// Returns Ok(None) for empty columns
#[doc(hidden)] #[doc(hidden)]
pub fn u64_lenient_with_type( pub fn u64_lenient_for_type(
&self, &self,
type_white_list_opt: Option<&[ColumnType]>,
field_name: &str, field_name: &str,
) -> crate::Result<Option<(Column<u64>, ColumnType)>> { ) -> crate::Result<Option<(Column<u64>, ColumnType)>> {
let Some(resolved_field_name) = self.resolve_field(field_name)? else { let Some(resolved_field_name) = self.resolve_field(field_name)? else {
return Ok(None); return Ok(None);
}; };
for col in self.columnar.read_columns(&resolved_field_name)? { for col in self.columnar.read_columns(&resolved_field_name)? {
if let Some(type_white_list) = type_white_list_opt {
if !type_white_list.contains(&col.column_type()) {
continue;
}
}
if let Some(col_u64) = col.open_u64_lenient()? { if let Some(col_u64) = col.open_u64_lenient()? {
return Ok(Some((col_u64, col.column_type()))); return Ok(Some((col_u64, col.column_type())));
} }
@@ -279,6 +278,17 @@ impl FastFieldReaders {
Ok(None) Ok(None)
} }
    /// Returns the `u64` column used to represent any `u64`-mapped type (i64, u64, f64, DateTime).
///
/// Returns Ok(None) for empty columns
#[doc(hidden)]
pub fn u64_lenient(
&self,
field_name: &str,
) -> crate::Result<Option<(Column<u64>, ColumnType)>> {
self.u64_lenient_for_type(None, field_name)
}
    /// Returns the `i64` fast field reader associated with `field`. /// Returns the `i64` fast field reader associated with `field`.
/// ///
    /// If `field` is not an i64 fast field, this method returns an Error. /// If `field` is not an i64 fast field, this method returns an Error.
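Callers of the relaxed accessor now receive the column type alongside the column; a minimal sketch of the new call shape (field name is illustrative):

    let (column, column_type) = segment_reader
        .fast_fields()
        .u64_lenient("id_opt")?
        .ok_or_else(|| FastFieldNotAvailableError {
            field_name: "id_opt".to_string(),
        })?;
    // `column_type` reports whether the u64 values are term ordinals, i64, f64, DateTime, ...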

View File

@@ -2,11 +2,13 @@ use std::io;
use columnar::{ColumnarWriter, NumericalValue}; use columnar::{ColumnarWriter, NumericalValue};
use common::replace_in_place; use common::replace_in_place;
use tokenizer_api::Token;
use crate::indexer::doc_id_mapping::DocIdMapping; use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::term::{JSON_PATH_SEGMENT_SEP, JSON_PATH_SEGMENT_SEP_STR}; use crate::schema::term::{JSON_PATH_SEGMENT_SEP, JSON_PATH_SEGMENT_SEP_STR};
use crate::schema::{value_type_to_column_type, Document, FieldType, Schema, Type, Value}; use crate::schema::{value_type_to_column_type, Document, FieldType, Schema, Type, Value};
use crate::{DatePrecision, DocId}; use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::{DatePrecision, DocId, TantivyError};
/// Only index JSON down to a depth of 20. /// Only index JSON down to a depth of 20.
/// This is mostly to guard us from a stack overflow triggered by malicious input. /// This is mostly to guard us from a stack overflow triggered by malicious input.
@@ -15,7 +17,8 @@ const JSON_DEPTH_LIMIT: usize = 20;
/// The `FastFieldsWriter` groups all of the fast field writers. /// The `FastFieldsWriter` groups all of the fast field writers.
pub struct FastFieldsWriter { pub struct FastFieldsWriter {
columnar_writer: ColumnarWriter, columnar_writer: ColumnarWriter,
    fast_field_names: Vec<Option<String>>, //< TODO see if we can cash the field name hash too. fast_field_names: Vec<Option<String>>, //< TODO see if we can cache the field name hash too.
per_field_tokenizer: Vec<Option<TextAnalyzer>>,
date_precisions: Vec<DatePrecision>, date_precisions: Vec<DatePrecision>,
expand_dots: Vec<bool>, expand_dots: Vec<bool>,
num_docs: DocId, num_docs: DocId,
@@ -25,14 +28,25 @@ pub struct FastFieldsWriter {
impl FastFieldsWriter { impl FastFieldsWriter {
/// Create all `FastFieldWriter` required by the schema. /// Create all `FastFieldWriter` required by the schema.
pub fn from_schema(schema: &Schema) -> FastFieldsWriter { #[cfg(test)]
pub fn from_schema(schema: &Schema) -> crate::Result<FastFieldsWriter> {
FastFieldsWriter::from_schema_and_tokenizer_manager(&schema, TokenizerManager::new())
}
/// Create all `FastFieldWriter` required by the schema.
pub fn from_schema_and_tokenizer_manager(
schema: &Schema,
tokenizer_manager: TokenizerManager,
) -> crate::Result<FastFieldsWriter> {
let mut columnar_writer = ColumnarWriter::default(); let mut columnar_writer = ColumnarWriter::default();
let mut fast_field_names: Vec<Option<String>> = vec![None; schema.num_fields()]; let mut fast_field_names: Vec<Option<String>> = vec![None; schema.num_fields()];
let mut date_precisions: Vec<DatePrecision> = let mut date_precisions: Vec<DatePrecision> =
std::iter::repeat_with(DatePrecision::default) std::iter::repeat_with(DatePrecision::default)
.take(schema.num_fields()) .take(schema.num_fields())
.collect(); .collect();
let mut expand_dots = vec![false; schema.num_fields()]; let mut expand_dots = vec![false; schema.num_fields()];
let mut per_field_tokenizer = vec![None; schema.num_fields()];
// TODO see other types // TODO see other types
for (field_id, field_entry) in schema.fields() { for (field_id, field_entry) in schema.fields() {
if !field_entry.field_type().is_fast() { if !field_entry.field_type().is_fast() {
@@ -47,6 +61,18 @@ impl FastFieldsWriter {
expand_dots[field_id.field_id() as usize] = expand_dots[field_id.field_id() as usize] =
json_object_options.is_expand_dots_enabled(); json_object_options.is_expand_dots_enabled();
} }
if let FieldType::Str(text_options) = field_entry.field_type() {
if let Some(tokenizer_name) = text_options.get_fast_field_tokenizer_name() {
let text_analyzer = tokenizer_manager.get(tokenizer_name).ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"Tokenizer {:?} not found",
tokenizer_name
))
})?;
per_field_tokenizer[field_id.field_id() as usize] = Some(text_analyzer);
}
}
let sort_values_within_row = value_type == Type::Facet; let sort_values_within_row = value_type == Type::Facet;
if let Some(column_type) = value_type_to_column_type(value_type) { if let Some(column_type) = value_type_to_column_type(value_type) {
columnar_writer.record_column_type( columnar_writer.record_column_type(
@@ -56,14 +82,15 @@ impl FastFieldsWriter {
); );
} }
} }
FastFieldsWriter { Ok(FastFieldsWriter {
columnar_writer, columnar_writer,
fast_field_names, fast_field_names,
per_field_tokenizer,
num_docs: 0u32, num_docs: 0u32,
date_precisions, date_precisions,
expand_dots, expand_dots,
json_path_buffer: String::new(), json_path_buffer: String::new(),
} })
} }
    /// The memory used (including children) /// The memory used (including children)
@@ -111,14 +138,35 @@ impl FastFieldsWriter {
); );
} }
Value::Str(text_val) => { Value::Str(text_val) => {
self.columnar_writer if let Some(text_analyzer) =
.record_str(doc_id, field_name.as_str(), text_val); &self.per_field_tokenizer[field_value.field().field_id() as usize]
{
let mut token_stream = text_analyzer.token_stream(text_val);
token_stream.process(&mut |token: &Token| {
self.columnar_writer.record_str(
doc_id,
field_name.as_str(),
&token.text,
);
})
} else {
self.columnar_writer
.record_str(doc_id, field_name.as_str(), text_val);
}
} }
Value::Bytes(bytes_val) => { Value::Bytes(bytes_val) => {
self.columnar_writer self.columnar_writer
.record_bytes(doc_id, field_name.as_str(), bytes_val); .record_bytes(doc_id, field_name.as_str(), bytes_val);
} }
Value::PreTokStr(_) => todo!(), Value::PreTokStr(pre_tok) => {
for token in &pre_tok.tokens {
self.columnar_writer.record_str(
doc_id,
field_name.as_str(),
&token.text,
);
}
}
Value::Bool(bool_val) => { Value::Bool(bool_val) => {
self.columnar_writer self.columnar_writer
.record_bool(doc_id, field_name.as_str(), *bool_val); .record_bool(doc_id, field_name.as_str(), *bool_val);

View File

@@ -1640,6 +1640,7 @@ mod tests {
.add_ip_addr_field("ips", IpAddrOptions::default().set_fast().set_indexed()); .add_ip_addr_field("ips", IpAddrOptions::default().set_fast().set_indexed());
let i64_field = schema_builder.add_i64_field("i64", INDEXED); let i64_field = schema_builder.add_i64_field("i64", INDEXED);
let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED); let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
let id_opt_field = schema_builder.add_u64_field("id_opt", FAST | INDEXED | STORED);
let f64_field = schema_builder.add_f64_field("f64", INDEXED); let f64_field = schema_builder.add_f64_field("f64", INDEXED);
let date_field = schema_builder.add_date_field("date", INDEXED); let date_field = schema_builder.add_date_field("date", INDEXED);
let bytes_field = schema_builder.add_bytes_field("bytes", FAST | INDEXED | STORED); let bytes_field = schema_builder.add_bytes_field("bytes", FAST | INDEXED | STORED);
@@ -1670,7 +1671,7 @@ mod tests {
let settings = if sort_index { let settings = if sort_index {
IndexSettings { IndexSettings {
sort_by_field: Some(IndexSortByField { sort_by_field: Some(IndexSortByField {
field: "id".to_string(), field: "id_opt".to_string(),
order: Order::Asc, order: Order::Asc,
}), }),
..Default::default() ..Default::default()
@@ -1689,7 +1690,7 @@ mod tests {
let old_reader = index.reader()?; let old_reader = index.reader()?;
let ip_exists = |id| id % 3 != 0; // 0 does not exist let id_exists = |id| id % 3 != 0; // 0 does not exist
let multi_text_field_text1 = "test1 test2 test3 test1 test2 test3"; let multi_text_field_text1 = "test1 test2 test3 test1 test2 test3";
// rotate left // rotate left
@@ -1705,28 +1706,15 @@ mod tests {
let facet = Facet::from(&("/cola/".to_string() + &id.to_string())); let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
let ip = ip_from_id(id); let ip = ip_from_id(id);
if !ip_exists(id) { if !id_exists(id) {
// every 3rd doc has no ip field // every 3rd doc has no ip field
index_writer.add_document(doc!(id_field=>id, index_writer.add_document(doc!(
bytes_field => id.to_le_bytes().as_slice(), id_field=>id,
multi_numbers=> id,
multi_numbers => id,
bool_field => (id % 2u64) != 0,
i64_field => id as i64,
f64_field => id as f64,
date_field => DateTime::from_timestamp_secs(id as i64),
multi_bools => (id % 2u64) != 0,
multi_bools => (id % 2u64) == 0,
text_field => id.to_string(),
facet_field => facet,
large_text_field => LOREM,
multi_text_fields => multi_text_field_text1,
multi_text_fields => multi_text_field_text2,
multi_text_fields => multi_text_field_text3,
))?; ))?;
} else { } else {
index_writer.add_document(doc!(id_field=>id, index_writer.add_document(doc!(id_field=>id,
bytes_field => id.to_le_bytes().as_slice(), bytes_field => id.to_le_bytes().as_slice(),
id_opt_field => id,
ip_field => ip, ip_field => ip,
ips_field => ip, ips_field => ip,
ips_field => ip, ips_field => ip,
@@ -1835,6 +1823,13 @@ mod tests {
.values() .values()
.map(|id_occurrences| *id_occurrences as usize) .map(|id_occurrences| *id_occurrences as usize)
.sum::<usize>(); .sum::<usize>();
let num_docs_with_values = expected_ids_and_num_occurrences
.iter()
.filter(|(id, _id_occurrences)| id_exists(**id))
.map(|(_, id_occurrences)| *id_occurrences as usize)
.sum::<usize>();
assert_eq!(searcher.num_docs() as usize, num_docs_expected); assert_eq!(searcher.num_docs() as usize, num_docs_expected);
assert_eq!(old_searcher.num_docs() as usize, num_docs_expected); assert_eq!(old_searcher.num_docs() as usize, num_docs_expected);
assert_eq!( assert_eq!(
@@ -1855,7 +1850,7 @@ mod tests {
if force_end_merge && num_segments_before_merge > 1 && num_segments_after_merge == 1 { if force_end_merge && num_segments_before_merge > 1 && num_segments_after_merge == 1 {
let mut expected_multi_ips: Vec<_> = id_list let mut expected_multi_ips: Vec<_> = id_list
.iter() .iter()
.filter(|id| ip_exists(**id)) .filter(|id| id_exists(**id))
.flat_map(|id| vec![ip_from_id(*id), ip_from_id(*id)]) .flat_map(|id| vec![ip_from_id(*id), ip_from_id(*id)])
.collect(); .collect();
assert_eq!(num_ips, expected_multi_ips.len() as u32); assert_eq!(num_ips, expected_multi_ips.len() as u32);
@@ -1893,7 +1888,7 @@ mod tests {
let expected_ips = expected_ids_and_num_occurrences let expected_ips = expected_ids_and_num_occurrences
.keys() .keys()
.flat_map(|id| { .flat_map(|id| {
if !ip_exists(*id) { if !id_exists(*id) {
None None
} else { } else {
Some(Ipv6Addr::from_u128(*id as u128)) Some(Ipv6Addr::from_u128(*id as u128))
@@ -1905,7 +1900,7 @@ mod tests {
let expected_ips = expected_ids_and_num_occurrences let expected_ips = expected_ids_and_num_occurrences
.keys() .keys()
.filter_map(|id| { .filter_map(|id| {
if !ip_exists(*id) { if !id_exists(*id) {
None None
} else { } else {
Some(Ipv6Addr::from_u128(*id as u128)) Some(Ipv6Addr::from_u128(*id as u128))
@@ -1937,16 +1932,25 @@ mod tests {
.unwrap() .unwrap()
.unwrap(); .unwrap();
for doc in segment_reader.doc_ids_alive() { for doc in segment_reader.doc_ids_alive() {
let id = id_reader.first(doc).unwrap();
let vals: Vec<u64> = ff_reader.values_for_doc(doc).collect(); let vals: Vec<u64> = ff_reader.values_for_doc(doc).collect();
assert_eq!(vals.len(), 2); if id_exists(id) {
assert_eq!(vals[0], vals[1]); assert_eq!(vals.len(), 2);
assert_eq!(id_reader.first(doc), Some(vals[0])); assert_eq!(vals[0], vals[1]);
assert!(expected_ids_and_num_occurrences.contains_key(&vals[0]));
assert_eq!(id_reader.first(doc), Some(vals[0]));
} else {
assert_eq!(vals.len(), 0);
}
let bool_vals: Vec<bool> = bool_ff_reader.values_for_doc(doc).collect(); let bool_vals: Vec<bool> = bool_ff_reader.values_for_doc(doc).collect();
assert_eq!(bool_vals.len(), 2); if id_exists(id) {
assert_ne!(bool_vals[0], bool_vals[1]); assert_eq!(bool_vals.len(), 2);
assert_ne!(bool_vals[0], bool_vals[1]);
assert!(expected_ids_and_num_occurrences.contains_key(&vals[0])); } else {
assert_eq!(bool_vals.len(), 0);
}
} }
} }
@@ -1970,26 +1974,28 @@ mod tests {
.as_u64() .as_u64()
.unwrap(); .unwrap();
assert!(expected_ids_and_num_occurrences.contains_key(&id)); assert!(expected_ids_and_num_occurrences.contains_key(&id));
let id2 = store_reader if id_exists(id) {
.get(doc_id) let id2 = store_reader
.unwrap() .get(doc_id)
.get_first(multi_numbers) .unwrap()
.unwrap() .get_first(multi_numbers)
.as_u64() .unwrap()
.unwrap(); .as_u64()
assert_eq!(id, id2); .unwrap();
let bool = store_reader assert_eq!(id, id2);
.get(doc_id) let bool = store_reader
.unwrap() .get(doc_id)
.get_first(bool_field) .unwrap()
.unwrap() .get_first(bool_field)
.as_bool() .unwrap()
.unwrap(); .as_bool()
let doc = store_reader.get(doc_id).unwrap(); .unwrap();
let mut bool2 = doc.get_all(multi_bools); let doc = store_reader.get(doc_id).unwrap();
assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap()); let mut bool2 = doc.get_all(multi_bools);
assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap()); assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_eq!(None, bool2.next()) assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_eq!(None, bool2.next())
}
} }
} }
// test search // test search
@@ -2011,22 +2017,25 @@ mod tests {
top_docs.iter().map(|el| el.1).collect::<Vec<_>>() top_docs.iter().map(|el| el.1).collect::<Vec<_>>()
}; };
for (existing_id, count) in &expected_ids_and_num_occurrences { for (id, count) in &expected_ids_and_num_occurrences {
let (existing_id, count) = (*existing_id, *count); let (existing_id, count) = (*id, *count);
let get_num_hits = |field| do_search(&existing_id.to_string(), field).len() as u64; let get_num_hits = |field| do_search(&existing_id.to_string(), field).len() as u64;
assert_eq!(get_num_hits(id_field), count);
if !id_exists(existing_id) {
continue;
}
assert_eq!(get_num_hits(text_field), count); assert_eq!(get_num_hits(text_field), count);
assert_eq!(get_num_hits(i64_field), count); assert_eq!(get_num_hits(i64_field), count);
assert_eq!(get_num_hits(f64_field), count); assert_eq!(get_num_hits(f64_field), count);
assert_eq!(get_num_hits(id_field), count);
// Test multi text // Test multi text
assert_eq!( assert_eq!(
do_search("\"test1 test2\"", multi_text_fields).len(), do_search("\"test1 test2\"", multi_text_fields).len(),
num_docs_expected num_docs_with_values
); );
assert_eq!( assert_eq!(
do_search("\"test2 test3\"", multi_text_fields).len(), do_search("\"test2 test3\"", multi_text_fields).len(),
num_docs_expected num_docs_with_values
); );
// Test bytes // Test bytes
@@ -2062,7 +2071,7 @@ mod tests {
// //
for (existing_id, count) in &expected_ids_and_num_occurrences { for (existing_id, count) in &expected_ids_and_num_occurrences {
let (existing_id, count) = (*existing_id, *count); let (existing_id, count) = (*existing_id, *count);
if !ip_exists(existing_id) { if !id_exists(existing_id) {
continue; continue;
} }
let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64; let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64;
@@ -2083,7 +2092,7 @@ mod tests {
// //
for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) { for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
let (existing_id, count) = (*existing_id, *count); let (existing_id, count) = (*existing_id, *count);
if !ip_exists(existing_id) { if !id_exists(existing_id) {
continue; continue;
} }
let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| { let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
@@ -2106,7 +2115,7 @@ mod tests {
// //
for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) { for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
let (existing_id, count) = (*existing_id, *count); let (existing_id, count) = (*existing_id, *count);
if !ip_exists(existing_id) { if !id_exists(existing_id) {
continue; continue;
} }
let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| { let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
@@ -2131,23 +2140,61 @@ mod tests {
.fast_fields() .fast_fields()
.u64("id") .u64("id")
.unwrap() .unwrap()
.first_or_default_col(0); .first_or_default_col(9999);
for doc_id in segment_reader.doc_ids_alive() { for doc_id in segment_reader.doc_ids_alive() {
let id = ff_reader.get_val(doc_id);
if !id_exists(id) {
continue;
}
let facet_ords: Vec<u64> = facet_reader.facet_ords(doc_id).collect(); let facet_ords: Vec<u64> = facet_reader.facet_ords(doc_id).collect();
assert_eq!(facet_ords.len(), 1); assert_eq!(facet_ords.len(), 1);
let mut facet = Facet::default(); let mut facet = Facet::default();
facet_reader facet_reader
.facet_from_ord(facet_ords[0], &mut facet) .facet_from_ord(facet_ords[0], &mut facet)
.unwrap(); .unwrap();
let id = ff_reader.get_val(doc_id);
let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string())); let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string()));
assert_eq!(facet, facet_expected); assert_eq!(facet, facet_expected);
} }
} }
// Test if index property is in sort order
if sort_index {
// load all id_opt in each segment and check they are in order
for reader in searcher.segment_readers() {
let (ff_reader, _) = reader.fast_fields().u64_lenient("id_opt").unwrap().unwrap();
let mut ids_in_segment: Vec<u64> = Vec::new();
for doc in 0..reader.num_docs() {
ids_in_segment.extend(ff_reader.values_for_doc(doc));
}
assert!(is_sorted(&ids_in_segment));
fn is_sorted<T>(data: &[T]) -> bool
where T: Ord {
data.windows(2).all(|w| w[0] <= w[1])
}
}
}
Ok(index) Ok(index)
} }
#[test]
fn test_sort_index_on_opt_field_regression() {
assert!(test_operation_strategy(
&[
IndexingOp::AddDoc { id: 81 },
IndexingOp::AddDoc { id: 70 },
IndexingOp::DeleteDoc { id: 70 }
],
true,
false
)
.is_ok());
}
#[test] #[test]
fn test_ip_range_query_multivalue_bug() { fn test_ip_range_query_multivalue_bug() {
assert!(test_operation_strategy( assert!(test_operation_strategy(

View File

@@ -171,7 +171,7 @@ mod tests {
index_writer.set_merge_policy(Box::new(log_merge_policy)); index_writer.set_merge_policy(Box::new(log_merge_policy));
// after every commit the merge checker is started, it will merge only segments with 1 // after every commit the merge checker is started, it will merge only segments with 1
// element in it because of the max_merge_size. // element in it because of the max_docs_before_merge.
index_writer.add_document(doc!(int_field=>1_u64))?; index_writer.add_document(doc!(int_field=>1_u64))?;
index_writer.commit()?; index_writer.commit()?;

View File

@@ -306,7 +306,7 @@ impl IndexMerger {
sort_by_field: &IndexSortByField, sort_by_field: &IndexSortByField,
) -> crate::Result<Arc<dyn ColumnValues>> { ) -> crate::Result<Arc<dyn ColumnValues>> {
reader.schema().get_field(&sort_by_field.field)?; reader.schema().get_field(&sort_by_field.field)?;
let value_accessor = reader let (value_accessor, _column_type) = reader
.fast_fields() .fast_fields()
.u64_lenient(&sort_by_field.field)? .u64_lenient(&sort_by_field.field)?
.ok_or_else(|| FastFieldNotAvailableError { .ok_or_else(|| FastFieldNotAvailableError {

View File

@@ -111,7 +111,10 @@ impl SegmentWriter {
per_field_postings_writers, per_field_postings_writers,
fieldnorms_writer: FieldNormsWriter::for_schema(&schema), fieldnorms_writer: FieldNormsWriter::for_schema(&schema),
segment_serializer, segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema(&schema), fast_field_writers: FastFieldsWriter::from_schema_and_tokenizer_manager(
&schema,
tokenizer_manager,
)?,
doc_opstamps: Vec::with_capacity(1_000), doc_opstamps: Vec::with_capacity(1_000),
per_field_text_analyzers, per_field_text_analyzers,
term_buffer: Term::with_capacity(16), term_buffer: Term::with_capacity(16),

View File

@@ -33,7 +33,7 @@ impl FastFieldRangeWeight {
impl Weight for FastFieldRangeWeight { impl Weight for FastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let fast_field_reader = reader.fast_fields(); let fast_field_reader = reader.fast_fields();
let Some(column) = fast_field_reader.u64_lenient(&self.field)? else { let Some((column, _)) = fast_field_reader.u64_lenient(&self.field)? else {
return Ok(Box::new(EmptyScorer)); return Ok(Box::new(EmptyScorer));
}; };
let value_range = bound_to_value_range( let value_range = bound_to_value_range(

View File

@@ -16,13 +16,53 @@ pub struct TextOptions {
#[serde(default)] #[serde(default)]
stored: bool, stored: bool,
#[serde(default)] #[serde(default)]
fast: bool, fast: FastFieldOptions,
#[serde(default)] #[serde(default)]
#[serde(skip_serializing_if = "is_false")] #[serde(skip_serializing_if = "is_false")]
/// coerce values if they are not of type string /// coerce values into string if they are not of type string
coerce: bool, coerce: bool,
} }
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
enum FastFieldOptions {
IsEnabled(bool),
EnabledWithTokenizer { with_tokenizer: TokenizerName },
}
impl Default for FastFieldOptions {
fn default() -> Self {
FastFieldOptions::IsEnabled(false)
}
}
impl BitOr<FastFieldOptions> for FastFieldOptions {
type Output = FastFieldOptions;
fn bitor(self, other: FastFieldOptions) -> FastFieldOptions {
match (self, other) {
(
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
},
_,
)
| (
_,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
},
) => FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
},
(FastFieldOptions::IsEnabled(true), _) | (_, FastFieldOptions::IsEnabled(true)) => {
FastFieldOptions::IsEnabled(true)
}
(_, FastFieldOptions::IsEnabled(false)) => FastFieldOptions::IsEnabled(false),
}
}
}
fn is_false(val: &bool) -> bool { fn is_false(val: &bool) -> bool {
!val !val
} }
@@ -40,7 +80,21 @@ impl TextOptions {
/// Returns true if and only if the value is a fast field. /// Returns true if and only if the value is a fast field.
pub fn is_fast(&self) -> bool { pub fn is_fast(&self) -> bool {
self.fast matches!(self.fast, FastFieldOptions::IsEnabled(true))
|| matches!(
&self.fast,
FastFieldOptions::EnabledWithTokenizer { with_tokenizer: _ }
)
}
/// Returns true if and only if the value is a fast field.
pub fn get_fast_field_tokenizer_name(&self) -> Option<&str> {
match &self.fast {
FastFieldOptions::IsEnabled(true) | FastFieldOptions::IsEnabled(false) => None,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
} => Some(tokenizer.name()),
}
} }
/// Returns true if values should be coerced to strings (numbers, null). /// Returns true if values should be coerced to strings (numbers, null).
@@ -53,19 +107,24 @@ impl TextOptions {
/// Fast fields are designed for random access. /// Fast fields are designed for random access.
/// Access time are similar to a random lookup in an array. /// Access time are similar to a random lookup in an array.
/// Text fast fields will have the term ids stored in the fast field. /// Text fast fields will have the term ids stored in the fast field.
/// The fast field will be a multivalued fast field.
/// ///
/// The effective cardinality depends on the tokenizer. When creating fast fields on text /// The effective cardinality depends on the tokenizer. Without a tokenizer, the text will be
/// fields it is recommended to use the "raw" tokenizer, since it will store the original text /// stored as is, which equals to the "raw" tokenizer. The tokenizer can be used to apply
/// unchanged. The "default" tokenizer will store the terms as lower case and this will be /// normalization like lower case.
/// reflected in the dictionary.
/// ///
/// The original text can be retrieved via /// The original text can be retrieved via
/// [`TermDictionary::ord_to_term()`](crate::termdict::TermDictionary::ord_to_term) /// [`TermDictionary::ord_to_term()`](crate::termdict::TermDictionary::ord_to_term)
/// from the dictionary. /// from the dictionary.
#[must_use] #[must_use]
pub fn set_fast(mut self) -> TextOptions { pub fn set_fast(mut self, tokenizer_name: Option<&str>) -> TextOptions {
self.fast = true; if let Some(tokenizer) = tokenizer_name {
let tokenizer = TokenizerName::from_name(tokenizer);
self.fast = FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
}
} else {
self.fast = FastFieldOptions::IsEnabled(true);
}
self self
} }
@@ -92,7 +151,7 @@ impl TextOptions {
} }
#[derive(Clone, PartialEq, Debug, Eq, Serialize, Deserialize)] #[derive(Clone, PartialEq, Debug, Eq, Serialize, Deserialize)]
struct TokenizerName(Cow<'static, str>); pub(crate) struct TokenizerName(Cow<'static, str>);
const DEFAULT_TOKENIZER_NAME: &str = "default"; const DEFAULT_TOKENIZER_NAME: &str = "default";
@@ -105,7 +164,7 @@ impl Default for TokenizerName {
} }
impl TokenizerName { impl TokenizerName {
const fn from_static(name: &'static str) -> Self { pub const fn from_static(name: &'static str) -> Self {
TokenizerName(Cow::Borrowed(name)) TokenizerName(Cow::Borrowed(name))
} }
fn from_name(name: &str) -> Self { fn from_name(name: &str) -> Self {
@@ -199,7 +258,7 @@ pub const STRING: TextOptions = TextOptions {
record: IndexRecordOption::Basic, record: IndexRecordOption::Basic,
}), }),
stored: false, stored: false,
fast: false, fast: FastFieldOptions::IsEnabled(false),
coerce: false, coerce: false,
}; };
@@ -212,7 +271,7 @@ pub const TEXT: TextOptions = TextOptions {
}), }),
stored: false, stored: false,
coerce: false, coerce: false,
fast: false, fast: FastFieldOptions::IsEnabled(false),
}; };
impl<T: Into<TextOptions>> BitOr<T> for TextOptions { impl<T: Into<TextOptions>> BitOr<T> for TextOptions {
@@ -240,7 +299,7 @@ impl From<StoredFlag> for TextOptions {
TextOptions { TextOptions {
indexing: None, indexing: None,
stored: true, stored: true,
fast: false, fast: FastFieldOptions::IsEnabled(false),
coerce: false, coerce: false,
} }
} }
@@ -251,7 +310,7 @@ impl From<CoerceFlag> for TextOptions {
TextOptions { TextOptions {
indexing: None, indexing: None,
stored: false, stored: false,
fast: false, fast: FastFieldOptions::IsEnabled(false),
coerce: true, coerce: true,
} }
} }
@@ -262,7 +321,7 @@ impl From<FastFlag> for TextOptions {
TextOptions { TextOptions {
indexing: None, indexing: None,
stored: false, stored: false,
fast: true, fast: FastFieldOptions::IsEnabled(true),
coerce: false, coerce: false,
} }
} }
@@ -281,6 +340,7 @@ where
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::schema::text_options::{FastFieldOptions, TokenizerName};
use crate::schema::*; use crate::schema::*;
#[test] #[test]
@@ -323,4 +383,44 @@ mod tests {
let options3: TextOptions = serde_json::from_str("{}").unwrap(); let options3: TextOptions = serde_json::from_str("{}").unwrap();
assert_eq!(options3.indexing, None); assert_eq!(options3.indexing, None);
} }
#[test]
fn serde_fast_field_tokenizer() {
let json = r#" {
"fast": { "with_tokenizer": "default" }
} "#;
let options: TextOptions = serde_json::from_str(json).unwrap();
assert_eq!(
options.fast,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: TokenizerName::from_static("default")
}
);
let options: TextOptions =
serde_json::from_str(&serde_json::to_string(&options).unwrap()).unwrap();
assert_eq!(
options.fast,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: TokenizerName::from_static("default")
}
);
let json = r#" {
"fast": true
} "#;
let options: TextOptions = serde_json::from_str(json).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(true));
let options: TextOptions =
serde_json::from_str(&serde_json::to_string(&options).unwrap()).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(true));
let json = r#" {
"fast": false
} "#;
let options: TextOptions = serde_json::from_str(json).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(false));
let options: TextOptions =
serde_json::from_str(&serde_json::to_string(&options).unwrap()).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(false));
}
} }

View File

@@ -2,7 +2,8 @@ use std::str::CharIndices;
use super::{Token, TokenStream, Tokenizer}; use super::{Token, TokenStream, Tokenizer};
/// Tokenize the text by splitting on whitespaces and punctuation. /// Tokenize the text by returning only tokens of consecutive
/// [`alphanumeric`](char::is_alphanumeric).
#[derive(Clone)] #[derive(Clone)]
pub struct SimpleTokenizer; pub struct SimpleTokenizer;

View File

@@ -13,9 +13,8 @@ use crate::tokenizer::{
/// By default, it is populated with the following managers. /// By default, it is populated with the following managers.
/// ///
/// * `raw` : does not process nor tokenize the text. /// * `raw` : does not process nor tokenize the text.
/// * `default` : Chops the text on according to whitespace and /// * `default` : Chops the text according to [`SimpleTokenizer`],
/// punctuation, removes tokens that are too long, and lowercases /// removes tokens that are longer than 40, and lowercases tokens
/// tokens
/// * `en_stem` : Like `default`, but also applies stemming on the /// * `en_stem` : Like `default`, but also applies stemming on the
/// resulting tokens. Stemming can improve the recall of your /// resulting tokens. Stemming can improve the recall of your
/// search engine. /// search engine.
@@ -35,7 +34,9 @@ impl TokenizerManager {
/// Registers a new tokenizer associated with a given name. /// Registers a new tokenizer associated with a given name.
pub fn register<T>(&self, tokenizer_name: &str, tokenizer: T) pub fn register<T>(&self, tokenizer_name: &str, tokenizer: T)
where TextAnalyzer: From<T> { where
TextAnalyzer: From<T>,
{
let boxed_tokenizer: TextAnalyzer = TextAnalyzer::from(tokenizer); let boxed_tokenizer: TextAnalyzer = TextAnalyzer::from(tokenizer);
self.tokenizers self.tokenizers
.write() .write()

View File

@@ -6,8 +6,6 @@ license = "MIT"
[dependencies] [dependencies]
common = {path="../common", package="tantivy-common"} common = {path="../common", package="tantivy-common"}
ciborium = "0.2"
serde = "1"
tantivy-fst = "0.4" tantivy-fst = "0.4"
[dev-dependencies] [dev-dependencies]

View File

@@ -26,3 +26,95 @@ possible.
- it allows incremental encoding of the keys - it allows incremental encoding of the keys
- the front compression is leveraged to optimize - the front compression is leveraged to optimize
the intersection with an automaton the intersection with an automaton
# On disk format
Overview of the SSTable format. Unless noted otherwise, numbers are little-endian.
### SSTable
```
+-------+-------+-----+--------+
| Block | Block | ... | Footer |
+-------+-------+-----+--------+
|----( # of blocks)---|
```
- Block(`SSTBlock`): a list of independent blocks, terminated by a single empty block.
- Footer(`SSTFooter`)
### SSTBlock
```
+----------+--------+-------+-------+-----+
| BlockLen | Values | Delta | Delta | ... |
+----------+--------+-------+-------+-----+
|----( # of deltas)---|
```
- BlockLen(u32): length of the block
- Values: an application-defined format storing a sequence of values, capable of determining its own length
- Delta
### Delta
```
+---------+--------+
| KeepAdd | Suffix |
+---------+--------+
```
- KeepAdd
- Suffix: KeepAdd.add bytes of key suffix
### KeepAdd
KeepAdd has two different representations: a very compact 1-byte one, which is enough for most usage, and a longer variable-length one when required.
When keep < 16 and add < 16:
```
+-----+------+
| Add | Keep |
+-----+------+
```
- Add(u4): number of bytes to push
- Keep(u4): number of bytes to pop
Otherwise:
```
+------+------+-----+
| 0x01 | Keep | Add |
+------+------+-----+
```
- Add(VInt): number of bytes to push
- Keep(VInt): number of bytes to pop
Note: there is no ambiguity between the two representations, as Add is always guaranteed to be non-zero, except for the very first key of an SSTable, where Keep is guaranteed to be zero.
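For intuition, the Keep/Add/Suffix triple is plain prefix-delta coding: each key reuses a prefix of the previous key and appends new suffix bytes. A minimal sketch of that reconstruction (the helper is illustrative; it mirrors the `write_suffix(keep_len, suffix)` calls made by the index serializer further down):

```rust
// Illustrative only: apply one delta to the previously decoded key.
fn apply_delta(key: &mut Vec<u8>, keep: usize, suffix: &[u8]) {
    key.truncate(keep);            // keep the shared prefix
    key.extend_from_slice(suffix); // append the new suffix bytes
}

fn main() {
    let mut key = Vec::new();
    apply_delta(&mut key, 0, b"abc"); // first key of the table: keep is 0
    apply_delta(&mut key, 2, b"d");   // "abc" -> "abd": 2 bytes shared, 1 added
    assert_eq!(key, b"abd");
}
```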
### SSTFooter
```
+-------+-------+-----+-------------+---------+---------+------+
| Block | Block | ... | IndexOffset | NumTerm | Version | Type |
+-------+-------+-----+-------------+---------+---------+------+
|----( # of blocks)---|
```
- Block(SSTBlock): uses IndexValue for its Values format
- IndexOffset(u64): Offset to the start of the SSTFooter
- NumTerm(u64): number of terms in the sstable
- Version(u32): Currently defined to 0x00\_00\_00\_01
- Type(u32): Defined to 0x00\_00\_00\_02 (see the parsing sketch below)
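Concretely, the last 24 bytes of the dictionary file form a fixed-size tail (IndexOffset, NumTerm, Version, Type), which is how `Dictionary::open` further down locates the index. A minimal, hypothetical parsing sketch (struct and function names are illustrative):

```rust
// Illustrative only: read the fixed-size tail of an sstable dictionary file.
struct FooterTail {
    index_offset: u64,
    num_terms: u64,
    version: u32,
    kind: u32,
}

fn parse_footer_tail(file: &[u8]) -> Option<FooterTail> {
    let tail = file.get(file.len().checked_sub(24)?..)?;
    Some(FooterTail {
        index_offset: u64::from_le_bytes(tail[0..8].try_into().ok()?),
        num_terms: u64::from_le_bytes(tail[8..16].try_into().ok()?),
        version: u32::from_le_bytes(tail[16..20].try_into().ok()?),
        kind: u32::from_le_bytes(tail[20..24].try_into().ok()?),
    })
}
```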
### IndexValue
```
+------------+----------+-------+-------+-----+
| EntryCount | StartPos | Entry | Entry | ... |
+------------+----------+-------+-------+-----+
|---( # of entries)---|
```
- EntryCount(VInt): number of entries
- StartPos(VInt): the start pos of the first (data) block referenced by this (index) block
- Entry (IndexEntry)
### Entry
```
+----------+--------------+
| BlockLen | FirstOrdinal |
+----------+--------------+
```
- BlockLen(VInt): length of the block
- FirstOrdinal(VInt): ordinal of the first element in the given block, delta-encoded against the previous entry's FirstOrdinal (see the sketch below)
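Since entries carry only lengths, a reader recovers each data block's byte range by accumulating `BlockLen` starting from `StartPos`, and each first ordinal by accumulating the deltas. A minimal sketch of that arithmetic (types are illustrative; the real decoding lives in the `IndexValueReader` added further down):

```rust
use std::ops::Range;

// Illustrative only: rebuild (byte_range, first_ordinal) pairs from StartPos
// plus a sequence of (BlockLen, FirstOrdinal delta) entries.
fn rebuild_block_addrs(start_pos: u64, entries: &[(u64, u64)]) -> Vec<(Range<u64>, u64)> {
    let mut addrs = Vec::with_capacity(entries.len());
    let mut start = start_pos;
    let mut first_ordinal = 0u64;
    for &(block_len, ordinal_delta) in entries {
        first_ordinal += ordinal_delta;
        addrs.push((start..start + block_len, first_ordinal));
        start += block_len;
    }
    addrs
}

fn main() {
    let addrs = rebuild_block_addrs(0, &[(10, 0), (10, 5), (10, 5)]);
    assert_eq!(addrs[2], (20..30, 10));
}
```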

View File

@@ -18,6 +18,7 @@ where W: io::Write
value_writer: TValueWriter, value_writer: TValueWriter,
// Only here to avoid allocations. // Only here to avoid allocations.
stateless_buffer: Vec<u8>, stateless_buffer: Vec<u8>,
block_len: usize,
} }
impl<W, TValueWriter> DeltaWriter<W, TValueWriter> impl<W, TValueWriter> DeltaWriter<W, TValueWriter>
@@ -31,15 +32,14 @@ where
write: CountingWriter::wrap(BufWriter::new(wrt)), write: CountingWriter::wrap(BufWriter::new(wrt)),
value_writer: TValueWriter::default(), value_writer: TValueWriter::default(),
stateless_buffer: Vec::new(), stateless_buffer: Vec::new(),
block_len: BLOCK_LEN,
} }
} }
}
impl<W, TValueWriter> DeltaWriter<W, TValueWriter> pub fn set_block_len(&mut self, block_len: usize) {
where self.block_len = block_len
W: io::Write, }
TValueWriter: value::ValueWriter,
{
pub fn flush_block(&mut self) -> io::Result<Option<Range<usize>>> { pub fn flush_block(&mut self) -> io::Result<Option<Range<usize>>> {
if self.block.is_empty() { if self.block.is_empty() {
return Ok(None); return Ok(None);
@@ -82,7 +82,7 @@ where
} }
pub fn flush_block_if_required(&mut self) -> io::Result<Option<Range<usize>>> { pub fn flush_block_if_required(&mut self) -> io::Result<Option<Range<usize>>> {
if self.block.len() > BLOCK_LEN { if self.block.len() > self.block_len {
return self.flush_block(); return self.flush_block();
} }
Ok(None) Ok(None)

View File

@@ -5,7 +5,7 @@ use std::ops::{Bound, RangeBounds};
use std::sync::Arc; use std::sync::Arc;
use common::file_slice::FileSlice; use common::file_slice::FileSlice;
use common::{BinarySerializable, OwnedBytes}; use common::{BinarySerializable, DictionaryFooter, OwnedBytes};
use tantivy_fst::automaton::AlwaysMatch; use tantivy_fst::automaton::AlwaysMatch;
use tantivy_fst::Automaton; use tantivy_fst::Automaton;
@@ -110,7 +110,7 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// only block for up to `limit` matching terms. /// only block for up to `limit` matching terms.
/// ///
/// It works by identifying /// It works by identifying
/// - `first_block`: the block containing the start boudary key /// - `first_block`: the block containing the start boundary key
/// - `last_block`: the block containing the end boundary key. /// - `last_block`: the block containing the end boundary key.
/// ///
/// And then returning the range that spans over all blocks between. /// And then returning the range that spans over all blocks between.
@@ -178,10 +178,15 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// Opens a `TermDictionary`. /// Opens a `TermDictionary`.
pub fn open(term_dictionary_file: FileSlice) -> io::Result<Self> { pub fn open(term_dictionary_file: FileSlice) -> io::Result<Self> {
let (main_slice, footer_len_slice) = term_dictionary_file.split_from_end(16); let (main_slice, footer_len_slice) = term_dictionary_file.split_from_end(24);
let mut footer_len_bytes: OwnedBytes = footer_len_slice.read_bytes()?; let mut footer_len_bytes: OwnedBytes = footer_len_slice.read_bytes()?;
let index_offset = u64::deserialize(&mut footer_len_bytes)?; let index_offset = u64::deserialize(&mut footer_len_bytes)?;
let num_terms = u64::deserialize(&mut footer_len_bytes)?; let num_terms = u64::deserialize(&mut footer_len_bytes)?;
let footer = DictionaryFooter::deserialize(&mut footer_len_bytes)?;
crate::FOOTER.verify_equal(&footer)?;
let (sstable_slice, index_slice) = main_slice.split(index_offset as usize); let (sstable_slice, index_slice) = main_slice.split(index_offset as usize);
let sstable_index_bytes = index_slice.read_bytes()?; let sstable_index_bytes = index_slice.read_bytes()?;
let sstable_index = SSTableIndex::load(sstable_index_bytes.as_slice()) let sstable_index = SSTableIndex::load(sstable_index_bytes.as_slice())
@@ -231,7 +236,7 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
let suffix = sstable_delta_reader.suffix(); let suffix = sstable_delta_reader.suffix();
match prefix_len.cmp(&ok_bytes) { match prefix_len.cmp(&ok_bytes) {
Ordering::Less => return Ok(None), // poped bytes already matched => too far Ordering::Less => return Ok(None), // popped bytes already matched => too far
Ordering::Equal => (), Ordering::Equal => (),
Ordering::Greater => { Ordering::Greater => {
// the ok prefix is less than current entry prefix => continue to next elem // the ok prefix is less than current entry prefix => continue to next elem

View File

@@ -17,6 +17,8 @@ pub use dictionary::Dictionary;
pub use streamer::{Streamer, StreamerBuilder}; pub use streamer::{Streamer, StreamerBuilder};
mod block_reader; mod block_reader;
use common::{BinarySerializable, DictionaryFooter, DictionaryKind};
pub use self::block_reader::BlockReader; pub use self::block_reader::BlockReader;
pub use self::delta::{DeltaReader, DeltaWriter}; pub use self::delta::{DeltaReader, DeltaWriter};
pub use self::merge::VoidMerge; pub use self::merge::VoidMerge;
@@ -26,6 +28,10 @@ use crate::value::{RangeValueReader, RangeValueWriter};
pub type TermOrdinal = u64; pub type TermOrdinal = u64;
const DEFAULT_KEY_CAPACITY: usize = 50; const DEFAULT_KEY_CAPACITY: usize = 50;
const FOOTER: DictionaryFooter = DictionaryFooter {
kind: DictionaryKind::SSTable,
version: 1,
};
/// Given two byte string returns the length of /// Given two byte string returns the length of
/// the longest common prefix. /// the longest common prefix.
@@ -201,6 +207,14 @@ where
} }
} }
/// Set the target block length.
///
/// The delta part of a block will generally be slightly larger than the requested `block_len`;
/// note that this does not account for the length of the Value part of the table.
pub fn set_block_len(&mut self, block_len: usize) {
self.delta_writer.set_block_len(block_len)
}
/// Returns the last inserted key. /// Returns the last inserted key.
/// If no key has been inserted yet, or the block was just /// If no key has been inserted yet, or the block was just
/// flushed, this function returns "". /// flushed, this function returns "".
@@ -288,6 +302,7 @@ where
self.first_ordinal_of_the_block = self.num_terms; self.first_ordinal_of_the_block = self.num_terms;
} }
let mut wrt = self.delta_writer.finish(); let mut wrt = self.delta_writer.finish();
// add a final empty block as an end marker
wrt.write_all(&0u32.to_le_bytes())?; wrt.write_all(&0u32.to_le_bytes())?;
let offset = wrt.written_bytes(); let offset = wrt.written_bytes();
@@ -295,6 +310,9 @@ where
self.index_builder.serialize(&mut wrt)?; self.index_builder.serialize(&mut wrt)?;
wrt.write_all(&offset.to_le_bytes())?; wrt.write_all(&offset.to_le_bytes())?;
wrt.write_all(&self.num_terms.to_le_bytes())?; wrt.write_all(&self.num_terms.to_le_bytes())?;
FOOTER.serialize(&mut wrt)?;
let wrt = wrt.finish(); let wrt = wrt.finish();
Ok(wrt.into_inner()?) Ok(wrt.into_inner()?)
} }
@@ -371,19 +389,26 @@ mod test {
assert_eq!( assert_eq!(
&buffer, &buffer,
&[ &[
// block len // block
7u8, 0u8, 0u8, 0u8, // keep 0 push 1 | "" 7u8, 0u8, 0u8, 0u8, // block len
16u8, 17u8, // keep 1 push 2 | 18 19 16u8, 17u8, // keep 0 push 1 | 17
33u8, 18u8, 19u8, // keep 1 push 1 | 20 33u8, 18u8, 19u8, // keep 1 push 2 | 18 19
17u8, 20u8, 0u8, 0u8, 0u8, 0u8, // no more blocks 17u8, 20u8, // keep 1 push 1 | 20
// end of block
0u8, 0u8, 0u8, 0u8, // no more blocks
// index // index
161, 102, 98, 108, 111, 99, 107, 115, 129, 162, 115, 108, 97, 115, 116, 95, 107, 7u8, 0u8, 0u8, 0u8, // block len
101, 121, 95, 111, 114, 95, 103, 114, 101, 97, 116, 101, 114, 130, 17, 20, 106, 98, 1, // num blocks
108, 111, 99, 107, 95, 97, 100, 100, 114, 162, 106, 98, 121, 116, 101, 95, 114, 97, 0, // offset
110, 103, 101, 162, 101, 115, 116, 97, 114, 116, 0, 99, 101, 110, 100, 11, 109, 11, // len of 1st block
102, 105, 114, 115, 116, 95, 111, 114, 100, 105, 110, 97, 108, 0, 15, 0, 0, 0, 0, 0, // first ord of 1st block
0, 0, 0, // offset for the index 32, 17, 20, // keep 0 push 2 | 17 20
3u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8 // num terms // end of block
0, 0, 0, 0, // no more blocks
15, 0, 0, 0, 0, 0, 0, 0, // index start offset
3, 0, 0, 0, 0, 0, 0, 0, // num_term
1, 0, 0, 0, // version
2, 0, 0, 0, // dictionary kind. sstable = 2
] ]
); );
let mut sstable_reader = VoidSSTable::reader(&buffer[..]); let mut sstable_reader = VoidSSTable::reader(&buffer[..]);
@@ -501,8 +526,8 @@ mod test {
fn test_proptest_sstable_ranges(words in prop::collection::btree_set("[a-c]{0,6}", 1..100), fn test_proptest_sstable_ranges(words in prop::collection::btree_set("[a-c]{0,6}", 1..100),
(lower_bound, upper_bound) in bounds_strategy(), (lower_bound, upper_bound) in bounds_strategy(),
) { ) {
// TODO tweak block size.
let mut builder = Dictionary::<VoidSSTable>::builder(Vec::new()).unwrap(); let mut builder = Dictionary::<VoidSSTable>::builder(Vec::new()).unwrap();
builder.set_block_len(16);
for word in &words { for word in &words {
builder.insert(word.as_bytes(), &()).unwrap(); builder.insert(word.as_bytes(), &()).unwrap();
} }

View File

@@ -1,11 +1,9 @@
use std::io; use std::io::{self, Write};
use std::ops::Range; use std::ops::Range;
use serde::{Deserialize, Serialize}; use crate::{common_prefix_len, SSTable, SSTableDataCorruption, TermOrdinal};
use crate::{common_prefix_len, SSTableDataCorruption, TermOrdinal}; #[derive(Default, Debug, Clone)]
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct SSTableIndex { pub struct SSTableIndex {
blocks: Vec<BlockMeta>, blocks: Vec<BlockMeta>,
} }
@@ -13,7 +11,17 @@ pub struct SSTableIndex {
impl SSTableIndex { impl SSTableIndex {
/// Load an index from its binary representation /// Load an index from its binary representation
pub fn load(data: &[u8]) -> Result<SSTableIndex, SSTableDataCorruption> { pub fn load(data: &[u8]) -> Result<SSTableIndex, SSTableDataCorruption> {
ciborium::de::from_reader(data).map_err(|_| SSTableDataCorruption) let mut reader = IndexSSTable::reader(data);
let mut blocks = Vec::new();
while reader.advance().map_err(|_| SSTableDataCorruption)? {
blocks.push(BlockMeta {
last_key_or_greater: reader.key().to_vec(),
block_addr: reader.value().clone(),
});
}
Ok(SSTableIndex { blocks })
} }
/// Get the [`BlockAddr`] of the requested block. /// Get the [`BlockAddr`] of the requested block.
@@ -23,7 +31,7 @@ impl SSTableIndex {
.map(|block_meta| block_meta.block_addr.clone()) .map(|block_meta| block_meta.block_addr.clone())
} }
/// Get the block id of the block that woudl contain `key`. /// Get the block id of the block that would contain `key`.
/// ///
/// Returns None if `key` is lexicographically after the last key recorded. /// Returns None if `key` is lexicographically after the last key recorded.
pub(crate) fn locate_with_key(&self, key: &[u8]) -> Option<usize> { pub(crate) fn locate_with_key(&self, key: &[u8]) -> Option<usize> {
@@ -69,13 +77,13 @@ impl SSTableIndex {
} }
} }
#[derive(Clone, Eq, PartialEq, Debug, Serialize, Deserialize)] #[derive(Clone, Eq, PartialEq, Debug)]
pub struct BlockAddr { pub struct BlockAddr {
pub byte_range: Range<usize>, pub byte_range: Range<usize>,
pub first_ordinal: u64, pub first_ordinal: u64,
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone)]
pub(crate) struct BlockMeta { pub(crate) struct BlockMeta {
/// Any byte string that is lexicographically greater or equal to /// Any byte string that is lexicographically greater or equal to
/// the last key in the block, /// the last key in the block,
@@ -130,11 +138,45 @@ impl SSTableIndexBuilder {
} }
pub fn serialize<W: std::io::Write>(&self, wrt: W) -> io::Result<()> { pub fn serialize<W: std::io::Write>(&self, wrt: W) -> io::Result<()> {
ciborium::ser::into_writer(&self.index, wrt) // we can't use a plain writer as it would generate an index
.map_err(|err| io::Error::new(io::ErrorKind::Other, err)) let mut sstable_writer = IndexSSTable::delta_writer(wrt);
// in tests, set a smaller block size to stress-test
#[cfg(test)]
sstable_writer.set_block_len(16);
let mut previous_key = Vec::with_capacity(crate::DEFAULT_KEY_CAPACITY);
for block in self.index.blocks.iter() {
let keep_len = common_prefix_len(&previous_key, &block.last_key_or_greater);
sstable_writer.write_suffix(keep_len, &block.last_key_or_greater[keep_len..]);
sstable_writer.write_value(&block.block_addr);
sstable_writer.flush_block_if_required()?;
previous_key.clear();
previous_key.extend_from_slice(&block.last_key_or_greater);
}
sstable_writer.flush_block()?;
sstable_writer.finish().write_all(&0u32.to_le_bytes())?;
Ok(())
} }
} }
/// SSTable representing an index
///
/// `last_key_or_greater` is used as the key, the value contains the
/// length and first ordinal of each block. The start offset is implicitly
/// obtained from lengths.
struct IndexSSTable;
impl SSTable for IndexSSTable {
type Value = BlockAddr;
type ValueReader = crate::value::index::IndexValueReader;
type ValueWriter = crate::value::index::IndexValueWriter;
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{BlockAddr, SSTableIndex, SSTableIndexBuilder}; use super::{BlockAddr, SSTableIndex, SSTableIndexBuilder};

132
sstable/src/value/index.rs Normal file
View File

@@ -0,0 +1,132 @@
use std::io;
use crate::value::{deserialize_vint_u64, ValueReader, ValueWriter};
use crate::{vint, BlockAddr};
#[derive(Default)]
pub(crate) struct IndexValueReader {
vals: Vec<BlockAddr>,
}
impl ValueReader for IndexValueReader {
type Value = BlockAddr;
#[inline(always)]
fn value(&self, idx: usize) -> &Self::Value {
&self.vals[idx]
}
fn load(&mut self, mut data: &[u8]) -> io::Result<usize> {
let original_num_bytes = data.len();
let num_vals = deserialize_vint_u64(&mut data) as usize;
self.vals.clear();
let mut first_ordinal = 0u64;
let mut prev_start = deserialize_vint_u64(&mut data) as usize;
for _ in 0..num_vals {
let len = deserialize_vint_u64(&mut data);
let delta_ordinal = deserialize_vint_u64(&mut data);
first_ordinal += delta_ordinal;
let end = prev_start + len as usize;
self.vals.push(BlockAddr {
byte_range: prev_start..end,
first_ordinal,
});
prev_start = end;
}
Ok(original_num_bytes - data.len())
}
}
#[derive(Default)]
pub(crate) struct IndexValueWriter {
vals: Vec<BlockAddr>,
}
impl ValueWriter for IndexValueWriter {
type Value = BlockAddr;
fn write(&mut self, val: &Self::Value) {
self.vals.push(val.clone());
}
fn serialize_block(&self, output: &mut Vec<u8>) {
let mut prev_ord = 0u64;
vint::serialize_into_vec(self.vals.len() as u64, output);
let start_pos = if let Some(block_addr) = self.vals.first() {
block_addr.byte_range.start as u64
} else {
0
};
vint::serialize_into_vec(start_pos, output);
// TODO use array_windows when it gets stabilized
for elem in self.vals.windows(2) {
let [current, next] = elem else {
unreachable!("windows should always return exactly 2 elements");
};
let len = next.byte_range.start - current.byte_range.start;
vint::serialize_into_vec(len as u64, output);
let delta = current.first_ordinal - prev_ord;
vint::serialize_into_vec(delta, output);
prev_ord = current.first_ordinal;
}
if let Some(last) = self.vals.last() {
let len = last.byte_range.end - last.byte_range.start;
vint::serialize_into_vec(len as u64, output);
let delta = last.first_ordinal - prev_ord;
vint::serialize_into_vec(delta, output);
}
}
fn clear(&mut self) {
self.vals.clear();
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_index_reader_writer() {
crate::value::tests::test_value_reader_writer::<_, IndexValueReader, IndexValueWriter>(&[]);
crate::value::tests::test_value_reader_writer::<_, IndexValueReader, IndexValueWriter>(&[
BlockAddr {
byte_range: 0..10,
first_ordinal: 0,
},
]);
crate::value::tests::test_value_reader_writer::<_, IndexValueReader, IndexValueWriter>(&[
BlockAddr {
byte_range: 0..10,
first_ordinal: 0,
},
BlockAddr {
byte_range: 10..20,
first_ordinal: 5,
},
]);
crate::value::tests::test_value_reader_writer::<_, IndexValueReader, IndexValueWriter>(&[
BlockAddr {
byte_range: 0..10,
first_ordinal: 0,
},
BlockAddr {
byte_range: 10..20,
first_ordinal: 5,
},
BlockAddr {
byte_range: 20..30,
first_ordinal: 10,
},
]);
crate::value::tests::test_value_reader_writer::<_, IndexValueReader, IndexValueWriter>(&[
BlockAddr {
byte_range: 5..10,
first_ordinal: 2,
},
]);
}
}

View File

@@ -1,3 +1,4 @@
pub(crate) mod index;
mod range; mod range;
mod u64_monotonic; mod u64_monotonic;
mod void; mod void;