Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-28 04:52:55 +00:00)

Compare commits (1 commit): tokenizer_... remove_dyn

| Author | SHA1 | Date |
|---|---|---|
|  | 6037cdfe7e |  |

.gitignore (vendored, 2 changes)
@@ -13,5 +13,3 @@ benchmark
.idea
trace.dat
cargo-timing*
control
variable

@@ -32,7 +32,7 @@ log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
fs4 = { version = "0.6.3", optional = true }
fs2 = { version = "0.4.3", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
@@ -44,11 +44,11 @@ rustc-hash = "1.1.0"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.5.0"
murmurhash32 = "0.3.0"
murmurhash32 = "0.2.0"
time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.10.0"
lru = "0.9.0"
fastdivide = "0.4.0"
itertools = "0.10.3"
measure_time = "0.8.2"
@@ -94,7 +94,7 @@ overflow-checks = true

[features]
default = ["mmap", "stopwords", "lz4-compression"]
mmap = ["fs4", "tempfile", "memmap2"]
mmap = ["fs2", "tempfile", "memmap2"]
stopwords = []

brotli-compression = ["brotli"]

@@ -15,7 +15,6 @@ homepage = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
bitpacking = {version="0.8", default-features=false, features = ["bitpacker1x"]}

[dev-dependencies]
rand = "0.8"

@@ -1,14 +1,10 @@
use std::convert::TryInto;
use std::io;
use std::ops::{Range, RangeInclusive};

use bitpacking::{BitPacker as ExternalBitPackerTrait, BitPacker1x};

pub struct BitPacker {
    mini_buffer: u64,
    mini_buffer_written: usize,
}

impl Default for BitPacker {
    fn default() -> Self {
        BitPacker::new()
@@ -122,125 +118,6 @@ impl BitUnpacker {
        let val_shifted = val_unshifted_unmasked >> bit_shift;
        val_shifted & self.mask
    }

    // Decodes the range of bitpacked `u32` values with idx
    // in [start_idx, start_idx + output.len()).
    //
    // # Panics
    //
    // This method panics if `num_bits` is > 32.
    fn get_batch_u32s(&self, start_idx: u32, data: &[u8], output: &mut [u32]) {
        assert!(
            self.bit_width() <= 32,
            "Bitwidth must be <= 32 to use this method."
        );

        let end_idx = start_idx + output.len() as u32;

        let end_bit_read = end_idx * self.num_bits;
        let end_byte_read = (end_bit_read + 7) / 8;
        assert!(
            end_byte_read as usize <= data.len(),
            "Requested index is out of bounds."
        );

        // Simple, slow implementation of get_batch_u32s, to deal with our ramps.
        let get_batch_ramp = |start_idx: u32, output: &mut [u32]| {
            for (out, idx) in output.iter_mut().zip(start_idx..) {
                *out = self.get(idx, data) as u32;
            }
        };

        // We use an unrolled routine to decode 32 values at once.
        // We therefore decompose our range of values to decode into three ranges:
        // - Entrance ramp: [start_idx, fast_track_start) (up to 31 values)
        // - Highway: [fast_track_start, fast_track_end) (a length multiple of 32)
        // - Exit ramp: [fast_track_end, start_idx + output.len()) (up to 31 values)

        // We want the start of the fast track to be byte-aligned.
        // A sufficient condition is to start with an idx that is a multiple of 8,
        // so the highway start is the closest multiple of 8 that is >= start_idx.
        let entrance_ramp_len = 8 - (start_idx % 8) % 8;

        let highway_start: u32 = start_idx + entrance_ramp_len;

        if highway_start + BitPacker1x::BLOCK_LEN as u32 > end_idx {
            // We don't have enough values to have even a single block of highway.
            // Let's just supply the values the simple way.
            get_batch_ramp(start_idx, output);
            return;
        }

        let num_blocks: u32 = (end_idx - highway_start) / BitPacker1x::BLOCK_LEN as u32;

        // Entrance ramp
        get_batch_ramp(start_idx, &mut output[..entrance_ramp_len as usize]);

        // Highway
        let mut offset = (highway_start * self.num_bits) as usize / 8;
        let mut output_cursor = (highway_start - start_idx) as usize;
        for _ in 0..num_blocks {
            offset += BitPacker1x.decompress(
                &data[offset..],
                &mut output[output_cursor..],
                self.num_bits as u8,
            );
            output_cursor += 32;
        }

        // Exit ramp
        let highway_end = highway_start + num_blocks * BitPacker1x::BLOCK_LEN as u32;
        get_batch_ramp(highway_end, &mut output[output_cursor..]);
    }

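As a concrete illustration of the ramp/highway decomposition described in the comments above, here is a small standalone sketch (a hypothetical helper, independent of the `bitpacking` crate) that computes the three index sub-ranges for a given `start_idx` and output length. `BLOCK_LEN` stands in for `BitPacker1x::BLOCK_LEN`:

```rust
use std::ops::Range;

/// Stand-in for BitPacker1x::BLOCK_LEN: the unrolled routine decodes 32 values at once.
const BLOCK_LEN: u32 = 32;

/// Returns (entrance ramp, highway, exit ramp) index ranges.
fn split_ranges(start_idx: u32, len: u32) -> (Range<u32>, Range<u32>, Range<u32>) {
    let end_idx = start_idx + len;
    // Same expression as the source. Note that when start_idx is already a
    // multiple of 8 this yields 8 (a full extra ramp of 8 slow values);
    // the 8-alignment of the highway start is preserved either way.
    let entrance_ramp_len = 8 - (start_idx % 8) % 8;
    let highway_start = start_idx + entrance_ramp_len;
    if highway_start + BLOCK_LEN > end_idx {
        // Not enough values for even one unrolled block: everything goes the slow way.
        return (start_idx..end_idx, 0..0, 0..0);
    }
    let num_blocks = (end_idx - highway_start) / BLOCK_LEN;
    let highway_end = highway_start + num_blocks * BLOCK_LEN;
    (start_idx..highway_start, highway_start..highway_end, highway_end..end_idx)
}

fn main() {
    let (entrance, highway, exit) = split_ranges(3, 100);
    assert_eq!(entrance, 3..8);  // 5 slow values to reach 8-alignment
    assert_eq!(highway, 8..72);  // 2 full blocks of 32 decoded with the fast routine
    assert_eq!(exit, 72..103);   // 31 trailing slow values
}
```
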
    pub fn get_ids_for_value_range(
        &self,
        range: RangeInclusive<u64>,
        id_range: Range<u32>,
        data: &[u8],
        positions: &mut Vec<u32>,
    ) {
        if self.bit_width() > 32 {
            self.get_ids_for_value_range_slow(range, id_range, data, positions)
        } else {
            if *range.start() > u32::MAX as u64 {
                positions.clear();
                return;
            }
            let range_u32 = (*range.start() as u32)..=(*range.end()).min(u32::MAX as u64) as u32;
            self.get_ids_for_value_range_fast(range_u32, id_range, data, positions)
        }
    }

    fn get_ids_for_value_range_slow(
        &self,
        range: RangeInclusive<u64>,
        id_range: Range<u32>,
        data: &[u8],
        positions: &mut Vec<u32>,
    ) {
        positions.clear();
        for i in id_range {
            // If we cared we could make this branchless, but the slow implementation should rarely
            // kick in.
            let val = self.get(i, data);
            if range.contains(&val) {
                positions.push(i);
            }
        }
    }

    fn get_ids_for_value_range_fast(
        &self,
        value_range: RangeInclusive<u32>,
        id_range: Range<u32>,
        data: &[u8],
        positions: &mut Vec<u32>,
    ) {
        positions.resize(id_range.len(), 0u32);
        self.get_batch_u32s(id_range.start, data, positions);
        crate::filter_vec::filter_vec_in_place(value_range, id_range.start, positions)
    }
}

#[cfg(test)]
@@ -323,58 +200,4 @@ mod test {
            test_bitpacker_aux(num_bits, &vals);
        }
    }

    #[test]
    #[should_panic]
    fn test_get_batch_panics_over_32_bits() {
        let bitunpacker = BitUnpacker::new(33);
        let mut output: [u32; 1] = [0u32];
        bitunpacker.get_batch_u32s(0, &[0, 0, 0, 0, 0, 0, 0, 0], &mut output[..]);
    }

    #[test]
    fn test_get_batch_limit() {
        let bitunpacker = BitUnpacker::new(1);
        let mut output: [u32; 3] = [0u32, 0u32, 0u32];
        bitunpacker.get_batch_u32s(8 * 4 - 3, &[0u8, 0u8, 0u8, 0u8], &mut output[..]);
    }

    #[test]
    #[should_panic]
    fn test_get_batch_panics_when_off_scope() {
        let bitunpacker = BitUnpacker::new(1);
        let mut output: [u32; 3] = [0u32, 0u32, 0u32];
        // We are missing exactly one bit.
        bitunpacker.get_batch_u32s(8 * 4 - 2, &[0u8, 0u8, 0u8, 0u8], &mut output[..]);
    }

    proptest::proptest! {
        #[test]
        fn test_get_batch_u32s_proptest(num_bits in 0u8..=32u8) {
            let mask =
                if num_bits == 32u8 {
                    u32::MAX
                } else {
                    (1u32 << num_bits) - 1
                };
            let mut buffer: Vec<u8> = Vec::new();
            let mut bitpacker = BitPacker::new();
            for val in 0..100 {
                bitpacker.write(val & mask as u64, num_bits, &mut buffer).unwrap();
            }
            bitpacker.flush(&mut buffer).unwrap();
            let bitunpacker = BitUnpacker::new(num_bits);
            let mut output: Vec<u32> = Vec::new();
            for len in [0, 1, 2, 32, 33, 34, 64] {
                for start_idx in 0u32..32u32 {
                    output.resize(len as usize, 0);
                    bitunpacker.get_batch_u32s(start_idx, &buffer, &mut output);
                    for i in 0..len {
                        let expected = (start_idx + i as u32) & mask;
                        assert_eq!(output[i], expected);
                    }
                }
            }
        }
    }
}

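For orientation, a typical round trip through this API looks like the proptest above. A minimal sketch, assuming `BitPacker::new`/`write`/`flush` and `BitUnpacker::new`/`get` behave as the surrounding code suggests (writing `num_bits`-wide values into a byte buffer, then reading them back by index):

```rust
use std::io;

fn roundtrip() -> io::Result<()> {
    let num_bits: u8 = 5; // wide enough for values below 32
    let mut buffer: Vec<u8> = Vec::new();

    // Pack 20 values of 5 bits each.
    let mut bitpacker = BitPacker::new();
    for val in 0u64..20 {
        bitpacker.write(val, num_bits, &mut buffer)?;
    }
    bitpacker.flush(&mut buffer)?;

    // Random-access them back.
    let bitunpacker = BitUnpacker::new(num_bits);
    for idx in 0u32..20 {
        assert_eq!(bitunpacker.get(idx, &buffer), idx as u64);
    }
    Ok(())
}
```
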
@@ -1,365 +0,0 @@
//! SIMD filtering of a vector as described in the following blog post:
//! https://quickwit.io/blog/filtering%20a%20vector%20with%20simd%20instructions%20avx-2%20and%20avx-512
use std::arch::x86_64::{
    __m256i as DataType, _mm256_add_epi32 as op_add, _mm256_cmpgt_epi32 as op_greater,
    _mm256_lddqu_si256 as load_unaligned, _mm256_or_si256 as op_or, _mm256_set1_epi32 as set1,
    _mm256_storeu_si256 as store_unaligned, _mm256_xor_si256 as op_xor, *,
};
use std::ops::RangeInclusive;

const NUM_LANES: usize = 8;

const HIGHEST_BIT: u32 = 1 << 31;

#[inline]
fn u32_to_i32(val: u32) -> i32 {
    (val ^ HIGHEST_BIT) as i32
}

#[inline]
unsafe fn u32_to_i32_avx2(vals_u32x8s: DataType) -> DataType {
    const HIGHEST_BIT_MASK: DataType = from_u32x8([HIGHEST_BIT; NUM_LANES]);
    op_xor(vals_u32x8s, HIGHEST_BIT_MASK)
}

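The XOR with the sign bit above is the standard order-preserving remap from `u32` to `i32` (AVX2 only provides signed 32-bit comparisons). A quick standalone check of the monotonicity claim:

```rust
const HIGHEST_BIT: u32 = 1 << 31;

fn u32_to_i32(val: u32) -> i32 {
    (val ^ HIGHEST_BIT) as i32
}

fn main() {
    // Flipping the sign bit maps 0..=u32::MAX monotonically onto i32::MIN..=i32::MAX.
    assert_eq!(u32_to_i32(0), i32::MIN);
    assert_eq!(u32_to_i32(u32::MAX), i32::MAX);
    let samples = [0u32, 1, 5, 1 << 30, HIGHEST_BIT - 1, HIGHEST_BIT, u32::MAX - 1, u32::MAX];
    for pair in samples.windows(2) {
        // Strictly increasing inputs stay strictly increasing after the remap.
        assert!(u32_to_i32(pair[0]) < u32_to_i32(pair[1]));
    }
}
```
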
pub fn filter_vec_in_place(range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
    // We use a monotonic mapping from u32 to i32 to make the comparison possible in AVX2.
    let range_i32: RangeInclusive<i32> = u32_to_i32(*range.start())..=u32_to_i32(*range.end());
    let num_words = output.len() / NUM_LANES;
    let mut output_len = unsafe {
        filter_vec_avx2_aux(
            output.as_ptr() as *const __m256i,
            range_i32,
            output.as_mut_ptr(),
            offset,
            num_words,
        )
    };
    // Handle the last len % NUM_LANES values with the scalar pattern.
    let remainder_start = num_words * NUM_LANES;
    for i in remainder_start..output.len() {
        let val = output[i];
        output[output_len] = offset + i as u32;
        output_len += if range.contains(&val) { 1 } else { 0 };
    }
    output.truncate(output_len);
}

#[target_feature(enable = "avx2")]
unsafe fn filter_vec_avx2_aux(
    mut input: *const __m256i,
    range: RangeInclusive<i32>,
    output: *mut u32,
    offset: u32,
    num_words: usize,
) -> usize {
    let mut output_tail = output;
    let range_simd = set1(*range.start())..=set1(*range.end());
    let mut ids = from_u32x8([
        offset,
        offset + 1,
        offset + 2,
        offset + 3,
        offset + 4,
        offset + 5,
        offset + 6,
        offset + 7,
    ]);
    const SHIFT: __m256i = from_u32x8([NUM_LANES as u32; NUM_LANES]);
    for _ in 0..num_words {
        let word = load_unaligned(input);
        let word = u32_to_i32_avx2(word);
        let keeper_bitset = compute_filter_bitset(word, range_simd.clone());
        let added_len = keeper_bitset.count_ones();
        let filtered_doc_ids = compact(ids, keeper_bitset);
        store_unaligned(output_tail as *mut __m256i, filtered_doc_ids);
        output_tail = output_tail.offset(added_len as isize);
        ids = op_add(ids, SHIFT);
        input = input.offset(1);
    }
    output_tail.offset_from(output) as usize
}

#[inline]
#[target_feature(enable = "avx2")]
unsafe fn compact(data: DataType, mask: u8) -> DataType {
    let vperm_mask = MASK_TO_PERMUTATION[mask as usize];
    _mm256_permutevar8x32_epi32(data, vperm_mask)
}

#[inline]
#[target_feature(enable = "avx2")]
unsafe fn compute_filter_bitset(val: __m256i, range: std::ops::RangeInclusive<__m256i>) -> u8 {
    let too_low = op_greater(*range.start(), val);
    let too_high = op_greater(val, *range.end());
    // A lane is outside the range if it is too low or too high;
    // the keeper bitset is the complement of that mask.
    let outside = op_or(too_low, too_high);
    255 - std::arch::x86_64::_mm256_movemask_ps(std::mem::transmute::<DataType, __m256>(outside))
        as u8
}

union U8x32 {
    vector: DataType,
    vals: [u32; NUM_LANES],
}

const fn from_u32x8(vals: [u32; NUM_LANES]) -> DataType {
    unsafe { U8x32 { vals }.vector }
}

const MASK_TO_PERMUTATION: [DataType; 256] = [
    from_u32x8([0, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([1, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 0, 0, 0, 0, 0, 0]),
    from_u32x8([2, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 0, 0, 0, 0, 0, 0]),
    from_u32x8([1, 2, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 0, 0, 0, 0, 0]),
    from_u32x8([3, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 0, 0, 0, 0, 0, 0]),
    from_u32x8([1, 3, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 0, 0, 0, 0, 0]),
    from_u32x8([2, 3, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 0, 0, 0, 0, 0]),
    from_u32x8([1, 2, 3, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 0, 0, 0, 0]),
    from_u32x8([4, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 4, 0, 0, 0, 0, 0, 0]),
    from_u32x8([1, 4, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 4, 0, 0, 0, 0, 0]),
    from_u32x8([2, 4, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 4, 0, 0, 0, 0, 0]),
    from_u32x8([1, 2, 4, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 4, 0, 0, 0, 0]),
    from_u32x8([3, 4, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 4, 0, 0, 0, 0, 0]),
    from_u32x8([1, 3, 4, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 4, 0, 0, 0, 0]),
    from_u32x8([2, 3, 4, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 4, 0, 0, 0, 0]),
    from_u32x8([1, 2, 3, 4, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 4, 0, 0, 0]),
    from_u32x8([5, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 5, 0, 0, 0, 0, 0, 0]),
    from_u32x8([1, 5, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 5, 0, 0, 0, 0, 0]),
    from_u32x8([2, 5, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 5, 0, 0, 0, 0, 0]),
    from_u32x8([1, 2, 5, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 5, 0, 0, 0, 0]),
    from_u32x8([3, 5, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 5, 0, 0, 0, 0, 0]),
    from_u32x8([1, 3, 5, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 5, 0, 0, 0, 0]),
    from_u32x8([2, 3, 5, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 5, 0, 0, 0, 0]),
    from_u32x8([1, 2, 3, 5, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 5, 0, 0, 0]),
    from_u32x8([4, 5, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 4, 5, 0, 0, 0, 0, 0]),
    from_u32x8([1, 4, 5, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 4, 5, 0, 0, 0, 0]),
    from_u32x8([2, 4, 5, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 4, 5, 0, 0, 0, 0]),
    from_u32x8([1, 2, 4, 5, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 4, 5, 0, 0, 0]),
    from_u32x8([3, 4, 5, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 4, 5, 0, 0, 0, 0]),
    from_u32x8([1, 3, 4, 5, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 4, 5, 0, 0, 0]),
    from_u32x8([2, 3, 4, 5, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 4, 5, 0, 0, 0]),
    from_u32x8([1, 2, 3, 4, 5, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 4, 5, 0, 0]),
    from_u32x8([6, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 6, 0, 0, 0, 0, 0, 0]),
    from_u32x8([1, 6, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 6, 0, 0, 0, 0, 0]),
    from_u32x8([2, 6, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 6, 0, 0, 0, 0, 0]),
    from_u32x8([1, 2, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 6, 0, 0, 0, 0]),
    from_u32x8([3, 6, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 6, 0, 0, 0, 0, 0]),
    from_u32x8([1, 3, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 6, 0, 0, 0, 0]),
    from_u32x8([2, 3, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 6, 0, 0, 0, 0]),
    from_u32x8([1, 2, 3, 6, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 6, 0, 0, 0]),
    from_u32x8([4, 6, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 4, 6, 0, 0, 0, 0, 0]),
    from_u32x8([1, 4, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 4, 6, 0, 0, 0, 0]),
    from_u32x8([2, 4, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 4, 6, 0, 0, 0, 0]),
    from_u32x8([1, 2, 4, 6, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 4, 6, 0, 0, 0]),
    from_u32x8([3, 4, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 4, 6, 0, 0, 0, 0]),
    from_u32x8([1, 3, 4, 6, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 4, 6, 0, 0, 0]),
    from_u32x8([2, 3, 4, 6, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 4, 6, 0, 0, 0]),
    from_u32x8([1, 2, 3, 4, 6, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 4, 6, 0, 0]),
    from_u32x8([5, 6, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 5, 6, 0, 0, 0, 0, 0]),
    from_u32x8([1, 5, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 5, 6, 0, 0, 0, 0]),
    from_u32x8([2, 5, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 5, 6, 0, 0, 0, 0]),
    from_u32x8([1, 2, 5, 6, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 5, 6, 0, 0, 0]),
    from_u32x8([3, 5, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 5, 6, 0, 0, 0, 0]),
    from_u32x8([1, 3, 5, 6, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 5, 6, 0, 0, 0]),
    from_u32x8([2, 3, 5, 6, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 5, 6, 0, 0, 0]),
    from_u32x8([1, 2, 3, 5, 6, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 5, 6, 0, 0]),
    from_u32x8([4, 5, 6, 0, 0, 0, 0, 0]),
    from_u32x8([0, 4, 5, 6, 0, 0, 0, 0]),
    from_u32x8([1, 4, 5, 6, 0, 0, 0, 0]),
    from_u32x8([0, 1, 4, 5, 6, 0, 0, 0]),
    from_u32x8([2, 4, 5, 6, 0, 0, 0, 0]),
    from_u32x8([0, 2, 4, 5, 6, 0, 0, 0]),
    from_u32x8([1, 2, 4, 5, 6, 0, 0, 0]),
    from_u32x8([0, 1, 2, 4, 5, 6, 0, 0]),
    from_u32x8([3, 4, 5, 6, 0, 0, 0, 0]),
    from_u32x8([0, 3, 4, 5, 6, 0, 0, 0]),
    from_u32x8([1, 3, 4, 5, 6, 0, 0, 0]),
    from_u32x8([0, 1, 3, 4, 5, 6, 0, 0]),
    from_u32x8([2, 3, 4, 5, 6, 0, 0, 0]),
    from_u32x8([0, 2, 3, 4, 5, 6, 0, 0]),
    from_u32x8([1, 2, 3, 4, 5, 6, 0, 0]),
    from_u32x8([0, 1, 2, 3, 4, 5, 6, 0]),
    from_u32x8([7, 0, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 7, 0, 0, 0, 0, 0, 0]),
    from_u32x8([1, 7, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 7, 0, 0, 0, 0, 0]),
    from_u32x8([2, 7, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 7, 0, 0, 0, 0, 0]),
    from_u32x8([1, 2, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 7, 0, 0, 0, 0]),
    from_u32x8([3, 7, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 7, 0, 0, 0, 0, 0]),
    from_u32x8([1, 3, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 7, 0, 0, 0, 0]),
    from_u32x8([2, 3, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 7, 0, 0, 0, 0]),
    from_u32x8([1, 2, 3, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 7, 0, 0, 0]),
    from_u32x8([4, 7, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 4, 7, 0, 0, 0, 0, 0]),
    from_u32x8([1, 4, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 4, 7, 0, 0, 0, 0]),
    from_u32x8([2, 4, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 4, 7, 0, 0, 0, 0]),
    from_u32x8([1, 2, 4, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 4, 7, 0, 0, 0]),
    from_u32x8([3, 4, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 4, 7, 0, 0, 0, 0]),
    from_u32x8([1, 3, 4, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 4, 7, 0, 0, 0]),
    from_u32x8([2, 3, 4, 7, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 4, 7, 0, 0, 0]),
    from_u32x8([1, 2, 3, 4, 7, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 4, 7, 0, 0]),
    from_u32x8([5, 7, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 5, 7, 0, 0, 0, 0, 0]),
    from_u32x8([1, 5, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 5, 7, 0, 0, 0, 0]),
    from_u32x8([2, 5, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 5, 7, 0, 0, 0, 0]),
    from_u32x8([1, 2, 5, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 5, 7, 0, 0, 0]),
    from_u32x8([3, 5, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 5, 7, 0, 0, 0, 0]),
    from_u32x8([1, 3, 5, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 5, 7, 0, 0, 0]),
    from_u32x8([2, 3, 5, 7, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 5, 7, 0, 0, 0]),
    from_u32x8([1, 2, 3, 5, 7, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 5, 7, 0, 0]),
    from_u32x8([4, 5, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 4, 5, 7, 0, 0, 0, 0]),
    from_u32x8([1, 4, 5, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 4, 5, 7, 0, 0, 0]),
    from_u32x8([2, 4, 5, 7, 0, 0, 0, 0]),
    from_u32x8([0, 2, 4, 5, 7, 0, 0, 0]),
    from_u32x8([1, 2, 4, 5, 7, 0, 0, 0]),
    from_u32x8([0, 1, 2, 4, 5, 7, 0, 0]),
    from_u32x8([3, 4, 5, 7, 0, 0, 0, 0]),
    from_u32x8([0, 3, 4, 5, 7, 0, 0, 0]),
    from_u32x8([1, 3, 4, 5, 7, 0, 0, 0]),
    from_u32x8([0, 1, 3, 4, 5, 7, 0, 0]),
    from_u32x8([2, 3, 4, 5, 7, 0, 0, 0]),
    from_u32x8([0, 2, 3, 4, 5, 7, 0, 0]),
    from_u32x8([1, 2, 3, 4, 5, 7, 0, 0]),
    from_u32x8([0, 1, 2, 3, 4, 5, 7, 0]),
    from_u32x8([6, 7, 0, 0, 0, 0, 0, 0]),
    from_u32x8([0, 6, 7, 0, 0, 0, 0, 0]),
    from_u32x8([1, 6, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 1, 6, 7, 0, 0, 0, 0]),
    from_u32x8([2, 6, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 2, 6, 7, 0, 0, 0, 0]),
    from_u32x8([1, 2, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 2, 6, 7, 0, 0, 0]),
    from_u32x8([3, 6, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 3, 6, 7, 0, 0, 0, 0]),
    from_u32x8([1, 3, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 3, 6, 7, 0, 0, 0]),
    from_u32x8([2, 3, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 2, 3, 6, 7, 0, 0, 0]),
    from_u32x8([1, 2, 3, 6, 7, 0, 0, 0]),
    from_u32x8([0, 1, 2, 3, 6, 7, 0, 0]),
    from_u32x8([4, 6, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 4, 6, 7, 0, 0, 0, 0]),
    from_u32x8([1, 4, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 4, 6, 7, 0, 0, 0]),
    from_u32x8([2, 4, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 2, 4, 6, 7, 0, 0, 0]),
    from_u32x8([1, 2, 4, 6, 7, 0, 0, 0]),
    from_u32x8([0, 1, 2, 4, 6, 7, 0, 0]),
    from_u32x8([3, 4, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 3, 4, 6, 7, 0, 0, 0]),
    from_u32x8([1, 3, 4, 6, 7, 0, 0, 0]),
    from_u32x8([0, 1, 3, 4, 6, 7, 0, 0]),
    from_u32x8([2, 3, 4, 6, 7, 0, 0, 0]),
    from_u32x8([0, 2, 3, 4, 6, 7, 0, 0]),
    from_u32x8([1, 2, 3, 4, 6, 7, 0, 0]),
    from_u32x8([0, 1, 2, 3, 4, 6, 7, 0]),
    from_u32x8([5, 6, 7, 0, 0, 0, 0, 0]),
    from_u32x8([0, 5, 6, 7, 0, 0, 0, 0]),
    from_u32x8([1, 5, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 1, 5, 6, 7, 0, 0, 0]),
    from_u32x8([2, 5, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 2, 5, 6, 7, 0, 0, 0]),
    from_u32x8([1, 2, 5, 6, 7, 0, 0, 0]),
    from_u32x8([0, 1, 2, 5, 6, 7, 0, 0]),
    from_u32x8([3, 5, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 3, 5, 6, 7, 0, 0, 0]),
    from_u32x8([1, 3, 5, 6, 7, 0, 0, 0]),
    from_u32x8([0, 1, 3, 5, 6, 7, 0, 0]),
    from_u32x8([2, 3, 5, 6, 7, 0, 0, 0]),
    from_u32x8([0, 2, 3, 5, 6, 7, 0, 0]),
    from_u32x8([1, 2, 3, 5, 6, 7, 0, 0]),
    from_u32x8([0, 1, 2, 3, 5, 6, 7, 0]),
    from_u32x8([4, 5, 6, 7, 0, 0, 0, 0]),
    from_u32x8([0, 4, 5, 6, 7, 0, 0, 0]),
    from_u32x8([1, 4, 5, 6, 7, 0, 0, 0]),
    from_u32x8([0, 1, 4, 5, 6, 7, 0, 0]),
    from_u32x8([2, 4, 5, 6, 7, 0, 0, 0]),
    from_u32x8([0, 2, 4, 5, 6, 7, 0, 0]),
    from_u32x8([1, 2, 4, 5, 6, 7, 0, 0]),
    from_u32x8([0, 1, 2, 4, 5, 6, 7, 0]),
    from_u32x8([3, 4, 5, 6, 7, 0, 0, 0]),
    from_u32x8([0, 3, 4, 5, 6, 7, 0, 0]),
    from_u32x8([1, 3, 4, 5, 6, 7, 0, 0]),
    from_u32x8([0, 1, 3, 4, 5, 6, 7, 0]),
    from_u32x8([2, 3, 4, 5, 6, 7, 0, 0]),
    from_u32x8([0, 2, 3, 4, 5, 6, 7, 0]),
    from_u32x8([1, 2, 3, 4, 5, 6, 7, 0]),
    from_u32x8([0, 1, 2, 3, 4, 5, 6, 7]),
];

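The 256-entry table maps an 8-bit keeper mask (one bit per lane) to the lane indices to gather, in ascending order, for `_mm256_permutevar8x32_epi32`. Rather than eyeballing the entries, here is a sketch of how entries of this shape can be generated (plain Rust, no SIMD; the helper name is hypothetical):

```rust
/// For each 8-bit mask, list the indices of its set bits in ascending order,
/// padding with 0 — exactly the layout of MASK_TO_PERMUTATION above.
fn permutation_for_mask(mask: u8) -> [u32; 8] {
    let mut perm = [0u32; 8];
    let mut out = 0;
    for lane in 0..8 {
        if mask & (1 << lane) != 0 {
            perm[out] = lane as u32;
            out += 1;
        }
    }
    perm
}

fn main() {
    // Mask 0b101 keeps lanes 0 and 2, matching the table's entry 5.
    assert_eq!(permutation_for_mask(0b0000_0101), [0, 2, 0, 0, 0, 0, 0, 0]);
    // Mask 0xFF keeps everything: the identity permutation.
    assert_eq!(permutation_for_mask(0b1111_1111), [0, 1, 2, 3, 4, 5, 6, 7]);
}
```
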
@@ -1,165 +0,0 @@
use std::ops::RangeInclusive;

#[cfg(any(target_arch = "x86_64"))]
mod avx2;

mod scalar;

#[derive(Clone, Copy, Eq, PartialEq, Debug)]
#[repr(u8)]
enum FilterImplPerInstructionSet {
    #[cfg(target_arch = "x86_64")]
    AVX2 = 0u8,
    Scalar = 1u8,
}

impl FilterImplPerInstructionSet {
    #[inline]
    pub fn is_available(&self) -> bool {
        match *self {
            #[cfg(target_arch = "x86_64")]
            FilterImplPerInstructionSet::AVX2 => is_x86_feature_detected!("avx2"),
            FilterImplPerInstructionSet::Scalar => true,
        }
    }
}

// List of available implementations in preferred order.
#[cfg(target_arch = "x86_64")]
const IMPLS: [FilterImplPerInstructionSet; 2] = [
    FilterImplPerInstructionSet::AVX2,
    FilterImplPerInstructionSet::Scalar,
];

#[cfg(not(target_arch = "x86_64"))]
const IMPLS: [FilterImplPerInstructionSet; 1] = [FilterImplPerInstructionSet::Scalar];

impl FilterImplPerInstructionSet {
    #[allow(unused_variables)]
    #[inline]
    fn from(code: u8) -> FilterImplPerInstructionSet {
        #[cfg(target_arch = "x86_64")]
        if code == FilterImplPerInstructionSet::AVX2 as u8 {
            return FilterImplPerInstructionSet::AVX2;
        }
        FilterImplPerInstructionSet::Scalar
    }

    #[inline]
    fn filter_vec_in_place(self, range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
        match self {
            #[cfg(target_arch = "x86_64")]
            FilterImplPerInstructionSet::AVX2 => avx2::filter_vec_in_place(range, offset, output),
            FilterImplPerInstructionSet::Scalar => {
                scalar::filter_vec_in_place(range, offset, output)
            }
        }
    }
}

#[inline]
fn get_best_available_instruction_set() -> FilterImplPerInstructionSet {
    use std::sync::atomic::{AtomicU8, Ordering};
    static INSTRUCTION_SET_BYTE: AtomicU8 = AtomicU8::new(u8::MAX);
    let instruction_set_byte: u8 = INSTRUCTION_SET_BYTE.load(Ordering::Relaxed);
    if instruction_set_byte == u8::MAX {
        // Let's initialize the instruction set and cache it.
        let instruction_set = IMPLS
            .into_iter()
            .find(FilterImplPerInstructionSet::is_available)
            .unwrap();
        INSTRUCTION_SET_BYTE.store(instruction_set as u8, Ordering::Relaxed);
        return instruction_set;
    }
    FilterImplPerInstructionSet::from(instruction_set_byte)
}

pub fn filter_vec_in_place(range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
    get_best_available_instruction_set().filter_vec_in_place(range, offset, output)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_get_best_available_instruction_set() {
        // This does not test much, unfortunately.
        // We just make sure the function returns without crashing and returns the same result.
        let instruction_set = get_best_available_instruction_set();
        assert_eq!(get_best_available_instruction_set(), instruction_set);
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_instruction_set_to_code_from_code() {
        for instruction_set in [
            FilterImplPerInstructionSet::AVX2,
            FilterImplPerInstructionSet::Scalar,
        ] {
            let code = instruction_set as u8;
            assert_eq!(instruction_set, FilterImplPerInstructionSet::from(code));
        }
    }

    fn test_filter_impl_empty_aux(filter_impl: FilterImplPerInstructionSet) {
        let mut output = vec![];
        filter_impl.filter_vec_in_place(0..=u32::MAX, 0, &mut output);
        assert_eq!(&output, &[]);
    }

    fn test_filter_impl_simple_aux(filter_impl: FilterImplPerInstructionSet) {
        let mut output = vec![3, 2, 1, 5, 11, 2, 5, 10, 2];
        filter_impl.filter_vec_in_place(3..=10, 0, &mut output);
        assert_eq!(&output, &[0, 3, 6, 7]);
    }

    fn test_filter_impl_simple_aux_shifted(filter_impl: FilterImplPerInstructionSet) {
        let mut output = vec![3, 2, 1, 5, 11, 2, 5, 10, 2];
        filter_impl.filter_vec_in_place(3..=10, 10, &mut output);
        assert_eq!(&output, &[10, 13, 16, 17]);
    }

    fn test_filter_impl_simple_outside_i32_range(filter_impl: FilterImplPerInstructionSet) {
        let mut output = vec![u32::MAX, i32::MAX as u32 + 1, 0, 1, 3, 1, 1, 1, 1];
        filter_impl.filter_vec_in_place(1..=i32::MAX as u32 + 1u32, 0, &mut output);
        assert_eq!(&output, &[1, 3, 4, 5, 6, 7, 8]);
    }

    fn test_filter_impl_test_suite(filter_impl: FilterImplPerInstructionSet) {
        test_filter_impl_empty_aux(filter_impl);
        test_filter_impl_simple_aux(filter_impl);
        test_filter_impl_simple_aux_shifted(filter_impl);
        test_filter_impl_simple_outside_i32_range(filter_impl);
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_filter_implementation_avx2() {
        if FilterImplPerInstructionSet::AVX2.is_available() {
            test_filter_impl_test_suite(FilterImplPerInstructionSet::AVX2);
        }
    }

    #[test]
    fn test_filter_implementation_scalar() {
        test_filter_impl_test_suite(FilterImplPerInstructionSet::Scalar);
    }

    #[cfg(target_arch = "x86_64")]
    proptest::proptest! {
        #[test]
        fn test_filter_compare_scalar_and_avx2_impl_proptest(
            start in proptest::prelude::any::<u32>(),
            end in proptest::prelude::any::<u32>(),
            offset in 0u32..2u32,
            mut vals in proptest::collection::vec(0..u32::MAX, 0..30)) {
            if FilterImplPerInstructionSet::AVX2.is_available() {
                let mut vals_clone = vals.clone();
                FilterImplPerInstructionSet::AVX2.filter_vec_in_place(start..=end, offset, &mut vals);
                FilterImplPerInstructionSet::Scalar.filter_vec_in_place(start..=end, offset, &mut vals_clone);
                assert_eq!(&vals, &vals_clone);
            }
        }
    }
}

@@ -1,13 +0,0 @@
use std::ops::RangeInclusive;

pub fn filter_vec_in_place(range: RangeInclusive<u32>, offset: u32, output: &mut Vec<u32>) {
    // We restrict the accepted boundary, because unsigned integers & SIMD don't
    // play well.
    let mut output_cursor = 0;
    for i in 0..output.len() {
        let val = output[i];
        output[output_cursor] = offset + i as u32;
        output_cursor += if range.contains(&val) { 1 } else { 0 };
    }
    output.truncate(output_cursor);
}

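Both the scalar and AVX2 paths implement the same contract: the vector is overwritten in place with `offset + position` for every position whose value lies in `range`, then truncated. A usage sketch of the scalar function above; the values mirror `test_filter_impl_simple_aux` further up, with a non-zero offset added:

```rust
fn main() {
    // Values inside 3..=10 sit at positions 0, 3, 6 and 7,
    // so those positions (plus the offset) survive.
    let mut vals = vec![3u32, 2, 1, 5, 11, 2, 5, 10, 2];
    filter_vec_in_place(3..=10, 100, &mut vals);
    assert_eq!(vals, vec![100, 103, 106, 107]);
}
```
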
@@ -1,6 +1,5 @@
mod bitpacker;
mod blocked_bitpacker;
mod filter_vec;

use std::cmp::Ordering;

@@ -1,36 +0,0 @@
use crate::{Column, DocId, RowId};

#[derive(Debug, Default, Clone)]
pub struct ColumnBlockAccessor<T> {
    val_cache: Vec<T>,
    docid_cache: Vec<DocId>,
    row_id_cache: Vec<RowId>,
}

impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
    ColumnBlockAccessor<T>
{
    #[inline]
    pub fn fetch_block(&mut self, docs: &[u32], accessor: &Column<T>) {
        self.docid_cache.clear();
        self.row_id_cache.clear();
        accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
        self.val_cache.resize(self.row_id_cache.len(), T::default());
        accessor
            .values
            .get_vals(&self.row_id_cache, &mut self.val_cache);
    }

    #[inline]
    pub fn iter_vals(&self) -> impl Iterator<Item = T> + '_ {
        self.val_cache.iter().cloned()
    }

    #[inline]
    pub fn iter_docid_vals(&self) -> impl Iterator<Item = (DocId, T)> + '_ {
        self.docid_cache
            .iter()
            .cloned()
            .zip(self.val_cache.iter().cloned())
    }
}

@@ -1,6 +1,6 @@
use std::io;
use std::ops::Deref;
use std::sync::Arc;
use std::{fmt, io};

use sstable::{Dictionary, VoidSSTable};

@@ -21,14 +21,6 @@ pub struct BytesColumn {
    pub(crate) term_ord_column: Column<u64>,
}

impl fmt::Debug for BytesColumn {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("BytesColumn")
            .field("term_ord_column", &self.term_ord_column)
            .finish()
    }
}

impl BytesColumn {
    /// Fills the given `output` buffer with the term associated to the ordinal `ord`.
    ///
@@ -64,12 +56,6 @@ impl BytesColumn {
#[derive(Clone)]
pub struct StrColumn(BytesColumn);

impl fmt::Debug for StrColumn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.term_ord_column)
    }
}

impl From<StrColumn> for BytesColumn {
    fn from(str_column: StrColumn) -> BytesColumn {
        str_column.0

@@ -1,7 +1,7 @@
mod dictionary_encoded;
mod serialize;

use std::fmt::{self, Debug};
use std::fmt::Debug;
use std::io::Write;
use std::ops::{Deref, Range, RangeInclusive};
use std::sync::Arc;
@@ -16,33 +16,14 @@ pub use serialize::{
use crate::column_index::ColumnIndex;
use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
use crate::column_values::{monotonic_map_column, ColumnValues};
use crate::{Cardinality, DocId, EmptyColumnValues, MonotonicallyMappableToU64, RowId};
use crate::{Cardinality, MonotonicallyMappableToU64, RowId};

#[derive(Clone)]
pub struct Column<T = u64> {
    pub index: ColumnIndex,
    pub idx: ColumnIndex,
    pub values: Arc<dyn ColumnValues<T>>,
}

impl<T: Debug + PartialOrd + Send + Sync + Copy + 'static> Debug for Column<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let num_docs = self.num_docs();
        let entries = (0..num_docs)
            .map(|i| (i, self.values_for_doc(i).collect::<Vec<_>>()))
            .filter(|(_, vals)| !vals.is_empty());
        f.debug_map().entries(entries).finish()
    }
}

impl<T: PartialOrd + Default> Column<T> {
    pub fn build_empty_column(num_docs: u32) -> Column<T> {
        Column {
            index: ColumnIndex::Empty { num_docs },
            values: Arc::new(EmptyColumnValues),
        }
    }
}

impl<T: MonotonicallyMappableToU64> Column<T> {
    pub fn to_u64_monotonic(self) -> Column<u64> {
        let values = Arc::new(monotonic_map_column(
@@ -50,7 +31,7 @@ impl<T: MonotonicallyMappableToU64> Column<T> {
            StrictlyMonotonicMappingToInternal::<T>::new(),
        ));
        Column {
            index: self.index,
            idx: self.idx,
            values,
        }
    }

@@ -59,11 +40,11 @@ impl<T: MonotonicallyMappableToU64> Column<T> {
impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
    #[inline]
    pub fn get_cardinality(&self) -> Cardinality {
        self.index.get_cardinality()
        self.idx.get_cardinality()
    }

    pub fn num_docs(&self) -> RowId {
        match &self.index {
        match &self.idx {
            ColumnIndex::Empty { num_docs } => *num_docs,
            ColumnIndex::Full => self.values.num_vals(),
            ColumnIndex::Optional(optional_index) => optional_index.num_docs(),
@@ -87,25 +68,8 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
        self.values_for_doc(row_id).next()
    }

    /// Translates a block of doc ids to row ids.
    ///
    /// Returns the row ids and the matching doc ids at the same index,
    /// e.g.
    /// DocId In: [0, 5, 6]
    /// DocId Out: [0, 0, 6, 6]
    /// RowId Out: [0, 1, 2, 3]
    #[inline]
    pub fn row_ids_for_docs(
        &self,
        doc_ids: &[DocId],
        doc_ids_out: &mut Vec<DocId>,
        row_ids: &mut Vec<RowId>,
    ) {
        self.index.docids_to_rowids(doc_ids, doc_ids_out, row_ids)
    }

    pub fn values_for_doc(&self, doc_id: DocId) -> impl Iterator<Item = T> + '_ {
        self.value_row_ids(doc_id)
    pub fn values_for_doc(&self, row_id: RowId) -> impl Iterator<Item = T> + '_ {
        self.value_row_ids(row_id)
            .map(|value_row_id: RowId| self.values.get_val(value_row_id))
    }

@@ -118,15 +82,13 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
        doc_ids: &mut Vec<u32>,
    ) {
        // Convert the passed doc id range to a row id range.
        let rowid_range = self
            .index
            .docid_range_to_rowids(selected_docid_range.clone());
        let rowid_range = self.idx.docid_range_to_rowids(selected_docid_range.clone());

        // Load rows
        self.values
            .get_row_ids_for_value_range(value_range, rowid_range, doc_ids);
        // Convert rows to doc ids
        self.index
        self.idx
            .select_batch_in_place(selected_docid_range.start, doc_ids);
    }

@@ -151,7 +113,7 @@ impl<T> Deref for Column<T> {
    type Target = ColumnIndex;

    fn deref(&self) -> &Self::Target {
        &self.index
        &self.idx
    }
}

@@ -189,7 +151,7 @@ impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
    }

    fn num_vals(&self) -> u32 {
        match &self.column.index {
        match &self.column.idx {
            ColumnIndex::Empty { .. } => 0u32,
            ColumnIndex::Full => self.column.values.num_vals(),
            ColumnIndex::Optional(optional_idx) => optional_idx.num_docs(),

@@ -52,7 +52,7 @@ pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::
    let column_index = crate::column_index::open_column_index(column_index_data)?;
    let column_values = load_u64_based_column_values(column_values_data)?;
    Ok(Column {
        index: column_index,
        idx: column_index,
        values: column_values,
    })
}
@@ -71,7 +71,7 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
    let column_index = crate::column_index::open_column_index(column_index_data)?;
    let column_values = crate::column_values::open_u128_mapped(column_values_data)?;
    Ok(Column {
        index: column_index,
        idx: column_index,
        values: column_values,
    })
}

@@ -8,16 +8,17 @@ use crate::column_index::SerializableColumnIndex;
use crate::{Cardinality, ColumnIndex, MergeRowOrder};

// For simplification, we never have cardinality go down due to deletes.
fn detect_cardinality(columns: &[ColumnIndex]) -> Cardinality {
fn detect_cardinality(columns: &[Option<ColumnIndex>]) -> Cardinality {
    columns
        .iter()
        .flatten()
        .map(ColumnIndex::get_cardinality)
        .max()
        .unwrap_or(Cardinality::Full)
}

pub fn merge_column_index<'a>(
    columns: &'a [ColumnIndex],
    columns: &'a [Option<ColumnIndex>],
    merge_row_order: &'a MergeRowOrder,
) -> SerializableColumnIndex<'a> {
    // For simplification, we do not try to detect whether the cardinality could be
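The switch to `&[Option<ColumnIndex>]` treats a column that is absent from a segment as contributing nothing to the merged cardinality: `.flatten()` skips the `None`s, and `max()` picks the widest cardinality found. A self-contained sketch of the same logic with a stand-in enum (the real `ColumnIndex` carries data; the sketch assumes, as `max()` requires, that `Full < Optional < Multivalued`):

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Cardinality {
    Full,
    Optional,
    Multivalued,
}

// Stand-in for detect_cardinality: each segment directly reports a
// Cardinality, or None when the column is absent from that segment.
fn detect_cardinality(columns: &[Option<Cardinality>]) -> Cardinality {
    columns
        .iter()
        .flatten()
        .copied()
        .max()
        .unwrap_or(Cardinality::Full)
}

fn main() {
    use Cardinality::*;
    assert_eq!(detect_cardinality(&[Some(Optional), None]), Optional);
    assert_eq!(detect_cardinality(&[Some(Optional), Some(Multivalued)]), Multivalued);
    // No columns at all: by convention the result is Full.
    assert_eq!(detect_cardinality(&[None, None]), Full);
}
```
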
@@ -52,33 +53,34 @@ mod tests {
        let optional_index: ColumnIndex = OptionalIndex::for_test(1, &[]).into();
        let multivalued_index: ColumnIndex = MultiValueIndex::for_test(&[0, 1]).into();
        assert_eq!(
            detect_cardinality(&[optional_index.clone(), ColumnIndex::Empty { num_docs: 0 }]),
            detect_cardinality(&[Some(optional_index.clone()), None]),
            Cardinality::Optional
        );
        assert_eq!(
            detect_cardinality(&[optional_index.clone(), ColumnIndex::Full]),
            detect_cardinality(&[Some(optional_index.clone()), Some(ColumnIndex::Full)]),
            Cardinality::Optional
        );
        assert_eq!(
            detect_cardinality(&[Some(multivalued_index.clone()), None]),
            Cardinality::Multivalued
        );
        assert_eq!(
            detect_cardinality(&[
                multivalued_index.clone(),
                ColumnIndex::Empty { num_docs: 0 }
                Some(multivalued_index.clone()),
                Some(optional_index.clone())
            ]),
            Cardinality::Multivalued
        );
        assert_eq!(
            detect_cardinality(&[multivalued_index.clone(), optional_index.clone()]),
            Cardinality::Multivalued
        );
        assert_eq!(
            detect_cardinality(&[optional_index, multivalued_index]),
            detect_cardinality(&[Some(optional_index), Some(multivalued_index)]),
            Cardinality::Multivalued
        );
    }

    #[test]
    fn test_merge_index_multivalued_sorted() {
        let column_indexes: Vec<ColumnIndex> = vec![MultiValueIndex::for_test(&[0, 2, 5]).into()];
        let column_indexes: Vec<Option<ColumnIndex>> =
            vec![Some(MultiValueIndex::for_test(&[0, 2, 5]).into())];
        let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
            &[2],
            vec![
@@ -102,10 +104,10 @@ mod tests {

    #[test]
    fn test_merge_index_multivalued_sorted_several_segment() {
        let column_indexes: Vec<ColumnIndex> = vec![
            MultiValueIndex::for_test(&[0, 2, 5]).into(),
            ColumnIndex::Empty { num_docs: 0 },
            MultiValueIndex::for_test(&[0, 1, 4]).into(),
        let column_indexes: Vec<Option<ColumnIndex>> = vec![
            Some(MultiValueIndex::for_test(&[0, 2, 5]).into()),
            None,
            Some(MultiValueIndex::for_test(&[0, 1, 4]).into()),
        ];
        let merge_row_order: MergeRowOrder = ShuffleMergeOrder::for_test(
            &[2, 0, 2],

@@ -5,7 +5,7 @@ use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, ShuffleMergeOrder};

pub fn merge_column_index_shuffled<'a>(
    column_indexes: &'a [ColumnIndex],
    column_indexes: &'a [Option<ColumnIndex>],
    cardinality_after_merge: Cardinality,
    shuffle_merge_order: &'a ShuffleMergeOrder,
) -> SerializableColumnIndex<'a> {
@@ -33,41 +33,41 @@ pub fn merge_column_index_shuffled<'a>(
///
/// In other words the column_indexes passed as argument may NOT be multivalued.
fn merge_column_index_shuffled_optional<'a>(
    column_indexes: &'a [ColumnIndex],
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
    Box::new(ShuffledIndex {
    Box::new(ShuffledOptionalIndex {
        column_indexes,
        merge_order,
    })
}

struct ShuffledIndex<'a> {
    column_indexes: &'a [ColumnIndex],
struct ShuffledOptionalIndex<'a> {
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
}

impl<'a> Iterable<u32> for ShuffledIndex<'a> {
impl<'a> Iterable<u32> for ShuffledOptionalIndex<'a> {
    fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        Box::new(
            self.merge_order
                .iter_new_to_old_row_addrs()
                .enumerate()
                .filter_map(|(new_row_id, old_row_addr)| {
                    let column_index = &self.column_indexes[old_row_addr.segment_ord as usize];
                    let row_id = new_row_id as u32;
                    if column_index.has_value(old_row_addr.row_id) {
                        Some(row_id)
                    } else {
                        None
                    }
                }),
        )
        Box::new(self.merge_order
            .iter_new_to_old_row_addrs()
            .enumerate()
            .filter_map(|(new_row_id, old_row_addr)| {
                let Some(column_index) = &self.column_indexes[old_row_addr.segment_ord as usize] else {
                    return None;
                };
                let row_id = new_row_id as u32;
                if column_index.has_value(old_row_addr.row_id) {
                    Some(row_id)
                } else {
                    None
                }
            }))
    }
}

fn merge_column_index_shuffled_multivalued<'a>(
    column_indexes: &'a [ColumnIndex],
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
) -> Box<dyn Iterable<RowId> + 'a> {
    Box::new(ShuffledMultivaluedIndex {
@@ -77,16 +77,19 @@ fn merge_column_index_shuffled_multivalued<'a>(
}

struct ShuffledMultivaluedIndex<'a> {
    column_indexes: &'a [ColumnIndex],
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
}

fn iter_num_values<'a>(
    column_indexes: &'a [ColumnIndex],
    column_indexes: &'a [Option<ColumnIndex>],
    merge_order: &'a ShuffleMergeOrder,
) -> impl Iterator<Item = u32> + 'a {
    merge_order.iter_new_to_old_row_addrs().map(|row_addr| {
        let column_index = &column_indexes[row_addr.segment_ord as usize];
        let Some(column_index) = &column_indexes[row_addr.segment_ord as usize] else {
            // The column has no values at all in this segment, so this row has 0 values.
            return 0u32;
        };
        match column_index {
            ColumnIndex::Empty { .. } => 0u32,
            ColumnIndex::Full => 1,
@@ -140,7 +143,7 @@ mod tests {
    #[test]
    fn test_merge_column_index_optional_shuffle() {
        let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
        let column_indexes = vec![optional_index, ColumnIndex::Full];
        let column_indexes = vec![Some(optional_index), Some(ColumnIndex::Full)];
        let row_addrs = vec![
            RowAddr {
                segment_ord: 0u32,

@@ -9,7 +9,7 @@ use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder};
///
/// No sorting or deletes are involved.
pub fn merge_column_index_stacked<'a>(
    columns: &'a [ColumnIndex],
    columns: &'a [Option<ColumnIndex>],
    cardinality_after_merge: Cardinality,
    stack_merge_order: &'a StackMergeOrder,
) -> SerializableColumnIndex<'a> {
@@ -33,7 +33,7 @@ pub fn merge_column_index_stacked<'a>(
}

struct StackedOptionalIndex<'a> {
    columns: &'a [ColumnIndex],
    columns: &'a [Option<ColumnIndex>],
    stack_merge_order: &'a StackMergeOrder,
}

@@ -46,16 +46,16 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
            .flat_map(|(columnar_id, column_index_opt)| {
                let columnar_row_range = self.stack_merge_order.columnar_range(columnar_id);
                let rows_it: Box<dyn Iterator<Item = RowId>> = match column_index_opt {
                    ColumnIndex::Full => Box::new(columnar_row_range),
                    ColumnIndex::Optional(optional_index) => Box::new(
                    Some(ColumnIndex::Full) => Box::new(columnar_row_range),
                    Some(ColumnIndex::Optional(optional_index)) => Box::new(
                        optional_index
                            .iter_rows()
                            .map(move |row_id: RowId| columnar_row_range.start + row_id),
                    ),
                    ColumnIndex::Multivalued(_) => {
                    Some(ColumnIndex::Multivalued(_)) => {
                        panic!("No multivalued index is allowed when stacking column index");
                    }
                    ColumnIndex::Empty { .. } => Box::new(std::iter::empty()),
                    None | Some(ColumnIndex::Empty { .. }) => Box::new(std::iter::empty()),
                };
                rows_it
            }),
@@ -65,18 +65,20 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {

#[derive(Clone, Copy)]
struct StackedMultivaluedIndex<'a> {
    columns: &'a [ColumnIndex],
    columns: &'a [Option<ColumnIndex>],
    stack_merge_order: &'a StackMergeOrder,
}

fn convert_column_opt_to_multivalued_index<'a>(
    column_index_opt: &'a ColumnIndex,
    column_index_opt: Option<&'a ColumnIndex>,
    num_rows: RowId,
) -> Box<dyn Iterator<Item = RowId> + 'a> {
    match column_index_opt {
        ColumnIndex::Empty { .. } => Box::new(iter::repeat(0u32).take(num_rows as usize + 1)),
        ColumnIndex::Full => Box::new(0..num_rows + 1),
        ColumnIndex::Optional(optional_index) => {
        None | Some(ColumnIndex::Empty { .. }) => {
            Box::new(iter::repeat(0u32).take(num_rows as usize + 1))
        }
        Some(ColumnIndex::Full) => Box::new(0..num_rows + 1),
        Some(ColumnIndex::Optional(optional_index)) => {
            Box::new(
                (0..num_rows)
                    // TODO optimize
@@ -84,7 +86,9 @@ fn convert_column_opt_to_multivalued_index<'a>(
                    .chain(std::iter::once(optional_index.num_non_nulls())),
            )
        }
        ColumnIndex::Multivalued(multivalued_index) => multivalued_index.start_index_column.iter(),
        Some(ColumnIndex::Multivalued(multivalued_index)) => {
            multivalued_index.start_index_column.iter()
        }
    }
}

@@ -93,6 +97,7 @@ impl<'a> Iterable<RowId> for StackedMultivaluedIndex<'a> {
        let multivalued_indexes =
            self.columns
                .iter()
                .map(Option::as_ref)
                .enumerate()
                .map(|(columnar_id, column_opt)| {
                    let num_rows =

@@ -12,7 +12,7 @@ pub use serialize::{open_column_index, serialize_column_index, SerializableColum
use crate::column_index::multivalued_index::MultiValueIndex;
use crate::{Cardinality, DocId, RowId};

#[derive(Clone, Debug)]
#[derive(Clone)]
pub enum ColumnIndex {
    Empty {
        num_docs: u32,
@@ -37,15 +37,11 @@ impl From<MultiValueIndex> for ColumnIndex {
}

impl ColumnIndex {
    // Returns the cardinality of the column index.
    //
    // By convention, if the column contains no docs, we consider that it is
    // full.
    #[inline]
    pub fn get_cardinality(&self) -> Cardinality {
        match self {
            ColumnIndex::Empty { num_docs: 0 } | ColumnIndex::Full => Cardinality::Full,
            ColumnIndex::Empty { .. } => Cardinality::Optional,
            ColumnIndex::Full => Cardinality::Full,
            ColumnIndex::Optional(_) => Cardinality::Optional,
            ColumnIndex::Multivalued(_) => Cardinality::Multivalued,
        }

@@ -78,45 +74,6 @@ impl ColumnIndex {
        }
    }

    /// Translates a block of doc ids to row ids.
    ///
    /// Returns the row ids and the matching doc ids at the same index,
    /// e.g.
    /// DocId In: [0, 5, 6]
    /// DocId Out: [0, 0, 6, 6]
    /// RowId Out: [0, 1, 2, 3]
    #[inline]
    pub fn docids_to_rowids(
        &self,
        doc_ids: &[DocId],
        doc_ids_out: &mut Vec<DocId>,
        row_ids: &mut Vec<RowId>,
    ) {
        match self {
            ColumnIndex::Empty { .. } => {}
            ColumnIndex::Full => {
                doc_ids_out.extend_from_slice(doc_ids);
                row_ids.extend_from_slice(doc_ids);
            }
            ColumnIndex::Optional(optional_index) => {
                for doc_id in doc_ids {
                    if let Some(row_id) = optional_index.rank_if_exists(*doc_id) {
                        doc_ids_out.push(*doc_id);
                        row_ids.push(row_id);
                    }
                }
            }
            ColumnIndex::Multivalued(multivalued_index) => {
                for doc_id in doc_ids {
                    for row_id in multivalued_index.range(*doc_id) {
                        doc_ids_out.push(*doc_id);
                        row_ids.push(row_id);
                    }
                }
            }
        }
    }

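The doc example is easiest to trace with a multivalued index. A plain-Rust sketch, with a hypothetical `offsets` array standing in for `MultiValueIndex::range` (doc `d` owns rows `offsets[d]..offsets[d + 1]`), reproducing the numbers from the comment above:

```rust
/// offsets[d]..offsets[d + 1] is the row range of doc d
/// (a stand-in for the multivalued ColumnIndex above).
fn docids_to_rowids(
    offsets: &[u32],
    doc_ids: &[u32],
    doc_ids_out: &mut Vec<u32>,
    row_ids: &mut Vec<u32>,
) {
    for &doc_id in doc_ids {
        for row_id in offsets[doc_id as usize]..offsets[doc_id as usize + 1] {
            doc_ids_out.push(doc_id);
            row_ids.push(row_id);
        }
    }
}

fn main() {
    // Doc 0 owns rows 0..2, docs 1..=5 own no rows, doc 6 owns rows 2..4.
    let offsets = [0u32, 2, 2, 2, 2, 2, 2, 4];
    let mut docs_out = Vec::new();
    let mut rows_out = Vec::new();
    docids_to_rowids(&offsets, &[0, 5, 6], &mut docs_out, &mut rows_out);
    assert_eq!(docs_out, vec![0, 0, 6, 6]); // DocId Out
    assert_eq!(rows_out, vec![0, 1, 2, 3]); // RowId Out
}
```
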
    pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
        match self {
            ColumnIndex::Empty { .. } => 0..0,
@@ -156,21 +113,3 @@ impl ColumnIndex {
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::{Cardinality, ColumnIndex};

    #[test]
    fn test_column_index_get_cardinality() {
        assert_eq!(
            ColumnIndex::Empty { num_docs: 0 }.get_cardinality(),
            Cardinality::Full
        );
        assert_eq!(ColumnIndex::Full.get_cardinality(), Cardinality::Full);
        assert_eq!(
            ColumnIndex::Empty { num_docs: 1 }.get_cardinality(),
            Cardinality::Optional
        );
    }
}

@@ -35,14 +35,6 @@ pub struct MultiValueIndex {
    pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
}

impl std::fmt::Debug for MultiValueIndex {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("MultiValuedIndex")
            .field("num_rows", &self.start_index_column.num_vals())
            .finish_non_exhaustive()
    }
}

impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
    fn from(start_index_column: Arc<dyn ColumnValues<RowId>>) -> Self {
        MultiValueIndex { start_index_column }
@@ -114,8 +106,11 @@ impl MultiValueIndex {
#[cfg(test)]
mod tests {
    use std::ops::Range;
    use std::sync::Arc;

    use super::MultiValueIndex;
    use crate::column_values::IterColumn;
    use crate::{ColumnValues, RowId};

    fn index_to_pos_helper(
        index: &MultiValueIndex,
@@ -129,7 +124,9 @@ mod tests {

    #[test]
    fn test_positions_to_docid() {
        let index = MultiValueIndex::for_test(&[0, 10, 12, 15, 22, 23]);
        let offsets: Vec<RowId> = vec![0, 10, 12, 15, 22, 23]; // docid values are [0..10, 10..12, 12..15, etc.]
        let column: Arc<dyn ColumnValues<RowId>> = Arc::new(IterColumn::from(offsets.into_iter()));
        let index = MultiValueIndex::from(column);
        assert_eq!(index.num_docs(), 5);
        let positions = &[10u32, 11, 15, 20, 21, 22];
        assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);

@@ -88,15 +88,6 @@ pub struct OptionalIndex {
    block_metas: Arc<[BlockMeta]>,
}

impl std::fmt::Debug for OptionalIndex {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("OptionalIndex")
            .field("num_rows", &self.num_rows)
            .field("num_non_null_rows", &self.num_non_null_rows)
            .finish_non_exhaustive()
    }
}

/// Splits a value address into lower and upper 16 bits.
/// The lower 16 bits are the value within the block.
/// The upper 16 bits are the block index.

@@ -5,7 +5,7 @@ use crate::iterable::Iterable;
use crate::{ColumnIndex, ColumnValues, MergeRowOrder};

pub(crate) struct MergedColumnValues<'a, T> {
    pub(crate) column_indexes: &'a [ColumnIndex],
    pub(crate) column_indexes: &'a [Option<ColumnIndex>],
    pub(crate) column_values: &'a [Option<Arc<dyn ColumnValues<T>>>],
    pub(crate) merge_row_order: &'a MergeRowOrder,
}
@@ -23,7 +23,8 @@ impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T>
                shuffle_merge_order
                    .iter_new_to_old_row_addrs()
                    .flat_map(|row_addr| {
                        let column_index = &self.column_indexes[row_addr.segment_ord as usize];
                        let column_index =
                            self.column_indexes[row_addr.segment_ord as usize].as_ref()?;
                        let column_values =
                            self.column_values[row_addr.segment_ord as usize].as_ref()?;
                        let value_range = column_index.value_row_ids(row_addr.row_id);

@@ -58,21 +58,10 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
    /// # Panics
    ///
    /// May panic if `idx` is greater than the column length.
    fn get_vals(&self, indexes: &[u32], output: &mut [T]) {
        assert!(indexes.len() == output.len());
        let out_and_idx_chunks = output.chunks_exact_mut(4).zip(indexes.chunks_exact(4));
        for (out_x4, idx_x4) in out_and_idx_chunks {
            out_x4[0] = self.get_val(idx_x4[0]);
            out_x4[1] = self.get_val(idx_x4[1]);
            out_x4[2] = self.get_val(idx_x4[2]);
            out_x4[3] = self.get_val(idx_x4[3]);
        }

        let step_size = 4;
        let cutoff = indexes.len() - indexes.len() % step_size;

        for idx in cutoff..indexes.len() {
            output[idx] = self.get_val(indexes[idx]);
    fn get_vals(&self, idx: &[u32], output: &mut [T]) {
        assert!(idx.len() == output.len());
        for (out, idx) in output.iter_mut().zip(idx.iter()) {
            *out = self.get_val(*idx as u32);
        }
    }

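The left-hand version of `get_vals` is a manually four-way unrolled gather: `chunks_exact` yields only full chunks of 4, and the trailing `len % 4` indices are handled by a scalar loop. The same pattern in isolation, over a plain slice:

```rust
/// Four-way unrolled gather, mirroring the removed get_vals default impl.
fn gather(values: &[u64], indexes: &[u32], output: &mut [u64]) {
    assert!(indexes.len() == output.len());
    // Full chunks of 4: the compiler can keep these loads independent.
    let out_and_idx_chunks = output.chunks_exact_mut(4).zip(indexes.chunks_exact(4));
    for (out_x4, idx_x4) in out_and_idx_chunks {
        out_x4[0] = values[idx_x4[0] as usize];
        out_x4[1] = values[idx_x4[1] as usize];
        out_x4[2] = values[idx_x4[2] as usize];
        out_x4[3] = values[idx_x4[3] as usize];
    }
    // Scalar tail for the last len % 4 indices.
    let cutoff = indexes.len() - indexes.len() % 4;
    for i in cutoff..indexes.len() {
        output[i] = values[indexes[i] as usize];
    }
}

fn main() {
    let values: Vec<u64> = (0..100).map(|v| v * 10).collect();
    let indexes = [3u32, 1, 4, 1, 5, 9, 2];
    let mut output = vec![0u64; indexes.len()];
    gather(&values, &indexes, &mut output);
    assert_eq!(output, vec![30, 10, 40, 10, 50, 90, 20]);
}
```
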
@@ -94,6 +83,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
    /// Get the row ids of values which are in the provided value range.
    ///
    /// Note that position == docid for single value fast fields.
    #[inline(always)]
    fn get_row_ids_for_value_range(
        &self,
        value_range: RangeInclusive<T>,

@@ -109,26 +99,20 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a lower bound for this column of values.
/// Returns the minimum value for this fast field.
///
/// All values are guaranteed to be greater than or equal to `.min_value()`,
/// but this value is not necessarily the best boundary value.
///
/// We have
/// ∀i < self.num_vals(), self.get_val(i) >= self.min_value()
/// But we don't necessarily have
/// ∃i < self.num_vals(), self.get_val(i) == self.min_value()
/// This min_value may not be exact.
/// For instance, the min value does not take into account possible
/// deleted documents. All values are however guaranteed to be greater than
/// or equal to `.min_value()`.
fn min_value(&self) -> T;

/// Returns an upper bound for this column of values.
/// Returns the maximum value for this fast field.
///
/// All values are guaranteed to be less than or equal to `.max_value()`,
/// but this value is not necessarily the best boundary value.
///
/// We have
/// ∀i < self.num_vals(), self.get_val(i) <= self.max_value()
/// But we don't necessarily have
/// ∃i < self.num_vals(), self.get_val(i) == self.max_value()
/// This max_value may not be exact.
/// For instance, the max value does not take into account possible
/// deleted documents. All values are however guaranteed to be less than
/// or equal to `.max_value()`.
fn max_value(&self) -> T;
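To make the two bounds above concrete, here is a hedged sketch of the invariant every implementation is expected to uphold, even when the bounds are not attained by any row (e.g. because of deleted documents); the helper is illustrative and not part of the trait:

    fn check_bounds<T: PartialOrd, C: ColumnValues<T> + ?Sized>(col: &C) {
        for i in 0..col.num_vals() {
            let val = col.get_val(i);
            // min_value()/max_value() are bounds, not necessarily attained values.
            assert!(col.min_value() <= val && val <= col.max_value());
        }
    }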

/// The number of values in the column.
@@ -140,27 +124,6 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
}
}

/// Empty column of values.
pub struct EmptyColumnValues;

impl<T: PartialOrd + Default> ColumnValues<T> for EmptyColumnValues {
fn get_val(&self, _idx: u32) -> T {
panic!("Internal Error: Called get_val of empty column.")
}

fn min_value(&self) -> T {
T::default()
}

fn max_value(&self) -> T {
T::default()
}

fn num_vals(&self) -> u32 {
0
}
}

impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
#[inline(always)]
fn get_val(&self, idx: u32) -> T {
@@ -204,5 +167,54 @@ impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>>
}
}

/// Wraps a cloneable iterator into a `Column`.
pub struct IterColumn<T>(T);

impl<T> From<T> for IterColumn<T>
where T: Iterator + Clone + ExactSizeIterator
{
fn from(iter: T) -> Self {
IterColumn(iter)
}
}

impl<T> ColumnValues<T::Item> for IterColumn<T>
where
T: Iterator + Clone + ExactSizeIterator + Send + Sync,
T::Item: PartialOrd + Debug,
{
fn get_val(&self, idx: u32) -> T::Item {
self.0.clone().nth(idx as usize).unwrap()
}

fn min_value(&self) -> T::Item {
self.0.clone().next().unwrap()
}

fn max_value(&self) -> T::Item {
self.0.clone().last().unwrap()
}

fn num_vals(&self) -> u32 {
self.0.len() as u32
}

fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
Box::new(self.0.clone())
}
}

#[cfg(all(test, feature = "unstable"))]
mod bench;

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_range_as_col() {
let col = IterColumn::from(10..100);
assert_eq!(col.num_vals(), 90);
assert_eq!(col.max_value(), 99);
}
}

@@ -50,7 +50,7 @@ where
Input: PartialOrd + Send + Debug + Sync + Clone,
Output: PartialOrd + Send + Debug + Sync + Clone,
{
#[inline(always)]
#[inline]
fn get_val(&self, idx: u32) -> Output {
let from_val = self.from_column.get_val(idx);
self.monotonic_mapping.mapping(from_val)

@@ -10,7 +10,7 @@ use super::{CompactSpace, RangeMapping};
/// Put the blanks for the sorted values into a binary heap
fn get_blanks(values_sorted: &BTreeSet<u128>) -> BinaryHeap<BlankRange> {
let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
for (first, second) in values_sorted.iter().copied().tuple_windows() {
for (first, second) in values_sorted.iter().tuple_windows() {
// Overflow correctness: the values are deduped and sorted (BTreeSet property), which means
// there is always space between two values.
let blank_range = first + 1..=second - 1;
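// Small worked example: for values_sorted = {2, 4, 1000}, tuple_windows
// yields (2, 4) and (4, 1000), producing the blank ranges 3..=3 and
// 5..=999; the binary heap then hands out the large blank (995 values)
// before the tiny one (1 value).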
@@ -65,12 +65,12 @@ pub fn get_compact_space(
return compact_space_builder.finish();
}

// We start with a space that is limited to min_value..=max_value
// Replace after stabilization of https://github.com/rust-lang/rust/issues/62924
let min_value = values_deduped_sorted.iter().next().copied().unwrap_or(0);
let max_value = values_deduped_sorted.iter().last().copied().unwrap_or(0);

let mut blanks: BinaryHeap<BlankRange> = get_blanks(values_deduped_sorted);
// Replace after stabilization of https://github.com/rust-lang/rust/issues/62924

// We start with a space that is limited to min_value..=max_value
let min_value = *values_deduped_sorted.iter().next().unwrap_or(&0);
let max_value = *values_deduped_sorted.iter().last().unwrap_or(&0);

// +1 for null; in case min and max cover the whole space, we would otherwise be off by one.
let mut amplitude_compact_space = (max_value - min_value).saturating_add(1);
@@ -84,7 +84,6 @@ pub fn get_compact_space(
let mut amplitude_bits: u8 = num_bits(amplitude_compact_space);

let mut blank_collector = BlankCollector::new();

// We will stage blanks until they reduce the compact space by at least 1 bit and then flush
// them if the metadata cost is lower than the total number of saved bits.
// Binary heap to process the gaps by their size
@@ -94,7 +93,6 @@ pub fn get_compact_space(
let staged_spaces_sum: u128 = blank_collector.staged_blanks_sum();
let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum;
let amplitude_new_bits = num_bits(amplitude_new_compact_space);

if amplitude_bits == amplitude_new_bits {
continue;
}
@@ -102,16 +100,7 @@ pub fn get_compact_space(
// TODO: Maybe calculate the exact cost of the blanks and run this more expensive computation
// only when amplitude_new_bits changes
let cost = blank_collector.num_staged_blanks() * cost_per_blank;

// We want to end up with a compact space that fits into 32 bits.
// In order to deal with pathological cases, we force the algorithm to keep
// refining the compact space until the amplitude fits into 32 bits.
//
// The worst case scenario happens for a large number of u128s regularly
// spread over the full u128 space.
//
// In that case, this forces the algorithm to degenerate into dictionary encoding.
if amplitude_bits <= 32 && cost >= saved_bits {
if cost >= saved_bits {
// Continue here: although we walk over the blanks by size,
// we can potentially still save a lot with the last bits, which are smaller blanks.
//
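// Illustrative arithmetic for the cost check above (assuming saved_bits
// counts one saved bit per stored value): with 1_000 values going from
// amplitude_bits = 20 to amplitude_new_bits = 16, flushing saves
// 4 * 1_000 = 4_000 bits, while staging 3 blanks at a cost_per_blank of
// 11 bits costs only 33 bits, so the blanks are worth flushing.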
@@ -126,8 +115,6 @@ pub fn get_compact_space(
compact_space_builder.add_blanks(blank_collector.drain().map(|blank| blank.blank_range()));
}

assert!(amplitude_bits <= 32);

// Special case, when we didn't collect any blanks because:
// * the data is empty (early exit)
// * the algorithm decided it's not worth the cost, which can be the case for single values
@@ -212,7 +199,7 @@ impl CompactSpaceBuilder {
covered_space.push(0..=0); // empty data case
};

let mut compact_start: u32 = 1; // 0 is reserved for `null`
let mut compact_start: u64 = 1; // 0 is reserved for `null`
let mut ranges_mapping: Vec<RangeMapping> = Vec::with_capacity(covered_space.len());
for cov in covered_space {
let range_mapping = super::RangeMapping {
@@ -231,7 +218,6 @@ impl CompactSpaceBuilder {
#[cfg(test)]
mod tests {
use super::*;
use crate::column_values::u128_based::compact_space::COST_PER_BLANK_IN_BITS;

#[test]
fn test_binary_heap_pop_order() {
@@ -242,11 +228,4 @@ mod tests {
assert_eq!(blanks.pop().unwrap().blank_size(), 101);
assert_eq!(blanks.pop().unwrap().blank_size(), 11);
}

#[test]
fn test_worst_case_scenario() {
let vals: BTreeSet<u128> = (0..8).map(|i| i * ((1u128 << 34) / 8)).collect();
let compact_space = get_compact_space(&vals, vals.len() as u32, COST_PER_BLANK_IN_BITS);
assert!(compact_space.amplitude_compact_space() < u32::MAX as u128);
}
}

@@ -42,15 +42,15 @@ pub struct CompactSpace {
#[derive(Debug, Clone, Eq, PartialEq)]
struct RangeMapping {
value_range: RangeInclusive<u128>,
compact_start: u32,
compact_start: u64,
}
impl RangeMapping {
fn range_length(&self) -> u32 {
(self.value_range.end() - self.value_range.start()) as u32 + 1
fn range_length(&self) -> u64 {
(self.value_range.end() - self.value_range.start()) as u64 + 1
}

// The last value of the compact space in this range
fn compact_end(&self) -> u32 {
fn compact_end(&self) -> u64 {
self.compact_start + self.range_length() - 1
}
}
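// Sketch of how two RangeMappings tile the compact space (values
// illustrative): value_range 10..=15 with compact_start = 1 (0 is null)
// has range_length() = 6 and compact_end() = 6, mapping 10..=15 to 1..=6;
// the next range 100..=102 then gets compact_start = 7, mapping to 7..=9.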
@@ -81,7 +81,7 @@ impl BinarySerializable for CompactSpace {
let num_ranges = VInt::deserialize(reader)?.0;
let mut ranges_mapping: Vec<RangeMapping> = vec![];
let mut value = 0u128;
let mut compact_start = 1u32; // 0 is reserved for `null`
let mut compact_start = 1u64; // 0 is reserved for `null`
for _ in 0..num_ranges {
let blank_delta_start = VIntU128::deserialize(reader)?.0;
value += blank_delta_start;
@@ -122,10 +122,10 @@ impl CompactSpace {

/// Returns either Ok(the value in the compact space) or, if it is outside the compact space,
/// Err(the position where it would be inserted)
fn u128_to_compact(&self, value: u128) -> Result<u32, usize> {
fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
self.ranges_mapping
.binary_search_by(|probe| {
let value_range: &RangeInclusive<u128> = &probe.value_range;
let value_range = &probe.value_range;
if value < *value_range.start() {
Ordering::Greater
} else if value > *value_range.end() {
@@ -136,13 +136,13 @@ impl CompactSpace {
})
.map(|pos| {
let range_mapping = &self.ranges_mapping[pos];
let pos_in_range: u32 = (value - range_mapping.value_range.start()) as u32;
let pos_in_range = (value - range_mapping.value_range.start()) as u64;
range_mapping.compact_start + pos_in_range
})
}

/// Unpacks a value from compact space u32 to u128 space
fn compact_to_u128(&self, compact: u32) -> u128 {
/// Unpacks a value from compact space u64 to u128 space
fn compact_to_u128(&self, compact: u64) -> u128 {
let pos = self
.ranges_mapping
.binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
@@ -178,15 +178,11 @@ impl CompactSpaceCompressor {
/// Taking the vals as a Vec may cost a lot of memory. It is used to sort the vals.
pub fn train_from(iter: impl Iterator<Item = u128>) -> Self {
let mut values_sorted = BTreeSet::new();
// Total number of values, with their redundancy.
let mut total_num_values = 0u32;
for val in iter {
total_num_values += 1u32;
values_sorted.insert(val);
}
let min_value = *values_sorted.iter().next().unwrap_or(&0);
let max_value = *values_sorted.iter().last().unwrap_or(&0);

let compact_space =
get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
let amplitude_compact_space = compact_space.amplitude_compact_space();
@@ -197,12 +193,13 @@ impl CompactSpaceCompressor {
);

let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64);

let min_value = *values_sorted.iter().next().unwrap_or(&0);
let max_value = *values_sorted.iter().last().unwrap_or(&0);
assert_eq!(
compact_space
.u128_to_compact(max_value)
.expect("could not convert max value to compact space"),
amplitude_compact_space as u32
amplitude_compact_space as u64
);
CompactSpaceCompressor {
params: IPCodecParams {
@@ -243,7 +240,7 @@ impl CompactSpaceCompressor {
"Could not convert value to compact_space. This is a bug.",
)
})?;
bitpacker.write(compact as u64, self.params.num_bits, write)?;
bitpacker.write(compact, self.params.num_bits, write)?;
}
bitpacker.close(write)?;
self.write_footer(write)?;
@@ -317,6 +314,48 @@ impl ColumnValues<u128> for CompactSpaceDecompressor {

#[inline]
fn get_row_ids_for_value_range(
&self,
value_range: RangeInclusive<u128>,
positions_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.get_positions_for_value_range(value_range, positions_range, positions)
}
}

impl CompactSpaceDecompressor {
pub fn open(data: OwnedBytes) -> io::Result<CompactSpaceDecompressor> {
let (data_slice, footer_len_bytes) = data.split_at(data.len() - 4);
let footer_len = u32::deserialize(&mut &footer_len_bytes[..])?;

let data_footer = &data_slice[data_slice.len() - footer_len as usize..];
let params = IPCodecParams::deserialize(&mut &data_footer[..])?;
let decompressor = CompactSpaceDecompressor { data, params };

Ok(decompressor)
}

/// Converting to compact space for the decompressor is more complex, since we may get values
/// which are outside the compact space. e.g. if we map
/// 1000 => 5
/// 2000 => 6
///
/// and we want a mapping for 1005, there is no equivalent compact space. We instead return an
/// error with the index of the next range.
fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
self.params.compact_space.u128_to_compact(value)
}

fn compact_to_u128(&self, compact: u64) -> u128 {
self.params.compact_space.compact_to_u128(compact)
}
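// Following the doc example above: u128_to_compact(1000) == Ok(5) and
// u128_to_compact(2000) == Ok(6), while u128_to_compact(1005) returns
// Err(i), i being the index of the next range (a sketch of the convention,
// with the example values taken from the doc comment).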

/// Comparing on compact space: Random dataset 0,24 (50% random hit) - 1.05 GElements/s
/// Comparing on compact space: Real dataset 1.08 GElements/s
///
/// Comparing on original space: Real dataset .06 GElements/s (not completely optimized)
#[inline]
pub fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<u128>,
position_range: Range<u32>,
@@ -356,42 +395,44 @@ impl ColumnValues<u128> for CompactSpaceDecompressor {
range_mapping.compact_end()
});

let value_range = compact_from..=compact_to;
self.get_positions_for_compact_value_range(value_range, position_range, positions);
}
}
let range = compact_from..=compact_to;

impl CompactSpaceDecompressor {
pub fn open(data: OwnedBytes) -> io::Result<CompactSpaceDecompressor> {
let (data_slice, footer_len_bytes) = data.split_at(data.len() - 4);
let footer_len = u32::deserialize(&mut &footer_len_bytes[..])?;
let scan_num_docs = position_range.end - position_range.start;

let data_footer = &data_slice[data_slice.len() - footer_len as usize..];
let params = IPCodecParams::deserialize(&mut &data_footer[..])?;
let decompressor = CompactSpaceDecompressor { data, params };
let step_size = 4;
let cutoff = position_range.start + scan_num_docs - scan_num_docs % step_size;

Ok(decompressor)
}
let mut push_if_in_range = |idx, val| {
if range.contains(&val) {
positions.push(idx);
}
};
let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
// unrolled loop
for idx in (position_range.start..cutoff).step_by(step_size as usize) {
let idx1 = idx;
let idx2 = idx + 1;
let idx3 = idx + 2;
let idx4 = idx + 3;
let val1 = get_val(idx1);
let val2 = get_val(idx2);
let val3 = get_val(idx3);
let val4 = get_val(idx4);
push_if_in_range(idx1, val1);
push_if_in_range(idx2, val2);
push_if_in_range(idx3, val3);
push_if_in_range(idx4, val4);
}

/// Converting to compact space for the decompressor is more complex, since we may get values
/// which are outside the compact space. e.g. if we map
/// 1000 => 5
/// 2000 => 6
///
/// and we want a mapping for 1005, there is no equivalent compact space. We instead return an
/// error with the index of the next range.
fn u128_to_compact(&self, value: u128) -> Result<u32, usize> {
self.params.compact_space.u128_to_compact(value)
}

fn compact_to_u128(&self, compact: u32) -> u128 {
self.params.compact_space.compact_to_u128(compact)
// handle rest
for idx in cutoff..position_range.end {
push_if_in_range(idx, get_val(idx));
}
}

#[inline]
fn iter_compact(&self) -> impl Iterator<Item = u32> + '_ {
(0..self.params.num_vals)
.map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u32)
fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
(0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
}

#[inline]
@@ -404,7 +445,7 @@ impl CompactSpaceDecompressor {

#[inline]
pub fn get(&self, idx: u32) -> u128 {
let compact = self.params.bit_unpacker.get(idx, &self.data) as u32;
let compact = self.params.bit_unpacker.get(idx, &self.data);
self.compact_to_u128(compact)
}

@@ -415,20 +456,6 @@ impl CompactSpaceDecompressor {
pub fn max_value(&self) -> u128 {
self.params.max_value
}

fn get_positions_for_compact_value_range(
&self,
value_range: RangeInclusive<u32>,
position_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.params.bit_unpacker.get_ids_for_value_range(
*value_range.start() as u64..=*value_range.end() as u64,
position_range,
&self.data,
positions,
);
}
}

#[cfg(test)]
@@ -442,12 +469,12 @@ mod tests {

#[test]
fn compact_space_test() {
let ips: BTreeSet<u128> = [
let ips = &[
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
]
.into_iter()
.collect();
let compact_space = get_compact_space(&ips, ips.len() as u32, 11);
let compact_space = get_compact_space(ips, ips.len() as u32, 11);
let amplitude = compact_space.amplitude_compact_space();
assert_eq!(amplitude, 17);
assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
@@ -470,8 +497,8 @@ mod tests {
);

for ip in ips {
let compact = compact_space.u128_to_compact(ip).unwrap();
assert_eq!(compact_space.compact_to_u128(compact), ip);
let compact = compact_space.u128_to_compact(*ip).unwrap();
assert_eq!(compact_space.compact_to_u128(compact), *ip);
}
}

@@ -497,7 +524,7 @@ mod tests {
.map(|pos| pos as u32)
.collect::<Vec<_>>();
let mut positions = Vec::new();
decompressor.get_row_ids_for_value_range(
decompressor.get_positions_for_value_range(
range,
0..decompressor.num_vals(),
&mut positions,
@@ -542,7 +569,7 @@ mod tests {
let val = *val;
let pos = pos as u32;
let mut positions = Vec::new();
decomp.get_row_ids_for_value_range(val..=val, pos..pos + 1, &mut positions);
decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
assert_eq!(positions, vec![pos]);
}


@@ -1,6 +1,4 @@
use std::io::{self, Write};
use std::num::NonZeroU64;
use std::ops::{Range, RangeInclusive};

use common::{BinarySerializable, OwnedBytes};
use fastdivide::DividerU64;
@@ -18,46 +16,6 @@ pub struct BitpackedReader {
stats: ColumnStats,
}

#[inline(always)]
const fn div_ceil(n: u64, q: NonZeroU64) -> u64 {
// copied from unstable rust standard library.
let d = n / q.get();
let r = n % q.get();
if r > 0 {
d + 1
} else {
d
}
}
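// e.g. div_ceil(20, 5) == 4 but div_ceil(21, 5) == 5: the +1 branch only
// triggers when the division leaves a remainder.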

// The bitpacked codec applies a linear transformation `f` over data that are bitpacked.
// f is defined by:
// f: bitpacked -> stats.min_value + stats.gcd * bitpacked
//
// In order to run range queries, we invert the transformation.
// `transform_range_before_linear_transformation` returns the range of values
// [min_bitpacked_value..max_bitpacked_value] such that
// f(bitpacked) ∈ [min_value, max_value] <=> bitpacked ∈ [min_bitpacked_value, max_bitpacked_value]
fn transform_range_before_linear_transformation(
stats: &ColumnStats,
range: RangeInclusive<u64>,
) -> Option<RangeInclusive<u64>> {
if range.is_empty() {
return None;
}
if stats.min_value > *range.end() {
return None;
}
if stats.max_value < *range.start() {
return None;
}
let shifted_range =
range.start().saturating_sub(stats.min_value)..=range.end().saturating_sub(stats.min_value);
let start_before_gcd_multiplication: u64 = div_ceil(*shifted_range.start(), stats.gcd);
let end_before_gcd_multiplication: u64 = *shifted_range.end() / stats.gcd;
Some(start_before_gcd_multiplication..=end_before_gcd_multiplication)
}
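// Worked example of the inversion (values illustrative): assume
// stats.min_value = 10 and stats.gcd = 5, so stored values are
// f(b) = 10 + 5 * b. For the query range 12..=31 the shifted range is
// 2..=21, and the bitpacked range becomes div_ceil(2, 5)..=21 / 5,
// i.e. 1..=4, selecting exactly the values 15, 20, 25 and 30.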

impl ColumnValues for BitpackedReader {
#[inline(always)]
fn get_val(&self, doc: u32) -> u64 {
@@ -76,25 +34,6 @@ impl ColumnValues for BitpackedReader {
fn num_vals(&self) -> RowId {
self.stats.num_rows
}

fn get_row_ids_for_value_range(
&self,
range: RangeInclusive<u64>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
let Some(transformed_range) = transform_range_before_linear_transformation(&self.stats, range)
else {
positions.clear();
return;
};
self.bit_unpacker.get_ids_for_value_range(
transformed_range,
doc_id_range,
&self.data,
positions,
);
}
}

fn num_bits(stats: &ColumnStats) -> u8 {

@@ -1,6 +1,6 @@
use proptest::prelude::*;
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};
use proptest::{num, prop_oneof, proptest};

#[test]
fn test_serialize_and_load_simple() {
@@ -99,28 +99,14 @@ pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(

let reader = TColumnCodec::load(OwnedBytes::new(buffer)).unwrap();
assert_eq!(reader.num_vals(), vals.len() as u32);
let mut buffer = Vec::new();
for (doc, orig_val) in vals.iter().copied().enumerate() {
let val = reader.get_val(doc as u32);
assert_eq!(
val, orig_val,
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data `{vals:?}`",
);

buffer.resize(1, 0);
reader.get_vals(&[doc as u32], &mut buffer);
let val = buffer[0];
assert_eq!(
val, orig_val,
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data `{vals:?}`",
);
}

let all_docs: Vec<u32> = (0..vals.len() as u32).collect();
buffer.resize(all_docs.len(), 0);
reader.get_vals(&all_docs, &mut buffer);
assert_eq!(vals, buffer);

if !vals.is_empty() {
let test_rand_idx = rand::thread_rng().gen_range(0..=vals.len() - 1);
let expected_positions: Vec<u32> = vals

@@ -1,4 +1,3 @@
use std::fmt;
use std::fmt::Debug;
use std::net::Ipv6Addr;

@@ -22,22 +21,6 @@ pub enum ColumnType {
DateTime = 7u8,
}

impl fmt::Display for ColumnType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let short_str = match self {
ColumnType::I64 => "i64",
ColumnType::U64 => "u64",
ColumnType::F64 => "f64",
ColumnType::Bytes => "bytes",
ColumnType::Str => "str",
ColumnType::Bool => "bool",
ColumnType::IpAddr => "ip",
ColumnType::DateTime => "datetime",
};
write!(f, "{}", short_str)
}
}

// The order needs to match _exactly_ the order in the enum
const COLUMN_TYPES: [ColumnType; 8] = [
ColumnType::I64,

@@ -52,18 +52,21 @@ impl<'a> Iterable for RemappedTermOrdinalsValues<'a> {

impl<'a> RemappedTermOrdinalsValues<'a> {
fn boxed_iter_stacked(&self) -> Box<dyn Iterator<Item = u64> + '_> {
let iter = self.bytes_columns.iter().flatten().enumerate().flat_map(
move |(seg_ord_with_column, bytes_column)| {
let term_ord_after_merge_mapping = self
.term_ord_mapping
.get_segment(seg_ord_with_column as u32);
bytes_column
.ords()
.values
.iter()
.map(move |term_ord| term_ord_after_merge_mapping[term_ord as usize])
},
);
let iter = self
.bytes_columns
.iter()
.enumerate()
.flat_map(|(segment_ord, byte_column)| {
let segment_ord = self.term_ord_mapping.get_segment(segment_ord as u32);
byte_column.iter().flat_map(move |bytes_column| {
bytes_column
.ords()
.values
.iter()
.map(move |term_ord| segment_ord[term_ord as usize])
})
});
// TODO see if we can better decompose the mapping / and the stacking
Box::new(iter)
}

@@ -130,6 +133,7 @@ fn serialize_merged_dict(
let mut merged_terms = TermMerger::new(field_term_streams);
let mut sstable_builder = sstable::VoidSSTable::writer(output);

// TODO support complex `merge_row_order`.
match merge_row_order {
MergeRowOrder::Stack(_) => {
let mut current_term_ord = 0;

@@ -28,7 +28,7 @@ use crate::{
///
/// See also [README.md].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub(crate) enum ColumnTypeCategory {
enum ColumnTypeCategory {
Bool,
Str,
Numerical,
@@ -78,23 +78,20 @@ pub fn merge_columnar(
output: &mut impl io::Write,
) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(output);
let num_rows_per_columnar = columnar_readers
.iter()
.map(|reader| reader.num_rows())
.collect::<Vec<u32>>();

let columns_to_merge = group_columns_for_merge(columnar_readers, required_columns)?;
for ((column_name, column_type), columns) in columns_to_merge {
let mut column_serializer =
serializer.serialize_column(column_name.as_bytes(), column_type);
merge_column(
column_type,
&num_rows_per_columnar,
columns,
&merge_row_order,
&mut column_serializer,
)?;
}
serializer.finalize(merge_row_order.num_rows())?;

Ok(())
}

@@ -111,7 +108,6 @@ fn dynamic_column_to_u64_monotonic(dynamic_column: DynamicColumn) -> Option<Colu

fn merge_column(
column_type: ColumnType,
num_docs_per_column: &[u32],
columns: Vec<Option<DynamicColumn>>,
merge_row_order: &MergeRowOrder,
wrt: &mut impl io::Write,
@@ -122,19 +118,17 @@ fn merge_column(
| ColumnType::F64
| ColumnType::DateTime
| ColumnType::Bool => {
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Option<Arc<dyn ColumnValues>>> =
Vec::with_capacity(columns.len());
for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
if let Some(Column { index: idx, values }) =
for dynamic_column_opt in columns {
if let Some(Column { idx, values }) =
dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic)
{
column_indexes.push(idx);
column_indexes.push(Some(idx));
column_values.push(Some(values));
} else {
column_indexes.push(ColumnIndex::Empty {
num_docs: num_docs_per_column[i],
});
column_indexes.push(None);
column_values.push(None);
}
}
@@ -148,19 +142,15 @@ fn merge_column(
serialize_column_mappable_to_u64(merged_column_index, &merge_column_values, wrt)?;
}
ColumnType::IpAddr => {
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut column_values: Vec<Option<Arc<dyn ColumnValues<Ipv6Addr>>>> =
Vec::with_capacity(columns.len());
for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
if let Some(DynamicColumn::IpAddr(Column { index: idx, values })) =
dynamic_column_opt
{
column_indexes.push(idx);
for dynamic_column_opt in columns {
if let Some(DynamicColumn::IpAddr(Column { idx, values })) = dynamic_column_opt {
column_indexes.push(Some(idx));
column_values.push(Some(values));
} else {
column_indexes.push(ColumnIndex::Empty {
num_docs: num_docs_per_column[i],
});
column_indexes.push(None);
column_values.push(None);
}
}
@@ -176,22 +166,20 @@ fn merge_column(
serialize_column_mappable_to_u128(merged_column_index, &merge_column_values, wrt)?;
}
ColumnType::Bytes | ColumnType::Str => {
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
let mut column_indexes: Vec<Option<ColumnIndex>> = Vec::with_capacity(columns.len());
let mut bytes_columns: Vec<Option<BytesColumn>> = Vec::with_capacity(columns.len());
for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
for dynamic_column_opt in columns {
match dynamic_column_opt {
Some(DynamicColumn::Str(str_column)) => {
column_indexes.push(str_column.term_ord_column.index.clone());
column_indexes.push(Some(str_column.term_ord_column.idx.clone()));
bytes_columns.push(Some(str_column.into()));
}
Some(DynamicColumn::Bytes(bytes_column)) => {
column_indexes.push(bytes_column.term_ord_column.index.clone());
column_indexes.push(Some(bytes_column.term_ord_column.idx.clone()));
bytes_columns.push(Some(bytes_column));
}
_ => {
column_indexes.push(ColumnIndex::Empty {
num_docs: num_docs_per_column[i],
});
column_indexes.push(None);
bytes_columns.push(None);
}
}
@@ -373,8 +361,8 @@ fn coerce_column(column_type: ColumnType, column: DynamicColumn) -> io::Result<D
fn min_max_if_numerical(column: &DynamicColumn) -> Option<(NumericalValue, NumericalValue)> {
match column {
DynamicColumn::I64(column) => Some((column.min_value().into(), column.max_value().into())),
DynamicColumn::U64(column) => Some((column.min_value().into(), column.max_value().into())),
DynamicColumn::F64(column) => Some((column.min_value().into(), column.max_value().into())),
DynamicColumn::U64(column) => Some((column.min_value().into(), column.min_value().into())),
DynamicColumn::F64(column) => Some((column.min_value().into(), column.min_value().into())),
DynamicColumn::Bool(_)
| DynamicColumn::IpAddr(_)
| DynamicColumn::DateTime(_)

@@ -1,5 +1,3 @@
use itertools::Itertools;

use super::*;
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};

@@ -155,24 +153,20 @@ fn make_numerical_columnar_multiple_columns(
ColumnarReader::open(buffer).unwrap()
}

#[track_caller]
fn make_byte_columnar_multiple_columns(
columns: &[(&str, &[&[&[u8]]])],
num_rows: u32,
) -> ColumnarReader {
fn make_byte_columnar_multiple_columns(columns: &[(&str, &[&[&[u8]]])]) -> ColumnarReader {
let mut dataframe_writer = ColumnarWriter::default();
for (column_name, column_values) in columns {
assert_eq!(
column_values.len(),
num_rows as usize,
"All columns must have `{num_rows}` rows"
);
for (row_id, vals) in column_values.iter().enumerate() {
for val in vals.iter() {
dataframe_writer.record_bytes(row_id as u32, column_name, val);
}
}
}
let num_rows = columns
.iter()
.map(|(_, val_rows)| val_rows.len() as RowId)
.max()
.unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(num_rows, None, &mut buffer)
@@ -251,8 +245,6 @@ fn test_merge_columnar_texts() {
let cols = columnar_reader.read_columns("texts").unwrap();
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);

let get_str_for_ord = |ord| {
let mut out = String::new();
vals.ord_to_str(ord, &mut out).unwrap();
@@ -280,8 +272,8 @@ fn test_merge_columnar_texts() {

#[test]
fn test_merge_columnar_byte() {
let columnar1 = make_byte_columnar_multiple_columns(&[("bytes", &[&[b"bbbb"], &[b"baaa"]])], 2);
let columnar2 = make_byte_columnar_multiple_columns(&[("bytes", &[&[], &[b"a"]])], 2);
let columnar1 = make_byte_columnar_multiple_columns(&[("bytes", &[&[b"bbbb"], &[b"baaa"]])]);
let columnar2 = make_byte_columnar_multiple_columns(&[("bytes", &[&[], &[b"a"]])]);
let mut buffer = Vec::new();
let columnars = &[&columnar1, &columnar2];
let stack_merge_order = StackMergeOrder::stack(columnars);
@@ -324,149 +316,3 @@ fn test_merge_columnar_byte() {
assert_eq!(get_bytes_for_row(2), b"");
assert_eq!(get_bytes_for_row(3), b"a");
}

#[test]
fn test_merge_columnar_byte_with_missing() {
let columnar1 = make_byte_columnar_multiple_columns(&[], 3);
let columnar2 = make_byte_columnar_multiple_columns(&[("col", &[&[b"b"], &[]])], 2);
let columnar3 = make_byte_columnar_multiple_columns(
&[
("col", &[&[], &[b"b"], &[b"a", b"b"]]),
("col2", &[&[b"hello"], &[], &[b"a", b"b"]]),
],
3,
);
let mut buffer = Vec::new();
let columnars = &[&columnar1, &columnar2, &columnar3];
let stack_merge_order = StackMergeOrder::stack(columnars);
crate::columnar::merge_columnar(
columnars,
&[],
MergeRowOrder::Stack(stack_merge_order),
&mut buffer,
)
.unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_rows(), 3 + 2 + 3);
assert_eq!(columnar_reader.num_columns(), 2);
let cols = columnar_reader.read_columns("col").unwrap();
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::Bytes(vals) = dynamic_column else { panic!() };
let get_bytes_for_ord = |ord| {
let mut out = Vec::new();
vals.ord_to_bytes(ord, &mut out).unwrap();
out
};
assert_eq!(vals.dictionary.num_terms(), 2);
assert_eq!(get_bytes_for_ord(0), b"a");
assert_eq!(get_bytes_for_ord(1), b"b");
let get_bytes_for_row = |row_id| {
let terms: Vec<Vec<u8>> = vals
.term_ords(row_id)
.map(|term_ord| {
let mut out = Vec::new();
vals.ord_to_bytes(term_ord, &mut out).unwrap();
out
})
.collect();
terms
};
assert!(get_bytes_for_row(0).is_empty());
assert!(get_bytes_for_row(1).is_empty());
assert!(get_bytes_for_row(2).is_empty());
assert_eq!(get_bytes_for_row(3), vec![b"b".to_vec()]);
assert!(get_bytes_for_row(4).is_empty());
assert!(get_bytes_for_row(5).is_empty());
assert_eq!(get_bytes_for_row(6), vec![b"b".to_vec()]);
assert_eq!(get_bytes_for_row(7), vec![b"a".to_vec(), b"b".to_vec()]);
}

#[test]
fn test_merge_columnar_different_types() {
let columnar1 = make_text_columnar_multiple_columns(&[("mixed", &[&["a"]])]);
let columnar2 = make_text_columnar_multiple_columns(&[("mixed", &[&[], &["b"]])]);
let columnar3 = make_columnar("mixed", &[1i64]);
let mut buffer = Vec::new();
let columnars = &[&columnar1, &columnar2, &columnar3];
let stack_merge_order = StackMergeOrder::stack(columnars);
crate::columnar::merge_columnar(
columnars,
&[],
MergeRowOrder::Stack(stack_merge_order),
&mut buffer,
)
.unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_rows(), 4);
assert_eq!(columnar_reader.num_columns(), 2);
let cols = columnar_reader.read_columns("mixed").unwrap();

// numeric column
let dynamic_column = cols[0].open().unwrap();
let DynamicColumn::I64(vals) = dynamic_column else { panic!() };
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
assert_eq!(vals.values_for_doc(0).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(1).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(2).collect_vec(), vec![]);
assert_eq!(vals.values_for_doc(3).collect_vec(), vec![1]);
assert_eq!(vals.values_for_doc(4).collect_vec(), vec![]);

// text column
let dynamic_column = cols[1].open().unwrap();
let DynamicColumn::Str(vals) = dynamic_column else { panic!() };
assert_eq!(vals.ords().get_cardinality(), Cardinality::Optional);
let get_str_for_ord = |ord| {
let mut out = String::new();
vals.ord_to_str(ord, &mut out).unwrap();
out
};

assert_eq!(vals.dictionary.num_terms(), 2);
assert_eq!(get_str_for_ord(0), "a");
assert_eq!(get_str_for_ord(1), "b");

let get_str_for_row = |row_id| {
let term_ords: Vec<String> = vals
.term_ords(row_id)
.map(|el| {
let mut out = String::new();
vals.ord_to_str(el, &mut out).unwrap();
out
})
.collect();
term_ords
};

assert_eq!(get_str_for_row(0), vec!["a".to_string()]);
assert_eq!(get_str_for_row(1), Vec::<String>::new());
assert_eq!(get_str_for_row(2), vec!["b".to_string()]);
assert_eq!(get_str_for_row(3), Vec::<String>::new());
}

#[test]
fn test_merge_columnar_different_empty_cardinality() {
let columnar1 = make_text_columnar_multiple_columns(&[("mixed", &[&["a"]])]);
let columnar2 = make_columnar("mixed", &[1i64]);
let mut buffer = Vec::new();
let columnars = &[&columnar1, &columnar2];
let stack_merge_order = StackMergeOrder::stack(columnars);
crate::columnar::merge_columnar(
columnars,
&[],
MergeRowOrder::Stack(stack_merge_order),
&mut buffer,
)
.unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_rows(), 2);
assert_eq!(columnar_reader.num_columns(), 2);
let cols = columnar_reader.read_columns("mixed").unwrap();

// numeric column
let dynamic_column = cols[0].open().unwrap();
assert_eq!(dynamic_column.get_cardinality(), Cardinality::Optional);

// text column
let dynamic_column = cols[1].open().unwrap();
assert_eq!(dynamic_column.get_cardinality(), Cardinality::Optional);
}

@@ -5,8 +5,6 @@ mod reader;
mod writer;

pub use column_type::{ColumnType, HasAssociatedColumnType};
#[cfg(test)]
pub(crate) use merge::ColumnTypeCategory;
pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
pub use reader::ColumnarReader;
pub use writer::ColumnarWriter;

@@ -1,4 +1,4 @@
use std::{fmt, io, mem};
use std::{io, mem};

use common::file_slice::FileSlice;
use common::BinarySerializable;
@@ -21,32 +21,6 @@ pub struct ColumnarReader {
num_rows: RowId,
}

impl fmt::Debug for ColumnarReader {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let num_rows = self.num_rows();
let columns = self.list_columns().unwrap();
let num_cols = columns.len();
let mut debug_struct = f.debug_struct("Columnar");
debug_struct
.field("num_rows", &num_rows)
.field("num_cols", &num_cols);
for (col_name, dynamic_column_handle) in columns.into_iter().take(5) {
let col = dynamic_column_handle.open().unwrap();
if col.num_values() > 10 {
debug_struct.field(&col_name, &"..");
} else {
debug_struct.field(&col_name, &col);
}
}
if num_cols > 5 {
debug_struct.finish_non_exhaustive()?;
} else {
debug_struct.finish()?;
}
Ok(())
}
}

/// Used by both the async and the sync code paths for listing columns.
/// It takes a stream from the column sstable and returns the list of
/// `DynamicColumn`s available in it.

@@ -104,25 +104,16 @@ impl ColumnarWriter {
};
let mut symbols_buffer = Vec::new();
let mut values = Vec::new();
let mut start_doc_check_fill = 0;
let mut current_doc_opt: Option<RowId> = None;
// Assumption: NewDoc will never call the same doc twice and is strictly increasing between
// calls
let mut last_doc_opt: Option<RowId> = None;
for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) {
match op {
ColumnOperation::NewDoc(doc) => {
current_doc_opt = Some(doc);
last_doc_opt = Some(doc);
}
ColumnOperation::Value(numerical_value) => {
if let Some(current_doc) = current_doc_opt {
// Fill up with 0.0 since last doc
values.extend((start_doc_check_fill..current_doc).map(|doc| (0.0, doc)));
start_doc_check_fill = current_doc + 1;
// handle multi values
current_doc_opt = None;

if let Some(last_doc) = last_doc_opt {
let score: f32 = f64::coerce(numerical_value) as f32;
values.push((score, current_doc));
values.push((score, last_doc));
}
}
}
@@ -132,9 +123,9 @@ impl ColumnarWriter {
}
values.sort_by(|(left_score, _), (right_score, _)| {
if reversed {
right_score.total_cmp(left_score)
right_score.partial_cmp(left_score).unwrap()
} else {
left_score.total_cmp(right_score)
left_score.partial_cmp(right_score).unwrap()
}
});
values.into_iter().map(|(_score, doc)| doc).collect()
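// Sketch of the fill behaviour above: if values were recorded only for docs
// 1 and 3 out of 5, docs 0, 2 and 4 are padded with a 0.0 score, so the
// ascending sort order starts with [0, 2, 4, ...]
// (see test_dataframe_sort_by_opt further down).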

@@ -1,14 +1,14 @@
use std::io;
use std::net::Ipv6Addr;
use std::sync::Arc;
use std::{fmt, io};

use common::file_slice::FileSlice;
use common::{ByteCount, DateTime, HasLen, OwnedBytes};
use common::{DateTime, HasLen, OwnedBytes};

use crate::column::{BytesColumn, Column, StrColumn};
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
use crate::columnar::ColumnType;
use crate::{Cardinality, ColumnIndex, NumericalType};
use crate::{Cardinality, NumericalType};

#[derive(Clone)]
pub enum DynamicColumn {
@@ -22,54 +22,19 @@ pub enum DynamicColumn {
Str(StrColumn),
}

impl fmt::Debug for DynamicColumn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{} {} |", self.get_cardinality(), self.column_type())?;
match self {
DynamicColumn::Bool(col) => write!(f, " {:?}", col)?,
DynamicColumn::I64(col) => write!(f, " {:?}", col)?,
DynamicColumn::U64(col) => write!(f, " {:?}", col)?,
DynamicColumn::F64(col) => write!(f, "{:?}", col)?,
DynamicColumn::IpAddr(col) => write!(f, "{:?}", col)?,
DynamicColumn::DateTime(col) => write!(f, "{:?}", col)?,
DynamicColumn::Bytes(col) => write!(f, "{:?}", col)?,
DynamicColumn::Str(col) => write!(f, "{:?}", col)?,
}
write!(f, "]")
}
}

impl DynamicColumn {
pub fn column_index(&self) -> &ColumnIndex {
match self {
DynamicColumn::Bool(c) => &c.index,
DynamicColumn::I64(c) => &c.index,
DynamicColumn::U64(c) => &c.index,
DynamicColumn::F64(c) => &c.index,
DynamicColumn::IpAddr(c) => &c.index,
DynamicColumn::DateTime(c) => &c.index,
DynamicColumn::Bytes(c) => &c.ords().index,
DynamicColumn::Str(c) => &c.ords().index,
}
}

pub fn get_cardinality(&self) -> Cardinality {
self.column_index().get_cardinality()
}

pub fn num_values(&self) -> u32 {
match self {
DynamicColumn::Bool(c) => c.values.num_vals(),
DynamicColumn::I64(c) => c.values.num_vals(),
DynamicColumn::U64(c) => c.values.num_vals(),
DynamicColumn::F64(c) => c.values.num_vals(),
DynamicColumn::IpAddr(c) => c.values.num_vals(),
DynamicColumn::DateTime(c) => c.values.num_vals(),
DynamicColumn::Bytes(c) => c.ords().values.num_vals(),
DynamicColumn::Str(c) => c.ords().values.num_vals(),
DynamicColumn::Bool(c) => c.get_cardinality(),
DynamicColumn::I64(c) => c.get_cardinality(),
DynamicColumn::U64(c) => c.get_cardinality(),
DynamicColumn::F64(c) => c.get_cardinality(),
DynamicColumn::IpAddr(c) => c.get_cardinality(),
DynamicColumn::DateTime(c) => c.get_cardinality(),
DynamicColumn::Bytes(c) => c.ords().get_cardinality(),
DynamicColumn::Str(c) => c.ords().get_cardinality(),
}
}

pub fn column_type(&self) -> ColumnType {
match self {
DynamicColumn::Bool(_) => ColumnType::Bool,
@@ -108,11 +73,11 @@ impl DynamicColumn {
fn coerce_to_f64(self) -> Option<DynamicColumn> {
match self {
DynamicColumn::I64(column) => Some(DynamicColumn::F64(Column {
index: column.index,
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapI64ToF64)),
})),
DynamicColumn::U64(column) => Some(DynamicColumn::F64(Column {
index: column.index,
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapU64ToF64)),
})),
DynamicColumn::F64(_) => Some(self),
@@ -126,7 +91,7 @@ impl DynamicColumn {
return None;
}
Some(DynamicColumn::I64(Column {
index: column.index,
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapU64ToI64)),
}))
}
@@ -141,7 +106,7 @@ impl DynamicColumn {
return None;
}
Some(DynamicColumn::U64(Column {
index: column.index,
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapI64ToU64)),
}))
}
@@ -283,8 +248,8 @@ impl DynamicColumnHandle {
Ok(dynamic_column)
}

pub fn num_bytes(&self) -> ByteCount {
self.file_slice.len().into()
pub fn num_bytes(&self) -> usize {
self.file_slice.len()
}

pub fn column_type(&self) -> ColumnType {

@@ -7,10 +7,8 @@ extern crate more_asserts;
#[cfg(all(test, feature = "unstable"))]
extern crate test;

use std::fmt::Display;
use std::io;

mod block_accessor;
mod column;
mod column_index;
pub mod column_values;
@@ -21,12 +19,9 @@ mod iterable;
pub(crate) mod utils;
mod value;

pub use block_accessor::ColumnBlockAccessor;
pub use column::{BytesColumn, Column, StrColumn};
pub use column_index::ColumnIndex;
pub use column_values::{
ColumnValues, EmptyColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64,
};
pub use column_values::{ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64};
pub use columnar::{
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder,
@@ -76,17 +71,6 @@ pub enum Cardinality {
Multivalued = 2,
}

impl Display for Cardinality {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let short_str = match self {
Cardinality::Full => "full",
Cardinality::Optional => "opt",
Cardinality::Multivalued => "mult",
};
write!(f, "{short_str}")
}
}

impl Cardinality {
pub fn is_optional(&self) -> bool {
matches!(self, Cardinality::Optional)
@@ -97,6 +81,7 @@ impl Cardinality {
pub(crate) fn to_code(self) -> u8 {
self as u8
}

pub(crate) fn try_from_code(code: u8) -> Result<Cardinality, InvalidData> {
match code {
0 => Ok(Cardinality::Full),
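// Round-trip sketch: to_code and try_from_code are inverses on valid codes
// (0 = Full, 1 = Optional, 2 = Multivalued, with Optional inferred from the
// enum order), e.g. try_from_code(Cardinality::Multivalued.to_code())
// yields Ok(Cardinality::Multivalued).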
|
||||
@@ -1,17 +1,10 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Debug;
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
use common::DateTime;
|
||||
use proptest::prelude::*;
|
||||
|
||||
use crate::column_values::MonotonicallyMappableToU128;
|
||||
use crate::columnar::{ColumnType, ColumnTypeCategory};
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::dynamic_column::{DynamicColumn, DynamicColumnHandle};
|
||||
use crate::value::{Coerce, NumericalValue};
|
||||
use crate::{
|
||||
BytesColumn, Cardinality, Column, ColumnarReader, ColumnarWriter, RowId, StackMergeOrder,
|
||||
};
|
||||
use crate::value::NumericalValue;
|
||||
use crate::{Cardinality, ColumnarReader, ColumnarWriter};
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_writer_str() {
|
||||
@@ -24,7 +17,7 @@ fn test_dataframe_writer_str() {
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 89);
|
||||
assert_eq!(cols[0].num_bytes(), 158);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -38,7 +31,7 @@ fn test_dataframe_writer_bytes() {
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 89);
|
||||
assert_eq!(cols[0].num_bytes(), 158);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -133,7 +126,7 @@ fn test_dataframe_writer_numerical() {
|
||||
assert_eq!(cols[0].num_bytes(), 33);
|
||||
let column = cols[0].open().unwrap();
|
||||
let DynamicColumn::I64(column_i64) = column else { panic!(); };
|
||||
assert_eq!(column_i64.index.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(column_i64.idx.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(column_i64.first(0), None);
|
||||
assert_eq!(column_i64.first(1), Some(12i64));
|
||||
assert_eq!(column_i64.first(2), Some(13i64));
|
||||
@@ -143,46 +136,6 @@ fn test_dataframe_writer_numerical() {
|
||||
assert_eq!(column_i64.first(6), None); //< we can change the spec for that one.
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dataframe_sort_by_full() {
|
||||
let mut dataframe_writer = ColumnarWriter::default();
|
||||
dataframe_writer.record_numerical(0u32, "value", NumericalValue::U64(1));
|
||||
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
|
||||
    let data = dataframe_writer.sort_order("value", 2, false);
    assert_eq!(data, vec![0, 1]);
}

#[test]
fn test_dataframe_sort_by_opt() {
    let mut dataframe_writer = ColumnarWriter::default();
    dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(3));
    dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(2));
    let data = dataframe_writer.sort_order("value", 5, false);
    // rows 0, 2 and 4 have no value (treated as 0.0) and sort first
    assert_eq!(data, vec![0, 2, 4, 3, 1]);
    let data = dataframe_writer.sort_order("value", 5, true);
    assert_eq!(
        data,
        vec![4, 2, 0, 3, 1].into_iter().rev().collect::<Vec<_>>()
    );
}

#[test]
fn test_dataframe_sort_by_multi() {
    let mut dataframe_writer = ColumnarWriter::default();
    // valid for sort
    dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
    // those are ignored for sort
    dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
    dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
    // valid for sort
    dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(3));
    // ignored, would change sort order
    dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(1));
    let data = dataframe_writer.sort_order("value", 4, false);
    assert_eq!(data, vec![0, 2, 1, 3]);
}

#[test]
fn test_dictionary_encoded_str() {
    let mut buffer = Vec::new();
@@ -257,497 +210,3 @@ fn test_dictionary_encoded_bytes()
        .unwrap();
    assert_eq!(term_buffer, b"b");
}

fn num_strategy() -> impl Strategy<Value = NumericalValue> {
    prop_oneof![
        Just(NumericalValue::U64(0u64)),
        Just(NumericalValue::U64(u64::MAX)),
        Just(NumericalValue::I64(0i64)),
        Just(NumericalValue::I64(i64::MIN)),
        Just(NumericalValue::I64(i64::MAX)),
        Just(NumericalValue::F64(1.2f64)),
    ]
}

#[derive(Debug, Clone, Copy)]
enum ColumnValue {
    Str(&'static str),
    Bytes(&'static [u8]),
    Numerical(NumericalValue),
    IpAddr(Ipv6Addr),
    Bool(bool),
    DateTime(DateTime),
}

impl ColumnValue {
    pub(crate) fn column_type_category(&self) -> ColumnTypeCategory {
        match self {
            ColumnValue::Str(_) => ColumnTypeCategory::Str,
            ColumnValue::Bytes(_) => ColumnTypeCategory::Bytes,
            ColumnValue::Numerical(_) => ColumnTypeCategory::Numerical,
            ColumnValue::IpAddr(_) => ColumnTypeCategory::IpAddr,
            ColumnValue::Bool(_) => ColumnTypeCategory::Bool,
            ColumnValue::DateTime(_) => ColumnTypeCategory::DateTime,
        }
    }
}

fn column_name_strategy() -> impl Strategy<Value = &'static str> {
    prop_oneof![Just("c1"), Just("c2")]
}

fn string_strategy() -> impl Strategy<Value = &'static str> {
    prop_oneof![Just("a"), Just("b")]
}

fn bytes_strategy() -> impl Strategy<Value = &'static [u8]> {
    prop_oneof![Just(&[0u8][..]), Just(&[1u8][..])]
}

// A random column value
fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
    prop_oneof![
        10 => string_strategy().prop_map(|s| ColumnValue::Str(s)),
        1 => bytes_strategy().prop_map(|b| ColumnValue::Bytes(b)),
        40 => num_strategy().prop_map(|n| ColumnValue::Numerical(n)),
        1 => (1u16..3u16).prop_map(|ip_addr_byte| ColumnValue::IpAddr(Ipv6Addr::new(
            127,
            0,
            0,
            0,
            0,
            0,
            0,
            ip_addr_byte
        ))),
        1 => any::<bool>().prop_map(|b| ColumnValue::Bool(b)),
        1 => (0_679_723_993i64..1_679_723_995i64)
            .prop_map(|val| { ColumnValue::DateTime(DateTime::from_timestamp_secs(val)) })
    ]
}
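// Aside (not part of the original source): the `N => strategy` arms above use
// proptest's weighted form of `prop_oneof!`; strings are drawn ten times as
// often as bytes, and numerical values dominate. A minimal sketch of the
// weighted form, with hypothetical names:
fn coin_strategy() -> impl Strategy<Value = bool> {
    prop_oneof![
        3 => Just(true),  // chosen roughly 3 out of every 4 draws
        1 => Just(false),
    ]
}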

// A document contains up to 4 values.
fn doc_strategy() -> impl Strategy<Value = Vec<(&'static str, ColumnValue)>> {
    proptest::collection::vec((column_name_strategy(), column_value_strategy()), 0..4)
}

// A columnar contains up to 2 docs.
fn columnar_docs_strategy() -> impl Strategy<Value = Vec<Vec<(&'static str, ColumnValue)>>> {
    proptest::collection::vec(doc_strategy(), 0..=2)
}

fn columnar_docs_and_mapping_strategy(
) -> impl Strategy<Value = (Vec<Vec<(&'static str, ColumnValue)>>, Vec<RowId>)> {
    columnar_docs_strategy().prop_flat_map(|docs| {
        permutation_strategy(docs.len()).prop_map(move |permutation| (docs.clone(), permutation))
    })
}

fn permutation_strategy(n: usize) -> impl Strategy<Value = Vec<RowId>> {
    Just((0u32..n as RowId).collect()).prop_shuffle()
}

fn build_columnar_with_mapping(
    docs: &[Vec<(&'static str, ColumnValue)>],
    old_to_new_row_ids_opt: Option<&[RowId]>,
) -> ColumnarReader {
    let num_docs = docs.len() as u32;
    let mut buffer = Vec::new();
    let mut columnar_writer = ColumnarWriter::default();
    for (doc_id, vals) in docs.iter().enumerate() {
        for (column_name, col_val) in vals {
            match *col_val {
                ColumnValue::Str(str_val) => {
                    columnar_writer.record_str(doc_id as u32, column_name, str_val);
                }
                ColumnValue::Bytes(bytes) => {
                    columnar_writer.record_bytes(doc_id as u32, column_name, bytes)
                }
                ColumnValue::Numerical(num) => {
                    columnar_writer.record_numerical(doc_id as u32, column_name, num);
                }
                ColumnValue::IpAddr(ip_addr) => {
                    columnar_writer.record_ip_addr(doc_id as u32, column_name, ip_addr);
                }
                ColumnValue::Bool(bool_val) => {
                    columnar_writer.record_bool(doc_id as u32, column_name, bool_val);
                }
                ColumnValue::DateTime(date_time) => {
                    columnar_writer.record_datetime(doc_id as u32, column_name, date_time);
                }
            }
        }
    }
    columnar_writer
        .serialize(num_docs, old_to_new_row_ids_opt, &mut buffer)
        .unwrap();
    let columnar_reader = ColumnarReader::open(buffer).unwrap();
    columnar_reader
}

fn build_columnar(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
    build_columnar_with_mapping(docs, None)
}

fn assert_columnar_eq(left: &ColumnarReader, right: &ColumnarReader) {
    assert_eq!(left.num_rows(), right.num_rows());
    let left_columns = left.list_columns().unwrap();
    let right_columns = right.list_columns().unwrap();
    assert_eq!(left_columns.len(), right_columns.len());
    for i in 0..left_columns.len() {
        assert_eq!(left_columns[i].0, right_columns[i].0);
        let left_column = left_columns[i].1.open().unwrap();
        let right_column = right_columns[i].1.open().unwrap();
        assert_dyn_column_eq(&left_column, &right_column);
    }
}

fn assert_column_eq<T: Copy + PartialOrd + Debug + Send + Sync + 'static>(
    left: &Column<T>,
    right: &Column<T>,
) {
    assert_eq!(left.get_cardinality(), right.get_cardinality());
    assert_eq!(left.num_docs(), right.num_docs());
    let num_docs = left.num_docs();
    for doc in 0..num_docs {
        assert_eq!(
            left.index.value_row_ids(doc),
            right.index.value_row_ids(doc)
        );
    }
    assert_eq!(left.values.num_vals(), right.values.num_vals());
    let num_vals = left.values.num_vals();
    for i in 0..num_vals {
        assert_eq!(left.values.get_val(i), right.values.get_val(i));
    }
}

fn assert_bytes_column_eq(left: &BytesColumn, right: &BytesColumn) {
    assert_eq!(
        left.term_ord_column.get_cardinality(),
        right.term_ord_column.get_cardinality()
    );
    assert_eq!(left.num_rows(), right.num_rows());
    assert_column_eq(&left.term_ord_column, &right.term_ord_column);
    assert_eq!(left.dictionary.num_terms(), right.dictionary.num_terms());
    let num_terms = left.dictionary.num_terms();
    let mut left_terms = left.dictionary.stream().unwrap();
    let mut right_terms = right.dictionary.stream().unwrap();
    for _ in 0..num_terms {
        assert!(left_terms.advance());
        assert!(right_terms.advance());
        assert_eq!(left_terms.key(), right_terms.key());
    }
    assert!(!left_terms.advance());
    assert!(!right_terms.advance());
}

fn assert_dyn_column_eq(left_dyn_column: &DynamicColumn, right_dyn_column: &DynamicColumn) {
    assert_eq!(
        &left_dyn_column.column_type(),
        &right_dyn_column.column_type()
    );
    assert_eq!(
        &left_dyn_column.get_cardinality(),
        &right_dyn_column.get_cardinality()
    );
    match &(left_dyn_column, right_dyn_column) {
        (DynamicColumn::Bool(left_col), DynamicColumn::Bool(right_col)) => {
            assert_column_eq(left_col, right_col);
        }
        (DynamicColumn::I64(left_col), DynamicColumn::I64(right_col)) => {
            assert_column_eq(left_col, right_col);
        }
        (DynamicColumn::U64(left_col), DynamicColumn::U64(right_col)) => {
            assert_column_eq(left_col, right_col);
        }
        (DynamicColumn::F64(left_col), DynamicColumn::F64(right_col)) => {
            assert_column_eq(left_col, right_col);
        }
        (DynamicColumn::DateTime(left_col), DynamicColumn::DateTime(right_col)) => {
            assert_column_eq(left_col, right_col);
        }
        (DynamicColumn::IpAddr(left_col), DynamicColumn::IpAddr(right_col)) => {
            assert_column_eq(left_col, right_col);
        }
        (DynamicColumn::Bytes(left_col), DynamicColumn::Bytes(right_col)) => {
            assert_bytes_column_eq(left_col, right_col);
        }
        (DynamicColumn::Str(left_col), DynamicColumn::Str(right_col)) => {
            assert_bytes_column_eq(left_col, right_col);
        }
        _ => {
            unreachable!()
        }
    }
}
trait AssertEqualToColumnValue {
    fn assert_equal_to_column_value(&self, column_value: &ColumnValue);
}

impl AssertEqualToColumnValue for bool {
    fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
        let ColumnValue::Bool(val) = column_value else { panic!() };
        assert_eq!(self, val);
    }
}

impl AssertEqualToColumnValue for Ipv6Addr {
    fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
        let ColumnValue::IpAddr(val) = column_value else { panic!() };
        assert_eq!(self, val);
    }
}

impl<T: Coerce + PartialEq + Debug + Into<NumericalValue>> AssertEqualToColumnValue for T {
    fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
        let ColumnValue::Numerical(num) = column_value else { panic!() };
        assert_eq!(self, &T::coerce(*num));
    }
}

impl AssertEqualToColumnValue for DateTime {
    fn assert_equal_to_column_value(&self, column_value: &ColumnValue) {
        let ColumnValue::DateTime(dt) = column_value else { panic!() };
        assert_eq!(self, dt);
    }
}

fn assert_column_values<
    T: AssertEqualToColumnValue + PartialEq + Copy + PartialOrd + Debug + Send + Sync + 'static,
>(
    col: &Column<T>,
    expected: &HashMap<u32, Vec<&ColumnValue>>,
) {
    let mut num_non_empty_rows = 0;
    for doc in 0..col.num_docs() {
        let doc_vals: Vec<T> = col.values_for_doc(doc).collect();
        if doc_vals.is_empty() {
            continue;
        }
        num_non_empty_rows += 1;
        let expected_vals = expected.get(&doc).unwrap();
        assert_eq!(doc_vals.len(), expected_vals.len());
        for (val, &expected) in doc_vals.iter().zip(expected_vals.iter()) {
            val.assert_equal_to_column_value(expected)
        }
    }
    assert_eq!(num_non_empty_rows, expected.len());
}

fn assert_bytes_column_values(
    col: &BytesColumn,
    expected: &HashMap<u32, Vec<&ColumnValue>>,
    is_str: bool,
) {
    let mut num_non_empty_rows = 0;
    let mut buffer = Vec::new();
    for doc in 0..col.term_ord_column.num_docs() {
        let doc_vals: Vec<u64> = col.term_ords(doc).collect();
        if doc_vals.is_empty() {
            continue;
        }
        let expected_vals = expected.get(&doc).unwrap();
        assert_eq!(doc_vals.len(), expected_vals.len());
        for (&expected_col_val, &ord) in expected_vals.iter().zip(&doc_vals) {
            col.ord_to_bytes(ord, &mut buffer).unwrap();
            match expected_col_val {
                ColumnValue::Str(str_val) => {
                    assert!(is_str);
                    assert_eq!(str_val.as_bytes(), &buffer);
                }
                ColumnValue::Bytes(bytes_val) => {
                    assert!(!is_str);
                    assert_eq!(bytes_val, &buffer);
                }
                _ => {
                    panic!();
                }
            }
        }
        num_non_empty_rows += 1;
    }
    assert_eq!(num_non_empty_rows, expected.len());
}
// This proptest attempts to create a tiny columnar based on up to 2 rows, and checks that the
// resulting columnar matches the row data.
proptest! {
    #![proptest_config(ProptestConfig::with_cases(500))]
    #[test]
    fn test_single_columnar_builder_proptest(docs in columnar_docs_strategy()) {
        let columnar = build_columnar(&docs[..]);
        assert_eq!(columnar.num_rows() as usize, docs.len());
        let mut expected_columns: HashMap<(&str, ColumnTypeCategory), HashMap<u32, Vec<&ColumnValue>>> = Default::default();
        for (doc_id, doc_vals) in docs.iter().enumerate() {
            for (col_name, col_val) in doc_vals {
                expected_columns
                    .entry((col_name, col_val.column_type_category()))
                    .or_default()
                    .entry(doc_id as u32)
                    .or_default()
                    .push(col_val);
            }
        }
        let column_list = columnar.list_columns().unwrap();
        assert_eq!(expected_columns.len(), column_list.len());
        for (column_name, column) in column_list {
            let dynamic_column = column.open().unwrap();
            let col_category: ColumnTypeCategory = dynamic_column.column_type().into();
            let expected_col_values: &HashMap<u32, Vec<&ColumnValue>> = expected_columns.get(&(column_name.as_str(), col_category)).unwrap();
            match &dynamic_column {
                DynamicColumn::Bool(col) => assert_column_values(col, expected_col_values),
                DynamicColumn::I64(col) => assert_column_values(col, expected_col_values),
                DynamicColumn::U64(col) => assert_column_values(col, expected_col_values),
                DynamicColumn::F64(col) => assert_column_values(col, expected_col_values),
                DynamicColumn::IpAddr(col) => assert_column_values(col, expected_col_values),
                DynamicColumn::DateTime(col) => assert_column_values(col, expected_col_values),
                DynamicColumn::Bytes(col) => assert_bytes_column_values(col, expected_col_values, false),
                DynamicColumn::Str(col) => assert_bytes_column_values(col, expected_col_values, true),
            }
        }
    }
}

// Same as `test_single_columnar_builder_proptest` but with a shuffling mapping.
proptest! {
    #![proptest_config(ProptestConfig::with_cases(500))]
    #[test]
    fn test_single_columnar_builder_with_shuffle_proptest((docs, mapping) in columnar_docs_and_mapping_strategy()) {
        let columnar = build_columnar_with_mapping(&docs[..], Some(&mapping));
        assert_eq!(columnar.num_rows() as usize, docs.len());
        let mut expected_columns: HashMap<(&str, ColumnTypeCategory), HashMap<u32, Vec<&ColumnValue>>> = Default::default();
        for (doc_id, doc_vals) in docs.iter().enumerate() {
            for (col_name, col_val) in doc_vals {
                expected_columns
                    .entry((col_name, col_val.column_type_category()))
                    .or_default()
                    .entry(mapping[doc_id])
                    .or_default()
                    .push(col_val);
            }
        }
        let column_list = columnar.list_columns().unwrap();
        assert_eq!(expected_columns.len(), column_list.len());
        for (column_name, column) in column_list {
            let dynamic_column = column.open().unwrap();
            let col_category: ColumnTypeCategory = dynamic_column.column_type().into();
            let expected_col_values: &HashMap<u32, Vec<&ColumnValue>> = expected_columns.get(&(column_name.as_str(), col_category)).unwrap();
            for _doc_id in 0..columnar.num_rows() {
                match &dynamic_column {
                    DynamicColumn::Bool(col) => assert_column_values(col, expected_col_values),
                    DynamicColumn::I64(col) => assert_column_values(col, expected_col_values),
                    DynamicColumn::U64(col) => assert_column_values(col, expected_col_values),
                    DynamicColumn::F64(col) => assert_column_values(col, expected_col_values),
                    DynamicColumn::IpAddr(col) => assert_column_values(col, expected_col_values),
                    DynamicColumn::DateTime(col) => assert_column_values(col, expected_col_values),
                    DynamicColumn::Bytes(col) => assert_bytes_column_values(col, expected_col_values, false),
                    DynamicColumn::Str(col) => assert_bytes_column_values(col, expected_col_values, true),
                }
            }
        }
    }
}

// This test creates 2 or 3 random small columnars and attempts to merge them.
// It compares the resulting merged dataframe with what would have been obtained by building the
// dataframe from the concatenated rows to begin with.
proptest! {
    #![proptest_config(ProptestConfig::with_cases(1000))]
    #[test]
    fn test_columnar_merge_proptest(columnar_docs in proptest::collection::vec(columnar_docs_strategy(), 2..=3)) {
        let columnar_readers: Vec<ColumnarReader> = columnar_docs.iter()
            .map(|docs| build_columnar(&docs[..]))
            .collect::<Vec<_>>();
        let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
        let mut output: Vec<u8> = Vec::new();
        let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]).into();
        crate::merge_columnar(&columnar_readers_arr[..], &[], stack_merge_order, &mut output).unwrap();
        let merged_columnar = ColumnarReader::open(output).unwrap();
        let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().cloned().flatten().collect();
        let expected_merged_columnar = build_columnar(&concat_rows[..]);
        assert_columnar_eq(&merged_columnar, &expected_merged_columnar);
    }
}

#[test]
fn test_columnar_merging_empty_columnar() {
    let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> =
        vec![vec![], vec![vec![("c1", ColumnValue::Str("a"))]]];
    let columnar_readers: Vec<ColumnarReader> = columnar_docs
        .iter()
        .map(|docs| build_columnar(&docs[..]))
        .collect::<Vec<_>>();
    let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
    let mut output: Vec<u8> = Vec::new();
    let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]);
    crate::merge_columnar(
        &columnar_readers_arr[..],
        &[],
        crate::MergeRowOrder::Stack(stack_merge_order),
        &mut output,
    )
    .unwrap();
    let merged_columnar = ColumnarReader::open(output).unwrap();
    let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
        columnar_docs.iter().cloned().flatten().collect();
    let expected_merged_columnar = build_columnar(&concat_rows[..]);
    assert_columnar_eq(&merged_columnar, &expected_merged_columnar);
}

#[test]
fn test_columnar_merging_number_columns() {
    let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> = vec![
        // columnar 1
        vec![
            // doc 1.1
            vec![("c2", ColumnValue::Numerical(0i64.into()))],
        ],
        // columnar 2
        vec![
            // doc 2.1
            vec![("c2", ColumnValue::Numerical(0u64.into()))],
            // doc 2.2
            vec![("c2", ColumnValue::Numerical(u64::MAX.into()))],
        ],
    ];
    let columnar_readers: Vec<ColumnarReader> = columnar_docs
        .iter()
        .map(|docs| build_columnar(&docs[..]))
        .collect::<Vec<_>>();
    let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
    let mut output: Vec<u8> = Vec::new();
    let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]);
    crate::merge_columnar(
        &columnar_readers_arr[..],
        &[],
        crate::MergeRowOrder::Stack(stack_merge_order),
        &mut output,
    )
    .unwrap();
    let merged_columnar = ColumnarReader::open(output).unwrap();
    let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
        columnar_docs.iter().cloned().flatten().collect();
    let expected_merged_columnar = build_columnar(&concat_rows[..]);
    assert_columnar_eq(&merged_columnar, &expected_merged_columnar);
}
// TODO add non-trivial remap and merge
// TODO test required_columns
// TODO document edge case: required_columns incompatible with values.

@@ -4,8 +4,6 @@ use std::{fmt, io, u64};

use ownedbytes::OwnedBytes;

use crate::ByteCount;

#[derive(Clone, Copy, Eq, PartialEq)]
pub struct TinySet(u64);

@@ -388,8 +386,8 @@ impl ReadOnlyBitSet {
    }

    /// Number of bytes used in the bitset representation.
    pub fn num_bytes(&self) -> ByteCount {
        self.data.len().into()
    pub fn num_bytes(&self) -> usize {
        self.data.len()
    }
}

@@ -1,114 +0,0 @@
use std::iter::Sum;
use std::ops::{Add, AddAssign};

use serde::{Deserialize, Serialize};

/// Indicates space usage in bytes
#[derive(Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ByteCount(u64);

impl std::fmt::Debug for ByteCount {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.human_readable())
    }
}

impl std::fmt::Display for ByteCount {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.human_readable())
    }
}

const SUFFIX_AND_THRESHOLD: [(&str, u64); 5] = [
    ("KB", 1_000),
    ("MB", 1_000_000),
    ("GB", 1_000_000_000),
    ("TB", 1_000_000_000_000),
    ("PB", 1_000_000_000_000_000),
];

impl ByteCount {
    #[inline]
    pub fn get_bytes(&self) -> u64 {
        self.0
    }

    pub fn human_readable(&self) -> String {
        for (suffix, threshold) in SUFFIX_AND_THRESHOLD.iter().rev() {
            if self.get_bytes() >= *threshold {
                let unit_num = self.get_bytes() as f64 / *threshold as f64;
                return format!("{:.2} {}", unit_num, suffix);
            }
        }
        format!("{:.2} B", self.get_bytes())
    }
}

impl From<u64> for ByteCount {
    fn from(value: u64) -> Self {
        ByteCount(value)
    }
}
impl From<usize> for ByteCount {
    fn from(value: usize) -> Self {
        ByteCount(value as u64)
    }
}

impl Sum for ByteCount {
    #[inline]
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        iter.fold(ByteCount::default(), |acc, x| acc + x)
    }
}

impl PartialEq<u64> for ByteCount {
    #[inline]
    fn eq(&self, other: &u64) -> bool {
        self.get_bytes() == *other
    }
}

impl PartialOrd<u64> for ByteCount {
    #[inline]
    fn partial_cmp(&self, other: &u64) -> Option<std::cmp::Ordering> {
        self.get_bytes().partial_cmp(other)
    }
}

impl Add for ByteCount {
    type Output = Self;

    #[inline]
    fn add(self, other: Self) -> Self {
        Self(self.get_bytes() + other.get_bytes())
    }
}

impl AddAssign for ByteCount {
    #[inline]
    fn add_assign(&mut self, other: Self) {
        *self = Self(self.get_bytes() + other.get_bytes());
    }
}

#[cfg(test)]
mod test {
    use crate::ByteCount;

    #[test]
    fn test_bytes() {
        assert_eq!(ByteCount::from(0u64).human_readable(), "0 B");
        assert_eq!(ByteCount::from(300u64).human_readable(), "300 B");
        assert_eq!(ByteCount::from(1_000_000u64).human_readable(), "1.00 MB");
        assert_eq!(ByteCount::from(1_500_000u64).human_readable(), "1.50 MB");
        assert_eq!(
            ByteCount::from(1_500_000_000u64).human_readable(),
            "1.50 GB"
        );
        assert_eq!(
            ByteCount::from(3_213_000_000_000u64).human_readable(),
            "3.21 TB"
        );
    }
}
@@ -36,16 +36,6 @@ pub struct DateTime {
}

impl DateTime {
    /// Minimum possible `DateTime` value.
    pub const MIN: DateTime = DateTime {
        timestamp_micros: i64::MIN,
    };

    /// Maximum possible `DateTime` value.
    pub const MAX: DateTime = DateTime {
        timestamp_micros: i64::MAX,
    };

    /// Create new from UNIX timestamp in seconds
    pub const fn from_timestamp_secs(seconds: i64) -> Self {
        Self {

@@ -1,63 +0,0 @@
use std::io::{self, Read, Write};

use crate::BinarySerializable;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
pub enum DictionaryKind {
    Fst = 1,
    SSTable = 2,
}

#[derive(Debug, Clone, PartialEq)]
pub struct DictionaryFooter {
    pub kind: DictionaryKind,
    pub version: u32,
}

impl DictionaryFooter {
    pub fn verify_equal(&self, other: &DictionaryFooter) -> io::Result<()> {
        if self.kind != other.kind {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "Invalid dictionary type, expected {:?}, found {:?}",
                    self.kind, other.kind
                ),
            ));
        }
        if self.version != other.version {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "Unsupported dictionary version, expected {}, found {}",
                    self.version, other.version
                ),
            ));
        }
        Ok(())
    }
}

impl BinarySerializable for DictionaryFooter {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        self.version.serialize(writer)?;
        (self.kind as u32).serialize(writer)
    }
    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        let version = u32::deserialize(reader)?;
        let kind = u32::deserialize(reader)?;
        let kind = match kind {
            1 => DictionaryKind::Fst,
            2 => DictionaryKind::SSTable,
            _ => {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("invalid dictionary kind: {kind}"),
                ))
            }
        };

        Ok(DictionaryFooter { kind, version })
    }
}
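
// Illustrative round-trip sketch (not part of the original source) showing how
// the footer's BinarySerializable impl above is meant to be used; all names
// come from the file itself.
fn footer_round_trip() -> io::Result<()> {
    let footer = DictionaryFooter {
        kind: DictionaryKind::SSTable,
        version: 1,
    };
    let mut bytes: Vec<u8> = Vec::new();
    footer.serialize(&mut bytes)?;
    let decoded = DictionaryFooter::deserialize(&mut &bytes[..])?;
    // verify_equal reports a kind or version mismatch as an io::Error.
    footer.verify_equal(&decoded)
}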

@@ -5,7 +5,7 @@ use std::{fmt, io};
use async_trait::async_trait;
use ownedbytes::{OwnedBytes, StableDeref};

use crate::{ByteCount, HasLen};
use crate::HasLen;

/// Objects that represent file sections in tantivy.
///
@@ -216,11 +216,6 @@ impl FileSlice {
    pub fn slice_to(&self, to_offset: usize) -> FileSlice {
        self.slice(0..to_offset)
    }

    /// Returns the byte count of the FileSlice.
    pub fn num_bytes(&self) -> ByteCount {
        self.range.len().into()
    }
}

#[async_trait]

@@ -5,18 +5,14 @@ use std::ops::Deref;
pub use byteorder::LittleEndian as Endianness;

mod bitset;
mod byte_count;
mod datetime;
mod dictionary_footer;
pub mod file_slice;
mod group_by;
mod serialize;
mod vint;
mod writer;
pub use bitset::*;
pub use byte_count::ByteCount;
pub use datetime::{DatePrecision, DateTime};
pub use dictionary_footer::*;
pub use group_by::GroupByIteratorExtended;
pub use ownedbytes::{OwnedBytes, StableDeref};
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};

@@ -42,7 +42,7 @@ fn main() -> tantivy::Result<()> {
                .set_index_option(IndexRecordOption::WithFreqs)
                .set_tokenizer("raw"),
        )
        .set_fast(None)
        .set_fast()
        .set_stored();
    schema_builder.add_text_field("category", text_fieldtype);
    schema_builder.add_f64_field("stock", FAST);
@@ -192,7 +192,7 @@ fn main() -> tantivy::Result<()> {
    //

    let agg_req: Aggregations = serde_json::from_str(agg_req_str)?;
    let collector = AggregationCollector::from_aggs(agg_req, Default::default());
    let collector = AggregationCollector::from_aggs(agg_req, None);

    let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
    let res2: Value = serde_json::to_value(agg_res)?;
@@ -204,7 +204,7 @@ fn main() -> tantivy::Result<()> {

    let agg_req: Aggregations = vec![(
        "group_by_stock".to_string(),
        Aggregation::Bucket(Box::new(BucketAggregation {
        Aggregation::Bucket(BucketAggregation {
            bucket_agg: BucketAggregationType::Range(RangeAggregation {
                field: "stock".to_string(),
                ranges: vec![
@@ -234,12 +234,12 @@ fn main() -> tantivy::Result<()> {
            )]
            .into_iter()
            .collect(),
        })),
        }),
    )]
    .into_iter()
    .collect();

    let collector = AggregationCollector::from_aggs(agg_req, Default::default());
    let collector = AggregationCollector::from_aggs(agg_req, None);
    // We use the `AllQuery` which will pass all documents to the AggregationCollector.
    let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();

@@ -287,7 +287,7 @@ fn main() -> tantivy::Result<()> {

    let agg_req: Aggregations = serde_json::from_str(agg_req_str)?;

    let collector = AggregationCollector::from_aggs(agg_req, Default::default());
    let collector = AggregationCollector::from_aggs(agg_req, None);

    let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
    let res: Value = serde_json::to_value(agg_res)?;

@@ -12,7 +12,7 @@
use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

@@ -50,13 +50,12 @@ fn main() -> tantivy::Result<()> {

    // This tokenizer lowers all of the text (to help with stop word matching)
    // then removes all instances of `the` and `and` from the corpus
    let tokenizer = TextAnalyzer::builder(SimpleTokenizer)
    let tokenizer = TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(StopWordFilter::remove(vec![
            "the".to_string(),
            "and".to_string(),
        ]))
        .build();
        ]));

    index.tokenizers().register("stoppy", tokenizer);

@@ -1,611 +0,0 @@
#[cfg(all(test, feature = "unstable"))]
mod bench {

    use columnar::Cardinality;
    use rand::prelude::SliceRandom;
    use rand::{thread_rng, Rng};
    use test::{self, Bencher};

    use super::*;
    use crate::aggregation::bucket::{
        CustomOrder, HistogramAggregation, HistogramBounds, Order, OrderTarget, TermsAggregation,
    };
    use crate::aggregation::metric::StatsAggregation;
    use crate::query::AllQuery;
    use crate::schema::{Schema, TextFieldIndexing, FAST, STRING};
    use crate::Index;

    fn get_test_index_bench(cardinality: Cardinality) -> crate::Result<Index> {
        let mut schema_builder = Schema::builder();
        let text_fieldtype = crate::schema::TextOptions::default()
            .set_indexing_options(
                TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
            )
            .set_stored();
        let text_field = schema_builder.add_text_field("text", text_fieldtype);
        let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
        let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
        let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
        let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
        let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
        let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
        let index = Index::create_from_tempdir(schema_builder.build())?;
        let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
        let many_terms_data = (0..150_000)
            .map(|num| format!("author{}", num))
            .collect::<Vec<_>>();
        {
            let mut rng = thread_rng();
            let mut index_writer = index.writer_with_num_threads(1, 100_000_000)?;
            // To make the different test cases comparable we just change one doc to force the
            // cardinality
            if cardinality == Cardinality::Optional {
                index_writer.add_document(doc!())?;
            }
            if cardinality == Cardinality::Multivalued {
                index_writer.add_document(doc!(
                    text_field => "cool",
                    text_field => "cool",
                    text_field_many_terms => "cool",
                    text_field_many_terms => "cool",
                    text_field_few_terms => "cool",
                    text_field_few_terms => "cool",
                    score_field => 1u64,
                    score_field => 1u64,
                    score_field_f64 => 1.0,
                    score_field_f64 => 1.0,
                    score_field_i64 => 1i64,
                    score_field_i64 => 1i64,
                ))?;
            }
            for _ in 0..1_000_000 {
                let val: f64 = rng.gen_range(0.0..1_000_000.0);
                index_writer.add_document(doc!(
                    text_field => "cool",
                    text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
                    text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
                    score_field => val as u64,
                    score_field_f64 => val,
                    score_field_i64 => val as i64,
                ))?;
            }
            // writing the segment
            index_writer.commit()?;
        }

        Ok(index)
    }
    use paste::paste;
    #[macro_export]
    macro_rules! bench_all_cardinalities {
        ( $x:ident ) => {
            paste! {
                #[bench]
                fn $x(b: &mut Bencher) {
                    [<$x _card>](b, Cardinality::Full)
                }

                #[bench]
                fn [<$x _opt>](b: &mut Bencher) {
                    [<$x _card>](b, Cardinality::Optional)
                }

                #[bench]
                fn [<$x _multi>](b: &mut Bencher) {
                    [<$x _card>](b, Cardinality::Multivalued)
                }
            }
        };
    }
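
    // Illustrative expansion (not part of the original source):
    // `bench_all_cardinalities!(bench_foo)` uses `paste!` to generate three
    // benchmarks that all delegate to a `bench_foo_card` function the caller
    // must define, one per cardinality:
    //
    //     #[bench]
    //     fn bench_foo(b: &mut Bencher) { bench_foo_card(b, Cardinality::Full) }
    //     #[bench]
    //     fn bench_foo_opt(b: &mut Bencher) { bench_foo_card(b, Cardinality::Optional) }
    //     #[bench]
    //     fn bench_foo_multi(b: &mut Bencher) { bench_foo_card(b, Cardinality::Multivalued) }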

    bench_all_cardinalities!(bench_aggregation_average_u64);

    fn bench_aggregation_average_u64_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();
        let text_field = reader.searcher().schema().get_field("text").unwrap();

        b.iter(|| {
            let term_query = TermQuery::new(
                Term::from_field_text(text_field, "cool"),
                IndexRecordOption::Basic,
            );

            let agg_req_1: Aggregations = vec![(
                "average".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("score".to_string()),
                )),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&term_query, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_stats_f64);

    fn bench_aggregation_stats_f64_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();
        let text_field = reader.searcher().schema().get_field("text").unwrap();

        b.iter(|| {
            let term_query = TermQuery::new(
                Term::from_field_text(text_field, "cool"),
                IndexRecordOption::Basic,
            );

            let agg_req_1: Aggregations = vec![(
                "average_f64".to_string(),
                Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
                    "score_f64".to_string(),
                ))),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&term_query, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_average_f64);

    fn bench_aggregation_average_f64_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();
        let text_field = reader.searcher().schema().get_field("text").unwrap();

        b.iter(|| {
            let term_query = TermQuery::new(
                Term::from_field_text(text_field, "cool"),
                IndexRecordOption::Basic,
            );

            let agg_req_1: Aggregations = vec![(
                "average_f64".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("score_f64".to_string()),
                )),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&term_query, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_average_u64_and_f64);

    fn bench_aggregation_average_u64_and_f64_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();
        let text_field = reader.searcher().schema().get_field("text").unwrap();

        b.iter(|| {
            let term_query = TermQuery::new(
                Term::from_field_text(text_field, "cool"),
                IndexRecordOption::Basic,
            );

            let agg_req_1: Aggregations = vec![
                (
                    "average_f64".to_string(),
                    Aggregation::Metric(MetricAggregation::Average(
                        AverageAggregation::from_field_name("score_f64".to_string()),
                    )),
                ),
                (
                    "average".to_string(),
                    Aggregation::Metric(MetricAggregation::Average(
                        AverageAggregation::from_field_name("score".to_string()),
                    )),
                ),
            ]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&term_query, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_terms_few);

    fn bench_aggregation_terms_few_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let agg_req: Aggregations = vec![(
                "my_texts".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                            field: "text_few_terms".to_string(),
                            ..Default::default()
                        }),
                        sub_aggregation: Default::default(),
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);

    fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let sub_agg_req: Aggregations = vec![(
                "average_f64".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("score_f64".to_string()),
                )),
            )]
            .into_iter()
            .collect();

            let agg_req: Aggregations = vec![(
                "my_texts".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                            field: "text_many_terms".to_string(),
                            ..Default::default()
                        }),
                        sub_aggregation: sub_agg_req,
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_terms_many2);

    fn bench_aggregation_terms_many2_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let agg_req: Aggregations = vec![(
                "my_texts".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                            field: "text_many_terms".to_string(),
                            ..Default::default()
                        }),
                        sub_aggregation: Default::default(),
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_terms_many_order_by_term);

    fn bench_aggregation_terms_many_order_by_term_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let agg_req: Aggregations = vec![(
                "my_texts".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Terms(TermsAggregation {
                            field: "text_many_terms".to_string(),
                            order: Some(CustomOrder {
                                order: Order::Desc,
                                target: OrderTarget::Key,
                            }),
                            ..Default::default()
                        }),
                        sub_aggregation: Default::default(),
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }
    bench_all_cardinalities!(bench_aggregation_range_only);

    fn bench_aggregation_range_only_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let agg_req_1: Aggregations = vec![(
                "rangef64".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Range(RangeAggregation {
                            field: "score_f64".to_string(),
                            ranges: vec![
                                (3f64..7000f64).into(),
                                (7000f64..20000f64).into(),
                                (20000f64..30000f64).into(),
                                (30000f64..40000f64).into(),
                                (40000f64..50000f64).into(),
                                (50000f64..60000f64).into(),
                            ],
                            ..Default::default()
                        }),
                        sub_aggregation: Default::default(),
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_range_with_avg);

    fn bench_aggregation_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let sub_agg_req: Aggregations = vec![(
                "average_f64".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("score_f64".to_string()),
                )),
            )]
            .into_iter()
            .collect();

            let agg_req_1: Aggregations = vec![(
                "rangef64".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Range(RangeAggregation {
                            field: "score_f64".to_string(),
                            ranges: vec![
                                (3f64..7000f64).into(),
                                (7000f64..20000f64).into(),
                                (20000f64..30000f64).into(),
                                (30000f64..40000f64).into(),
                                (40000f64..50000f64).into(),
                                (50000f64..60000f64).into(),
                            ],
                            ..Default::default()
                        }),
                        sub_aggregation: sub_agg_req,
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }
    // Hard bounds uses a different algorithm, because it actually limits the collection range.
    //
    bench_all_cardinalities!(bench_aggregation_histogram_only_hard_bounds);

    fn bench_aggregation_histogram_only_hard_bounds_card(
        b: &mut Bencher,
        cardinality: Cardinality,
    ) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let agg_req_1: Aggregations = vec![(
                "rangef64".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
                            field: "score_f64".to_string(),
                            interval: 100f64,
                            hard_bounds: Some(HistogramBounds {
                                min: 1000.0,
                                max: 300_000.0,
                            }),
                            ..Default::default()
                        }),
                        sub_aggregation: Default::default(),
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);
            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_histogram_with_avg);

    fn bench_aggregation_histogram_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let sub_agg_req: Aggregations = vec![(
                "average_f64".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("score_f64".to_string()),
                )),
            )]
            .into_iter()
            .collect();

            let agg_req_1: Aggregations = vec![(
                "rangef64".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
                            field: "score_f64".to_string(),
                            interval: 100f64, // 1000 buckets
                            ..Default::default()
                        }),
                        sub_aggregation: sub_agg_req,
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_histogram_only);

    fn bench_aggregation_histogram_only_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();

        b.iter(|| {
            let agg_req_1: Aggregations = vec![(
                "rangef64".to_string(),
                Aggregation::Bucket(
                    BucketAggregation {
                        bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
                            field: "score_f64".to_string(),
                            interval: 100f64, // 1000 buckets
                            ..Default::default()
                        }),
                        sub_aggregation: Default::default(),
                    }
                    .into(),
                ),
            )]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
        });
    }

    bench_all_cardinalities!(bench_aggregation_avg_and_range_with_avg);

    fn bench_aggregation_avg_and_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
        let index = get_test_index_bench(cardinality).unwrap();
        let reader = index.reader().unwrap();
        let text_field = reader.searcher().schema().get_field("text").unwrap();

        b.iter(|| {
            let term_query = TermQuery::new(
                Term::from_field_text(text_field, "cool"),
                IndexRecordOption::Basic,
            );

            let sub_agg_req_1: Aggregations = vec![(
                "average_in_range".to_string(),
                Aggregation::Metric(MetricAggregation::Average(
                    AverageAggregation::from_field_name("score".to_string()),
                )),
            )]
            .into_iter()
            .collect();

            let agg_req_1: Aggregations = vec![
                (
                    "average".to_string(),
                    Aggregation::Metric(MetricAggregation::Average(
                        AverageAggregation::from_field_name("score".to_string()),
                    )),
                ),
                (
                    "rangef64".to_string(),
                    Aggregation::Bucket(
                        BucketAggregation {
                            bucket_agg: BucketAggregationType::Range(RangeAggregation {
                                field: "score_f64".to_string(),
                                ranges: vec![
                                    (3f64..7000f64).into(),
                                    (7000f64..20000f64).into(),
                                    (20000f64..60000f64).into(),
                                ],
                                ..Default::default()
                            }),
                            sub_aggregation: sub_agg_req_1,
                        }
                        .into(),
                    ),
                ),
            ]
            .into_iter()
            .collect();

            let collector = get_collector(agg_req_1);

            let searcher = reader.searcher();
            searcher.search(&term_query, &collector).unwrap()
        });
    }
}
@@ -1,95 +0,0 @@
use std::collections::HashMap;
use std::sync::atomic::AtomicU64;
use std::sync::Arc;

use common::ByteCount;

use super::collector::DEFAULT_MEMORY_LIMIT;
use super::{AggregationError, DEFAULT_BUCKET_LIMIT};
use crate::TantivyError;

/// An estimate for memory consumption. Non-recursive.
pub trait MemoryConsumption {
    fn memory_consumption(&self) -> usize;
}

impl<K, V, S> MemoryConsumption for HashMap<K, V, S> {
    fn memory_consumption(&self) -> usize {
        let num_items = self.capacity();
        (std::mem::size_of::<K>() + std::mem::size_of::<V>()) * num_items
    }
}
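
// A worked example of the estimate above (illustrative, not part of the
// original source). The estimate is based on `capacity()`, not `len()`, and
// ignores heap allocations inside keys/values and the hash-table control bytes.
fn memory_consumption_example() {
    let map: HashMap<u64, u64> = HashMap::with_capacity(8);
    // capacity() is at least the requested 8, so the estimate is at least
    // (size_of::<u64>() + size_of::<u64>()) * 8 = (8 + 8) * 8 = 128 bytes.
    assert!(map.memory_consumption() >= 128);
}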

/// Aggregation memory limit after which the request fails. Defaults to DEFAULT_MEMORY_LIMIT
/// (500MB). The limit is shared by all SegmentCollectors.
pub struct AggregationLimits {
    /// The counter which is shared between the aggregations for one request.
    memory_consumption: Arc<AtomicU64>,
    /// The memory_limit in bytes
    memory_limit: ByteCount,
    /// The maximum number of buckets _returned_.
    /// This is not counting intermediate buckets.
    bucket_limit: u32,
}
impl Clone for AggregationLimits {
    fn clone(&self) -> Self {
        Self {
            memory_consumption: Arc::clone(&self.memory_consumption),
            memory_limit: self.memory_limit,
            bucket_limit: self.bucket_limit,
        }
    }
}

impl Default for AggregationLimits {
    fn default() -> Self {
        Self {
            memory_consumption: Default::default(),
            memory_limit: DEFAULT_MEMORY_LIMIT.into(),
            bucket_limit: DEFAULT_BUCKET_LIMIT,
        }
    }
}

impl AggregationLimits {
    /// *memory_limit*
    /// memory_limit is defined in bytes.
    /// Aggregation fails when the estimated memory consumption of the aggregation is higher than
    /// memory_limit.
    /// memory_limit will default to `DEFAULT_MEMORY_LIMIT` (500MB)
    ///
    /// *bucket_limit*
    /// Limits the maximum number of buckets returned from an aggregation request.
    /// bucket_limit will default to `DEFAULT_BUCKET_LIMIT` (65000)
    pub fn new(memory_limit: Option<u64>, bucket_limit: Option<u32>) -> Self {
        Self {
            memory_consumption: Default::default(),
            memory_limit: memory_limit.unwrap_or(DEFAULT_MEMORY_LIMIT).into(),
            bucket_limit: bucket_limit.unwrap_or(DEFAULT_BUCKET_LIMIT),
        }
    }
    pub(crate) fn validate_memory_consumption(&self) -> crate::Result<()> {
        if self.get_memory_consumed() > self.memory_limit {
            return Err(TantivyError::AggregationError(
                AggregationError::MemoryExceeded {
                    limit: self.memory_limit,
                    current: self.get_memory_consumed(),
                },
            ));
        }
        Ok(())
    }
    pub(crate) fn add_memory_consumed(&self, num_bytes: u64) {
        self.memory_consumption
            .fetch_add(num_bytes, std::sync::atomic::Ordering::Relaxed);
    }
    /// Returns the estimated memory consumed by the aggregations
    pub fn get_memory_consumed(&self) -> ByteCount {
        self.memory_consumption
            .load(std::sync::atomic::Ordering::Relaxed)
            .into()
    }
    pub(crate) fn get_bucket_limit(&self) -> u32 {
        self.bucket_limit
    }
}
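
// A usage sketch (illustrative, not part of the original source). `None` falls
// back to the defaults (500MB / 65000 buckets); the struct is meant to be
// cloned into every SegmentCollector of one request, sharing the Arc counter.
fn aggregation_limits_example() {
    let limits = AggregationLimits::new(Some(100_000_000), Some(10_000));
    limits.add_memory_consumed(1024);
    assert!(limits.validate_memory_consumption().is_ok());
    assert_eq!(limits.get_memory_consumed(), 1024u64);
}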
|
||||
@@ -16,14 +16,14 @@
|
||||
//! let agg_req1: Aggregations = vec![
|
||||
//! (
|
||||
//! "range".to_string(),
|
||||
//! Aggregation::Bucket(Box::new(BucketAggregation {
|
||||
//! Aggregation::Bucket(BucketAggregation {
|
||||
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{
|
||||
//! field: "score".to_string(),
|
||||
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
|
||||
//! keyed: false,
|
||||
//! }),
|
||||
//! sub_aggregation: Default::default(),
|
||||
//! })),
|
||||
//! }),
|
||||
//! ),
|
||||
//! ]
|
||||
//! .into_iter()
|
||||
@@ -143,7 +143,7 @@ pub fn get_fast_field_names(aggs: &Aggregations) -> HashSet<String> {
|
||||
#[serde(untagged)]
|
||||
pub enum Aggregation {
|
||||
/// Bucket aggregation, see [`BucketAggregation`] for details.
|
||||
Bucket(Box<BucketAggregation>),
|
||||
Bucket(BucketAggregation),
|
||||
/// Metric aggregation, see [`MetricAggregation`] for details.
|
||||
Metric(MetricAggregation),
|
||||
}
|
||||
@@ -301,7 +301,7 @@ mod tests {
|
||||
fn serialize_to_json_test() {
|
||||
let agg_req1: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(Box::new(BucketAggregation {
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score".to_string(),
|
||||
ranges: vec![
|
||||
@@ -313,7 +313,7 @@ mod tests {
|
||||
keyed: true,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
})),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
@@ -351,7 +351,7 @@ mod tests {
|
||||
let agg_req2: Aggregations = vec![
|
||||
(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(Box::new(BucketAggregation {
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score2".to_string(),
|
||||
ranges: vec![
|
||||
@@ -363,7 +363,7 @@ mod tests {
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
})),
|
||||
}),
|
||||
),
|
||||
(
|
||||
"metric".to_string(),
|
||||
@@ -377,7 +377,7 @@ mod tests {
|
||||
|
||||
let agg_req1: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(Box::new(BucketAggregation {
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score".to_string(),
|
||||
ranges: vec![
|
||||
@@ -389,7 +389,7 @@ mod tests {
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: agg_req2,
|
||||
})),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
@@ -1,6 +1,9 @@
//! This will enhance the request tree with access to the fastfield and metadata.

use columnar::{Column, ColumnBlockAccessor, ColumnType, StrColumn};
use std::rc::Rc;
use std::sync::atomic::AtomicU32;

use columnar::{Column, ColumnType, StrColumn};

use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{
@@ -10,9 +13,9 @@ use super::metric::{
AverageAggregation, CountAggregation, MaxAggregation, MinAggregation, StatsAggregation,
SumAggregation,
};
use super::segment_agg_result::AggregationLimits;
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::SegmentReader;
use crate::{SegmentReader, TantivyError};

#[derive(Clone, Default)]
pub(crate) struct AggregationsWithAccessor {
@@ -42,17 +45,7 @@ pub struct BucketAggregationWithAccessor {
pub(crate) field_type: ColumnType,
pub(crate) bucket_agg: BucketAggregationType,
pub(crate) sub_aggregation: AggregationsWithAccessor,
pub(crate) limits: AggregationLimits,
pub(crate) column_block_accessor: ColumnBlockAccessor<u64>,
}

fn get_numeric_or_date_column_types() -> &'static [ColumnType] {
&[
ColumnType::F64,
ColumnType::U64,
ColumnType::I64,
ColumnType::DateTime,
]
pub(crate) bucket_count: BucketCount,
}

impl BucketAggregationWithAccessor {
@@ -60,37 +53,26 @@ impl BucketAggregationWithAccessor {
bucket: &BucketAggregationType,
sub_aggregation: &Aggregations,
reader: &SegmentReader,
limits: AggregationLimits,
bucket_count: Rc<AtomicU32>,
max_bucket_count: u32,
) -> crate::Result<BucketAggregationWithAccessor> {
let mut str_dict_column = None;
let (accessor, field_type) = match &bucket {
BucketAggregationType::Range(RangeAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?,
}) => get_ff_reader_and_validate(reader, field_name)?,
BucketAggregationType::Histogram(HistogramAggregation {
field: field_name, ..
}) => get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?,
}) => get_ff_reader_and_validate(reader, field_name)?,
BucketAggregationType::DateHistogram(DateHistogramAggregationReq {
field: field_name,
..
}) => get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?,
}) => get_ff_reader_and_validate(reader, field_name)?,
BucketAggregationType::Terms(TermsAggregation {
field: field_name, ..
}) => {
str_dict_column = reader.fast_fields().str(field_name)?;
get_ff_reader_and_validate(reader, field_name, None)?
get_ff_reader_and_validate(reader, field_name)?
}
};
let sub_aggregation = sub_aggregation.clone();
@@ -100,12 +82,15 @@ impl BucketAggregationWithAccessor {
sub_aggregation: get_aggs_with_accessor_and_validate(
&sub_aggregation,
reader,
&limits.clone(),
bucket_count.clone(),
max_bucket_count,
)?,
bucket_agg: bucket.clone(),
str_dict_column,
limits,
column_block_accessor: Default::default(),
bucket_count: BucketCount {
bucket_count,
max_bucket_count,
},
})
}
}
@@ -116,7 +101,6 @@ pub struct MetricAggregationWithAccessor {
pub metric: MetricAggregation,
pub field_type: ColumnType,
pub accessor: Column<u64>,
pub column_block_accessor: ColumnBlockAccessor<u64>,
}

impl MetricAggregationWithAccessor {
@@ -131,17 +115,12 @@ impl MetricAggregationWithAccessor {
| MetricAggregation::Min(MinAggregation { field: field_name })
| MetricAggregation::Stats(StatsAggregation { field: field_name })
| MetricAggregation::Sum(SumAggregation { field: field_name }) => {
let (accessor, field_type) = get_ff_reader_and_validate(
reader,
field_name,
Some(get_numeric_or_date_column_types()),
)?;
let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?;

Ok(MetricAggregationWithAccessor {
accessor,
field_type,
metric: metric.clone(),
column_block_accessor: Default::default(),
})
}
}
@@ -151,7 +130,8 @@ impl MetricAggregationWithAccessor {
pub(crate) fn get_aggs_with_accessor_and_validate(
aggs: &Aggregations,
reader: &SegmentReader,
limits: &AggregationLimits,
bucket_count: Rc<AtomicU32>,
max_bucket_count: u32,
) -> crate::Result<AggregationsWithAccessor> {
let mut metrics = vec![];
let mut buckets = vec![];
@@ -163,7 +143,8 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
&bucket.bucket_agg,
&bucket.sub_aggregation,
reader,
limits.clone(),
Rc::clone(&bucket_count),
max_bucket_count,
)?,
)),
Aggregation::Metric(metric) => metrics.push((
@@ -182,16 +163,12 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
fn get_ff_reader_and_validate(
reader: &SegmentReader,
field_name: &str,
allowed_column_types: Option<&[ColumnType]>,
) -> crate::Result<(columnar::Column<u64>, ColumnType)> {
let ff_fields = reader.fast_fields();
let ff_field_with_type = ff_fields
.u64_lenient_for_type(allowed_column_types, field_name)?
.unwrap_or_else(|| {
(
Column::build_empty_column(reader.num_docs()),
ColumnType::U64,
)
});
.u64_lenient_with_type(field_name)?
.ok_or_else(|| {
TantivyError::InvalidArgument(format!("No fast field found for field: {}", field_name))
})?;
Ok(ff_field_with_type)
}

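The two sides of the hunk above handle a missing fast field differently: one falls back to an empty column, so aggregations over an absent field return empty results, while the other returns an `InvalidArgument` error. A minimal standalone sketch of the two strategies, with a stubbed `Column` type standing in for `columnar::Column<u64>` (illustrative only, not tantivy's API):

// Stubbed column type standing in for columnar::Column<u64>.
struct Column {
    num_docs: u32,
}

impl Column {
    fn build_empty(num_docs: u32) -> Self {
        Column { num_docs }
    }
}

// Fallback strategy: a missing fast field yields an empty column, so
// aggregations over an absent field produce empty results instead of failing.
fn reader_with_fallback(col: Option<Column>, num_docs: u32) -> Column {
    col.unwrap_or_else(|| Column::build_empty(num_docs))
}

// Error strategy: a missing fast field is a hard error for the whole request.
fn reader_or_error(col: Option<Column>, field: &str) -> Result<Column, String> {
    col.ok_or_else(|| format!("No fast field found for field: {}", field))
}

fn main() {
    assert_eq!(reader_with_fallback(None, 100).num_docs, 100);
    assert!(reader_or_error(None, "score").is_err());
}
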
@@ -11,7 +11,6 @@ use super::agg_req::BucketAggregationInternal;
use super::bucket::GetDocCount;
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
use super::metric::{SingleMetricResult, Stats};
use super::segment_agg_result::AggregationLimits;
use super::Key;
use crate::TantivyError;

@@ -20,13 +19,6 @@ use crate::TantivyError;
pub struct AggregationResults(pub FxHashMap<String, AggregationResult>);

impl AggregationResults {
pub(crate) fn get_bucket_count(&self) -> u64 {
self.0
.values()
.map(|agg| agg.get_bucket_count())
.sum::<u64>()
}

pub(crate) fn get_value_from_aggregation(
&self,
name: &str,
@@ -55,13 +47,6 @@ pub enum AggregationResult {
}

impl AggregationResult {
pub(crate) fn get_bucket_count(&self) -> u64 {
match self {
AggregationResult::BucketResult(bucket) => bucket.get_bucket_count(),
AggregationResult::MetricResult(_) => 0,
}
}

pub(crate) fn get_value_from_aggregation(
&self,
_name: &str,
@@ -168,28 +153,9 @@ pub enum BucketResult {
}

impl BucketResult {
pub(crate) fn get_bucket_count(&self) -> u64 {
match self {
BucketResult::Range { buckets } => {
buckets.iter().map(|bucket| bucket.get_bucket_count()).sum()
}
BucketResult::Histogram { buckets } => {
buckets.iter().map(|bucket| bucket.get_bucket_count()).sum()
}
BucketResult::Terms {
buckets,
sum_other_doc_count: _,
doc_count_error_upper_bound: _,
} => buckets.iter().map(|bucket| bucket.get_bucket_count()).sum(),
}
}

pub(crate) fn empty_from_req(
req: &BucketAggregationInternal,
limits: &AggregationLimits,
) -> crate::Result<Self> {
pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
empty_bucket.into_final_bucket_result(req, limits)
empty_bucket.into_final_bucket_result(req)
}
}

@@ -204,15 +170,6 @@ pub enum BucketEntries<T> {
HashMap(FxHashMap<String, T>),
}

impl<T> BucketEntries<T> {
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = &T> + 'a> {
match self {
BucketEntries::Vec(vec) => Box::new(vec.iter()),
BucketEntries::HashMap(map) => Box::new(map.values()),
}
}
}

/// This is the default entry for a bucket, which contains a key, count, and optionally
/// sub-aggregations.
///
@@ -252,11 +209,6 @@ pub struct BucketEntry {
/// Sub-aggregations in this bucket.
pub sub_aggregation: AggregationResults,
}
impl BucketEntry {
pub(crate) fn get_bucket_count(&self) -> u64 {
1 + self.sub_aggregation.get_bucket_count()
}
}
impl GetDocCount for &BucketEntry {
fn doc_count(&self) -> u64 {
self.doc_count
@@ -320,8 +272,3 @@ pub struct RangeBucketEntry {
#[serde(skip_serializing_if = "Option::is_none")]
pub to_as_string: Option<String>,
}
impl RangeBucketEntry {
pub(crate) fn get_bucket_count(&self) -> u64 {
1 + self.sub_aggregation.get_bucket_count()
}
}

File diff suppressed because it is too large
@@ -62,9 +62,7 @@ pub struct DateHistogramAggregationReq {
///
/// Fractional time values are not supported, but you can address this by shifting to another
/// time unit (e.g., `1.5h` could instead be specified as `90m`).
///
/// `Option` for validation, the parameter is not optional
pub fixed_interval: Option<String>,
pub fixed_interval: String,
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
/// 1))`.
pub offset: Option<String>,
@@ -114,7 +112,7 @@ impl DateHistogramAggregationReq {
self.validate()?;
Ok(HistogramAggregation {
field: self.field.to_string(),
interval: parse_into_microseconds(self.fixed_interval.as_ref().unwrap())? as f64,
interval: parse_into_microseconds(&self.fixed_interval)? as f64,
offset: self
.offset
.as_ref()
@@ -129,18 +127,11 @@ impl DateHistogramAggregationReq {
}

fn validate(&self) -> crate::Result<()> {
if let Some(interval) = self.interval.as_ref() {
if self.interval.is_some() {
return Err(crate::TantivyError::InvalidArgument(format!(
"`interval` parameter {:?} in date histogram is unsupported, only \
`fixed_interval` is supported",
interval
)));
}
if let Some(interval) = self.date_interval.as_ref() {
return Err(crate::TantivyError::InvalidArgument(format!(
"`date_interval` parameter {:?} in date histogram is unsupported, only \
`fixed_interval` is supported",
interval
self.interval
)));
}
if self.format.is_some() {
@@ -149,13 +140,15 @@ impl DateHistogramAggregationReq {
));
}

if self.fixed_interval.is_none() {
if self.date_interval.is_some() {
return Err(crate::TantivyError::InvalidArgument(
"fixed_interval in date histogram is missing".to_string(),
"date_interval in date histogram is unsupported, only `fixed_interval` is \
supported"
.to_string(),
));
}

parse_into_microseconds(self.fixed_interval.as_ref().unwrap())?;
parse_into_microseconds(&self.fixed_interval)?;

Ok(())
}
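`parse_into_microseconds` is called above but its body is not part of this hunk. A rough standalone sketch of what such a fixed-interval parser can look like; the accepted units and error handling here are assumptions, not the actual implementation:

// Illustrative sketch of a fixed-interval parser like the
// `parse_into_microseconds` referenced above.
fn parse_into_microseconds(interval: &str) -> Result<i64, String> {
    // `ms` is two characters; check it before the single-char units.
    if let Some(num) = interval.strip_suffix("ms") {
        return num
            .parse::<i64>()
            .map(|n| n * 1_000)
            .map_err(|_| format!("invalid number in {:?}", interval));
    }
    let per_unit: i64 = match interval.chars().last() {
        Some('s') => 1_000_000,
        Some('m') => 60 * 1_000_000,
        Some('h') => 3_600 * 1_000_000,
        Some('d') => 86_400 * 1_000_000,
        _ => return Err(format!("unknown unit in {:?}", interval)),
    };
    let num: i64 = interval[..interval.len() - 1]
        .parse()
        .map_err(|_| format!("invalid number in {:?}", interval))?;
    Ok(num * per_unit)
}

fn main() {
    // `1.5h` is rejected (fractional values unsupported); `90m` works instead.
    assert_eq!(parse_into_microseconds("90m").unwrap(), 90 * 60 * 1_000_000);
    assert_eq!(parse_into_microseconds("30d").unwrap(), 30 * 86_400 * 1_000_000);
    assert!(parse_into_microseconds("1.5h").is_err());
}
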
@@ -477,34 +470,6 @@ mod tests {
});
assert_eq!(res, expected_res);

Ok(())
}
#[test]
fn histogram_test_invalid_req() -> crate::Result<()> {
let docs = vec![];

let index = get_test_index_from_docs(false, &docs)?;
let elasticsearch_compatible_json = json!(
{
"sales_over_time": {
"date_histogram": {
"field": "date",
"interval": "30d",
"offset": "-4d"
}
}
}
);

let agg_req: Aggregations =
serde_json::from_str(&serde_json::to_string(&elasticsearch_compatible_json).unwrap())
.unwrap();
let err = exec_request(agg_req, &index).unwrap_err();
assert_eq!(
err.to_string(),
r#"An invalid argument was passed: '`interval` parameter "30d" in date histogram is unsupported, only `fixed_interval` is supported'"#
);

Ok(())
}
}

@@ -7,7 +7,6 @@ use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use tantivy_bitpacker::minmax;

use crate::aggregation::agg_limits::MemoryConsumption;
use crate::aggregation::agg_req::AggregationsInternal;
use crate::aggregation::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor,
@@ -17,10 +16,10 @@ use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::{
build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
build_segment_agg_collector, SegmentAggregationCollector,
};
use crate::aggregation::{f64_from_fastfield_u64, format_date, VecWithNames};
use crate::TantivyError;
use crate::{DocId, TantivyError};

/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
/// Each document value is rounded down to its bucket.
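A standalone sketch of the "rounded down to its bucket" rule from the doc comment above, mirroring the spirit of the `get_bucket_pos_f64` and `get_bucket_key_from_pos` helpers used later in this diff (names and signatures here are illustrative):

// A value is mapped to the bucket whose key is the nearest interval
// multiple (plus offset) at or below it.
fn bucket_pos(val: f64, interval: f64, offset: f64) -> i64 {
    ((val - offset) / interval).floor() as i64
}

fn bucket_key(pos: i64, interval: f64, offset: f64) -> f64 {
    pos as f64 * interval + offset
}

fn main() {
    // With interval 3.5 and no offset, 10.0 falls into the bucket keyed 7.0.
    let pos = bucket_pos(10.0, 3.5, 0.0);
    assert_eq!(pos, 2);
    assert_eq!(bucket_key(pos, 3.5, 0.0), 7.0);
}
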
@@ -231,65 +230,51 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
})
}

#[inline]
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
self.collect_block(&[doc], agg_with_accessor)
}

#[inline]
fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
let bucket_agg_accessor = &mut agg_with_accessor.buckets.values[self.accessor_idx];

let mem_pre = self.get_memory_consumption();
let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor;
let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;

let bounds = self.bounds;
let interval = self.interval;
let offset = self.offset;
let get_bucket_pos = |val| (get_bucket_pos_f64(val, interval, offset) as i64);

bucket_agg_accessor
.column_block_accessor
.fetch_block(docs, &bucket_agg_accessor.accessor);
for doc in docs {
for val in accessor.values_for_doc(*doc) {
let val = self.f64_from_fastfield_u64(val);

for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
let val = self.f64_from_fastfield_u64(val);
let bucket_pos = get_bucket_pos(val);

let bucket_pos = get_bucket_pos(val);

if bounds.contains(val) {
let bucket = self.buckets.entry(bucket_pos).or_insert_with(|| {
let key = get_bucket_key_from_pos(bucket_pos as f64, interval, offset);
SegmentHistogramBucketEntry { key, doc_count: 0 }
});
bucket.doc_count += 1;
if let Some(sub_aggregation_blueprint) = self.sub_aggregation_blueprint.as_mut() {
self.sub_aggregations
.entry(bucket_pos)
.or_insert_with(|| sub_aggregation_blueprint.clone())
.collect(doc, &mut bucket_agg_accessor.sub_aggregation)?;
if bounds.contains(val) {
self.increment_bucket(
bucket_pos,
*doc,
sub_aggregation_accessor,
interval,
offset,
)?;
}
}
}

let mem_delta = self.get_memory_consumption() - mem_pre;
let limits = &agg_with_accessor.buckets.values[self.accessor_idx].limits;
limits.add_memory_consumed(mem_delta as u64);
limits.validate_memory_consumption()?;

Ok(())
}

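The `collect_block` hunk above charges only the per-batch growth against a shared memory budget: measure before the batch, measure after, add the delta, then validate. A standalone sketch of that accounting pattern; the `Limits` type below is a stand-in, not tantivy's `AggregationLimits`:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Shared memory budget, charged incrementally by every collector.
struct Limits {
    consumed: Arc<AtomicU64>,
    limit: u64,
}

impl Limits {
    fn add_memory_consumed(&self, delta: u64) {
        self.consumed.fetch_add(delta, Ordering::Relaxed);
    }
    fn validate_memory_consumption(&self) -> Result<(), String> {
        let current = self.consumed.load(Ordering::Relaxed);
        if current > self.limit {
            return Err(format!(
                "Aborting aggregation because memory limit was exceeded. Limit: {}, Current: {}",
                self.limit, current
            ));
        }
        Ok(())
    }
}

fn main() {
    let limits = Limits { consumed: Arc::new(AtomicU64::new(0)), limit: 5_000 };
    let mem_pre = 1_000u64; // e.g. buckets.memory_consumption() before the batch
    let mem_post = 7_000u64; // and after the batch
    limits.add_memory_consumed(mem_post - mem_pre);
    assert!(limits.validate_memory_consumption().is_err());
}
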
fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
let sub_aggregation_accessor =
&mut agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;

for sub_aggregation in self.sub_aggregations.values_mut() {
sub_aggregation.flush(sub_aggregation_accessor)?;
@@ -300,12 +285,6 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
}

impl SegmentHistogramCollector {
fn get_memory_consumption(&self) -> usize {
let self_mem = std::mem::size_of::<Self>();
let sub_aggs_mem = self.sub_aggregations.memory_consumption();
let buckets_mem = self.buckets.memory_consumption();
self_mem + sub_aggs_mem + buckets_mem
}
pub fn into_intermediate_bucket_result(
self,
agg_with_accessor: &BucketAggregationWithAccessor,
@@ -364,6 +343,29 @@ impl SegmentHistogramCollector {
})
}

#[inline]
fn increment_bucket(
&mut self,
bucket_pos: i64,
doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor,
interval: f64,
offset: f64,
) -> crate::Result<()> {
let bucket = self.buckets.entry(bucket_pos).or_insert_with(|| {
let key = get_bucket_key_from_pos(bucket_pos as f64, interval, offset);
SegmentHistogramBucketEntry { key, doc_count: 0 }
});
bucket.doc_count += 1;
if let Some(sub_aggregation_blueprint) = self.sub_aggregation_blueprint.as_mut() {
self.sub_aggregations
.entry(bucket_pos)
.or_insert_with(|| sub_aggregation_blueprint.clone())
.collect(doc, bucket_with_accessor)?;
}
Ok(())
}

#[inline]
fn f64_from_fastfield_u64(&self, val: u64) -> f64 {
f64_from_fastfield_u64(val, &self.column_type)
@@ -385,7 +387,6 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
buckets: Vec<IntermediateHistogramBucketEntry>,
histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal,
limits: &AggregationLimits,
) -> crate::Result<Vec<BucketEntry>> {
// Generate the full list of buckets without gaps.
//
@@ -393,17 +394,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
// extended_bounds from the request
let min_max = minmax(buckets.iter().map(|bucket| bucket.key));

// memory check upfront
let (_, first_bucket_num, last_bucket_num) =
generate_bucket_pos_with_opt_minmax(histogram_req, min_max);
let added_buckets = (first_bucket_num..=last_bucket_num)
.count()
.saturating_sub(buckets.len());
limits.add_memory_consumed(
added_buckets as u64 * std::mem::size_of::<IntermediateHistogramBucketEntry>() as u64,
);
limits.validate_memory_consumption()?;
// create buckets
// TODO add memory check
let fill_gaps_buckets = generate_buckets_with_opt_minmax(histogram_req, min_max);

let empty_sub_aggregation = IntermediateAggregationResults::empty_from_req(sub_aggregation);
@@ -432,9 +423,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
sub_aggregation: empty_sub_aggregation.clone(),
},
})
.map(|intermediate_bucket| {
intermediate_bucket.into_final_bucket_entry(sub_aggregation, limits)
})
.map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
.collect::<crate::Result<Vec<_>>>()
}

@@ -444,26 +433,18 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
column_type: Option<ColumnType>,
histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal,
limits: &AggregationLimits,
) -> crate::Result<Vec<BucketEntry>> {
let mut buckets = if histogram_req.min_doc_count() == 0 {
// With min_doc_count != 0, we may need to add buckets, so that there are no
// gaps, since intermediate result does not contain empty buckets (filtered to
// reduce serialization size).

intermediate_buckets_to_final_buckets_fill_gaps(
buckets,
histogram_req,
sub_aggregation,
limits,
)?
intermediate_buckets_to_final_buckets_fill_gaps(buckets, histogram_req, sub_aggregation)?
} else {
buckets
.into_iter()
.filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
.map(|histogram_bucket| {
histogram_bucket.into_final_bucket_entry(sub_aggregation, limits)
})
.map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
.collect::<crate::Result<Vec<_>>>()?
};

@@ -502,27 +483,15 @@ fn get_req_min_max(req: &HistogramAggregation, min_max: Option<(f64, f64)>) -> (
/// Generates buckets with req.interval
/// Range is computed for provided min_max and request extended_bounds/hard_bounds
/// returns empty vec when there is no range to span
pub(crate) fn generate_bucket_pos_with_opt_minmax(
pub(crate) fn generate_buckets_with_opt_minmax(
req: &HistogramAggregation,
min_max: Option<(f64, f64)>,
) -> (f64, i64, i64) {
) -> Vec<f64> {
let (min, max) = get_req_min_max(req, min_max);

let offset = req.offset.unwrap_or(0.0);
let first_bucket_num = get_bucket_pos_f64(min, req.interval, offset) as i64;
let last_bucket_num = get_bucket_pos_f64(max, req.interval, offset) as i64;
(offset, first_bucket_num, last_bucket_num)
}

/// Generates buckets with req.interval
/// Range is computed for provided min_max and request extended_bounds/hard_bounds
/// returns empty vec when there is no range to span
pub(crate) fn generate_buckets_with_opt_minmax(
req: &HistogramAggregation,
min_max: Option<(f64, f64)>,
) -> Vec<f64> {
let (offset, first_bucket_num, last_bucket_num) =
generate_bucket_pos_with_opt_minmax(req, min_max);
let mut buckets = Vec::with_capacity((first_bucket_num..=last_bucket_num).count());
for bucket_pos in first_bucket_num..=last_bucket_num {
let bucket_key = bucket_pos as f64 * req.interval + offset;
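A standalone sketch of the helper split in the hunk above: one function derives the inclusive bucket-position range, the other materializes the bucket keys from it. The request struct is reduced to the two fields the math needs (illustrative, not the real `HistogramAggregation`):

struct HistogramReq {
    interval: f64,
    offset: f64,
}

// Inclusive range of bucket positions spanning [min, max].
fn bucket_pos_range(req: &HistogramReq, min: f64, max: f64) -> (i64, i64) {
    let first = ((min - req.offset) / req.interval).floor() as i64;
    let last = ((max - req.offset) / req.interval).floor() as i64;
    (first, last)
}

// Materialize one key per position on the grid.
fn generate_buckets(req: &HistogramReq, min: f64, max: f64) -> Vec<f64> {
    let (first, last) = bucket_pos_range(req, min, max);
    (first..=last)
        .map(|pos| pos as f64 * req.interval + req.offset)
        .collect()
}

fn main() {
    let req = HistogramReq { interval: 2.0, offset: 0.0 };
    // Values spanning [1.0, 6.5] produce the gapless grid 0.0, 2.0, 4.0, 6.0.
    assert_eq!(generate_buckets(&req, 1.0, 6.5), vec![0.0, 2.0, 4.0, 6.0]);
}
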
@@ -544,8 +513,8 @@ mod tests {
};
use crate::aggregation::metric::{AverageAggregation, StatsAggregation};
use crate::aggregation::tests::{
exec_request, exec_request_with_query, exec_request_with_query_and_memory_limit,
get_test_index_2_segments, get_test_index_from_values, get_test_index_with_num_docs,
exec_request, exec_request_with_query, get_test_index_2_segments,
get_test_index_from_values, get_test_index_with_num_docs,
};

#[test]
@@ -556,7 +525,7 @@ mod tests {

let agg_req: Aggregations = vec![(
"my_interval".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 3.5,
@@ -564,7 +533,7 @@ mod tests {
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -582,7 +551,7 @@ mod tests {
// With offset
let agg_req: Aggregations = vec![(
"my_interval".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 3.5,
@@ -590,7 +559,7 @@ mod tests {
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -631,14 +600,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"my_interval".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -666,14 +635,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -690,40 +659,6 @@ mod tests {
Ok(())
}

#[test]
fn histogram_memory_limit() -> crate::Result<()> {
let index = get_test_index_with_num_docs(true, 100)?;

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 0.1,
..Default::default()
}),
sub_aggregation: Default::default(),
})),
)]
.into_iter()
.collect();

let res = exec_request_with_query_and_memory_limit(
agg_req,
&index,
None,
AggregationLimits::new(Some(5_000), None),
)
.unwrap_err();
assert_eq!(
res.to_string(),
"Aborting aggregation because memory limit was exceeded. Limit: 5.00 KB, Current: \
102.48 KB"
);

Ok(())
}

#[test]
fn histogram_merge_test() -> crate::Result<()> {
// Merge buckets counts from different segments
@@ -733,14 +668,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -773,7 +708,7 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
@@ -781,7 +716,7 @@ mod tests {
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -811,7 +746,7 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
@@ -822,7 +757,7 @@ mod tests {
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -843,7 +778,7 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
@@ -851,7 +786,7 @@ mod tests {
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -874,7 +809,7 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(Box::new(BucketAggregation {
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
@@ -883,7 +818,7 @@ mod tests {
..Default::default()
}),
sub_aggregation: Default::default(),
})),
}),
)]
.into_iter()
.collect();
@@ -918,21 +853,18 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
..Default::default()
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
sub_aggregation: Default::default(),
}
.into(),
),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -952,25 +884,22 @@ mod tests {
//
let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
extended_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
..Default::default()
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
sub_aggregation: Default::default(),
}
.into(),
),
extended_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -989,25 +918,22 @@ mod tests {
// Invalid request
let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
extended_bounds: Some(HistogramBounds {
min: 1.0,
max: 12.0,
}),
..Default::default()
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
sub_aggregation: Default::default(),
}
.into(),
),
extended_bounds: Some(HistogramBounds {
min: 1.0,
max: 12.0,
}),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1037,17 +963,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1088,21 +1011,18 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
extended_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
..Default::default()
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
extended_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
sub_aggregation: Default::default(),
}
.into(),
),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1119,22 +1039,19 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
extended_bounds: Some(HistogramBounds { min: 2.0, max: 5.0 }),
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
..Default::default()
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
extended_bounds: Some(HistogramBounds { min: 2.0, max: 5.0 }),
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
sub_aggregation: Default::default(),
}
.into(),
),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1151,21 +1068,18 @@ mod tests {
// hard_bounds will not extend the result
let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
..Default::default()
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
hard_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
sub_aggregation: Default::default(),
}
.into(),
),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1200,21 +1114,18 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
extended_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
..Default::default()
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
extended_bounds: Some(HistogramBounds {
min: 2.0,
max: 12.0,
}),
sub_aggregation: agg_req,
}
.into(),
),
..Default::default()
}),
sub_aggregation: agg_req,
}),
)]
.into_iter()
.collect();
@@ -1264,17 +1175,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 100000.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 100000.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1305,17 +1213,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "date".to_string(),
interval: 86400000000.0, // one day in microseconds
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "date".to_string(),
interval: 86400000000.0, // one day in microseconds
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1356,17 +1261,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 0.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 0.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -1384,18 +1286,15 @@ mod tests {

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 50.0,
keyed: true,
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 50.0,
keyed: true,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

@@ -11,7 +11,7 @@ use crate::aggregation::intermediate_agg_result::{
IntermediateRangeBucketResult,
};
use crate::aggregation::segment_agg_result::{
build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
build_segment_agg_collector, BucketCount, SegmentAggregationCollector,
};
use crate::aggregation::{
f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey, VecWithNames,
@@ -208,44 +208,41 @@ impl SegmentAggregationCollector for SegmentRangeCollector {
})
}

#[inline]
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
self.collect_block(&[doc], agg_with_accessor)
}

#[inline]
fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
let bucket_agg_accessor = &mut agg_with_accessor.buckets.values[self.accessor_idx];
let accessor = &agg_with_accessor.buckets.values[self.accessor_idx].accessor;
let sub_aggregation_accessor =
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
for doc in docs {
for val in accessor.values_for_doc(*doc) {
let bucket_pos = self.get_bucket_pos(val);

bucket_agg_accessor
.column_block_accessor
.fetch_block(docs, &bucket_agg_accessor.accessor);
let bucket = &mut self.buckets[bucket_pos];

for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
let bucket_pos = self.get_bucket_pos(val);

let bucket = &mut self.buckets[bucket_pos];

bucket.bucket.doc_count += 1;
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation.collect(doc, &mut bucket_agg_accessor.sub_aggregation)?;
bucket.bucket.doc_count += 1;
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation.collect(*doc, sub_aggregation_accessor)?;
}
}
}

Ok(())
}

fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
let sub_aggregation_accessor =
&mut agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;
&agg_with_accessor.buckets.values[self.accessor_idx].sub_aggregation;

for bucket in self.buckets.iter_mut() {
if let Some(sub_agg) = bucket.bucket.sub_aggregation.as_mut() {
@@ -261,7 +258,7 @@ impl SegmentRangeCollector {
pub(crate) fn from_req_and_validate(
req: &RangeAggregation,
sub_aggregation: &AggregationsWithAccessor,
limits: &AggregationLimits,
bucket_count: &BucketCount,
field_type: ColumnType,
accessor_idx: usize,
) -> crate::Result<Self> {
@@ -305,10 +302,8 @@ impl SegmentRangeCollector {
})
.collect::<crate::Result<_>>()?;

limits.add_memory_consumed(
buckets.len() as u64 * std::mem::size_of::<SegmentRangeAndBucketEntry>() as u64,
);
limits.validate_memory_consumption()?;
bucket_count.add_count(buckets.len() as u32);
bucket_count.validate_bucket_count()?;

Ok(SegmentRangeCollector {
buckets,
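One side of the hunk above validates a shared bucket counter instead of a memory budget. A standalone sketch of that `BucketCount` pattern; the type below mirrors the names in the diff but is a stand-in:

use std::rc::Rc;
use std::sync::atomic::{AtomicU32, Ordering};

// Counter shared across all collectors of a request via Rc<AtomicU32>,
// validated against a hard ceiling whenever buckets are allocated.
struct BucketCount {
    bucket_count: Rc<AtomicU32>,
    max_bucket_count: u32,
}

impl BucketCount {
    fn add_count(&self, count: u32) {
        self.bucket_count.fetch_add(count, Ordering::Relaxed);
    }
    fn validate_bucket_count(&self) -> Result<(), String> {
        let current = self.bucket_count.load(Ordering::Relaxed);
        if current > self.max_bucket_count {
            return Err(format!(
                "Aborting aggregation because too many buckets were created: {} > {}",
                current, self.max_bucket_count
            ));
        }
        Ok(())
    }
}

fn main() {
    let counter = Rc::new(AtomicU32::new(0));
    let count = BucketCount { bucket_count: Rc::clone(&counter), max_bucket_count: 65_000 };
    count.add_count(70_000);
    assert!(count.validate_bucket_count().is_err());
}
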
@@ -480,17 +475,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
..Default::default()
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -524,17 +516,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
..Default::default()
}),
sub_aggregation: sub_agg_req,
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
..Default::default()
}),
sub_aggregation: sub_agg_req,
}),
)]
.into_iter()
.collect();
@@ -559,17 +548,14 @@ mod tests {

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
keyed: true,
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -599,28 +585,25 @@ mod tests {

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![
RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
},
RangeAggregationRange {
key: None,
from: Some(0.1f64),
to: Some(0.2f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![
RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
},
RangeAggregationRange {
key: None,
from: Some(0.1f64),
to: Some(0.2f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -659,28 +642,25 @@ mod tests {

let agg_req: Aggregations = vec![(
"date_ranges".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "date".to_string(),
ranges: vec![
RangeAggregationRange {
key: None,
from: None,
to: Some(1546300800000000.0f64),
},
RangeAggregationRange {
key: None,
from: Some(1546300800000000.0f64),
to: Some(1546387200000000.0f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "date".to_string(),
ranges: vec![
RangeAggregationRange {
key: None,
from: None,
to: Some(1546300800000000.0f64),
},
RangeAggregationRange {
key: None,
from: Some(1546300800000000.0f64),
to: Some(1546387200000000.0f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
@@ -724,21 +704,18 @@ mod tests {

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
}],
keyed: true,
}),
sub_aggregation: Default::default(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
}],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

File diff suppressed because it is too large
@@ -34,7 +34,6 @@ impl BufAggregationCollector {
}

impl SegmentAggregationCollector for BufAggregationCollector {
#[inline]
fn into_intermediate_aggregations_result(
self: Box<Self>,
agg_with_accessor: &AggregationsWithAccessor,
@@ -42,11 +41,10 @@ impl SegmentAggregationCollector for BufAggregationCollector {
Box::new(self.collector).into_intermediate_aggregations_result(agg_with_accessor)
}

#[inline]
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
self.staged_docs[self.num_staged_docs] = doc;
self.num_staged_docs += 1;
@@ -58,19 +56,18 @@ impl SegmentAggregationCollector for BufAggregationCollector {
Ok(())
}

#[inline]
fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
self.collector.collect_block(docs, agg_with_accessor)?;

for doc in docs {
self.collect(*doc, agg_with_accessor)?;
}
Ok(())
}

#[inline]
fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
self.collector
.collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor)?;
self.num_staged_docs = 0;

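A standalone sketch of the staging idea in `BufAggregationCollector` above: single-doc `collect()` calls fill a fixed buffer that is handed to the wrapped collector in blocks, amortizing per-doc dispatch. The inner collector is reduced to a closure here, and the buffer size is an assumption:

use std::cell::Cell;

const BUF_SIZE: usize = 64; // illustrative; the real buffer size may differ

struct BufCollector<F: FnMut(&[u32])> {
    staged_docs: [u32; BUF_SIZE],
    num_staged_docs: usize,
    collect_block: F,
}

impl<F: FnMut(&[u32])> BufCollector<F> {
    fn collect(&mut self, doc: u32) {
        self.staged_docs[self.num_staged_docs] = doc;
        self.num_staged_docs += 1;
        // Hand a full buffer to the wrapped collector as one block.
        if self.num_staged_docs == BUF_SIZE {
            self.flush();
        }
    }
    fn flush(&mut self) {
        (self.collect_block)(&self.staged_docs[..self.num_staged_docs]);
        self.num_staged_docs = 0;
    }
}

fn main() {
    let seen = Cell::new(0usize);
    let mut buf = BufCollector {
        staged_docs: [0; BUF_SIZE],
        num_staged_docs: 0,
        collect_block: |docs: &[u32]| seen.set(seen.get() + docs.len()),
    };
    for doc in 0..100u32 {
        buf.collect(doc);
    }
    buf.flush(); // flush the final partial block
    assert_eq!(seen.get(), 100);
}
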
@@ -1,36 +1,36 @@
use std::rc::Rc;

use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults;
use super::buf_collector::BufAggregationCollector;
use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::{
build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
};
use super::segment_agg_result::{build_segment_agg_collector, SegmentAggregationCollector};
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::{DocId, SegmentReader, TantivyError};
use crate::{SegmentReader, TantivyError};

/// The default max bucket count, before the aggregation fails.
pub const DEFAULT_BUCKET_LIMIT: u32 = 65000;

/// The default memory limit in bytes before the aggregation fails. 500MB
pub const DEFAULT_MEMORY_LIMIT: u64 = 500_000_000;
pub const MAX_BUCKET_COUNT: u32 = 65000;

/// Collector for aggregations.
///
/// The collector collects all aggregations by the underlying aggregation request.
pub struct AggregationCollector {
agg: Aggregations,
limits: AggregationLimits,
max_bucket_count: u32,
}

impl AggregationCollector {
/// Create collector from aggregation request.
///
/// Aggregation fails when the limits in `AggregationLimits` is exceeded. (memory limit and
/// bucket limit)
pub fn from_aggs(agg: Aggregations, limits: AggregationLimits) -> Self {
Self { agg, limits }
/// Aggregation fails when the total bucket count is higher than max_bucket_count.
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
}
}

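A small sketch of the defaulting rule in the `Option<u32>`-based `from_aggs` above; the constant matches the `MAX_BUCKET_COUNT` shown in the diff:

const MAX_BUCKET_COUNT: u32 = 65_000;

// An unset bucket limit falls back to MAX_BUCKET_COUNT.
fn resolve_bucket_limit(max_bucket_count: Option<u32>) -> u32 {
    max_bucket_count.unwrap_or(MAX_BUCKET_COUNT)
}

fn main() {
    assert_eq!(resolve_bucket_limit(None), 65_000);
    assert_eq!(resolve_bucket_limit(Some(1_000)), 1_000);
}
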
@@ -44,16 +44,18 @@ impl AggregationCollector {
/// into the final `AggregationResults` via the `into_final_result()` method.
pub struct DistributedAggregationCollector {
agg: Aggregations,
limits: AggregationLimits,
max_bucket_count: u32,
}

impl DistributedAggregationCollector {
/// Create collector from aggregation request.
///
/// Aggregation fails when the limits in `AggregationLimits` is exceeded. (memory limit and
/// bucket limit)
pub fn from_aggs(agg: Aggregations, limits: AggregationLimits) -> Self {
Self { agg, limits }
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
}
}

@@ -67,7 +69,11 @@ impl Collector for DistributedAggregationCollector {
_segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
AggregationSegmentCollector::from_agg_req_and_reader(
&self.agg,
reader,
self.max_bucket_count,
)
}

fn requires_scoring(&self) -> bool {
@@ -92,7 +98,11 @@ impl Collector for AggregationCollector {
_segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
AggregationSegmentCollector::from_agg_req_and_reader(
&self.agg,
reader,
self.max_bucket_count,
)
}

fn requires_scoring(&self) -> bool {
@@ -104,7 +114,7 @@ impl Collector for AggregationCollector {
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let res = merge_fruits(segment_fruits)?;
res.into_final_bucket_result(self.agg.clone(), &self.limits)
res.into_final_bucket_result(self.agg.clone())
}
}

@@ -125,7 +135,7 @@ fn merge_fruits(
/// `AggregationSegmentCollector` does the aggregation collection on a segment.
pub struct AggregationSegmentCollector {
aggs_with_accessor: AggregationsWithAccessor,
agg_collector: BufAggregationCollector,
result: BufAggregationCollector,
error: Option<TantivyError>,
}

@@ -135,14 +145,15 @@ impl AggregationSegmentCollector {
pub fn from_agg_req_and_reader(
agg: &Aggregations,
reader: &SegmentReader,
limits: &AggregationLimits,
max_bucket_count: u32,
) -> crate::Result<Self> {
let aggs_with_accessor = get_aggs_with_accessor_and_validate(agg, reader, limits)?;
let aggs_with_accessor =
get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
let result =
BufAggregationCollector::new(build_segment_agg_collector(&aggs_with_accessor)?);
Ok(AggregationSegmentCollector {
aggs_with_accessor,
agg_collector: result,
result,
error: None,
})
}
@@ -152,29 +163,11 @@ impl SegmentCollector for AggregationSegmentCollector {
type Fruit = crate::Result<IntermediateAggregationResults>;

#[inline]
fn collect(&mut self, doc: DocId, _score: crate::Score) {
fn collect(&mut self, doc: crate::DocId, _score: crate::Score) {
if self.error.is_some() {
return;
}
if let Err(err) = self
.agg_collector
.collect(doc, &mut self.aggs_with_accessor)
{
self.error = Some(err);
}
}

/// The query pushes the documents to the collector via this method.
///
/// Only valid for Collectors that ignore docs
fn collect_block(&mut self, docs: &[DocId]) {
if self.error.is_some() {
return;
}
if let Err(err) = self
.agg_collector
.collect_block(docs, &mut self.aggs_with_accessor)
{
if let Err(err) = self.result.collect(doc, &self.aggs_with_accessor) {
self.error = Some(err);
}
}
@@ -183,7 +176,7 @@ impl SegmentCollector for AggregationSegmentCollector {
if let Some(err) = self.error {
return Err(err);
}
self.agg_collector.flush(&mut self.aggs_with_accessor)?;
Box::new(self.agg_collector).into_intermediate_aggregations_result(&self.aggs_with_accessor)
self.result.flush(&self.aggs_with_accessor)?;
Box::new(self.result).into_intermediate_aggregations_result(&self.aggs_with_accessor)
}
}

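The collect path above cannot return an error (the `SegmentCollector` trait's `collect` is infallible), so the first failure is latched in `self.error` and every later call becomes a no-op until harvesting surfaces it. A standalone sketch of that pattern:

struct LatchingCollector {
    error: Option<String>,
    count: u64,
}

impl LatchingCollector {
    // Fallible inner work; fails on one doc for demonstration.
    fn try_collect(doc: u32) -> Result<u64, String> {
        if doc == 13 { Err("boom".to_string()) } else { Ok(1) }
    }

    // Infallible outer API: latch the first error, skip all later docs.
    fn collect(&mut self, doc: u32) {
        if self.error.is_some() {
            return;
        }
        match Self::try_collect(doc) {
            Ok(n) => self.count += n,
            Err(err) => self.error = Some(err),
        }
    }

    // The latched error is surfaced when the result is harvested.
    fn harvest(self) -> Result<u64, String> {
        if let Some(err) = self.error {
            return Err(err);
        }
        Ok(self.count)
    }
}

fn main() {
    let mut c = LatchingCollector { error: None, count: 0 };
    for doc in 0..20 {
        c.collect(doc);
    }
    assert!(c.harvest().is_err());
}
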
@@ -1,33 +1,9 @@
|
||||
use common::ByteCount;
|
||||
|
||||
use super::bucket::DateHistogramParseError;
|
||||
|
||||
/// Error that may occur when opening a directory
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Error)]
|
||||
pub enum AggregationError {
|
||||
/// Date histogram parse error
|
||||
/// Failed to open the directory.
|
||||
#[error("Date histogram parse error: {0:?}")]
|
||||
DateHistogramParseError(#[from] DateHistogramParseError),
|
||||
/// Memory limit exceeded
|
||||
#[error(
|
||||
"Aborting aggregation because memory limit was exceeded. Limit: {limit:?}, Current: \
|
||||
{current:?}"
|
||||
)]
|
||||
MemoryExceeded {
|
||||
/// Memory consumption limit
|
||||
limit: ByteCount,
|
||||
/// Current memory consumption
|
||||
current: ByteCount,
|
||||
},
|
||||
/// Bucket limit exceeded
|
||||
#[error(
|
||||
"Aborting aggregation because bucket limit was exceeded. Limit: {limit:?}, Current: \
|
||||
{current:?}"
|
||||
)]
|
||||
BucketLimitExceeded {
|
||||
/// Bucket limit
|
||||
limit: u32,
|
||||
/// Current num buckets
|
||||
current: u32,
|
||||
},
|
||||
}
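Editor's note: `TantivyError::AggregationError` wraps this enum via `#[from]`, so a caller can pattern-match the structured limits. A hedged sketch (the `tantivy::aggregation::AggregationError` re-export path is assumed):

```rust
use tantivy::aggregation::AggregationError; // re-export path assumed
use tantivy::TantivyError;

fn describe(err: &TantivyError) -> String {
    match err {
        TantivyError::AggregationError(AggregationError::MemoryExceeded { limit, current }) => {
            format!("aggregation used {current:?} of the {limit:?} allowed")
        }
        TantivyError::AggregationError(AggregationError::BucketLimitExceeded { limit, current }) => {
            format!("created {current} buckets, limit is {limit}")
        }
        other => format!("{other}"),
    }
}
```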

@@ -7,8 +7,7 @@ use std::cmp::Ordering;
use columnar::ColumnType;
use itertools::Itertools;
use rustc_hash::FxHashMap;
use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde::{Deserialize, Serialize};

use super::agg_req::{
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
@@ -23,11 +22,9 @@ use super::metric::{
IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
IntermediateSum,
};
use super::segment_agg_result::AggregationLimits;
use super::{format_date, AggregationError, Key, SerializedKey, VecWithNames};
use super::{format_date, Key, SerializedKey, VecWithNames};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::bucket::TermsAggregationInternal;
use crate::TantivyError;

/// Contains the intermediate aggregation result, which is optimized to be merged with other
/// intermediate results.
@@ -41,22 +38,8 @@ pub struct IntermediateAggregationResults {

impl IntermediateAggregationResults {
/// Convert intermediate result and its aggregation request to the final result.
pub fn into_final_bucket_result(
self,
req: Aggregations,
limits: &AggregationLimits,
) -> crate::Result<AggregationResults> {
let res = self.into_final_bucket_result_internal(&(req.into()), limits)?;
let bucket_count = res.get_bucket_count() as u32;
if bucket_count > limits.get_bucket_limit() {
return Err(TantivyError::AggregationError(
AggregationError::BucketLimitExceeded {
limit: limits.get_bucket_limit(),
current: bucket_count,
},
));
}
Ok(res)
pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
self.into_final_bucket_result_internal(&(req.into()))
}

/// Convert intermediate result and its aggregation request to the final result.
@@ -66,7 +49,6 @@ impl IntermediateAggregationResults {
pub(crate) fn into_final_bucket_result_internal(
self,
req: &AggregationsInternal,
limits: &AggregationLimits,
) -> crate::Result<AggregationResults> {
// Important assumption:
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
@@ -74,11 +56,11 @@ impl IntermediateAggregationResults {
let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();

if let Some(buckets) = self.buckets {
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets, limits)?
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
} else {
// When there are no buckets, we create empty buckets, so that the serialized json
// format is constant
add_empty_final_buckets_to_result(&mut results, &req.buckets, limits)?
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
};

if let Some(metrics) = self.metrics {
@@ -179,12 +161,10 @@ fn add_empty_final_metrics_to_result(
fn add_empty_final_buckets_to_result(
results: &mut FxHashMap<String, AggregationResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
limits: &AggregationLimits,
) -> crate::Result<()> {
let requested_buckets = req_buckets.iter();
for (key, req) in requested_buckets {
let empty_bucket =
AggregationResult::BucketResult(BucketResult::empty_from_req(req, limits)?);
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
results.insert(key.to_string(), empty_bucket);
}
Ok(())
@@ -194,13 +174,12 @@ fn convert_and_add_final_buckets_to_result(
results: &mut FxHashMap<String, AggregationResult>,
buckets: VecWithNames<IntermediateBucketResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
limits: &AggregationLimits,
) -> crate::Result<()> {
assert_eq!(buckets.len(), req_buckets.len());

let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
for ((key, bucket), req) in buckets_with_request {
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req, limits)?);
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
results.insert(key, result);
}
Ok(())
@@ -308,7 +287,6 @@ impl IntermediateBucketResult {
pub(crate) fn into_final_bucket_result(
self,
req: &BucketAggregationInternal,
limits: &AggregationLimits,
) -> crate::Result<BucketResult> {
match self {
IntermediateBucketResult::Range(range_res) => {
@@ -321,7 +299,6 @@ impl IntermediateBucketResult {
req.as_range()
.expect("unexpected aggregation, expected histogram aggregation"),
range_res.column_type,
limits,
)
})
.collect::<crate::Result<Vec<_>>>()?;
@@ -360,7 +337,6 @@ impl IntermediateBucketResult {
column_type,
histogram_req,
&req.sub_aggregation,
limits,
)?;

let buckets = if histogram_req.keyed {
@@ -379,7 +355,6 @@ impl IntermediateBucketResult {
req.as_term()
.expect("unexpected aggregation, expected term aggregation"),
&req.sub_aggregation,
limits,
),
}
}
@@ -464,45 +439,16 @@ pub struct IntermediateRangeBucketResult {
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Term aggregation including error counts
pub struct IntermediateTermBucketResult {
#[serde(
serialize_with = "serialize_entries",
deserialize_with = "deserialize_entries"
)]
pub(crate) entries: FxHashMap<Key, IntermediateTermBucketEntry>,
pub(crate) sum_other_doc_count: u64,
pub(crate) doc_count_error_upper_bound: u64,
}

// Serialize into a Vec to circumvent the JSON limitation, where keys can't be numbers
fn serialize_entries<S>(
entries: &FxHashMap<Key, IntermediateTermBucketEntry>,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(entries.len()))?;
for (k, v) in entries {
seq.serialize_element(&(k, v))?;
}
seq.end()
}

fn deserialize_entries<'de, D>(
deserializer: D,
) -> Result<FxHashMap<Key, IntermediateTermBucketEntry>, D::Error>
where D: Deserializer<'de> {
let vec_entries: Vec<(Key, IntermediateTermBucketEntry)> =
Deserialize::deserialize(deserializer)?;
Ok(vec_entries.into_iter().collect())
}
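Editor's note: the pair-list trick above exists because JSON object keys must be strings, while `Key::F64` is numeric. A standalone, assumed-generic illustration of the same pattern (not tantivy code; `u64` stands in for any non-string key type):

```rust
use std::collections::HashMap;

fn main() {
    let entries: HashMap<u64, &str> = [(5, "bucket-a"), (7, "bucket-b")].into_iter().collect();
    // Serialize as (key, value) pairs, sidestepping the string-key restriction.
    let pairs: Vec<(&u64, &&str)> = entries.iter().collect();
    let json = serde_json::to_string(&pairs).unwrap();
    // Rebuild the map from the deserialized pairs.
    let back: Vec<(u64, String)> = serde_json::from_str(&json).unwrap();
    let rebuilt: HashMap<u64, String> = back.into_iter().collect();
    assert_eq!(rebuilt[&5], "bucket-a");
}
```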

impl IntermediateTermBucketResult {
pub(crate) fn into_final_result(
self,
req: &TermsAggregation,
sub_aggregation_req: &AggregationsInternal,
limits: &AggregationLimits,
) -> crate::Result<BucketResult> {
let req = TermsAggregationInternal::from_req(req);
let mut buckets: Vec<BucketEntry> = self
@@ -516,7 +462,7 @@ impl IntermediateTermBucketResult {
doc_count: entry.doc_count,
sub_aggregation: entry
.sub_aggregation
.into_final_bucket_result_internal(sub_aggregation_req, limits)?,
.into_final_bucket_result_internal(sub_aggregation_req)?,
})
})
.collect::<crate::Result<_>>()?;
@@ -548,7 +494,7 @@ impl IntermediateTermBucketResult {
let val = bucket
.sub_aggregation
.get_value_from_aggregation(agg_name, agg_property)?
.unwrap_or(f64::MIN);
.unwrap_or(f64::NAN);
Ok((bucket, val))
})
.collect::<crate::Result<Vec<_>>>()?;
@@ -636,7 +582,6 @@ impl IntermediateHistogramBucketEntry {
pub(crate) fn into_final_bucket_entry(
self,
req: &AggregationsInternal,
limits: &AggregationLimits,
) -> crate::Result<BucketEntry> {
Ok(BucketEntry {
key_as_string: None,
@@ -644,7 +589,7 @@ impl IntermediateHistogramBucketEntry {
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req, limits)?,
.into_final_bucket_result_internal(req)?,
})
}
}
@@ -683,14 +628,13 @@ impl IntermediateRangeBucketEntry {
req: &AggregationsInternal,
_range_req: &RangeAggregation,
column_type: Option<ColumnType>,
limits: &AggregationLimits,
) -> crate::Result<RangeBucketEntry> {
let mut range_bucket_entry = RangeBucketEntry {
key: self.key,
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req, limits)?,
.into_final_bucket_result_internal(req)?,
to: self.to,
from: self.from,
to_as_string: None,
@@ -870,26 +814,4 @@ mod tests {

assert_eq!(tree_left, orig);
}

#[test]
fn test_term_bucket_json_roundtrip() {
let term_buckets = IntermediateTermBucketResult {
entries: vec![(
Key::F64(5.0),
IntermediateTermBucketEntry {
doc_count: 10,
sub_aggregation: Default::default(),
},
)]
.into_iter()
.collect(),
sum_other_doc_count: 0,
doc_count_error_upper_bound: 0,
};

let term_buckets_round: IntermediateTermBucketResult =
serde_json::from_str(&serde_json::to_string(&term_buckets).unwrap()).unwrap();

assert_eq!(term_buckets, term_buckets_round);
}
}

@@ -81,7 +81,7 @@ mod tests {
"price_sum": { "sum": { "field": "price" } }
}"#;
let aggregations: Aggregations = serde_json::from_str(aggregations_json).unwrap();
let collector = AggregationCollector::from_aggs(aggregations, Default::default());
let collector = AggregationCollector::from_aggs(aggregations, None);
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let aggregations_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();

@@ -1,10 +1,8 @@
use columnar::ColumnType;
use columnar::{Cardinality, Column, ColumnType};
use serde::{Deserialize, Serialize};

use super::*;
use crate::aggregation::agg_req_with_accessor::{
AggregationsWithAccessor, MetricAggregationWithAccessor,
};
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateMetricResult,
};
@@ -158,7 +156,6 @@ pub(crate) struct SegmentStatsCollector {
pub(crate) collecting_for: SegmentStatsType,
pub(crate) stats: IntermediateStats,
pub(crate) accessor_idx: usize,
val_cache: Vec<u64>,
}

impl SegmentStatsCollector {
@@ -172,28 +169,28 @@ impl SegmentStatsCollector {
collecting_for,
stats: IntermediateStats::default(),
accessor_idx,
val_cache: Default::default(),
}
}
#[inline]
pub(crate) fn collect_block_with_field(
&mut self,
docs: &[DocId],
agg_accessor: &mut MetricAggregationWithAccessor,
) {
agg_accessor
.column_block_accessor
.fetch_block(docs, &agg_accessor.accessor);

for val in agg_accessor.column_block_accessor.iter_vals() {
let val1 = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val1);
pub(crate) fn collect_block_with_field(&mut self, docs: &[DocId], field: &Column<u64>) {
if field.get_cardinality() == Cardinality::Full {
for doc in docs {
let val = field.values.get_val(*doc);
let val1 = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val1);
}
} else {
for doc in docs {
for val in field.values_for_doc(*doc) {
let val1 = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val1);
}
}
}
}
}

impl SegmentAggregationCollector for SegmentStatsCollector {
#[inline]
fn into_intermediate_aggregations_result(
self: Box<Self>,
agg_with_accessor: &AggregationsWithAccessor,
@@ -230,11 +227,10 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
})
}

#[inline]
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
let field = &agg_with_accessor.metrics.values[self.accessor_idx].accessor;

@@ -250,9 +246,9 @@ impl SegmentAggregationCollector for SegmentStatsCollector {
fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
let field = &mut agg_with_accessor.metrics.values[self.accessor_idx];
let field = &agg_with_accessor.metrics.values[self.accessor_idx].accessor;
self.collect_block_with_field(docs, field);
Ok(())
}
@@ -293,7 +289,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1, Default::default());
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let reader = index.reader()?;
let searcher = reader.searcher();
@@ -330,7 +326,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1, Default::default());
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let reader = index.reader()?;
let searcher = reader.searcher();
@@ -384,33 +380,30 @@ mod tests {
),
(
"range".to_string(),
Aggregation::Bucket(
BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score".to_string(),
ranges: vec![
(3f64..7f64).into(),
(7f64..19f64).into(),
(19f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: iter::once((
"stats".to_string(),
Aggregation::Metric(MetricAggregation::Stats(
StatsAggregation::from_field_name("score".to_string()),
)),
))
.collect(),
}
.into(),
),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score".to_string(),
ranges: vec![
(3f64..7f64).into(),
(7f64..19f64).into(),
(19f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: iter::once((
"stats".to_string(),
Aggregation::Metric(MetricAggregation::Stats(
StatsAggregation::from_field_name("score".to_string()),
)),
))
.collect(),
}),
),
]
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1, Default::default());
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();

@@ -70,7 +70,7 @@
//! .into_iter()
//! .collect();
//!
//! let collector = AggregationCollector::from_aggs(agg_req, Default::default());
//! let collector = AggregationCollector::from_aggs(agg_req, None);
//!
//! let searcher = reader.searcher();
//! let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
@@ -130,14 +130,14 @@
//! let agg_req_1: Aggregations = vec![
//! (
//! "range".to_string(),
//! Aggregation::Bucket(Box::new(BucketAggregation {
//! Aggregation::Bucket(BucketAggregation {
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{
//! field: "score".to_string(),
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
//! keyed: false,
//! }),
//! sub_aggregation: sub_agg_req_1.clone(),
//! })),
//! }),
//! ),
//! ]
//! .into_iter()
@@ -155,7 +155,6 @@
//! [`AggregationResults`](agg_result::AggregationResults) via the
//! [`into_final_bucket_result`](intermediate_agg_result::IntermediateAggregationResults::into_final_bucket_result) method.

mod agg_limits;
pub mod agg_req;
mod agg_req_with_accessor;
pub mod agg_result;
@@ -166,7 +165,6 @@ mod date;
mod error;
pub mod intermediate_agg_result;
pub mod metric;

mod segment_agg_result;
use std::collections::HashMap;
use std::fmt::Display;
@@ -174,10 +172,9 @@ use std::fmt::Display;
#[cfg(test)]
mod agg_tests;

pub use agg_limits::AggregationLimits;
pub use collector::{
AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
DEFAULT_BUCKET_LIMIT,
MAX_BUCKET_COUNT,
};
use columnar::{ColumnType, MonotonicallyMappableToU64};
pub(crate) use date::format_date;
@@ -348,8 +345,9 @@ mod tests {
use time::OffsetDateTime;

use super::agg_req::Aggregations;
use super::segment_agg_result::AggregationLimits;
use super::*;
use crate::aggregation::agg_req::{Aggregation, BucketAggregation, BucketAggregationType};
use crate::aggregation::bucket::TermsAggregation;
use crate::indexer::NoMergePolicy;
use crate::query::{AllQuery, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
@@ -373,16 +371,7 @@ mod tests {
index: &Index,
query: Option<(&str, &str)>,
) -> crate::Result<Value> {
exec_request_with_query_and_memory_limit(agg_req, index, query, Default::default())
}

pub fn exec_request_with_query_and_memory_limit(
agg_req: Aggregations,
index: &Index,
query: Option<(&str, &str)>,
limits: AggregationLimits,
) -> crate::Result<Value> {
let collector = AggregationCollector::from_aggs(agg_req, limits);
let collector = AggregationCollector::from_aggs(agg_req, None);

let reader = index.reader()?;
let searcher = reader.searcher();
@@ -445,7 +434,7 @@ mod tests {
.set_index_option(IndexRecordOption::Basic)
.set_fieldnorms(false),
)
.set_fast(None)
.set_fast()
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype.clone());
let text_field_id = schema_builder.add_text_field("text_id", text_fieldtype);
@@ -500,7 +489,7 @@ mod tests {
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_fast(None)
.set_fast()
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let date_field = schema_builder.add_date_field("date", FAST);
@@ -606,4 +595,50 @@ mod tests {

Ok(index)
}

#[test]
fn test_aggregation_on_json_object() {
let mut schema_builder = Schema::builder();
let json = schema_builder.add_json_field("json", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
index_writer
.add_document(doc!(json => json!({"color": "red"})))
.unwrap();
index_writer
.add_document(doc!(json => json!({"color": "blue"})))
.unwrap();
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let agg: Aggregations = vec![(
"jsonagg".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "json.color".to_string(),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let aggregation_collector = AggregationCollector::from_aggs(agg, None);
let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
assert_eq!(
&aggregation_res_json,
&serde_json::json!({
"jsonagg": {
"buckets": [
{"doc_count": 1, "key": "blue"},
{"doc_count": 1, "key": "red"}
],
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0
}
})
);
}
}
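Editor's note: the new test builds the request with Rust structs; the same "jsonagg" request can be expressed through the serde-based JSON DSL. A sketch (assuming this crate version's request format; "json.color" addresses the `color` key inside the `json` field, mirroring the test above):

```rust
use tantivy::aggregation::agg_req::Aggregations;

fn terms_on_json_field() -> Aggregations {
    let req = serde_json::json!({
        "jsonagg": {
            "terms": { "field": "json.color" }
        }
    });
    serde_json::from_value(req).expect("valid aggregation request")
}
```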

@@ -4,13 +4,15 @@
//! merging.

use std::fmt::Debug;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;

pub(crate) use super::agg_limits::AggregationLimits;
use super::agg_req::MetricAggregation;
use super::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor,
};
use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
use super::collector::MAX_BUCKET_COUNT;
use super::intermediate_agg_result::IntermediateAggregationResults;
use super::metric::{
AverageAggregation, CountAggregation, MaxAggregation, MinAggregation, SegmentStatsCollector,
@@ -18,6 +20,7 @@ use super::metric::{
};
use super::VecWithNames;
use crate::aggregation::agg_req::BucketAggregationType;
use crate::TantivyError;

pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
fn into_intermediate_aggregations_result(
@@ -28,18 +31,18 @@ pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()>;

fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()>;

/// Finalize method. Some aggregators collect blocks of docs before calling `collect_block`.
/// This method ensures those staged docs will be collected.
fn flush(&mut self, _agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
fn flush(&mut self, _agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
Ok(())
}
}
@@ -128,7 +131,7 @@ pub(crate) fn build_bucket_segment_agg_collector(
Ok(Box::new(SegmentRangeCollector::from_req_and_validate(
range_req,
&req.sub_aggregation,
&req.limits,
&req.bucket_count,
req.field_type,
accessor_idx,
)?))
@@ -206,7 +209,7 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
fn collect(
&mut self,
doc: crate::DocId,
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
self.collect_block(&[doc], agg_with_accessor)?;

@@ -216,7 +219,7 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
fn collect_block(
&mut self,
docs: &[crate::DocId],
agg_with_accessor: &mut AggregationsWithAccessor,
agg_with_accessor: &AggregationsWithAccessor,
) -> crate::Result<()> {
if let Some(metrics) = self.metrics.as_mut() {
for collector in metrics {
@@ -233,7 +236,7 @@ impl SegmentAggregationCollector for GenericSegmentAggregationResultsCollector {
Ok(())
}

fn flush(&mut self, agg_with_accessor: &mut AggregationsWithAccessor) -> crate::Result<()> {
fn flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
if let Some(metrics) = &mut self.metrics {
for collector in metrics {
collector.flush(agg_with_accessor)?;
@@ -281,3 +284,37 @@ impl GenericSegmentAggregationResultsCollector {
Ok(GenericSegmentAggregationResultsCollector { metrics, buckets })
}
}

#[derive(Clone)]
pub(crate) struct BucketCount {
/// The counter which is shared between the aggregations for one request.
pub(crate) bucket_count: Rc<AtomicU32>,
pub(crate) max_bucket_count: u32,
}

impl Default for BucketCount {
fn default() -> Self {
Self {
bucket_count: Default::default(),
max_bucket_count: MAX_BUCKET_COUNT,
}
}
}

impl BucketCount {
pub(crate) fn validate_bucket_count(&self) -> crate::Result<()> {
if self.get_count() > self.max_bucket_count {
return Err(TantivyError::InvalidArgument(
"Aborting aggregation because too many buckets were created".to_string(),
));
}
Ok(())
}
pub(crate) fn add_count(&self, count: u32) {
self.bucket_count
.fetch_add(count, std::sync::atomic::Ordering::Relaxed);
}
pub(crate) fn get_count(&self) -> u32 {
self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
}
}
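Editor's note: the point of `BucketCount` is that one `Rc<AtomicU32>` is cloned into every sub-aggregation, so they all bump the same counter and a single limit check covers the whole request. A standalone sketch of that sharing pattern (generic illustration, not code from this diff):

```rust
use std::rc::Rc;
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let shared = Rc::new(AtomicU32::new(0));
    // Each sub-aggregation would hold one of these clones.
    let per_agg: Vec<Rc<AtomicU32>> = (0..3).map(|_| Rc::clone(&shared)).collect();
    for counter in &per_agg {
        counter.fetch_add(10, Ordering::Relaxed); // each one adds its buckets
    }
    // Every clone observes the global total, so one limit check suffices.
    assert_eq!(shared.load(Ordering::Relaxed), 30);
}
```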

@@ -113,7 +113,7 @@ impl Collector for HistogramCollector {
segment: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
let column_opt = segment.fast_fields().u64_lenient(&self.field)?;
let (column, _column_type) = column_opt.ok_or_else(|| FastFieldNotAvailableError {
let column = column_opt.ok_or_else(|| FastFieldNotAvailableError {
field_name: self.field.clone(),
})?;
let column_u64 = column.first_or_default_col(0u64);
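Editor's note: `first_or_default_col(0u64)` densifies a possibly multi-valued column into a view yielding exactly one value per doc. A minimal access sketch under that assumption (the `histogram_value` helper and the 0.19-era `columnar` API are assumed, not part of the diff):

```rust
// `column_u64` above would be an Arc<dyn ColumnValues<u64>>: first value per
// doc, or the default 0 when the doc has none.
fn histogram_value(column_u64: &dyn columnar::ColumnValues<u64>, doc: tantivy::DocId) -> u64 {
    column_u64.get_val(doc)
}
```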

@@ -106,7 +106,7 @@ mod tweak_score_top_collector;
pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};
mod facet_collector;
pub use self::facet_collector::{FacetCollector, FacetCounts};
use crate::query::Weight;
use crate::query::{for_each_docset, for_each_scorer, Weight};

mod docset_collector;
pub use self::docset_collector::DocSetCollector;
@@ -173,30 +173,32 @@ pub trait Collector: Sync + Send {

match (reader.alive_bitset(), self.requires_scoring()) {
(Some(alive_bitset), true) => {
weight.for_each(reader, &mut |doc, score| {
let mut scorer = weight.scorer(reader, 1.0)?;
for_each_scorer(scorer.as_mut(), |doc, score| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, score);
}
})?;
});
}
(Some(alive_bitset), false) => {
weight.for_each_no_score(reader, &mut |docs| {
for doc in docs.iter().cloned() {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, 0.0);
}
let mut docset = weight.scorer(reader, 1.0)?;
for_each_docset(docset.as_mut(), |doc| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, 0.0);
}
})?;
});
}
(None, true) => {
weight.for_each(reader, &mut |doc, score| {
let mut scorer = weight.scorer(reader, 1.0)?;
for_each_scorer(scorer.as_mut(), |doc, score| {
segment_collector.collect(doc, score);
})?;
});
}
(None, false) => {
weight.for_each_no_score(reader, &mut |docs| {
segment_collector.collect_block(docs);
})?;
let mut docset = weight.scorer(reader, 1.0)?;
for_each_docset(docset.as_mut(), |doc| {
segment_collector.collect(doc, 0.0);
});
}
}
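Editor's note: a `for_each_scorer`-style driver reduces to a plain advance loop over the positioned scorer. A sketch of that shape (assumed; the real helpers live in `crate::query` and may differ in signature):

```rust
use tantivy::query::Scorer;
use tantivy::{DocId, DocSet, Score, TERMINATED};

fn for_each_scorer_sketch<S: Scorer + ?Sized>(
    scorer: &mut S,
    mut callback: impl FnMut(DocId, Score),
) {
    // A freshly built scorer is already positioned on its first doc.
    let mut doc = scorer.doc();
    while doc != TERMINATED {
        callback(doc, scorer.score());
        doc = scorer.advance();
    }
}
```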

@@ -272,13 +274,6 @@ pub trait SegmentCollector: 'static {
/// The query pushes the scored document to the collector via this method.
fn collect(&mut self, doc: DocId, score: Score);

/// The query pushes the scored document to the collector via this method.
fn collect_block(&mut self, docs: &[DocId]) {
for doc in docs {
self.collect(*doc, 0.0);
}
}

/// Extract the fruit of the collection from the `SegmentCollector`.
fn harvest(self) -> Self::Fruit;
}

@@ -155,13 +155,12 @@ impl CustomScorer<u64> for ScorerByField {
//
// The conversion will then happen only on the top-K docs.
let sort_column_opt = segment_reader.fast_fields().u64_lenient(&self.field)?;
let (sort_column, _sort_column_type) =
sort_column_opt.ok_or_else(|| FastFieldNotAvailableError {
let sort_column = sort_column_opt
.ok_or_else(|| FastFieldNotAvailableError {
field_name: self.field.clone(),
})?;
Ok(ScorerByFastFieldReader {
sort_column: sort_column.first_or_default_col(0u64),
})
})?
.first_or_default_col(0u64);
Ok(ScorerByFastFieldReader { sort_column })
}
}

@@ -1031,7 +1030,7 @@ mod tests {
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(SIZE);
let err = top_collector.for_segment(0, segment).err().unwrap();
assert!(matches!(err, crate::TantivyError::InvalidArgument(_)));
assert!(matches!(err, crate::TantivyError::SchemaError(_)));
Ok(())
}

@@ -327,7 +327,7 @@ impl SegmentReader {
self.alive_bitset_opt
.as_ref()
.map(AliveBitSet::space_usage)
.unwrap_or_default(),
.unwrap_or(0),
))
}
}

@@ -172,7 +172,7 @@ impl CompositeFile {
let mut fields = Vec::new();
for (&field_addr, byte_range) in &self.offsets_index {
let mut field_usage = FieldUsage::empty(field_addr.field);
field_usage.add_field_idx(field_addr.idx, byte_range.len().into());
field_usage.add_field_idx(field_addr.idx, byte_range.len());
fields.push(field_usage);
}
PerFieldSpaceUsage::new(fields)

@@ -7,7 +7,7 @@ use std::sync::{Arc, RwLock, Weak};
use std::{fmt, result};

use common::StableDeref;
use fs4::FileExt;
use fs2::FileExt;
use memmap2::Mmap;
use serde::{Deserialize, Serialize};
use tempfile::TempDir;

@@ -9,8 +9,6 @@ use crate::DocId;
/// to compare `[u32; 4]`.
pub const TERMINATED: DocId = i32::MAX as u32;

pub const BUFFER_LEN: usize = 64;

/// Represents an iterable set of sorted doc ids.
pub trait DocSet: Send {
/// Goes to the next element.
@@ -61,7 +59,7 @@ pub trait DocSet: Send {
/// This method is only here for specific high-performance
/// use cases where batching matters. The normal way to
/// go through the `DocId`'s is to call `.advance()`.
fn fill_buffer(&mut self, buffer: &mut [DocId; BUFFER_LEN]) -> usize {
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
if self.doc() == TERMINATED {
return 0;
}
@@ -151,11 +149,6 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
unboxed.seek(target)
}

fn fill_buffer(&mut self, buffer: &mut [DocId; BUFFER_LEN]) -> usize {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.fill_buffer(buffer)
}

fn doc(&self) -> DocId {
let unboxed: &TDocSet = self.borrow();
unboxed.doc()
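Editor's note: draining a `DocSet` through the fixed-size `fill_buffer` variant shown above looks roughly like the following. This is a sketch against the `[DocId; BUFFER_LEN]` side of this change; the local `BUFFER_LEN` mirrors the constant in the hunk and `drain_docset` is a hypothetical helper.

```rust
use tantivy::{DocId, DocSet};

const BUFFER_LEN: usize = 64; // mirrors the constant removed/kept in this hunk

fn drain_docset<D: DocSet>(docset: &mut D, mut on_doc: impl FnMut(DocId)) {
    let mut buffer = [0u32; BUFFER_LEN];
    loop {
        let filled = docset.fill_buffer(&mut buffer);
        for &doc in &buffer[..filled] {
            on_doc(doc);
        }
        if filled < BUFFER_LEN {
            break; // a partial fill means the docset is exhausted
        }
    }
}
```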

@@ -55,7 +55,7 @@ impl fmt::Debug for DataCorruption {
#[derive(Debug, Clone, Error)]
pub enum TantivyError {
/// Error when handling aggregations.
#[error(transparent)]
#[error("AggregationError {0:?}")]
AggregationError(#[from] AggregationError),
/// Failed to open the directory.
#[error("Failed to open the directory: '{0:?}'")]

@@ -1,8 +1,9 @@
use std::io;
use std::io::Write;

use common::{intersect_bitsets, BitSet, ByteCount, OwnedBytes, ReadOnlyBitSet};
use common::{intersect_bitsets, BitSet, OwnedBytes, ReadOnlyBitSet};

use crate::space_usage::ByteCount;
use crate::DocId;

/// Write an alive `BitSet`

@@ -14,8 +14,8 @@
//! Fields have to be declared as `FAST` in the schema.
//! Currently supported fields are: u64, i64, f64, bytes, ip and text.
//!
//! Fast fields are stored with [different codecs](columnar::column_values). The best codec is
//! detected automatically, when serializing.
//! Fast fields are stored with [different codecs](fastfield_codecs). The best codec is detected
//! automatically, when serializing.
//!
//! Read access performance is comparable to that of an array lookup.

@@ -80,7 +80,7 @@ mod tests {
use std::path::Path;

use columnar::{Column, MonotonicallyMappableToU64, StrColumn};
use common::{ByteCount, HasLen, TerminatingWrite};
use common::{HasLen, TerminatingWrite};
use once_cell::sync::Lazy;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
@@ -115,7 +115,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create();
{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
fast_field_writers
.add_document(&doc!(*FIELD=>13u64))
.unwrap();
@@ -130,7 +130,7 @@ mod tests {
}
let file = directory.open_read(path).unwrap();

assert_eq!(file.len(), 95);
assert_eq!(file.len(), 161);
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let column = fast_field_readers
.u64("field")
@@ -148,7 +148,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create();
{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
fast_field_writers
.add_document(&doc!(*FIELD=>4u64))
.unwrap();
@@ -180,7 +180,7 @@ mod tests {
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 123);
assert_eq!(file.len(), 189);
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let col = fast_field_readers
.u64("field")
@@ -203,7 +203,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create();
{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for _ in 0..10_000 {
fast_field_writers
.add_document(&doc!(*FIELD=>100_000u64))
@@ -213,7 +213,7 @@ mod tests {
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 96);
assert_eq!(file.len(), 162);
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let fast_field_reader = fast_field_readers
.u64("field")
@@ -231,7 +231,7 @@ mod tests {

{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
// forcing the amplitude to be high
fast_field_writers
.add_document(&doc!(*FIELD=>0u64))
@@ -245,7 +245,7 @@ mod tests {
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 4491);
assert_eq!(file.len(), 4557);
{
let fast_field_readers = FastFieldReaders::open(file, SCHEMA.clone()).unwrap();
let col = fast_field_readers
@@ -268,7 +268,7 @@ mod tests {
let schema = schema_builder.build();
{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
for i in -100i64..10_000i64 {
let mut doc = Document::default();
doc.add_i64(i64_field, i);
@@ -278,7 +278,7 @@ mod tests {
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 267);
assert_eq!(file.len(), 333_usize);

{
let fast_field_readers = FastFieldReaders::open(file, schema).unwrap();
@@ -310,7 +310,7 @@ mod tests {

{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
let doc = Document::default();
fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
@@ -343,7 +343,7 @@ mod tests {
let schema = schema_builder.build();
{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
let doc = Document::default();
fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
@@ -379,7 +379,7 @@ mod tests {
let directory = RamDirectory::create();
{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x)).unwrap();
}
@@ -759,7 +759,7 @@ mod tests {

{
let mut write: WritePtr = directory.open_write(path).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
fast_field_writers.add_document(&doc!(field=>true)).unwrap();
fast_field_writers
.add_document(&doc!(field=>false))
@@ -772,7 +772,7 @@ mod tests {
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 104);
assert_eq!(file.len(), 175);
let fast_field_readers = FastFieldReaders::open(file, schema).unwrap();
let bool_col = fast_field_readers.bool("field_bool").unwrap();
assert_eq!(bool_col.first(0), Some(true));
@@ -793,7 +793,7 @@ mod tests {

{
let mut write: WritePtr = directory.open_write(path).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
for _ in 0..50 {
fast_field_writers.add_document(&doc!(field=>true)).unwrap();
fast_field_writers
@@ -804,7 +804,7 @@ mod tests {
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 116);
assert_eq!(file.len(), 187);
let readers = FastFieldReaders::open(file, schema).unwrap();
let bool_col = readers.bool("field_bool").unwrap();
for i in 0..25 {
@@ -822,14 +822,14 @@ mod tests {
let schema = schema_builder.build();
{
let mut write: WritePtr = directory.open_write(path).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
let doc = Document::default();
fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 106);
assert_eq!(file.len(), 177);
let fastfield_readers = FastFieldReaders::open(file, schema).unwrap();
let col = fastfield_readers.bool("field_bool").unwrap();
assert_eq!(col.first(0), None);
@@ -849,7 +849,7 @@ mod tests {
let directory: RamDirectory = RamDirectory::create();
{
let mut write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(schema).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(schema);
for doc in docs {
fast_field_writers.add_document(doc).unwrap();
}
@@ -862,16 +862,16 @@ mod tests {
#[test]
pub fn test_gcd_date() {
let size_prec_sec = test_gcd_date_with_codec(DatePrecision::Seconds);
assert!((1000 * 13 / 8..100 + 1000 * 13 / 8).contains(&size_prec_sec.get_bytes())); // 13 bits per val = ceil(log_2(number of seconds in 2hours);
assert!((1000 * 13 / 8..100 + 1000 * 13 / 8).contains(&size_prec_sec)); // 13 bits per val = ceil(log_2(number of seconds in 2hours);
let size_prec_micros = test_gcd_date_with_codec(DatePrecision::Microseconds);
assert!((1000 * 33 / 8..100 + 1000 * 33 / 8).contains(&size_prec_micros.get_bytes()));
assert!((1000 * 33 / 8..100 + 1000 * 33 / 8).contains(&size_prec_micros));
// 33 bits per
// val = ceil(log_2(number
// of microseconds
// in 2hours);
}

fn test_gcd_date_with_codec(precision: DatePrecision) -> ByteCount {
fn test_gcd_date_with_codec(precision: DatePrecision) -> usize {
let mut rng = StdRng::seed_from_u64(2u64);
const T0: i64 = 1_662_345_825_012_529i64;
const ONE_HOUR_IN_MICROSECS: i64 = 3_600 * 1_000_000;
@@ -1068,8 +1068,8 @@ mod tests {
let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0u32);
let fast_fields = segment_reader.fast_fields();
let column_without_opt = fast_fields.str("without.hello");
assert!(column_without_opt.is_err());
let column_without_opt: Option<StrColumn> = fast_fields.str("without.hello").unwrap();
assert!(column_without_opt.is_none());
let column_with_opt: Option<StrColumn> = fast_fields.str("with.hello").unwrap();
let column_with: StrColumn = column_with_opt.unwrap();
assert!(column_with.term_ords(0).next().is_none());
@@ -1166,52 +1166,13 @@ mod tests {
let searcher = index.reader().unwrap().searcher();
let fast_field_reader = searcher.segment_reader(0u32).fast_fields();
let column = fast_field_reader
.column_opt::<i64>("jsonfield.attr.age")
.column_opt::<i64>(&"jsonfield.attr.age")
.unwrap()
.unwrap();
let vals: Vec<i64> = column.values_for_doc(0u32).collect();
assert_eq!(&vals, &[33]);
}

#[test]
fn test_text_fast_field_tokenizer() {
let mut schema_builder = Schema::builder();

let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options(
crate::schema::TextFieldIndexing::default()
.set_index_option(crate::schema::IndexRecordOption::WithFreqs)
.set_tokenizer("raw"),
)
.set_fast(Some("default"))
.set_stored();

let log_field = schema_builder.add_text_field("log_level", text_fieldtype);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
index_writer
.add_document(doc!(log_field => "info"))
.unwrap();
index_writer
.add_document(doc!(log_field => "INFO"))
.unwrap();
index_writer.commit().unwrap();
let searcher = index.reader().unwrap().searcher();
let fast_field_reader = searcher.segment_reader(0u32).fast_fields();

let text_fast_field = fast_field_reader.str("log_level").unwrap().unwrap();
let mut buffer = String::new();
assert!(text_fast_field.ord_to_str(0, &mut buffer).unwrap());
assert_eq!(buffer, "info");
assert!(!text_fast_field.ord_to_str(1, &mut buffer).unwrap());

assert!(text_fast_field.term_ords(0).eq([0].into_iter()));
assert!(text_fast_field.term_ords(1).eq([0].into_iter()));
assert!(text_fast_field.ords().values_for_doc(0u32).eq([0]));
assert!(text_fast_field.ords().values_for_doc(1u32).eq([0]));
}

#[test]
fn test_shadowing_fast_field_with_expand_dots() {
let mut schema_builder = Schema::builder();
@@ -1230,7 +1191,7 @@ mod tests {
let searcher = index.reader().unwrap().searcher();
let fast_field_reader = searcher.segment_reader(0u32).fast_fields();
let column = fast_field_reader
.column_opt::<i64>("jsonfield.attr.age")
.column_opt::<i64>(&"jsonfield.attr.age")
.unwrap()
.unwrap();
let vals: Vec<i64> = column.values_for_doc(0u32).collect();

@@ -6,13 +6,11 @@ use columnar::{
BytesColumn, Column, ColumnType, ColumnValues, ColumnarReader, DynamicColumn,
DynamicColumnHandle, HasAssociatedColumnType, StrColumn,
};
use common::ByteCount;

use crate::core::json_utils::encode_column_name;
use crate::directory::FileSlice;
use crate::schema::{Field, FieldEntry, FieldType, Schema};
use crate::space_usage::{FieldUsage, PerFieldSpaceUsage};
use crate::TantivyError;

/// Provides access to all of the BitpackedFastFieldReader.
///
@@ -30,7 +28,7 @@ impl FastFieldReaders {
Ok(FastFieldReaders { columnar, schema })
}

fn resolve_field(&self, column_name: &str) -> crate::Result<Option<String>> {
fn resolve_field(&self, column_name: &str) -> Option<String> {
let default_field_opt: Option<Field> = if cfg!(feature = "quickwit") {
self.schema.get_field("_dynamic").ok()
} else {
@@ -43,7 +41,7 @@ impl FastFieldReaders {
let mut per_field_usages: Vec<FieldUsage> = Default::default();
for (field, field_entry) in schema.fields() {
let column_handles = self.columnar.read_columns(field_entry.name())?;
let num_bytes: ByteCount = column_handles
let num_bytes: usize = column_handles
.iter()
.map(|column_handle| column_handle.num_bytes())
.sum();
@@ -84,35 +82,27 @@ impl FastFieldReaders {
&'a self,
field_name: &'a str,
default_field_opt: Option<Field>,
) -> crate::Result<Option<String>> {
let Some((field, path)): Option<(Field, &str)> = self
) -> Option<String> {
let (field, path): (Field, &str) = self
.schema
.find_field(field_name)
.or_else(|| default_field_opt.map(|default_field| (default_field, field_name)))
else{
return Ok(None);
};
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
if !field_entry.is_fast() {
return Err(TantivyError::InvalidArgument(format!(
"Field {field_name:?} is not configured as fast field"
)));
}
.or_else(|| default_field_opt.map(|default_field| (default_field, field_name)))?;
let field_name = self.schema.get_field_name(field);
if path.is_empty() {
return Ok(Some(field_name.to_string()));
return Some(field_name.to_string());
}
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type();
match (field_type, path) {
(FieldType::JsonObject(json_options), path) if !path.is_empty() => {
Ok(Some(encode_column_name(
Some(encode_column_name(
field_entry.name(),
path,
json_options.is_expand_dots_enabled(),
)))
))
}
(_, "") => Ok(Some(field_entry.name().to_string())),
_ => Ok(None),
(_, "") => Some(field_entry.name().to_string()),
_ => None,
}
}

@@ -137,9 +127,9 @@ impl FastFieldReaders {
/// Returns the number of `bytes` associated with a column.
///
/// Returns 0 if the column does not exist.
pub fn column_num_bytes(&self, field: &str) -> crate::Result<ByteCount> {
let Some(resolved_field_name) = self.resolve_field(field)? else {
return Ok(0u64.into());
pub fn column_num_bytes(&self, field: &str) -> crate::Result<usize> {
let Some(resolved_field_name) = self.resolve_field(field) else {
return Ok(0);
};
Ok(self
.columnar
@@ -226,7 +216,7 @@ impl FastFieldReaders {
field_name: &str,
column_type: ColumnType,
) -> crate::Result<Option<DynamicColumnHandle>> {
let Some(resolved_field_name) = self.resolve_field(field_name)? else {
let Some(resolved_field_name) = self.resolve_field(field_name) else {
return Ok(None);
};
let dynamic_column_handle_opt = self
@@ -242,7 +232,7 @@ impl FastFieldReaders {
&self,
field_name: &str,
) -> crate::Result<Vec<DynamicColumnHandle>> {
let Some(resolved_field_name) = self.resolve_field(field_name)? else {
let Some(resolved_field_name) = self.resolve_field(field_name) else {
return Ok(Vec::new());
};
let columns = self
@@ -252,25 +242,24 @@ impl FastFieldReaders {
Ok(columns)
}

/// Returns the `u64` column used to represent any `u64`-mapped type (String/Bytes term ids,
/// i64, u64, f64, DateTime).
///
/// Returns Ok(None) for empty columns
/// Returns the `u64` column used to represent any `u64`-mapped type (i64, u64, f64, DateTime).
#[doc(hidden)]
pub fn u64_lenient_for_type(
pub fn u64_lenient(&self, field_name: &str) -> crate::Result<Option<Column<u64>>> {
Ok(self
.u64_lenient_with_type(field_name)?
.map(|(u64_column, _)| u64_column))
}

/// Returns the `u64` column used to represent any `u64`-mapped type (i64, u64, f64, DateTime).
#[doc(hidden)]
pub fn u64_lenient_with_type(
&self,
type_white_list_opt: Option<&[ColumnType]>,
field_name: &str,
) -> crate::Result<Option<(Column<u64>, ColumnType)>> {
let Some(resolved_field_name) = self.resolve_field(field_name)? else {
let Some(resolved_field_name) = self.resolve_field(field_name) else {
return Ok(None);
};
for col in self.columnar.read_columns(&resolved_field_name)? {
if let Some(type_white_list) = type_white_list_opt {
if !type_white_list.contains(&col.column_type()) {
continue;
}
}
if let Some(col_u64) = col.open_u64_lenient()? {
return Ok(Some((col_u64, col.column_type())));
}
@@ -278,17 +267,6 @@ impl FastFieldReaders {
Ok(None)
}

/// Returns the `u64` column used to represent any `u64`-mapped type (i64, u64, f64, DateTime).
///
/// Returns Ok(None) for empty columns
#[doc(hidden)]
pub fn u64_lenient(
&self,
field_name: &str,
) -> crate::Result<Option<(Column<u64>, ColumnType)>> {
self.u64_lenient_for_type(None, field_name)
}
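Editor's note: a hedged sketch of reading any numeric fast field through the lenient u64 view, assuming the tuple-returning `u64_lenient` signature shown on this side of the change (the `first_val` helper is hypothetical):

```rust
use columnar::ColumnType;
use tantivy::SegmentReader;

fn first_val(reader: &SegmentReader, field: &str) -> tantivy::Result<Option<(u64, ColumnType)>> {
    let Some((column, col_type)) = reader.fast_fields().u64_lenient(field)? else {
        return Ok(None);
    };
    // First value of doc 0, still in the monotonic u64 representation.
    Ok(column.first(0u32).map(|val| (val, col_type)))
}
```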

/// Returns the `i64` fast field reader associated with `field`.
///
/// If `field` is not an i64 fast field, this method returns an Error.
@@ -338,57 +316,44 @@ mod tests {
let reader = searcher.segment_reader(0u32);
let fast_field_readers = reader.fast_fields();
assert_eq!(
fast_field_readers
.resolve_column_name_given_default_field("age", None)
.unwrap(),
fast_field_readers.resolve_column_name_given_default_field("age", None),
Some("age".to_string())
);
assert_eq!(
fast_field_readers
.resolve_column_name_given_default_field("age", Some(dynamic_field))
.unwrap(),
fast_field_readers.resolve_column_name_given_default_field("age", Some(dynamic_field)),
Some("age".to_string())
);
assert_eq!(
fast_field_readers
.resolve_column_name_given_default_field(
"json_expand_dots_disabled.attr.color",
None
)
.unwrap(),
fast_field_readers.resolve_column_name_given_default_field(
"json_expand_dots_disabled.attr.color",
None
),
Some("json_expand_dots_disabled\u{1}attr\u{1}color".to_string())
);
assert_eq!(
fast_field_readers
.resolve_column_name_given_default_field(
"json_expand_dots_disabled.attr\\.color",
Some(dynamic_field)
)
.unwrap(),
fast_field_readers.resolve_column_name_given_default_field(
"json_expand_dots_disabled.attr\\.color",
Some(dynamic_field)
),
Some("json_expand_dots_disabled\u{1}attr.color".to_string())
);
assert_eq!(
fast_field_readers
.resolve_column_name_given_default_field(
"json_expand_dots_enabled.attr\\.color",
Some(dynamic_field)
)
.unwrap(),
fast_field_readers.resolve_column_name_given_default_field(
"json_expand_dots_enabled.attr\\.color",
Some(dynamic_field)
),
Some("json_expand_dots_enabled\u{1}attr\u{1}color".to_string())
);
assert_eq!(
fast_field_readers
.resolve_column_name_given_default_field("notinschema.attr.color", None)
.unwrap(),
.resolve_column_name_given_default_field("notinschema.attr.color", None),
None
);
assert_eq!(
fast_field_readers
.resolve_column_name_given_default_field(
"notinschema.attr.color",
Some(dynamic_field)
)
.unwrap(),
fast_field_readers.resolve_column_name_given_default_field(
"notinschema.attr.color",
Some(dynamic_field)
),
Some("_dyna\u{1}notinschema\u{1}attr\u{1}color".to_string())
);
}
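Editor's note: the expected strings in this test reveal the column-name encoding: JSON sub-paths are joined to the field name with the `\u{1}` separator, and `\.` escapes a literal dot when expand_dots is disabled. A standalone sketch of the joining step (assumed behavior; the real `encode_column_name` also handles dot-expansion and escaping):

```rust
fn encode_column_name_sketch(field: &str, path_segments: &[&str]) -> String {
    let mut name = String::from(field);
    for segment in path_segments {
        name.push('\u{1}'); // non-printable separator, cannot clash with field names
        name.push_str(segment);
    }
    name
}

fn main() {
    assert_eq!(
        encode_column_name_sketch("json_expand_dots_disabled", &["attr", "color"]),
        "json_expand_dots_disabled\u{1}attr\u{1}color"
    );
}
```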
|
||||
|
||||
@@ -2,13 +2,11 @@ use std::io;

use columnar::{ColumnarWriter, NumericalValue};
use common::replace_in_place;
use tokenizer_api::Token;

use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::term::{JSON_PATH_SEGMENT_SEP, JSON_PATH_SEGMENT_SEP_STR};
use crate::schema::{value_type_to_column_type, Document, FieldType, Schema, Type, Value};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::{DatePrecision, DocId, TantivyError};
use crate::{DatePrecision, DocId};

/// Only index JSON down to a depth of 20.
/// This is mostly to guard us from a stack overflow triggered by malicious input.
@@ -17,8 +15,7 @@ const JSON_DEPTH_LIMIT: usize = 20;
/// The `FastFieldsWriter` groups all of the fast field writers.
pub struct FastFieldsWriter {
columnar_writer: ColumnarWriter,
fast_field_names: Vec<Option<String>>, //< TODO see if we can hash the field name hash too.
per_field_tokenizer: Vec<Option<TextAnalyzer>>,
fast_field_names: Vec<Option<String>>, //< TODO see if we can cash the field name hash too.
date_precisions: Vec<DatePrecision>,
expand_dots: Vec<bool>,
num_docs: DocId,
@@ -28,25 +25,14 @@ pub struct FastFieldsWriter {

impl FastFieldsWriter {
/// Create all `FastFieldWriter` required by the schema.
#[cfg(test)]
pub fn from_schema(schema: &Schema) -> crate::Result<FastFieldsWriter> {
FastFieldsWriter::from_schema_and_tokenizer_manager(&schema, TokenizerManager::new())
}

/// Create all `FastFieldWriter` required by the schema.
pub fn from_schema_and_tokenizer_manager(
schema: &Schema,
tokenizer_manager: TokenizerManager,
) -> crate::Result<FastFieldsWriter> {
pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
let mut columnar_writer = ColumnarWriter::default();

let mut fast_field_names: Vec<Option<String>> = vec![None; schema.num_fields()];
let mut date_precisions: Vec<DatePrecision> =
std::iter::repeat_with(DatePrecision::default)
.take(schema.num_fields())
.collect();
let mut expand_dots = vec![false; schema.num_fields()];
let mut per_field_tokenizer = vec![None; schema.num_fields()];
// TODO see other types
for (field_id, field_entry) in schema.fields() {
if !field_entry.field_type().is_fast() {
@@ -61,18 +47,6 @@ impl FastFieldsWriter {
expand_dots[field_id.field_id() as usize] =
json_object_options.is_expand_dots_enabled();
}
if let FieldType::Str(text_options) = field_entry.field_type() {
if let Some(tokenizer_name) = text_options.get_fast_field_tokenizer_name() {
let text_analyzer = tokenizer_manager.get(tokenizer_name).ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"Tokenizer {:?} not found",
tokenizer_name
))
})?;
per_field_tokenizer[field_id.field_id() as usize] = Some(text_analyzer);
}
}

let sort_values_within_row = value_type == Type::Facet;
if let Some(column_type) = value_type_to_column_type(value_type) {
columnar_writer.record_column_type(
@@ -82,15 +56,14 @@ impl FastFieldsWriter {
);
}
}
Ok(FastFieldsWriter {
FastFieldsWriter {
columnar_writer,
fast_field_names,
per_field_tokenizer,
num_docs: 0u32,
date_precisions,
expand_dots,
json_path_buffer: String::new(),
})
}
}

/// The memory used (inclusive childs)
@@ -138,35 +111,14 @@ impl FastFieldsWriter {
);
}
Value::Str(text_val) => {
if let Some(text_analyzer) =
&self.per_field_tokenizer[field_value.field().field_id() as usize]
{
let mut token_stream = text_analyzer.token_stream(text_val);
token_stream.process(&mut |token: &Token| {
self.columnar_writer.record_str(
doc_id,
field_name.as_str(),
&token.text,
);
})
} else {
self.columnar_writer
.record_str(doc_id, field_name.as_str(), text_val);
}
self.columnar_writer
.record_str(doc_id, field_name.as_str(), text_val);
}
Value::Bytes(bytes_val) => {
self.columnar_writer
.record_bytes(doc_id, field_name.as_str(), bytes_val);
}
Value::PreTokStr(pre_tok) => {
for token in &pre_tok.tokens {
self.columnar_writer.record_str(
doc_id,
field_name.as_str(),
&token.text,
);
}
}
Value::PreTokStr(_) => todo!(),
Value::Bool(bool_val) => {
self.columnar_writer
.record_bool(doc_id, field_name.as_str(), *bool_val);
@@ -94,12 +94,10 @@ fn compute_deleted_bitset(
// document that were inserted before it.
delete_op
.target
.for_each_no_score(segment_reader, &mut |docs_matching_delete_query| {
for doc_matching_delete_query in docs_matching_delete_query.iter().cloned() {
if doc_opstamps.is_deleted(doc_matching_delete_query, delete_op.opstamp) {
alive_bitset.remove(doc_matching_delete_query);
might_have_changed = true;
}
.for_each_no_score(segment_reader, &mut |doc_matching_delete_query| {
if doc_opstamps.is_deleted(doc_matching_delete_query, delete_op.opstamp) {
alive_bitset.remove(doc_matching_delete_query);
might_have_changed = true;
}
})?;
delete_cursor.advance();
@@ -1640,7 +1638,6 @@ mod tests {
.add_ip_addr_field("ips", IpAddrOptions::default().set_fast().set_indexed());
let i64_field = schema_builder.add_i64_field("i64", INDEXED);
let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
let id_opt_field = schema_builder.add_u64_field("id_opt", FAST | INDEXED | STORED);
let f64_field = schema_builder.add_f64_field("f64", INDEXED);
let date_field = schema_builder.add_date_field("date", INDEXED);
let bytes_field = schema_builder.add_bytes_field("bytes", FAST | INDEXED | STORED);
@@ -1671,7 +1668,7 @@ mod tests {
let settings = if sort_index {
IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id_opt".to_string(),
field: "id".to_string(),
order: Order::Asc,
}),
..Default::default()
@@ -1690,7 +1687,7 @@ mod tests {

let old_reader = index.reader()?;

let id_exists = |id| id % 3 != 0; // 0 does not exist
let ip_exists = |id| id % 3 != 0; // 0 does not exist

let multi_text_field_text1 = "test1 test2 test3 test1 test2 test3";
// rotate left
@@ -1706,15 +1703,28 @@ mod tests {
let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
let ip = ip_from_id(id);

if !id_exists(id) {
if !ip_exists(id) {
// every 3rd doc has no ip field
index_writer.add_document(doc!(
id_field=>id,
index_writer.add_document(doc!(id_field=>id,
bytes_field => id.to_le_bytes().as_slice(),
multi_numbers=> id,
multi_numbers => id,
bool_field => (id % 2u64) != 0,
i64_field => id as i64,
f64_field => id as f64,
date_field => DateTime::from_timestamp_secs(id as i64),
multi_bools => (id % 2u64) != 0,
multi_bools => (id % 2u64) == 0,
text_field => id.to_string(),
facet_field => facet,
large_text_field => LOREM,
multi_text_fields => multi_text_field_text1,
multi_text_fields => multi_text_field_text2,
multi_text_fields => multi_text_field_text3,
))?;
} else {
index_writer.add_document(doc!(id_field=>id,
bytes_field => id.to_le_bytes().as_slice(),
id_opt_field => id,
ip_field => ip,
ips_field => ip,
ips_field => ip,
@@ -1823,13 +1833,6 @@ mod tests {
.values()
.map(|id_occurrences| *id_occurrences as usize)
.sum::<usize>();

let num_docs_with_values = expected_ids_and_num_occurrences
.iter()
.filter(|(id, _id_occurrences)| id_exists(**id))
.map(|(_, id_occurrences)| *id_occurrences as usize)
.sum::<usize>();

assert_eq!(searcher.num_docs() as usize, num_docs_expected);
assert_eq!(old_searcher.num_docs() as usize, num_docs_expected);
assert_eq!(
@@ -1850,7 +1853,7 @@ mod tests {
if force_end_merge && num_segments_before_merge > 1 && num_segments_after_merge == 1 {
let mut expected_multi_ips: Vec<_> = id_list
.iter()
.filter(|id| id_exists(**id))
.filter(|id| ip_exists(**id))
.flat_map(|id| vec![ip_from_id(*id), ip_from_id(*id)])
.collect();
assert_eq!(num_ips, expected_multi_ips.len() as u32);
@@ -1888,7 +1891,7 @@ mod tests {
let expected_ips = expected_ids_and_num_occurrences
.keys()
.flat_map(|id| {
if !id_exists(*id) {
if !ip_exists(*id) {
None
} else {
Some(Ipv6Addr::from_u128(*id as u128))
@@ -1900,7 +1903,7 @@ mod tests {
let expected_ips = expected_ids_and_num_occurrences
.keys()
.filter_map(|id| {
if !id_exists(*id) {
if !ip_exists(*id) {
None
} else {
Some(Ipv6Addr::from_u128(*id as u128))
@@ -1932,25 +1935,16 @@ mod tests {
.unwrap()
.unwrap();
for doc in segment_reader.doc_ids_alive() {
let id = id_reader.first(doc).unwrap();

let vals: Vec<u64> = ff_reader.values_for_doc(doc).collect();
if id_exists(id) {
assert_eq!(vals.len(), 2);
assert_eq!(vals[0], vals[1]);
assert!(expected_ids_and_num_occurrences.contains_key(&vals[0]));
assert_eq!(id_reader.first(doc), Some(vals[0]));
} else {
assert_eq!(vals.len(), 0);
}
assert_eq!(vals.len(), 2);
assert_eq!(vals[0], vals[1]);
assert_eq!(id_reader.first(doc), Some(vals[0]));

let bool_vals: Vec<bool> = bool_ff_reader.values_for_doc(doc).collect();
if id_exists(id) {
assert_eq!(bool_vals.len(), 2);
assert_ne!(bool_vals[0], bool_vals[1]);
} else {
assert_eq!(bool_vals.len(), 0);
}
assert_eq!(bool_vals.len(), 2);
assert_ne!(bool_vals[0], bool_vals[1]);

assert!(expected_ids_and_num_occurrences.contains_key(&vals[0]));
}
}
@@ -1974,28 +1968,26 @@ mod tests {
.as_u64()
.unwrap();
assert!(expected_ids_and_num_occurrences.contains_key(&id));
if id_exists(id) {
let id2 = store_reader
.get(doc_id)
.unwrap()
.get_first(multi_numbers)
.unwrap()
.as_u64()
.unwrap();
assert_eq!(id, id2);
let bool = store_reader
.get(doc_id)
.unwrap()
.get_first(bool_field)
.unwrap()
.as_bool()
.unwrap();
let doc = store_reader.get(doc_id).unwrap();
let mut bool2 = doc.get_all(multi_bools);
assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_eq!(None, bool2.next())
}
let id2 = store_reader
.get(doc_id)
.unwrap()
.get_first(multi_numbers)
.unwrap()
.as_u64()
.unwrap();
assert_eq!(id, id2);
let bool = store_reader
.get(doc_id)
.unwrap()
.get_first(bool_field)
.unwrap()
.as_bool()
.unwrap();
let doc = store_reader.get(doc_id).unwrap();
let mut bool2 = doc.get_all(multi_bools);
assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_eq!(None, bool2.next())
}
}
// test search
@@ -2017,25 +2009,22 @@ mod tests {
top_docs.iter().map(|el| el.1).collect::<Vec<_>>()
};

for (id, count) in &expected_ids_and_num_occurrences {
let (existing_id, count) = (*id, *count);
for (existing_id, count) in &expected_ids_and_num_occurrences {
let (existing_id, count) = (*existing_id, *count);
let get_num_hits = |field| do_search(&existing_id.to_string(), field).len() as u64;
assert_eq!(get_num_hits(id_field), count);
if !id_exists(existing_id) {
continue;
}
assert_eq!(get_num_hits(text_field), count);
assert_eq!(get_num_hits(i64_field), count);
assert_eq!(get_num_hits(f64_field), count);
assert_eq!(get_num_hits(id_field), count);

// Test multi text
assert_eq!(
do_search("\"test1 test2\"", multi_text_fields).len(),
num_docs_with_values
num_docs_expected
);
assert_eq!(
do_search("\"test2 test3\"", multi_text_fields).len(),
num_docs_with_values
num_docs_expected
);

// Test bytes
@@ -2071,7 +2060,7 @@ mod tests {
//
for (existing_id, count) in &expected_ids_and_num_occurrences {
let (existing_id, count) = (*existing_id, *count);
if !id_exists(existing_id) {
if !ip_exists(existing_id) {
continue;
}
let do_search_ip_field = |term: &str| do_search(term, ip_field).len() as u64;
@@ -2092,7 +2081,7 @@ mod tests {
//
for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
let (existing_id, count) = (*existing_id, *count);
if !id_exists(existing_id) {
if !ip_exists(existing_id) {
continue;
}
let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
@@ -2115,7 +2104,7 @@ mod tests {
//
for (existing_id, count) in expected_ids_and_num_occurrences.iter().take(10) {
let (existing_id, count) = (*existing_id, *count);
if !id_exists(existing_id) {
if !ip_exists(existing_id) {
continue;
}
let gen_query_inclusive = |field: &str, from: Ipv6Addr, to: Ipv6Addr| {
@@ -2140,61 +2129,23 @@ mod tests {
.fast_fields()
.u64("id")
.unwrap()
.first_or_default_col(9999);
.first_or_default_col(0);
for doc_id in segment_reader.doc_ids_alive() {
let id = ff_reader.get_val(doc_id);
if !id_exists(id) {
continue;
}
let facet_ords: Vec<u64> = facet_reader.facet_ords(doc_id).collect();
assert_eq!(facet_ords.len(), 1);
let mut facet = Facet::default();
facet_reader
.facet_from_ord(facet_ords[0], &mut facet)
.unwrap();
let id = ff_reader.get_val(doc_id);
let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string()));

assert_eq!(facet, facet_expected);
}
}

// Test if index property is in sort order
if sort_index {
// load all id_opt in each segment and check they are in order

for reader in searcher.segment_readers() {
let (ff_reader, _) = reader.fast_fields().u64_lenient("id_opt").unwrap().unwrap();
let mut ids_in_segment: Vec<u64> = Vec::new();

for doc in 0..reader.num_docs() {
ids_in_segment.extend(ff_reader.values_for_doc(doc));
}

assert!(is_sorted(&ids_in_segment));

fn is_sorted<T>(data: &[T]) -> bool
where T: Ord {
data.windows(2).all(|w| w[0] <= w[1])
}
}
}
Ok(index)
}

#[test]
fn test_sort_index_on_opt_field_regression() {
assert!(test_operation_strategy(
&[
IndexingOp::AddDoc { id: 81 },
IndexingOp::AddDoc { id: 70 },
IndexingOp::DeleteDoc { id: 70 }
],
true,
false
)
.is_ok());
}

#[test]
fn test_ip_range_query_multivalue_bug() {
assert!(test_operation_strategy(
@@ -171,7 +171,7 @@ mod tests {
index_writer.set_merge_policy(Box::new(log_merge_policy));

// after every commit the merge checker is started, it will merge only segments with 1
// element in it because of the max_docs_before_merge.
// element in it because of the max_merge_size.
index_writer.add_document(doc!(int_field=>1_u64))?;
index_writer.commit()?;

@@ -306,7 +306,7 @@ impl IndexMerger {
sort_by_field: &IndexSortByField,
) -> crate::Result<Arc<dyn ColumnValues>> {
reader.schema().get_field(&sort_by_field.field)?;
let (value_accessor, _column_type) = reader
let value_accessor = reader
.fast_fields()
.u64_lenient(&sort_by_field.field)?
.ok_or_else(|| FastFieldNotAvailableError {
@@ -111,10 +111,7 @@ impl SegmentWriter {
per_field_postings_writers,
fieldnorms_writer: FieldNormsWriter::for_schema(&schema),
segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema_and_tokenizer_manager(
&schema,
tokenizer_manager,
)?,
fast_field_writers: FastFieldsWriter::from_schema(&schema),
doc_opstamps: Vec::with_capacity(1_000),
per_field_text_analyzers,
term_buffer: Term::with_capacity(16),
@@ -191,7 +188,7 @@ impl SegmentWriter {
let mut indexing_position = IndexingPosition::default();
postings_writer.index_text(
doc_id,
&mut facet_tokenizer,
&mut *facet_tokenizer,
term_buffer,
ctx,
&mut indexing_position,
@@ -1,5 +1,5 @@
use crate::core::SegmentReader;
use crate::docset::{DocSet, BUFFER_LEN, TERMINATED};
use crate::docset::{DocSet, TERMINATED};
use crate::query::boost_query::BoostScorer;
use crate::query::explanation::does_not_match;
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
@@ -44,7 +44,6 @@ pub struct AllScorer {
}

impl DocSet for AllScorer {
#[inline(always)]
fn advance(&mut self) -> DocId {
if self.doc + 1 >= self.max_doc {
self.doc = TERMINATED;
@@ -54,30 +53,6 @@ impl DocSet for AllScorer {
self.doc
}

fn fill_buffer(&mut self, buffer: &mut [DocId; BUFFER_LEN]) -> usize {
if self.doc() == TERMINATED {
return 0;
}
let is_safe_distance = self.doc() + (buffer.len() as u32) < self.max_doc;
if is_safe_distance {
let num_items = buffer.len();
for buffer_val in buffer {
*buffer_val = self.doc();
self.doc += 1;
}
num_items
} else {
for (i, buffer_val) in buffer.iter_mut().enumerate() {
*buffer_val = self.doc();
if self.advance() == TERMINATED {
return i + 1;
}
}
buffer.len()
}
}

#[inline(always)]
fn doc(&self) -> DocId {
self.doc
}
@@ -96,8 +71,8 @@ impl Scorer for AllScorer {
#[cfg(test)]
mod tests {
use super::AllQuery;
use crate::docset::{DocSet, BUFFER_LEN, TERMINATED};
use crate::query::{AllScorer, EnableScoring, Query};
use crate::docset::TERMINATED;
use crate::query::{EnableScoring, Query};
use crate::schema::{Schema, TEXT};
use crate::Index;

@@ -157,22 +132,4 @@ mod tests {
}
Ok(())
}

#[test]
pub fn test_fill_buffer() {
let mut postings = AllScorer {
doc: 0u32,
max_doc: BUFFER_LEN as u32 * 2 + 9,
};
let mut buffer = [0u32; BUFFER_LEN];
assert_eq!(postings.fill_buffer(&mut buffer), BUFFER_LEN);
for i in 0u32..BUFFER_LEN as u32 {
assert_eq!(buffer[i as usize], i);
}
assert_eq!(postings.fill_buffer(&mut buffer), BUFFER_LEN);
for i in 0u32..BUFFER_LEN as u32 {
assert_eq!(buffer[i as usize], i + BUFFER_LEN as u32);
}
assert_eq!(postings.fill_buffer(&mut buffer), 9);
}
}
@@ -45,7 +45,6 @@ impl From<BitSet> for BitSetDocSet {
}

impl DocSet for BitSetDocSet {
#[inline]
fn advance(&mut self) -> DocId {
if let Some(lower) = self.cursor_tinybitset.pop_lowest() {
self.doc = (self.cursor_bucket * 64u32) | lower;
@@ -1,12 +1,11 @@
use std::collections::HashMap;

use crate::core::SegmentReader;
use crate::docset::BUFFER_LEN;
use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
use crate::query::term_query::TermScorer;
use crate::query::weight::{for_each_docset_buffered, for_each_pruning_scorer, for_each_scorer};
use crate::query::weight::{for_each_docset, for_each_pruning_scorer, for_each_scorer};
use crate::query::{
intersect_scorers, EmptyScorer, Exclude, Explanation, Occur, RequiredOptionalScorer, Scorer,
Union, Weight,
@@ -223,18 +222,16 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
let mut buffer = [0u32; BUFFER_LEN];

match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = Union::build(term_scorers, &self.score_combiner_fn);
for_each_docset_buffered(&mut union_scorer, &mut buffer, callback);
for_each_docset(&mut union_scorer, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_docset_buffered(scorer.as_mut(), &mut buffer, callback);
for_each_docset(scorer.as_mut(), callback);
}
}
Ok(())
@@ -1,6 +1,5 @@
use std::fmt;

use crate::docset::BUFFER_LEN;
use crate::fastfield::AliveBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
@@ -107,7 +106,7 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
self.underlying.seek(target)
}

fn fill_buffer(&mut self, buffer: &mut [DocId; BUFFER_LEN]) -> usize {
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.underlying.fill_buffer(buffer)
}

@@ -1,6 +1,5 @@
use std::fmt;

use crate::docset::BUFFER_LEN;
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError, Term};

@@ -120,7 +119,7 @@ impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
self.docset.seek(target)
}

fn fill_buffer(&mut self, buffer: &mut [DocId; BUFFER_LEN]) -> usize {
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.docset.fill_buffer(buffer)
}

@@ -65,6 +65,7 @@ pub use self::union::Union;
#[cfg(test)]
pub use self::vec_docset::VecDocSet;
pub use self::weight::Weight;
pub(crate) use self::weight::{for_each_docset, for_each_pruning_scorer, for_each_scorer};

#[cfg(test)]
mod tests {

@@ -4,9 +4,7 @@ use std::collections::{BinaryHeap, HashMap};
use crate::query::bm25::idf;
use crate::query::{BooleanQuery, BoostQuery, Occur, Query, TermQuery};
use crate::schema::{Field, FieldType, IndexRecordOption, Term, Value};
use crate::tokenizer::{
BoxTokenStream, FacetTokenizer, PreTokenizedStream, TokenStream, Tokenizer,
};
use crate::tokenizer::{BoxTokenStream, FacetTokenizer, PreTokenizedStream, Tokenizer};
use crate::{DocAddress, Result, Searcher, TantivyError};

#[derive(Debug, PartialEq)]

@@ -913,10 +913,9 @@ mod test {
let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register(
"en_with_stop_words",
TextAnalyzer::builder(SimpleTokenizer)
TextAnalyzer::from(SimpleTokenizer)
.filter(LowerCaser)
.filter(StopWordFilter::remove(vec!["the".to_string()]))
.build(),
.filter(StopWordFilter::remove(vec!["the".to_string()])),
);
QueryParser::new(schema, default_fields, tokenizer_manager)
}
@@ -33,7 +33,7 @@ impl FastFieldRangeWeight {
impl Weight for FastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let fast_field_reader = reader.fast_fields();
let Some((column, _)) = fast_field_reader.u64_lenient(&self.field)? else {
let Some(column) = fast_field_reader.u64_lenient(&self.field)? else {
return Ok(Box::new(EmptyScorer));
};
let value_range = bound_to_value_range(
@@ -1,11 +1,11 @@
use super::term_scorer::TermScorer;
use crate::core::SegmentReader;
use crate::docset::{DocSet, BUFFER_LEN};
use crate::docset::DocSet;
use crate::fieldnorm::FieldNormReader;
use crate::postings::SegmentPostings;
use crate::query::bm25::Bm25Weight;
use crate::query::explanation::does_not_match;
use crate::query::weight::{for_each_docset_buffered, for_each_scorer};
use crate::query::weight::{for_each_docset, for_each_scorer};
use crate::query::{Explanation, Scorer, Weight};
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, Term};
@@ -61,11 +61,10 @@ impl Weight for TermWeight {
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let mut scorer = self.specialized_scorer(reader, 1.0)?;
let mut buffer = [0u32; BUFFER_LEN];
for_each_docset_buffered(&mut scorer, &mut buffer, callback);
for_each_docset(&mut scorer, callback);
Ok(())
}

@@ -53,7 +53,7 @@ impl HasLen for VecDocSet {
pub mod tests {

use super::*;
use crate::docset::{DocSet, BUFFER_LEN};
use crate::docset::DocSet;
use crate::DocId;

#[test]
@@ -72,17 +72,17 @@ pub mod tests {

#[test]
pub fn test_fill_buffer() {
let doc_ids: Vec<DocId> = (1u32..=(BUFFER_LEN as u32 * 2 + 9)).collect();
let doc_ids: Vec<DocId> = (1u32..210u32).collect();
let mut postings = VecDocSet::from(doc_ids);
let mut buffer = [0u32; BUFFER_LEN];
assert_eq!(postings.fill_buffer(&mut buffer), BUFFER_LEN);
for i in 0u32..BUFFER_LEN as u32 {
let mut buffer = vec![1000u32; 100];
assert_eq!(postings.fill_buffer(&mut buffer[..]), 100);
for i in 0u32..100u32 {
assert_eq!(buffer[i as usize], i + 1);
}
assert_eq!(postings.fill_buffer(&mut buffer), BUFFER_LEN);
for i in 0u32..BUFFER_LEN as u32 {
assert_eq!(buffer[i as usize], i + 1 + BUFFER_LEN as u32);
assert_eq!(postings.fill_buffer(&mut buffer[..]), 100);
for i in 0u32..100u32 {
assert_eq!(buffer[i as usize], i + 101);
}
assert_eq!(postings.fill_buffer(&mut buffer), 9);
assert_eq!(postings.fill_buffer(&mut buffer[..]), 9);
}
}
@@ -1,14 +1,14 @@
use super::Scorer;
use crate::core::SegmentReader;
use crate::docset::BUFFER_LEN;
use crate::query::Explanation;
use crate::{DocId, DocSet, Score, TERMINATED};

/// Iterates through all of the documents and scores matched by the DocSet
/// `DocSet`.
#[inline]
pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
callback: &mut dyn FnMut(DocId, Score),
mut callback: impl FnMut(DocId, Score),
) {
let mut doc = scorer.doc();
while doc != TERMINATED {
@@ -20,17 +20,11 @@ pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
/// Iterates through all of the documents matched by the DocSet
/// `DocSet`.
#[inline]
pub(crate) fn for_each_docset_buffered<T: DocSet + ?Sized>(
docset: &mut T,
buffer: &mut [DocId; BUFFER_LEN],
mut callback: impl FnMut(&[DocId]),
) {
loop {
let num_items = docset.fill_buffer(buffer);
callback(&buffer[..num_items]);
if num_items != buffer.len() {
break;
}
pub(crate) fn for_each_docset<T: DocSet + ?Sized>(docset: &mut T, mut callback: impl FnMut(DocId)) {
let mut doc = docset.doc();
while doc != TERMINATED {
callback(doc);
doc = docset.advance();
}
}

@@ -44,6 +38,7 @@ pub(crate) fn for_each_docset_buffered<T: DocSet + ?Sized>(
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
#[inline]
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
@@ -101,12 +96,10 @@ pub trait Weight: Send + Sync + 'static {
fn for_each_no_score(
&self,
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
callback: &mut dyn FnMut(DocId),
) -> crate::Result<()> {
let mut docset = self.scorer(reader, 1.0)?;

let mut buffer = [0u32; BUFFER_LEN];
for_each_docset_buffered(&mut docset, &mut buffer, callback);
for_each_docset(docset.as_mut(), callback);
Ok(())
}

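The buffered default shown in this hunk delivers matching doc ids to the callback one slice at a time instead of one `DocId` per call. A minimal consumer sketch, assuming a `weight` and `segment_reader` have already been built (both hypothetical here):

fn count_matches(
    weight: &dyn tantivy::query::Weight,
    segment_reader: &tantivy::SegmentReader,
) -> tantivy::Result<usize> {
    let mut count = 0usize;
    // Each batch holds at most BUFFER_LEN doc ids; a short final batch
    // means the underlying docset is exhausted.
    weight.for_each_no_score(segment_reader, &mut |docs: &[tantivy::DocId]| {
        count += docs.len();
    })?;
    Ok(count)
}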
@@ -16,53 +16,13 @@ pub struct TextOptions {
#[serde(default)]
stored: bool,
#[serde(default)]
fast: FastFieldOptions,
fast: bool,
#[serde(default)]
#[serde(skip_serializing_if = "is_false")]
/// coerce values into string if they are not of type string
/// coerce values if they are not of type string
coerce: bool,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
enum FastFieldOptions {
IsEnabled(bool),
EnabledWithTokenizer { with_tokenizer: TokenizerName },
}

impl Default for FastFieldOptions {
fn default() -> Self {
FastFieldOptions::IsEnabled(false)
}
}

impl BitOr<FastFieldOptions> for FastFieldOptions {
type Output = FastFieldOptions;

fn bitor(self, other: FastFieldOptions) -> FastFieldOptions {
match (self, other) {
(
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
},
_,
)
| (
_,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
},
) => FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
},
(FastFieldOptions::IsEnabled(true), _) | (_, FastFieldOptions::IsEnabled(true)) => {
FastFieldOptions::IsEnabled(true)
}
(_, FastFieldOptions::IsEnabled(false)) => FastFieldOptions::IsEnabled(false),
}
}
}

fn is_false(val: &bool) -> bool {
!val
}
@@ -80,21 +40,7 @@ impl TextOptions {

/// Returns true if and only if the value is a fast field.
pub fn is_fast(&self) -> bool {
matches!(self.fast, FastFieldOptions::IsEnabled(true))
|| matches!(
&self.fast,
FastFieldOptions::EnabledWithTokenizer { with_tokenizer: _ }
)
}

/// Returns true if and only if the value is a fast field.
pub fn get_fast_field_tokenizer_name(&self) -> Option<&str> {
match &self.fast {
FastFieldOptions::IsEnabled(true) | FastFieldOptions::IsEnabled(false) => None,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
} => Some(tokenizer.name()),
}
self.fast
}

/// Returns true if values should be coerced to strings (numbers, null).
@@ -107,24 +53,19 @@ impl TextOptions {
/// Fast fields are designed for random access.
/// Access time are similar to a random lookup in an array.
/// Text fast fields will have the term ids stored in the fast field.
/// The fast field will be a multivalued fast field.
///
/// The effective cardinality depends on the tokenizer. Without a tokenizer, the text will be
/// stored as is, which equals to the "raw" tokenizer. The tokenizer can be used to apply
/// normalization like lower case.
/// The effective cardinality depends on the tokenizer. When creating fast fields on text
/// fields it is recommended to use the "raw" tokenizer, since it will store the original text
/// unchanged. The "default" tokenizer will store the terms as lower case and this will be
/// reflected in the dictionary.
///
/// The original text can be retrieved via
/// [`TermDictionary::ord_to_term()`](crate::termdict::TermDictionary::ord_to_term)
/// from the dictionary.
#[must_use]
pub fn set_fast(mut self, tokenizer_name: Option<&str>) -> TextOptions {
if let Some(tokenizer) = tokenizer_name {
let tokenizer = TokenizerName::from_name(tokenizer);
self.fast = FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: tokenizer,
}
} else {
self.fast = FastFieldOptions::IsEnabled(true);
}
pub fn set_fast(mut self) -> TextOptions {
self.fast = true;
self
}

@@ -151,7 +92,7 @@ impl TextOptions {
}

#[derive(Clone, PartialEq, Debug, Eq, Serialize, Deserialize)]
pub(crate) struct TokenizerName(Cow<'static, str>);
struct TokenizerName(Cow<'static, str>);

const DEFAULT_TOKENIZER_NAME: &str = "default";

@@ -164,7 +105,7 @@ impl Default for TokenizerName {
}

impl TokenizerName {
pub const fn from_static(name: &'static str) -> Self {
const fn from_static(name: &'static str) -> Self {
TokenizerName(Cow::Borrowed(name))
}
fn from_name(name: &str) -> Self {
@@ -258,7 +199,7 @@ pub const STRING: TextOptions = TextOptions {
record: IndexRecordOption::Basic,
}),
stored: false,
fast: FastFieldOptions::IsEnabled(false),
fast: false,
coerce: false,
};

@@ -271,7 +212,7 @@ pub const TEXT: TextOptions = TextOptions {
}),
stored: false,
coerce: false,
fast: FastFieldOptions::IsEnabled(false),
fast: false,
};

impl<T: Into<TextOptions>> BitOr<T> for TextOptions {
@@ -299,7 +240,7 @@ impl From<StoredFlag> for TextOptions {
TextOptions {
indexing: None,
stored: true,
fast: FastFieldOptions::IsEnabled(false),
fast: false,
coerce: false,
}
}
@@ -310,7 +251,7 @@ impl From<CoerceFlag> for TextOptions {
TextOptions {
indexing: None,
stored: false,
fast: FastFieldOptions::IsEnabled(false),
fast: false,
coerce: true,
}
}
@@ -321,7 +262,7 @@ impl From<FastFlag> for TextOptions {
TextOptions {
indexing: None,
stored: false,
fast: FastFieldOptions::IsEnabled(true),
fast: true,
coerce: false,
}
}
@@ -340,7 +281,6 @@ where

#[cfg(test)]
mod tests {
use crate::schema::text_options::{FastFieldOptions, TokenizerName};
use crate::schema::*;

#[test]
@@ -383,44 +323,4 @@ mod tests {
let options3: TextOptions = serde_json::from_str("{}").unwrap();
assert_eq!(options3.indexing, None);
}

#[test]
fn serde_fast_field_tokenizer() {
let json = r#" {
"fast": { "with_tokenizer": "default" }
} "#;
let options: TextOptions = serde_json::from_str(json).unwrap();
assert_eq!(
options.fast,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: TokenizerName::from_static("default")
}
);
let options: TextOptions =
serde_json::from_str(&serde_json::to_string(&options).unwrap()).unwrap();
assert_eq!(
options.fast,
FastFieldOptions::EnabledWithTokenizer {
with_tokenizer: TokenizerName::from_static("default")
}
);

let json = r#" {
"fast": true
} "#;
let options: TextOptions = serde_json::from_str(json).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(true));
let options: TextOptions =
serde_json::from_str(&serde_json::to_string(&options).unwrap()).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(true));

let json = r#" {
"fast": false
} "#;
let options: TextOptions = serde_json::from_str(json).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(false));
let options: TextOptions =
serde_json::from_str(&serde_json::to_string(&options).unwrap()).unwrap();
assert_eq!(options.fast, FastFieldOptions::IsEnabled(false));
}
}
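Read together with the serde test above, the tokenizer-aware side of this diff accepts both a boolean and an object spelling for `fast`, and `set_fast` mirrors them in the builder. A minimal sketch, assuming `serde_json` is available as in the test (the tokenizer name is just the built-in "default"):

fn text_options_examples() {
    use tantivy::schema::TextOptions;

    // JSON: a plain boolean enables or disables the fast field.
    let _plain: TextOptions = serde_json::from_str(r#"{ "fast": true }"#).unwrap();
    // JSON: the object form also names the normalizing tokenizer.
    let _with_tok: TextOptions =
        serde_json::from_str(r#"{ "fast": { "with_tokenizer": "default" } }"#).unwrap();

    // Builder: None stores the text as-is (the "raw" behavior described in
    // the doc comment above); Some(name) normalizes values through the
    // named tokenizer first.
    let _opts = TextOptions::default().set_fast(Some("default"));
}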
@@ -9,12 +9,14 @@

use std::collections::HashMap;

use common::ByteCount;
use serde::{Deserialize, Serialize};

use crate::schema::Field;
use crate::SegmentComponent;

/// Indicates space usage in bytes
pub type ByteCount = usize;

/// Enum containing any of the possible space usage results for segment components.
pub enum ComponentSpaceUsage {
/// Data is stored per field in a uniform way
@@ -36,7 +38,7 @@ impl SearcherSpaceUsage {
pub(crate) fn new() -> SearcherSpaceUsage {
SearcherSpaceUsage {
segments: Vec::new(),
total: Default::default(),
total: 0,
}
}

@@ -258,7 +260,7 @@ impl FieldUsage {
pub(crate) fn empty(field: Field) -> FieldUsage {
FieldUsage {
field,
num_bytes: Default::default(),
num_bytes: 0,
sub_num_bytes: Vec::new(),
}
}
@@ -292,7 +294,7 @@ impl FieldUsage {
mod test {
use crate::core::Index;
use crate::schema::{Field, Schema, FAST, INDEXED, STORED, TEXT};
use crate::space_usage::PerFieldSpaceUsage;
use crate::space_usage::{ByteCount, PerFieldSpaceUsage};
use crate::Term;

#[test]
@@ -302,14 +304,14 @@ mod test {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage().unwrap();
assert_eq!(searcher_space_usage.total(), 0u64);
assert_eq!(0, searcher_space_usage.total());
}

fn expect_single_field(
field_space: &PerFieldSpaceUsage,
field: &Field,
min_size: u64,
max_size: u64,
min_size: ByteCount,
max_size: ByteCount,
) {
assert!(field_space.total() >= min_size);
assert!(field_space.total() <= max_size);
@@ -351,12 +353,12 @@ mod test {

expect_single_field(segment.termdict(), &name, 1, 512);
expect_single_field(segment.postings(), &name, 1, 512);
assert_eq!(segment.positions().total(), 0);
assert_eq!(0, segment.positions().total());
expect_single_field(segment.fast_fields(), &name, 1, 512);
expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails
// assert_eq!(0, segment.store().total());
assert_eq!(segment.deletes(), 0);
assert_eq!(0, segment.deletes());
Ok(())
}

@@ -392,11 +394,11 @@ mod test {
expect_single_field(segment.termdict(), &name, 1, 512);
expect_single_field(segment.postings(), &name, 1, 512);
expect_single_field(segment.positions(), &name, 1, 512);
assert_eq!(segment.fast_fields().total(), 0);
assert_eq!(0, segment.fast_fields().total());
expect_single_field(segment.fieldnorms(), &name, 1, 512);
// TODO: understand why the following fails
// assert_eq!(0, segment.store().total());
assert_eq!(segment.deletes(), 0);
assert_eq!(0, segment.deletes());
Ok(())
}

@@ -428,14 +430,14 @@ mod test {

assert_eq!(4, segment.num_docs());

assert_eq!(segment.termdict().total(), 0);
assert_eq!(segment.postings().total(), 0);
assert_eq!(segment.positions().total(), 0);
assert_eq!(segment.fast_fields().total(), 0);
assert_eq!(segment.fieldnorms().total(), 0);
assert_eq!(0, segment.termdict().total());
assert_eq!(0, segment.postings().total());
assert_eq!(0, segment.positions().total());
assert_eq!(0, segment.fast_fields().total());
assert_eq!(0, segment.fieldnorms().total());
assert!(segment.store().total() > 0);
assert!(segment.store().total() < 512);
assert_eq!(segment.deletes(), 0);
assert_eq!(0, segment.deletes());
Ok(())
}

@@ -476,8 +478,8 @@ mod test {

expect_single_field(segment_space_usage.termdict(), &name, 1, 512);
expect_single_field(segment_space_usage.postings(), &name, 1, 512);
assert_eq!(segment_space_usage.positions().total(), 0u64);
assert_eq!(segment_space_usage.fast_fields().total(), 0u64);
assert_eq!(0, segment_space_usage.positions().total());
assert_eq!(0, segment_space_usage.fast_fields().total());
expect_single_field(segment_space_usage.fieldnorms(), &name, 1, 512);
assert!(segment_space_usage.deletes() > 0);
Ok(())
@@ -5,7 +5,7 @@ use std::ops::{AddAssign, Range};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

use common::{BinarySerializable, OwnedBytes};
use common::{BinarySerializable, HasLen, OwnedBytes};
use lru::LruCache;

use super::footer::DocStoreFooter;
@@ -122,8 +122,7 @@ impl StoreReader {

let (data_file, offset_index_file) = data_and_offset.split(footer.offset as usize);
let index_data = offset_index_file.read_bytes()?;
let space_usage =
StoreSpaceUsage::new(data_file.num_bytes(), offset_index_file.num_bytes());
let space_usage = StoreSpaceUsage::new(data_file.len(), offset_index_file.len());
let skip_index = SkipIndex::open(index_data);
Ok(StoreReader {
decompressor: footer.decompressor,
@@ -2,18 +2,16 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! let tokenizer = TextAnalyzer::builder(RawTokenizer)
//! .filter(AlphaNumOnlyFilter)
//! .build();
//! let tokenizer = TextAnalyzer::from(RawTokenizer)
//! .filter(AlphaNumOnlyFilter);
//!
//! let mut stream = tokenizer.token_stream("hello there");
//! // is none because the raw filter emits one token that
//! // contains a space
//! assert!(stream.next().is_none());
//!
//! let tokenizer = TextAnalyzer::builder(SimpleTokenizer)
//! .filter(AlphaNumOnlyFilter)
//! .build();
//! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
//! .filter(AlphaNumOnlyFilter);
//!
//! let mut stream = tokenizer.token_stream("hello there 💣");
//! assert!(stream.next().is_some());
@@ -21,45 +19,30 @@
//! // the "emoji" is dropped because its not an alphanum
//! assert!(stream.next().is_none());
//! ```
use super::{Token, TokenFilter, TokenStream, Tokenizer};
use super::{BoxTokenStream, Token, TokenFilter, TokenStream};

/// `TokenFilter` that removes all tokens that contain non
/// ascii alphanumeric characters.
#[derive(Clone)]
pub struct AlphaNumOnlyFilter;

pub struct AlphaNumOnlyFilterStream<T> {
tail: T,
pub struct AlphaNumOnlyFilterStream<'a> {
tail: BoxTokenStream<'a>,
}

impl<T> AlphaNumOnlyFilterStream<T> {
impl<'a> AlphaNumOnlyFilterStream<'a> {
fn predicate(&self, token: &Token) -> bool {
token.text.chars().all(|c| c.is_ascii_alphanumeric())
}
}

impl TokenFilter for AlphaNumOnlyFilter {
type Tokenizer<T: Tokenizer> = AlphaNumOnlyFilterWrapper<T>;

fn transform<T: Tokenizer>(self, tokenizer: T) -> AlphaNumOnlyFilterWrapper<T> {
AlphaNumOnlyFilterWrapper(tokenizer)
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
BoxTokenStream::from(AlphaNumOnlyFilterStream { tail: token_stream })
}
}

#[derive(Clone)]
pub struct AlphaNumOnlyFilterWrapper<T>(T);

impl<T: Tokenizer> Tokenizer for AlphaNumOnlyFilterWrapper<T> {
type TokenStream<'a> = AlphaNumOnlyFilterStream<T::TokenStream<'a>>;

fn token_stream<'a>(&self, text: &'a str) -> Self::TokenStream<'a> {
AlphaNumOnlyFilterStream {
tail: self.0.token_stream(text),
}
}
}

impl<T: TokenStream> TokenStream for AlphaNumOnlyFilterStream<T> {
impl<'a> TokenStream for AlphaNumOnlyFilterStream<'a> {
fn advance(&mut self) -> bool {
while self.tail.advance() {
if self.predicate(self.tail.token()) {
@@ -96,9 +79,7 @@ mod tests {
}

fn token_stream_helper(text: &str) -> Vec<Token> {
let a = TextAnalyzer::builder(SimpleTokenizer)
.filter(AlphaNumOnlyFilter)
.build();
let a = TextAnalyzer::from(SimpleTokenizer).filter(AlphaNumOnlyFilter);
let mut token_stream = a.token_stream(text);
let mut tokens: Vec<Token> = vec![];
let mut add_token = |token: &Token| {
@@ -1,6 +1,6 @@
use std::mem;

use super::{Token, TokenFilter, TokenStream, Tokenizer};
use super::{BoxTokenStream, Token, TokenFilter, TokenStream};

/// This class converts alphabetic, numeric, and symbolic Unicode characters
/// which are not in the first 127 ASCII characters (the "Basic Latin" Unicode
@@ -9,33 +9,20 @@ use super::{Token, TokenFilter, TokenStream, Tokenizer};
pub struct AsciiFoldingFilter;

impl TokenFilter for AsciiFoldingFilter {
type Tokenizer<T: Tokenizer> = AsciiFoldingFilterWrapper<T>;

fn transform<T: Tokenizer>(self, tokenizer: T) -> AsciiFoldingFilterWrapper<T> {
AsciiFoldingFilterWrapper(tokenizer)
}
}

#[derive(Clone)]
pub struct AsciiFoldingFilterWrapper<T>(T);

impl<T: Tokenizer> Tokenizer for AsciiFoldingFilterWrapper<T> {
type TokenStream<'a> = AsciiFoldingFilterTokenStream<T::TokenStream<'a>>;

fn token_stream<'a>(&self, text: &'a str) -> Self::TokenStream<'a> {
AsciiFoldingFilterTokenStream {
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
From::from(AsciiFoldingFilterTokenStream {
tail: token_stream,
buffer: String::with_capacity(100),
tail: self.0.token_stream(text),
}
})
}
}

pub struct AsciiFoldingFilterTokenStream<T> {
pub struct AsciiFoldingFilterTokenStream<'a> {
buffer: String,
tail: T,
tail: BoxTokenStream<'a>,
}

impl<T: TokenStream> TokenStream for AsciiFoldingFilterTokenStream<T> {
impl<'a> TokenStream for AsciiFoldingFilterTokenStream<'a> {
fn advance(&mut self) -> bool {
if !self.tail.advance() {
return false;
@@ -1573,9 +1560,8 @@ mod tests {

fn folding_helper(text: &str) -> Vec<String> {
let mut tokens = Vec::new();
TextAnalyzer::builder(SimpleTokenizer)
TextAnalyzer::from(SimpleTokenizer)
.filter(AsciiFoldingFilter)
.build()
.token_stream(text)
.process(&mut |token| {
tokens.push(token.text.clone());
@@ -1584,9 +1570,8 @@ mod tests {
}

fn folding_using_raw_tokenizer_helper(text: &str) -> String {
let mut token_stream = TextAnalyzer::builder(RawTokenizer)
let mut token_stream = TextAnalyzer::from(RawTokenizer)
.filter(AsciiFoldingFilter)
.build()
.token_stream(text);
token_stream.advance();
token_stream.token().text.clone()
@@ -1,17 +1,16 @@
use crate::tokenizer::{Token, TokenStream, Tokenizer};
use crate::tokenizer::{BoxTokenStream, Token, TokenStream, Tokenizer};

#[derive(Clone)]
pub(crate) struct EmptyTokenizer;

impl Tokenizer for EmptyTokenizer {
type TokenStream<'a> = EmptyTokenStream;
fn token_stream(&self, _text: &str) -> EmptyTokenStream {
EmptyTokenStream::default()
fn token_stream<'a>(&self, _text: &'a str) -> BoxTokenStream<'a> {
EmptyTokenStream::default().into()
}
}

#[derive(Default)]
pub struct EmptyTokenStream {
struct EmptyTokenStream {
token: Token,
}

@@ -31,7 +30,7 @@ impl TokenStream for EmptyTokenStream {

#[cfg(test)]
mod tests {
use crate::tokenizer::{TokenStream, Tokenizer};
use crate::tokenizer::Tokenizer;

#[test]
fn test_empty_tokenizer() {
@@ -1,4 +1,4 @@
use super::{Token, TokenStream, Tokenizer};
use super::{BoxTokenStream, Token, TokenStream, Tokenizer};
use crate::schema::FACET_SEP_BYTE;

/// The `FacetTokenizer` process a `Facet` binary representation
@@ -26,8 +26,7 @@ pub struct FacetTokenStream<'a> {
}

impl Tokenizer for FacetTokenizer {
type TokenStream<'a> = FacetTokenStream<'a>;
fn token_stream<'a>(&self, text: &'a str) -> FacetTokenStream<'a> {
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
let token = Token {
position: 0,
..Default::default()
@@ -37,6 +36,7 @@ impl Tokenizer for FacetTokenizer {
state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet.
token,
}
.into()
}
}

@@ -87,7 +87,7 @@ mod tests {

use super::FacetTokenizer;
use crate::schema::Facet;
use crate::tokenizer::{Token, TokenStream, Tokenizer};
use crate::tokenizer::{Token, Tokenizer};

#[test]
fn test_facet_tokenizer() {
@@ -1,42 +1,29 @@
use std::mem;

use super::{Token, TokenFilter, TokenStream, Tokenizer};
use super::{Token, TokenFilter, TokenStream};
use crate::tokenizer::BoxTokenStream;

impl TokenFilter for LowerCaser {
fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
BoxTokenStream::from(LowerCaserTokenStream {
tail: token_stream,
buffer: String::with_capacity(100),
})
}
}

/// Token filter that lowercase terms.
#[derive(Clone)]
pub struct LowerCaser;

impl TokenFilter for LowerCaser {
type Tokenizer<T: Tokenizer> = LowerCaserFilter<T>;

fn transform<T: Tokenizer>(self, tokenizer: T) -> Self::Tokenizer<T> {
LowerCaserFilter(tokenizer)
}
}

#[derive(Clone)]
pub struct LowerCaserFilter<T>(T);

impl<T: Tokenizer> Tokenizer for LowerCaserFilter<T> {
type TokenStream<'a> = LowerCaserTokenStream<T::TokenStream<'a>>;

fn token_stream<'a>(&self, text: &'a str) -> Self::TokenStream<'a> {
LowerCaserTokenStream {
tail: self.0.token_stream(text),
buffer: String::new(),
}
}
}

pub struct LowerCaserTokenStream<T> {
pub struct LowerCaserTokenStream<'a> {
buffer: String,
tail: T,
tail: BoxTokenStream<'a>,
}

// writes a lowercased version of text into output.
fn to_lowercase_unicode(text: &str, output: &mut String) {
output.clear();
output.reserve(50);
for c in text.chars() {
// Contrary to the std, we do not take care of sigma special case.
// This will have an normalizationo effect, which is ok for search.
@@ -44,7 +31,7 @@ fn to_lowercase_unicode(text: &str, output: &mut String) {
}
}

impl<T: TokenStream> TokenStream for LowerCaserTokenStream<T> {
impl<'a> TokenStream for LowerCaserTokenStream<'a> {
fn advance(&mut self) -> bool {
if !self.tail.advance() {
return false;
@@ -86,9 +73,8 @@ mod tests {
}

fn token_stream_helper(text: &str) -> Vec<Token> {
let mut token_stream = TextAnalyzer::builder(SimpleTokenizer)
let mut token_stream = TextAnalyzer::from(SimpleTokenizer)
.filter(LowerCaser)
.build()
.token_stream(text);
let mut tokens = vec![];
let mut add_token = |token: &Token| {
@@ -66,11 +66,10 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! let en_stem = TextAnalyzer::builder(SimpleTokenizer)
//! let en_stem = TextAnalyzer::from(SimpleTokenizer)
//! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser)
//! .filter(Stemmer::new(Language::English))
//! .build();
//! .filter(Stemmer::new(Language::English));
//! ```
//!
//! Once your tokenizer is defined, you need to
@@ -113,10 +112,9 @@
//! let index = Index::create_in_ram(schema);
//!
//! // We need to register our tokenizer :
//! let custom_en_tokenizer = TextAnalyzer::builder(SimpleTokenizer)
//! let custom_en_tokenizer = TextAnalyzer::from(SimpleTokenizer)
//! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser)
//! .build();
//! .filter(LowerCaser);
//! index
//! .tokenizers()
//! .register("custom_en", custom_en_tokenizer);
@@ -139,7 +137,9 @@ mod tokenizer;
mod tokenizer_manager;
mod whitespace_tokenizer;

pub use tokenizer_api::{BoxTokenStream, Token, TokenFilter, TokenStream, Tokenizer};
pub use tokenizer_api::{
BoxTokenFilter, BoxTokenStream, Token, TokenFilter, TokenStream, Tokenizer,
};

pub use self::alphanum_only::AlphaNumOnlyFilter;
pub use self::ascii_folding_filter::AsciiFoldingFilter;
@@ -237,11 +237,10 @@ pub mod tests {
let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register(
"el_stem",
TextAnalyzer::builder(SimpleTokenizer)
TextAnalyzer::from(SimpleTokenizer)
.filter(RemoveLongFilter::limit(40))
.filter(LowerCaser)
.filter(Stemmer::new(Language::Greek))
.build(),
.filter(Stemmer::new(Language::Greek)),
);
let en_tokenizer = tokenizer_manager.get("el_stem").unwrap();
let mut tokens: Vec<Token> = vec![];
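The doc-comment churn above is one rename applied throughout: one side of this diff assembles analyzers through a builder, the other chains filters directly on the analyzer. A minimal sketch of the two spellings side by side (only the variant matching your tantivy version compiles):

fn build_analyzer() {
    use tantivy::tokenizer::{LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer};

    // Builder-style spelling:
    let _analyzer = TextAnalyzer::builder(SimpleTokenizer)
        .filter(RemoveLongFilter::limit(40))
        .filter(LowerCaser)
        .build();

    // `From`-based spelling on the other side of this diff:
    // let _analyzer = TextAnalyzer::from(SimpleTokenizer)
    //     .filter(RemoveLongFilter::limit(40))
    //     .filter(LowerCaser);
}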
@@ -1,4 +1,5 @@
use super::{Token, TokenStream, Tokenizer};
use crate::tokenizer::BoxTokenStream;

/// Tokenize the text by splitting words into n-grams of the given size(s)
///
@@ -131,9 +132,8 @@ pub struct NgramTokenStream<'a> {
}

impl Tokenizer for NgramTokenizer {
type TokenStream<'a> = NgramTokenStream<'a>;
fn token_stream<'a>(&self, text: &'a str) -> NgramTokenStream<'a> {
NgramTokenStream {
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
From::from(NgramTokenStream {
ngram_charidx_iterator: StutteringIterator::new(
CodepointFrontiers::for_str(text),
self.min_gram,
@@ -142,7 +142,7 @@ impl Tokenizer for NgramTokenizer {
prefix_only: self.prefix_only,
text,
token: Token::default(),
}
})
}
}

@@ -303,9 +303,9 @@ mod tests {

use super::{utf8_codepoint_width, CodepointFrontiers, NgramTokenizer, StutteringIterator};
use crate::tokenizer::tests::assert_token;
use crate::tokenizer::{Token, TokenStream, Tokenizer};
use crate::tokenizer::{BoxTokenStream, Token, Tokenizer};

fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
fn test_helper(mut tokenizer: BoxTokenStream) -> Vec<Token> {
let mut tokens: Vec<Token> = vec![];
tokenizer.process(&mut |token: &Token| tokens.push(token.clone()));
tokens
Some files were not shown because too many files have changed in this diff.