mirror of https://github.com/quickwit-oss/tantivy.git
synced 2025-12-29 05:22:55 +00:00

Compare commits: optional_c...columnar (26 commits)
8828b6d310
2b89bf9050
3580198447
d96a716d20
2ac1cc2fc0
f9171a3981
a2cf6a79b4
f6e87a5319
f9971e15fe
3cdc8e7472
fbb0f8b55d
136a8f4124
5d4535de83
2c50b02eb3
509adab79d
96c93a6ba3
495824361a
485a8f507e
1119e59eae
ee1f2c1f28
600548fd26
9929c0c221
f53e65648b
0281b22b77
a05c184830
0b40a7fe43
CHANGELOG.md | 17

@@ -1,26 +1,29 @@
 Tantivy 0.19
 ================================
+#### Bugfixes
+- Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
+- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480) (@PSeitz @fulmicoton)
+
 #### Features/Improvements
 - Add support for `IN` in queryparser, e.g. `field: IN [val1 val2 val3]` [#1683](https://github.com/quickwit-oss/tantivy/pull/1683) (@trinity-1686a)
 - Skip score calculation, when no scoring is required [#1646](https://github.com/quickwit-oss/tantivy/pull/1646) (@PSeitz)
 - Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
-- Major bugfix: Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
-- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
-  The `DateTime` type has been updated to hold timestamps with microseconds precision.
-  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing). (@evanxg852000)
+- The `DateTime` type has been updated to hold timestamps with microseconds precision.
+  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e. terms, indexing) [#1396](https://github.com/quickwit-oss/tantivy/pull/1396) (@evanxg852000)
 - Add IP address field type [#1553](https://github.com/quickwit-oss/tantivy/pull/1553) (@PSeitz)
 - Add boolean field type [#1382](https://github.com/quickwit-oss/tantivy/pull/1382) (@boraarslan)
 - Remove Searcher pool and make `Searcher` cloneable. (@PSeitz)
-- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570 (@PSeitz)
-- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480 (@PSeitz @fulmicoton)
+- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570) (@PSeitz)
 - Detect and apply gcd on fastfield codecs [#1418](https://github.com/quickwit-oss/tantivy/pull/1418) (@PSeitz)
 - Doc store
-  - use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510 (@PSeitz @fulmicoton)
+  - use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510) (@PSeitz @fulmicoton)
   - Expose doc store cache size [#1403](https://github.com/quickwit-oss/tantivy/pull/1403) (@PSeitz)
   - Enable compression levels for doc store [#1378](https://github.com/quickwit-oss/tantivy/pull/1378) (@PSeitz)
   - Make block size configurable [#1374](https://github.com/quickwit-oss/tantivy/pull/1374) (@kryesh)
 - Make `tantivy::TantivyError` cloneable [#1402](https://github.com/quickwit-oss/tantivy/pull/1402) (@PSeitz)
 - Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
 - Aggregation
   - Add aggregation support for date type [#1693](https://github.com/quickwit-oss/tantivy/pull/1693) (@PSeitz)
   - Add support for keyed parameter in range and histogram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
   - Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
 - Faster indexing
Cargo.toml | 26

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.19.0-dev"
+version = "0.19.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -15,7 +15,7 @@ rust-version = "1.62"

 [dependencies]
 oneshot = "0.1.5"
-base64 = "0.13.0"
+base64 = "0.20.0"
 byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
@@ -25,7 +25,7 @@ tantivy-fst = "0.4.0"
 memmap2 = { version = "0.5.3", optional = true }
 lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
 brotli = { version = "3.3.4", optional = true }
-zstd = { version = "0.11", optional = true, default-features = false }
+zstd = { version = "0.12", optional = true, default-features = false }
 snap = { version = "1.0.5", optional = true }
 tempfile = { version = "3.3.0", optional = true }
 log = "0.4.16"
@@ -36,11 +36,6 @@ fs2 = { version = "0.4.3", optional = true }
 levenshtein_automata = "0.2.1"
 uuid = { version = "1.0.0", features = ["v4", "serde"] }
 crossbeam-channel = "0.5.4"
-tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
-tantivy-bitpacker = { version="0.2", path="./bitpacker" }
-common = { version = "0.3", path = "./common/", package = "tantivy-common" }
-fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
-ownedbytes = { version="0.3", path="./ownedbytes" }
 stable_deref_trait = "1.2.0"
 rust-stemmers = "1.2.0"
 downcast-rs = "1.2.0"
@@ -58,10 +53,17 @@ lru = "0.7.5"
 fastdivide = "0.4.0"
 itertools = "0.10.3"
 measure_time = "0.8.2"
 ciborium = { version = "0.2", optional = true}
 async-trait = "0.1.53"
 arc-swap = "1.5.0"
+
+sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
+stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
+tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
+tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
+common = { version= "0.5", path = "./common/", package = "tantivy-common" }
+fastfield_codecs = { version= "0.3", path="./fastfield_codecs", default-features = false }
+ownedbytes = { version= "0.5", path="./ownedbytes" }

 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"

@@ -73,7 +75,7 @@ pretty_assertions = "1.2.1"
 proptest = "1.0.0"
 criterion = "0.4"
 test-log = "0.2.10"
-env_logger = "0.9.0"
+env_logger = "0.10.0"
 pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
 futures = "0.3.21"

@@ -103,10 +105,10 @@ zstd-compression = ["zstd"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.

-quickwit = ["ciborium"]
+quickwit = ["sstable"]

 [workspace]
-members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
+members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes", "stacker", "sstable", "columnar"]

 # Following the "fail" crate best practises, we isolate
 # tests that define specific behavior in fail check points
bitpacker/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-bitpacker"
-version = "0.2.0"
+version = "0.3.0"
 edition = "2021"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
@@ -8,6 +8,8 @@ categories = []
 description = """Tantivy-sub crate: bitpacking"""
 repository = "https://github.com/quickwit-oss/tantivy"
 keywords = []
+documentation = "https://docs.rs/tantivy-bitpacker/latest/tantivy_bitpacker"
+homepage = "https://github.com/quickwit-oss/tantivy"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -25,15 +25,14 @@ impl BitPacker {
         num_bits: u8,
         output: &mut TWrite,
     ) -> io::Result<()> {
-        let val_u64 = val as u64;
         let num_bits = num_bits as usize;
         if self.mini_buffer_written + num_bits > 64 {
-            self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
+            self.mini_buffer |= val.wrapping_shl(self.mini_buffer_written as u32);
             output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
-            self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
+            self.mini_buffer = val.wrapping_shr((64 - self.mini_buffer_written) as u32);
             self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
         } else {
-            self.mini_buffer |= val_u64 << self.mini_buffer_written;
+            self.mini_buffer |= val << self.mini_buffer_written;
             self.mini_buffer_written += num_bits;
             if self.mini_buffer_written == 64 {
                 output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
@@ -102,7 +101,7 @@ impl BitUnpacker {
             .try_into()
             .unwrap();
         let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
-        let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
+        let val_shifted: u64 = val_unshifted_unmasked >> bit_shift;
         val_shifted & self.mask
     }
 }

@@ -84,7 +84,7 @@ impl BlockedBitpacker {
     #[inline]
     pub fn add(&mut self, val: u64) {
         self.buffer.push(val);
-        if self.buffer.len() == BLOCK_SIZE as usize {
+        if self.buffer.len() == BLOCK_SIZE {
             self.flush();
         }
     }
@@ -126,8 +126,8 @@ impl BlockedBitpacker {
     }
     #[inline]
     pub fn get(&self, idx: usize) -> u64 {
-        let metadata_pos = idx / BLOCK_SIZE as usize;
-        let pos_in_block = idx % BLOCK_SIZE as usize;
+        let metadata_pos = idx / BLOCK_SIZE;
+        let pos_in_block = idx % BLOCK_SIZE;
         if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
             let unpacked = BitUnpacker::new(metadata.num_bits()).get(
                 pos_in_block as u32,
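For orientation, here is a small usage sketch of the bitpacker API these hunks touch. It is a sketch under assumptions: it presumes `BitPacker::new()` and the padding-emitting `BitPacker::close()` exist unchanged (they are not shown in the hunks), and it uses the 0.3 signatures visible above (`write(val: u64, num_bits, output)`, `BitUnpacker::get(idx: u32, data)`); treat it as illustrative rather than authoritative.

```rust
use tantivy_bitpacker::{BitPacker, BitUnpacker};

fn main() -> std::io::Result<()> {
    let vals: Vec<u64> = vec![3, 9, 5];
    let num_bits = 4u8; // every value above fits in 4 bits
    let mut buffer: Vec<u8> = Vec::new();

    let mut bitpacker = BitPacker::new();
    for &val in &vals {
        // After this change set, `write` takes the value directly as `u64`.
        bitpacker.write(val, num_bits, &mut buffer)?;
    }
    // `close` (assumed unchanged) flushes the mini buffer and pads the output
    // so that unaligned 8-byte reads in `BitUnpacker::get` stay in bounds.
    bitpacker.close(&mut buffer)?;

    let unpacker = BitUnpacker::new(num_bits);
    for (idx, &expected) in vals.iter().enumerate() {
        assert_eq!(unpacker.get(idx as u32, &buffer), expected);
    }
    Ok(())
}
```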
columnar/Cargo.toml | 26 (new file)

@@ -0,0 +1,26 @@
[package]
name = "tantivy-columnar"
version = "0.1.0"
edition = "2021"

[dependencies]
stacker = { path = "../stacker", package="tantivy-stacker"}
serde_json = "1"
thiserror = "1"
fnv = "1"
tantivy-fst = "0.4.0"
sstable = { path = "../sstable", package = "tantivy-sstable" }
common = { path = "../common", package = "tantivy-common" }
fastfield_codecs = { path = "../fastfield_codecs"}
ordered-float = "3.4"
itertools = "0.10"

[features]
# default = ["quickwit"]
# quickwit = ["common/quickwit"]

[dev-dependencies]
proptest = "1"
columnar/README.md | 33 (new file)

@@ -0,0 +1,33 @@
# Columnar format

This crate describes the columnar format used in tantivy.

## Goals

This format is special in the following ways:
- it needs to be compact
- it does not need to be entirely loaded in memory.
- it is designed to fit well with quickwit's strange constraint:
  we need to be able to load columns rapidly.
- columns of several types can be associated with the same column name.
- it needs to support columns with different types `(str, u64, i64, f64)`
  and different cardinalities `(required, optional, multivalued)`.
- columns, once loaded, offer cheap random access.

# Format

A quickwit/tantivy-style sstable associates
`(column_name, column_cardinality, column_type)` to a range of bytes.

The format of the key is:
`[column_name][ZERO_BYTE][column_type_header: u8]`

A column name may not contain the zero byte.

Listing all columns associated with `column_name` can therefore
be done by listing all keys prefixed by
`[column_name][ZERO_BYTE]`

The associated range of bytes refers to a range of bytes
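To make the key layout concrete, here is a minimal sketch (the helper names are illustrative, not part of the crate) of how such a key and the listing prefix could be assembled:

```rust
/// Sketch: build the sstable key `[column_name][ZERO_BYTE][column_type_header]`.
fn column_key(column_name: &[u8], column_type_header: u8) -> Vec<u8> {
    // Column names may not contain the zero byte: it acts as the separator.
    assert!(!column_name.contains(&0u8));
    let mut key = Vec::with_capacity(column_name.len() + 2);
    key.extend_from_slice(column_name);
    key.push(0u8); // ZERO_BYTE
    key.push(column_type_header); // one byte encoding type and cardinality
    key
}

/// Sketch: listing all columns of a name amounts to scanning this prefix.
fn column_prefix(column_name: &[u8]) -> Vec<u8> {
    let mut prefix = column_name.to_vec();
    prefix.push(0u8);
    prefix
}
```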
columnar/src/column_type_header.rs | 154 (new file)

@@ -0,0 +1,154 @@
use crate::value::NumericalType;

#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u8)]
pub enum Cardinality {
    #[default]
    Required = 0,
    Optional = 1,
    Multivalued = 2,
}

impl Cardinality {
    pub fn to_code(self) -> u8 {
        self as u8
    }

    pub fn try_from_code(code: u8) -> Option<Cardinality> {
        match code {
            0 => Some(Cardinality::Required),
            1 => Some(Cardinality::Optional),
            2 => Some(Cardinality::Multivalued),
            _ => None,
        }
    }
}

#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)]
pub enum ColumnType {
    Bytes,
    Numerical(NumericalType),
}

impl ColumnType {
    pub fn to_code(self) -> u8 {
        match self {
            ColumnType::Bytes => 0u8,
            ColumnType::Numerical(numerical_type) => 1u8 | (numerical_type.to_code() << 1),
        }
    }

    pub fn try_from_code(code: u8) -> Option<ColumnType> {
        if code == 0u8 {
            return Some(ColumnType::Bytes);
        }
        if code & 1u8 == 0u8 {
            return None;
        }
        let numerical_type = NumericalType::try_from_code(code >> 1)?;
        Some(ColumnType::Numerical(numerical_type))
    }
}

/// Represents the type and cardinality of a column.
/// This is encoded over one byte and added to a column key in the
/// columnar sstable.
///
/// The cardinality is encoded in the two highest bits.
/// The six lowest bits encode the column type.
#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone)]
pub struct ColumnTypeAndCardinality {
    pub cardinality: Cardinality,
    pub typ: ColumnType,
}

#[inline]
const fn compute_mask(num_bits: u8) -> u8 {
    if num_bits == 8 {
        u8::MAX
    } else {
        (1u8 << num_bits) - 1
    }
}

#[inline]
fn select_bits<const START: u8, const END: u8>(code: u8) -> u8 {
    assert!(START <= END);
    assert!(END <= 8);
    let num_bits: u8 = END - START;
    let mask: u8 = compute_mask(num_bits);
    (code >> START) & mask
}

#[inline]
fn place_bits<const START: u8, const END: u8>(code: u8) -> u8 {
    assert!(START <= END);
    assert!(END <= 8);
    let num_bits: u8 = END - START;
    let mask: u8 = compute_mask(num_bits);
    assert!(code <= mask);
    code << START
}

impl ColumnTypeAndCardinality {
    pub fn to_code(self) -> u8 {
        place_bits::<6, 8>(self.cardinality.to_code()) | place_bits::<0, 6>(self.typ.to_code())
    }

    pub fn try_from_code(code: u8) -> Option<ColumnTypeAndCardinality> {
        let typ_code = select_bits::<0, 6>(code);
        let cardinality_code = select_bits::<6, 8>(code);
        let cardinality = Cardinality::try_from_code(cardinality_code)?;
        let typ = ColumnType::try_from_code(typ_code)?;
        assert_eq!(typ.to_code(), typ_code);
        Some(ColumnTypeAndCardinality { cardinality, typ })
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::ColumnTypeAndCardinality;
    use crate::column_type_header::{Cardinality, ColumnType};

    #[test]
    fn test_column_type_header_to_code() {
        let mut column_type_header_set: HashSet<ColumnTypeAndCardinality> = HashSet::new();
        for code in u8::MIN..=u8::MAX {
            if let Some(column_type_header) = ColumnTypeAndCardinality::try_from_code(code) {
                assert_eq!(column_type_header.to_code(), code);
                assert!(column_type_header_set.insert(column_type_header));
            }
        }
        assert_eq!(
            column_type_header_set.len(),
            3 /* cardinality */ * (1 + 3) // column_types
        );
    }

    #[test]
    fn test_column_type_to_code() {
        let mut column_type_set: HashSet<ColumnType> = HashSet::new();
        for code in u8::MIN..=u8::MAX {
            if let Some(column_type) = ColumnType::try_from_code(code) {
                assert_eq!(column_type.to_code(), code);
                assert!(column_type_set.insert(column_type));
            }
        }
        assert_eq!(column_type_set.len(), 1 + 3);
    }

    #[test]
    fn test_cardinality_to_code() {
        let mut num_cardinality = 0;
        for code in u8::MIN..=u8::MAX {
            let cardinality_opt = Cardinality::try_from_code(code);
            if let Some(cardinality) = cardinality_opt {
                assert_eq!(cardinality.to_code(), code);
                num_cardinality += 1;
            }
        }
        assert_eq!(num_cardinality, 3);
    }
}
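As a worked example of the one-byte header (the arithmetic follows `to_code` above; written as an in-crate test, assuming `NumericalType` is imported from `crate::value`):

```rust
#[test]
fn example_header_byte_layout() {
    // ColumnType::Numerical(NumericalType::U64).to_code() = 1 | (1 << 1) = 3 -> bits [0, 6)
    // Cardinality::Optional.to_code() = 1, placed in bits [6, 8) -> 1 << 6 = 64
    let header = ColumnTypeAndCardinality {
        cardinality: Cardinality::Optional,
        typ: ColumnType::Numerical(NumericalType::U64),
    };
    assert_eq!(header.to_code(), 64 | 3);
    assert_eq!(ColumnTypeAndCardinality::try_from_code(67), Some(header));
}
```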
columnar/src/dictionary.rs | 78 (new file)

@@ -0,0 +1,78 @@
use std::io;

use fnv::FnvHashMap;

fn fst_err_into_io_err(fst_err: tantivy_fst::Error) -> io::Error {
    match fst_err {
        tantivy_fst::Error::Fst(fst_err) => {
            io::Error::new(io::ErrorKind::Other, format!("FST Error: {:?}", fst_err))
        }
        tantivy_fst::Error::Io(io_err) => io_err,
    }
}

/// `DictionaryBuilder` for dictionary encoding.
///
/// It stores the different terms encountered and assigns them a temporary value
/// we call unordered id.
///
/// Upon serialization, we will sort the ids and hence build a `UnorderedId -> Term ordinal`
/// mapping.
#[derive(Default)]
pub struct DictionaryBuilder {
    dict: FnvHashMap<Vec<u8>, UnorderedId>,
}

pub struct IdMapping {
    unordered_to_ord: Vec<OrderedId>,
}

impl IdMapping {
    pub fn to_ord(&self, unordered: UnorderedId) -> OrderedId {
        self.unordered_to_ord[unordered.0 as usize]
    }
}

impl DictionaryBuilder {
    /// Get or allocate an unordered id.
    /// (This ID is simply an auto-incremented id.)
    pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
        if let Some(term_id) = self.dict.get(term) {
            return *term_id;
        }
        let new_id = UnorderedId(self.dict.len() as u32);
        self.dict.insert(term.to_vec(), new_id);
        new_id
    }

    /// Serialize the dictionary into an fst, and return the
    /// `UnorderedId -> TermOrdinal` map.
    pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<IdMapping> {
        serialize_inner(&self.dict, wrt).map_err(fst_err_into_io_err)
    }
}

/// Helper function just there for error conversion.
fn serialize_inner<'a, W: io::Write + 'a>(
    dict: &FnvHashMap<Vec<u8>, UnorderedId>,
    wrt: &mut W,
) -> tantivy_fst::Result<IdMapping> {
    let mut terms: Vec<(&[u8], UnorderedId)> =
        dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
    terms.sort_unstable_by_key(|(key, _)| *key);
    let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
    let mut fst_builder = tantivy_fst::MapBuilder::new(wrt)?;
    for (ord, (key, unordered_id)) in terms.into_iter().enumerate() {
        let ordered_id = OrderedId(ord as u32);
        fst_builder.insert(key, ord as u64)?;
        unordered_to_ord[unordered_id.0 as usize] = ordered_id;
    }
    fst_builder.finish()?;
    Ok(IdMapping { unordered_to_ord })
}

#[derive(Clone, Copy, Debug)]
pub struct UnorderedId(pub u32);

#[derive(Clone, Copy)]
pub struct OrderedId(pub u32);
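A small usage sketch (in-crate, since these types are not re-exported): unordered ids are handed out in insertion order, and `serialize` remaps them to ordinals in sorted term order.

```rust
#[test]
fn example_dictionary_builder() {
    let mut builder = DictionaryBuilder::default();
    let id_b = builder.get_or_allocate_id(b"b"); // UnorderedId(0): inserted first
    let id_a = builder.get_or_allocate_id(b"a"); // UnorderedId(1)
    let mut buffer: Vec<u8> = Vec::new();
    let id_mapping = builder.serialize(&mut buffer).unwrap();
    // In the fst, terms are sorted: "a" gets ordinal 0, "b" gets ordinal 1.
    assert_eq!(id_mapping.to_ord(id_a).0, 0u32);
    assert_eq!(id_mapping.to_ord(id_b).0, 1u32);
}
```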
columnar/src/lib.rs | 69 (new file)

@@ -0,0 +1,69 @@
// Copyright (C) 2022 Quickwit, Inc.
//
// Quickwit is offered under the AGPL v3.0 and as commercial software.
// For commercial licensing, contact us at hello@quickwit.io.
//
// AGPL:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

mod column_type_header;
mod dictionary;
mod reader;
mod serializer;
mod value;
mod writer;

pub use column_type_header::Cardinality;
pub use reader::ColumnarReader;
pub use serializer::ColumnarSerializer;
pub use writer::ColumnarWriter;

pub type DocId = u32;

#[cfg(test)]
mod tests {
    use std::ops::Range;

    use common::file_slice::FileSlice;

    use crate::column_type_header::ColumnTypeAndCardinality;
    use crate::reader::ColumnarReader;
    use crate::serializer::ColumnarSerializer;
    use crate::value::NumericalValue;
    use crate::ColumnarWriter;

    #[test]
    fn test_dataframe_writer() {
        let mut dataframe_writer = ColumnarWriter::default();
        dataframe_writer.record_numerical(1u32, b"srical.value", NumericalValue::U64(1u64));
        dataframe_writer.record_numerical(2u32, b"srical.value", NumericalValue::U64(2u64));
        dataframe_writer.record_numerical(4u32, b"srical.value", NumericalValue::I64(2i64));
        let mut buffer: Vec<u8> = Vec::new();
        let serializer = ColumnarSerializer::new(&mut buffer);
        dataframe_writer.serialize(5, serializer).unwrap();
        let columnar_fileslice = FileSlice::from(buffer);
        let columnar = ColumnarReader::open(columnar_fileslice).unwrap();
        assert_eq!(columnar.num_columns(), 1);
        let cols: Vec<(ColumnTypeAndCardinality, Range<u64>)> =
            columnar.read_columns("srical.value").unwrap();
        assert_eq!(cols.len(), 1);
        // Right now these 31 bytes are spent as follows
        //
        // - header 14 bytes
        // - vals 8 //< due to padding? could have been 1 byte?.
        // - null footer 6 bytes
        // - version footer 3 bytes // Should be file-wide
        assert_eq!(cols[0].1, 0..31);
    }
}
columnar/src/reader/mod.rs | 66 (new file)

@@ -0,0 +1,66 @@
use std::ops::Range;
use std::{io, mem};

use common::file_slice::FileSlice;
use common::BinarySerializable;
use sstable::{Dictionary, SSTableRange};

use crate::column_type_header::ColumnTypeAndCardinality;

fn io_invalid_data(msg: String) -> io::Error {
    io::Error::new(io::ErrorKind::InvalidData, msg)
}

pub struct ColumnarReader {
    column_dictionary: Dictionary<SSTableRange>,
    column_data: FileSlice,
}

impl ColumnarReader {
    pub fn num_columns(&self) -> usize {
        self.column_dictionary.num_terms()
    }

    pub fn open(file_slice: FileSlice) -> io::Result<ColumnarReader> {
        let (file_slice_without_sstable_len, sstable_len_bytes) =
            file_slice.split_from_end(mem::size_of::<u64>());
        let mut sstable_len_bytes = sstable_len_bytes.read_bytes()?;
        let sstable_len = u64::deserialize(&mut sstable_len_bytes)?;
        let (column_data, sstable) =
            file_slice_without_sstable_len.split_from_end(sstable_len as usize);
        let column_dictionary = Dictionary::open(sstable)?;
        Ok(ColumnarReader {
            column_dictionary,
            column_data,
        })
    }

    pub fn read_columns(
        &self,
        field_name: &str,
    ) -> io::Result<Vec<(ColumnTypeAndCardinality, Range<u64>)>> {
        let mut start_key = field_name.to_string();
        start_key.push('\0');
        let mut end_key = field_name.to_string();
        end_key.push(1u8 as char);
        let mut stream = self
            .column_dictionary
            .range()
            .ge(start_key.as_bytes())
            .lt(end_key.as_bytes())
            .into_stream()?;
        let mut results = Vec::new();
        while stream.advance() {
            let key_bytes: &[u8] = stream.key();
            if !key_bytes.starts_with(start_key.as_bytes()) {
                return Err(io_invalid_data(format!("Invalid key found. {key_bytes:?}")));
            }
            let column_code: u8 = key_bytes.last().cloned().unwrap();
            let column_type_and_cardinality = ColumnTypeAndCardinality::try_from_code(column_code)
                .ok_or_else(|| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
            let range = stream.value().clone();
            results.push((column_type_and_cardinality, range));
        }
        Ok(results)
    }
}
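A note on the range bounds in `read_columns`: every key has the shape `[field_name][0u8][one type byte]`, so scanning the dictionary for keys `>= field_name + "\0"` and `< field_name + "\x01"` enumerates exactly the columns registered under `field_name`, one per `(type, cardinality)` combination.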
columnar/src/serializer.rs | 39 (new file)

@@ -0,0 +1,39 @@
use std::io;
use std::io::Write;
use std::ops::Range;

use common::CountingWriter;
use sstable::value::RangeWriter;
use sstable::SSTableRange;

pub struct ColumnarSerializer<W: io::Write> {
    wrt: CountingWriter<W>,
    sstable_range: sstable::Writer<Vec<u8>, RangeWriter>,
}

impl<W: io::Write> ColumnarSerializer<W> {
    pub fn new(wrt: W) -> ColumnarSerializer<W> {
        let sstable_range: sstable::Writer<Vec<u8>, RangeWriter> =
            sstable::Dictionary::<SSTableRange>::builder(Vec::with_capacity(100_000)).unwrap();
        ColumnarSerializer {
            wrt: CountingWriter::wrap(wrt),
            sstable_range,
        }
    }

    pub fn record_column_offsets(&mut self, key: &[u8], byte_range: Range<u64>) -> io::Result<()> {
        self.sstable_range.insert(key, &byte_range)
    }

    pub fn wrt(&mut self) -> &mut CountingWriter<W> {
        &mut self.wrt
    }

    pub fn finalize(mut self) -> io::Result<()> {
        let sstable_bytes: Vec<u8> = self.sstable_range.finish()?;
        let sstable_num_bytes: u64 = sstable_bytes.len() as u64;
        self.wrt.write_all(&sstable_bytes)?;
        self.wrt.write_all(&sstable_num_bytes.to_le_bytes()[..])?;
        Ok(())
    }
}
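Reading `finalize` together with `ColumnarReader::open` earlier, the overall file layout is:

```
[column data, one range of bytes per column]
[sstable mapping column keys -> byte ranges]
[sstable_num_bytes: u64, little-endian]
```

A reader starts from the trailing 8 bytes to locate the sstable, then uses the stored ranges to slice individual columns out of the data section.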
columnar/src/value.rs | 123 (new file)

@@ -0,0 +1,123 @@
use ordered_float::NotNan;

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum NumericalValue {
    I64(i64),
    U64(u64),
    F64(NotNan<f64>),
}

impl From<u64> for NumericalValue {
    fn from(val: u64) -> NumericalValue {
        NumericalValue::U64(val)
    }
}

impl From<i64> for NumericalValue {
    fn from(val: i64) -> Self {
        NumericalValue::I64(val)
    }
}

impl From<NotNan<f64>> for NumericalValue {
    fn from(val: NotNan<f64>) -> Self {
        NumericalValue::F64(val)
    }
}

impl NumericalValue {
    pub fn numerical_type(&self) -> NumericalType {
        match self {
            NumericalValue::F64(_) => NumericalType::F64,
            NumericalValue::I64(_) => NumericalType::I64,
            NumericalValue::U64(_) => NumericalType::U64,
        }
    }
}

impl Eq for NumericalValue {}

#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]
#[repr(u8)]
pub enum NumericalType {
    #[default]
    I64 = 0,
    U64 = 1,
    F64 = 2,
}

impl NumericalType {
    pub fn to_code(self) -> u8 {
        self as u8
    }

    pub fn try_from_code(code: u8) -> Option<NumericalType> {
        match code {
            0 => Some(NumericalType::I64),
            1 => Some(NumericalType::U64),
            2 => Some(NumericalType::F64),
            _ => None,
        }
    }
}

/// We voluntarily avoid using `Into` here to keep this
/// implementation quirk as private as possible.
///
/// This coercion trait actually panics if it is used
/// to convert a loose type to a stricter type.
///
/// The level of strictness is somewhat arbitrary.
/// - i64
/// - u64
/// - f64.
pub(crate) trait Coerce {
    fn coerce(numerical_value: NumericalValue) -> Self;
}

impl Coerce for i64 {
    fn coerce(value: NumericalValue) -> Self {
        match value {
            NumericalValue::I64(val) => val,
            NumericalValue::U64(val) => val as i64,
            NumericalValue::F64(_) => unreachable!(),
        }
    }
}

impl Coerce for u64 {
    fn coerce(value: NumericalValue) -> Self {
        match value {
            NumericalValue::I64(val) => val as u64,
            NumericalValue::U64(val) => val,
            NumericalValue::F64(_) => unreachable!(),
        }
    }
}

impl Coerce for NotNan<f64> {
    fn coerce(value: NumericalValue) -> Self {
        match value {
            NumericalValue::I64(val) => unsafe { NotNan::new_unchecked(val as f64) },
            NumericalValue::U64(val) => unsafe { NotNan::new_unchecked(val as f64) },
            NumericalValue::F64(val) => val,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::NumericalType;

    #[test]
    fn test_numerical_type_code() {
        let mut num_numerical_type = 0;
        for code in u8::MIN..=u8::MAX {
            if let Some(numerical_type) = NumericalType::try_from_code(code) {
                assert_eq!(numerical_type.to_code(), code);
                num_numerical_type += 1;
            }
        }
        assert_eq!(num_numerical_type, 3);
    }
}
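A short in-crate sketch of that strictness ladder (illustrative values):

```rust
#[test]
fn example_coerce_ladder() {
    use ordered_float::NotNan;
    // Coercion only ever goes from a stricter type to a looser one
    // (i64 -> u64 -> f64); the `unreachable!()` arms guard the other direction.
    let as_u64: u64 = Coerce::coerce(NumericalValue::I64(7));
    assert_eq!(as_u64, 7u64);
    let as_f64: NotNan<f64> = Coerce::coerce(NumericalValue::U64(7));
    assert_eq!(as_f64.into_inner(), 7.0);
    // `<i64 as Coerce>::coerce(NumericalValue::F64(..))` would hit unreachable!().
}
```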
columnar/src/writer/column_operation.rs | 321 (new file)

@@ -0,0 +1,321 @@
use std::fmt;
use std::num::NonZeroU8;

use ordered_float::NotNan;
use thiserror::Error;

use crate::dictionary::UnorderedId;
use crate::value::NumericalValue;
use crate::DocId;

/// When we build a columnar dataframe, we first just group
/// all mutations per column, and append them in an append-only object.
///
/// We represent all of these operations as `ColumnOperation`.
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
pub(crate) enum ColumnOperation<T> {
    NewDoc(DocId),
    Value(T),
}

impl<T> From<T> for ColumnOperation<T> {
    fn from(value: T) -> Self {
        ColumnOperation::Value(value)
    }
}

#[allow(clippy::from_over_into)]
pub(crate) trait SymbolValue: Into<MiniBuffer> + Clone + Copy + fmt::Debug {
    fn deserialize(header: NonZeroU8, bytes: &mut &[u8]) -> Result<Self, ParseError>;
}

pub(crate) struct MiniBuffer {
    pub bytes: [u8; 9],
    pub len: usize,
}

impl MiniBuffer {
    pub fn as_slice(&self) -> &[u8] {
        &self.bytes[..self.len]
    }
}

fn compute_header_byte(typ: SymbolType, len: usize) -> u8 {
    assert!(len <= 9);
    (len << 4) as u8 | typ as u8
}

impl SymbolValue for NumericalValue {
    fn deserialize(header_byte: NonZeroU8, bytes: &mut &[u8]) -> Result<Self, ParseError> {
        let (typ, len) = parse_header_byte(header_byte)?;
        let value_bytes: &[u8];
        (value_bytes, *bytes) = bytes.split_at(len);
        let symbol: NumericalValue = match typ {
            SymbolType::U64 => {
                let mut octet: [u8; 8] = [0u8; 8];
                octet[..value_bytes.len()].copy_from_slice(value_bytes);
                let val: u64 = u64::from_le_bytes(octet);
                NumericalValue::U64(val)
            }
            SymbolType::I64 => {
                let mut octet: [u8; 8] = [0u8; 8];
                octet[..value_bytes.len()].copy_from_slice(value_bytes);
                let encoded: u64 = u64::from_le_bytes(octet);
                let val: i64 = decode_zig_zag(encoded);
                NumericalValue::I64(val)
            }
            SymbolType::Float => {
                let octet: [u8; 8] =
                    value_bytes.try_into().map_err(|_| ParseError::InvalidLen {
                        typ: SymbolType::Float,
                        len,
                    })?;
                let val_possibly_nan = f64::from_le_bytes(octet);
                let val_not_nan = NotNan::new(val_possibly_nan).map_err(|_| ParseError::NaN)?;
                NumericalValue::F64(val_not_nan)
            }
        };
        Ok(symbol)
    }
}

#[allow(clippy::from_over_into)]
impl Into<MiniBuffer> for NumericalValue {
    fn into(self) -> MiniBuffer {
        let mut bytes = [0u8; 9];
        match self {
            NumericalValue::F64(val) => {
                let len = 8;
                let header_byte = compute_header_byte(SymbolType::Float, len);
                bytes[0] = header_byte;
                bytes[1..].copy_from_slice(&val.to_le_bytes());
                MiniBuffer {
                    bytes,
                    len: len + 1,
                }
            }
            NumericalValue::U64(val) => {
                let len = compute_num_bytes_for_u64(val);
                let header_byte = compute_header_byte(SymbolType::U64, len);
                bytes[0] = header_byte;
                bytes[1..].copy_from_slice(&val.to_le_bytes());
                MiniBuffer {
                    bytes,
                    len: len + 1,
                }
            }
            NumericalValue::I64(val) => {
                let encoded = encode_zig_zag(val);
                let len = compute_num_bytes_for_u64(encoded);
                let header_byte = compute_header_byte(SymbolType::I64, len);
                bytes[0] = header_byte;
                bytes[1..].copy_from_slice(&encoded.to_le_bytes());
                MiniBuffer {
                    bytes,
                    len: len + 1,
                }
            }
        }
    }
}

#[allow(clippy::from_over_into)]
impl Into<MiniBuffer> for UnorderedId {
    fn into(self) -> MiniBuffer {
        let mut bytes = [0u8; 9];
        let val = self.0 as u64;
        let len = compute_num_bytes_for_u64(val) + 1;
        bytes[0] = len as u8;
        bytes[1..].copy_from_slice(&val.to_le_bytes());
        MiniBuffer { bytes, len }
    }
}

impl SymbolValue for UnorderedId {
    fn deserialize(header: NonZeroU8, bytes: &mut &[u8]) -> Result<UnorderedId, ParseError> {
        let len = header.get() as usize;
        let symbol_bytes: &[u8];
        (symbol_bytes, *bytes) = bytes.split_at(len);
        let mut value_bytes = [0u8; 4];
        value_bytes[..len - 1].copy_from_slice(&symbol_bytes[1..]);
        let value = u32::from_le_bytes(value_bytes);
        Ok(UnorderedId(value))
    }
}

const HEADER_MASK: u8 = (1u8 << 4) - 1u8;

fn compute_num_bytes_for_u64(val: u64) -> usize {
    let msb = (64u32 - val.leading_zeros()) as usize;
    (msb + 7) / 8
}

fn parse_header_byte(byte: NonZeroU8) -> Result<(SymbolType, usize), ParseError> {
    let len = (byte.get() as usize) >> 4;
    let typ_code = byte.get() & HEADER_MASK;
    let typ = SymbolType::try_from(typ_code)?;
    Ok((typ, len))
}

#[derive(Error, Debug)]
pub enum ParseError {
    #[error("Type byte unknown `{0}`")]
    UnknownType(u8),
    #[error("Invalid len for type `{len}` for type `{typ:?}`.")]
    InvalidLen { typ: SymbolType, len: usize },
    #[error("Missing bytes.")]
    MissingBytes,
    #[error("Not a number value.")]
    NaN,
}

impl<V: SymbolValue> ColumnOperation<V> {
    pub fn serialize(self) -> MiniBuffer {
        match self {
            ColumnOperation::NewDoc(doc) => {
                let mut minibuf: [u8; 9] = [0u8; 9];
                minibuf[0] = 0u8;
                minibuf[1..5].copy_from_slice(&doc.to_le_bytes());
                MiniBuffer {
                    bytes: minibuf,
                    len: 5,
                }
            }
            ColumnOperation::Value(val) => val.into(),
        }
    }

    pub fn deserialize(bytes: &mut &[u8]) -> Result<Self, ParseError> {
        if bytes.is_empty() {
            return Err(ParseError::MissingBytes);
        }
        let header_byte = bytes[0];
        *bytes = &bytes[1..];
        if let Some(header_byte) = NonZeroU8::new(header_byte) {
            let value = V::deserialize(header_byte, bytes)?;
            Ok(ColumnOperation::Value(value))
        } else {
            let doc_bytes: &[u8];
            (doc_bytes, *bytes) = bytes.split_at(4);
            let doc: u32 =
                u32::from_le_bytes(doc_bytes.try_into().map_err(|_| ParseError::MissingBytes)?);
            Ok(ColumnOperation::NewDoc(doc))
        }
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum SymbolType {
    U64 = 1u8,
    I64 = 2u8,
    Float = 3u8,
}

impl TryFrom<u8> for SymbolType {
    type Error = ParseError;

    fn try_from(byte: u8) -> Result<Self, ParseError> {
        match byte {
            1u8 => Ok(SymbolType::U64),
            2u8 => Ok(SymbolType::I64),
            3u8 => Ok(SymbolType::Float),
            _ => Err(ParseError::UnknownType(byte)),
        }
    }
}

fn encode_zig_zag(n: i64) -> u64 {
    ((n << 1) ^ (n >> 63)) as u64
}

fn decode_zig_zag(n: u64) -> i64 {
    ((n >> 1) as i64) ^ (-((n & 1) as i64))
}

#[cfg(test)]
mod tests {
    use super::{SymbolType, *};

    #[track_caller]
    fn test_zig_zag_aux(val: i64) {
        let encoded = super::encode_zig_zag(val);
        assert_eq!(decode_zig_zag(encoded), val);
        if let Some(abs_val) = val.checked_abs() {
            let abs_val = abs_val as u64;
            assert!(encoded <= abs_val * 2);
        }
    }

    #[test]
    fn test_zig_zag() {
        assert_eq!(encode_zig_zag(0i64), 0u64);
        assert_eq!(encode_zig_zag(-1i64), 1u64);
        assert_eq!(encode_zig_zag(1i64), 2u64);
        test_zig_zag_aux(0i64);
        test_zig_zag_aux(i64::MIN);
        test_zig_zag_aux(i64::MAX);
    }

    use proptest::prelude::any;
    use proptest::proptest;

    proptest! {
        #[test]
        fn test_proptest_zig_zag(val in any::<i64>()) {
            test_zig_zag_aux(val);
        }
    }

    #[track_caller]
    fn ser_deser_header_byte_aux(symbol_type: SymbolType, len: usize) {
        let header_byte = compute_header_byte(symbol_type, len);
        let (serdeser_numerical_type, serdeser_len) =
            parse_header_byte(NonZeroU8::new(header_byte).unwrap()).unwrap();
        assert_eq!(symbol_type, serdeser_numerical_type);
        assert_eq!(len, serdeser_len);
    }

    #[test]
    fn test_header_byte_serialization() {
        for len in 1..9 {
            ser_deser_header_byte_aux(SymbolType::Float, len);
            ser_deser_header_byte_aux(SymbolType::I64, len);
            ser_deser_header_byte_aux(SymbolType::U64, len);
        }
    }

    #[track_caller]
    fn ser_deser_symbol(symbol: ColumnOperation<NumericalValue>) {
        let buf = symbol.serialize();
        let mut bytes = &buf.bytes[..];
        let serdeser_symbol = ColumnOperation::deserialize(&mut bytes).unwrap();
        assert_eq!(bytes.len() + buf.len, buf.bytes.len());
        assert_eq!(symbol, serdeser_symbol);
    }

    #[test]
    fn test_compute_num_bytes_for_u64() {
        assert_eq!(compute_num_bytes_for_u64(0), 0);
        assert_eq!(compute_num_bytes_for_u64(1), 1);
        assert_eq!(compute_num_bytes_for_u64(255), 1);
        assert_eq!(compute_num_bytes_for_u64(256), 2);
        assert_eq!(compute_num_bytes_for_u64((1 << 16) - 1), 2);
        assert_eq!(compute_num_bytes_for_u64(1 << 16), 3);
    }

    #[test]
    fn test_symbol_serialization() {
        ser_deser_symbol(ColumnOperation::NewDoc(0));
        ser_deser_symbol(ColumnOperation::NewDoc(3));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(0i64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(1i64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(257u64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(-257i64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(i64::MIN)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(0u64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MIN)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MAX)));
    }
}
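Following `compute_header_byte`, `compute_num_bytes_for_u64` and the zig-zag helpers above, a couple of worked encodings (values chosen for illustration):

```rust
// ColumnOperation::Value(NumericalValue::U64(300)):
//   compute_num_bytes_for_u64(300) = 2          // 300 = 0x012C fits in two bytes
//   header = (2 << 4) | SymbolType::U64 as u8   // = 0b0010_0001 = 33
//   serialized bytes: [33, 0x2C, 0x01]
//
// ColumnOperation::Value(NumericalValue::I64(-5)):
//   encode_zig_zag(-5) = 9, which fits in one byte
//   header = (1 << 4) | SymbolType::I64 as u8   // = 0b0001_0010 = 18
//   serialized bytes: [18, 9]
//
// ColumnOperation::NewDoc(3):
//   a zero header byte marks NewDoc, followed by the doc id
//   as four little-endian bytes: [0, 3, 0, 0, 0]
```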
columnar/src/writer/mod.rs | 675 (new file)

@@ -0,0 +1,675 @@
mod column_operation;
mod value_index;

use std::io::{self, Write};

use column_operation::ColumnOperation;
use common::CountingWriter;
use fastfield_codecs::serialize::ValueIndexInfo;
use fastfield_codecs::{Column, MonotonicallyMappableToU64, VecColumn};
use ordered_float::NotNan;
use stacker::{Addr, ArenaHashMap, ExpUnrolledLinkedList, MemoryArena};

use crate::column_type_header::{ColumnType, ColumnTypeAndCardinality};
use crate::dictionary::{DictionaryBuilder, IdMapping, UnorderedId};
use crate::value::{Coerce, NumericalType, NumericalValue};
use crate::writer::column_operation::SymbolValue;
use crate::writer::value_index::{IndexBuilder, SpareIndexBuilders};
use crate::{Cardinality, ColumnarSerializer, DocId};

#[derive(Copy, Clone, Default)]
struct ColumnWriter {
    // Detected cardinality of the column so far.
    cardinality: Cardinality,
    // Last document inserted.
    // None if no doc has been added yet.
    last_doc_opt: Option<u32>,
    // Buffer containing the serialized values.
    values: ExpUnrolledLinkedList,
}

#[derive(Clone, Copy, Default)]
pub struct NumericalColumnWriter {
    compatible_numerical_types: CompatibleNumericalTypes,
    column_writer: ColumnWriter,
}

#[derive(Clone, Copy)]
struct CompatibleNumericalTypes {
    all_values_within_i64_range: bool,
    all_values_within_u64_range: bool,
}

impl Default for CompatibleNumericalTypes {
    fn default() -> CompatibleNumericalTypes {
        CompatibleNumericalTypes {
            all_values_within_i64_range: true,
            all_values_within_u64_range: true,
        }
    }
}

impl CompatibleNumericalTypes {
    pub fn accept_value(&mut self, numerical_value: NumericalValue) {
        match numerical_value {
            NumericalValue::I64(val_i64) => {
                let value_within_u64_range = val_i64 >= 0i64;
                self.all_values_within_u64_range &= value_within_u64_range;
            }
            NumericalValue::U64(val_u64) => {
                let value_within_i64_range = val_u64 < i64::MAX as u64;
                self.all_values_within_i64_range &= value_within_i64_range;
            }
            NumericalValue::F64(_) => {
                self.all_values_within_i64_range = false;
                self.all_values_within_u64_range = false;
            }
        }
    }

    pub fn to_numerical_type(self) -> NumericalType {
        if self.all_values_within_i64_range {
            NumericalType::I64
        } else if self.all_values_within_u64_range {
            NumericalType::U64
        } else {
            NumericalType::F64
        }
    }
}

impl NumericalColumnWriter {
    pub fn record_numerical_value(
        &mut self,
        doc: DocId,
        value: NumericalValue,
        arena: &mut MemoryArena,
    ) {
        self.compatible_numerical_types.accept_value(value);
        self.column_writer.record(doc, value, arena);
    }
}

impl ColumnWriter {
    fn symbol_iterator<'a, V: SymbolValue>(
        &self,
        arena: &MemoryArena,
        buffer: &'a mut Vec<u8>,
    ) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
        buffer.clear();
        self.values.read_to_end(arena, buffer);
        let mut cursor: &[u8] = &buffer[..];
        std::iter::from_fn(move || {
            if cursor.is_empty() {
                return None;
            }
            let symbol = ColumnOperation::deserialize(&mut cursor)
                .expect("Failed to deserialize symbol from in-memory. This should never happen.");
            Some(symbol)
        })
    }

    fn delta_with_last_doc(&self, doc: DocId) -> u32 {
        self.last_doc_opt
            .map(|last_doc| doc - last_doc)
            .unwrap_or(doc + 1u32)
    }

    /// Records a change of the document being recorded.
    ///
    /// This function will also update the cardinality of the column
    /// if necessary.
    fn record(&mut self, doc: DocId, value: NumericalValue, arena: &mut MemoryArena) {
        // Difference between `doc` and the last doc.
        match self.delta_with_last_doc(doc) {
            0 => {
                // This is the last encountered document.
                self.cardinality = Cardinality::Multivalued;
            }
            1 => {
                self.last_doc_opt = Some(doc);
                self.write_symbol::<NumericalValue>(ColumnOperation::NewDoc(doc), arena);
            }
            _ => {
                self.cardinality = self.cardinality.max(Cardinality::Optional);
                self.last_doc_opt = Some(doc);
                self.write_symbol::<NumericalValue>(ColumnOperation::NewDoc(doc), arena);
            }
        }
        self.write_symbol(ColumnOperation::Value(value), arena);
    }

    // Get the cardinality.
    // The overall number of docs in the column is necessary to
    // deal with the case where all docs contain 1 value, except some documents
    // at the end of the column.
    fn get_cardinality(&self, num_docs: DocId) -> Cardinality {
        if self.delta_with_last_doc(num_docs) > 1 {
            self.cardinality.max(Cardinality::Optional)
        } else {
            self.cardinality
        }
    }

    fn write_symbol<V: SymbolValue>(
        &mut self,
        symbol: ColumnOperation<V>,
        arena: &mut MemoryArena,
    ) {
        self.values
            .writer(arena)
            .extend_from_slice(symbol.serialize().as_slice());
    }
}

#[derive(Copy, Clone, Default)]
pub struct BytesColumnWriter {
    dictionary_id: u32,
    column_writer: ColumnWriter,
}

impl BytesColumnWriter {
    pub fn with_dictionary_id(dictionary_id: u32) -> BytesColumnWriter {
        BytesColumnWriter {
            dictionary_id,
            column_writer: Default::default(),
        }
    }

    pub fn record_bytes(
        &mut self,
        doc: DocId,
        bytes: &[u8],
        dictionaries: &mut [DictionaryBuilder],
        arena: &mut MemoryArena,
    ) {
        let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
        let numerical_value = NumericalValue::U64(unordered_id.0 as u64);
        self.column_writer.record(doc, numerical_value, arena);
    }
}

pub struct ColumnarWriter {
    numerical_field_hash_map: ArenaHashMap,
    bytes_field_hash_map: ArenaHashMap,
    arena: MemoryArena,
    // Dictionaries used to store dictionary-encoded values.
    dictionaries: Vec<DictionaryBuilder>,
    buffers: SpareBuffers,
}

#[derive(Default)]
struct SpareBuffers {
    byte_buffer: Vec<u8>,
    value_index_builders: SpareIndexBuilders,
    i64_values: Vec<i64>,
    u64_values: Vec<u64>,
    f64_values: Vec<ordered_float::NotNan<f64>>,
}

impl Default for ColumnarWriter {
    fn default() -> Self {
        ColumnarWriter {
            numerical_field_hash_map: ArenaHashMap::new(10_000),
            bytes_field_hash_map: ArenaHashMap::new(10_000),
            dictionaries: Vec::new(),
            arena: MemoryArena::default(),
            buffers: SpareBuffers::default(),
        }
    }
}

#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
enum BytesOrNumerical {
    Bytes,
    Numerical,
}

impl ColumnarWriter {
    pub fn record_numerical(&mut self, doc: DocId, key: &[u8], numerical_value: NumericalValue) {
        let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
        hash_map.mutate_or_create(key, |column_opt: Option<NumericalColumnWriter>| {
            let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
            column.record_numerical_value(doc, numerical_value, arena);
            column
        });
    }

    pub fn record_bytes(&mut self, doc: DocId, key: &[u8], value: &[u8]) {
        let (hash_map, arena, dictionaries) = (
            &mut self.bytes_field_hash_map,
            &mut self.arena,
            &mut self.dictionaries,
        );
        hash_map.mutate_or_create(key, |column_opt: Option<BytesColumnWriter>| {
            let mut column: BytesColumnWriter = column_opt.unwrap_or_else(|| {
                let dictionary_id = dictionaries.len() as u32;
                dictionaries.push(DictionaryBuilder::default());
                BytesColumnWriter::with_dictionary_id(dictionary_id)
            });
            column.record_bytes(doc, value, dictionaries, arena);
            column
        });
    }

    pub fn serialize<W: io::Write>(
        &mut self,
        num_docs: DocId,
        mut serializer: ColumnarSerializer<W>,
    ) -> io::Result<()> {
        let mut field_columns: Vec<(&[u8], BytesOrNumerical, Addr)> = self
            .numerical_field_hash_map
            .iter()
            .map(|(term, addr, _)| (term, BytesOrNumerical::Numerical, addr))
            .collect();
        field_columns.extend(
            self.bytes_field_hash_map
                .iter()
                .map(|(term, addr, _)| (term, BytesOrNumerical::Bytes, addr)),
        );
        let mut key_buffer = Vec::new();
        field_columns.sort_unstable_by_key(|(key, col_type, _)| (*key, *col_type));
        let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
        for (key, bytes_or_numerical, addr) in field_columns {
            let wrt = serializer.wrt();
            let start_offset = wrt.written_bytes();
            let column_type_and_cardinality: ColumnTypeAndCardinality = match bytes_or_numerical {
                BytesOrNumerical::Bytes => {
                    let BytesColumnWriter { dictionary_id, column_writer } =
                        self.bytes_field_hash_map.read(addr);
                    let dictionary_builder = &dictionaries[dictionary_id as usize];
                    serialize_bytes_column(
                        &column_writer,
                        num_docs,
                        dictionary_builder,
                        arena,
                        buffers,
                        wrt,
                    )?;
                    ColumnTypeAndCardinality {
                        cardinality: column_writer.get_cardinality(num_docs),
                        typ: ColumnType::Bytes,
                    }
                }
                BytesOrNumerical::Numerical => {
                    let NumericalColumnWriter { compatible_numerical_types, column_writer } =
                        self.numerical_field_hash_map.read(addr);
                    let cardinality = column_writer.get_cardinality(num_docs);
                    let numerical_type = compatible_numerical_types.to_numerical_type();
                    serialize_numerical_column(
                        cardinality,
                        numerical_type,
                        &column_writer,
                        num_docs,
                        arena,
                        buffers,
                        wrt,
                    )?;
                    ColumnTypeAndCardinality {
                        cardinality,
                        typ: ColumnType::Numerical(numerical_type),
                    }
                }
            };
            let end_offset = wrt.written_bytes();
            let key_with_type = prepare_key(key, column_type_and_cardinality, &mut key_buffer);
            serializer.record_column_offsets(key_with_type, start_offset..end_offset)?;
        }
        serializer.finalize()?;
        Ok(())
    }
}

/// Returns a key consisting of the concatenation of the key and the column_type_and_cardinality
/// code.
fn prepare_key<'a>(
    key: &[u8],
    column_type_cardinality: ColumnTypeAndCardinality,
    buffer: &'a mut Vec<u8>,
) -> &'a [u8] {
    buffer.clear();
    buffer.extend_from_slice(key);
    buffer.push(0u8);
    buffer.push(column_type_cardinality.to_code());
    &buffer[..]
}

fn serialize_bytes_column<W: io::Write>(
    column_writer: &ColumnWriter,
    num_docs: DocId,
    dictionary_builder: &DictionaryBuilder,
    arena: &MemoryArena,
    buffers: &mut SpareBuffers,
    wrt: &mut CountingWriter<W>,
) -> io::Result<()> {
    let start_offset = wrt.written_bytes();
    let id_mapping: IdMapping = dictionary_builder.serialize(wrt)?;
    let dictionary_num_bytes: u32 = (wrt.written_bytes() - start_offset) as u32;
    let cardinality = column_writer.get_cardinality(num_docs);
    let SpareBuffers {
        byte_buffer,
        value_index_builders,
        u64_values,
        ..
    } = buffers;
    let symbol_iterator = column_writer
        .symbol_iterator(arena, byte_buffer)
        .map(|symbol: ColumnOperation<UnorderedId>| {
            // We map unordered ids to ordered ids.
            match symbol {
                ColumnOperation::Value(unordered_id) => {
                    let ordered_id = id_mapping.to_ord(unordered_id);
                    ColumnOperation::Value(ordered_id.0 as u64)
                }
                ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
            }
        });
    serialize_column(
        symbol_iterator,
        cardinality,
        num_docs,
        value_index_builders,
        u64_values,
        wrt,
    )?;
    wrt.write_all(&dictionary_num_bytes.to_le_bytes()[..])?;
    Ok(())
}

fn serialize_numerical_column<W: io::Write>(
    cardinality: Cardinality,
    numerical_type: NumericalType,
    column_writer: &ColumnWriter,
    num_docs: DocId,
    arena: &MemoryArena,
    buffers: &mut SpareBuffers,
    wrt: &mut W,
) -> io::Result<()> {
    let SpareBuffers {
        byte_buffer,
        value_index_builders,
        u64_values,
        i64_values,
        f64_values,
    } = buffers;
    let symbol_iterator = column_writer.symbol_iterator(arena, byte_buffer);
    match numerical_type {
        NumericalType::I64 => {
            serialize_column(
                coerce_numerical_symbol::<i64>(symbol_iterator),
                cardinality,
                num_docs,
                value_index_builders,
                i64_values,
                wrt,
            )?;
        }
        NumericalType::U64 => {
            serialize_column(
                coerce_numerical_symbol::<u64>(symbol_iterator),
                cardinality,
                num_docs,
                value_index_builders,
                u64_values,
                wrt,
            )?;
        }
        NumericalType::F64 => {
            serialize_column(
                coerce_numerical_symbol::<NotNan<f64>>(symbol_iterator),
                cardinality,
                num_docs,
                value_index_builders,
                f64_values,
                wrt,
            )?;
        }
    };
    Ok(())
}

fn serialize_column<
    T: Copy + Ord + Default + Send + Sync + MonotonicallyMappableToU64,
    W: io::Write,
>(
    symbol_iterator: impl Iterator<Item = ColumnOperation<T>>,
    cardinality: Cardinality,
    num_docs: DocId,
    value_index_builders: &mut SpareIndexBuilders,
    values: &mut Vec<T>,
    wrt: &mut W,
) -> io::Result<()>
where
    for<'a> VecColumn<'a, T>: Column<T>,
{
    match cardinality {
        Cardinality::Required => {
            consume_symbol_iterator(
                symbol_iterator,
                value_index_builders.borrow_required_index_builder(),
                values,
            );
            fastfield_codecs::serialize(
                VecColumn::from(&values[..]),
                wrt,
                &fastfield_codecs::ALL_CODEC_TYPES[..],
            )?;
        }
        Cardinality::Optional => {
            let optional_index_builder = value_index_builders.borrow_optional_index_builder();
            consume_symbol_iterator(symbol_iterator, optional_index_builder, values);
            let optional_index = optional_index_builder.finish(num_docs);
            fastfield_codecs::serialize::serialize_new(
                ValueIndexInfo::SingleValue(Box::new(optional_index)),
                VecColumn::from(&values[..]),
                wrt,
                &fastfield_codecs::ALL_CODEC_TYPES[..],
            )?;
        }
        Cardinality::Multivalued => {
            let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
            consume_symbol_iterator(symbol_iterator, multivalued_index_builder, values);
            let multivalued_index = multivalued_index_builder.finish(num_docs);
            fastfield_codecs::serialize::serialize_new(
                ValueIndexInfo::MultiValue(Box::new(multivalued_index)),
                VecColumn::from(&values[..]),
                wrt,
                &fastfield_codecs::ALL_CODEC_TYPES[..],
            )?;
        }
    }
    Ok(())
}

fn coerce_numerical_symbol<T>(
    symbol_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
) -> impl Iterator<Item = ColumnOperation<T>>
where T: Coerce {
    symbol_iterator.map(|symbol| match symbol {
        ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
        ColumnOperation::Value(numerical_value) => {
            ColumnOperation::Value(Coerce::coerce(numerical_value))
        }
    })
}

fn consume_symbol_iterator<T, TIndexBuilder: IndexBuilder>(
    symbol_iterator: impl Iterator<Item = ColumnOperation<T>>,
    index_builder: &mut TIndexBuilder,
    values: &mut Vec<T>,
) {
    for symbol in symbol_iterator {
        match symbol {
            ColumnOperation::NewDoc(doc) => {
                index_builder.record_doc(doc);
            }
            ColumnOperation::Value(value) => {
                index_builder.record_value();
                values.push(value);
            }
        }
    }
}
#[cfg(test)]
mod tests {

    use ordered_float::NotNan;
    use stacker::MemoryArena;

    use super::prepare_key;
    use crate::column_type_header::{ColumnType, ColumnTypeAndCardinality};
    use crate::value::{NumericalType, NumericalValue};
    use crate::writer::column_operation::ColumnOperation;
    use crate::writer::CompatibleNumericalTypes;
    use crate::Cardinality;

    #[test]
    fn test_prepare_key_bytes() {
        let mut buffer: Vec<u8> = b"somegarbage".to_vec();
        let column_type_and_cardinality = ColumnTypeAndCardinality {
            typ: ColumnType::Bytes,
            cardinality: Cardinality::Optional,
        };
        let prepared_key = prepare_key(b"root\0child", column_type_and_cardinality, &mut buffer);
        assert_eq!(prepared_key.len(), 12);
        assert_eq!(&prepared_key[..10], b"root\0child");
        assert_eq!(prepared_key[10], 0u8);
        assert_eq!(prepared_key[11], column_type_and_cardinality.to_code());
    }

    #[test]
    fn test_column_writer_required_simple() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(0u32, 14i64.into(), &mut arena);
        column_writer.record(1u32, 15i64.into(), &mut arena);
        column_writer.record(2u32, (-16i64).into(), &mut arena);
        assert_eq!(column_writer.get_cardinality(3), Cardinality::Required);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .symbol_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 6);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(14i64))
        ));
        assert!(matches!(symbols[2], ColumnOperation::NewDoc(1u32)));
        assert!(matches!(
            symbols[3],
            ColumnOperation::Value(NumericalValue::I64(15i64))
        ));
        assert!(matches!(symbols[4], ColumnOperation::NewDoc(2u32)));
        assert!(matches!(
            symbols[5],
            ColumnOperation::Value(NumericalValue::I64(-16i64))
        ));
    }

    #[test]
    fn test_column_writer_optional_cardinality_missing_first() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(1u32, 15i64.into(), &mut arena);
        column_writer.record(2u32, (-16i64).into(), &mut arena);
        assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .symbol_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 4);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(15i64))
        ));
        assert!(matches!(symbols[2], ColumnOperation::NewDoc(2u32)));
        assert!(matches!(
            symbols[3],
            ColumnOperation::Value(NumericalValue::I64(-16i64))
        ));
    }

    #[test]
    fn test_column_writer_optional_cardinality_missing_last() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(0u32, 15i64.into(), &mut arena);
        assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .symbol_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 2);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(15i64))
        ));
    }

    #[test]
    fn test_column_writer_multivalued() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(0u32, 16i64.into(), &mut arena);
        column_writer.record(0u32, 17i64.into(), &mut arena);
        assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .symbol_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 3);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(16i64))
        ));
        assert!(matches!(
            symbols[2],
            ColumnOperation::Value(NumericalValue::I64(17i64))
        ));
    }

    #[track_caller]
    fn test_column_writer_coercion_iter_aux(
        values: impl Iterator<Item = NumericalValue>,
        expected_numerical_type: NumericalType,
    ) {
        let mut compatible_numerical_types = CompatibleNumericalTypes::default();
        for value in values {
            compatible_numerical_types.accept_value(value);
        }
        assert_eq!(
            compatible_numerical_types.to_numerical_type(),
            expected_numerical_type
        );
    }

    #[track_caller]
    fn test_column_writer_coercion_aux(
        values: &[NumericalValue],
        expected_numerical_type: NumericalType,
    ) {
        test_column_writer_coercion_iter_aux(values.iter().copied(), expected_numerical_type);
        test_column_writer_coercion_iter_aux(values.iter().rev().copied(), expected_numerical_type);
    }

    #[test]
    fn test_column_writer_coercion() {
        test_column_writer_coercion_aux(&[], NumericalType::I64);
        test_column_writer_coercion_aux(&[1i64.into()], NumericalType::I64);
        test_column_writer_coercion_aux(&[1u64.into()], NumericalType::I64);
        // We don't detect exact integer at the moment. We could!
        test_column_writer_coercion_aux(&[NotNan::new(1f64).unwrap().into()], NumericalType::F64);
        test_column_writer_coercion_aux(&[u64::MAX.into()], NumericalType::U64);
        test_column_writer_coercion_aux(&[(i64::MAX as u64).into()], NumericalType::U64);
        test_column_writer_coercion_aux(&[(1u64 << 63).into()], NumericalType::U64);
        test_column_writer_coercion_aux(&[1i64.into(), 1u64.into()], NumericalType::I64);
        test_column_writer_coercion_aux(&[u64::MAX.into(), (-1i64).into()], NumericalType::F64);
    }
}

218
columnar/src/writer/value_index.rs
Normal file
@@ -0,0 +1,218 @@
use fastfield_codecs::serialize::{MultiValueIndexInfo, SingleValueIndexInfo};

use crate::DocId;

/// The `IndexBuilder` interprets a sequence of
/// calls of the form:
/// (record_doc,record_value+)*
/// and can then serialize the results into an index.
///
/// It has different implementations depending on whether the
/// cardinality is required, optional, or multivalued.
pub(crate) trait IndexBuilder {
    fn record_doc(&mut self, doc: DocId);
    #[inline]
    fn record_value(&mut self) {}
}

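To make the contract concrete, here is a minimal sketch (illustrative only) of the call sequence a writer issues for a multivalued column where doc 1 carries two values and doc 2 one value; it mirrors the test_multivalued_value_index_builder test further down:

    let mut builder = MultivaluedIndexBuilder::default();
    builder.record_doc(1);  // doc 1 starts
    builder.record_value(); // first value of doc 1
    builder.record_value(); // second value of doc 1
    builder.record_doc(2);  // doc 2 starts
    builder.record_value(); // single value of doc 2
    let index = builder.finish(4); // serializable view over 4 docs
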
/// The RequiredIndexBuilder does nothing.
#[derive(Default)]
pub struct RequiredIndexBuilder;

impl IndexBuilder for RequiredIndexBuilder {
    #[inline(always)]
    fn record_doc(&mut self, _doc: DocId) {}
}

#[derive(Default)]
pub struct OptionalIndexBuilder {
    docs: Vec<DocId>,
}

struct SingleValueArrayIndex<'a> {
    docs: &'a [DocId],
    num_docs: DocId,
}

impl<'a> SingleValueIndexInfo for SingleValueArrayIndex<'a> {
    fn num_vals(&self) -> u32 {
        self.num_docs as u32
    }

    fn num_non_nulls(&self) -> u32 {
        self.docs.len() as u32
    }

    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        Box::new(self.docs.iter().copied())
    }
}

impl OptionalIndexBuilder {
    pub fn finish(&mut self, num_docs: DocId) -> impl SingleValueIndexInfo + '_ {
        debug_assert!(self
            .docs
            .last()
            .copied()
            .map(|last_doc| last_doc < num_docs)
            .unwrap_or(true));
        SingleValueArrayIndex {
            docs: &self.docs[..],
            num_docs,
        }
    }

    fn reset(&mut self) {
        self.docs.clear();
    }
}

impl IndexBuilder for OptionalIndexBuilder {
    #[inline(always)]
    fn record_doc(&mut self, doc: DocId) {
        debug_assert!(self
            .docs
            .last()
            .copied()
            .map(|prev_doc| doc > prev_doc)
            .unwrap_or(true));
        self.docs.push(doc);
    }
}

#[derive(Default)]
pub struct MultivaluedIndexBuilder {
    // TODO should we switch to `start_offset`?
    end_values: Vec<DocId>,
    total_num_vals_seen: u32,
}

pub struct MultivaluedValueArrayIndex<'a> {
    end_offsets: &'a [DocId],
}

impl<'a> MultiValueIndexInfo for MultivaluedValueArrayIndex<'a> {
    fn num_docs(&self) -> u32 {
        self.end_offsets.len() as u32
    }

    fn num_vals(&self) -> u32 {
        self.end_offsets.last().copied().unwrap_or(0u32)
    }

    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        if self.end_offsets.is_empty() {
            return Box::new(std::iter::empty());
        }
        let n = self.end_offsets.len();
        Box::new(std::iter::once(0u32).chain(self.end_offsets[..n - 1].iter().copied()))
    }
}

impl MultivaluedIndexBuilder {
    pub fn finish(&mut self, num_docs: DocId) -> impl MultiValueIndexInfo + '_ {
        self.end_values
            .resize(num_docs as usize, self.total_num_vals_seen);
        MultivaluedValueArrayIndex {
            end_offsets: &self.end_values[..],
        }
    }

    fn reset(&mut self) {
        self.end_values.clear();
        self.total_num_vals_seen = 0;
    }
}

impl IndexBuilder for MultivaluedIndexBuilder {
    fn record_doc(&mut self, doc: DocId) {
        self.end_values
            .resize(doc as usize, self.total_num_vals_seen);
    }

    fn record_value(&mut self) {
        self.total_num_vals_seen += 1;
    }
}

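Worked example of the bookkeeping above (the same sequence as in the sketch after the trait): record_doc(doc) back-fills end_values up to doc with the running value count, and finish(num_docs) pads to num_docs:

    // record_doc(1)      -> end_values == [0]
    // record_value() x 2 -> total_num_vals_seen == 2
    // record_doc(2)      -> end_values == [0, 2]
    // record_value()     -> total_num_vals_seen == 3
    // finish(4)          -> end_values == [0, 2, 3, 3]
    // iter() prepends 0 and drops the last entry: start offsets [0, 0, 2, 3]
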
/// The `SpareIndexBuilders` is there to avoid allocating a
/// new index builder for every single column.
#[derive(Default)]
pub struct SpareIndexBuilders {
    required_index_builder: RequiredIndexBuilder,
    optional_index_builder: OptionalIndexBuilder,
    multivalued_index_builder: MultivaluedIndexBuilder,
}

impl SpareIndexBuilders {
    pub fn borrow_required_index_builder(&mut self) -> &mut RequiredIndexBuilder {
        &mut self.required_index_builder
    }

    pub fn borrow_optional_index_builder(&mut self) -> &mut OptionalIndexBuilder {
        self.optional_index_builder.reset();
        &mut self.optional_index_builder
    }

    pub fn borrow_multivalued_index_builder(&mut self) -> &mut MultivaluedIndexBuilder {
        self.multivalued_index_builder.reset();
        &mut self.multivalued_index_builder
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_optional_value_index_builder() {
        let mut opt_value_index_builder = OptionalIndexBuilder::default();
        opt_value_index_builder.record_doc(0u32);
        opt_value_index_builder.record_value();
        assert_eq!(
            &opt_value_index_builder
                .finish(1u32)
                .iter()
                .collect::<Vec<u32>>(),
            &[0]
        );
        opt_value_index_builder.reset();
        opt_value_index_builder.record_doc(1u32);
        opt_value_index_builder.record_value();
        assert_eq!(
            &opt_value_index_builder
                .finish(2u32)
                .iter()
                .collect::<Vec<u32>>(),
            &[1]
        );
    }

    #[test]
    fn test_multivalued_value_index_builder() {
        let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
        multivalued_value_index_builder.record_doc(1u32);
        multivalued_value_index_builder.record_value();
        multivalued_value_index_builder.record_value();
        multivalued_value_index_builder.record_doc(2u32);
        multivalued_value_index_builder.record_value();
        assert_eq!(
            multivalued_value_index_builder
                .finish(4u32)
                .iter()
                .collect::<Vec<u32>>(),
            vec![0, 0, 2, 3]
        );
        multivalued_value_index_builder.reset();
        multivalued_value_index_builder.record_doc(2u32);
        multivalued_value_index_builder.record_value();
        multivalued_value_index_builder.record_value();
        assert_eq!(
            multivalued_value_index_builder
                .finish(4u32)
                .iter()
                .collect::<Vec<u32>>(),
            vec![0, 0, 0, 2]
        );
    }
}

@@ -1,16 +1,21 @@
[package]
name = "tantivy-common"
version = "0.3.0"
version = "0.5.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "common traits and utility functions used by multiple tantivy subcrates"
documentation = "https://docs.rs/tantivy_common/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"


# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
byteorder = "1.4.3"
ownedbytes = { version="0.3", path="../ownedbytes" }
ownedbytes = { version= "0.5", path="../ownedbytes" }
async-trait = "0.1"

[dev-dependencies]
proptest = "1.0.0"

@@ -151,7 +151,7 @@ impl TinySet {
        if self.is_empty() {
            None
        } else {
            let lowest = self.0.trailing_zeros() as u32;
            let lowest = self.0.trailing_zeros();
            self.0 ^= TinySet::singleton(lowest).0;
            Some(lowest)
        }

@@ -421,7 +421,7 @@ mod tests {
            bitset.serialize(&mut out).unwrap();

            let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
            assert_eq!(bitset.len() as usize, i as usize);
            assert_eq!(bitset.len(), i as usize);
        }
    }

@@ -432,7 +432,7 @@ mod tests {
        bitset.serialize(&mut out).unwrap();

        let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
        assert_eq!(bitset.len() as usize, 64);
        assert_eq!(bitset.len(), 64);
    }

    #[test]

@@ -1,19 +1,18 @@
use std::ops::{Deref, Range};
use std::ops::{Deref, Range, RangeBounds};
use std::sync::Arc;
use std::{fmt, io};

use async_trait::async_trait;
use common::HasLen;
use stable_deref_trait::StableDeref;
use ownedbytes::{OwnedBytes, StableDeref};

use crate::directory::OwnedBytes;
use crate::HasLen;

/// Objects that represent file sections in tantivy.
///
/// By contract, whatever happens to the directory file, as long as a FileHandle
/// is alive, the data associated with it cannot be altered or destroyed.
///
/// The underlying behavior is therefore specific to the [`Directory`](crate::Directory) that
/// The underlying behavior is therefore specific to the `Directory` that
/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
/// on the filesystem.

@@ -24,13 +23,9 @@ pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
    /// This method may panic if the range requested is invalid.
    fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes>;

    #[cfg(feature = "quickwit")]
    #[doc(hidden)]
    async fn read_bytes_async(
        &self,
        _byte_range: Range<usize>,
    ) -> crate::AsyncIoResult<OwnedBytes> {
        Err(crate::error::AsyncIoError::AsyncUnsupported)
    async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
        self.read_bytes(byte_range)
    }
}

@@ -42,7 +37,7 @@ impl FileHandle for &'static [u8] {
    }

    #[cfg(feature = "quickwit")]
    async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
    async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
        Ok(self.read_bytes(byte_range)?)
    }
}

@@ -70,6 +65,25 @@ impl fmt::Debug for FileSlice {
    }
}

#[inline]
fn combine_ranges<R: RangeBounds<usize>>(orig_range: Range<usize>, rel_range: R) -> Range<usize> {
    let start: usize = orig_range.start
        + match rel_range.start_bound().cloned() {
            std::ops::Bound::Included(rel_start) => rel_start,
            std::ops::Bound::Excluded(rel_start) => rel_start + 1,
            std::ops::Bound::Unbounded => 0,
        };
    assert!(start <= orig_range.end);
    let end: usize = match rel_range.end_bound().cloned() {
        std::ops::Bound::Included(rel_end) => orig_range.start + rel_end + 1,
        std::ops::Bound::Excluded(rel_end) => orig_range.start + rel_end,
        std::ops::Bound::Unbounded => orig_range.end,
    };
    assert!(end >= start);
    assert!(end <= orig_range.end);
    start..end
}

impl FileSlice {
    /// Wraps a FileHandle.
    pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {

@@ -93,11 +107,11 @@ impl FileSlice {
    ///
    /// Panics if `byte_range.end` exceeds the filesize.
    #[must_use]
    pub fn slice(&self, byte_range: Range<usize>) -> FileSlice {
        assert!(byte_range.end <= self.len());
    #[inline]
    pub fn slice<R: RangeBounds<usize>>(&self, byte_range: R) -> FileSlice {
        FileSlice {
            data: self.data.clone(),
            range: self.range.start + byte_range.start..self.range.start + byte_range.end,
            range: combine_ranges(self.range.clone(), byte_range),
        }
    }

@@ -117,9 +131,8 @@ impl FileSlice {
        self.data.read_bytes(self.range.clone())
    }

    #[cfg(feature = "quickwit")]
    #[doc(hidden)]
    pub async fn read_bytes_async(&self) -> crate::AsyncIoResult<OwnedBytes> {
    pub async fn read_bytes_async(&self) -> io::Result<OwnedBytes> {
        self.data.read_bytes_async(self.range.clone()).await
    }

@@ -137,12 +150,8 @@ impl FileSlice {
            .read_bytes(self.range.start + range.start..self.range.start + range.end)
    }

    #[cfg(feature = "quickwit")]
    #[doc(hidden)]
    pub async fn read_bytes_slice_async(
        &self,
        byte_range: Range<usize>,
    ) -> crate::AsyncIoResult<OwnedBytes> {
    pub async fn read_bytes_slice_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
        assert!(
            self.range.start + byte_range.end <= self.range.end,
            "`to` exceeds the fileslice length"

@@ -205,7 +214,7 @@ impl FileHandle for FileSlice {
    }

    #[cfg(feature = "quickwit")]
    async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
    async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
        self.read_bytes_slice_async(byte_range).await
    }
}

@@ -223,7 +232,7 @@ impl FileHandle for OwnedBytes {
    }

    #[cfg(feature = "quickwit")]
    async fn read_bytes_async(&self, range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
    async fn read_bytes_async(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
        let bytes = self.read_bytes(range)?;
        Ok(bytes)
    }

@@ -234,9 +243,9 @@ mod tests {
    use std::io;
    use std::sync::Arc;

    use common::HasLen;

    use super::{FileHandle, FileSlice};
    use crate::file_slice::combine_ranges;
    use crate::HasLen;

    #[test]
    fn test_file_slice() -> io::Result<()> {

@@ -307,4 +316,18 @@ mod tests {
            b"bcd"
        );
    }

    #[test]
    fn test_combine_range() {
        assert_eq!(combine_ranges(1..3, 0..1), 1..2);
        assert_eq!(combine_ranges(1..3, 1..), 2..3);
        assert_eq!(combine_ranges(1..4, ..2), 1..3);
        assert_eq!(combine_ranges(3..10, 2..5), 5..8);
    }

    #[test]
    #[should_panic]
    fn test_combine_range_panics() {
        let _ = combine_ranges(3..5, 1..4);
    }
}

@@ -5,11 +5,12 @@ use std::ops::Deref;
pub use byteorder::LittleEndian as Endianness;

mod bitset;
pub mod file_slice;
mod serialize;
mod vint;
mod writer;

pub use bitset::*;
pub use ownedbytes::OwnedBytes;
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use vint::{
    deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,

@@ -94,6 +94,20 @@ impl FixedSize for u32 {
    const SIZE_IN_BYTES: usize = 4;
}

impl BinarySerializable for u16 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u16::<Endianness>(*self)
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<u16> {
        reader.read_u16::<Endianness>()
    }
}

impl FixedSize for u16 {
    const SIZE_IN_BYTES: usize = 2;
}

impl BinarySerializable for u64 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u64::<Endianness>(*self)

@@ -118,7 +118,7 @@ fn main() -> tantivy::Result<()> {
        .into_iter()
        .collect();

    let collector = AggregationCollector::from_aggs(agg_req_1, None);
    let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

    let searcher = reader.searcher();
    let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();

@@ -9,7 +9,7 @@

use std::sync::Arc;

use fastfield_codecs::OptionalColumn;
use fastfield_codecs::Column;
// ---
// Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector};

@@ -97,7 +97,7 @@ impl Collector for StatsCollector {
}

struct StatsSegmentCollector {
    fast_field_reader: Arc<dyn OptionalColumn<u64>>,
    fast_field_reader: Arc<dyn Column<u64>>,
    stats: Stats,
}

@@ -105,12 +105,10 @@ impl SegmentCollector for StatsSegmentCollector {
    type Fruit = Option<Stats>;

    fn collect(&mut self, doc: u32, _score: Score) {
        if let Some(value) = self.fast_field_reader.get_val(doc) {
            let value = value as f64;
            self.stats.count += 1;
            self.stats.sum += value;
            self.stats.squared_sum += value * value;
        }
        let value = self.fast_field_reader.get_val(doc) as f64;
        self.stats.count += 1;
        self.stats.sum += value;
        self.stats.squared_sum += value * value;
    }

    fn harvest(self) -> <Self as SegmentCollector>::Fruit {

@@ -51,7 +51,7 @@ impl Warmer for DynamicPriceColumn {
        let product_id_reader = segment.fast_fields().u64(self.field)?;
        let product_ids: Vec<ProductId> = segment
            .doc_ids_alive()
            .flat_map(|doc| product_id_reader.get_val(doc))
            .map(|doc| product_id_reader.get_val(doc))
            .collect();
        let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
        let mut price_vals: Vec<Price> = Vec::new();

@@ -1,23 +1,27 @@
[package]
name = "fastfield_codecs"
version = "0.2.0"
version = "0.3.0"
authors = ["Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "Fast field codecs used by tantivy"
documentation = "https://docs.rs/fastfield_codecs/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
common = { version = "0.5", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
ownedbytes = { version = "0.5", path = "../ownedbytes" }
prettytable-rs = {version="0.9.0", optional= true}
rand = {version="0.8.3", optional= true}
fastdivide = "0.4"
log = "0.4"
itertools = { version = "0.10.3" }
measure_time = { version="0.8.2", optional=true}
ordered-float = "3.4"

[dev-dependencies]
more-asserts = "0.3.0"

@@ -41,7 +41,7 @@ mod tests {
    ) -> Arc<dyn Column<T>> {
        let mut buffer = Vec::new();
        serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
        open(OwnedBytes::new(buffer)).unwrap().to_full().unwrap()
        open(OwnedBytes::new(buffer)).unwrap()
    }

    #[bench]

@@ -103,7 +103,7 @@ mod tests {
        let iter_gen = || data.iter().cloned();
        serialize_u128(iter_gen, data.len() as u32, &mut out).unwrap();
        let out = OwnedBytes::new(out);
        open_u128::<u128>(out).unwrap().to_full().unwrap()
        open_u128::<u128>(out).unwrap()
    }

    #[bench]

@@ -75,7 +75,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
        if column.num_vals() < 10 * CHUNK_SIZE as u32 {
            return None;
        }
        let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE as usize).collect();
        let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE).collect();
        let line = Line::train(&VecColumn::from(&first_chunk));
        for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
            let interpolated_val = line.eval(i as u32);

@@ -208,7 +208,7 @@ impl CompactSpaceBuilder {
            };
            let covered_range_len = range_mapping.range_length();
            ranges_mapping.push(range_mapping);
            compact_start += covered_range_len as u64;
            compact_start += covered_range_len;
        }
        // println!("num ranges {}", ranges_mapping.len());
        CompactSpace { ranges_mapping }

@@ -97,7 +97,7 @@ impl BinarySerializable for CompactSpace {
            };
            let range_length = range_mapping.range_length();
            ranges_mapping.push(range_mapping);
            compact_start += range_length as u64;
            compact_start += range_length;
        }

        Ok(Self { ranges_mapping })

@@ -407,10 +407,10 @@ impl CompactSpaceDecompressor {
            let idx2 = idx + 1;
            let idx3 = idx + 2;
            let idx4 = idx + 3;
            let val1 = get_val(idx1 as u32);
            let val2 = get_val(idx2 as u32);
            let val3 = get_val(idx3 as u32);
            let val4 = get_val(idx4 as u32);
            let val1 = get_val(idx1);
            let val2 = get_val(idx2);
            let val3 = get_val(idx3);
            let val4 = get_val(idx4);
            push_if_in_range(idx1, val1);
            push_if_in_range(idx2, val2);
            push_if_in_range(idx3, val3);

@@ -419,14 +419,13 @@ impl CompactSpaceDecompressor {

        // handle rest
        for idx in cutoff..position_range.end {
            push_if_in_range(idx, get_val(idx as u32));
            push_if_in_range(idx, get_val(idx));
        }
    }

    #[inline]
    fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
        (0..self.params.num_vals)
            .map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u64)
        (0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
    }

    #[inline]

@@ -456,6 +455,8 @@ impl CompactSpaceDecompressor {
mod tests {

    use super::*;
    use crate::format_version::read_format_version;
    use crate::null_index_footer::read_null_index_footer;
    use crate::serialize::U128Header;
    use crate::{open_u128, serialize_u128};

@@ -541,7 +542,10 @@ mod tests {
        .unwrap();

    let data = OwnedBytes::new(out);
    let (data, _format_version) = read_format_version(data).unwrap();
    let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
    test_all(data.clone(), u128_vals);

    data
}

@@ -559,11 +563,12 @@ mod tests {
            333u128,
        ];
        let mut data = test_aux_vals(vals);

        let _header = U128Header::deserialize(&mut data);
        let decomp = CompactSpaceDecompressor::open(data).unwrap();
        let complete_range = 0..vals.len() as u32;
        for (pos, val) in vals.iter().enumerate() {
            let val = *val as u128;
            let val = *val;
            let pos = pos as u32;
            let mut positions = Vec::new();
            decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);

@@ -660,7 +665,7 @@ mod tests {
            get_positions_for_value_range_helper(
                &decomp,
                4_000_211_221u128..=5_000_000_000u128,
                complete_range.clone()
                complete_range
            ),
            vec![6, 7]
        );

@@ -697,7 +702,7 @@ mod tests {
            vec![0]
        );
        assert_eq!(
            get_positions_for_value_range_helper(&decomp, 0..=105, complete_range.clone()),
            get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
            vec![0]
        );
    }

@@ -731,10 +736,7 @@ mod tests {
        ];
        let mut out = Vec::new();
        serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
        let decomp = open_u128::<u128>(OwnedBytes::new(out))
            .unwrap()
            .to_full()
            .unwrap();
        let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
        let complete_range = 0..vals.len() as u32;

        assert_eq!(

@@ -753,11 +755,7 @@ mod tests {
        );

        assert_eq!(
            get_positions_for_value_range_helper(
                &*decomp,
                1_000_000..=1_000_000,
                complete_range.clone()
            ),
            get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
            vec![11]
        );
    }

39
fastfield_codecs/src/format_version.rs
Normal file
@@ -0,0 +1,39 @@
use std::io;

use common::BinarySerializable;
use ownedbytes::OwnedBytes;

const MAGIC_NUMBER: u16 = 4335u16;
const FASTFIELD_FORMAT_VERSION: u8 = 1;

pub(crate) fn append_format_version(output: &mut impl io::Write) -> io::Result<()> {
    FASTFIELD_FORMAT_VERSION.serialize(output)?;
    MAGIC_NUMBER.serialize(output)?;

    Ok(())
}

pub(crate) fn read_format_version(data: OwnedBytes) -> io::Result<(OwnedBytes, u8)> {
    let (data, magic_number_bytes) = data.rsplit(2);

    let magic_number = u16::deserialize(&mut magic_number_bytes.as_slice())?;
    if magic_number != MAGIC_NUMBER {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("magic number mismatch {} != {}", magic_number, MAGIC_NUMBER),
        ));
    }
    let (data, format_version_bytes) = data.rsplit(1);
    let format_version = u8::deserialize(&mut format_version_bytes.as_slice())?;
    if format_version > FASTFIELD_FORMAT_VERSION {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!(
                "Unsupported fastfield format version: {}. Max supported version: {}",
                format_version, FASTFIELD_FORMAT_VERSION
            ),
        ));
    }

    Ok((data, format_version))
}
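Since the version byte is written before the magic number, the payload ends with a fixed three-byte footer, which is why read_format_version can peel it off back to front with rsplit. A sketch of the trailing layout:

    // [payload ...][format_version: u8][magic_number: u16 LE]
    //               ^ second rsplit(1)  ^ first rsplit(2)
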
@@ -59,11 +59,11 @@ mod tests {
        crate::serialize(VecColumn::from(&vals), &mut buffer, &[codec_type])?;
        let buffer = OwnedBytes::new(buffer);
        let column = crate::open::<i64>(buffer.clone())?;
        assert_eq!(column.get_val(0), Some(-4000i64));
        assert_eq!(column.get_val(1), Some(-3000i64));
        assert_eq!(column.get_val(2), Some(-2000i64));
        assert_eq!(column.max_value(), Some((num_vals as i64 - 5) * 1000));
        assert_eq!(column.min_value(), Some(-4000i64));
        assert_eq!(column.get_val(0), -4000i64);
        assert_eq!(column.get_val(1), -3000i64);
        assert_eq!(column.get_val(2), -2000i64);
        assert_eq!(column.max_value(), (num_vals as i64 - 5) * 1000);
        assert_eq!(column.min_value(), -4000i64);

        // Can't apply gcd
        let mut buffer_without_gcd = Vec::new();

@@ -101,11 +101,11 @@ mod tests {
        crate::serialize(VecColumn::from(&vals), &mut buffer, &[codec_type])?;
        let buffer = OwnedBytes::new(buffer);
        let column = crate::open::<u64>(buffer.clone())?;
        assert_eq!(column.get_val(0), Some(1000u64));
        assert_eq!(column.get_val(1), Some(2000u64));
        assert_eq!(column.get_val(2), Some(3000u64));
        assert_eq!(column.max_value(), Some(num_vals as u64 * 1000));
        assert_eq!(column.min_value(), Some(1000u64));
        assert_eq!(column.get_val(0), 1000u64);
        assert_eq!(column.get_val(1), 2000u64);
        assert_eq!(column.get_val(2), 3000u64);
        assert_eq!(column.max_value(), num_vals as u64 * 1000);
        assert_eq!(column.min_value(), 1000u64);

        // Can't apply gcd
        let mut buffer_without_gcd = Vec::new();

@@ -20,27 +20,32 @@ use std::sync::Arc;

use common::BinarySerializable;
use compact_space::CompactSpaceDecompressor;
use format_version::read_format_version;
use monotonic_mapping::{
    StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
    StrictlyMonotonicMappingToInternalBaseval, StrictlyMonotonicMappingToInternalGCDBaseval,
};
pub use optional_column::OptionalColumn;
use optional_column::ToOptionalColumn;
use null_index_footer::read_null_index_footer;
use ownedbytes::OwnedBytes;
use serialize::{Header, U128Header};

mod bitpacked;
mod blockwise_linear;
mod compact_space;
mod format_version;
mod line;
mod linear;
mod monotonic_mapping;
mod monotonic_mapping_u128;
mod optional_column;
#[allow(dead_code)]
mod null_index;
mod null_index_footer;

mod column;
mod gcd;
mod serialize;
pub mod serialize;

pub use ordered_float;

use self::bitpacked::BitpackedCodec;
use self::blockwise_linear::BlockwiseLinearCodec;

@@ -132,23 +137,22 @@ impl U128FastFieldCodecType {

/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open_u128<Item: MonotonicallyMappableToU128>(
    mut bytes: OwnedBytes,
) -> io::Result<Arc<dyn OptionalColumn<Item>>> {
    bytes: OwnedBytes,
) -> io::Result<Arc<dyn Column<Item>>> {
    let (bytes, _format_version) = read_format_version(bytes)?;
    let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
    let header = U128Header::deserialize(&mut bytes)?;
    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
    let reader = CompactSpaceDecompressor::open(bytes)?;
    let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<Item>> =
        StrictlyMonotonicMappingToInternal::<Item>::new().into();

    Ok(Arc::new(ToOptionalColumn::new(Arc::new(
        monotonic_map_column(reader, inverted),
    ))))
    Ok(Arc::new(monotonic_map_column(reader, inverted)))
}

/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open<T: MonotonicallyMappableToU64>(
    mut bytes: OwnedBytes,
) -> io::Result<Arc<dyn OptionalColumn<T>>> {
pub fn open<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::Result<Arc<dyn Column<T>>> {
    let (bytes, _format_version) = read_format_version(bytes)?;
    let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
    let header = Header::deserialize(&mut bytes)?;
    match header.codec_type {
        FastFieldCodecType::Bitpacked => open_specific_codec::<BitpackedCodec, _>(bytes, &header),

@@ -162,7 +166,7 @@ pub fn open<T: MonotonicallyMappableToU64>(
fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64>(
    bytes: OwnedBytes,
    header: &Header,
) -> io::Result<Arc<dyn OptionalColumn<Item>>> {
) -> io::Result<Arc<dyn Column<Item>>> {
    let normalized_header = header.normalized();
    let reader = C::open_from_bytes(bytes, normalized_header)?;
    let min_value = header.min_value;

@@ -170,16 +174,12 @@ fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64>(
        let mapping = StrictlyMonotonicMappingInverter::from(
            StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd.get(), min_value),
        );
        Ok(Arc::new(ToOptionalColumn::new(Arc::new(
            monotonic_map_column(reader, mapping),
        ))))
        Ok(Arc::new(monotonic_map_column(reader, mapping)))
    } else {
        let mapping = StrictlyMonotonicMappingInverter::from(
            StrictlyMonotonicMappingToInternalBaseval::new(min_value),
        );
        Ok(Arc::new(ToOptionalColumn::new(Arc::new(
            monotonic_map_column(reader, mapping),
        ))))
        Ok(Arc::new(monotonic_map_column(reader, mapping)))
    }
}

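After this change the format version and null-index footer are read and skipped at open time, and the returned reader is a plain Column again, so callers no longer go through OptionalColumn. A minimal usage sketch based on the updated tests in this commit:

    let column = crate::open::<u64>(OwnedBytes::new(buffer))?;
    let first: u64 = column.get_val(0); // plain value, no Option wrapper
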
@@ -250,9 +250,8 @@ mod tests {
        for (doc, orig_val) in data.iter().copied().enumerate() {
            let val = reader.get_val(doc as u32);
            assert_eq!(
                val,
                Some(orig_val),
                "val `{val:?}` does not match orig_val {orig_val:?}, in data set {name}, data \
                val, orig_val,
                "val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
                 `{data:?}`",
            );
        }

@@ -113,10 +113,7 @@ fn bench_ip() {
        (data.len() * 8) as f32 / dataset.len() as f32
    );

    let decompressor = open_u128::<u128>(OwnedBytes::new(data))
        .unwrap()
        .to_full()
        .unwrap();
    let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
    // Sample some ranges
    let mut doc_values = Vec::new();
    for value in dataset.iter().take(1110).skip(1100).cloned() {

@@ -1,6 +1,7 @@
use std::marker::PhantomData;

use fastdivide::DividerU64;
use ordered_float::NotNan;

use crate::MonotonicallyMappableToU128;

@@ -192,6 +193,8 @@ impl MonotonicallyMappableToU64 for bool {
    }
}

// TODO remove me.
// Tantivy should refuse NaN values and work with NotNaN internally.
impl MonotonicallyMappableToU64 for f64 {
    fn to_u64(self) -> u64 {
        common::f64_to_u64(self)

@@ -202,11 +205,42 @@ impl MonotonicallyMappableToU64 for f64 {
    }
}

impl MonotonicallyMappableToU64 for ordered_float::NotNan<f64> {
    fn to_u64(self) -> u64 {
        common::f64_to_u64(self.into_inner())
    }

    fn from_u64(val: u64) -> Self {
        NotNan::new(common::u64_to_f64(val)).expect("Invalid NotNaN f64 value.")
    }
}

#[cfg(test)]
mod tests {

    use super::*;

    #[test]
    fn test_from_u64_pos_inf() {
        let inf_as_u64 = common::f64_to_u64(f64::INFINITY);
        let inf_back_to_f64 = NotNan::from_u64(inf_as_u64);
        assert_eq!(inf_back_to_f64, NotNan::new(f64::INFINITY).unwrap());
    }

    #[test]
    fn test_from_u64_neg_inf() {
        let inf_as_u64 = common::f64_to_u64(-f64::INFINITY);
        let inf_back_to_f64 = NotNan::from_u64(inf_as_u64);
        assert_eq!(inf_back_to_f64, NotNan::new(-f64::INFINITY).unwrap());
    }

    #[test]
    #[should_panic(expected = "Invalid NotNaN")]
    fn test_from_u64_nan_panics() {
        let nan_as_u64 = common::f64_to_u64(f64::NAN);
        NotNan::from_u64(nan_as_u64);
    }

    #[test]
    fn strictly_monotonic_test() {
        // identity mapping

454
fastfield_codecs/src/null_index/dense.rs
Normal file
@@ -0,0 +1,454 @@
use std::convert::TryInto;
use std::io::{self, Write};

use common::BinarySerializable;
use itertools::Itertools;
use ownedbytes::OwnedBytes;

use super::{get_bit_at, set_bit_at};

/// For the `DenseCodec`, `data` contains the encoded blocks.
/// Each block consists of [u8; 12]. The first 8 bytes are a bitvec for 64 elements.
/// The last 4 bytes are the offset, the number of set bits so far.
///
/// When translating the original index to a dense index, the correct block can be computed
/// directly `orig_idx/64`. Inside the block the position is `orig_idx%64`.
///
/// When translating a dense index to the original index, we can use the offset to find the correct
/// block. Direct computation is not possible, but we can employ a linear or binary search.
#[derive(Clone)]
pub struct DenseCodec {
    // data consists of blocks of 64 bits.
    //
    // The format is &[(u64, u32)]
    // u64 is the bitvec
    // u32 is the offset of the block, the number of set bits so far.
    //
    // At the end one block is appended, to store the number of values in the index in offset.
    data: OwnedBytes,
}
const ELEMENTS_PER_BLOCK: u32 = 64;
const BLOCK_BITVEC_SIZE: usize = 8;
const BLOCK_OFFSET_SIZE: usize = 4;
const SERIALIZED_BLOCK_SIZE: usize = BLOCK_BITVEC_SIZE + BLOCK_OFFSET_SIZE;

#[inline]
fn count_ones(bitvec: u64, pos_in_bitvec: u32) -> u32 {
    if pos_in_bitvec == 63 {
        bitvec.count_ones()
    } else {
        let mask = (1u64 << (pos_in_bitvec + 1)) - 1;
        let masked_bitvec = bitvec & mask;
        masked_bitvec.count_ones()
    }
}

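A worked example of the block arithmetic from the doc comment above (values are illustrative): for original index 130, the block is 130 / 64 = 2 and the position inside that block's bitvec is 130 % 64 = 2; if that bit is set, the codec index is the block offset plus the rank within the block:

    // let block = dense_index_block(&data, 130 / ELEMENTS_PER_BLOCK); // block 2
    // let pos_in_bitvec = 130 % ELEMENTS_PER_BLOCK;                   // position 2
    // codec_idx = block.offset + count_ones(block.bitvec, pos_in_bitvec) - 1
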
#[derive(Clone, Copy)]
struct DenseIndexBlock {
    bitvec: u64,
    offset: u32,
}

impl From<[u8; SERIALIZED_BLOCK_SIZE]> for DenseIndexBlock {
    fn from(data: [u8; SERIALIZED_BLOCK_SIZE]) -> Self {
        let bitvec = u64::from_le_bytes(data[..BLOCK_BITVEC_SIZE].try_into().unwrap());
        let offset = u32::from_le_bytes(data[BLOCK_BITVEC_SIZE..].try_into().unwrap());
        Self { bitvec, offset }
    }
}

impl DenseCodec {
    /// Open the DenseCodec from OwnedBytes
    pub fn open(data: OwnedBytes) -> Self {
        Self { data }
    }
    #[inline]
    /// Check if value at position is not null.
    pub fn exists(&self, idx: u32) -> bool {
        let block_pos = idx / ELEMENTS_PER_BLOCK;
        let bitvec = self.dense_index_block(block_pos).bitvec;

        let pos_in_bitvec = idx % ELEMENTS_PER_BLOCK;

        get_bit_at(bitvec, pos_in_bitvec)
    }
    #[inline]
    fn dense_index_block(&self, block_pos: u32) -> DenseIndexBlock {
        dense_index_block(&self.data, block_pos)
    }

    /// Return the number of non-null values in an index
    pub fn num_non_nulls(&self) -> u32 {
        let last_block = (self.data.len() / SERIALIZED_BLOCK_SIZE) - 1;
        self.dense_index_block(last_block as u32).offset
    }

    #[inline]
    /// Translate from the original index to the codec index.
    pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
        let block_pos = idx / ELEMENTS_PER_BLOCK;
        let index_block = self.dense_index_block(block_pos);
        let pos_in_block_bit_vec = idx % ELEMENTS_PER_BLOCK;
        let ones_in_block = count_ones(index_block.bitvec, pos_in_block_bit_vec);
        if get_bit_at(index_block.bitvec, pos_in_block_bit_vec) {
            // -1 is ok, since idx does exist, so there's at least one
            Some(index_block.offset + ones_in_block - 1)
        } else {
            None
        }
    }

    /// Translate positions from the codec index to the original index.
    ///
    /// # Panics
    ///
    /// May panic if any `idx` is greater than the max codec index.
    pub fn translate_codec_idx_to_original_idx<'a>(
        &'a self,
        iter: impl Iterator<Item = u32> + 'a,
    ) -> impl Iterator<Item = u32> + 'a {
        let mut block_pos = 0u32;
        iter.map(move |dense_idx| {
            // update block_pos to limit search scope
            block_pos = find_block(dense_idx, block_pos, &self.data);
            let index_block = self.dense_index_block(block_pos);

            // The next offset is higher than dense_idx and therefore:
            // dense_idx <= offset + num_set_bits in block
            let mut num_set_bits = 0;
            for idx_in_bitvec in 0..ELEMENTS_PER_BLOCK {
                if get_bit_at(index_block.bitvec, idx_in_bitvec) {
                    num_set_bits += 1;
                }
                if num_set_bits == (dense_idx - index_block.offset + 1) {
                    let orig_idx = block_pos * ELEMENTS_PER_BLOCK + idx_in_bitvec;
                    return orig_idx;
                }
            }
            panic!("Internal Error: Offset calculation in dense idx seems to be wrong.");
        })
    }
}

#[inline]
fn dense_index_block(data: &[u8], block_pos: u32) -> DenseIndexBlock {
    let data_start_pos = block_pos as usize * SERIALIZED_BLOCK_SIZE;
    let block_data: [u8; SERIALIZED_BLOCK_SIZE] = data[data_start_pos..][..SERIALIZED_BLOCK_SIZE]
        .try_into()
        .unwrap();
    block_data.into()
}

#[inline]
/// Finds the block position containing the dense_idx.
///
/// # Correctness
/// dense_idx needs to be smaller than the number of values in the index.
///
/// The last offset number is equal to the number of values in the index.
fn find_block(dense_idx: u32, mut block_pos: u32, data: &[u8]) -> u32 {
    loop {
        let offset = dense_index_block(data, block_pos).offset;
        if offset > dense_idx {
            return block_pos - 1;
        }
        block_pos += 1;
    }
}

/// Serializes the dense codec. `iter` must yield one entry per value: true if set, otherwise
/// false.
pub fn serialize_dense_codec(
    iter: impl Iterator<Item = bool>,
    mut out: impl Write,
) -> io::Result<()> {
    let mut offset: u32 = 0;

    for chunk in &iter.chunks(ELEMENTS_PER_BLOCK as usize) {
        let mut block: u64 = 0;
        for (pos, is_bit_set) in chunk.enumerate() {
            if is_bit_set {
                set_bit_at(&mut block, pos as u64);
            }
        }

        block.serialize(&mut out)?;
        offset.serialize(&mut out)?;

        offset += block.count_ones();
    }
    // Add sentinel block for the offset
    let block: u64 = 0;
    block.serialize(&mut out)?;
    offset.serialize(&mut out)?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use proptest::prelude::{any, prop, *};
    use proptest::strategy::Strategy;
    use proptest::{prop_oneof, proptest};

    use super::*;

    fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
        prop_oneof![
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..64),
            1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..64),
            8 => vec![any::<bool>()],
            2 => prop::collection::vec(any::<bool>(), 0..50),
        ]
        .boxed()
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(500))]
        #[test]
        fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
            let mut bitvec = Vec::new();
            bitvec.extend_from_slice(&bitvec1);
            bitvec.extend_from_slice(&bitvec2);
            bitvec.extend_from_slice(&bitvec3);
            test_null_index(bitvec);
        }
    }

    #[test]
    fn dense_codec_test_one_block_false() {
        let mut iter = vec![false; 64];
        iter.push(true);
        test_null_index(iter);
    }

    fn test_null_index(data: Vec<bool>) {
        let mut out = vec![];

        serialize_dense_codec(data.iter().cloned(), &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));

        let orig_idx_with_value: Vec<u32> = data
            .iter()
            .enumerate()
            .filter(|(_pos, val)| **val)
            .map(|(pos, _val)| pos as u32)
            .collect();

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..orig_idx_with_value.len() as u32)
                .collect_vec(),
            orig_idx_with_value
        );

        for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate() {
            assert_eq!(
                null_index.translate_to_codec_idx(*orig_idx),
                Some(dense_idx as u32)
            );
        }

        for (pos, value) in data.iter().enumerate() {
            assert_eq!(null_index.exists(pos as u32), *value);
        }
    }

    #[test]
    fn dense_codec_test_translation() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..2)
                .collect_vec(),
            vec![0, 2]
        );
    }

    #[test]
    fn dense_codec_translate() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert_eq!(null_index.translate_to_codec_idx(0), Some(0));
        assert_eq!(null_index.translate_to_codec_idx(2), Some(1));
    }

    #[test]
    fn dense_codec_test_small() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert!(null_index.exists(0));
        assert!(!null_index.exists(1));
        assert!(null_index.exists(2));
        assert!(!null_index.exists(3));
    }

    #[test]
    fn dense_codec_test_large() {
        let mut docs = vec![];
        docs.extend((0..1000).map(|_idx| false));
        docs.extend((0..=1000).map(|_idx| true));

        let iter = docs.iter().cloned();
        let mut out = vec![];
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert!(!null_index.exists(0));
        assert!(!null_index.exists(100));
        assert!(!null_index.exists(999));
        assert!(null_index.exists(1000));
        assert!(null_index.exists(1999));
        assert!(null_index.exists(2000));
        assert!(!null_index.exists(2001));
    }

    #[test]
    fn test_count_ones() {
        let mut block = 0;
        set_bit_at(&mut block, 0);
        set_bit_at(&mut block, 2);

        assert_eq!(count_ones(block, 0), 1);
        assert_eq!(count_ones(block, 1), 1);
        assert_eq!(count_ones(block, 2), 2);
    }
}

#[cfg(all(test, feature = "unstable"))]
mod bench {

    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::Bencher;

    use super::*;

    const TOTAL_NUM_VALUES: u32 = 1_000_000;
    fn gen_bools(fill_ratio: f64) -> DenseCodec {
        let mut out = Vec::new();
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let bools: Vec<_> = (0..TOTAL_NUM_VALUES)
            .map(|_| rng.gen_bool(fill_ratio))
            .collect();
        serialize_dense_codec(bools.into_iter(), &mut out).unwrap();

        let codec = DenseCodec::open(OwnedBytes::new(out));
        codec
    }

    fn random_range_iterator(start: u32, end: u32, step_size: u32) -> impl Iterator<Item = u32> {
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let mut current = start;
        std::iter::from_fn(move || {
            current += rng.gen_range(1..step_size + 1);
            if current >= end {
                None
            } else {
                Some(current)
            }
        })
    }

    fn walk_over_data(codec: &DenseCodec, max_step_size: u32) -> Option<u32> {
        walk_over_data_from_positions(
            codec,
            random_range_iterator(0, TOTAL_NUM_VALUES, max_step_size),
        )
    }

    fn walk_over_data_from_positions(
        codec: &DenseCodec,
        positions: impl Iterator<Item = u32>,
    ) -> Option<u32> {
        let mut dense_idx: Option<u32> = None;
        for idx in positions {
            dense_idx = dense_idx.or(codec.translate_to_codec_idx(idx));
        }
        dense_idx
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_codec_90percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_codec_50percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.5f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_codec_full_scan_10percent(bench: &mut Bencher) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_codec_full_scan_90percent(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_dense_codec_translate_orig_to_codec_10percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_dense_codec_translate_codec_to_orig_90percent_filled_random_stride_big_step(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 50_000))
                .last()
        });
    }

    #[bench]
    fn bench_dense_codec_translate_codec_to_orig_90percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 100))
                .last()
        });
    }

    #[bench]
    fn bench_dense_codec_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }
}

14
fastfield_codecs/src/null_index/mod.rs
Normal file
@@ -0,0 +1,14 @@
pub use dense::{serialize_dense_codec, DenseCodec};

mod dense;
mod sparse;

#[inline]
fn get_bit_at(input: u64, n: u32) -> bool {
    input & (1 << n) != 0
}

#[inline]
fn set_bit_at(input: &mut u64, n: u64) {
    *input |= 1 << n;
}
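A quick illustration of the two bit helpers above (a sketch for orientation; note `set_bit_at` takes the position as `u64` while `get_bit_at` takes `u32`, exactly as declared):

// Set a bit, then read it back.
fn bit_helpers_roundtrip() {
    let mut block: u64 = 0;
    set_bit_at(&mut block, 5);
    assert!(get_bit_at(block, 5));
    assert!(!get_bit_at(block, 6));
}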
752
fastfield_codecs/src/null_index/sparse.rs
Normal file
@@ -0,0 +1,752 @@
use std::io::{self, Write};

use common::BitSet;
use ownedbytes::OwnedBytes;

use super::{serialize_dense_codec, DenseCodec};

/// `SparseCodec` is the codec for data where only a few documents have values.
/// In contrast to `DenseCodec`, opening a `SparseCodec` builds runtime data for
/// faster access.
///
/// The lower 16 bits of doc ids are stored as u16, while the upper 16 bits are given by the block
/// id. Each block covers 1 << 16 docids.
///
/// # Serialized Data Layout
/// The data starts with the block data. Each block is either dense or sparse encoded, depending on
/// the number of values in the block. A block is sparse when it contains fewer than
/// DENSE_BLOCK_THRESHOLD (6144) values.
/// [Sparse data block | dense data block, .. #repeat*; Desc: Either a sparse or dense encoded
/// block]
/// ### Sparse block data
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block]
/// ### Dense block data
/// [Dense codec for the whole block; Desc: Similar to a bitvec(0..ELEMENTS_PER_BLOCK) + metadata
/// for faster lookups. See dense.rs]
///
/// The data is followed by block metadata, to know which area of the raw block data belongs to
/// which block. Only metadata for blocks with elements is recorded, to
/// keep the overhead low for scenarios with many very sparse columns. The block metadata consists
/// of the block index and the number of values in the block. Since we don't store empty blocks,
/// the stored num_vals is offset by 1, e.g. 0 means 1 value.
///
/// The last u16 stores the number of metadata blocks.
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block][(u16 LE, u16 LE), .. #repeat*;
/// Desc: (Block Id u16, Num Elements u16)][u16 LE; Desc: num blocks with values u16]
///
/// # Opening
/// When opening the data layout, the data is expanded to `Vec<SparseCodecBlockVariant>`, where the
/// index is the block index. For each block, `byte_start` and `offset` are computed.
pub struct SparseCodec {
    data: OwnedBytes,
    blocks: Vec<SparseCodecBlockVariant>,
}

/// The threshold for the number of elements after which we switch to dense block encoding.
const DENSE_BLOCK_THRESHOLD: u32 = 6144;

const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;

/// 1.5 bits per element + 12 bytes for the sentinel block.
const NUM_BYTES_DENSE_BLOCK: u32 = (ELEMENTS_PER_BLOCK + ELEMENTS_PER_BLOCK / 2 + 64 + 32) / 8;
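For orientation, the constant above works out to 12,300 bytes per dense block. A small sanity check, using only the arithmetic visible here (a sketch, not part of the original source):

#[test]
fn num_bytes_dense_block_sanity() {
    // 65_536 data bits + 32_768 metadata bits + 96 sentinel bits, expressed in bytes.
    assert_eq!(NUM_BYTES_DENSE_BLOCK, (65_536 + 32_768 + 64 + 32) / 8); // = 12_300
}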

#[derive(Clone)]
enum SparseCodecBlockVariant {
    Empty { offset: u32 },
    Dense(DenseBlock),
    Sparse(SparseBlock),
}

impl SparseCodecBlockVariant {
    /// The number of non-null values that preceded this block.
    fn offset(&self) -> u32 {
        match self {
            SparseCodecBlockVariant::Empty { offset } => *offset,
            SparseCodecBlockVariant::Dense(dense) => dense.offset,
            SparseCodecBlockVariant::Sparse(sparse) => sparse.offset,
        }
    }
}

/// A block holds up to `ELEMENTS_PER_BLOCK` values.
#[derive(Clone)]
struct DenseBlock {
    /// The number of values set before the block
    offset: u32,
    /// The data for the dense encoding
    codec: DenseCodec,
}

impl DenseBlock {
    pub fn exists(&self, idx: u32) -> bool {
        self.codec.exists(idx)
    }
    pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
        self.codec.translate_to_codec_idx(idx)
    }
    pub fn translate_codec_idx_to_original_idx(&self, idx: u32) -> u32 {
        self.codec
            .translate_codec_idx_to_original_idx(idx..=idx)
            .next()
            .unwrap()
    }
}

/// A block holds up to `ELEMENTS_PER_BLOCK` values.
#[derive(Debug, Copy, Clone)]
struct SparseBlock {
    /// The number of values in the block
    num_vals: u32,
    /// The number of values set before the block
    offset: u32,
    /// The start position of the data for the block
    byte_start: u32,
}

impl SparseBlock {
    fn empty_block(offset: u32) -> Self {
        Self {
            num_vals: 0,
            byte_start: 0,
            offset,
        }
    }

    #[inline]
    fn value_at_idx(&self, data: &[u8], idx: u16) -> u16 {
        let start_offset: usize = self.byte_start as usize + (idx as u32 as usize * 2);
        get_u16(data, start_offset)
    }

    #[inline]
    #[allow(clippy::comparison_chain)]
    // Looks for the element in the block. Returns its position if found.
    fn binary_search(&self, data: &[u8], target: u16) -> Option<u16> {
        let mut size = self.num_vals as u16;
        let mut left = 0;
        let mut right = size;
        // TODO: try a different implementation,
        // e.g. exponential search into binary search.
        while left < right {
            let mid = left + size / 2;

            // TODO: do the boundary check only once, and then use an
            // unsafe `value_at_idx`.
            let mid_val = self.value_at_idx(data, mid);

            if target > mid_val {
                left = mid + 1;
            } else if target < mid_val {
                right = mid;
            } else {
                return Some(mid);
            }

            size = right - left;
        }
        None
    }
}
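A sketch of how `binary_search` reads a sparse block: the block's values are sorted little-endian u16s starting at `byte_start` (an illustration, assuming in-module access to the private fields):

#[test]
fn sparse_block_binary_search_sketch() {
    // Three sorted values: 1, 5, 9, encoded as little-endian u16s.
    let data: Vec<u8> = [1u16, 5, 9].iter().flat_map(|v| v.to_le_bytes()).collect();
    let block = SparseBlock { num_vals: 3, offset: 0, byte_start: 0 };
    assert_eq!(block.binary_search(&data, 5), Some(1));
    assert_eq!(block.binary_search(&data, 6), None);
}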

#[inline]
fn get_u16(data: &[u8], byte_position: usize) -> u16 {
    let bytes: [u8; 2] = data[byte_position..byte_position + 2].try_into().unwrap();
    u16::from_le_bytes(bytes)
}

const SERIALIZED_BLOCK_METADATA_SIZE: usize = 4;

fn deserialize_sparse_codec_block(data: &OwnedBytes) -> Vec<SparseCodecBlockVariant> {
    // The number of vals so far
    let mut offset = 0;
    let mut sparse_codec_blocks = Vec::new();
    let num_blocks = get_u16(data, data.len() - 2);
    let block_data_index_start =
        data.len() - 2 - num_blocks as usize * SERIALIZED_BLOCK_METADATA_SIZE;
    let mut byte_start = 0;
    for block_num in 0..num_blocks as usize {
        let block_data_index = block_data_index_start + SERIALIZED_BLOCK_METADATA_SIZE * block_num;
        let block_idx = get_u16(data, block_data_index);
        let num_vals = get_u16(data, block_data_index + 2) as u32 + 1;
        sparse_codec_blocks.resize(
            block_idx as usize,
            SparseCodecBlockVariant::Empty { offset },
        );

        if is_sparse(num_vals) {
            let block = SparseBlock {
                num_vals,
                offset,
                byte_start,
            };
            sparse_codec_blocks.push(SparseCodecBlockVariant::Sparse(block));
            byte_start += 2 * num_vals;
        } else {
            let block = DenseBlock {
                offset,
                codec: DenseCodec::open(data.slice(byte_start as usize..data.len()).clone()),
            };
            sparse_codec_blocks.push(SparseCodecBlockVariant::Dense(block));
            // Dense blocks have a fixed size spanning ELEMENTS_PER_BLOCK.
            byte_start += NUM_BYTES_DENSE_BLOCK;
        }

        offset += num_vals;
    }
    sparse_codec_blocks.push(SparseCodecBlockVariant::Empty { offset });
    sparse_codec_blocks
}

/// Splits a value address into lower and upper 16 bits.
/// The lower 16 bits are the value in the block.
/// The upper 16 bits are the block index.
#[derive(Debug, Clone, Copy)]
struct ValueAddr {
    block_idx: u16,
    value_in_block: u16,
}

/// Splits an idx into the block index and the value in the block.
fn value_addr(idx: u32) -> ValueAddr {
    /// Static assert on the number of elements per block this method expects.
    #[allow(clippy::assertions_on_constants)]
    const _: () = assert!(ELEMENTS_PER_BLOCK == (1 << 16));

    let value_in_block = idx as u16;
    let block_idx = (idx >> 16) as u16;
    ValueAddr {
        block_idx,
        value_in_block,
    }
}
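Concretely, `value_addr` is a plain split of the 32-bit index (a small illustrative check):

#[test]
fn value_addr_split() {
    // 65_538 = 1 * ELEMENTS_PER_BLOCK + 2 => block 1, position 2 in the block.
    let addr = value_addr((1 << 16) + 2);
    assert_eq!(addr.block_idx, 1);
    assert_eq!(addr.value_in_block, 2);
}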
impl SparseCodec {
    /// Open the SparseCodec from OwnedBytes.
    pub fn open(data: OwnedBytes) -> Self {
        let blocks = deserialize_sparse_codec_block(&data);
        Self { data, blocks }
    }

    #[inline]
    /// Check whether the value at the position is not null.
    pub fn exists(&self, idx: u32) -> bool {
        let value_addr = value_addr(idx);
        // There may be trailing nulls without data; those are not stored as blocks. It would be
        // possible to create empty blocks, but for that we would need to serialize the number of
        // values or pass them when opening.

        if let Some(block) = self.blocks.get(value_addr.block_idx as usize) {
            match block {
                SparseCodecBlockVariant::Empty { offset: _ } => false,
                SparseCodecBlockVariant::Dense(block) => {
                    block.exists(value_addr.value_in_block as u32)
                }
                SparseCodecBlockVariant::Sparse(block) => block
                    .binary_search(&self.data, value_addr.value_in_block)
                    .is_some(),
            }
        } else {
            false
        }
    }

    /// Return the number of non-null values in the index.
    pub fn num_non_nulls(&self) -> u32 {
        self.blocks.last().map(|block| block.offset()).unwrap_or(0)
    }

    #[inline]
    /// Translate from the original index to the codec index.
    pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
        let value_addr = value_addr(idx);
        let block = self.blocks.get(value_addr.block_idx as usize)?;

        match block {
            SparseCodecBlockVariant::Empty { offset: _ } => None,
            SparseCodecBlockVariant::Dense(block) => block
                .translate_to_codec_idx(value_addr.value_in_block as u32)
                .map(|pos_in_block| pos_in_block + block.offset),
            SparseCodecBlockVariant::Sparse(block) => {
                let pos_in_block = block.binary_search(&self.data, value_addr.value_in_block);
                pos_in_block.map(|pos_in_block: u16| block.offset + pos_in_block as u32)
            }
        }
    }

    fn find_block(&self, dense_idx: u32, mut block_pos: u32) -> u32 {
        loop {
            let offset = self.blocks[block_pos as usize].offset();
            if offset > dense_idx {
                return block_pos - 1;
            }
            block_pos += 1;
        }
    }

    /// Translate positions from the codec index to the original index.
    ///
    /// # Panics
    ///
    /// May panic if any `idx` is greater than the max codec index.
    pub fn translate_codec_idx_to_original_idx<'a>(
        &'a self,
        iter: impl Iterator<Item = u32> + 'a,
    ) -> impl Iterator<Item = u32> + 'a {
        // TODO: There's a big potential performance gain by using iterators per block instead of
        // random access for each element in a block.
        // itertools' group_by won't help though, since it requires a temporary local variable.
        let mut block_pos = 0u32;
        iter.map(move |codec_idx| {
            // Update block_pos to limit the search scope.
            block_pos = self.find_block(codec_idx, block_pos);
            let block_doc_idx_start = block_pos * ELEMENTS_PER_BLOCK;
            let block = &self.blocks[block_pos as usize];
            let idx_in_block = codec_idx - block.offset();
            match block {
                SparseCodecBlockVariant::Empty { offset: _ } => {
                    panic!(
                        "invalid input, cannot translate to original index. associated empty \
                         block with dense idx. block_pos {}, idx_in_block {}",
                        block_pos, idx_in_block
                    )
                }
                SparseCodecBlockVariant::Dense(dense) => {
                    dense.translate_codec_idx_to_original_idx(idx_in_block) + block_doc_idx_start
                }
                SparseCodecBlockVariant::Sparse(block) => {
                    block.value_at_idx(&self.data, idx_in_block as u16) as u32 + block_doc_idx_start
                }
            }
        })
    }
}

fn is_sparse(num_elem_in_block: u32) -> bool {
    num_elem_in_block < DENSE_BLOCK_THRESHOLD
}
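One caveat worth spelling out: `translate_codec_idx_to_original_idx` keeps `block_pos` across calls, and `find_block` only ever scans forward, so the input iterator must yield non-decreasing codec indices (the tests and benches below respect this). A minimal usage sketch under that assumption:

// Codec indices must be passed in ascending order.
fn collect_original_indices(codec: &SparseCodec, num_non_nulls: u32) -> Vec<u32> {
    codec
        .translate_codec_idx_to_original_idx(0..num_non_nulls)
        .collect()
}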

#[derive(Default)]
struct BlockDataSerialized {
    block_idx: u16,
    num_vals: u32,
}

/// Serializes the sparse codec. `iter` is an iterator over the positions of set values.
pub fn serialize_sparse_codec<W: Write>(
    mut iter: impl Iterator<Item = u32>,
    mut out: W,
) -> io::Result<()> {
    let mut block_metadata: Vec<BlockDataSerialized> = Vec::new();
    let mut current_block = Vec::new();
    // This if-statement for the first element ensures that
    // `block_metadata` is not empty in the loop below.
    if let Some(idx) = iter.next() {
        let value_addr = value_addr(idx);
        block_metadata.push(BlockDataSerialized {
            block_idx: value_addr.block_idx,
            num_vals: 1,
        });
        current_block.push(value_addr.value_in_block);
    }
    let flush_block = |current_block: &mut Vec<u16>, out: &mut W| -> io::Result<()> {
        let is_sparse = is_sparse(current_block.len() as u32);
        if is_sparse {
            for val_in_block in current_block.iter() {
                out.write_all(val_in_block.to_le_bytes().as_ref())?;
            }
        } else {
            let mut bitset = BitSet::with_max_value(ELEMENTS_PER_BLOCK + 1);
            for val_in_block in current_block.iter() {
                bitset.insert(*val_in_block as u32);
            }

            let iter = (0..ELEMENTS_PER_BLOCK).map(|idx| bitset.contains(idx));
            serialize_dense_codec(iter, out)?;
        }
        current_block.clear();
        Ok(())
    };
    for idx in iter {
        let value_addr = value_addr(idx);
        if block_metadata[block_metadata.len() - 1].block_idx == value_addr.block_idx {
            let last_idx_metadata = block_metadata.len() - 1;
            block_metadata[last_idx_metadata].num_vals += 1;
        } else {
            // Flush the previous block.
            flush_block(&mut current_block, &mut out)?;

            block_metadata.push(BlockDataSerialized {
                block_idx: value_addr.block_idx,
                num_vals: 1,
            });
        }
        current_block.push(value_addr.value_in_block);
    }
    // Handle the last block.
    flush_block(&mut current_block, &mut out)?;

    for block in &block_metadata {
        out.write_all(block.block_idx.to_le_bytes().as_ref())?;
        // We don't store empty blocks, therefore we can subtract 1.
        // This way we can still use u16 when the number of elements is 1 << 16, i.e. u16::MAX + 1.
        out.write_all(((block.num_vals - 1) as u16).to_le_bytes().as_ref())?;
    }
    out.write_all((block_metadata.len() as u16).to_le_bytes().as_ref())?;

    Ok(())
}
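To make the layout concrete: serializing the set positions `[0, 2, 65_536]` yields two sparse blocks (4 + 2 bytes of u16 positions), two 4-byte metadata entries, and the trailing block-count u16, i.e. 16 bytes in total. A worked round trip, derived from the serializer above (a sketch, not from the original source):

#[test]
fn sparse_codec_layout_sketch() {
    let mut out = vec![];
    serialize_sparse_codec([0u32, 2, 65_536].into_iter(), &mut out).unwrap();
    // 4 + 2 bytes of block data, 2 * 4 bytes of metadata, 2 bytes for the block count.
    assert_eq!(out.len(), 16);
}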

#[cfg(test)]
mod tests {
    use itertools::Itertools;
    use proptest::prelude::{any, prop, *};
    use proptest::strategy::Strategy;
    use proptest::{prop_oneof, proptest};

    use super::*;

    fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
        prop_oneof![
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(0.00), 0..(ELEMENTS_PER_BLOCK as usize * 3)), // empty blocks
            1 => prop::collection::vec(proptest::bool::weighted(1.00), 0..(ELEMENTS_PER_BLOCK as usize + 10)), // full block
            1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..u16::MAX as usize),
            8 => vec![any::<bool>()],
        ]
        .boxed()
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(50))]
        #[test]
        fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
            let mut bitvec = Vec::new();
            bitvec.extend_from_slice(&bitvec1);
            bitvec.extend_from_slice(&bitvec2);
            bitvec.extend_from_slice(&bitvec3);
            test_null_index(bitvec);
        }
    }

    #[test]
    fn sparse_codec_test_one_block_false() {
        let mut iter = vec![false; ELEMENTS_PER_BLOCK as usize];
        iter.push(true);
        test_null_index(iter);
    }

    #[test]
    fn sparse_codec_test_one_block_true() {
        let mut iter = vec![true; ELEMENTS_PER_BLOCK as usize];
        iter.push(true);
        test_null_index(iter);
    }

    fn test_null_index(data: Vec<bool>) {
        let mut out = vec![];

        serialize_sparse_codec(
            data.iter()
                .cloned()
                .enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));

        let orig_idx_with_value: Vec<u32> = data
            .iter()
            .enumerate()
            .filter(|(_pos, val)| **val)
            .map(|(pos, _val)| pos as u32)
            .collect();

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..orig_idx_with_value.len() as u32)
                .collect_vec(),
            orig_idx_with_value
        );

        let step_size = (orig_idx_with_value.len() / 100).max(1);
        for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate().step_by(step_size) {
            assert_eq!(
                null_index.translate_to_codec_idx(*orig_idx),
                Some(dense_idx as u32)
            );
        }

        // 100 samples
        let step_size = (data.len() / 100).max(1);
        for (pos, value) in data.iter().enumerate().step_by(step_size) {
            assert_eq!(null_index.exists(pos as u32), *value);
        }
    }

    #[test]
    fn sparse_codec_test_translation() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..2)
                .collect_vec(),
            vec![0, 2]
        );
    }

    #[test]
    fn sparse_codec_translate() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));
        assert_eq!(null_index.translate_to_codec_idx(0), Some(0));
        assert_eq!(null_index.translate_to_codec_idx(2), Some(1));
    }

    #[test]
    fn sparse_codec_test_small() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));
        assert!(null_index.exists(0));
        assert!(!null_index.exists(1));
        assert!(null_index.exists(2));
        assert!(!null_index.exists(3));
    }

    #[test]
    fn sparse_codec_test_large() {
        let mut docs = vec![];
        docs.extend((0..ELEMENTS_PER_BLOCK).map(|_idx| false));
        docs.extend((0..=1).map(|_idx| true));

        let iter = docs.iter().cloned();
        let mut out = vec![];
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));
        assert!(!null_index.exists(0));
        assert!(!null_index.exists(100));
        assert!(!null_index.exists(ELEMENTS_PER_BLOCK - 1));
        assert!(null_index.exists(ELEMENTS_PER_BLOCK));
        assert!(null_index.exists(ELEMENTS_PER_BLOCK + 1));
    }
}

#[cfg(all(test, feature = "unstable"))]
mod bench {

    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::Bencher;

    use super::*;

    const TOTAL_NUM_VALUES: u32 = 1_000_000;
    fn gen_bools(fill_ratio: f64) -> SparseCodec {
        let mut out = Vec::new();
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        serialize_sparse_codec(
            (0..TOTAL_NUM_VALUES)
                .map(|_| rng.gen_bool(fill_ratio))
                .enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();

        let codec = SparseCodec::open(OwnedBytes::new(out));
        codec
    }

    fn random_range_iterator(start: u32, end: u32, step_size: u32) -> impl Iterator<Item = u32> {
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let mut current = start;
        std::iter::from_fn(move || {
            current += rng.gen_range(1..step_size + 1);
            if current >= end {
                None
            } else {
                Some(current)
            }
        })
    }

    fn walk_over_data(codec: &SparseCodec, max_step_size: u32) -> Option<u32> {
        walk_over_data_from_positions(
            codec,
            random_range_iterator(0, TOTAL_NUM_VALUES, max_step_size),
        )
    }

    fn walk_over_data_from_positions(
        codec: &SparseCodec,
        positions: impl Iterator<Item = u32>,
    ) -> Option<u32> {
        let mut dense_idx: Option<u32> = None;
        for idx in positions {
            dense_idx = dense_idx.or(codec.translate_to_codec_idx(idx));
        }
        dense_idx
    }

    #[bench]
    fn bench_sparse_codec_translate_orig_to_codec_1percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.01f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_sparse_codec_translate_orig_to_codec_5percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.05f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_sparse_codec_translate_orig_to_codec_full_scan_10percent(bench: &mut Bencher) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_sparse_codec_translate_orig_to_codec_full_scan_90percent(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_sparse_codec_translate_orig_to_codec_full_scan_1percent(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_sparse_codec_translate_orig_to_codec_10percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_sparse_codec_translate_orig_to_codec_90percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_sparse_codec_translate_codec_to_orig_1percent_filled_random_stride_big_step(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.01f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 50_000))
                .last()
        });
    }

    #[bench]
    fn bench_sparse_codec_translate_codec_to_orig_1percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.01f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 100))
                .last()
        });
    }

    #[bench]
    fn bench_sparse_codec_translate_codec_to_orig_1percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }

    #[bench]
    fn bench_sparse_codec_translate_codec_to_orig_90percent_filled_random_stride_big_step(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.90f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 50_000))
                .last()
        });
    }

    #[bench]
    fn bench_sparse_codec_translate_codec_to_orig_90percent_filled_random_stride(
        bench: &mut Bencher,
    ) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 100))
                .last()
        });
    }

    #[bench]
    fn bench_sparse_codec_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }
}
146
fastfield_codecs/src/null_index_footer.rs
Normal file
@@ -0,0 +1,146 @@
use std::io::{self, Write};
use std::ops::Range;

use common::{BinarySerializable, CountingWriter, VInt};
use ownedbytes::OwnedBytes;

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum FastFieldCardinality {
    Single = 1,
    Multi = 2,
}

impl BinarySerializable for FastFieldCardinality {
    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
        self.to_code().serialize(wrt)
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let code = u8::deserialize(reader)?;
        let codec_type: Self = Self::from_code(code).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`."))
        })?;
        Ok(codec_type)
    }
}

impl FastFieldCardinality {
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn from_code(code: u8) -> Option<Self> {
        match code {
            1 => Some(Self::Single),
            2 => Some(Self::Multi),
            _ => None,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum NullIndexCodec {
    Full = 1,
}

impl BinarySerializable for NullIndexCodec {
    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
        self.to_code().serialize(wrt)
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let code = u8::deserialize(reader)?;
        let codec_type: Self = Self::from_code(code).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`."))
        })?;
        Ok(codec_type)
    }
}

impl NullIndexCodec {
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn from_code(code: u8) -> Option<Self> {
        match code {
            1 => Some(Self::Full),
            _ => None,
        }
    }
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub(crate) struct NullIndexFooter {
    pub(crate) cardinality: FastFieldCardinality,
    pub(crate) null_index_codec: NullIndexCodec,
    // Unused for NullIndexCodec::Full
    pub(crate) null_index_byte_range: Range<u64>,
}

impl BinarySerializable for NullIndexFooter {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        self.cardinality.serialize(writer)?;
        self.null_index_codec.serialize(writer)?;
        VInt(self.null_index_byte_range.start).serialize(writer)?;
        VInt(self.null_index_byte_range.end - self.null_index_byte_range.start)
            .serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let cardinality = FastFieldCardinality::deserialize(reader)?;
        let null_index_codec = NullIndexCodec::deserialize(reader)?;
        let null_index_byte_range_start = VInt::deserialize(reader)?.0;
        let null_index_byte_range_end = VInt::deserialize(reader)?.0 + null_index_byte_range_start;
        Ok(Self {
            cardinality,
            null_index_codec,
            null_index_byte_range: null_index_byte_range_start..null_index_byte_range_end,
        })
    }
}

pub(crate) fn append_null_index_footer(
    output: &mut impl io::Write,
    null_index_footer: NullIndexFooter,
) -> io::Result<()> {
    let mut counting_write = CountingWriter::wrap(output);
    null_index_footer.serialize(&mut counting_write)?;
    let footer_payload_len = counting_write.written_bytes();
    BinarySerializable::serialize(&(footer_payload_len as u16), &mut counting_write)?;

    Ok(())
}

pub(crate) fn read_null_index_footer(
    data: OwnedBytes,
) -> io::Result<(OwnedBytes, NullIndexFooter)> {
    let (data, null_footer_length_bytes) = data.rsplit(2);

    let footer_length = u16::deserialize(&mut null_footer_length_bytes.as_slice())?;
    let (data, null_index_footer_bytes) = data.rsplit(footer_length as usize);
    let null_index_footer = NullIndexFooter::deserialize(&mut null_index_footer_bytes.as_ref())?;

    Ok((data, null_index_footer))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn null_index_footer_deser_test() {
        let null_index_footer = NullIndexFooter {
            cardinality: FastFieldCardinality::Single,
            null_index_codec: NullIndexCodec::Full,
            null_index_byte_range: 100..120,
        };

        let mut out = vec![];
        null_index_footer.serialize(&mut out).unwrap();

        assert_eq!(
            null_index_footer,
            NullIndexFooter::deserialize(&mut &out[..]).unwrap()
        );
    }
}
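For scale: the footer above serializes to one byte each for cardinality and codec plus two VInts. Assuming tantivy's `VInt` encodes values below 128 in a single byte (true for 100 and 20), the test footer occupies exactly 4 bytes; a hedged check:

#[test]
fn null_index_footer_size_sketch() {
    let footer = NullIndexFooter {
        cardinality: FastFieldCardinality::Single,
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 100..120,
    };
    let mut out = vec![];
    footer.serialize(&mut out).unwrap();
    assert_eq!(out.len(), 4); // 1 (cardinality) + 1 (codec) + 1 (VInt 100) + 1 (VInt 20)
}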
@@ -1,119 +0,0 @@
use std::ops::{Range, RangeInclusive};
use std::sync::Arc;

use crate::Column;

/// `OptionalColumn` provides columnar access on a field.
pub trait OptionalColumn<T: PartialOrd = u64>: Send + Sync {
    /// Return the value associated with the given idx.
    ///
    /// This accessor should return as fast as possible.
    ///
    /// # Panics
    ///
    /// May panic if `idx` is greater than the column length.
    fn get_val(&self, idx: u32) -> Option<T>;

    /// Fills an output buffer with the fast field values
    /// associated with the `DocId` going from
    /// `start` to `start + output.len()`.
    ///
    /// # Panics
    ///
    /// Must panic if `start + output.len()` is greater than
    /// the segment's `maxdoc`.
    fn get_range(&self, start: u64, output: &mut [Option<T>]) {
        for (out, idx) in output.iter_mut().zip(start..) {
            *out = self.get_val(idx as u32);
        }
    }

    /// Return the positions of values which are in the provided range.
    fn get_docids_for_value_range(
        &self,
        value_range: RangeInclusive<T>,
        doc_id_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());

        for idx in doc_id_range {
            let val = self.get_val(idx);
            if let Some(val) = val {
                if value_range.contains(&val) {
                    positions.push(idx);
                }
            }
        }
    }

    /// Returns the minimum value for this fast field.
    ///
    /// This min_value may not be exact.
    /// For instance, the min value does not take into account
    /// possibly deleted documents. All values are however guaranteed
    /// to be greater than or equal to `.min_value()`.
    fn min_value(&self) -> Option<T>;

    /// Returns the maximum value for this fast field.
    ///
    /// This max_value may not be exact.
    /// For instance, the max value does not take into account
    /// possibly deleted documents. All values are however guaranteed
    /// to be less than or equal to `.max_value()`.
    fn max_value(&self) -> Option<T>;

    /// The number of values including `None` in the column.
    fn num_vals(&self) -> u32;

    /// Returns an iterator over the data.
    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = Option<T>> + 'a> {
        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
    }

    /// Returns the full column if all values are set and the column is not empty.
    fn to_full(&self) -> Option<Arc<dyn Column<T>>> {
        None
    }
}

/// Temporary wrapper to migrate to the optional column.
pub(crate) struct ToOptionalColumn<T> {
    column: Arc<dyn Column<T>>,
}

impl<T: PartialOrd> ToOptionalColumn<T> {
    pub(crate) fn new(column: Arc<dyn Column<T>>) -> Self {
        Self { column }
    }
}

impl<T: PartialOrd> OptionalColumn<T> for ToOptionalColumn<T> {
    #[inline]
    fn get_val(&self, idx: u32) -> Option<T> {
        let val = self.column.get_val(idx);
        Some(val)
    }

    fn min_value(&self) -> Option<T> {
        let min_value = self.column.min_value();
        Some(min_value)
    }

    fn max_value(&self) -> Option<T> {
        let max_value = self.column.max_value();
        Some(max_value)
    }

    fn num_vals(&self) -> u32 {
        self.column.num_vals()
    }

    fn iter(&self) -> Box<dyn Iterator<Item = Option<T>> + '_> {
        Box::new(self.column.iter().map(|el| Some(el)))
    }
    /// Returns the full column if all values are set and the column is not empty.
    fn to_full(&self) -> Option<Arc<dyn Column<T>>> {
        Some(self.column.clone())
    }
}
@@ -1,22 +1,3 @@
// Copyright (C) 2022 Quickwit, Inc.
//
// Quickwit is offered under the AGPL v3.0 and as commercial software.
// For commercial licensing, contact us at hello@quickwit.io.
//
// AGPL:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use std::num::NonZeroU64;
use std::sync::Arc;
@@ -28,11 +9,15 @@ use ownedbytes::OwnedBytes;
use crate::bitpacked::BitpackedCodec;
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::compact_space::CompactSpaceCompressor;
use crate::format_version::append_format_version;
use crate::linear::LinearCodec;
use crate::monotonic_mapping::{
    StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
    StrictlyMonotonicMappingToInternalGCDBaseval,
};
use crate::null_index_footer::{
    append_null_index_footer, FastFieldCardinality, NullIndexCodec, NullIndexFooter,
};
use crate::{
    monotonic_map_column, Column, FastFieldCodec, FastFieldCodecType, MonotonicallyMappableToU64,
    U128FastFieldCodecType, VecColumn, ALL_CODEC_TYPES,
@@ -189,6 +174,68 @@ pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
    iter_gen: F,
    num_vals: u32,
    output: &mut impl io::Write,
) -> io::Result<()> {
    serialize_u128_new(ValueIndexInfo::default(), iter_gen, num_vals, output)
}

#[allow(dead_code)]
pub enum ValueIndexInfo<'a> {
    MultiValue(Box<dyn MultiValueIndexInfo + 'a>),
    SingleValue(Box<dyn SingleValueIndexInfo + 'a>),
}

impl Default for ValueIndexInfo<'static> {
    fn default() -> Self {
        struct Dummy {}
        impl SingleValueIndexInfo for Dummy {
            fn num_vals(&self) -> u32 {
                todo!()
            }
            fn num_non_nulls(&self) -> u32 {
                todo!()
            }
            fn iter(&self) -> Box<dyn Iterator<Item = u32>> {
                todo!()
            }
        }

        Self::SingleValue(Box::new(Dummy {}))
    }
}

impl<'a> ValueIndexInfo<'a> {
    fn get_cardinality(&self) -> FastFieldCardinality {
        match self {
            ValueIndexInfo::MultiValue(_) => FastFieldCardinality::Multi,
            ValueIndexInfo::SingleValue(_) => FastFieldCardinality::Single,
        }
    }
}

pub trait MultiValueIndexInfo {
    /// The number of docs in the column.
    fn num_docs(&self) -> u32;
    /// The number of values in the column.
    fn num_vals(&self) -> u32;
    /// Return the start index of the values for each doc.
    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_>;
}

pub trait SingleValueIndexInfo {
    /// The number of values including nulls in the column.
    fn num_vals(&self) -> u32;
    /// The number of non-null values in the column.
    fn num_non_nulls(&self) -> u32;
    /// Return an iterator over the positions of docs with a value.
    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_>;
}

/// Serializes u128 values with the compact space codec.
pub fn serialize_u128_new<F: Fn() -> I, I: Iterator<Item = u128>>(
    value_index: ValueIndexInfo,
    iter_gen: F,
    num_vals: u32,
    output: &mut impl io::Write,
) -> io::Result<()> {
    let header = U128Header {
        num_vals,
@@ -198,6 +245,14 @@ pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
    let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
    compressor.compress_into(iter_gen(), output).unwrap();

    let null_index_footer = NullIndexFooter {
        cardinality: value_index.get_cardinality(),
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 0..0,
    };
    append_null_index_footer(output, null_index_footer)?;
    append_format_version(output)?;

    Ok(())
}

@@ -206,6 +261,16 @@ pub fn serialize<T: MonotonicallyMappableToU64>(
    typed_column: impl Column<T>,
    output: &mut impl io::Write,
    codecs: &[FastFieldCodecType],
) -> io::Result<()> {
    serialize_new(ValueIndexInfo::default(), typed_column, output, codecs)
}

/// Serializes the column with the codec with the best estimate on the data.
pub fn serialize_new<T: MonotonicallyMappableToU64>(
    value_index: ValueIndexInfo,
    typed_column: impl Column<T>,
    output: &mut impl io::Write,
    codecs: &[FastFieldCodecType],
) -> io::Result<()> {
    let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
    let header = Header::compute_header(&column, codecs).ok_or_else(|| {
@@ -221,6 +286,15 @@ pub fn serialize<T: MonotonicallyMappableToU64>(
    let normalized_column = header.normalize_column(column);
    assert_eq!(normalized_column.min_value(), 0u64);
    serialize_given_codec(normalized_column, header.codec_type, output)?;

    let null_index_footer = NullIndexFooter {
        cardinality: value_index.get_cardinality(),
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 0..0,
    };
    append_null_index_footer(output, null_index_footer)?;
    append_format_version(output)?;

    Ok(())
}

@@ -278,10 +352,7 @@ pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
) -> Arc<dyn Column<T>> {
    let mut buffer = Vec::new();
    super::serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
    super::open(OwnedBytes::new(buffer))
        .unwrap()
        .to_full()
        .unwrap()
    super::open(OwnedBytes::new(buffer)).unwrap()
}

#[cfg(test)]
@@ -313,7 +384,7 @@ mod tests {
        let col = VecColumn::from(&[false, true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 1 byte of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 5 + 8);
        assert_eq!(buffer.len(), 3 + 5 + 8 + 4 + 2);
    }

    #[test]
@@ -322,7 +393,7 @@ mod tests {
        let col = VecColumn::from(&[true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 0 bytes of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 5 + 7);
        assert_eq!(buffer.len(), 3 + 5 + 7 + 4 + 2);
    }

    #[test]
@@ -332,6 +403,6 @@ mod tests {
        let col = VecColumn::from(&vals[..]);
        serialize(col, &mut buffer, &[FastFieldCodecType::Bitpacked]).unwrap();
        // Values are stored over 3 bits.
        assert_eq!(buffer.len(), 7 + (3 * 80 / 8) + 7);
        assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 7 + 4 + 2);
    }
}
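Reading the updated assertions together: every serialized column now carries the same fixed tail. The `3 + … + 4 + 2` pattern in all three tests appears to be a 3-byte format version written by `append_format_version`, the 4-byte null-index footer (two one-byte codes plus two zero-valued VInts), and the 2-byte footer length. This breakdown is inferred from the test arithmetic; the exact size of the format version is not stated in the diff.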

@@ -1,10 +1,14 @@
[package]
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
version = "0.3.0"
version = "0.5.0"
edition = "2021"
description = "Expose data as static slice"
license = "MIT"
documentation = "https://docs.rs/ownedbytes/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]

@@ -3,7 +3,7 @@ use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io, mem};

use stable_deref_trait::StableDeref;
pub use stable_deref_trait::StableDeref;

/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a slice.
@@ -80,6 +80,21 @@ impl OwnedBytes {
        (left, right)
    }

    /// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
    ///
    /// Right will hold `split_len` bytes.
    ///
    /// This operation is cheap and does not require copying any memory.
    /// On the other hand, both `left` and `right` retain a handle over
    /// the entire slice of memory. In other words, the memory will only
    /// be released when both left and right are dropped.
    #[inline]
    #[must_use]
    pub fn rsplit(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
        let data_len = self.data.len();
        self.split(data_len - split_len)
    }

    /// Splits the right part of the `OwnedBytes` at the given offset.
    ///
    /// `self` is truncated to `split_len`, left with the remaining bytes.
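A short usage sketch for `rsplit` (an illustration; `as_slice` is used elsewhere in this diff):

let bytes = OwnedBytes::new(vec![1u8, 2, 3, 4]);
let (left, right) = bytes.rsplit(1);
assert_eq!(left.as_slice(), &[1, 2, 3]);
assert_eq!(right.as_slice(), &[4]);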
@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
version = "0.18.0"
version = "0.19.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -4,14 +4,14 @@ use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::Arc;

use fastfield_codecs::OptionalColumn;
use fastfield_codecs::Column;

use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::fastfield::{type_and_cardinality, FastType, MultiValuedFastFieldReader};
use crate::fastfield::{type_and_cardinality, MultiValuedFastFieldReader};
use crate::schema::{Cardinality, Type};
use crate::{InvertedIndexReader, SegmentReader, TantivyError};

@@ -37,16 +37,16 @@ impl AggregationsWithAccessor {
#[derive(Clone)]
pub(crate) enum FastFieldAccessor {
    Multi(MultiValuedFastFieldReader<u64>),
    Single(Arc<dyn OptionalColumn<u64>>),
    Single(Arc<dyn Column<u64>>),
}
impl FastFieldAccessor {
    pub fn as_single(&self) -> Option<&dyn OptionalColumn<u64>> {
    pub fn as_single(&self) -> Option<&dyn Column<u64>> {
        match self {
            FastFieldAccessor::Multi(_) => None,
            FastFieldAccessor::Single(reader) => Some(&**reader),
        }
    }
    pub fn into_single(self) -> Option<Arc<dyn OptionalColumn<u64>>> {
    pub fn into_single(self) -> Option<Arc<dyn Column<u64>>> {
        match self {
            FastFieldAccessor::Multi(_) => None,
            FastFieldAccessor::Single(reader) => Some(reader),
@@ -124,7 +124,7 @@ impl BucketAggregationWithAccessor {
pub struct MetricAggregationWithAccessor {
    pub metric: MetricAggregation,
    pub field_type: Type,
    pub accessor: Arc<dyn OptionalColumn>,
    pub accessor: Arc<dyn Column>,
}

impl MetricAggregationWithAccessor {
@@ -194,13 +194,7 @@ fn get_ff_reader_and_validate(
        .ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
    let field_type = reader.schema().get_field_entry(field).field_type();

    if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) {
        if ff_type == FastType::Date {
            return Err(TantivyError::InvalidArgument(
                "Unsupported field type date in aggregation".to_string(),
            ));
        }

    if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
        if cardinality != field_cardinality {
            return Err(TantivyError::InvalidArgument(format!(
                "Invalid field cardinality on field {} expected {:?}, but got {:?}",

@@ -12,6 +12,7 @@ use super::bucket::GetDocCount;
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
use super::metric::{SingleMetricResult, Stats};
use super::Key;
use crate::schema::Schema;
use crate::TantivyError;

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
@@ -129,9 +130,12 @@ pub enum BucketResult {
}

impl BucketResult {
    pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
    pub(crate) fn empty_from_req(
        req: &BucketAggregationInternal,
        schema: &Schema,
    ) -> crate::Result<Self> {
        let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
        empty_bucket.into_final_bucket_result(req)
        empty_bucket.into_final_bucket_result(req, schema)
    }
}

@@ -174,6 +178,9 @@ pub enum BucketEntries<T> {
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BucketEntry {
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The string representation of the bucket.
    pub key_as_string: Option<String>,
    /// The identifier of the bucket.
    pub key: Key,
    /// Number of documents in the bucket.
@@ -238,4 +245,10 @@ pub struct RangeBucketEntry {
    /// The to range of the bucket. Equals `f64::MAX` when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<f64>,
    /// The optional string representation for the `from` range.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from_as_string: Option<String>,
    /// The optional string representation for the `to` range.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to_as_string: Option<String>,
}

@@ -1,7 +1,7 @@
use std::cmp::Ordering;
use std::fmt::Display;

use fastfield_codecs::OptionalColumn;
use fastfield_codecs::Column;
use itertools::Itertools;
use serde::{Deserialize, Serialize};

@@ -10,12 +10,12 @@ use crate::aggregation::agg_req_with_accessor::{
    AggregationsWithAccessor, BucketAggregationWithAccessor,
};
use crate::aggregation::agg_result::BucketEntry;
use crate::aggregation::f64_from_fastfield_u64;
use crate::aggregation::intermediate_agg_result::{
    IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::schema::Type;
use crate::aggregation::{f64_from_fastfield_u64, format_date};
use crate::schema::{Schema, Type};
use crate::{DocId, TantivyError};

/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
@@ -206,6 +206,7 @@ pub struct SegmentHistogramCollector {
    field_type: Type,
    interval: f64,
    offset: f64,
    min_doc_count: u64,
    first_bucket_num: i64,
    bounds: HistogramBounds,
}
@@ -215,6 +216,30 @@ impl SegmentHistogramCollector {
        self,
        agg_with_accessor: &BucketAggregationWithAccessor,
    ) -> crate::Result<IntermediateBucketResult> {
        // Compute the number of buckets to validate against the max number of buckets.
        // Note: We use min_doc_count here, but it is only a lower bound, since we are on the
        // intermediate level and after merging the number of documents of a bucket could exceed
        // `min_doc_count`.
        {
            let cut_off_buckets_front = self
                .buckets
                .iter()
                .take_while(|bucket| bucket.doc_count <= self.min_doc_count)
                .count();
            let cut_off_buckets_back = self.buckets[cut_off_buckets_front..]
                .iter()
                .rev()
                .take_while(|bucket| bucket.doc_count <= self.min_doc_count)
                .count();
            let estimate_num_buckets =
                self.buckets.len() - cut_off_buckets_front - cut_off_buckets_back;

            agg_with_accessor
                .bucket_count
                .add_count(estimate_num_buckets as u32);
            agg_with_accessor.bucket_count.validate_bucket_count()?;
        }

        let mut buckets = Vec::with_capacity(
            self.buckets
                .iter()
@@ -251,11 +276,6 @@ impl SegmentHistogramCollector {
        );
    };

    agg_with_accessor
        .bucket_count
        .add_count(buckets.len() as u32);
    agg_with_accessor.bucket_count.validate_bucket_count()?;

    Ok(IntermediateBucketResult::Histogram { buckets })
}

@@ -263,17 +283,13 @@ impl SegmentHistogramCollector {
    req: &HistogramAggregation,
    sub_aggregation: &AggregationsWithAccessor,
    field_type: Type,
    accessor: &dyn OptionalColumn<u64>,
    accessor: &dyn Column<u64>,
) -> crate::Result<Self> {
    req.validate()?;
    let min_max_u64 = accessor.min_value().zip(accessor.max_value());
    let min_max_f64 = min_max_u64.map(|(min, max)| {
        let min = f64_from_fastfield_u64(min, &field_type);
        let max = f64_from_fastfield_u64(max, &field_type);
        (min, max)
    });
    let min = f64_from_fastfield_u64(accessor.min_value(), &field_type);
    let max = f64_from_fastfield_u64(accessor.max_value(), &field_type);

    let (min, max) = get_req_min_max(req, min_max_f64);
    let (min, max) = get_req_min_max(req, Some((min, max)));

    // We compute and generate the buckets range (min, max) based on the request and the min
    // max in the fast field, but this is likely not ideal when this is a subbucket, where many
@@ -312,6 +328,7 @@ impl SegmentHistogramCollector {
        first_bucket_num,
        bounds,
        sub_aggregations,
        min_doc_count: req.min_doc_count(),
    })
}

@@ -335,58 +352,47 @@ impl SegmentHistogramCollector {
        .expect("unexpected fast field cardinality");
    let mut iter = doc.chunks_exact(4);
    for docs in iter.by_ref() {
        if let Some(val) = accessor.get_val(docs[0]) {
            let val = self.f64_from_fastfield_u64(val);
            let bucket_pos = get_bucket_num(val);
            self.increment_bucket_if_in_bounds(
                val,
                &bounds,
                bucket_pos,
                docs[0],
                &bucket_with_accessor.sub_aggregation,
            )?;
        }
        let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0]));
        let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1]));
        let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2]));
        let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3]));

        if let Some(val) = accessor.get_val(docs[1]) {
            let val = self.f64_from_fastfield_u64(val);
            let bucket_pos = get_bucket_num(val);
            self.increment_bucket_if_in_bounds(
                val,
                &bounds,
                bucket_pos,
                docs[1],
                &bucket_with_accessor.sub_aggregation,
            )?;
        }
        let bucket_pos0 = get_bucket_num(val0);
        let bucket_pos1 = get_bucket_num(val1);
        let bucket_pos2 = get_bucket_num(val2);
        let bucket_pos3 = get_bucket_num(val3);

        if let Some(val) = accessor.get_val(docs[2]) {
            let val = self.f64_from_fastfield_u64(val);
            let bucket_pos = get_bucket_num(val);
            self.increment_bucket_if_in_bounds(
                val,
                &bounds,
                bucket_pos,
                docs[2],
                &bucket_with_accessor.sub_aggregation,
            )?;
        }

        if let Some(val) = accessor.get_val(docs[3]) {
            let val = self.f64_from_fastfield_u64(val);
            let bucket_pos = get_bucket_num(val);
            self.increment_bucket_if_in_bounds(
                val,
                &bounds,
                bucket_pos,
                docs[3],
                &bucket_with_accessor.sub_aggregation,
            )?;
        }
        self.increment_bucket_if_in_bounds(
            val0,
            &bounds,
            bucket_pos0,
            docs[0],
            &bucket_with_accessor.sub_aggregation,
        )?;
        self.increment_bucket_if_in_bounds(
            val1,
            &bounds,
            bucket_pos1,
            docs[1],
            &bucket_with_accessor.sub_aggregation,
        )?;
        self.increment_bucket_if_in_bounds(
            val2,
            &bounds,
            bucket_pos2,
            docs[2],
            &bucket_with_accessor.sub_aggregation,
        )?;
        self.increment_bucket_if_in_bounds(
            val3,
            &bounds,
            bucket_pos3,
            docs[3],
            &bucket_with_accessor.sub_aggregation,
        )?;
    }
    for &doc in iter.remainder() {
        let Some(val) = accessor.get_val(doc).map(|val| f64_from_fastfield_u64(val, &self.field_type)) else {
            continue;
        };
        let val = f64_from_fastfield_u64(accessor.get_val(doc), &self.field_type);
        if !bounds.contains(val) {
            continue;
        }
@@ -395,7 +401,7 @@ impl SegmentHistogramCollector {

        debug_assert_eq!(
            self.buckets[bucket_pos].key,
            get_bucket_val(val, self.interval, self.offset) as f64
            get_bucket_val(val, self.interval, self.offset)
        );
        self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
    }
@@ -422,7 +428,7 @@ impl SegmentHistogramCollector {
|
||||
if bounds.contains(val) {
|
||||
debug_assert_eq!(
|
||||
self.buckets[bucket_pos].key,
|
||||
get_bucket_val(val, self.interval, self.offset) as f64
|
||||
get_bucket_val(val, self.interval, self.offset)
|
||||
);
|
||||
|
||||
self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;
|
||||
@@ -466,6 +472,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
|
||||
buckets: Vec<IntermediateHistogramBucketEntry>,
|
||||
histogram_req: &HistogramAggregation,
|
||||
sub_aggregation: &AggregationsInternal,
|
||||
schema: &Schema,
|
||||
) -> crate::Result<Vec<BucketEntry>> {
|
||||
// Generate the full list of buckets without gaps.
|
||||
//
|
||||
@@ -506,7 +513,9 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
|
||||
sub_aggregation: empty_sub_aggregation.clone(),
|
||||
},
|
||||
})
|
||||
.map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
|
||||
.map(|intermediate_bucket| {
|
||||
intermediate_bucket.into_final_bucket_entry(sub_aggregation, schema)
|
||||
})
|
||||
.collect::<crate::Result<Vec<_>>>()
|
||||
}
|
||||
|
||||
@@ -515,20 +524,43 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
|
||||
buckets: Vec<IntermediateHistogramBucketEntry>,
|
||||
histogram_req: &HistogramAggregation,
|
||||
sub_aggregation: &AggregationsInternal,
|
||||
schema: &Schema,
|
||||
) -> crate::Result<Vec<BucketEntry>> {
|
||||
if histogram_req.min_doc_count() == 0 {
|
||||
let mut buckets = if histogram_req.min_doc_count() == 0 {
|
||||
// With min_doc_count != 0, we may need to add buckets, so that there are no
|
||||
// gaps, since intermediate result does not contain empty buckets (filtered to
|
||||
// reduce serialization size).
|
||||
|
||||
intermediate_buckets_to_final_buckets_fill_gaps(buckets, histogram_req, sub_aggregation)
|
||||
intermediate_buckets_to_final_buckets_fill_gaps(
|
||||
buckets,
|
||||
histogram_req,
|
||||
sub_aggregation,
|
||||
schema,
|
||||
)?
|
||||
} else {
|
||||
buckets
|
||||
.into_iter()
|
||||
.filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
|
||||
.map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
|
||||
.collect::<crate::Result<Vec<_>>>()
|
||||
.map(|histogram_bucket| {
|
||||
histogram_bucket.into_final_bucket_entry(sub_aggregation, schema)
|
||||
})
|
||||
.collect::<crate::Result<Vec<_>>>()?
|
||||
};
|
||||
|
||||
// If we have a date type on the histogram buckets, we add the `key_as_string` field as rfc339
|
||||
let field = schema
|
||||
.get_field(&histogram_req.field)
|
||||
.ok_or_else(|| TantivyError::FieldNotFound(histogram_req.field.to_string()))?;
|
||||
if schema.get_field_entry(field).field_type().is_date() {
|
||||
for bucket in buckets.iter_mut() {
|
||||
if let crate::aggregation::Key::F64(val) = bucket.key {
|
||||
let key_as_string = format_date(val as i64)?;
|
||||
bucket.key_as_string = Some(key_as_string);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(buckets)
|
||||
}
|
||||
|
||||
/// Applies req extended_bounds/hard_bounds on the min_max value
|
||||
@@ -1387,6 +1419,63 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn histogram_date_test_single_segment() -> crate::Result<()> {
|
||||
histogram_date_test_with_opt(true)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn histogram_date_test_multi_segment() -> crate::Result<()> {
|
||||
histogram_date_test_with_opt(false)
|
||||
}
|
||||
|
||||
fn histogram_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
|
||||
let index = get_test_index_2_segments(merge_segments)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"histogram".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
|
||||
field: "date".to_string(),
|
||||
interval: 86400000000.0, // one day in microseconds
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let agg_res = exec_request(agg_req, &index)?;
|
||||
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
|
||||
assert_eq!(res["histogram"]["buckets"][0]["key"], 1546300800000000.0);
|
||||
assert_eq!(
|
||||
res["histogram"]["buckets"][0]["key_as_string"],
|
||||
"2019-01-01T00:00:00Z"
|
||||
);
|
||||
assert_eq!(res["histogram"]["buckets"][0]["doc_count"], 1);
|
||||
|
||||
assert_eq!(res["histogram"]["buckets"][1]["key"], 1546387200000000.0);
|
||||
assert_eq!(
|
||||
res["histogram"]["buckets"][1]["key_as_string"],
|
||||
"2019-01-02T00:00:00Z"
|
||||
);
|
||||
|
||||
assert_eq!(res["histogram"]["buckets"][1]["doc_count"], 5);
|
||||
|
||||
assert_eq!(res["histogram"]["buckets"][2]["key"], 1546473600000000.0);
|
||||
assert_eq!(
|
||||
res["histogram"]["buckets"][2]["key_as_string"],
|
||||
"2019-01-03T00:00:00Z"
|
||||
);
|
||||
|
||||
assert_eq!(res["histogram"]["buckets"][3], Value::Null);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn histogram_invalid_request() -> crate::Result<()> {
|
||||
let index = get_test_index_2_segments(true)?;
|
||||
@@ -1453,4 +1542,36 @@ mod tests {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn histogram_test_max_buckets_segments() -> crate::Result<()> {
|
||||
let values = vec![0.0, 70000.0];
|
||||
|
||||
let index = get_test_index_from_values(true, &values)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"my_interval".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
|
||||
field: "score_f64".to_string(),
|
||||
interval: 1.0,
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request(agg_req, &index);
|
||||
|
||||
assert_eq!(
|
||||
res.unwrap_err().to_string(),
|
||||
"An invalid argument was passed: 'Aborting aggregation because too many buckets were \
|
||||
created'"
|
||||
.to_string()
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
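The date handling in `intermediate_histogram_buckets_to_final_buckets` above treats each bucket key on a date field as microseconds since the Unix epoch, stored as `f64`, and renders it as RFC 3339. A minimal standalone sketch of that conversion, using the same `time` APIs as the new `src/aggregation/date.rs` further down; `key_as_rfc3339` is an illustrative name, not a crate item:

```rust
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

fn key_as_rfc3339(key_micros: f64) -> Result<String, time::Error> {
    // Bucket keys are f64 microseconds; `time` expects nanoseconds as i128.
    let nanos = 1_000 * (key_micros as i64) as i128;
    let datetime = OffsetDateTime::from_unix_timestamp_nanos(nanos)?;
    Ok(datetime.format(&Rfc3339)?)
}

// key_as_rfc3339(1546300800000000.0) == Ok("2019-01-01T00:00:00Z"),
// matching the assertions in histogram_date_test_with_opt above.
```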
@@ -1,6 +1,7 @@
use std::fmt::Debug;
use std::ops::Range;

use fastfield_codecs::MonotonicallyMappableToU64;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

@@ -11,7 +12,9 @@ use crate::aggregation::intermediate_agg_result::{
    IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
use crate::aggregation::{
    f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
};
use crate::schema::Type;
use crate::{DocId, TantivyError};

@@ -181,7 +184,7 @@ impl SegmentRangeCollector {
            .into_iter()
            .map(move |range_bucket| {
                Ok((
                    range_to_string(&range_bucket.range, &field_type),
                    range_to_string(&range_bucket.range, &field_type)?,
                    range_bucket
                        .bucket
                        .into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
@@ -209,8 +212,8 @@ impl SegmentRangeCollector {
        let key = range
            .key
            .clone()
            .map(Key::Str)
            .unwrap_or_else(|| range_to_key(&range.range, &field_type));
            .map(|key| Ok(Key::Str(key)))
            .unwrap_or_else(|| range_to_key(&range.range, &field_type))?;
        let to = if range.range.end == u64::MAX {
            None
        } else {
@@ -228,6 +231,7 @@ impl SegmentRangeCollector {
                sub_aggregation,
            )?)
        };

        Ok(SegmentRangeAndBucketEntry {
            range: range.range.clone(),
            bucket: SegmentRangeBucketEntry {
@@ -267,29 +271,20 @@ impl SegmentRangeCollector {
            let val2 = accessor.get_val(docs[1]);
            let val3 = accessor.get_val(docs[2]);
            let val4 = accessor.get_val(docs[3]);
            if let Some(val) = val1 {
                let bucket_pos = self.get_bucket_pos(val);
                self.increment_bucket(bucket_pos, docs[0], &bucket_with_accessor.sub_aggregation)?;
            }
            if let Some(val) = val2 {
                let bucket_pos = self.get_bucket_pos(val);
                self.increment_bucket(bucket_pos, docs[1], &bucket_with_accessor.sub_aggregation)?;
            }
            if let Some(val) = val3 {
                let bucket_pos = self.get_bucket_pos(val);
                self.increment_bucket(bucket_pos, docs[2], &bucket_with_accessor.sub_aggregation)?;
            }
            if let Some(val) = val4 {
                let bucket_pos = self.get_bucket_pos(val);
                self.increment_bucket(bucket_pos, docs[3], &bucket_with_accessor.sub_aggregation)?;
            }
            let bucket_pos1 = self.get_bucket_pos(val1);
            let bucket_pos2 = self.get_bucket_pos(val2);
            let bucket_pos3 = self.get_bucket_pos(val3);
            let bucket_pos4 = self.get_bucket_pos(val4);

            self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
            self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
            self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
            self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
        }
        for &doc in iter.remainder() {
            let val = accessor.get_val(doc);
            if let Some(val) = val {
                let bucket_pos = self.get_bucket_pos(val);
                self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
            }
            let bucket_pos = self.get_bucket_pos(val);
            self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
        }
        if force_flush {
            for bucket in &mut self.buckets {
@@ -411,34 +406,45 @@ fn extend_validate_ranges(
    Ok(converted_buckets)
}

pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> crate::Result<String> {
    // is_start is there for malformed requests, e.g. if the user passes the range u64::MIN..0.0,
    // it should be rendered as "*-0" and not "*-*"
    let to_str = |val: u64, is_start: bool| {
        if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
            "*".to_string()
            Ok("*".to_string())
        } else if *field_type == Type::Date {
            let val = i64::from_u64(val);
            format_date(val)
        } else {
            f64_from_fastfield_u64(val, field_type).to_string()
            Ok(f64_from_fastfield_u64(val, field_type).to_string())
        }
    };

    format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
    Ok(format!(
        "{}-{}",
        to_str(range.start, true)?,
        to_str(range.end, false)?
    ))
}

pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
    Key::Str(range_to_string(range, field_type))
pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Result<Key> {
    Ok(Key::Str(range_to_string(range, field_type)?))
}

#[cfg(test)]
mod tests {

    use fastfield_codecs::MonotonicallyMappableToU64;
    use serde_json::Value;

    use super::*;
    use crate::aggregation::agg_req::{
        Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
    };
    use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};
    use crate::aggregation::tests::{
        exec_request, exec_request_with_query, get_test_index_2_segments,
        get_test_index_with_num_docs,
    };

    pub fn get_collector_from_ranges(
        ranges: Vec<RangeAggregationRange>,
@@ -576,6 +582,77 @@ mod tests {
        Ok(())
    }

    #[test]
    fn range_date_test_single_segment() -> crate::Result<()> {
        range_date_test_with_opt(true)
    }

    #[test]
    fn range_date_test_multi_segment() -> crate::Result<()> {
        range_date_test_with_opt(false)
    }

    fn range_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
        let index = get_test_index_2_segments(merge_segments)?;

        let agg_req: Aggregations = vec![(
            "date_ranges".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Range(RangeAggregation {
                    field: "date".to_string(),
                    ranges: vec![
                        RangeAggregationRange {
                            key: None,
                            from: None,
                            to: Some(1546300800000000.0f64),
                        },
                        RangeAggregationRange {
                            key: None,
                            from: Some(1546300800000000.0f64),
                            to: Some(1546387200000000.0f64),
                        },
                    ],
                    keyed: false,
                }),
                sub_aggregation: Default::default(),
            }),
        )]
        .into_iter()
        .collect();

        let agg_res = exec_request(agg_req, &index)?;

        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;

        assert_eq!(
            res["date_ranges"]["buckets"][0]["from_as_string"],
            Value::Null
        );
        assert_eq!(
            res["date_ranges"]["buckets"][0]["key"],
            "*-2019-01-01T00:00:00Z"
        );
        assert_eq!(
            res["date_ranges"]["buckets"][1]["from_as_string"],
            "2019-01-01T00:00:00Z"
        );
        assert_eq!(
            res["date_ranges"]["buckets"][1]["to_as_string"],
            "2019-01-02T00:00:00Z"
        );

        assert_eq!(
            res["date_ranges"]["buckets"][2]["from_as_string"],
            "2019-01-02T00:00:00Z"
        );
        assert_eq!(
            res["date_ranges"]["buckets"][2]["to_as_string"],
            Value::Null
        );

        Ok(())
    }

    #[test]
    fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
        let index = get_test_index_with_num_docs(false, 100)?;
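The `*` rendering in `range_to_string` above relies on a sentinel convention: internally every requested range is a `Range<u64>`, and open endpoints are encoded as `u64::MIN` (start) and `u64::MAX` (end). A small self-contained sketch of that convention (helper name is illustrative only):

```rust
use std::ops::Range;

// Open endpoints are encoded with the extreme u64 values and printed as "*".
fn key_for(range: &Range<u64>) -> String {
    let start = if range.start == u64::MIN {
        "*".to_string()
    } else {
        range.start.to_string()
    };
    let end = if range.end == u64::MAX {
        "*".to_string()
    } else {
        range.end.to_string()
    };
    format!("{}-{}", start, end)
}

// key_for(&(u64::MIN..42)) == "*-42", which is why a half-open date range
// shows up as "*-2019-01-01T00:00:00Z" in the tests above.
```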
@@ -7,6 +7,7 @@ use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::schema::Schema;
use crate::{SegmentReader, TantivyError};

/// The default max bucket count, before the aggregation fails.
@@ -16,6 +17,7 @@ pub const MAX_BUCKET_COUNT: u32 = 65000;
///
/// The collector collects all aggregations by the underlying aggregation request.
pub struct AggregationCollector {
    schema: Schema,
    agg: Aggregations,
    max_bucket_count: u32,
}
@@ -25,8 +27,9 @@ impl AggregationCollector {
    ///
    /// Aggregation fails when the total bucket count is higher than max_bucket_count.
    /// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>, schema: Schema) -> Self {
        Self {
            schema,
            agg,
            max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
        }
@@ -113,7 +116,7 @@ impl Collector for AggregationCollector {
        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
    ) -> crate::Result<Self::Fruit> {
        let res = merge_fruits(segment_fruits)?;
        res.into_final_bucket_result(self.agg.clone())
        res.into_final_bucket_result(self.agg.clone(), &self.schema)
    }
}
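Usage under the new signature, sketched under the assumption that an `Aggregations` request has already been built as in the module docs; the schema is what lets the final result detect date fields:

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Index;

fn run_aggs(index: &Index, agg_req: Aggregations) -> tantivy::Result<AggregationResults> {
    // The schema is now threaded through to result building, so date fields
    // can be rendered as RFC 3339 strings.
    let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
    let searcher = index.reader()?.searcher();
    searcher.search(&AllQuery, &collector)
}
```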
18  src/aggregation/date.rs  (new file)
@@ -0,0 +1,18 @@
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

use crate::TantivyError;

pub(crate) fn format_date(val: i64) -> crate::Result<String> {
    let datetime =
        OffsetDateTime::from_unix_timestamp_nanos(1_000 * (val as i128)).map_err(|err| {
            TantivyError::InvalidArgument(format!(
                "Could not convert {:?} to OffsetDateTime, err {:?}",
                val, err
            ))
        })?;
    let key_as_string = datetime
        .format(&Rfc3339)
        .map_err(|_err| TantivyError::InvalidArgument("Could not serialize date".to_string()))?;
    Ok(key_as_string)
}
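A quick check of the helper, written as a test one could place alongside it (the function is `pub(crate)`, so this only compiles inside the crate):

```rust
#[test]
fn format_date_roundtrip() -> crate::Result<()> {
    // 2019-01-01T00:00:00Z, expressed in microseconds since the Unix epoch.
    assert_eq!(format_date(1_546_300_800_000_000)?, "2019-01-01T00:00:00Z");
    Ok(())
}
```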
@@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize};

use super::agg_req::{
    Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
    MetricAggregation,
    MetricAggregation, RangeAggregation,
};
use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
use super::bucket::{
@@ -19,9 +19,11 @@ use super::bucket::{
};
use super::metric::{IntermediateAverage, IntermediateStats};
use super::segment_agg_result::SegmentMetricResultCollector;
use super::{Key, SerializedKey, VecWithNames};
use super::{format_date, Key, SerializedKey, VecWithNames};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::bucket::TermsAggregationInternal;
use crate::schema::Schema;
use crate::TantivyError;

/// Contains the intermediate aggregation result, which is optimized to be merged with other
/// intermediate results.
@@ -35,8 +37,12 @@ pub struct IntermediateAggregationResults {

impl IntermediateAggregationResults {
    /// Convert intermediate result and its aggregation request to the final result.
    pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
        self.into_final_bucket_result_internal(&(req.into()))
    pub fn into_final_bucket_result(
        self,
        req: Aggregations,
        schema: &Schema,
    ) -> crate::Result<AggregationResults> {
        self.into_final_bucket_result_internal(&(req.into()), schema)
    }

    /// Convert intermediate result and its aggregation request to the final result.
@@ -46,6 +52,7 @@ impl IntermediateAggregationResults {
    pub(crate) fn into_final_bucket_result_internal(
        self,
        req: &AggregationsInternal,
        schema: &Schema,
    ) -> crate::Result<AggregationResults> {
        // Important assumption:
        // When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
@@ -53,11 +60,11 @@ impl IntermediateAggregationResults {
        let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();

        if let Some(buckets) = self.buckets {
            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets, schema)?
        } else {
            // When there are no buckets, we create empty buckets, so that the serialized json
            // format is constant
            add_empty_final_buckets_to_result(&mut results, &req.buckets)?
            add_empty_final_buckets_to_result(&mut results, &req.buckets, schema)?
        };

        if let Some(metrics) = self.metrics {
@@ -158,10 +165,12 @@ fn add_empty_final_metrics_to_result(
fn add_empty_final_buckets_to_result(
    results: &mut FxHashMap<String, AggregationResult>,
    req_buckets: &VecWithNames<BucketAggregationInternal>,
    schema: &Schema,
) -> crate::Result<()> {
    let requested_buckets = req_buckets.iter();
    for (key, req) in requested_buckets {
        let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
        let empty_bucket =
            AggregationResult::BucketResult(BucketResult::empty_from_req(req, schema)?);
        results.insert(key.to_string(), empty_bucket);
    }
    Ok(())
@@ -171,12 +180,13 @@ fn convert_and_add_final_buckets_to_result(
    results: &mut FxHashMap<String, AggregationResult>,
    buckets: VecWithNames<IntermediateBucketResult>,
    req_buckets: &VecWithNames<BucketAggregationInternal>,
    schema: &Schema,
) -> crate::Result<()> {
    assert_eq!(buckets.len(), req_buckets.len());

    let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
    for ((key, bucket), req) in buckets_with_request {
        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req, schema)?);
        results.insert(key, result);
    }
    Ok(())
@@ -266,13 +276,21 @@ impl IntermediateBucketResult {
    pub(crate) fn into_final_bucket_result(
        self,
        req: &BucketAggregationInternal,
        schema: &Schema,
    ) -> crate::Result<BucketResult> {
        match self {
            IntermediateBucketResult::Range(range_res) => {
                let mut buckets: Vec<RangeBucketEntry> = range_res
                    .buckets
                    .into_iter()
                    .map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
                    .into_values()
                    .map(|bucket| {
                        bucket.into_final_bucket_entry(
                            &req.sub_aggregation,
                            schema,
                            req.as_range()
                                .expect("unexpected aggregation, expected range aggregation"),
                        )
                    })
                    .collect::<crate::Result<Vec<_>>>()?;

                buckets.sort_by(|left, right| {
@@ -303,6 +321,7 @@ impl IntermediateBucketResult {
                    req.as_histogram()
                        .expect("unexpected aggregation, expected histogram aggregation"),
                    &req.sub_aggregation,
                    schema,
                )?;

                let buckets = if req.as_histogram().unwrap().keyed {
@@ -321,6 +340,7 @@ impl IntermediateBucketResult {
                req.as_term()
                    .expect("unexpected aggregation, expected term aggregation"),
                &req.sub_aggregation,
                schema,
            ),
        }
    }
@@ -411,6 +431,7 @@ impl IntermediateTermBucketResult {
        self,
        req: &TermsAggregation,
        sub_aggregation_req: &AggregationsInternal,
        schema: &Schema,
    ) -> crate::Result<BucketResult> {
        let req = TermsAggregationInternal::from_req(req);
        let mut buckets: Vec<BucketEntry> = self
@@ -419,11 +440,12 @@ impl IntermediateTermBucketResult {
            .filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
            .map(|(key, entry)| {
                Ok(BucketEntry {
                    key_as_string: None,
                    key: Key::Str(key),
                    doc_count: entry.doc_count,
                    sub_aggregation: entry
                        .sub_aggregation
                        .into_final_bucket_result_internal(sub_aggregation_req)?,
                        .into_final_bucket_result_internal(sub_aggregation_req, schema)?,
                })
            })
            .collect::<crate::Result<_>>()?;
@@ -528,13 +550,15 @@ impl IntermediateHistogramBucketEntry {
    pub(crate) fn into_final_bucket_entry(
        self,
        req: &AggregationsInternal,
        schema: &Schema,
    ) -> crate::Result<BucketEntry> {
        Ok(BucketEntry {
            key_as_string: None,
            key: Key::F64(self.key),
            doc_count: self.doc_count,
            sub_aggregation: self
                .sub_aggregation
                .into_final_bucket_result_internal(req)?,
                .into_final_bucket_result_internal(req, schema)?,
        })
    }
}
@@ -571,16 +595,38 @@ impl IntermediateRangeBucketEntry {
    pub(crate) fn into_final_bucket_entry(
        self,
        req: &AggregationsInternal,
        schema: &Schema,
        range_req: &RangeAggregation,
    ) -> crate::Result<RangeBucketEntry> {
        Ok(RangeBucketEntry {
        let mut range_bucket_entry = RangeBucketEntry {
            key: self.key,
            doc_count: self.doc_count,
            sub_aggregation: self
                .sub_aggregation
                .into_final_bucket_result_internal(req)?,
                .into_final_bucket_result_internal(req, schema)?,
            to: self.to,
            from: self.from,
        })
            to_as_string: None,
            from_as_string: None,
        };

        // If we have a date type on the range buckets, we add the
        // `from_as_string`/`to_as_string` fields as RFC 3339
        let field = schema
            .get_field(&range_req.field)
            .ok_or_else(|| TantivyError::FieldNotFound(range_req.field.to_string()))?;
        if schema.get_field_entry(field).field_type().is_date() {
            if let Some(val) = range_bucket_entry.to {
                let key_as_string = format_date(val as i64)?;
                range_bucket_entry.to_as_string = Some(key_as_string);
            }
            if let Some(val) = range_bucket_entry.from {
                let key_as_string = format_date(val as i64)?;
                range_bucket_entry.from_as_string = Some(key_as_string);
            }
        }

        Ok(range_bucket_entry)
    }
}
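The enrichment step at the end of `IntermediateRangeBucketEntry::into_final_bucket_entry` can be read in isolation: once the final bucket exists, date fields get their numeric `from`/`to` mirrored as RFC 3339 strings. A sketch of just that step (`add_date_strings` is an illustrative name; `RangeBucketEntry` and `format_date` are the items from this diff):

```rust
fn add_date_strings(entry: &mut RangeBucketEntry, field_is_date: bool) -> crate::Result<()> {
    if field_is_date {
        if let Some(to) = entry.to {
            entry.to_as_string = Some(format_date(to as i64)?);
        }
        if let Some(from) = entry.from {
            entry.from_as_string = Some(format_date(from as i64)?);
        }
    }
    Ok(())
}
```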
@@ -1,6 +1,6 @@
use std::fmt::Debug;

use fastfield_codecs::OptionalColumn;
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};

use crate::aggregation::f64_from_fastfield_u64;
@@ -57,33 +57,26 @@ impl SegmentAverageCollector {
            data: Default::default(),
        }
    }
    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn OptionalColumn<u64>) {
    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
        let mut iter = doc.chunks_exact(4);
        for docs in iter.by_ref() {
            if let Some(val) = field.get_val(docs[0]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.data.collect(val);
            }
            if let Some(val) = field.get_val(docs[1]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.data.collect(val);
            }

            if let Some(val) = field.get_val(docs[2]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.data.collect(val);
            }

            if let Some(val) = field.get_val(docs[3]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.data.collect(val);
            }
            let val1 = field.get_val(docs[0]);
            let val2 = field.get_val(docs[1]);
            let val3 = field.get_val(docs[2]);
            let val4 = field.get_val(docs[3]);
            let val1 = f64_from_fastfield_u64(val1, &self.field_type);
            let val2 = f64_from_fastfield_u64(val2, &self.field_type);
            let val3 = f64_from_fastfield_u64(val3, &self.field_type);
            let val4 = f64_from_fastfield_u64(val4, &self.field_type);
            self.data.collect(val1);
            self.data.collect(val2);
            self.data.collect(val3);
            self.data.collect(val4);
        }
        for &doc in iter.remainder() {
            if let Some(val) = field.get_val(doc) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.data.collect(val);
            }
            let val = field.get_val(doc);
            let val = f64_from_fastfield_u64(val, &self.field_type);
            self.data.collect(val);
        }
    }
}
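`collect_block` above keeps the same access pattern on both sides of the change: docids are processed in unrolled chunks of four, with a scalar loop for the remainder, so the fast-field reads are issued as independent loads. The pattern standalone:

```rust
// Sum a slice in unrolled chunks of 4, then mop up the remainder; this is the
// shape of the hot loop above, independent of tantivy types.
fn sum_by_fours(vals: &[u64]) -> u64 {
    let mut acc = 0u64;
    let mut chunks = vals.chunks_exact(4);
    for c in chunks.by_ref() {
        // Four independent loads before any accumulation.
        let (a, b, c2, d) = (c[0], c[1], c[2], c[3]);
        acc += a + b + c2 + d;
    }
    for &v in chunks.remainder() {
        acc += v;
    }
    acc
}
```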
@@ -1,4 +1,4 @@
use fastfield_codecs::OptionalColumn;
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};

use crate::aggregation::f64_from_fastfield_u64;
@@ -163,31 +163,26 @@ impl SegmentStatsCollector {
            stats: IntermediateStats::default(),
        }
    }
    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn OptionalColumn<u64>) {
    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
        let mut iter = doc.chunks_exact(4);
        for docs in iter.by_ref() {
            if let Some(val) = field.get_val(docs[0]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.stats.collect(val);
            }
            if let Some(val) = field.get_val(docs[1]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.stats.collect(val);
            }
            if let Some(val) = field.get_val(docs[2]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.stats.collect(val);
            }
            if let Some(val) = field.get_val(docs[3]) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.stats.collect(val);
            }
            let val1 = field.get_val(docs[0]);
            let val2 = field.get_val(docs[1]);
            let val3 = field.get_val(docs[2]);
            let val4 = field.get_val(docs[3]);
            let val1 = f64_from_fastfield_u64(val1, &self.field_type);
            let val2 = f64_from_fastfield_u64(val2, &self.field_type);
            let val3 = f64_from_fastfield_u64(val3, &self.field_type);
            let val4 = f64_from_fastfield_u64(val4, &self.field_type);
            self.stats.collect(val1);
            self.stats.collect(val2);
            self.stats.collect(val3);
            self.stats.collect(val4);
        }
        for &doc in iter.remainder() {
            if let Some(val) = field.get_val(doc) {
                let val = f64_from_fastfield_u64(val, &self.field_type);
                self.stats.collect(val);
            }
            let val = field.get_val(doc);
            let val = f64_from_fastfield_u64(val, &self.field_type);
            self.stats.collect(val);
        }
    }
}
@@ -227,7 +222,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let reader = index.reader()?;
        let searcher = reader.searcher();
@@ -305,7 +300,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -12,7 +12,7 @@
//!
//! ## Prerequisite
//! Currently aggregations work only on [fast fields](`crate::fastfield`). Single value fast fields
//! of type `u64`, `f64`, `i64` and fast fields on text fields.
//! of type `u64`, `f64`, `i64`, `date` and fast fields on text fields.
//!
//! ## Usage
//! To use aggregations, build an aggregation request by constructing
@@ -53,9 +53,10 @@
//! use tantivy::query::AllQuery;
//! use tantivy::aggregation::agg_result::AggregationResults;
//! use tantivy::IndexReader;
//! use tantivy::schema::Schema;
//!
//! # #[allow(dead_code)]
//! fn aggregate_on_index(reader: &IndexReader) {
//! fn aggregate_on_index(reader: &IndexReader, schema: Schema) {
//!     let agg_req: Aggregations = vec![
//!         (
//!             "average".to_string(),
@@ -67,7 +68,7 @@
//!     .into_iter()
//!     .collect();
//!
//!     let collector = AggregationCollector::from_aggs(agg_req, None);
//!     let collector = AggregationCollector::from_aggs(agg_req, None, schema);
//!
//!     let searcher = reader.searcher();
//!     let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
@@ -157,6 +158,7 @@ mod agg_req_with_accessor;
pub mod agg_result;
pub mod bucket;
mod collector;
mod date;
pub mod intermediate_agg_result;
pub mod metric;
mod segment_agg_result;
@@ -167,6 +169,7 @@ pub use collector::{
    AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
    MAX_BUCKET_COUNT,
};
pub(crate) use date::format_date;
use fastfield_codecs::MonotonicallyMappableToU64;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
@@ -283,11 +286,11 @@ impl Display for Key {
/// Inverse of `to_fastfield_u64`. Used to convert to `f64` for metrics.
///
/// # Panics
/// Only `u64`, `f64`, and `i64` are supported.
/// Only `u64`, `f64`, `date`, and `i64` are supported.
pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
    match field_type {
        Type::U64 => val as f64,
        Type::I64 => i64::from_u64(val) as f64,
        Type::I64 | Type::Date => i64::from_u64(val) as f64,
        Type::F64 => f64::from_u64(val),
        _ => {
            panic!("unexpected type {:?}. This should not happen", field_type)
@@ -295,10 +298,9 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
    }
}

/// Converts the `f64` value to fast field value space.
/// Converts the `f64` value to fast field value space, which is always u64.
///
/// If the fast field has `u64`, values are stored as `u64` in the fast field.
/// A `f64` value of e.g. `2.0` therefore needs to be converted to `1u64`.
/// If the fast field has `u64`, values are stored unchanged as `u64` in the fast field.
///
/// If the fast field has `f64`, values are converted and stored to `u64` using a
/// monotonic mapping.
@@ -308,7 +310,7 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
    match field_type {
        Type::U64 => Some(val as u64),
        Type::I64 => Some((val as i64).to_u64()),
        Type::I64 | Type::Date => Some((val as i64).to_u64()),
        Type::F64 => Some(val.to_u64()),
        _ => None,
    }
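Both conversion functions above lean on the order-preserving `u64` encoding from `fastfield_codecs`. A small sketch of the roundtrip, using the same `MonotonicallyMappableToU64` trait the diff imports:

```rust
use fastfield_codecs::MonotonicallyMappableToU64;

fn roundtrip_examples() {
    // i64 and f64 are stored as u64 and recovered exactly.
    assert_eq!(i64::from_u64((-3i64).to_u64()), -3i64);
    assert_eq!(f64::from_u64(2.0f64.to_u64()), 2.0f64);

    // The mapping preserves order, which is what makes range logic on the
    // u64 representation valid.
    assert!(1.5f64.to_u64() < 2.5f64.to_u64());
}
```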
@@ -317,6 +319,7 @@ pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
#[cfg(test)]
mod tests {
    use serde_json::Value;
    use time::OffsetDateTime;

    use super::agg_req::{Aggregation, Aggregations, BucketAggregation};
    use super::bucket::RangeAggregation;
@@ -332,7 +335,7 @@ mod tests {
    use crate::aggregation::DistributedAggregationCollector;
    use crate::query::{AllQuery, TermQuery};
    use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
    use crate::{Index, Term};
    use crate::{DateTime, Index, Term};

    fn get_avg_req(field_name: &str) -> Aggregation {
        Aggregation::Metric(MetricAggregation::Average(
@@ -358,7 +361,7 @@ mod tests {
        index: &Index,
        query: Option<(&str, &str)>,
    ) -> crate::Result<Value> {
        let collector = AggregationCollector::from_aggs(agg_req, None);
        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

        let reader = index.reader()?;
        let searcher = reader.searcher();
@@ -448,9 +451,9 @@ mod tests {
                text_field_id => term.to_string(),
                string_field_id => term.to_string(),
                score_field => i as u64,
                score_field_f64 => i as f64,
                score_field_f64 => i,
                score_field_i64 => i as i64,
                fraction_field => i as f64/100.0,
                fraction_field => i/100.0,
            ))?;
        }
        index_writer.commit()?;
@@ -552,10 +555,10 @@ mod tests {
            let searcher = reader.searcher();
            let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
            intermediate_agg_result
                .into_final_bucket_result(agg_req)
                .into_final_bucket_result(agg_req, &index.schema())
                .unwrap()
        } else {
            let collector = AggregationCollector::from_aggs(agg_req, None);
            let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

            let searcher = reader.searcher();
            searcher.search(&AllQuery, &collector).unwrap()
@@ -648,6 +651,7 @@ mod tests {
            .set_fast()
            .set_stored();
        let text_field = schema_builder.add_text_field("text", text_fieldtype);
        let date_field = schema_builder.add_date_field("date", FAST);
        schema_builder.add_text_field("dummy_text", STRING);
        let score_fieldtype =
            crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
@@ -665,6 +669,7 @@ mod tests {
        // writing the segment
        index_writer.add_document(doc!(
            text_field => "cool",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800).unwrap()),
            score_field => 1u64,
            score_field_f64 => 1f64,
            score_field_i64 => 1i64,
@@ -673,6 +678,7 @@ mod tests {
        ))?;
        index_writer.add_document(doc!(
            text_field => "cool",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
            score_field => 3u64,
            score_field_f64 => 3f64,
            score_field_i64 => 3i64,
@@ -681,18 +687,21 @@ mod tests {
        ))?;
        index_writer.add_document(doc!(
            text_field => "cool",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
            score_field => 5u64,
            score_field_f64 => 5f64,
            score_field_i64 => 5i64,
        ))?;
        index_writer.add_document(doc!(
            text_field => "nohit",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
            score_field => 6u64,
            score_field_f64 => 6f64,
            score_field_i64 => 6i64,
        ))?;
        index_writer.add_document(doc!(
            text_field => "cool",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
            score_field => 7u64,
            score_field_f64 => 7f64,
            score_field_i64 => 7i64,
@@ -700,12 +709,14 @@ mod tests {
        index_writer.commit()?;
        index_writer.add_document(doc!(
            text_field => "cool",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
            score_field => 11u64,
            score_field_f64 => 11f64,
            score_field_i64 => 11i64,
        ))?;
        index_writer.add_document(doc!(
            text_field => "cool",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
            score_field => 14u64,
            score_field_f64 => 14f64,
            score_field_i64 => 14i64,
@@ -713,6 +724,7 @@ mod tests {

        index_writer.add_document(doc!(
            text_field => "cool",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
            score_field => 44u64,
            score_field_f64 => 44.5f64,
            score_field_i64 => 44i64,
@@ -723,6 +735,7 @@ mod tests {
        // no hits segment
        index_writer.add_document(doc!(
            text_field => "nohit",
            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
            score_field => 44u64,
            score_field_f64 => 44.5f64,
            score_field_i64 => 44i64,
@@ -795,7 +808,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -995,9 +1008,10 @@ mod tests {
            // Test de/serialization roundtrip on intermediate_agg_result
            let res: IntermediateAggregationResults =
                serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap();
            res.into_final_bucket_result(agg_req.clone()).unwrap()
            res.into_final_bucket_result(agg_req.clone(), &index.schema())
                .unwrap()
        } else {
            let collector = AggregationCollector::from_aggs(agg_req.clone(), None);
            let collector = AggregationCollector::from_aggs(agg_req.clone(), None, index.schema());

            let searcher = reader.searcher();
            searcher.search(&term_query, &collector).unwrap()
@@ -1055,7 +1069,7 @@ mod tests {
        );

        // Test empty result set
        let collector = AggregationCollector::from_aggs(agg_req, None);
        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
        let searcher = reader.searcher();
        searcher.search(&query_with_no_hits, &collector).unwrap();

@@ -1120,7 +1134,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();

@@ -1233,7 +1247,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1264,7 +1278,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1295,7 +1309,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1334,7 +1348,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1363,7 +1377,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req, None);
        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1392,7 +1406,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req, None);
        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1429,7 +1443,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1464,7 +1478,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1503,7 +1517,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1533,7 +1547,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -1590,7 +1604,7 @@ mod tests {
        .into_iter()
        .collect();

        let collector = AggregationCollector::from_aggs(agg_req_1, None);
        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

        let searcher = reader.searcher();
        let agg_res: AggregationResults =
@@ -305,7 +305,7 @@ impl BucketCount {
    }
    pub(crate) fn add_count(&self, count: u32) {
        self.bucket_count
            .fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
            .fetch_add(count, std::sync::atomic::Ordering::Relaxed);
    }
    pub(crate) fn get_count(&self) -> u32 {
        self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
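The pattern behind `BucketCount` above: a shared relaxed atomic that collectors bump and then validate against a budget. A standalone sketch (names are illustrative, not the crate's exact types):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

struct BucketBudget {
    count: AtomicU32,
    max: u32,
}

impl BucketBudget {
    fn add_count(&self, count: u32) {
        // Relaxed is enough: only the accumulated total is compared to the limit.
        self.count.fetch_add(count, Ordering::Relaxed);
    }

    fn validate(&self) -> Result<(), String> {
        if self.count.load(Ordering::Relaxed) > self.max {
            return Err(
                "Aborting aggregation because too many buckets were created".to_string(),
            );
        }
        Ok(())
    }
}
```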
@@ -357,7 +357,7 @@ impl SegmentCollector for FacetSegmentCollector {
        let mut facet = vec![];
        let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
        // TODO handle errors.
        if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
        if facet_dict.ord_to_term(facet_ord, &mut facet).is_ok() {
            if let Ok(facet) = Facet::from_encoded(facet) {
                facet_counts.insert(facet, count);
            }
@@ -130,9 +130,7 @@ where

        let fast_field_reader = segment_reader
            .fast_fields()
            .typed_fast_field_reader(self.field)?
            .to_full()
            .expect("temp migration solution");
            .typed_fast_field_reader(self.field)?;

        let segment_collector = self
            .collector
@@ -112,11 +112,7 @@ impl Collector for HistogramCollector {
        _segment_local_id: crate::SegmentOrdinal,
        segment: &crate::SegmentReader,
    ) -> crate::Result<Self::Child> {
        let ff_reader = segment
            .fast_fields()
            .u64_lenient(self.field)?
            .to_full()
            .expect("temp migration solution");
        let ff_reader = segment.fast_fields().u64_lenient(self.field)?;
        Ok(SegmentHistogramCollector {
            histogram_computer: HistogramComputer {
                counts: vec![0; self.num_buckets],
@@ -170,7 +170,7 @@ pub trait Collector: Sync + Send {
        segment_ord: u32,
        reader: &SegmentReader,
    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
        let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
        let mut segment_collector = self.for_segment(segment_ord, reader)?;

        match (reader.alive_bitset(), self.requires_scoring()) {
            (Some(alive_bitset), true) => {
@@ -1,6 +1,6 @@
use std::sync::Arc;

use fastfield_codecs::OptionalColumn;
use fastfield_codecs::Column;

use super::*;
use crate::collector::{Count, FilterCollector, TopDocs};
@@ -160,7 +160,7 @@ pub struct FastFieldTestCollector {

pub struct FastFieldSegmentCollector {
    vals: Vec<u64>,
    reader: Arc<dyn OptionalColumn<u64>>,
    reader: Arc<dyn Column<u64>>,
}

impl FastFieldTestCollector {
@@ -202,9 +202,7 @@ impl SegmentCollector for FastFieldSegmentCollector {

    fn collect(&mut self, doc: DocId, _score: Score) {
        let val = self.reader.get_val(doc);
        if let Some(val) = val {
            self.vals.push(val);
        }
        self.vals.push(val);
    }

    fn harvest(self) -> Vec<u64> {
@@ -156,9 +156,7 @@ impl CustomScorer<u64> for ScorerByField {
        // The conversion will then happen only on the top-K docs.
        let ff_reader = segment_reader
            .fast_fields()
            .typed_fast_field_reader(self.field)?
            .to_full()
            .expect("temp migration solution");
            .typed_fast_field_reader(self.field)?;
        Ok(ScorerByFastFieldReader { ff_reader })
    }
}
@@ -460,7 +458,7 @@ impl TopDocs {
///
///           // We can now define our actual scoring function
///           move |doc: DocId, original_score: Score| {
///               let popularity: u64 = popularity_reader.get_val(doc).unwrap();
///               let popularity: u64 = popularity_reader.get_val(doc);
///               // Well.. For the sake of the example we use a simple logarithm
///               // function.
///               let popularity_boost_score = ((2u64 + popularity) as Score).log2();
@@ -569,8 +567,8 @@ impl TopDocs {
///
///             // We can now define our actual scoring function
///             move |doc: DocId| {
///                 let popularity: u64 = popularity_reader.get_val(doc).unwrap();
///                 let boosted: u64 = boosted_reader.get_val(doc).unwrap();
///                 let popularity: u64 = popularity_reader.get_val(doc);
///                 let boosted: u64 = boosted_reader.get_val(doc);
///                 // Scores do not have to be `f64` in tantivy.
///                 // Here we return a couple to get lexicographical order
///                 // for free.
@@ -149,7 +149,8 @@ impl IndexBuilder {
    /// Creates a new index using the [`RamDirectory`].
    ///
    /// The index will be allocated in anonymous memory.
    /// This should only be used for unit tests.
    /// This is useful for indexing a small set of documents,
    /// for instance in unit tests or a temporary in-memory index.
    pub fn create_in_ram(self) -> Result<Index, TantivyError> {
        let ram_directory = RamDirectory::create();
        self.create(ram_directory)
@@ -812,7 +813,7 @@ mod tests {
        let field = schema.get_field("num_likes").unwrap();
        let tempdir = TempDir::new().unwrap();
        let tempdir_path = PathBuf::from(tempdir.path());
        let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
        let index = Index::create_in_dir(tempdir_path, schema).unwrap();
        let reader = index
            .reader_builder()
            .reload_policy(ReloadPolicy::OnCommit)
@@ -200,10 +200,7 @@ impl InvertedIndexReader {

#[cfg(feature = "quickwit")]
impl InvertedIndexReader {
    pub(crate) async fn get_term_info_async(
        &self,
        term: &Term,
    ) -> crate::AsyncIoResult<Option<TermInfo>> {
    pub(crate) async fn get_term_info_async(&self, term: &Term) -> io::Result<Option<TermInfo>> {
        self.termdict.get_async(term.value_bytes()).await
    }

@@ -211,12 +208,8 @@ impl InvertedIndexReader {
    /// This method is for an advanced usage only.
    ///
    /// Most users should prefer using [`Self::read_postings()`] instead.
    pub async fn warm_postings(
        &self,
        term: &Term,
        with_positions: bool,
    ) -> crate::AsyncIoResult<()> {
        let term_info_opt = self.get_term_info_async(term).await?;
    pub async fn warm_postings(&self, term: &Term, with_positions: bool) -> io::Result<()> {
        let term_info_opt: Option<TermInfo> = self.get_term_info_async(term).await?;
        if let Some(term_info) = term_info_opt {
            self.postings_file_slice
                .read_bytes_slice_async(term_info.postings_range.clone())
@@ -234,7 +227,7 @@ impl InvertedIndexReader {
    /// This method is for an advanced usage only.
    ///
    /// If you know which terms to pre-load, prefer using [`Self::warm_postings`] instead.
    pub async fn warm_postings_full(&self, with_positions: bool) -> crate::AsyncIoResult<()> {
    pub async fn warm_postings_full(&self, with_positions: bool) -> io::Result<()> {
        self.postings_file_slice.read_bytes_async().await?;
        if with_positions {
            self.positions_file_slice.read_bytes_async().await?;
@@ -243,7 +236,7 @@ impl InvertedIndexReader {
    }

    /// Returns the number of documents containing the term asynchronously.
    pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
    pub async fn doc_freq_async(&self, term: &Term) -> io::Result<u32> {
        Ok(self
            .get_term_info_async(term)
            .await?
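A hedged usage sketch for the reworked async API (gated behind the `quickwit` feature and needing an async runtime; assumes `InvertedIndexReader` is reachable via the crate-root re-export):

```rust
use tantivy::{InvertedIndexReader, Term};

// Pre-load postings (and positions) for a term before searching, so the
// actual search hits warmed caches; errors are now plain io::Result.
async fn warm_term(inv: &InvertedIndexReader, term: &Term) -> std::io::Result<()> {
    inv.warm_postings(term, /* with_positions */ true).await
}
```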
@@ -75,7 +75,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {

        let mut prev_offset = 0;
        for (file_addr, offset) in self.offsets {
            VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
            VInt(offset - prev_offset).serialize(&mut self.write)?;
            file_addr.serialize(&mut self.write)?;
            prev_offset = offset;
        }
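The loop above stores offsets as deltas in variable-length integers, so consecutive close offsets cost a byte each. A sketch of that encoding under one common VInt convention (7 data bits per byte, stop bit on the last byte; tantivy's exact `VInt` byte layout may differ):

```rust
// Hypothetical minimal VInt writer for illustration only.
fn write_vint(mut v: u64, out: &mut Vec<u8>) {
    loop {
        let byte = (v & 0x7f) as u8;
        v >>= 7;
        if v == 0 {
            out.push(byte | 0x80); // stop bit marks the final byte
            return;
        }
        out.push(byte);
    }
}

fn write_offsets(offsets: &[u64], out: &mut Vec<u8>) {
    let mut prev = 0u64;
    for &off in offsets {
        write_vint(off - prev, out); // delta, not absolute offset
        prev = off;
    }
}
```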
@@ -38,7 +38,7 @@ impl Footer {
        counting_write.write_all(serde_json::to_string(&self)?.as_ref())?;
        let footer_payload_len = counting_write.written_bytes();
        BinarySerializable::serialize(&(footer_payload_len as u32), write)?;
        BinarySerializable::serialize(&(FOOTER_MAGIC_NUMBER as u32), write)?;
        BinarySerializable::serialize(&FOOTER_MAGIC_NUMBER, write)?;
        Ok(())
    }

@@ -90,9 +90,10 @@ impl Footer {
        ));
    }

    let footer: Footer = serde_json::from_slice(&file.read_bytes_slice(
        file.len() - total_footer_size..file.len() - footer_metadata_len as usize,
    )?)?;
    let footer: Footer =
        serde_json::from_slice(&file.read_bytes_slice(
            file.len() - total_footer_size..file.len() - footer_metadata_len,
        )?)?;

    let body = file.slice_to(file.len() - total_footer_size);
    Ok((footer, body))
@@ -388,7 +388,7 @@ mod tests_mmap_specific {
        let tempdir_path = PathBuf::from(tempdir.path());
        let living_files = HashSet::new();

        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
        let mmap_directory = MmapDirectory::open(tempdir_path).unwrap();
        let mut managed_directory = ManagedDirectory::wrap(Box::new(mmap_directory)).unwrap();
        let mut write = managed_directory.open_write(test_path1).unwrap();
        write.write_all(&[0u8, 1u8]).unwrap();

@@ -341,7 +341,7 @@ impl Directory for MmapDirectory {
    /// removed before the file is deleted.
    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        let full_path = self.resolve_path(path);
        fs::remove_file(&full_path).map_err(|e| {
        fs::remove_file(full_path).map_err(|e| {
            if e.kind() == io::ErrorKind::NotFound {
                DeleteError::FileDoesNotExist(path.to_owned())
            } else {
@@ -395,7 +395,7 @@ impl Directory for MmapDirectory {
    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
        let full_path = self.resolve_path(path);
        let mut buffer = Vec::new();
        match File::open(&full_path) {
        match File::open(full_path) {
            Ok(mut file) => {
                file.read_to_end(&mut buffer).map_err(|io_error| {
                    OpenReadError::wrap_io_error(io_error, path.to_path_buf())
@@ -425,7 +425,7 @@ impl Directory for MmapDirectory {
        let file: File = OpenOptions::new()
            .write(true)
            .create(true) //< if the file does not exist yet, create it.
            .open(&full_path)
            .open(full_path)
            .map_err(LockError::wrap_io_error)?;
        if lock.is_blocking {
            file.lock_exclusive().map_err(LockError::wrap_io_error)?;
@@ -5,7 +5,6 @@ mod mmap_directory;
|
||||
|
||||
mod directory;
|
||||
mod directory_lock;
|
||||
mod file_slice;
|
||||
mod file_watcher;
|
||||
mod footer;
|
||||
mod managed_directory;
|
||||
@@ -20,13 +19,13 @@ mod composite_file;
|
||||
use std::io::BufWriter;
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub use common::file_slice::{FileHandle, FileSlice};
|
||||
pub use common::{AntiCallToken, TerminatingWrite};
|
||||
pub use ownedbytes::OwnedBytes;
|
||||
|
||||
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
||||
pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
|
||||
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
|
||||
pub use self::file_slice::{FileHandle, FileSlice};
|
||||
pub use self::ram_directory::RamDirectory;
|
||||
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
|
||||
|
||||
|
||||
22
src/error.rs
22
src/error.rs
@@ -104,28 +104,6 @@ pub enum TantivyError {
|
||||
InternalError(String),
|
||||
}
|
||||
|
||||
#[cfg(feature = "quickwit")]
|
||||
#[derive(Error, Debug)]
|
||||
#[doc(hidden)]
|
||||
pub enum AsyncIoError {
|
||||
#[error("io::Error `{0}`")]
|
||||
Io(#[from] io::Error),
|
||||
#[error("Asynchronous API is unsupported by this directory")]
|
||||
AsyncUnsupported,
|
||||
}
|
||||
|
||||
#[cfg(feature = "quickwit")]
|
||||
impl From<AsyncIoError> for TantivyError {
|
||||
fn from(async_io_err: AsyncIoError) -> Self {
|
||||
match async_io_err {
|
||||
AsyncIoError::Io(io_err) => TantivyError::from(io_err),
|
||||
AsyncIoError::AsyncUnsupported => {
|
||||
TantivyError::SystemError(format!("{:?}", async_io_err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for TantivyError {
|
||||
fn from(io_err: io::Error) -> TantivyError {
|
||||
TantivyError::IoError(Arc::new(io_err))
|
||||
|
||||
@@ -64,9 +64,7 @@ impl FacetReader {
|
||||
facet_ord: TermOrdinal,
|
||||
output: &mut Facet,
|
||||
) -> crate::Result<()> {
|
||||
let found_term = self
|
||||
.term_dict
|
||||
.ord_to_term(facet_ord as u64, &mut self.buffer)?;
|
||||
let found_term = self.term_dict.ord_to_term(facet_ord, &mut self.buffer)?;
|
||||
assert!(found_term, "Term ordinal {} no found.", facet_ord);
|
||||
let facet_str = str::from_utf8(&self.buffer[..])
|
||||
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
|
||||
|
||||
@@ -30,8 +30,8 @@ pub use self::multivalued::{
|
||||
MultiValueIndex, MultiValueU128FastFieldWriter, MultiValuedFastFieldReader,
|
||||
MultiValuedFastFieldWriter, MultiValuedU128FastFieldReader,
|
||||
};
|
||||
pub(crate) use self::readers::type_and_cardinality;
|
||||
pub use self::readers::FastFieldReaders;
|
||||
pub(crate) use self::readers::{type_and_cardinality, FastType};
|
||||
pub use self::serializer::{Column, CompositeFastFieldSerializer};
|
||||
use self::writer::unexpected_value;
|
||||
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
|
||||
@@ -207,10 +207,10 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 25);
|
||||
assert_eq!(file.len(), 34);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let fast_field_bytes = composite_file.open_read(*FIELD).unwrap().read_bytes()?;
|
||||
let fast_field_reader = open::<u64>(fast_field_bytes)?.to_full().unwrap();
|
||||
let fast_field_reader = open::<u64>(fast_field_bytes)?;
|
||||
assert_eq!(fast_field_reader.get_val(0), 13u64);
|
||||
assert_eq!(fast_field_reader.get_val(1), 14u64);
|
||||
assert_eq!(fast_field_reader.get_val(2), 2u64);
|
||||
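This test hunk shows the API change that repeats throughout the rest of this compare: `open::<T>()` now hands back a usable column directly, so the transitional `.to_full().unwrap()` hop disappears and `get_val` yields the value itself rather than an `Option`. A hedged sketch of the resulting read path, assuming the `fastfield_codecs` and `ownedbytes` crates from this branch and that `open` surfaces an `io::Result` as it appears to here:

```rust
use fastfield_codecs::{open, Column};
use ownedbytes::OwnedBytes;

// Sketch only: open a single-valued u64 fast field column and read from it.
fn first_three(bytes: OwnedBytes) -> std::io::Result<(u64, u64, u64)> {
    let column = open::<u64>(bytes)?; // no `.to_full()` step anymore
    Ok((column.get_val(0), column.get_val(1), column.get_val(2)))
}
```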
@@ -256,14 +256,14 @@ mod tests {
serializer.close()?;
}
let file = directory.open_read(path)?;
assert_eq!(file.len(), 53);
assert_eq!(file.len(), 62);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite
.open_read(*FIELD)
.unwrap()
.read_bytes()?;
let fast_field_reader = open::<u64>(data)?.to_full().unwrap();
let fast_field_reader = open::<u64>(data)?;
assert_eq!(fast_field_reader.get_val(0), 4u64);
assert_eq!(fast_field_reader.get_val(1), 14_082_001u64);
assert_eq!(fast_field_reader.get_val(2), 3_052u64);
@@ -297,14 +297,14 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 26);
assert_eq!(file.len(), 35);
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite
.open_read(*FIELD)
.unwrap()
.read_bytes()?;
let fast_field_reader = open::<u64>(data)?.to_full().unwrap();
let fast_field_reader = open::<u64>(data)?;
for doc in 0..10_000 {
assert_eq!(fast_field_reader.get_val(doc), 100_000u64);
}
@@ -336,14 +336,14 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 80040);
assert_eq!(file.len(), 80049);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite
.open_read(*FIELD)
.unwrap()
.read_bytes()?;
let fast_field_reader = open::<u64>(data)?.to_full().unwrap();
let fast_field_reader = open::<u64>(data)?;
assert_eq!(fast_field_reader.get_val(0), 0u64);
for doc in 1..10_001 {
assert_eq!(
@@ -378,7 +378,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 40_usize);
assert_eq!(file.len(), 49_usize);

{
let fast_fields_composite = CompositeFile::open(&file)?;
@@ -386,7 +386,7 @@ mod tests {
.open_read(i64_field)
.unwrap()
.read_bytes()?;
let fast_field_reader = open::<i64>(data)?.to_full().unwrap();
let fast_field_reader = open::<i64>(data)?;

assert_eq!(fast_field_reader.min_value(), -100i64);
assert_eq!(fast_field_reader.max_value(), 9_999i64);
@@ -429,7 +429,7 @@ mod tests {
.open_read(i64_field)
.unwrap()
.read_bytes()?;
let fast_field_reader = open::<i64>(data)?.to_full().unwrap();
let fast_field_reader = open::<i64>(data)?;
assert_eq!(fast_field_reader.get_val(0), 0i64);
}
Ok(())
@@ -470,10 +470,10 @@ mod tests {
.open_read(*FIELD)
.unwrap()
.read_bytes()?;
let fast_field_reader = open::<u64>(data)?.to_full().unwrap();
let fast_field_reader = open::<u64>(data)?;

for a in 0..n {
assert_eq!(fast_field_reader.get_val(a as u32), permutation[a as usize]);
assert_eq!(fast_field_reader.get_val(a as u32), permutation[a]);
}
}
Ok(())
@@ -763,28 +763,19 @@ mod tests {
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
let mut dates = vec![];
{
assert_eq!(
date_fast_field.get_val(0).unwrap().into_timestamp_micros(),
1i64
);
assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
dates_fast_field.get_vals(0u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].into_timestamp_micros(), 2i64);
assert_eq!(dates[1].into_timestamp_micros(), 3i64);
}
{
assert_eq!(
date_fast_field.get_val(1).unwrap().into_timestamp_micros(),
4i64
);
assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
dates_fast_field.get_vals(1u32, &mut dates);
assert!(dates.is_empty());
}
{
assert_eq!(
date_fast_field.get_val(2).unwrap().into_timestamp_micros(),
0i64
);
assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
dates_fast_field.get_vals(2u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].into_timestamp_micros(), 5i64);
@@ -831,10 +822,10 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 24);
assert_eq!(file.len(), 33);
let composite_file = CompositeFile::open(&file)?;
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?.to_full().unwrap();
let fast_field_reader = open::<bool>(data)?;
assert_eq!(fast_field_reader.get_val(0), true);
assert_eq!(fast_field_reader.get_val(1), false);
assert_eq!(fast_field_reader.get_val(2), true);
@@ -869,10 +860,10 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 36);
assert_eq!(file.len(), 45);
let composite_file = CompositeFile::open(&file)?;
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?.to_full().unwrap();
let fast_field_reader = open::<bool>(data)?;
for i in 0..25 {
assert_eq!(fast_field_reader.get_val(i * 2), true);
assert_eq!(fast_field_reader.get_val(i * 2 + 1), false);
@@ -901,9 +892,9 @@ mod tests {
}
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
assert_eq!(file.len(), 23);
assert_eq!(file.len(), 32);
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?.to_full().unwrap();
let fast_field_reader = open::<bool>(data)?;
assert_eq!(fast_field_reader.get_val(0), false);

Ok(())
@@ -935,10 +926,10 @@ mod tests {
pub fn test_gcd_date() -> crate::Result<()> {
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
assert_eq!(size_prec_sec, 28 + (1_000 * 13) / 8); // 13 bits per val = ceil(log_2(number of seconds in 2hours);
assert_eq!(size_prec_sec, 5 + 4 + 28 + (1_000 * 13) / 8); // 13 bits per val = ceil(log_2(number of seconds in 2hours);
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
assert_eq!(size_prec_micro, 26 + (1_000 * 33) / 8); // 33 bits per val = ceil(log_2(number of microsecsseconds in 2hours);
assert_eq!(size_prec_micro, 5 + 4 + 26 + (1_000 * 33) / 8); // 33 bits per val = ceil(log_2(number of microsecsseconds in 2hours);
Ok(())
}

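Note the pattern across the `file.len()` expectations above: each grows by exactly 9 bytes, the same `5 + 4` constant the `test_gcd_date` asserts spell out. Presumably this is a fixed per-column overhead introduced by the new codec layout on this branch; the 5/4 split is an assumption, since the diff does not name the two fields. The seconds-precision arithmetic in isolation:

```rust
fn main() {
    // Assumed fixed overhead (5 + 4 bytes) plus the old estimate: 28 bytes of
    // codec metadata and 13 bits per value for 1_000 values, where
    // 13 = ceil(log2(number of seconds in 2 hours)).
    let expected = 5 + 4 + 28 + (1_000 * 13) / 8;
    assert_eq!(expected, 1_662);
}
```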
@@ -971,9 +962,7 @@ mod tests {
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let len = file.len();
let test_fastfield = open::<DateTime>(file.read_bytes()?)?
.to_full()
.expect("temp migration solution");
let test_fastfield = open::<DateTime>(file.read_bytes()?)?;

for (i, time) in times.iter().enumerate() {
assert_eq!(test_fastfield.get_val(i as u32), time.truncate(precision));

@@ -533,17 +533,14 @@ mod bench {
.unwrap()
.read_bytes()
.unwrap();
let idx_reader = fastfield_codecs::open(data_idx).unwrap().to_full().unwrap();
let idx_reader = fastfield_codecs::open(data_idx).unwrap();

let data_vals = fast_fields_composite
.open_read_with_idx(field, 1)
.unwrap()
.read_bytes()
.unwrap();
let vals_reader = fastfield_codecs::open(data_vals)
.unwrap()
.to_full()
.unwrap();
let vals_reader = fastfield_codecs::open(data_vals).unwrap();
let fast_field_reader = MultiValuedFastFieldReader::open(idx_reader, vals_reader);
b.iter(|| {
let mut sum = 0u64;

@@ -264,7 +264,7 @@ fn iter_remapped_multivalue_index<'a, C: Column>(
std::iter::once(0).chain(doc_id_map.iter_old_doc_ids().map(move |old_doc| {
let num_vals_for_doc = column.get_val(old_doc + 1) - column.get_val(old_doc);
offset += num_vals_for_doc;
offset as u64
offset
}))
}

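The last hunk drops another no-op cast, but the surrounding technique deserves a note: the multivalue index column stores one running offset per document (plus a leading zero), so remapping doc ids means re-accumulating per-document value counts in the new order. A self-contained sketch, with plain slices standing in for tantivy's `Column`:

```rust
// Rebuild the prefix-sum index for a new doc ordering. `old_index[d]` is the
// offset where doc d's values start, so `old_index[d + 1] - old_index[d]`
// is its value count.
fn remapped_offsets(old_index: &[u64], new_to_old: &[u32]) -> Vec<u64> {
    let mut offset = 0u64;
    std::iter::once(0)
        .chain(new_to_old.iter().map(|&old_doc| {
            let num_vals = old_index[old_doc as usize + 1] - old_index[old_doc as usize];
            offset += num_vals;
            offset
        }))
        .collect()
}

fn main() {
    // doc0 has 2 values, doc1 has 0, doc2 has 3; new order is [2, 0, 1].
    let old = [0, 2, 2, 5];
    assert_eq!(remapped_offsets(&old, &[2, 0, 1]), vec![0, 3, 5, 5]);
}
```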
@@ -1,7 +1,7 @@
use std::net::Ipv6Addr;
use std::sync::Arc;

use fastfield_codecs::{open, open_u128, Column, OptionalColumn};
use fastfield_codecs::{open, open_u128, Column};

use super::multivalued::MultiValuedU128FastFieldReader;
use crate::directory::{CompositeFile, FileSlice};
@@ -118,7 +118,7 @@ impl FastFieldReaders {
&self,
field: Field,
index: usize,
) -> crate::Result<Arc<dyn OptionalColumn<TFastValue>>> {
) -> crate::Result<Arc<dyn Column<TFastValue>>> {
let fast_field_slice = self.fast_field_data(field, index)?;
let bytes = fast_field_slice.read_bytes()?;
let column = fastfield_codecs::open(bytes)?;
@@ -128,7 +128,7 @@ impl FastFieldReaders {
pub(crate) fn typed_fast_field_reader<TFastValue: FastValue>(
&self,
field: Field,
) -> crate::Result<Arc<dyn OptionalColumn<TFastValue>>> {
) -> crate::Result<Arc<dyn Column<TFastValue>>> {
self.typed_fast_field_reader_with_idx(field, 0)
}

@@ -138,20 +138,13 @@ impl FastFieldReaders {
) -> crate::Result<MultiValuedFastFieldReader<TFastValue>> {
let idx_reader = self.typed_fast_field_reader(field)?;
let vals_reader = self.typed_fast_field_reader_with_idx(field, 1)?;
Ok(MultiValuedFastFieldReader::open(
idx_reader
.to_full()
.expect("multivalue fast field are always full"),
vals_reader
.to_full()
.expect("multivalue fast field are always full"),
))
Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
}

/// Returns the `u64` fast field reader reader associated with `field`.
///
/// If `field` is not a u64 fast field, this method returns an Error.
pub fn u64(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<u64>>> {
pub fn u64(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {
self.check_type(field, FastType::U64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}
@@ -159,7 +152,7 @@ impl FastFieldReaders {
/// Returns the `ip` fast field reader reader associated to `field`.
///
/// If `field` is not a u128 fast field, this method returns an Error.
pub fn ip_addr(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<Ipv6Addr>>> {
pub fn ip_addr(&self, field: Field) -> crate::Result<Arc<dyn Column<Ipv6Addr>>> {
self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
let bytes = self.fast_field_data(field, 0)?.read_bytes()?;
Ok(open_u128::<Ipv6Addr>(bytes)?)
@@ -173,15 +166,10 @@ impl FastFieldReaders {
field: Field,
) -> crate::Result<MultiValuedU128FastFieldReader<Ipv6Addr>> {
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
let idx_reader: Arc<dyn Column<u64>> = self
.typed_fast_field_reader(field)?
.to_full()
.expect("multivalue fast fields are always full");
let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;

let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
let vals_reader = open_u128::<Ipv6Addr>(bytes)?
.to_full()
.expect("multivalue fields are always full");
let vals_reader = open_u128::<Ipv6Addr>(bytes)?;

Ok(MultiValuedU128FastFieldReader::open(
idx_reader,
@@ -191,9 +179,8 @@ impl FastFieldReaders {

/// Returns the `u128` fast field reader reader associated to `field`.
///
/// If `field` is not a u128 base type fast field, this method returns an Error.
/// Ip addresses use u128 as base type.
pub(crate) fn u128(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<u128>>> {
/// If `field` is not a u128 fast field, this method returns an Error.
pub(crate) fn u128(&self, field: Field) -> crate::Result<Arc<dyn Column<u128>>> {
self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
let bytes = self.fast_field_data(field, 0)?.read_bytes()?;
Ok(open_u128::<u128>(bytes)?)
@@ -204,15 +191,10 @@ impl FastFieldReaders {
/// If `field` is not a u128 multi-valued fast field, this method returns an Error.
pub fn u128s(&self, field: Field) -> crate::Result<MultiValuedU128FastFieldReader<u128>> {
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
let idx_reader: Arc<dyn Column<u64>> = self
.typed_fast_field_reader(field)?
.to_full()
.expect("multivalue fast fields are always full");
let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;

let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
let vals_reader = open_u128::<u128>(bytes)?
.to_full()
.expect("multivalue fast fields are always full");
let vals_reader = open_u128::<u128>(bytes)?;

Ok(MultiValuedU128FastFieldReader::open(
idx_reader,
@@ -225,14 +207,14 @@ impl FastFieldReaders {
///
/// If not, the fastfield reader will returns the u64-value associated with the original
/// FastValue.
pub fn u64_lenient(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<u64>>> {
pub fn u64_lenient(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {
self.typed_fast_field_reader(field)
}

/// Returns the `i64` fast field reader reader associated with `field`.
///
/// If `field` is not a i64 fast field, this method returns an Error.
pub fn i64(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<i64>>> {
pub fn i64(&self, field: Field) -> crate::Result<Arc<dyn Column<i64>>> {
self.check_type(field, FastType::I64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}
@@ -240,7 +222,7 @@ impl FastFieldReaders {
/// Returns the `date` fast field reader reader associated with `field`.
///
/// If `field` is not a date fast field, this method returns an Error.
pub fn date(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<DateTime>>> {
pub fn date(&self, field: Field) -> crate::Result<Arc<dyn Column<DateTime>>> {
self.check_type(field, FastType::Date, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}
@@ -248,7 +230,7 @@ impl FastFieldReaders {
/// Returns the `f64` fast field reader reader associated with `field`.
///
/// If `field` is not a f64 fast field, this method returns an Error.
pub fn f64(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<f64>>> {
pub fn f64(&self, field: Field) -> crate::Result<Arc<dyn Column<f64>>> {
self.check_type(field, FastType::F64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}
@@ -256,7 +238,7 @@ impl FastFieldReaders {
/// Returns the `bool` fast field reader reader associated with `field`.
///
/// If `field` is not a bool fast field, this method returns an Error.
pub fn bool(&self, field: Field) -> crate::Result<Arc<dyn OptionalColumn<bool>>> {
pub fn bool(&self, field: Field) -> crate::Result<Arc<dyn Column<bool>>> {
self.check_type(field, FastType::Bool, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}
@@ -327,12 +309,7 @@ impl FastFieldReaders {
let fast_field_idx_bytes = fast_field_idx_file.read_bytes()?;
let idx_reader = open(fast_field_idx_bytes)?;
let data = self.fast_field_data(field, 1)?;
BytesFastFieldReader::open(
idx_reader
.to_full()
.expect("multivalue fields are always full"),
data,
)
BytesFastFieldReader::open(idx_reader, data)
} else {
Err(FastFieldNotAvailableError::new(field_entry).into())
}

@@ -360,20 +360,10 @@ impl U128FastFieldWriter {
.map(|idx| self.vals[idx as usize])
};

serializer.create_u128_fast_field_with_idx(
self.field,
iter_gen,
self.val_count as u32,
0,
)?;
serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
} else {
let iter_gen = || self.vals.iter().cloned();
serializer.create_u128_fast_field_with_idx(
self.field,
iter_gen,
self.val_count as u32,
0,
)?;
serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
}

Ok(())

@@ -252,8 +252,8 @@ mod tests {
&demux_mapping,
target_settings,
vec![
Box::new(RamDirectory::default()),
Box::new(RamDirectory::default()),
Box::<RamDirectory>::default(),
Box::<RamDirectory>::default(),
],
)?;


@@ -465,9 +465,9 @@ mod tests_indexsorting {
let my_number = index.schema().get_field("my_number").unwrap();

let fast_field = fast_fields.u64(my_number).unwrap();
assert_eq!(fast_field.get_val(0), Some(10u64));
assert_eq!(fast_field.get_val(1), Some(20u64));
assert_eq!(fast_field.get_val(2), Some(30u64));
assert_eq!(fast_field.get_val(0), 10u64);
assert_eq!(fast_field.get_val(1), 20u64);
assert_eq!(fast_field.get_val(2), 30u64);

let multi_numbers = index.schema().get_field("multi_numbers").unwrap();
let multifield = fast_fields.u64s(multi_numbers).unwrap();

@@ -152,7 +152,7 @@ pub(crate) fn advance_deletes(
let num_deleted_docs = max_doc - num_alive_docs;
if num_deleted_docs > num_deleted_docs_before {
// There are new deletes. We need to write a new delete file.
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
segment = segment.with_delete_meta(num_deleted_docs, target_opstamp);
let mut alive_doc_file = segment.open_write(SegmentComponent::Delete)?;
write_alive_bitset(&alive_bitset, &mut alive_doc_file)?;
alive_doc_file.terminate()?;
@@ -984,7 +984,7 @@ mod tests {
"LogMergePolicy { min_num_segments: 8, max_docs_before_merge: 10000000, \
min_layer_size: 10000, level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }"
);
let merge_policy = Box::new(NoMergePolicy::default());
let merge_policy = Box::<NoMergePolicy>::default();
index_writer.set_merge_policy(merge_policy);
assert_eq!(
format!("{:?}", index_writer.get_merge_policy()),
@@ -1467,7 +1467,7 @@ mod tests {
let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.map(|doc| fast_field_reader.get_val(doc).unwrap())
.map(|doc| fast_field_reader.get_val(doc))
.collect();
assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 1, 0]);
Ok(())
@@ -1528,7 +1528,7 @@ mod tests {
let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.map(|doc| fast_field_reader.get_val(doc).unwrap())
.map(|doc| fast_field_reader.get_val(doc))
.collect();
assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 2, 0]);
Ok(())
@@ -1777,12 +1777,7 @@ mod tests {
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader
.fast_fields()
.u64(id_field)
.unwrap()
.to_full()
.unwrap();
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get_val(doc))
@@ -1793,12 +1788,7 @@ mod tests {
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader
.fast_fields()
.u64(id_field)
.unwrap()
.to_full()
.unwrap();
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get_val(doc))
@@ -1823,8 +1813,8 @@ mod tests {
}

let num_docs_expected = expected_ids_and_num_occurrences
.iter()
.map(|(_, id_occurrences)| *id_occurrences as usize)
.values()
.map(|id_occurrences| *id_occurrences as usize)
.sum::<usize>();
assert_eq!(searcher.num_docs() as usize, num_docs_expected);
assert_eq!(old_searcher.num_docs() as usize, num_docs_expected);
@@ -1874,7 +1864,7 @@ mod tests {
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().ip_addr(ip_field).unwrap();
segment_reader.doc_ids_alive().flat_map(move |doc| {
let val = ff_reader.get_val(doc).unwrap(); // TODO handle null
let val = ff_reader.get_val(doc);
if val == Ipv6Addr::from_u128(0) {
// TODO Fix null handling
None
@@ -1931,7 +1921,7 @@ mod tests {
ff_reader.get_vals(doc, &mut vals);
assert_eq!(vals.len(), 2);
assert_eq!(vals[0], vals[1]);
assert_eq!(id_reader.get_val(doc).unwrap(), vals[0]);
assert_eq!(id_reader.get_val(doc), vals[0]);

let mut bool_vals = vec![];
bool_ff_reader.get_vals(doc, &mut bool_vals);
@@ -2127,7 +2117,7 @@ mod tests {
facet_reader
.facet_from_ord(facet_ords[0], &mut facet)
.unwrap();
let id = ff_reader.get_val(doc_id).unwrap();
let id = ff_reader.get_val(doc_id);
let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string()));

assert_eq!(facet, facet_expected);

@@ -67,11 +67,12 @@ pub(crate) fn index_json_values<'a>(
doc: DocId,
json_values: impl Iterator<Item = crate::Result<&'a serde_json::Map<String, serde_json::Value>>>,
text_analyzer: &TextAnalyzer,
expand_dots_enabled: bool,
term_buffer: &mut Term,
postings_writer: &mut dyn PostingsWriter,
ctx: &mut IndexingContext,
) -> crate::Result<()> {
let mut json_term_writer = JsonTermWriter::wrap(term_buffer);
let mut json_term_writer = JsonTermWriter::wrap(term_buffer, expand_dots_enabled);
let mut positions_per_path: IndexingPositionsPerPath = Default::default();
for json_value_res in json_values {
let json_value = json_value_res?;
@@ -259,6 +260,7 @@ pub(crate) fn set_string_and_get_terms(
pub struct JsonTermWriter<'a> {
term_buffer: &'a mut Term,
path_stack: Vec<usize>,
expand_dots_enabled: bool,
}

/// Splits a json path supplied to the query parser in such a way that
@@ -298,23 +300,25 @@ impl<'a> JsonTermWriter<'a> {
pub fn from_field_and_json_path(
field: Field,
json_path: &str,
expand_dots_enabled: bool,
term_buffer: &'a mut Term,
) -> Self {
term_buffer.set_field_and_type(field, Type::Json);
let mut json_term_writer = Self::wrap(term_buffer);
let mut json_term_writer = Self::wrap(term_buffer, expand_dots_enabled);
for segment in split_json_path(json_path) {
json_term_writer.push_path_segment(&segment);
}
json_term_writer
}

pub fn wrap(term_buffer: &'a mut Term) -> Self {
pub fn wrap(term_buffer: &'a mut Term, expand_dots_enabled: bool) -> Self {
term_buffer.clear_with_type(Type::Json);
let mut path_stack = Vec::with_capacity(10);
path_stack.push(0);
Self {
term_buffer,
path_stack,
expand_dots_enabled,
}
}

@@ -336,11 +340,24 @@ impl<'a> JsonTermWriter<'a> {
self.trim_to_end_of_path();
let buffer = self.term_buffer.value_bytes_mut();
let buffer_len = buffer.len();

if self.path_stack.len() > 1 {
buffer[buffer_len - 1] = JSON_PATH_SEGMENT_SEP;
}
self.term_buffer.append_bytes(segment.as_bytes());
self.term_buffer.append_bytes(&[JSON_PATH_SEGMENT_SEP]);
if self.expand_dots_enabled && segment.as_bytes().contains(&b'.') {
// We need to replace `.` by JSON_PATH_SEGMENT_SEP.
self.term_buffer
.append_bytes(segment.as_bytes())
.iter_mut()
.for_each(|byte| {
if *byte == b'.' {
*byte = JSON_PATH_SEGMENT_SEP;
}
});
} else {
self.term_buffer.append_bytes(segment.as_bytes());
}
self.term_buffer.push_byte(JSON_PATH_SEGMENT_SEP);
self.path_stack.push(self.term_buffer.len_bytes());
}

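A minimal standalone sketch of the expand-dots behavior added above, with `SEP` mirroring `JSON_PATH_SEGMENT_SEP` (`0x01` judging by the tests that follow): when expansion is enabled, a dot inside a pushed segment is rewritten to the separator byte, so `"color.hue"` indexes like the nested path color > hue.

```rust
const SEP: u8 = 0x01; // stand-in for JSON_PATH_SEGMENT_SEP

// Simplified model of push_path_segment on a plain byte buffer.
fn push_segment(path: &mut Vec<u8>, segment: &str, expand_dots: bool) {
    if !path.is_empty() {
        path.push(SEP); // separate from the previous segment
    }
    let start = path.len();
    path.extend_from_slice(segment.as_bytes());
    if expand_dots {
        for byte in &mut path[start..] {
            if *byte == b'.' {
                *byte = SEP; // "a.b" behaves like the nested path a > b
            }
        }
    }
}

fn main() {
    let mut path = Vec::new();
    push_segment(&mut path, "hello", true);
    push_segment(&mut path, "color.hue", true);
    assert_eq!(path, b"hello\x01color\x01hue");
}
```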
@@ -391,7 +408,7 @@ mod tests {
fn test_json_writer() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("attributes");
json_writer.push_path_segment("color");
json_writer.set_str("red");
@@ -425,7 +442,7 @@ mod tests {
fn test_string_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_str("red");
assert_eq!(
@@ -438,7 +455,7 @@ mod tests {
fn test_i64_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(-4i64);
assert_eq!(
@@ -451,7 +468,7 @@ mod tests {
fn test_u64_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(4u64);
assert_eq!(
@@ -464,7 +481,7 @@ mod tests {
fn test_f64_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(4.0f64);
assert_eq!(
@@ -477,7 +494,7 @@ mod tests {
fn test_bool_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(true);
assert_eq!(
@@ -490,7 +507,7 @@ mod tests {
fn test_push_after_set_path_segment() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("attribute");
json_writer.set_str("something");
json_writer.push_path_segment("color");
@@ -505,7 +522,7 @@ mod tests {
fn test_pop_segment() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.push_path_segment("hue");
json_writer.pop_path_segment();
@@ -520,7 +537,7 @@ mod tests {
fn test_json_writer_path() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
assert_eq!(json_writer.path(), b"color");
json_writer.push_path_segment("hue");
@@ -529,6 +546,37 @@ mod tests {
assert_eq!(json_writer.path(), b"color\x01hue");
}

#[test]
fn test_json_path_expand_dots_disabled() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color.hue");
assert_eq!(json_writer.path(), b"color.hue");
}

#[test]
fn test_json_path_expand_dots_enabled() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, true);
json_writer.push_path_segment("color.hue");
assert_eq!(json_writer.path(), b"color\x01hue");
}

#[test]
fn test_json_path_expand_dots_enabled_pop_segment() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, true);
json_writer.push_path_segment("hello");
assert_eq!(json_writer.path(), b"hello");
json_writer.push_path_segment("color.hue");
assert_eq!(json_writer.path(), b"hello\x01color\x01hue");
json_writer.pop_path_segment();
assert_eq!(json_writer.path(), b"hello");
}

#[test]
fn test_split_json_path_simple() {
let json_path = split_json_path("titi.toto");

@@ -366,7 +366,7 @@ impl IndexMerger {
.map(|doc| reader.num_vals(doc))
.sum()
} else {
reader.total_num_vals() as u32
reader.total_num_vals()
}
})
.sum();
@@ -401,15 +401,10 @@ impl IndexMerger {
.readers
.iter()
.map(|reader| {
let u128_reader: Arc<dyn Column<u128>> = reader
.fast_fields()
.u128(field)
.expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
)
.to_full()
.expect("temp migration solution");
let u128_reader: Arc<dyn Column<u128>> = reader.fast_fields().u128(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and it \
should never happen.",
);
u128_reader
})
.collect::<Vec<_>>();
@@ -470,11 +465,7 @@ impl IndexMerger {
sort_by_field: &IndexSortByField,
) -> crate::Result<Arc<dyn Column>> {
let field_id = expect_field_id_for_sort_field(reader.schema(), sort_by_field)?; // for now expect fastfield, but not strictly required
let value_accessor = reader
.fast_fields()
.u64_lenient(field_id)?
.to_full()
.expect("temp migration solution");
let value_accessor = reader.fast_fields().u64_lenient(field_id)?;
Ok(value_accessor)
}
/// Collecting value_accessors into a vec to bind the lifetime.
@@ -977,7 +968,7 @@ impl IndexMerger {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
} else {
return Err(DataCorruption::comment_only(&format!(
return Err(DataCorruption::comment_only(format!(
"unexpected missing document in docstore on merge, doc address \
{old_doc_addr:?}",
))
@@ -1377,16 +1368,16 @@ mod tests {
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), Some(4000));
assert_eq!(score_field_reader.max_value(), Some(7000));
assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000);

let score_field_reader = searcher
.segment_reader(1)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), Some(1));
assert_eq!(score_field_reader.max_value(), Some(3));
assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3);
}
{
// merging the segments
@@ -1431,8 +1422,8 @@ mod tests {
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), Some(3));
assert_eq!(score_field_reader.max_value(), Some(7000));
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// test a commit with only deletes
@@ -1478,8 +1469,8 @@ mod tests {
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), Some(3));
assert_eq!(score_field_reader.max_value(), Some(7000));
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// Test merging a single segment in order to remove deletes.
@@ -1525,8 +1516,8 @@ mod tests {
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), Some(6000));
assert_eq!(score_field_reader.max_value(), Some(7000));
assert_eq!(score_field_reader.min_value(), 6000);
assert_eq!(score_field_reader.max_value(), 7000);
}

{

@@ -186,17 +186,17 @@ mod tests {

let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64(int_field).unwrap();
assert_eq!(fast_field.get_val(5), Some(1u64));
assert_eq!(fast_field.get_val(4), Some(2u64));
assert_eq!(fast_field.get_val(3), Some(3u64));
assert_eq!(fast_field.get_val(5), 1u64);
assert_eq!(fast_field.get_val(4), 2u64);
assert_eq!(fast_field.get_val(3), 3u64);
if force_disjunct_segment_sort_values {
assert_eq!(fast_field.get_val(2), Some(20u64));
assert_eq!(fast_field.get_val(1), Some(100u64));
assert_eq!(fast_field.get_val(2), 20u64);
assert_eq!(fast_field.get_val(1), 100u64);
} else {
assert_eq!(fast_field.get_val(2), Some(10u64));
assert_eq!(fast_field.get_val(1), Some(20u64));
assert_eq!(fast_field.get_val(2), 10u64);
assert_eq!(fast_field.get_val(1), 20u64);
}
assert_eq!(fast_field.get_val(0), Some(1_000u64));
assert_eq!(fast_field.get_val(0), 1_000u64);

// test new field norm mapping
{
@@ -373,12 +373,12 @@ mod tests {

let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64(int_field).unwrap();
assert_eq!(fast_field.get_val(0), Some(1u64));
assert_eq!(fast_field.get_val(1), Some(2u64));
assert_eq!(fast_field.get_val(2), Some(3u64));
assert_eq!(fast_field.get_val(3), Some(10u64));
assert_eq!(fast_field.get_val(4), Some(20u64));
assert_eq!(fast_field.get_val(5), Some(1_000u64));
assert_eq!(fast_field.get_val(0), 1u64);
assert_eq!(fast_field.get_val(1), 2u64);
assert_eq!(fast_field.get_val(2), 3u64);
assert_eq!(fast_field.get_val(3), 10u64);
assert_eq!(fast_field.get_val(4), 20u64);
assert_eq!(fast_field.get_val(5), 1_000u64);

let get_vals = |fast_field: &MultiValuedFastFieldReader<u64>, doc_id: u32| -> Vec<u64> {
let mut vals = vec![];
@@ -535,15 +535,11 @@ mod bench_sorted_index_merge {
b.iter(|| {
let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
let reader = &merger.readers[doc_addr.segment_ord as usize];
let u64_reader: Arc<dyn Column<u64>> = reader
.fast_fields()
.typed_fast_field_reader(field)
.expect(
let u64_reader: Arc<dyn Column<u64>> =
reader.fast_fields().typed_fast_field_reader(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
)
.to_full()
.unwrap();
);
(doc_addr.doc_id, reader, u64_reader)
});
// add values in order of the new doc_ids

@@ -60,7 +60,7 @@ type AddBatchReceiver = channel::Receiver<AddBatch>;
mod tests_mmap {
use crate::collector::Count;
use crate::query::QueryParser;
use crate::schema::{Schema, STORED, TEXT};
use crate::schema::{JsonObjectOptions, Schema, TEXT};
use crate::{Index, Term};

#[test]
@@ -81,9 +81,9 @@ mod tests_mmap {
}

#[test]
fn test_json_field_espace() {
fn test_json_field_expand_dots_disabled_dot_escaped_required() {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", TEXT | STORED);
let json_field = schema_builder.add_json_field("json", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
let json = serde_json::json!({"k8s.container.name": "prometheus", "val": "hello"});
@@ -99,4 +99,26 @@ mod tests_mmap {
let num_docs = searcher.search(&query, &Count).unwrap();
assert_eq!(num_docs, 1);
}

#[test]
fn test_json_field_expand_dots_enabled_dot_escape_not_required() {
let mut schema_builder = Schema::builder();
let json_options: JsonObjectOptions =
JsonObjectOptions::from(TEXT).set_expand_dots_enabled();
let json_field = schema_builder.add_json_field("json", json_options);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
let json = serde_json::json!({"k8s.container.name": "prometheus", "val": "hello"});
index_writer.add_document(doc!(json_field=>json)).unwrap();
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 1);
let parse_query = QueryParser::for_index(&index, Vec::new());
let query = parse_query
.parse_query(r#"json.k8s.container.name:prometheus"#)
.unwrap();
let num_docs = searcher.search(&query, &Count).unwrap();
assert_eq!(num_docs, 1);
}
}

@@ -866,7 +866,7 @@ mod tests {
}

assert_eq!(indices.len(), 3);
let output_directory: Box<dyn Directory> = Box::new(RamDirectory::default());
let output_directory: Box<dyn Directory> = Box::<RamDirectory>::default();
let index = merge_indices(&indices, output_directory)?;
assert_eq!(index.schema(), schema);


@@ -180,7 +180,7 @@ impl SegmentWriter {
self.per_field_postings_writers.get_for_field_mut(field);
term_buffer.clear_with_field_and_type(field_entry.field_type().value_type(), field);

match *field_entry.field_type() {
match field_entry.field_type() {
FieldType::Facet(_) => {
for value in values {
let facet = value.as_facet().ok_or_else(make_schema_error)?;
@@ -307,7 +307,7 @@ impl SegmentWriter {
self.fieldnorms_writer.record(doc_id, field, num_vals);
}
}
FieldType::JsonObject(_) => {
FieldType::JsonObject(json_options) => {
let text_analyzer = &self.per_field_text_analyzers[field.field_id() as usize];
let json_values_it =
values.map(|value| value.as_json().ok_or_else(make_schema_error));
@@ -315,6 +315,7 @@ impl SegmentWriter {
doc_id,
json_values_it,
text_analyzer,
json_options.is_expand_dots_enabled(),
term_buffer,
postings_writer,
ctx,
@@ -557,7 +558,7 @@ mod tests {
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut term_stream = term_dict.stream().unwrap();

let mut json_term_writer = JsonTermWriter::wrap(&mut term);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);

json_term_writer.push_path_segment("bool");
json_term_writer.set_fast_value(true);
@@ -648,7 +649,7 @@ mod tests {
let segment_reader = searcher.segment_reader(0u32);
let inv_index = segment_reader.inverted_index(json_field).unwrap();
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("mykey");
json_term_writer.set_str("token");
let term_info = inv_index
@@ -692,7 +693,7 @@ mod tests {
let segment_reader = searcher.segment_reader(0u32);
let inv_index = segment_reader.inverted_index(json_field).unwrap();
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("mykey");
json_term_writer.set_str("two tokens");
let term_info = inv_index
@@ -737,7 +738,7 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("mykey");
json_term_writer.push_path_segment("field");
json_term_writer.set_str("hello");

@@ -46,15 +46,11 @@ impl<'a> RemappedDocIdColumn<'a> {
let (min_value, max_value) = readers
.iter()
.filter_map(|reader| {
let u64_reader: Arc<dyn Column<u64>> = reader
.fast_fields()
.typed_fast_field_reader(field)
.expect(
let u64_reader: Arc<dyn Column<u64>> =
reader.fast_fields().typed_fast_field_reader(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
)
.to_full()
.expect("temp migration solution");
);
compute_min_max_val(&*u64_reader, reader)
})
.reduce(|a, b| (a.0.min(b.0), a.1.max(b.1)))
@@ -63,15 +59,11 @@ impl<'a> RemappedDocIdColumn<'a> {
let fast_field_readers = readers
.iter()
.map(|reader| {
let u64_reader: Arc<dyn Column<u64>> = reader
.fast_fields()
.typed_fast_field_reader(field)
.expect(
let u64_reader: Arc<dyn Column<u64>> =
reader.fast_fields().typed_fast_field_reader(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
)
.to_full()
.expect("temp migration solution");
);
u64_reader
})
.collect::<Vec<_>>();

@@ -16,11 +16,11 @@ mod atomic_impl {

impl AtomicU64Wrapper {
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
AtomicU64Wrapper(AtomicU64::new(first_opstamp))
}

pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
self.0.fetch_add(val as u64, order) as u64
self.0.fetch_add(val, order) as u64
}

pub fn revert(&self, val: u64, order: Ordering) -> u64 {
@@ -77,7 +77,7 @@ impl Stamper {
}

pub fn stamp(&self) -> Opstamp {
self.0.fetch_add(1u64, Ordering::SeqCst) as u64
self.0.fetch_add(1u64, Ordering::SeqCst)
}

/// Given a desired count `n`, `stamps` returns an iterator that

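The `Opstamp` alias is already a `u64`, so the casts stripped from the stamper above were pure no-ops. A standalone sketch of the same fetch_add-based stamping, using only std:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Simplified model of the opstamp counter: each call hands out the current
// stamp and advances the counter atomically.
struct Stamper(AtomicU64);

impl Stamper {
    fn new(first: u64) -> Self {
        Stamper(AtomicU64::new(first))
    }
    fn stamp(&self) -> u64 {
        self.0.fetch_add(1, Ordering::SeqCst) // returns the pre-increment value
    }
}

fn main() {
    let stamper = Stamper::new(7);
    assert_eq!(stamper.stamp(), 7);
    assert_eq!(stamper.stamp(), 8);
}
```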
12
src/lib.rs
@@ -177,7 +177,7 @@ impl DateTime {
/// The given date/time is converted to UTC and the actual
/// time zone is discarded.
pub const fn from_utc(dt: OffsetDateTime) -> Self {
let timestamp_micros = dt.unix_timestamp() as i64 * 1_000_000 + dt.microsecond() as i64;
let timestamp_micros = dt.unix_timestamp() * 1_000_000 + dt.microsecond() as i64;
Self { timestamp_micros }
}

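The cast dropped above is a no-op, since the `time` crate's `OffsetDateTime::unix_timestamp()` already returns an `i64`. The microsecond-timestamp arithmetic in isolation:

```rust
// Whole seconds scaled to microseconds, plus the sub-second microseconds.
fn timestamp_micros(unix_timestamp: i64, microsecond: u32) -> i64 {
    unix_timestamp * 1_000_000 + microsecond as i64
}

fn main() {
    assert_eq!(timestamp_micros(1, 250_000), 1_250_000);
}
```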
@@ -259,10 +259,6 @@ pub use crate::future_result::FutureResult;
|
||||
/// and instead, refer to this as `crate::Result<T>`.
|
||||
pub type Result<T> = std::result::Result<T, TantivyError>;
|
||||
|
||||
/// Result for an Async io operation.
|
||||
#[cfg(feature = "quickwit")]
|
||||
pub type AsyncIoResult<T> = std::result::Result<T, crate::error::AsyncIoError>;
|
||||
|
||||
mod core;
|
||||
mod indexer;
|
||||
|
||||
@@ -1037,21 +1033,21 @@ pub mod tests {
|
||||
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_unsigned);
|
||||
assert!(fast_field_reader_opt.is_ok());
|
||||
let fast_field_reader = fast_field_reader_opt.unwrap();
|
||||
assert_eq!(fast_field_reader.get_val(0), Some(4u64))
|
||||
assert_eq!(fast_field_reader.get_val(0), 4u64)
|
||||
}
|
||||
|
||||
{
|
||||
let fast_field_reader_res = segment_reader.fast_fields().i64(fast_field_signed);
|
||||
assert!(fast_field_reader_res.is_ok());
|
||||
let fast_field_reader = fast_field_reader_res.unwrap();
|
||||
assert_eq!(fast_field_reader.get_val(0), Some(4i64))
|
||||
assert_eq!(fast_field_reader.get_val(0), 4i64)
|
||||
}
|
||||
|
||||
{
|
||||
let fast_field_reader_res = segment_reader.fast_fields().f64(fast_field_float);
|
||||
assert!(fast_field_reader_res.is_ok());
|
||||
let fast_field_reader = fast_field_reader_res.unwrap();
|
||||
assert_eq!(fast_field_reader.get_val(0), Some(4f64))
|
||||
assert_eq!(fast_field_reader.get_val(0), 4f64)
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ impl PositionReader {
|
||||
.map(|num_bits| num_bits as usize)
|
||||
.sum();
|
||||
let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
|
||||
self.bit_widths.advance(num_blocks as usize);
|
||||
self.bit_widths.advance(num_blocks);
|
||||
self.positions.advance(num_bytes_to_skip);
|
||||
self.anchor_offset += (num_blocks * COMPRESSION_BLOCK_SIZE) as u64;
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use crate::postings::stacker::{MemoryArena, TermHashMap};
|
||||
use stacker::{ArenaHashMap, MemoryArena};
|
||||
|
||||
/// IndexingContext contains all of the transient memory arenas
|
||||
/// required for building the inverted index.
|
||||
pub(crate) struct IndexingContext {
|
||||
/// The term index is an adhoc hashmap,
|
||||
/// itself backed by a dedicated memory arena.
|
||||
pub term_index: TermHashMap,
|
||||
pub term_index: ArenaHashMap,
|
||||
/// Arena is a memory arena that stores posting lists / term frequencies / positions.
|
||||
pub arena: MemoryArena,
|
||||
}
|
||||
@@ -13,9 +13,9 @@ pub(crate) struct IndexingContext {
|
||||
impl IndexingContext {
|
||||
/// Create a new IndexingContext given the size of the term hash map.
|
||||
pub(crate) fn new(table_size: usize) -> IndexingContext {
|
||||
let term_index = TermHashMap::new(table_size);
|
||||
let term_index = ArenaHashMap::new(table_size);
|
||||
IndexingContext {
|
||||
arena: MemoryArena::new(),
|
||||
arena: MemoryArena::default(),
|
||||
term_index,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
use std::io;
|
||||
|
||||
use stacker::Addr;
|
||||
|
||||
use crate::fastfield::MultiValuedFastFieldWriter;
|
||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||
use crate::postings::postings_writer::SpecializedPostingsWriter;
|
||||
use crate::postings::recorder::{BufferLender, DocIdRecorder, Recorder};
|
||||
use crate::postings::stacker::Addr;
|
||||
use crate::postings::{
|
||||
FieldSerializer, IndexingContext, IndexingPosition, PostingsWriter, UnorderedTermId,
|
||||
};
|
||||
|
||||
@@ -15,9 +15,10 @@ mod recorder;
|
||||
mod segment_postings;
|
||||
mod serializer;
|
||||
mod skip;
|
||||
mod stacker;
|
||||
mod term_info;
|
||||
|
||||
pub(crate) use stacker::compute_table_size;
|
||||
|
||||
pub use self::block_segment_postings::BlockSegmentPostings;
|
||||
pub(crate) use self::indexing_context::IndexingContext;
|
||||
pub(crate) use self::per_field_postings_writer::PerFieldPostingsWriter;
|
||||
@@ -26,10 +27,9 @@ pub(crate) use self::postings_writer::{serialize_postings, IndexingPosition, Pos
|
||||
pub use self::segment_postings::SegmentPostings;
|
||||
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
|
||||
pub(crate) use self::skip::{BlockInfo, SkipReader};
|
||||
pub(crate) use self::stacker::compute_table_size;
|
||||
pub use self::term_info::TermInfo;
|
||||
|
||||
pub(crate) type UnorderedTermId = u64;
|
||||
pub(crate) type UnorderedTermId = stacker::UnorderedId;
|
||||
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
|
||||
|
||||
@@ -51,7 +51,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
|
||||
| FieldType::Date(_)
|
||||
| FieldType::Bytes(_)
|
||||
| FieldType::IpAddr(_)
|
||||
| FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<DocIdRecorder>::default()),
|
||||
| FieldType::Facet(_) => Box::<SpecializedPostingsWriter<DocIdRecorder>>::default(),
|
||||
FieldType::JsonObject(ref json_object_options) => {
|
||||
if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
|
||||
match text_indexing_option.index_option() {
|
||||
|
||||
@@ -4,8 +4,8 @@ use std::marker::PhantomData;
|
||||
use std::ops::Range;
|
||||
|
||||
use rustc_hash::FxHashMap;
|
||||
use stacker::Addr;
|
||||
|
||||
use super::stacker::Addr;
|
||||
use crate::fastfield::MultiValuedFastFieldWriter;
|
||||
use crate::fieldnorm::FieldNormReaders;
|
||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||
@@ -59,7 +59,11 @@ pub(crate) fn serialize_postings(
|
||||
) -> crate::Result<HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>>> {
|
||||
let mut term_offsets: Vec<(Term<&[u8]>, Addr, UnorderedTermId)> =
|
||||
Vec::with_capacity(ctx.term_index.len());
|
||||
term_offsets.extend(ctx.term_index.iter());
|
||||
term_offsets.extend(
|
||||
ctx.term_index
|
||||
.iter()
|
||||
.map(|(bytes, addr, unordered_id)| (Term::wrap(bytes), addr, unordered_id)),
|
||||
);
|
||||
term_offsets.sort_unstable_by_key(|(k, _, _)| k.clone());
|
||||
let mut unordered_term_mappings: HashMap<Field, FxHashMap<UnorderedTermId, TermOrdinal>> =
|
||||
HashMap::new();
|
||||
|
||||
@@ -1,6 +1,6 @@
 use common::read_u32_vint;
+use stacker::{ExpUnrolledLinkedList, MemoryArena};
 
-use super::stacker::{ExpUnrolledLinkedList, MemoryArena};
 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::postings::FieldSerializer;
 use crate::DocId;

@@ -91,7 +91,7 @@ pub struct DocIdRecorder {
 impl Default for DocIdRecorder {
     fn default() -> Self {
         DocIdRecorder {
-            stack: ExpUnrolledLinkedList::new(),
+            stack: ExpUnrolledLinkedList::default(),
             current_doc: u32::MAX,
         }
     }

@@ -144,7 +144,7 @@ impl Recorder for DocIdRecorder {
 }
 
 /// Recorder encoding document ids, and term frequencies
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Default)]
 pub struct TermFrequencyRecorder {
     stack: ExpUnrolledLinkedList,
     current_doc: DocId,

@@ -152,17 +152,6 @@ pub struct TermFrequencyRecorder {
     term_doc_freq: u32,
 }
 
-impl Default for TermFrequencyRecorder {
-    fn default() -> Self {
-        TermFrequencyRecorder {
-            stack: ExpUnrolledLinkedList::new(),
-            current_doc: 0,
-            current_tf: 0u32,
-            term_doc_freq: 0u32,
-        }
-    }
-}
-
 impl Recorder for TermFrequencyRecorder {
     fn current_doc(&self) -> DocId {
         self.current_doc

@@ -229,7 +218,7 @@ pub struct TfAndPositionRecorder {
 impl Default for TfAndPositionRecorder {
     fn default() -> Self {
         TfAndPositionRecorder {
-            stack: ExpUnrolledLinkedList::new(),
+            stack: ExpUnrolledLinkedList::default(),
             current_doc: u32::MAX,
             term_doc_freq: 0u32,
         }
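The deleted `Default` impl for `TermFrequencyRecorder` was equivalent to the derived one (every field zero), so `#[derive(Default)]` replaces it; `DocIdRecorder` and `TfAndPositionRecorder` keep hand-written impls because their `current_doc` starts at the `u32::MAX` sentinel. The distinction in isolation (field names simplified):

```rust
// All fields default to zero: the derive is sufficient.
#[derive(Default)]
struct TermFreq {
    current_doc: u32,
    current_tf: u32,
}

// Sentinel default (u32::MAX) differs from the derived zero,
// so a manual impl is still required.
struct DocIds {
    current_doc: u32,
}

impl Default for DocIds {
    fn default() -> Self {
        DocIds { current_doc: u32::MAX }
    }
}

fn main() {
    assert_eq!(TermFreq::default().current_tf, 0);
    assert_eq!(DocIds::default().current_doc, u32::MAX);
}
```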
@@ -213,21 +213,21 @@ impl<'a> FieldSerializer<'a> {
         fail_point!("FieldSerializer::close_term", |msg: Option<String>| {
             Err(io::Error::new(io::ErrorKind::Other, format!("{:?}", msg)))
         });
-        if self.term_open {
-            self.postings_serializer
-                .close_term(self.current_term_info.doc_freq)?;
-            self.current_term_info.postings_range.end =
-                self.postings_serializer.written_bytes() as usize;
-
-            if let Some(positions_serializer) = self.positions_serializer_opt.as_mut() {
-                positions_serializer.close_term()?;
-                self.current_term_info.positions_range.end =
-                    positions_serializer.written_bytes() as usize;
-            }
-            self.term_dictionary_builder
-                .insert_value(&self.current_term_info)?;
-            self.term_open = false;
+        if !self.term_open {
+            return Ok(());
         }
+        self.postings_serializer
+            .close_term(self.current_term_info.doc_freq)?;
+        self.current_term_info.postings_range.end =
+            self.postings_serializer.written_bytes() as usize;
+        if let Some(positions_serializer) = self.positions_serializer_opt.as_mut() {
+            positions_serializer.close_term()?;
+            self.current_term_info.positions_range.end =
+                positions_serializer.written_bytes() as usize;
+        }
+        self.term_dictionary_builder
+            .insert_value(&self.current_term_info)?;
+        self.term_open = false;
         Ok(())
     }

@@ -465,7 +465,7 @@ impl<W: Write> PostingsSerializer<W> {
     /// When called after writing the postings of a term, this value is used as a
     /// end offset.
     fn written_bytes(&self) -> u64 {
-        self.output_write.written_bytes() as u64
+        self.output_write.written_bytes()
     }
 
     fn clear(&mut self) {
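The `close_term` hunk above swaps a nested `if self.term_open { ... }` body for a guard clause, so the serialization steps read at a single indentation level. The shape of the refactor, reduced to a sketch (the `Serializer` struct here is hypothetical):

```rust
use std::io;

struct Serializer {
    term_open: bool,
}

impl Serializer {
    fn close_term(&mut self) -> io::Result<()> {
        // Guard clause: bail out early instead of nesting the whole body.
        if !self.term_open {
            return Ok(());
        }
        // ... flush postings and positions here ...
        self.term_open = false;
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let mut s = Serializer { term_open: true };
    s.close_term()?;
    assert!(!s.term_open);
    Ok(())
}
```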
@@ -1,7 +0,0 @@
-mod expull;
-mod memory_arena;
-mod term_hashmap;
-
-pub(crate) use self::expull::ExpUnrolledLinkedList;
-pub(crate) use self::memory_arena::{Addr, MemoryArena};
-pub(crate) use self::term_hashmap::{compute_table_size, TermHashMap};

@@ -47,7 +47,7 @@ impl From<BitSet> for BitSetDocSet {
 impl DocSet for BitSetDocSet {
     fn advance(&mut self) -> DocId {
         if let Some(lower) = self.cursor_tinybitset.pop_lowest() {
-            self.doc = (self.cursor_bucket as u32 * 64u32) | lower;
+            self.doc = (self.cursor_bucket * 64u32) | lower;
             return self.doc;
         }
         if let Some(cursor_bucket) = self.docs.first_non_empty_bucket(self.cursor_bucket + 1) {
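Dropping the cast suggests `cursor_bucket` is now a `u32` to begin with. For reference, the doc-id arithmetic in `advance`: each bucket covers 64 documents, so the bucket base (`bucket * 64`) has its low six bits clear, and OR-ing in the popped bit position is equivalent to adding it. A tiny sketch:

```rust
fn doc_from_bucket(bucket: u32, lowest_bit: u32) -> u32 {
    // bucket * 64 has the low 6 bits clear, so OR-ing in the
    // bit position (0..64) is the same as adding it.
    (bucket * 64u32) | lowest_bit
}

fn main() {
    assert_eq!(doc_from_bucket(0, 5), 5);
    assert_eq!(doc_from_bucket(2, 3), 131);
}
```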
@@ -16,7 +16,8 @@ use crate::query::{
     TermQuery, TermSetQuery,
 };
 use crate::schema::{
-    Facet, FacetParseError, Field, FieldType, IndexRecordOption, IntoIpv6Addr, Schema, Term, Type,
+    Facet, FacetParseError, Field, FieldType, IndexRecordOption, IntoIpv6Addr, JsonObjectOptions,
+    Schema, Term, Type,
 };
 use crate::time::format_description::well_known::Rfc3339;
 use crate::time::OffsetDateTime;

@@ -182,7 +183,6 @@ pub struct QueryParser {
     conjunction_by_default: bool,
     tokenizer_manager: TokenizerManager,
     boost: HashMap<Field, Score>,
-    field_names: HashMap<String, Field>,
 }
 
 fn all_negative(ast: &LogicalAst) -> bool {

@@ -195,31 +195,6 @@ fn all_negative(ast: &LogicalAst) -> bool {
     }
 }
 
-// Returns the position (in byte offsets) of the unescaped '.' in the `field_path`.
-//
-// This function operates directly on bytes (as opposed to codepoint), relying
-// on a encoding property of utf-8 for its correctness.
-fn locate_splitting_dots(field_path: &str) -> Vec<usize> {
-    let mut splitting_dots_pos = Vec::new();
-    let mut escape_state = false;
-    for (pos, b) in field_path.bytes().enumerate() {
-        if escape_state {
-            escape_state = false;
-            continue;
-        }
-        match b {
-            b'\\' => {
-                escape_state = true;
-            }
-            b'.' => {
-                splitting_dots_pos.push(pos);
-            }
-            _ => {}
-        }
-    }
-    splitting_dots_pos
-}
-
 impl QueryParser {
     /// Creates a `QueryParser`, given
     /// * schema - index Schema

@@ -229,34 +204,19 @@ impl QueryParser {
         default_fields: Vec<Field>,
         tokenizer_manager: TokenizerManager,
     ) -> QueryParser {
-        let field_names = schema
-            .fields()
-            .map(|(field, field_entry)| (field_entry.name().to_string(), field))
-            .collect();
         QueryParser {
             schema,
             default_fields,
             tokenizer_manager,
             conjunction_by_default: false,
             boost: Default::default(),
-            field_names,
         }
     }
 
     // Splits a full_path as written in a query, into a field name and a
     // json path.
     pub(crate) fn split_full_path<'a>(&self, full_path: &'a str) -> Option<(Field, &'a str)> {
-        if let Some(field) = self.field_names.get(full_path) {
-            return Some((*field, ""));
-        }
-        let mut splitting_period_pos: Vec<usize> = locate_splitting_dots(full_path);
-        while let Some(pos) = splitting_period_pos.pop() {
-            let (prefix, suffix) = full_path.split_at(pos);
-            if let Some(field) = self.field_names.get(prefix) {
-                return Some((*field, &suffix[1..]));
-            }
-        }
-        None
+        self.schema.find_field(full_path)
     }
 
     /// Creates a `QueryParser`, given

@@ -482,28 +442,14 @@ impl QueryParser {
             .into_iter()
             .collect())
         }
-        FieldType::JsonObject(ref json_options) => {
-            let option = json_options.get_text_indexing_options().ok_or_else(|| {
-                // This should have been seen earlier really.
-                QueryParserError::FieldNotIndexed(field_name.to_string())
-            })?;
-            let text_analyzer =
-                self.tokenizer_manager
-                    .get(option.tokenizer())
-                    .ok_or_else(|| QueryParserError::UnknownTokenizer {
-                        field: field_name.to_string(),
-                        tokenizer: option.tokenizer().to_string(),
-                    })?;
-            let index_record_option = option.index_option();
-            generate_literals_for_json_object(
-                field_name,
-                field,
-                json_path,
-                phrase,
-                &text_analyzer,
-                index_record_option,
-            )
-        }
+        FieldType::JsonObject(ref json_options) => generate_literals_for_json_object(
+            field_name,
+            field,
+            json_path,
+            phrase,
+            &self.tokenizer_manager,
+            json_options,
+        ),
         FieldType::Facet(_) => match Facet::from_text(phrase) {
             Ok(facet) => {
                 let facet_term = Term::from_facet(field, &facet);

@@ -767,17 +713,32 @@ fn generate_literals_for_json_object(
     field: Field,
     json_path: &str,
     phrase: &str,
-    text_analyzer: &TextAnalyzer,
-    index_record_option: IndexRecordOption,
+    tokenizer_manager: &TokenizerManager,
+    json_options: &JsonObjectOptions,
 ) -> Result<Vec<LogicalLiteral>, QueryParserError> {
+    let text_options = json_options.get_text_indexing_options().ok_or_else(|| {
+        // This should have been seen earlier really.
+        QueryParserError::FieldNotIndexed(field_name.to_string())
+    })?;
+    let text_analyzer = tokenizer_manager
+        .get(text_options.tokenizer())
+        .ok_or_else(|| QueryParserError::UnknownTokenizer {
+            field: field_name.to_string(),
+            tokenizer: text_options.tokenizer().to_string(),
+        })?;
+    let index_record_option = text_options.index_option();
     let mut logical_literals = Vec::new();
     let mut term = Term::with_capacity(100);
-    let mut json_term_writer =
-        JsonTermWriter::from_field_and_json_path(field, json_path, &mut term);
+    let mut json_term_writer = JsonTermWriter::from_field_and_json_path(
+        field,
+        json_path,
+        json_options.is_expand_dots_enabled(),
+        &mut term,
+    );
     if let Some(term) = convert_to_fast_value_and_get_term(&mut json_term_writer, phrase) {
         logical_literals.push(LogicalLiteral::Term(term));
     }
-    let terms = set_string_and_get_terms(&mut json_term_writer, phrase, text_analyzer);
+    let terms = set_string_and_get_terms(&mut json_term_writer, phrase, &text_analyzer);
     drop(json_term_writer);
     if terms.len() <= 1 {
         for (_, term) in terms {

@@ -1564,13 +1525,6 @@ mod test {
         assert_eq!(query_parser.split_full_path("firsty"), None);
     }
 
-    #[test]
-    fn test_locate_splitting_dots() {
-        assert_eq!(&super::locate_splitting_dots("a.b.c"), &[1, 3]);
-        assert_eq!(&super::locate_splitting_dots(r#"a\.b.c"#), &[4]);
-        assert_eq!(&super::locate_splitting_dots(r#"a\..b.c"#), &[3, 5]);
-    }
-
     #[test]
     pub fn test_phrase_slop() {
         test_parse_query_to_logical_ast_helper(
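In the hunks above, `split_full_path` collapses to `Schema::find_field`, and the escape-aware dot scanner leaves the parser along with its tests. A standalone sketch of that splitting logic, with assertions mirroring the removed `test_locate_splitting_dots` cases:

```rust
/// Byte offsets of unescaped '.' separators in a field path.
/// Safe to scan bytes rather than chars: '.' and '\\' are ASCII,
/// so they can never occur inside a multi-byte UTF-8 sequence.
fn locate_splitting_dots(field_path: &str) -> Vec<usize> {
    let mut dots = Vec::new();
    let mut escaped = false;
    for (pos, b) in field_path.bytes().enumerate() {
        if escaped {
            escaped = false; // the escaped byte is consumed verbatim
        } else if b == b'\\' {
            escaped = true;
        } else if b == b'.' {
            dots.push(pos);
        }
    }
    dots
}

fn main() {
    assert_eq!(locate_splitting_dots("a.b.c"), vec![1, 3]);
    assert_eq!(locate_splitting_dots(r"a\.b.c"), vec![4]);
    assert_eq!(locate_splitting_dots(r"a\..b.c"), vec![3, 5]);
}
```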
@@ -7,7 +7,7 @@ use std::ops::{Bound, RangeInclusive};
 use std::sync::Arc;
 
 use common::BinarySerializable;
-use fastfield_codecs::{MonotonicallyMappableToU128, OptionalColumn};
+use fastfield_codecs::{Column, MonotonicallyMappableToU128};
 
 use super::range_query::map_bound;
 use super::{ConstScorer, Explanation, Scorer, Weight};

@@ -45,10 +45,12 @@ impl Weight for IPFastFieldRangeWeight {
         match field_type.fastfield_cardinality().unwrap() {
             Cardinality::SingleValue => {
                 let ip_addr_fast_field = reader.fast_fields().ip_addr(self.field)?;
-                let minmax = ip_addr_fast_field
-                    .min_value()
-                    .zip(ip_addr_fast_field.max_value());
-                let value_range = bound_to_value_range(&self.left_bound, &self.right_bound, minmax);
+                let value_range = bound_to_value_range(
+                    &self.left_bound,
+                    &self.right_bound,
+                    ip_addr_fast_field.min_value(),
+                    ip_addr_fast_field.max_value(),
+                );
                 let docset = IpRangeDocSet::new(
                     value_range,
                     IpFastFieldCardinality::SingleValue(ip_addr_fast_field),

@@ -60,10 +62,8 @@ impl Weight for IPFastFieldRangeWeight {
                 let value_range = bound_to_value_range(
                     &self.left_bound,
                     &self.right_bound,
-                    Some((
-                        ip_addr_fast_field.min_value(),
-                        ip_addr_fast_field.max_value(),
-                    )),
+                    ip_addr_fast_field.min_value(),
+                    ip_addr_fast_field.max_value(),
                 );
                 let docset = IpRangeDocSet::new(
                     value_range,

@@ -91,10 +91,9 @@ impl Weight for IPFastFieldRangeWeight {
 fn bound_to_value_range(
     left_bound: &Bound<Ipv6Addr>,
     right_bound: &Bound<Ipv6Addr>,
-    min_max: Option<(Ipv6Addr, Ipv6Addr)>,
+    min_value: Ipv6Addr,
+    max_value: Ipv6Addr,
 ) -> RangeInclusive<Ipv6Addr> {
-    let (min_value, max_value) =
-        min_max.unwrap_or((Ipv6Addr::from(u128::MIN), Ipv6Addr::from(u128::MAX)));
     let start_value = match left_bound {
         Bound::Included(ip_addr) => *ip_addr,
         Bound::Excluded(ip_addr) => Ipv6Addr::from(ip_addr.to_u128() + 1),

@@ -127,7 +126,7 @@ impl VecCursor {
     }
     #[inline]
     fn current(&self) -> Option<u32> {
-        self.docs.get(self.current_pos).map(|el| *el as u32)
+        self.docs.get(self.current_pos).copied()
     }
     fn get_cleared_data(&mut self) -> &mut Vec<u32> {
         self.docs.clear();

@@ -143,7 +142,7 @@ impl VecCursor {
 }
 
 pub(crate) enum IpFastFieldCardinality {
-    SingleValue(Arc<dyn OptionalColumn<Ipv6Addr>>),
+    SingleValue(Arc<dyn Column<Ipv6Addr>>),
     MultiValue(MultiValuedU128FastFieldReader<Ipv6Addr>),
 }

@@ -269,9 +268,9 @@ impl DocSet for IpRangeDocSet {
     #[inline]
     fn advance(&mut self) -> DocId {
         if let Some(docid) = self.loaded_docs.next() {
-            docid as u32
+            docid
         } else {
-            if self.next_fetch_start >= self.ip_addr_fast_field.num_docs() as u32 {
+            if self.next_fetch_start >= self.ip_addr_fast_field.num_docs() {
                 return TERMINATED;
             }
             self.fetch_block();

@@ -283,7 +282,6 @@ impl DocSet for IpRangeDocSet {
     fn doc(&self) -> DocId {
         self.loaded_docs
             .current()
-            .map(|el| el as u32)
             .unwrap_or(TERMINATED)
     }
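With `OptionalColumn` gone, `bound_to_value_range` receives the column's `min_value`/`max_value` directly instead of an `Option` pair to unwrap. A minimal sketch of the bound-to-range conversion over plain `u128` (the representation `Ipv6Addr` maps to via `MonotonicallyMappableToU128`); like the original, the `Excluded` arms assume the bound is not already at the numeric limit:

```rust
use std::ops::{Bound, RangeInclusive};

/// Turn a pair of (inclusive/exclusive/unbounded) bounds into an
/// inclusive range, falling back to the column's known min and max.
fn bound_to_value_range(
    left: &Bound<u128>,
    right: &Bound<u128>,
    min_value: u128,
    max_value: u128,
) -> RangeInclusive<u128> {
    let start = match left {
        Bound::Included(v) => *v,
        Bound::Excluded(v) => v + 1, // shift an exclusive start inward
        Bound::Unbounded => min_value,
    };
    let end = match right {
        Bound::Included(v) => *v,
        Bound::Excluded(v) => v - 1, // shift an exclusive end inward
        Bound::Unbounded => max_value,
    };
    start..=end
}

fn main() {
    assert_eq!(
        bound_to_value_range(&Bound::Excluded(3), &Bound::Unbounded, 0, 10),
        4..=10
    );
}
```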
@@ -43,7 +43,7 @@ fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
     min_doc: DocId,
 ) {
     unordered_drain_filter(scorers, |scorer| {
-        let horizon = min_doc + HORIZON as u32;
+        let horizon = min_doc + HORIZON;
         loop {
             let doc = scorer.doc();
             if doc >= horizon {

@@ -236,7 +236,7 @@ mod tests {
         )
         .unwrap();
 
-        let date_options_json = serde_json::to_value(&date_options).unwrap();
+        let date_options_json = serde_json::to_value(date_options).unwrap();
         assert_eq!(
             date_options_json,
             serde_json::json!({
Some files were not shown because too many files have changed in this diff.