Compare commits


2 Commits

Author: Paul Masurel
SHA1:   c69661f0f0
Date:   2022-09-21 23:26:01 +09:00

    Removing iterators.

Author: Paul Masurel
SHA1:   85ebb3c420
Date:   2022-09-21 12:47:44 +09:00

    Introducing ColumnReader.

    Introducing a ColumnReader trait and .reader() to Column,
    hence removing the dreaded Mutex in the `MultiValueStartIndex` thingy.
241 changed files with 3090 additions and 14651 deletions

.gitattributes (new file)

@@ -0,0 +1 @@
cpp/* linguist-vendored

GitHub Actions CI workflow

@@ -48,7 +48,7 @@ jobs:
strategy:
matrix:
features: [
{ label: "all", flags: "mmap,stopwords,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
{ label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
{ label: "quickwit", flags: "mmap,quickwit,failpoints" }
]

.gitignore

@@ -9,6 +9,7 @@ target/release
Cargo.lock
benchmark
.DS_Store
cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat

CHANGELOG.md

@@ -1,37 +1,10 @@
Tantivy 0.19
================================
#### Bugfixes
- Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480) (@PSeitz @fulmicoton)
#### Features/Improvements
- Add support for `IN` in the queryparser, e.g. `field: IN [val1 val2 val3]` [#1683](https://github.com/quickwit-oss/tantivy/pull/1683) (@trinity-1686a)
- Skip score calculation, when no scoring is required [#1646](https://github.com/quickwit-oss/tantivy/pull/1646) (@PSeitz)
- Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
- The `DateTime` type has been updated to hold timestamps with microsecond precision.
`DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used as a hint for fast value compression. Otherwise, second precision is used everywhere else (i.e. terms, indexing) [#1396](https://github.com/quickwit-oss/tantivy/pull/1396) (@evanxg852000)
- Add IP address field type [#1553](https://github.com/quickwit-oss/tantivy/pull/1553) (@PSeitz)
- Add boolean field type [#1382](https://github.com/quickwit-oss/tantivy/pull/1382) (@boraarslan)
- Remove Searcher pool and make `Searcher` cloneable. (@PSeitz)
- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570) (@PSeitz)
- Detect and apply gcd on fastfield codecs [#1418](https://github.com/quickwit-oss/tantivy/pull/1418) (@PSeitz)
- Doc store
- use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510) (@PSeitz @fulmicoton)
- Expose doc store cache size [#1403](https://github.com/quickwit-oss/tantivy/pull/1403) (@PSeitz)
- Enable compression levels for doc store [#1378](https://github.com/quickwit-oss/tantivy/pull/1378) (@PSeitz)
- Make block size configurable [#1374](https://github.com/quickwit-oss/tantivy/pull/1374) (@kryesh)
- Make `tantivy::TantivyError` cloneable [#1402](https://github.com/quickwit-oss/tantivy/pull/1402) (@PSeitz)
- Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
- Aggregation
- Add aggregation support for date type [#1693](https://github.com/quickwit-oss/tantivy/pull/1693) (@PSeitz)
- Add support for keyed parameter in range and histogram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
- Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
- Faster indexing
- [#1610](https://github.com/quickwit-oss/tantivy/pull/1610) (@PSeitz)
- [#1594](https://github.com/quickwit-oss/tantivy/pull/1594) (@PSeitz)
- [#1582](https://github.com/quickwit-oss/tantivy/pull/1582) (@PSeitz)
- [#1611](https://github.com/quickwit-oss/tantivy/pull/1611) (@PSeitz)
- Added a pre-configured stop word filter for various languages [#1666](https://github.com/quickwit-oss/tantivy/pull/1666) (@adamreichold)
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
The `DateTime` type has been updated to hold timestamps with microsecond precision.
`DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used as a hint for fast value compression. Otherwise, second precision is used everywhere else (i.e. terms, indexing).
- Remove Searcher pool and make `Searcher` cloneable.
Tantivy 0.18
================================
@@ -49,10 +22,6 @@ Tantivy 0.18
- Add terms aggregation (@PSeitz)
- Add support for zstd compression (@kryesh)
Tantivy 0.18.1
================================
- Hotfix: positions computation. #1629 (@fmassot, @fulmicoton, @PSeitz)
Tantivy 0.17
================================

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.19.0"
version = "0.18.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -11,21 +11,19 @@ repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2021"
rust-version = "1.62"
[dependencies]
oneshot = "0.1.5"
base64 = "0.20.0"
oneshot = "0.1.3"
base64 = "0.13.0"
byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
aho-corasick = "0.7"
tantivy-fst = "0.4.0"
tantivy-fst = "0.3.0"
memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.12", optional = true, default-features = false }
zstd = { version = "0.11", optional = true }
snap = { version = "1.0.5", optional = true }
tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"
@@ -36,11 +34,17 @@ fs2 = { version = "0.4.3", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
tantivy-bitpacker = { version="0.2", path="./bitpacker" }
common = { version = "0.3", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
ownedbytes = { version="0.3", path="./ownedbytes" }
stable_deref_trait = "1.2.0"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
census = "0.4.0"
rustc-hash = "1.1.0"
fnv = "1.0.7"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.5.0"
@@ -52,16 +56,10 @@ lru = "0.7.5"
fastdivide = "0.4.0"
itertools = "0.10.3"
measure_time = "0.8.2"
serde_cbor = { version = "0.11.2", optional = true }
async-trait = "0.1.53"
arc-swap = "1.5.0"
sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version= "0.3", path="./fastfield_codecs", default-features = false }
[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"
@@ -71,10 +69,10 @@ maplit = "1.0.2"
matches = "0.1.9"
pretty_assertions = "1.2.1"
proptest = "1.0.0"
criterion = "0.4"
criterion = "0.3.5"
test-log = "0.2.10"
env_logger = "0.10.0"
pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
env_logger = "0.9.0"
pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
futures = "0.3.21"
[dev-dependencies.fail]
@@ -91,9 +89,8 @@ debug-assertions = true
overflow-checks = true
[features]
default = ["mmap", "stopwords", "lz4-compression"]
default = ["mmap", "lz4-compression" ]
mmap = ["fs2", "tempfile", "memmap2"]
stopwords = []
brotli-compression = ["brotli"]
lz4-compression = ["lz4_flex"]
@@ -103,10 +100,10 @@ zstd-compression = ["zstd"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
quickwit = ["sstable"]
quickwit = ["serde_cbor"]
[workspace]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes", "stacker", "sstable", "columnar"]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points

README.md

@@ -58,7 +58,7 @@ Distributed search is out of the scope of Tantivy, but if you are looking for th
# Getting started
Tantivy works on stable Rust and supports Linux, macOS, and Windows.
Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
@@ -81,13 +81,9 @@ There are many ways to support this project.
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
## Minimum supported Rust version
Tantivy currently requires Rust 1.62 or later to compile.
## Clone and build locally
Tantivy compiles on stable Rust.
Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run:
```bash

bitpacker/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "tantivy-bitpacker"
version = "0.3.0"
version = "0.2.0"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
@@ -8,8 +8,6 @@ categories = []
description = """Tantivy-sub crate: bitpacking"""
repository = "https://github.com/quickwit-oss/tantivy"
keywords = []
documentation = "https://docs.rs/tantivy-bitpacker/latest/tantivy_bitpacker"
homepage = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

bitpacker/src/bitpacker.rs

@@ -25,14 +25,15 @@ impl BitPacker {
num_bits: u8,
output: &mut TWrite,
) -> io::Result<()> {
let val_u64 = val as u64;
let num_bits = num_bits as usize;
if self.mini_buffer_written + num_bits > 64 {
self.mini_buffer |= val.wrapping_shl(self.mini_buffer_written as u32);
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
self.mini_buffer = val.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
} else {
self.mini_buffer |= val << self.mini_buffer_written;
self.mini_buffer |= val_u64 << self.mini_buffer_written;
self.mini_buffer_written += num_bits;
if self.mini_buffer_written == 64 {
output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
@@ -86,20 +87,22 @@ impl BitUnpacker {
}
#[inline]
pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
if self.num_bits == 0 {
return 0u64;
}
let addr_in_bits = idx * self.num_bits as u32;
let addr = (addr_in_bits >> 3) as usize;
let addr_in_bits = idx * self.num_bits;
let addr = addr_in_bits >> 3;
let bit_shift = addr_in_bits & 7;
debug_assert!(
addr + 8 <= data.len(),
addr + 8 <= data.len() as u64,
"The fast field field should have been padded with 7 bytes."
);
let bytes: [u8; 8] = (&data[addr..addr + 8]).try_into().unwrap();
let bytes: [u8; 8] = (&data[(addr as usize)..(addr as usize) + 8])
.try_into()
.unwrap();
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = val_unshifted_unmasked >> bit_shift;
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
val_shifted & self.mask
}
}
@@ -127,7 +130,7 @@ mod test {
fn test_bitpacker_util(len: usize, num_bits: u8) {
let (bitunpacker, vals, data) = create_fastfield_bitpacker(len, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i as u32, &data), *val);
assert_eq!(bitunpacker.get(i as u64, &data), *val);
}
}
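As context for the `get` hunk above, here is a minimal standalone sketch (not part of the diff) of the addressing math: the value index is turned into a bit offset, split into a byte address and an in-byte shift, and the unconditional 8-byte read is made safe by the 7-byte padding mentioned in the `debug_assert`.

```rust
/// Sketch of BitUnpacker::get's addressing, assuming num_bits <= 56 so that
/// bit_shift + num_bits never exceeds the 64 bits read below.
fn unpack(data: &[u8], idx: u64, num_bits: u8) -> u64 {
    let addr_in_bits = idx * num_bits as u64;
    let addr = (addr_in_bits >> 3) as usize; // byte containing the first bit
    let bit_shift = (addr_in_bits & 7) as u32; // bit offset within that byte
    // The serialized data is padded with 7 extra bytes, so an 8-byte read
    // starting at the last meaningful byte stays in bounds.
    let bytes: [u8; 8] = data[addr..addr + 8].try_into().unwrap();
    let mask = (1u64 << num_bits) - 1; // keep only the low num_bits bits
    (u64::from_le_bytes(bytes) >> bit_shift) & mask
}
```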

bitpacker/src/blocked_bitpacker.rs

@@ -84,7 +84,7 @@ impl BlockedBitpacker {
#[inline]
pub fn add(&mut self, val: u64) {
self.buffer.push(val);
if self.buffer.len() == BLOCK_SIZE {
if self.buffer.len() == BLOCK_SIZE as usize {
self.flush();
}
}
@@ -126,11 +126,11 @@ impl BlockedBitpacker {
}
#[inline]
pub fn get(&self, idx: usize) -> u64 {
let metadata_pos = idx / BLOCK_SIZE;
let pos_in_block = idx % BLOCK_SIZE;
let metadata_pos = idx / BLOCK_SIZE as usize;
let pos_in_block = idx % BLOCK_SIZE as usize;
if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
let unpacked = BitUnpacker::new(metadata.num_bits()).get(
pos_in_block as u32,
pos_in_block as u64,
&self.compressed_blocks[metadata.offset() as usize..],
);
unpacked + metadata.base_value()
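The lookup above is a two-level scheme: values are flushed in fixed-size blocks, each block storing bit-packed deltas against a per-block base value. A sketch with illustrative names (reusing the `unpack` helper from the previous sketch; the real `BLOCK_SIZE` is defined elsewhere in this crate):

```rust
const BLOCK_SIZE: usize = 128; // assumed block size for this sketch

struct BlockMetadata {
    offset: usize,   // byte offset of the block within the compressed buffer
    num_bits: u8,    // bits used per delta in this block
    base_value: u64, // value the deltas are relative to
}

fn blocked_get(metas: &[BlockMetadata], compressed: &[u8], idx: usize) -> u64 {
    let meta = &metas[idx / BLOCK_SIZE];
    let pos_in_block = (idx % BLOCK_SIZE) as u64;
    // Unpack the delta and add the block's base value back.
    meta.base_value + unpack(&compressed[meta.offset..], pos_in_block, meta.num_bits)
}
```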

bitpacker/src/lib.rs

@@ -1,8 +1,6 @@
mod bitpacker;
mod blocked_bitpacker;
use std::cmp::Ordering;
pub use crate::bitpacker::{BitPacker, BitUnpacker};
pub use crate::blocked_bitpacker::BlockedBitpacker;
@@ -39,104 +37,44 @@ pub fn compute_num_bits(n: u64) -> u8 {
}
}
/// Computes the (min, max) of an iterator of `PartialOrd` values.
///
/// For values implementing `Ord` (in a way consistent to their `PartialOrd` impl),
/// this function behaves as expected.
///
/// For values with partial ordering, the behavior is non-trivial and may
/// depend on the order of the values.
/// For floats however, it simply returns the same results as if NaN were
/// skipped.
pub fn minmax<I, T>(mut vals: I) -> Option<(T, T)>
where
I: Iterator<Item = T>,
T: Copy + PartialOrd,
T: Copy + Ord,
{
let first_el = vals.find(|val| {
// We use this to make sure we skip all NaN values when
// working with a float type.
val.partial_cmp(val) == Some(Ordering::Equal)
})?;
let mut min_so_far: T = first_el;
let mut max_so_far: T = first_el;
for val in vals {
if val.partial_cmp(&min_so_far) == Some(Ordering::Less) {
min_so_far = val;
}
if val.partial_cmp(&max_so_far) == Some(Ordering::Greater) {
max_so_far = val;
}
if let Some(first_el) = vals.next() {
return Some(vals.fold((first_el, first_el), |(min_val, max_val), el| {
(min_val.min(el), max_val.max(el))
}));
}
Some((min_so_far, max_so_far))
None
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
assert_eq!(compute_num_bits(0), 0u8);
assert_eq!(compute_num_bits(2), 2u8);
assert_eq!(compute_num_bits(3), 2u8);
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
#[test]
fn test_minmax_empty() {
let vals: Vec<u32> = vec![];
assert_eq!(minmax(vals.into_iter()), None);
}
#[test]
fn test_minmax_one() {
assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
}
#[test]
fn test_minmax_two() {
assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
}
#[test]
fn test_minmax_nan() {
assert_eq!(
minmax(vec![f64::NAN, 1f64, 2f64].into_iter()),
Some((1f64, 2f64))
);
assert_eq!(
minmax(vec![2f64, f64::NAN, 1f64].into_iter()),
Some((1f64, 2f64))
);
assert_eq!(
minmax(vec![2f64, 1f64, f64::NAN].into_iter()),
Some((1f64, 2f64))
);
}
#[test]
fn test_minmax_inf() {
assert_eq!(
minmax(vec![f64::INFINITY, 1f64, 2f64].into_iter()),
Some((1f64, f64::INFINITY))
);
assert_eq!(
minmax(vec![-f64::INFINITY, 1f64, 2f64].into_iter()),
Some((-f64::INFINITY, 2f64))
);
assert_eq!(
minmax(vec![2f64, f64::INFINITY, 1f64].into_iter()),
Some((1f64, f64::INFINITY))
);
assert_eq!(
minmax(vec![2f64, 1f64, -f64::INFINITY].into_iter()),
Some((-f64::INFINITY, 2f64))
);
}
#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
assert_eq!(compute_num_bits(0), 0u8);
assert_eq!(compute_num_bits(2), 2u8);
assert_eq!(compute_num_bits(3), 2u8);
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
#[test]
fn test_minmax_empty() {
let vals: Vec<u32> = vec![];
assert_eq!(minmax(vals.into_iter()), None);
}
#[test]
fn test_minmax_one() {
assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
}
#[test]
fn test_minmax_two() {
assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
}

columnar/Cargo.toml

@@ -1,18 +0,0 @@
[package]
name = "tantivy-columnar"
version = "0.1.0"
edition = "2021"
license = "MIT"
[dependencies]
stacker = { path = "../stacker", package="tantivy-stacker"}
serde_json = "1"
thiserror = "1"
fnv = "1"
sstable = { path = "../sstable", package = "tantivy-sstable" }
common = { path = "../common", package = "tantivy-common" }
fastfield_codecs = { path = "../fastfield_codecs"}
itertools = "0.10"
[dev-dependencies]
proptest = "1"

columnar/README.md

@@ -1,67 +0,0 @@
# Columnar format
This crate describes the columnar format used in tantivy.
## Goals
This format is special in the following ways.
- it needs to be compact
- it is not required to be loaded entirely in memory.
- it is designed to fit well with quickwit's strange constraint:
we need to be able to load columns rapidly.
- columns of several types can be associated with the same column name.
- it needs to support columns with different types `(str, u64, i64, f64)`
and different cardinalities `(required, optional, multivalued)`.
- columns, once loaded, offer cheap random access.
## Coercion rules
Users can create a columnar by inserting rows into a `ColumnarWriter`,
and serializing it into a `Write` object.
Nothing prevents a user from recording values with different types under the same `column_name`.
In that case, `tantivy-columnar`'s behavior is as follows:
- JsonValues are grouped into 3 types (String, Number, bool).
Values that correspond to different groups are mapped to different columns. For instance, String values are treated independently
from Number or boolean values. `tantivy-columnar` will simply emit several columns associated with a given column_name.
- Only one column for a given json value type is emitted. If number values with different number types are recorded (e.g. u64, i64, f64),
`tantivy-columnar` will pick the first type that can represent the set of appended values, with the following priority order (`i64`, `u64`, `f64`).
`i64` is picked over `u64` as it is likely to yield fewer type changes: most use cases that strictly require `u64` hit that
restriction on about 50% of the values (e.g. a 64-bit hash), whereas many use cases only occasionally contain a negative value.
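A sketch of how this decision can be tracked incrementally while values are appended (it mirrors the `CompatibleNumericalTypes` helper that appears in `column_writers.rs` further down; the names here are illustrative):

```rust
enum NumericalType { I64, U64, F64 }

/// Tracks which numerical types can still represent every value seen so far.
struct Compatible {
    i64_ok: bool,
    u64_ok: bool,
    // f64 is always acceptable.
}

impl Compatible {
    fn accept_i64(&mut self, val: i64) {
        self.u64_ok &= val >= 0; // a negative value rules out u64
    }
    fn accept_u64(&mut self, val: u64) {
        self.i64_ok &= val < i64::MAX as u64; // a huge value rules out i64
    }
    fn accept_f64(&mut self) {
        self.i64_ok = false;
        self.u64_ok = false;
    }
    /// Priority order from the coercion rules: i64, then u64, then f64.
    fn pick(&self) -> NumericalType {
        if self.i64_ok {
            NumericalType::I64
        } else if self.u64_ok {
            NumericalType::U64
        } else {
            NumericalType::F64
        }
    }
}
```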
## Columnar format
This columnar format may have more than one column (with different types) associated with the same `column_name` (see [Coercion rules](#coercion-rules) above).
The `(column_name, column_type)` couple however uniquely identifies a column.
That couple is serialized as a `column_key`. The format of that key is:
`[column_name][ZERO_BYTE][column_type_header: u8]`
```
COLUMNAR:=
[COLUMNAR_DATA]
[COLUMNAR_KEY_TO_DATA_INDEX]
[COLUMNAR_FOOTER];
# Columns are sorted by their column key.
COLUMNAR_DATA:=
[COLUMN_DATA]+;
COLUMNAR_FOOTER := [RANGE_SSTABLE_BYTES_LEN: 8 bytes little endian]
```
The columnar file starts with the actual column data, concatenated one after the other,
sorted by column key.
An sstable associates
`(column_name, column_cardinality, column_type)` keys to ranges of bytes.
Column names may not contain the zero byte `\0`.
Listing all columns associated with a `column_name` can therefore
be done by listing all keys prefixed by
`[column_name][ZERO_BYTE]`.
The associated range of bytes refers to the location of the column data within the `COLUMNAR_DATA` section.
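A sketch of building such a key (the zero byte separates the name from the one-byte type header, which is exactly why column names may not contain `\0`):

```rust
/// Builds the sstable key for a column:
/// [column_name][ZERO_BYTE][column_type_header: u8]
fn column_key(column_name: &str, column_type_header: u8) -> Vec<u8> {
    assert!(
        !column_name.as_bytes().contains(&0u8),
        "column names may not contain the zero byte"
    );
    let mut key = Vec::with_capacity(column_name.len() + 2);
    key.extend_from_slice(column_name.as_bytes());
    key.push(0u8); // the separator that makes prefix listing possible
    key.push(column_type_header);
    key
}
```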

columnar/src/column_type_header.rs

@@ -1,200 +0,0 @@
use crate::utils::{place_bits, select_bits};
use crate::value::NumericalType;
use crate::InvalidData;
/// Enum describing the number of values that can exist per document
/// (or per row if you will).
///
/// The cardinality must fit on 2 bits.
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u8)]
pub enum Cardinality {
/// All documents contain exactly one value.
#[default]
Required = 0,
/// All documents contain at most one value.
Optional = 1,
/// All documents may contain any number of values.
Multivalued = 2,
}
impl Cardinality {
pub(crate) fn to_code(self) -> u8 {
self as u8
}
pub(crate) fn try_from_code(code: u8) -> Result<Cardinality, InvalidData> {
match code {
0 => Ok(Cardinality::Required),
1 => Ok(Cardinality::Optional),
2 => Ok(Cardinality::Multivalued),
_ => Err(InvalidData),
}
}
}
/// The column type identifies the type of a column's values and fits on 6 bits.
///
/// - bits[0..3]: Column category type.
/// - bits[3..6]: Numerical type if necessary.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)]
pub enum ColumnType {
Bytes,
Numerical(NumericalType),
Bool,
}
impl ColumnType {
/// Encoded over 6 bits.
pub(crate) fn to_code(self) -> u8 {
let column_type_category;
let numerical_type_code: u8;
match self {
ColumnType::Bytes => {
column_type_category = ColumnTypeCategory::Str;
numerical_type_code = 0u8;
}
ColumnType::Numerical(numerical_type) => {
column_type_category = ColumnTypeCategory::Numerical;
numerical_type_code = numerical_type.to_code();
}
ColumnType::Bool => {
column_type_category = ColumnTypeCategory::Bool;
numerical_type_code = 0u8;
}
}
place_bits::<0, 3>(column_type_category.to_code()) | place_bits::<3, 6>(numerical_type_code)
}
pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
if select_bits::<6, 8>(code) != 0u8 {
return Err(InvalidData);
}
let column_type_category_code = select_bits::<0, 3>(code);
let numerical_type_code = select_bits::<3, 6>(code);
let column_type_category = ColumnTypeCategory::try_from_code(column_type_category_code)?;
match column_type_category {
ColumnTypeCategory::Bool => {
if numerical_type_code != 0u8 {
return Err(InvalidData);
}
Ok(ColumnType::Bool)
}
ColumnTypeCategory::Str => {
if numerical_type_code != 0u8 {
return Err(InvalidData);
}
Ok(ColumnType::Bytes)
}
ColumnTypeCategory::Numerical => {
let numerical_type = NumericalType::try_from_code(numerical_type_code)?;
Ok(ColumnType::Numerical(numerical_type))
}
}
}
}
/// Column types are grouped into different categories that
/// correspond to the different `JsonValue` types.
///
/// The columnar writer will apply coercion rules to make sure that
/// at most one column exist per `ColumnTypeCategory`.
///
/// See also [README.md].
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
#[repr(u8)]
pub(crate) enum ColumnTypeCategory {
Bool = 0u8,
Str = 1u8,
Numerical = 2u8,
}
impl ColumnTypeCategory {
pub fn to_code(self) -> u8 {
self as u8
}
pub fn try_from_code(code: u8) -> Result<Self, InvalidData> {
match code {
0u8 => Ok(Self::Bool),
1u8 => Ok(Self::Str),
2u8 => Ok(Self::Numerical),
_ => Err(InvalidData),
}
}
}
/// Represents the type and cardinality of a column.
/// This is encoded over one byte and added to a column key in the
/// columnar sstable.
///
/// - [0..6] bits: encodes the column type
/// - [6..8] bits: encodes the cardinality
#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone)]
pub struct ColumnTypeAndCardinality {
pub typ: ColumnType,
pub cardinality: Cardinality,
}
impl ColumnTypeAndCardinality {
pub fn to_code(self) -> u8 {
place_bits::<6, 8>(self.cardinality.to_code()) | place_bits::<0, 6>(self.typ.to_code())
}
pub fn try_from_code(code: u8) -> Result<ColumnTypeAndCardinality, InvalidData> {
let typ_code = select_bits::<0, 6>(code);
let cardinality_code = select_bits::<6, 8>(code);
let cardinality = Cardinality::try_from_code(cardinality_code)?;
let typ = ColumnType::try_from_code(typ_code)?;
assert_eq!(typ.to_code(), typ_code);
Ok(ColumnTypeAndCardinality { cardinality, typ })
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::ColumnTypeAndCardinality;
use crate::column_type_header::{Cardinality, ColumnType};
#[test]
fn test_column_type_header_to_code() {
let mut column_type_header_set: HashSet<ColumnTypeAndCardinality> = HashSet::new();
for code in u8::MIN..=u8::MAX {
if let Ok(column_type_header) = ColumnTypeAndCardinality::try_from_code(code) {
assert_eq!(column_type_header.to_code(), code);
assert!(column_type_header_set.insert(column_type_header));
}
}
assert_eq!(
column_type_header_set.len(),
3 /* cardinality */ *
(1 + 1 + 3) // column_types (str, bool, numerical x 3)
);
}
#[test]
fn test_column_type_to_code() {
let mut column_type_set: HashSet<ColumnType> = HashSet::new();
for code in u8::MIN..=u8::MAX {
if let Ok(column_type) = ColumnType::try_from_code(code) {
assert_eq!(column_type.to_code(), code);
assert!(column_type_set.insert(column_type));
}
}
assert_eq!(column_type_set.len(), 2 + 3);
}
#[test]
fn test_cardinality_to_code() {
let mut num_cardinality = 0;
for code in u8::MIN..=u8::MAX {
if let Ok(cardinality) = Cardinality::try_from_code(code) {
assert_eq!(cardinality.to_code(), code);
num_cardinality += 1;
}
}
assert_eq!(num_cardinality, 3);
}
}

columnar/src/dictionary.rs

@@ -1,84 +0,0 @@
use std::io;
use fnv::FnvHashMap;
use sstable::SSTable;
pub(crate) struct TermIdMapping {
unordered_to_ord: Vec<OrderedId>,
}
impl TermIdMapping {
pub fn to_ord(&self, unordered: UnorderedId) -> OrderedId {
self.unordered_to_ord[unordered.0 as usize]
}
}
/// When we add values, we cannot know their ordered id yet.
/// For this reason, we temporarily assign them an `UnorderedId`
/// that will be mapped to an `OrderedId` upon serialization.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct UnorderedId(pub u32);
#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
pub struct OrderedId(pub u32);
/// `DictionaryBuilder` for dictionary encoding.
///
/// It stores the different terms encountered and assigns them a temporary value
/// we call an unordered id.
///
/// Upon serialization, we will sort the ids and hence build an `UnorderedId -> Term ordinal`
/// mapping.
#[derive(Default)]
pub(crate) struct DictionaryBuilder {
dict: FnvHashMap<Vec<u8>, UnorderedId>,
}
impl DictionaryBuilder {
/// Get or allocate an unordered id.
/// (This ID is simply an auto-incremented id.)
pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
if let Some(term_id) = self.dict.get(term) {
return *term_id;
}
let new_id = UnorderedId(self.dict.len() as u32);
self.dict.insert(term.to_vec(), new_id);
new_id
}
/// Serializes the dictionary into an sstable, and returns the
/// `UnorderedId -> TermOrdinal` mapping.
pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<TermIdMapping> {
let mut terms: Vec<(&[u8], UnorderedId)> =
self.dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
terms.sort_unstable_by_key(|(key, _)| *key);
// TODO Remove the allocation.
let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
let mut sstable_builder = sstable::VoidSSTable::writer(wrt);
for (ord, (key, unordered_id)) in terms.into_iter().enumerate() {
let ordered_id = OrderedId(ord as u32);
sstable_builder.insert(key, &())?;
unordered_to_ord[unordered_id.0 as usize] = ordered_id;
}
sstable_builder.finish()?;
Ok(TermIdMapping { unordered_to_ord })
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_dictionary_builder() {
let mut dictionary_builder = DictionaryBuilder::default();
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello");
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy");
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax");
let mut buffer = Vec::new();
let id_mapping = dictionary_builder.serialize(&mut buffer).unwrap();
assert_eq!(id_mapping.to_ord(hello_uid), OrderedId(1));
assert_eq!(id_mapping.to_ord(happy_uid), OrderedId(0));
assert_eq!(id_mapping.to_ord(tax_uid), OrderedId(2));
}
}

columnar/src/lib.rs

@@ -1,91 +0,0 @@
mod column_type_header;
mod dictionary;
mod reader;
pub(crate) mod utils;
mod value;
mod writer;
pub use column_type_header::Cardinality;
pub use reader::ColumnarReader;
pub use value::{NumericalType, NumericalValue};
pub use writer::ColumnarWriter;
pub use reader::ColumnHandle;
pub type DocId = u32;
#[derive(Copy, Clone, Debug)]
pub struct InvalidData;
#[cfg(test)]
mod tests {
use common::file_slice::FileSlice;
use crate::column_type_header::ColumnType;
use crate::reader::{ColumnarReader, ColumnHandle};
use crate::value::NumericalValue;
use crate::{Cardinality, ColumnarWriter};
#[test]
fn test_dataframe_writer_bytes() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_str(1u32, "my_string", "hello");
dataframe_writer.record_str(3u32, "my_string", "helloeee");
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar_fileslice = FileSlice::from(buffer);
let columnar = ColumnarReader::open(columnar_fileslice).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<ColumnHandle> =
columnar.read_columns("my_string").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 158);
}
#[test]
fn test_dataframe_writer_bool() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_bool(1u32, "bool.value", false);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar_fileslice = FileSlice::from(buffer);
let columnar = ColumnarReader::open(columnar_fileslice).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<ColumnHandle> =
columnar.read_columns("bool.value").unwrap();
assert_eq!(cols.len(), 1);
let col = cols.into_iter().next().unwrap();
assert_eq!(
col.column_type(),
ColumnType::Bool
);
assert_eq!(
col.cardinality(),
Cardinality::Optional);
assert_eq!(
col.column_name(),
"bool.value"
);
}
#[test]
fn test_dataframe_writer_numerical() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "srical.value", NumericalValue::U64(12u64));
dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar_fileslice = FileSlice::from(buffer);
let columnar = ColumnarReader::open(columnar_fileslice).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<ColumnHandle> =
columnar.read_columns("srical.value").unwrap();
assert_eq!(cols.len(), 1);
// Right now these 31 bytes are spent as follows
//
// - header 14 bytes
// - vals 8 //< due to padding? could have been 1byte?.
// - null footer 6 bytes
// - version footer 3 bytes // Should be file-wide
assert_eq!(cols[0].num_bytes(), 31);
}
}

columnar/src/reader/column_handle.rs

@@ -1,42 +0,0 @@
use common::HasLen;
use common::file_slice::FileSlice;
use crate::Cardinality;
use crate::column_type_header::ColumnType;
pub struct ColumnHandle {
column_name: String, //< Mostly for debug and display.
data: FileSlice,
column_type: ColumnType,
cardinality: Cardinality,
}
impl ColumnHandle {
pub fn new(column_name: String, data: FileSlice, column_type: ColumnType, cardinality: Cardinality) -> Self {
ColumnHandle {
column_name,
data,
column_type,
cardinality,
}
}
pub fn column_name(&self) -> &str {
self.column_name.as_str()
}
pub fn num_bytes(&self) -> usize {
self.data.len()
}
pub fn column_type(&self) -> ColumnType {
self.column_type
}
pub fn cardinality(&self) -> Cardinality {
self.cardinality
}
}

columnar/src/reader/mod.rs

@@ -1,116 +0,0 @@
mod column_handle;
use std::ops::Range;
use std::{io, mem};
use common::file_slice::FileSlice;
use common::BinarySerializable;
use sstable::{Dictionary, RangeSSTable};
use crate::column_type_header::ColumnTypeAndCardinality;
pub use crate::reader::column_handle::ColumnHandle;
fn io_invalid_data(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, msg)
}
/// The ColumnarReader makes it possible to access a set of columns
/// associated with field names.
pub struct ColumnarReader {
column_dictionary: Dictionary<RangeSSTable>,
column_data: FileSlice,
}
impl ColumnarReader {
/// Opens a new Columnar file.
pub fn open<F>(file_slice: F) -> io::Result<ColumnarReader>
where FileSlice: From<F> {
Self::open_inner(file_slice.into())
}
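// Columnar layout, parsed here back to front (see the columnar README):
// [column data][range sstable][sstable_len: u64, little endian].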
fn open_inner(file_slice: FileSlice) -> io::Result<ColumnarReader> {
let (file_slice_without_sstable_len, sstable_len_bytes) =
file_slice.split_from_end(mem::size_of::<u64>());
let mut sstable_len_bytes = sstable_len_bytes.read_bytes()?;
let sstable_len = u64::deserialize(&mut sstable_len_bytes)?;
let (column_data, sstable) =
file_slice_without_sstable_len.split_from_end(sstable_len as usize);
let column_dictionary = Dictionary::open(sstable)?;
Ok(ColumnarReader {
column_dictionary,
column_data,
})
}
// TODO fix ugly API
pub fn list_columns(
&self,
) -> io::Result<Vec<(String, ColumnTypeAndCardinality, Range<u64>, u64)>> {
let mut stream = self.column_dictionary.stream()?;
let mut results = Vec::new();
while stream.advance() {
let key_bytes: &[u8] = stream.key();
let column_code: u8 = key_bytes.last().cloned().unwrap();
let column_type_and_cardinality = ColumnTypeAndCardinality::try_from_code(column_code)
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
let range = stream.value().clone();
let column_name = String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 1]);
let range_len = range.end - range.start;
results.push((
column_name.to_string(),
column_type_and_cardinality,
range,
range_len,
));
}
Ok(results)
}
/// Get all columns for the given column name.
///
/// There can be more than one column associated with a given column name, provided they have
/// different types.
// TODO fix ugly API
pub fn read_columns(
&self,
column_name: &str,
) -> io::Result<Vec<ColumnHandle>> {
// Each column is associated with a given `column_key`,
// that starts with `column_name\0column_header`.
//
// Listing the columns associated with the given column name is therefore equivalent to listing
// the `column_key`s with the prefix `column_name\0`.
//
// This is in turn equivalent to searching for the range
// `[column_name\0 .. column_name\1)`.
let mut start_key = column_name.to_string();
start_key.push('\0');
let mut end_key = column_name.to_string();
end_key.push(1u8 as char);
let mut stream = self
.column_dictionary
.range()
.ge(start_key.as_bytes())
.lt(end_key.as_bytes())
.into_stream()?;
let mut results: Vec<ColumnHandle> = Vec::new();
while stream.advance() {
let key_bytes: &[u8] = stream.key();
assert!(key_bytes.starts_with(start_key.as_bytes()));
let column_code: u8 = key_bytes.last().cloned().unwrap();
let column_type_and_cardinality = ColumnTypeAndCardinality::try_from_code(column_code)
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
let Range { start, end } = stream.value().clone();
let column_data = self.column_data.slice(start as usize..end as usize);
let column_handle = ColumnHandle::new(column_name.to_string(), column_data, column_type_and_cardinality.typ, column_type_and_cardinality.cardinality);
results.push(column_handle);
}
Ok(results)
}
/// Return the number of columns in the columnar.
pub fn num_columns(&self) -> usize {
self.column_dictionary.num_terms()
}
}

columnar/src/utils.rs

@@ -1,76 +0,0 @@
const fn compute_mask(num_bits: u8) -> u8 {
if num_bits == 8 {
u8::MAX
} else {
(1u8 << num_bits) - 1
}
}
#[inline(always)]
#[must_use]
pub(crate) fn select_bits<const START: u8, const END: u8>(code: u8) -> u8 {
assert!(START <= END);
assert!(END <= 8);
let num_bits: u8 = END - START;
let mask: u8 = compute_mask(num_bits);
(code >> START) & mask
}
#[inline(always)]
#[must_use]
pub(crate) fn place_bits<const START: u8, const END: u8>(code: u8) -> u8 {
assert!(START <= END);
assert!(END <= 8);
let num_bits: u8 = END - START;
let mask: u8 = compute_mask(num_bits);
assert!(code <= mask);
code << START
}
/// Pops the first byte off a slice of bytes, advancing the slice.
#[inline(always)]
pub fn pop_first_byte(bytes: &mut &[u8]) -> Option<u8> {
if bytes.is_empty() {
return None;
}
let first_byte = bytes[0];
*bytes = &bytes[1..];
Some(first_byte)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_select_bits() {
assert_eq!(255u8, select_bits::<0, 8>(255u8));
assert_eq!(0u8, select_bits::<0, 0>(255u8));
assert_eq!(8u8, select_bits::<0, 4>(8u8));
assert_eq!(4u8, select_bits::<1, 4>(8u8));
assert_eq!(0u8, select_bits::<1, 3>(8u8));
}
#[test]
fn test_place_bits() {
assert_eq!(255u8, place_bits::<0, 8>(255u8));
assert_eq!(4u8, place_bits::<2, 3>(1u8));
assert_eq!(0u8, place_bits::<2, 2>(0u8));
}
#[test]
#[should_panic]
fn test_place_bits_overflows() {
let _ = place_bits::<1, 4>(8u8);
}
#[test]
fn test_pop_first_byte() {
let mut cursor: &[u8] = &b"abcd"[..];
assert_eq!(pop_first_byte(&mut cursor), Some(b'a'));
assert_eq!(pop_first_byte(&mut cursor), Some(b'b'));
assert_eq!(pop_first_byte(&mut cursor), Some(b'c'));
assert_eq!(pop_first_byte(&mut cursor), Some(b'd'));
assert_eq!(pop_first_byte(&mut cursor), None);
}
}

columnar/src/value.rs

@@ -1,124 +0,0 @@
use crate::InvalidData;
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum NumericalValue {
I64(i64),
U64(u64),
F64(f64),
}
impl From<u64> for NumericalValue {
fn from(val: u64) -> NumericalValue {
NumericalValue::U64(val)
}
}
impl From<i64> for NumericalValue {
fn from(val: i64) -> Self {
NumericalValue::I64(val)
}
}
impl From<f64> for NumericalValue {
fn from(val: f64) -> Self {
NumericalValue::F64(val)
}
}
impl NumericalValue {
pub fn numerical_type(&self) -> NumericalType {
match self {
NumericalValue::F64(_) => NumericalType::F64,
NumericalValue::I64(_) => NumericalType::I64,
NumericalValue::U64(_) => NumericalType::U64,
}
}
}
impl Eq for NumericalValue {}
#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]
#[repr(u8)]
pub enum NumericalType {
#[default]
I64 = 0,
U64 = 1,
F64 = 2,
}
impl NumericalType {
pub fn to_code(self) -> u8 {
self as u8
}
pub fn try_from_code(code: u8) -> Result<NumericalType, InvalidData> {
match code {
0 => Ok(NumericalType::I64),
1 => Ok(NumericalType::U64),
2 => Ok(NumericalType::F64),
_ => Err(InvalidData),
}
}
}
/// We voluntarily avoid using `Into` here to keep this
/// implementation quirk as private as possible.
///
/// # Panics
/// This coercion trait actually panics if it is used
/// to convert a loose type to a stricter type.
///
/// The order of strictness is somewhat arbitrary; from strictest to loosest:
/// - i64
/// - u64
/// - f64.
pub(crate) trait Coerce {
fn coerce(numerical_value: NumericalValue) -> Self;
}
impl Coerce for i64 {
fn coerce(value: NumericalValue) -> Self {
match value {
NumericalValue::I64(val) => val,
NumericalValue::U64(val) => val as i64,
NumericalValue::F64(_) => unreachable!(),
}
}
}
impl Coerce for u64 {
fn coerce(value: NumericalValue) -> Self {
match value {
NumericalValue::I64(val) => val as u64,
NumericalValue::U64(val) => val,
NumericalValue::F64(_) => unreachable!(),
}
}
}
impl Coerce for f64 {
fn coerce(value: NumericalValue) -> Self {
match value {
NumericalValue::I64(val) => val as f64,
NumericalValue::U64(val) => val as f64,
NumericalValue::F64(val) => val,
}
}
}
#[cfg(test)]
mod tests {
use super::NumericalType;
#[test]
fn test_numerical_type_code() {
let mut num_numerical_type = 0;
for code in u8::MIN..=u8::MAX {
if let Ok(numerical_type) = NumericalType::try_from_code(code) {
assert_eq!(numerical_type.to_code(), code);
num_numerical_type += 1;
}
}
assert_eq!(num_numerical_type, 3);
}
}

columnar/src/writer/column_operation.rs

@@ -1,311 +0,0 @@
use crate::dictionary::UnorderedId;
use crate::utils::{place_bits, pop_first_byte, select_bits};
use crate::value::NumericalValue;
use crate::{DocId, NumericalType};
/// When we build a columnar dataframe, we first just group
/// all mutations per column, and append them to an append-only object.
///
/// We represent all of these operations as `ColumnOperation`s.
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
pub(crate) enum ColumnOperation<T> {
NewDoc(DocId),
Value(T),
}
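/// One header byte per operation: the low 4 bits encode the payload length,
/// the high 4 bits encode the operation type (new-doc vs. value).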
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct ColumnOperationHeader {
typ_code: u8,
len: u8,
}
impl ColumnOperationHeader {
fn to_code(self) -> u8 {
place_bits::<0, 4>(self.len) | place_bits::<4, 8>(self.typ_code)
}
fn from_code(code: u8) -> Self {
let len = select_bits::<0, 4>(code);
let typ_code = select_bits::<4, 8>(code);
ColumnOperationHeader { typ_code, len }
}
}
const NEW_DOC_CODE: u8 = 0u8;
const NEW_VALUE_CODE: u8 = 1u8;
impl<V: SymbolValue> ColumnOperation<V> {
pub fn serialize(self) -> impl AsRef<[u8]> {
let mut minibuf = MiniBuffer::default();
let header = match self {
ColumnOperation::NewDoc(new_doc) => {
let symbol_len = new_doc.serialize(&mut minibuf.bytes[1..]);
ColumnOperationHeader {
typ_code: NEW_DOC_CODE,
len: symbol_len,
}
}
ColumnOperation::Value(val) => {
let symbol_len = val.serialize(&mut minibuf.bytes[1..]);
ColumnOperationHeader {
typ_code: NEW_VALUE_CODE,
len: symbol_len,
}
}
};
minibuf.bytes[0] = header.to_code();
minibuf.len = 1 + header.len;
minibuf
}
/// Deserializes a column operation.
/// Returns None if the buffer is empty.
///
/// Panics if the payload is invalid.
pub fn deserialize(bytes: &mut &[u8]) -> Option<Self> {
let header_byte = pop_first_byte(bytes)?;
let column_op_header = ColumnOperationHeader::from_code(header_byte);
let symbol_bytes: &[u8];
(symbol_bytes, *bytes) = bytes.split_at(column_op_header.len as usize);
match column_op_header.typ_code {
NEW_DOC_CODE => {
let new_doc = u32::deserialize(symbol_bytes);
Some(ColumnOperation::NewDoc(new_doc))
}
NEW_VALUE_CODE => {
let value = V::deserialize(symbol_bytes);
Some(ColumnOperation::Value(value))
}
_ => {
panic!("Unknown code {}", column_op_header.typ_code);
}
}
}
}
impl<T> From<T> for ColumnOperation<T> {
fn from(value: T) -> Self {
ColumnOperation::Value(value)
}
}
#[allow(clippy::from_over_into)]
pub(crate) trait SymbolValue: Clone + Copy {
fn serialize(self, buffer: &mut [u8]) -> u8;
// Deserializes a value from the given bytes.
//
// `bytes` does not contain the header byte, and holds
// exactly the payload of the operation.
fn deserialize(bytes: &[u8]) -> Self;
}
impl SymbolValue for bool {
fn serialize(self, buffer: &mut [u8]) -> u8 {
buffer[0] = if self { 1u8 } else { 0u8 };
1u8
}
fn deserialize(bytes: &[u8]) -> Self {
bytes[0] == 1u8
}
}
#[derive(Default)]
struct MiniBuffer {
pub bytes: [u8; 10],
pub len: u8,
}
impl AsRef<[u8]> for MiniBuffer {
fn as_ref(&self) -> &[u8] {
&self.bytes[..self.len as usize]
}
}
impl SymbolValue for NumericalValue {
fn deserialize(mut bytes: &[u8]) -> Self {
let type_code = pop_first_byte(&mut bytes).unwrap();
let symbol_type = NumericalType::try_from_code(type_code).unwrap();
let mut octet: [u8; 8] = [0u8; 8];
octet[..bytes.len()].copy_from_slice(bytes);
match symbol_type {
NumericalType::U64 => {
let val: u64 = u64::from_le_bytes(octet);
NumericalValue::U64(val)
}
NumericalType::I64 => {
let encoded: u64 = u64::from_le_bytes(octet);
let val: i64 = decode_zig_zag(encoded);
NumericalValue::I64(val)
}
NumericalType::F64 => {
debug_assert_eq!(bytes.len(), 8);
let val: f64 = f64::from_le_bytes(octet);
NumericalValue::F64(val)
}
}
}
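// Note: all 8 little-endian bytes are copied into the scratch buffer below,
// but only the reported minimal length is kept (the MiniBuffer truncates to
// it), and `deserialize` zero-extends the truncated payload back to 8 bytes.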
fn serialize(self, output: &mut [u8]) -> u8 {
match self {
NumericalValue::F64(val) => {
output[0] = NumericalType::F64 as u8;
output[1..9].copy_from_slice(&val.to_le_bytes());
9u8
}
NumericalValue::U64(val) => {
let len = compute_num_bytes_for_u64(val) as u8;
output[0] = NumericalType::U64 as u8;
output[1..9].copy_from_slice(&val.to_le_bytes());
len + 1u8
}
NumericalValue::I64(val) => {
let zig_zag_encoded = encode_zig_zag(val);
let len = compute_num_bytes_for_u64(zig_zag_encoded) as u8;
output[0] = NumericalType::I64 as u8;
output[1..9].copy_from_slice(&zig_zag_encoded.to_le_bytes());
len + 1u8
}
}
}
}
impl SymbolValue for u32 {
fn serialize(self, output: &mut [u8]) -> u8 {
let len = compute_num_bytes_for_u64(self as u64);
output[0..4].copy_from_slice(&self.to_le_bytes());
len as u8
}
fn deserialize(bytes: &[u8]) -> Self {
let mut quartet: [u8; 4] = [0u8; 4];
quartet[..bytes.len()].copy_from_slice(bytes);
u32::from_le_bytes(quartet)
}
}
impl SymbolValue for UnorderedId {
fn serialize(self, output: &mut [u8]) -> u8 {
self.0.serialize(output)
}
fn deserialize(bytes: &[u8]) -> Self {
UnorderedId(u32::deserialize(bytes))
}
}
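/// Minimal number of little-endian bytes needed to represent `val`:
/// 0 needs 0 bytes, 1..=255 needs 1 byte, 256..=65535 needs 2 bytes, etc.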
fn compute_num_bytes_for_u64(val: u64) -> usize {
let msb = (64u32 - val.leading_zeros()) as usize;
(msb + 7) / 8
}
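/// Zig-zag encoding: maps integers close to zero (of either sign) to small
/// unsigned codes: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...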
fn encode_zig_zag(n: i64) -> u64 {
((n << 1) ^ (n >> 63)) as u64
}
fn decode_zig_zag(n: u64) -> i64 {
((n >> 1) as i64) ^ (-((n & 1) as i64))
}
#[cfg(test)]
mod tests {
use super::*;
#[track_caller]
fn test_zig_zag_aux(val: i64) {
let encoded = super::encode_zig_zag(val);
assert_eq!(decode_zig_zag(encoded), val);
if let Some(abs_val) = val.checked_abs() {
let abs_val = abs_val as u64;
assert!(encoded <= abs_val * 2);
}
}
#[test]
fn test_zig_zag() {
assert_eq!(encode_zig_zag(0i64), 0u64);
assert_eq!(encode_zig_zag(-1i64), 1u64);
assert_eq!(encode_zig_zag(1i64), 2u64);
test_zig_zag_aux(0i64);
test_zig_zag_aux(i64::MIN);
test_zig_zag_aux(i64::MAX);
}
use proptest::prelude::any;
use proptest::proptest;
proptest! {
#[test]
fn test_proptest_zig_zag(val in any::<i64>()) {
test_zig_zag_aux(val);
}
}
#[test]
fn test_header_byte_serialization() {
for len in 0..=15 {
for typ_code in 0..=15 {
let header = ColumnOperationHeader { typ_code, len };
let header_code = header.to_code();
let serdeser_header = ColumnOperationHeader::from_code(header_code);
assert_eq!(header, serdeser_header);
}
}
}
#[track_caller]
fn ser_deser_symbol(column_op: ColumnOperation<NumericalValue>) {
let buf = column_op.serialize();
let mut buffer = buf.as_ref().to_vec();
buffer.extend_from_slice(b"234234");
let mut bytes = &buffer[..];
let serdeser_symbol = ColumnOperation::deserialize(&mut bytes).unwrap();
assert_eq!(bytes.len() + buf.as_ref().len() as usize, buffer.len());
assert_eq!(column_op, serdeser_symbol);
}
#[test]
fn test_compute_num_bytes_for_u64() {
assert_eq!(compute_num_bytes_for_u64(0), 0);
assert_eq!(compute_num_bytes_for_u64(1), 1);
assert_eq!(compute_num_bytes_for_u64(255), 1);
assert_eq!(compute_num_bytes_for_u64(256), 2);
assert_eq!(compute_num_bytes_for_u64((1 << 16) - 1), 2);
assert_eq!(compute_num_bytes_for_u64(1 << 16), 3);
}
#[test]
fn test_symbol_serialization() {
ser_deser_symbol(ColumnOperation::NewDoc(0));
ser_deser_symbol(ColumnOperation::NewDoc(3));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(0i64)));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(1i64)));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(257u64)));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(-257i64)));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(i64::MIN)));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(0u64)));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MIN)));
ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MAX)));
}
fn test_column_operation_unordered_aux(val: u32, expected_len: usize) {
let column_op = ColumnOperation::Value(UnorderedId(val));
let minibuf = column_op.serialize();
assert_eq!(minibuf.as_ref().len() as usize, expected_len);
let mut buf = minibuf.as_ref().to_vec();
buf.extend_from_slice(&[2, 2, 2, 2, 2, 2]);
let mut cursor = &buf[..];
let column_op_serdeser: ColumnOperation<UnorderedId> =
ColumnOperation::deserialize(&mut cursor).unwrap();
assert_eq!(column_op_serdeser, ColumnOperation::Value(UnorderedId(val)));
assert_eq!(cursor.len() + expected_len, buf.len());
}
#[test]
fn test_column_operation_unordered() {
test_column_operation_unordered_aux(300u32, 3);
test_column_operation_unordered_aux(1u32, 2);
test_column_operation_unordered_aux(0u32, 1);
}
}

columnar/src/writer/column_writers.rs

@@ -1,271 +0,0 @@
use std::cmp::Ordering;
use stacker::{ExpUnrolledLinkedList, MemoryArena};
use crate::dictionary::{DictionaryBuilder, UnorderedId};
use crate::writer::column_operation::{ColumnOperation, SymbolValue};
use crate::{Cardinality, DocId, NumericalType, NumericalValue};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
enum DocumentStep {
SameDoc = 0,
NextDoc = 1,
SkippedDoc = 2,
}
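// Classifies `doc` against the previously inserted document: the expected
// next doc id is `last_doc + 1` (or 0 if none). A smaller `doc` means another
// value for the same doc, equal means the next doc, greater means at least
// one doc was skipped.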
#[inline(always)]
fn delta_with_last_doc(last_doc_opt: Option<u32>, doc: u32) -> DocumentStep {
let expected_next_doc = last_doc_opt.map(|last_doc| last_doc + 1).unwrap_or(0u32);
match doc.cmp(&expected_next_doc) {
Ordering::Less => DocumentStep::SameDoc,
Ordering::Equal => DocumentStep::NextDoc,
Ordering::Greater => DocumentStep::SkippedDoc,
}
}
#[derive(Copy, Clone, Default)]
pub struct ColumnWriter {
// Detected cardinality of the column so far.
cardinality: Cardinality,
// Last document inserted.
// None if no doc has been added yet.
last_doc_opt: Option<u32>,
// Buffer containing the serialized values.
values: ExpUnrolledLinkedList,
}
impl ColumnWriter {
/// Returns an iterator over the column operations that have been recorded
/// for the given column.
pub(crate) fn operation_iterator<'a, V: SymbolValue>(
&self,
arena: &MemoryArena,
buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
buffer.clear();
self.values.read_to_end(arena, buffer);
let mut cursor: &[u8] = &buffer[..];
std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
}
/// Records a value for the given document.
///
/// This function will also update the cardinality of the column
/// if necessary.
pub(crate) fn record<S: SymbolValue>(&mut self, doc: DocId, value: S, arena: &mut MemoryArena) {
// Difference between `doc` and the last doc.
match delta_with_last_doc(self.last_doc_opt, doc) {
DocumentStep::SameDoc => {
// Same as the last encountered document: the column is multivalued.
self.cardinality = Cardinality::Multivalued;
}
DocumentStep::NextDoc => {
self.last_doc_opt = Some(doc);
self.write_symbol::<S>(ColumnOperation::NewDoc(doc), arena);
}
DocumentStep::SkippedDoc => {
self.cardinality = self.cardinality.max(Cardinality::Optional);
self.last_doc_opt = Some(doc);
self.write_symbol::<S>(ColumnOperation::NewDoc(doc), arena);
}
}
self.write_symbol(ColumnOperation::Value(value), arena);
}
// Get the cardinality.
// The overall number of docs in the column is necessary to
// deal with the case where all docs contain 1 value, except some documents
// at the end of the column.
pub(crate) fn get_cardinality(&self, num_docs: DocId) -> Cardinality {
match delta_with_last_doc(self.last_doc_opt, num_docs) {
DocumentStep::SameDoc | DocumentStep::NextDoc => self.cardinality,
DocumentStep::SkippedDoc => self.cardinality.max(Cardinality::Optional),
}
}
/// Appends a new symbol to the `ColumnWriter`.
fn write_symbol<V: SymbolValue>(
&mut self,
column_operation: ColumnOperation<V>,
arena: &mut MemoryArena,
) {
self.values
.writer(arena)
.extend_from_slice(column_operation.serialize().as_ref());
}
}
#[derive(Clone, Copy, Default)]
pub(crate) struct NumericalColumnWriter {
compatible_numerical_types: CompatibleNumericalTypes,
column_writer: ColumnWriter,
}
/// State used to store what types are still acceptable
/// after having seen a set of numerical values.
#[derive(Clone, Copy)]
struct CompatibleNumericalTypes {
all_values_within_i64_range: bool,
all_values_within_u64_range: bool,
// f64 is always acceptable.
}
impl Default for CompatibleNumericalTypes {
fn default() -> CompatibleNumericalTypes {
CompatibleNumericalTypes {
all_values_within_i64_range: true,
all_values_within_u64_range: true,
}
}
}
impl CompatibleNumericalTypes {
fn accept_value(&mut self, numerical_value: NumericalValue) {
match numerical_value {
NumericalValue::I64(val_i64) => {
let value_within_u64_range = val_i64 >= 0i64;
self.all_values_within_u64_range &= value_within_u64_range;
}
NumericalValue::U64(val_u64) => {
let value_within_i64_range = val_u64 < i64::MAX as u64;
self.all_values_within_i64_range &= value_within_i64_range;
}
NumericalValue::F64(_) => {
self.all_values_within_i64_range = false;
self.all_values_within_u64_range = false;
}
}
}
pub fn to_numerical_type(self) -> NumericalType {
if self.all_values_within_i64_range {
NumericalType::I64
} else if self.all_values_within_u64_range {
NumericalType::U64
} else {
NumericalType::F64
}
}
}
impl NumericalColumnWriter {
pub fn column_type_and_cardinality(&self, num_docs: DocId) -> (NumericalType, Cardinality) {
let numerical_type = self.compatible_numerical_types.to_numerical_type();
let cardinality = self.column_writer.get_cardinality(num_docs);
(numerical_type, cardinality)
}
pub fn record_numerical_value(
&mut self,
doc: DocId,
value: NumericalValue,
arena: &mut MemoryArena,
) {
self.compatible_numerical_types.accept_value(value);
self.column_writer.record(doc, value, arena);
}
pub fn operation_iterator<'a>(
self,
arena: &MemoryArena,
buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
self.column_writer.operation_iterator(arena, buffer)
}
}
#[derive(Copy, Clone, Default)]
pub(crate) struct StrColumnWriter {
pub(crate) dictionary_id: u32,
pub(crate) column_writer: ColumnWriter,
}
impl StrColumnWriter {
pub(crate) fn with_dictionary_id(dictionary_id: u32) -> StrColumnWriter {
StrColumnWriter {
dictionary_id,
column_writer: Default::default(),
}
}
pub(crate) fn record_bytes(
&mut self,
doc: DocId,
bytes: &[u8],
dictionaries: &mut [DictionaryBuilder],
arena: &mut MemoryArena,
) {
let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
self.column_writer.record(doc, unordered_id, arena);
}
pub(crate) fn operation_iterator<'a>(
&self,
arena: &MemoryArena,
byte_buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
self.column_writer.operation_iterator(arena, byte_buffer)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_delta_with_last_doc() {
assert_eq!(delta_with_last_doc(None, 0u32), DocumentStep::NextDoc);
assert_eq!(delta_with_last_doc(None, 1u32), DocumentStep::SkippedDoc);
assert_eq!(delta_with_last_doc(None, 2u32), DocumentStep::SkippedDoc);
assert_eq!(delta_with_last_doc(Some(0u32), 0u32), DocumentStep::SameDoc);
assert_eq!(delta_with_last_doc(Some(1u32), 1u32), DocumentStep::SameDoc);
assert_eq!(delta_with_last_doc(Some(1u32), 2u32), DocumentStep::NextDoc);
assert_eq!(
delta_with_last_doc(Some(1u32), 3u32),
DocumentStep::SkippedDoc
);
assert_eq!(
delta_with_last_doc(Some(1u32), 4u32),
DocumentStep::SkippedDoc
);
}
#[track_caller]
fn test_column_writer_coercion_iter_aux(
values: impl Iterator<Item = NumericalValue>,
expected_numerical_type: NumericalType,
) {
let mut compatible_numerical_types = CompatibleNumericalTypes::default();
for value in values {
compatible_numerical_types.accept_value(value);
}
assert_eq!(
compatible_numerical_types.to_numerical_type(),
expected_numerical_type
);
}
#[track_caller]
fn test_column_writer_coercion_aux(
values: &[NumericalValue],
expected_numerical_type: NumericalType,
) {
test_column_writer_coercion_iter_aux(values.iter().copied(), expected_numerical_type);
test_column_writer_coercion_iter_aux(values.iter().rev().copied(), expected_numerical_type);
}
#[test]
fn test_column_writer_coercion() {
test_column_writer_coercion_aux(&[], NumericalType::I64);
test_column_writer_coercion_aux(&[1i64.into()], NumericalType::I64);
test_column_writer_coercion_aux(&[1u64.into()], NumericalType::I64);
// We don't detect exact integer at the moment. We could!
test_column_writer_coercion_aux(&[1f64.into()], NumericalType::F64);
test_column_writer_coercion_aux(&[u64::MAX.into()], NumericalType::U64);
test_column_writer_coercion_aux(&[(i64::MAX as u64).into()], NumericalType::U64);
test_column_writer_coercion_aux(&[(1u64 << 63).into()], NumericalType::U64);
test_column_writer_coercion_aux(&[1i64.into(), 1u64.into()], NumericalType::I64);
test_column_writer_coercion_aux(&[u64::MAX.into(), (-1i64).into()], NumericalType::F64);
}
}

columnar/src/writer/mod.rs

@@ -1,528 +0,0 @@
mod column_operation;
mod column_writers;
mod serializer;
mod value_index;
use std::io::{self, Write};
use column_operation::ColumnOperation;
use fastfield_codecs::serialize::ValueIndexInfo;
use fastfield_codecs::{Column, MonotonicallyMappableToU64, VecColumn};
use serializer::ColumnarSerializer;
use stacker::{Addr, ArenaHashMap, MemoryArena};
use crate::column_type_header::{ColumnType, ColumnTypeAndCardinality, ColumnTypeCategory};
use crate::dictionary::{DictionaryBuilder, TermIdMapping, UnorderedId};
use crate::value::{Coerce, NumericalType, NumericalValue};
use crate::writer::column_writers::{ColumnWriter, NumericalColumnWriter, StrColumnWriter};
use crate::writer::value_index::{IndexBuilder, SpareIndexBuilders};
use crate::{Cardinality, DocId};
/// This is a set of buffers that are only here
/// to limit the amount of allocation.
#[derive(Default)]
struct SpareBuffers {
value_index_builders: SpareIndexBuilders,
i64_values: Vec<i64>,
u64_values: Vec<u64>,
f64_values: Vec<f64>,
bool_values: Vec<bool>,
column_buffer: Vec<u8>,
}
/// Makes it possible to create a new columnar.
///
/// ```rust
/// use tantivy_columnar::ColumnarWriter;
/// fn main() {
/// let mut columnar_writer = ColumnarWriter::default();
/// columnar_writer.record_str(0u32 /* doc id */, "product_name", "Red backpack");
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10u64);
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh we ended up mixing integers and floats.
/// let mut wrt: Vec<u8> = Vec::new();
/// columnar_writer.serialize(2u32, &mut wrt).unwrap();
/// }
/// ```
pub struct ColumnarWriter {
numerical_field_hash_map: ArenaHashMap,
bool_field_hash_map: ArenaHashMap,
bytes_field_hash_map: ArenaHashMap,
arena: MemoryArena,
// Dictionaries used to store dictionary-encoded values.
dictionaries: Vec<DictionaryBuilder>,
buffers: SpareBuffers,
}
impl Default for ColumnarWriter {
fn default() -> Self {
ColumnarWriter {
numerical_field_hash_map: ArenaHashMap::new(10_000),
bool_field_hash_map: ArenaHashMap::new(10_000),
bytes_field_hash_map: ArenaHashMap::new(10_000),
dictionaries: Vec::new(),
arena: MemoryArena::default(),
buffers: SpareBuffers::default(),
}
}
}
impl ColumnarWriter {
pub fn record_numerical<T: Into<NumericalValue> + Copy>(
&mut self,
doc: DocId,
column_name: &str,
numerical_value: T,
) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.record_numerical_value(doc, numerical_value.into(), arena);
column
},
);
}
pub fn record_bool(&mut self, doc: DocId, column_name: &str, val: bool) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
},
);
}
pub fn record_str(&mut self, doc: DocId, column_name: &str, value: &str) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena, dictionaries) = (
&mut self.bytes_field_hash_map,
&mut self.arena,
&mut self.dictionaries,
);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<StrColumnWriter>| {
let mut column: StrColumnWriter = column_opt.unwrap_or_else(|| {
let dictionary_id = dictionaries.len() as u32;
dictionaries.push(DictionaryBuilder::default());
StrColumnWriter::with_dictionary_id(dictionary_id)
});
column.record_bytes(doc, value.as_bytes(), dictionaries, arena);
column
},
);
}
pub fn serialize(&mut self, num_docs: DocId, wrt: &mut dyn io::Write) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(wrt);
let mut field_columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
.numerical_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Numerical, addr))
.collect();
field_columns.extend(
self.bytes_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Str, addr)),
);
field_columns.extend(
self.bool_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Bool, addr)),
);
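// Sort by (column_name, type) before serialization: the sstable-based column
// index below presumably expects its keys to be inserted in increasing order.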
field_columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
for (column_name, bytes_or_numerical, addr) in field_columns {
match bytes_or_numerical {
ColumnTypeCategory::Bool => {
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
let column_type_and_cardinality = ColumnTypeAndCardinality {
cardinality,
typ: ColumnType::Bool,
};
let column_serializer =
serializer.serialize_column(column_name, column_type_and_cardinality);
serialize_bool_column(
cardinality,
num_docs,
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
column_serializer,
)?;
}
ColumnTypeCategory::Str => {
let str_column_writer: StrColumnWriter = self.bytes_field_hash_map.read(addr);
let dictionary_builder =
&dictionaries[str_column_writer.dictionary_id as usize];
let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
let column_type_and_cardinality = ColumnTypeAndCardinality {
cardinality,
typ: ColumnType::Bytes,
};
let column_serializer =
serializer.serialize_column(column_name, column_type_and_cardinality);
serialize_bytes_column(
cardinality,
num_docs,
dictionary_builder,
str_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
column_serializer,
)?;
}
ColumnTypeCategory::Numerical => {
let numerical_column_writer: NumericalColumnWriter =
self.numerical_field_hash_map.read(addr);
let (numerical_type, cardinality) =
numerical_column_writer.column_type_and_cardinality(num_docs);
let column_type_and_cardinality = ColumnTypeAndCardinality {
cardinality,
typ: ColumnType::Numerical(numerical_type),
};
let column_serializer =
serializer.serialize_column(column_name, column_type_and_cardinality);
serialize_numerical_column(
cardinality,
num_docs,
numerical_type,
numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
column_serializer,
)?;
}
};
}
serializer.finalize()?;
Ok(())
}
}
fn compress_and_write_column<W: io::Write>(column_bytes: &[u8], wrt: &mut W) -> io::Result<()> {
wrt.write_all(column_bytes)?;
Ok(())
}
fn serialize_bytes_column<W: io::Write>(
cardinality: Cardinality,
num_docs: DocId,
dictionary_builder: &DictionaryBuilder,
operation_it: impl Iterator<Item = ColumnOperation<UnorderedId>>,
buffers: &mut SpareBuffers,
mut wrt: W,
) -> io::Result<()> {
let SpareBuffers {
value_index_builders,
u64_values,
column_buffer,
..
} = buffers;
column_buffer.clear();
let term_id_mapping: TermIdMapping = dictionary_builder.serialize(column_buffer)?;
let dictionary_num_bytes: u32 = column_buffer.len() as u32;
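// Buffer layout from here on: [dictionary bytes][column data][dictionary len: u32 LE].
// The trailing length presumably lets a reader split the dictionary from the column data.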
let operation_iterator = operation_it.map(|symbol: ColumnOperation<UnorderedId>| {
// We map unordered ids to ordered ids.
match symbol {
ColumnOperation::Value(unordered_id) => {
let ordered_id = term_id_mapping.to_ord(unordered_id);
ColumnOperation::Value(ordered_id.0 as u64)
}
ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
}
});
serialize_column(
operation_iterator,
cardinality,
num_docs,
value_index_builders,
u64_values,
column_buffer,
)?;
column_buffer.write_all(&dictionary_num_bytes.to_le_bytes()[..])?;
compress_and_write_column(column_buffer, &mut wrt)?;
Ok(())
}
fn serialize_numerical_column<W: io::Write>(
cardinality: Cardinality,
num_docs: DocId,
numerical_type: NumericalType,
op_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
buffers: &mut SpareBuffers,
mut wrt: W,
) -> io::Result<()> {
let SpareBuffers {
value_index_builders,
u64_values,
i64_values,
f64_values,
column_buffer,
..
} = buffers;
column_buffer.clear();
match numerical_type {
NumericalType::I64 => {
serialize_column(
coerce_numerical_symbol::<i64>(op_iterator),
cardinality,
num_docs,
value_index_builders,
i64_values,
column_buffer,
)?;
}
NumericalType::U64 => {
serialize_column(
coerce_numerical_symbol::<u64>(op_iterator),
cardinality,
num_docs,
value_index_builders,
u64_values,
column_buffer,
)?;
}
NumericalType::F64 => {
serialize_column(
coerce_numerical_symbol::<f64>(op_iterator),
cardinality,
num_docs,
value_index_builders,
f64_values,
column_buffer,
)?;
}
};
compress_and_write_column(column_buffer, &mut wrt)?;
Ok(())
}
fn serialize_bool_column<W: io::Write>(
cardinality: Cardinality,
num_docs: DocId,
column_operations_it: impl Iterator<Item = ColumnOperation<bool>>,
buffers: &mut SpareBuffers,
mut wrt: W,
) -> io::Result<()> {
let SpareBuffers {
value_index_builders,
bool_values,
column_buffer,
..
} = buffers;
column_buffer.clear();
serialize_column(
column_operations_it,
cardinality,
num_docs,
value_index_builders,
bool_values,
column_buffer,
)?;
compress_and_write_column(column_buffer, &mut wrt)?;
Ok(())
}
fn serialize_column<
T: Copy + Default + std::fmt::Debug + Send + Sync + MonotonicallyMappableToU64 + PartialOrd,
>(
op_iterator: impl Iterator<Item = ColumnOperation<T>>,
cardinality: Cardinality,
num_docs: DocId,
value_index_builders: &mut SpareIndexBuilders,
values: &mut Vec<T>,
wrt: &mut Vec<u8>,
) -> io::Result<()>
where
for<'a> VecColumn<'a, T>: Column<T>,
{
values.clear();
match cardinality {
Cardinality::Required => {
consume_operation_iterator(
op_iterator,
value_index_builders.borrow_required_index_builder(),
values,
);
fastfield_codecs::serialize(
VecColumn::from(&values[..]),
wrt,
&fastfield_codecs::ALL_CODEC_TYPES[..],
)?;
}
Cardinality::Optional => {
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
consume_operation_iterator(op_iterator, optional_index_builder, values);
let optional_index = optional_index_builder.finish(num_docs);
fastfield_codecs::serialize::serialize_new(
ValueIndexInfo::SingleValue(Box::new(optional_index)),
VecColumn::from(&values[..]),
wrt,
&fastfield_codecs::ALL_CODEC_TYPES[..],
)?;
}
Cardinality::Multivalued => {
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
let multivalued_index = multivalued_index_builder.finish(num_docs);
fastfield_codecs::serialize::serialize_new(
ValueIndexInfo::MultiValue(Box::new(multivalued_index)),
VecColumn::from(&values[..]),
wrt,
&fastfield_codecs::ALL_CODEC_TYPES[..],
)?;
}
}
Ok(())
}
fn coerce_numerical_symbol<T>(
operation_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
) -> impl Iterator<Item = ColumnOperation<T>>
where T: Coerce {
operation_iterator.map(|symbol| match symbol {
ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
ColumnOperation::Value(numerical_value) => {
ColumnOperation::Value(Coerce::coerce(numerical_value))
}
})
}
fn consume_operation_iterator<T: std::fmt::Debug, TIndexBuilder: IndexBuilder>(
operation_iterator: impl Iterator<Item = ColumnOperation<T>>,
index_builder: &mut TIndexBuilder,
values: &mut Vec<T>,
) {
for symbol in operation_iterator {
match symbol {
ColumnOperation::NewDoc(doc) => {
index_builder.record_doc(doc);
}
ColumnOperation::Value(value) => {
index_builder.record_value();
values.push(value);
}
}
}
}
#[cfg(test)]
mod tests {
use column_operation::ColumnOperation;
use stacker::MemoryArena;
use super::*;
use crate::value::NumericalValue;
use crate::Cardinality;
#[test]
fn test_column_writer_required_simple() {
let mut arena = MemoryArena::default();
let mut column_writer = super::ColumnWriter::default();
column_writer.record(0u32, NumericalValue::from(14i64), &mut arena);
column_writer.record(1u32, NumericalValue::from(15i64), &mut arena);
column_writer.record(2u32, NumericalValue::from(-16i64), &mut arena);
assert_eq!(column_writer.get_cardinality(3), Cardinality::Required);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 6);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
assert!(matches!(
symbols[1],
ColumnOperation::Value(NumericalValue::I64(14i64))
));
assert!(matches!(symbols[2], ColumnOperation::NewDoc(1u32)));
assert!(matches!(
symbols[3],
ColumnOperation::Value(NumericalValue::I64(15i64))
));
assert!(matches!(symbols[4], ColumnOperation::NewDoc(2u32)));
assert!(matches!(
symbols[5],
ColumnOperation::Value(NumericalValue::I64(-16i64))
));
}
#[test]
fn test_column_writer_optional_cardinality_missing_first() {
let mut arena = MemoryArena::default();
let mut column_writer = super::ColumnWriter::default();
column_writer.record(1u32, NumericalValue::from(15i64), &mut arena);
column_writer.record(2u32, NumericalValue::from(-16i64), &mut arena);
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 4);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
assert!(matches!(
symbols[1],
ColumnOperation::Value(NumericalValue::I64(15i64))
));
assert!(matches!(symbols[2], ColumnOperation::NewDoc(2u32)));
assert!(matches!(
symbols[3],
ColumnOperation::Value(NumericalValue::I64(-16i64))
));
}
#[test]
fn test_column_writer_optional_cardinality_missing_last() {
let mut arena = MemoryArena::default();
let mut column_writer = super::ColumnWriter::default();
column_writer.record(0u32, NumericalValue::from(15i64), &mut arena);
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 2);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
assert!(matches!(
symbols[1],
ColumnOperation::Value(NumericalValue::I64(15i64))
));
}
#[test]
fn test_column_writer_multivalued() {
let mut arena = MemoryArena::default();
let mut column_writer = super::ColumnWriter::default();
column_writer.record(0u32, NumericalValue::from(16i64), &mut arena);
column_writer.record(0u32, NumericalValue::from(17i64), &mut arena);
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&mut arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 3);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
assert!(matches!(
symbols[1],
ColumnOperation::Value(NumericalValue::I64(16i64))
));
assert!(matches!(
symbols[2],
ColumnOperation::Value(NumericalValue::I64(17i64))
));
}
}

View File

@@ -1,116 +0,0 @@
use std::io;
use std::io::Write;
use common::CountingWriter;
use sstable::value::RangeValueWriter;
use sstable::RangeSSTable;
use crate::column_type_header::ColumnTypeAndCardinality;
pub struct ColumnarSerializer<W: io::Write> {
wrt: CountingWriter<W>,
sstable_range: sstable::Writer<Vec<u8>, RangeValueWriter>,
prepare_key_buffer: Vec<u8>,
}
/// Fills `buffer` with a key consisting of the concatenation of the column name, a 0
/// separator byte, and the column_type_and_cardinality code.
fn prepare_key<'a>(
key: &[u8],
column_type_cardinality: ColumnTypeAndCardinality,
buffer: &'a mut Vec<u8>,
) {
buffer.clear();
buffer.extend_from_slice(key);
buffer.push(0u8);
buffer.push(column_type_cardinality.to_code());
}
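// Layout sketch: for a column name `name` and code byte `c`, the resulting key is
// the bytes of `name`, a 0 separator byte, then `c` (see `test_prepare_key_bytes`
// below).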
impl<W: io::Write> ColumnarSerializer<W> {
pub(crate) fn new(wrt: W) -> ColumnarSerializer<W> {
let sstable_range: sstable::Writer<Vec<u8>, RangeValueWriter> =
sstable::Dictionary::<RangeSSTable>::builder(Vec::with_capacity(100_000)).unwrap();
ColumnarSerializer {
wrt: CountingWriter::wrap(wrt),
sstable_range,
prepare_key_buffer: Vec::new(),
}
}
pub fn serialize_column<'a>(
&'a mut self,
column_name: &[u8],
column_type_cardinality: ColumnTypeAndCardinality,
) -> impl io::Write + 'a {
let start_offset = self.wrt.written_bytes();
prepare_key(
column_name,
column_type_cardinality,
&mut self.prepare_key_buffer,
);
ColumnSerializer {
columnar_serializer: self,
start_offset,
}
}
pub(crate) fn finalize(mut self) -> io::Result<()> {
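// Footer layout: the sstable bytes are written last, followed by their length as
// a little-endian u64, so a reader can locate the sstable from the end of the file.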
let sstable_bytes: Vec<u8> = self.sstable_range.finish()?;
let sstable_num_bytes: u64 = sstable_bytes.len() as u64;
self.wrt.write_all(&sstable_bytes)?;
self.wrt.write_all(&sstable_num_bytes.to_le_bytes()[..])?;
Ok(())
}
}
struct ColumnSerializer<'a, W: io::Write> {
columnar_serializer: &'a mut ColumnarSerializer<W>,
start_offset: u64,
}
impl<'a, W: io::Write> Drop for ColumnSerializer<'a, W> {
fn drop(&mut self) {
let end_offset: u64 = self.columnar_serializer.wrt.written_bytes();
let byte_range = self.start_offset..end_offset;
self.columnar_serializer.sstable_range.insert_cannot_fail(
&self.columnar_serializer.prepare_key_buffer[..],
&byte_range,
);
self.columnar_serializer.prepare_key_buffer.clear();
}
}
impl<'a, W: io::Write> io::Write for ColumnSerializer<'a, W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.columnar_serializer.wrt.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.columnar_serializer.wrt.flush()
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.columnar_serializer.wrt.write_all(buf)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::column_type_header::ColumnType;
use crate::Cardinality;
#[test]
fn test_prepare_key_bytes() {
let mut buffer: Vec<u8> = b"somegarbage".to_vec();
let column_type_and_cardinality = ColumnTypeAndCardinality {
typ: ColumnType::Bytes,
cardinality: Cardinality::Optional,
};
prepare_key(b"root\0child", column_type_and_cardinality, &mut buffer);
assert_eq!(buffer.len(), 12);
assert_eq!(&buffer[..10], b"root\0child");
assert_eq!(buffer[10], 0u8);
assert_eq!(buffer[11], column_type_and_cardinality.to_code());
}
}

View File

@@ -1,218 +0,0 @@
use fastfield_codecs::serialize::{MultiValueIndexInfo, SingleValueIndexInfo};
use crate::DocId;
/// The `IndexBuilder` interprets a sequence of
/// calls of the form:
/// (record_doc,record_value+)*
/// and can then serialize the results into an index.
///
/// It has different implementation depending on whether the
/// cardinality is required, optional, or multivalued.
pub(crate) trait IndexBuilder {
fn record_doc(&mut self, doc: DocId);
#[inline]
fn record_value(&mut self) {}
}
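// A minimal sketch of the call grammar above, assuming a multivalued column where
// doc 0 holds two values and doc 2 holds one (hypothetical data):
//
//   let mut builder = MultivaluedIndexBuilder::default();
//   builder.record_doc(0);
//   builder.record_value();
//   builder.record_value();
//   builder.record_doc(2);
//   builder.record_value();
//
// Docs without any value (doc 1 here) are simply never recorded.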
/// The RequiredIndexBuilder does nothing.
#[derive(Default)]
pub struct RequiredIndexBuilder;
impl IndexBuilder for RequiredIndexBuilder {
#[inline(always)]
fn record_doc(&mut self, _doc: DocId) {}
}
#[derive(Default)]
pub struct OptionalIndexBuilder {
docs: Vec<DocId>,
}
struct SingleValueArrayIndex<'a> {
docs: &'a [DocId],
num_docs: DocId,
}
impl<'a> SingleValueIndexInfo for SingleValueArrayIndex<'a> {
fn num_vals(&self) -> u32 {
self.num_docs as u32
}
fn num_non_nulls(&self) -> u32 {
self.docs.len() as u32
}
fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
Box::new(self.docs.iter().copied())
}
}
impl OptionalIndexBuilder {
pub fn finish(&mut self, num_docs: DocId) -> impl SingleValueIndexInfo + '_ {
debug_assert!(self
.docs
.last()
.copied()
.map(|last_doc| last_doc < num_docs)
.unwrap_or(true));
SingleValueArrayIndex {
docs: &self.docs[..],
num_docs,
}
}
fn reset(&mut self) {
self.docs.clear();
}
}
impl IndexBuilder for OptionalIndexBuilder {
#[inline(always)]
fn record_doc(&mut self, doc: DocId) {
debug_assert!(self
.docs
.last()
.copied()
.map(|prev_doc| doc > prev_doc)
.unwrap_or(true));
self.docs.push(doc);
}
}
#[derive(Default)]
pub struct MultivaluedIndexBuilder {
// TODO should we switch to `start_offset`?
end_values: Vec<DocId>,
total_num_vals_seen: u32,
}
pub struct MultivaluedValueArrayIndex<'a> {
end_offsets: &'a [DocId],
}
impl<'a> MultiValueIndexInfo for MultivaluedValueArrayIndex<'a> {
fn num_docs(&self) -> u32 {
self.end_offsets.len() as u32
}
fn num_vals(&self) -> u32 {
self.end_offsets.last().copied().unwrap_or(0u32)
}
fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
if self.end_offsets.is_empty() {
return Box::new(std::iter::empty());
}
let n = self.end_offsets.len();
Box::new(std::iter::once(0u32).chain(self.end_offsets[..n - 1].iter().copied()))
}
}
impl MultivaluedIndexBuilder {
pub fn finish(&mut self, num_docs: DocId) -> impl MultiValueIndexInfo + '_ {
self.end_values
.resize(num_docs as usize, self.total_num_vals_seen);
MultivaluedValueArrayIndex {
end_offsets: &self.end_values[..],
}
}
fn reset(&mut self) {
self.end_values.clear();
self.total_num_vals_seen = 0;
}
}
impl IndexBuilder for MultivaluedIndexBuilder {
fn record_doc(&mut self, doc: DocId) {
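// Backfill the end offset of every doc before `doc`: each of them ends at the
// current value count.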
self.end_values
.resize(doc as usize, self.total_num_vals_seen);
}
fn record_value(&mut self) {
self.total_num_vals_seen += 1;
}
}
/// The `SpareIndexBuilders` is there to avoid allocating a
/// new index builder for every single column.
#[derive(Default)]
pub struct SpareIndexBuilders {
required_index_builder: RequiredIndexBuilder,
optional_index_builder: OptionalIndexBuilder,
multivalued_index_builder: MultivaluedIndexBuilder,
}
impl SpareIndexBuilders {
pub fn borrow_required_index_builder(&mut self) -> &mut RequiredIndexBuilder {
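// Nothing to reset: the required index builder is a stateless no-op.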
&mut self.required_index_builder
}
pub fn borrow_optional_index_builder(&mut self) -> &mut OptionalIndexBuilder {
self.optional_index_builder.reset();
&mut self.optional_index_builder
}
pub fn borrow_multivalued_index_builder(&mut self) -> &mut MultivaluedIndexBuilder {
self.multivalued_index_builder.reset();
&mut self.multivalued_index_builder
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_optional_value_index_builder() {
let mut opt_value_index_builder = OptionalIndexBuilder::default();
opt_value_index_builder.record_doc(0u32);
opt_value_index_builder.record_value();
assert_eq!(
&opt_value_index_builder
.finish(1u32)
.iter()
.collect::<Vec<u32>>(),
&[0]
);
opt_value_index_builder.reset();
opt_value_index_builder.record_doc(1u32);
opt_value_index_builder.record_value();
assert_eq!(
&opt_value_index_builder
.finish(2u32)
.iter()
.collect::<Vec<u32>>(),
&[1]
);
}
#[test]
fn test_multivalued_value_index_builder() {
let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
multivalued_value_index_builder.record_doc(1u32);
multivalued_value_index_builder.record_value();
multivalued_value_index_builder.record_value();
multivalued_value_index_builder.record_doc(2u32);
multivalued_value_index_builder.record_value();
assert_eq!(
multivalued_value_index_builder
.finish(4u32)
.iter()
.collect::<Vec<u32>>(),
vec![0, 0, 2, 3]
);
multivalued_value_index_builder.reset();
multivalued_value_index_builder.record_doc(2u32);
multivalued_value_index_builder.record_value();
multivalued_value_index_builder.record_value();
assert_eq!(
multivalued_value_index_builder
.finish(4u32)
.iter()
.collect::<Vec<u32>>(),
vec![0, 0, 0, 2]
);
}
}

View File

@@ -1,21 +1,16 @@
[package]
name = "tantivy-common"
version = "0.5.0"
version = "0.3.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "common traits and utility functions used by multiple tantivy subcrates"
documentation = "https://docs.rs/tantivy_common/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
byteorder = "1.4.3"
ownedbytes = { version= "0.5", path="../ownedbytes" }
async-trait = "0.1"
ownedbytes = { version="0.3", path="../ownedbytes" }
[dev-dependencies]
proptest = "1.0.0"

View File

@@ -151,7 +151,7 @@ impl TinySet {
if self.is_empty() {
None
} else {
let lowest = self.0.trailing_zeros();
let lowest = self.0.trailing_zeros() as u32;
self.0 ^= TinySet::singleton(lowest).0;
Some(lowest)
}
@@ -259,7 +259,11 @@ impl BitSet {
// we do not check saturated els.
let higher = el / 64u32;
let lower = el % 64u32;
self.len += u64::from(self.tinysets[higher as usize].insert_mut(lower));
self.len += if self.tinysets[higher as usize].insert_mut(lower) {
1
} else {
0
};
}
/// Inserts an element in the `BitSet`
@@ -268,7 +272,11 @@ impl BitSet {
// we do not check saturated els.
let higher = el / 64u32;
let lower = el % 64u32;
self.len -= u64::from(self.tinysets[higher as usize].remove_mut(lower));
self.len -= if self.tinysets[higher as usize].remove_mut(lower) {
1
} else {
0
};
}
/// Returns true iff the elements is in the `BitSet`.
@@ -277,7 +285,7 @@ impl BitSet {
self.tinyset(el / 64u32).contains(el % 64)
}
/// Returns the first non-empty `TinySet` associated with a bucket lower
/// Returns the first non-empty `TinySet` associated to a bucket lower
/// or greater than bucket.
///
/// Reminder: the tiny set with the bucket `bucket`, represents the
@@ -421,7 +429,7 @@ mod tests {
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len(), i as usize);
assert_eq!(bitset.len() as usize, i as usize);
}
}
@@ -432,7 +440,7 @@ mod tests {
bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len(), 64);
assert_eq!(bitset.len() as usize, 64);
}
#[test]

View File

@@ -5,12 +5,11 @@ use std::ops::Deref;
pub use byteorder::LittleEndian as Endianness;
mod bitset;
pub mod file_slice;
mod serialize;
mod vint;
mod writer;
pub use bitset::*;
pub use ownedbytes::{OwnedBytes, StableDeref};
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use vint::{
deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,

View File

@@ -94,20 +94,6 @@ impl FixedSize for u32 {
const SIZE_IN_BYTES: usize = 4;
}
impl BinarySerializable for u16 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u16::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<u16> {
reader.read_u16::<Endianness>()
}
}
impl FixedSize for u16 {
const SIZE_IN_BYTES: usize = 2;
}
impl BinarySerializable for u64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u64::<Endianness>(*self)
@@ -121,19 +107,6 @@ impl FixedSize for u64 {
const SIZE_IN_BYTES: usize = 8;
}
impl BinarySerializable for u128 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u128::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_u128::<Endianness>()
}
}
impl FixedSize for u128 {
const SIZE_IN_BYTES: usize = 16;
}
impl BinarySerializable for f32 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_f32::<Endianness>(*self)
@@ -188,7 +161,8 @@ impl FixedSize for u8 {
impl BinarySerializable for bool {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_u8(u8::from(*self))
let val = if *self { 1 } else { 0 };
writer.write_u8(val)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
let val = reader.read_u8()?;

View File

@@ -157,7 +157,7 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start with a valid
/// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
let (result, vlen) = read_u32_vint_no_advance(data);
let (result, vlen) = read_u32_vint_no_advance(*data);
*data = &data[vlen..];
result
}

View File

@@ -50,7 +50,7 @@ to get tantivy to fit your use case:
*Example 1* You could for instance use hadoop to build a very large search index in a timely manner, copy all of the resulting segment files in the same directory and edit the `meta.json` to get a functional index.[^2]
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated with segment `D-7`.
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
## Merging

View File

@@ -118,7 +118,7 @@ fn main() -> tantivy::Result<()> {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();

View File

@@ -105,7 +105,7 @@ impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: Score) {
let value = self.fast_field_reader.get_val(doc) as f64;
let value = self.fast_field_reader.get_val(doc as u64) as f64;
self.stats.count += 1;
self.stats.sum += value;
self.stats.squared_sum += value * value;

View File

@@ -113,7 +113,7 @@ fn main() -> tantivy::Result<()> {
// on its id.
//
// Note that `tantivy` does nothing to enforce the idea that
// there is only one document associated with this id.
// there is only one document associated to this id.
//
// Also you might have noticed that we apply the delete before
// having committed. This does not matter really...

View File

@@ -44,7 +44,7 @@ fn main() -> tantivy::Result<()> {
// A segment contains different data structures.
// Inverted index stands for the combination of
// - the term dictionary
// - the inverted lists associated with each term and their positions
// - the inverted lists associated to each term and their positions
let inverted_index = segment_reader.inverted_index(title)?;
// A `Term` is a text token associated with a field.
@@ -105,7 +105,7 @@ fn main() -> tantivy::Result<()> {
// A segment contains different data structures.
// Inverted index stands for the combination of
// - the term dictionary
// - the inverted lists associated with each term and their positions
// - the inverted lists associated to each term and their positions
let inverted_index = segment_reader.inverted_index(title)?;
// This segment posting object is like a cursor over the documents matching the term.

View File

@@ -51,7 +51,7 @@ impl Warmer for DynamicPriceColumn {
let product_id_reader = segment.fast_fields().u64(self.field)?;
let product_ids: Vec<ProductId> = segment
.doc_ids_alive()
.map(|doc| product_id_reader.get_val(doc))
.map(|doc| product_id_reader.get_val(doc as u64))
.collect();
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
let mut price_vals: Vec<Price> = Vec::new();

View File

@@ -1,19 +1,17 @@
[package]
name = "fastfield_codecs"
version = "0.3.0"
version = "0.2.0"
authors = ["Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "Fast field codecs used by tantivy"
documentation = "https://docs.rs/fastfield_codecs/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
common = { version = "0.5", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
prettytable-rs = {version="0.9.0", optional= true}
rand = {version="0.8.3", optional= true}
fastdivide = "0.4"

View File

@@ -7,8 +7,8 @@ mod tests {
use std::iter;
use std::sync::Arc;
use common::OwnedBytes;
use fastfield_codecs::*;
use ownedbytes::OwnedBytes;
use rand::prelude::*;
use test::Bencher;
@@ -65,7 +65,7 @@ mod tests {
b.iter(|| {
let mut a = 0u64;
for _ in 0..n {
a = column.get_val(a as u32);
a = column.get_val(a as u64);
}
a
});
@@ -100,10 +100,9 @@ mod tests {
fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn Column<u128>> {
let mut out = vec![];
let iter_gen = || data.iter().cloned();
serialize_u128(iter_gen, data.len() as u32, &mut out).unwrap();
serialize_u128(VecColumn::from(&data), &mut out).unwrap();
let out = OwnedBytes::new(out);
open_u128::<u128>(out).unwrap()
open_u128(out).unwrap()
}
#[bench]
@@ -111,15 +110,7 @@ mod tests {
let (major_item, _minor_item, data) = get_data_50percent_item();
let column = get_u128_column_from_data(&data);
b.iter(|| {
let mut positions = Vec::new();
column.get_docids_for_value_range(
major_item..=major_item,
0..data.len() as u32,
&mut positions,
);
positions
});
b.iter(|| column.get_between_vals(major_item..=major_item));
}
#[bench]
@@ -127,15 +118,7 @@ mod tests {
let (_major_item, minor_item, data) = get_data_50percent_item();
let column = get_u128_column_from_data(&data);
b.iter(|| {
let mut positions = Vec::new();
column.get_docids_for_value_range(
minor_item..=minor_item,
0..data.len() as u32,
&mut positions,
);
positions
});
b.iter(|| column.get_between_vals(minor_item..=minor_item));
}
#[bench]
@@ -143,11 +126,7 @@ mod tests {
let (_major_item, _minor_item, data) = get_data_50percent_item();
let column = get_u128_column_from_data(&data);
b.iter(|| {
let mut positions = Vec::new();
column.get_docids_for_value_range(0..=u128::MAX, 0..data.len() as u32, &mut positions);
positions
});
b.iter(|| column.get_between_vals(0..=u128::MAX));
}
#[bench]
@@ -157,7 +136,7 @@ mod tests {
b.iter(|| {
let mut a = 0u128;
for i in 0u64..column.num_vals() as u64 {
a += column.get_val(i as u32);
a += column.get_val(i);
}
a
});
@@ -171,7 +150,7 @@ mod tests {
let n = column.num_vals();
let mut a = 0u128;
for i in (0..n / 5).map(|val| val * 5) {
a += column.get_val(i);
a += column.get_val(i as u64);
}
a
});
@@ -196,9 +175,9 @@ mod tests {
let n = permutation.len();
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
b.iter(|| {
let mut a = 0;
let mut a = 0u64;
for i in (0..n / 7).map(|val| val * 7) {
a += column.get_val(i as u32);
a += column.get_val(i as u64);
}
a
});
@@ -211,7 +190,7 @@ mod tests {
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
b.iter(|| {
let mut a = 0u64;
for i in 0u32..n as u32 {
for i in 0u64..n as u64 {
a += column.get_val(i);
}
a
@@ -225,8 +204,8 @@ mod tests {
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
b.iter(|| {
let mut a = 0u64;
for i in 0..n {
a += column.get_val(i as u32);
for i in 0..n as u64 {
a += column.get_val(i);
}
a
});

View File

@@ -1,6 +1,6 @@
use std::io::{self, Write};
use common::OwnedBytes;
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::serialize::NormalizedHeader;
@@ -17,7 +17,7 @@ pub struct BitpackedReader {
impl Column for BitpackedReader {
#[inline]
fn get_val(&self, doc: u32) -> u64 {
fn get_val(&self, doc: u64) -> u64 {
self.bit_unpacker.get(doc, &self.data)
}
#[inline]
@@ -30,7 +30,7 @@ impl Column for BitpackedReader {
self.normalized_header.max_value
}
#[inline]
fn num_vals(&self) -> u32 {
fn num_vals(&self) -> u64 {
self.normalized_header.num_vals
}
}
@@ -68,14 +68,16 @@ impl FastFieldCodec for BitpackedCodec {
assert_eq!(column.min_value(), 0u64);
let num_bits = compute_num_bits(column.max_value());
let mut bit_packer = BitPacker::new();
for val in column.iter() {
let mut reader = column.reader();
while reader.advance() {
let val = reader.get();
bit_packer.write(val, num_bits, write)?;
}
bit_packer.close(write)?;
Ok(())
}
fn estimate(column: &dyn Column) -> Option<f32> {
fn estimate(column: &impl Column) -> Option<f32> {
let num_bits = compute_num_bits(column.max_value());
let num_bits_uncompressed = 64;
Some(num_bits as f32 / num_bits_uncompressed as f32)

View File

@@ -1,7 +1,8 @@
use std::sync::Arc;
use std::{io, iter};
use common::{BinarySerializable, CountingWriter, DeserializeFrom, OwnedBytes};
use common::{BinarySerializable, CountingWriter, DeserializeFrom};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::line::Line;
@@ -35,7 +36,7 @@ impl BinarySerializable for Block {
}
}
fn compute_num_blocks(num_vals: u32) -> usize {
fn compute_num_blocks(num_vals: u64) -> usize {
(num_vals as usize + CHUNK_SIZE - 1) / CHUNK_SIZE
}
@@ -46,7 +47,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
type Reader = BlockwiseLinearReader;
fn open_from_bytes(
bytes: common::OwnedBytes,
bytes: ownedbytes::OwnedBytes,
normalized_header: NormalizedHeader,
) -> io::Result<Self::Reader> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
@@ -70,14 +71,16 @@ impl FastFieldCodec for BlockwiseLinearCodec {
}
// Estimate first_chunk and extrapolate
fn estimate(column: &dyn crate::Column) -> Option<f32> {
if column.num_vals() < 10 * CHUNK_SIZE as u32 {
fn estimate(column: &impl crate::Column) -> Option<f32> {
if column.num_vals() < 10 * CHUNK_SIZE as u64 {
return None;
}
let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE).collect();
let mut first_chunk: Vec<u64> = crate::iter_from_reader(column.reader())
.take(CHUNK_SIZE as usize)
.collect();
let line = Line::train(&VecColumn::from(&first_chunk));
for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
let interpolated_val = line.eval(i as u32);
let interpolated_val = line.eval(i as u64);
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
}
let estimated_bit_width = first_chunk
@@ -94,12 +97,12 @@ impl FastFieldCodec for BlockwiseLinearCodec {
};
let num_bits = estimated_bit_width as u64 * column.num_vals() as u64
// function metadata per block
+ metadata_per_block as u64 * (column.num_vals() as u64 / CHUNK_SIZE as u64);
+ metadata_per_block as u64 * (column.num_vals() / CHUNK_SIZE as u64);
let num_bits_uncompressed = 64 * column.num_vals();
Some(num_bits as f32 / num_bits_uncompressed as f32)
}
fn serialize(column: &dyn Column, wrt: &mut impl io::Write) -> io::Result<()> {
fn serialize(column: &dyn crate::Column, wrt: &mut impl io::Write) -> io::Result<()> {
// The BitpackedReader assumes a normalized vector.
assert_eq!(column.min_value(), 0);
let mut buffer = Vec::with_capacity(CHUNK_SIZE);
@@ -108,7 +111,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
let num_blocks = compute_num_blocks(num_vals);
let mut blocks = Vec::with_capacity(num_blocks);
let mut vals = column.iter();
let mut vals = crate::iter_from_reader(column.reader());
let mut bit_packer = BitPacker::new();
@@ -120,7 +123,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
assert!(!buffer.is_empty());
for (i, buffer_val) in buffer.iter_mut().enumerate() {
let interpolated_val = line.eval(i as u32);
let interpolated_val = line.eval(i as u64);
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
}
let bit_width = buffer.iter().copied().map(compute_num_bits).max().unwrap();
@@ -160,9 +163,9 @@ pub struct BlockwiseLinearReader {
impl Column for BlockwiseLinearReader {
#[inline(always)]
fn get_val(&self, idx: u32) -> u64 {
let block_id = (idx / CHUNK_SIZE as u32) as usize;
let idx_within_block = idx % (CHUNK_SIZE as u32);
fn get_val(&self, idx: u64) -> u64 {
let block_id = (idx / CHUNK_SIZE as u64) as usize;
let idx_within_block = idx % (CHUNK_SIZE as u64);
let block = &self.blocks[block_id];
let interpolated_val: u64 = block.line.eval(idx_within_block);
let block_bytes = &self.data[block.data_start_offset..];
@@ -170,19 +173,16 @@
interpolated_val.wrapping_add(bitpacked_diff)
}
#[inline(always)]
fn min_value(&self) -> u64 {
// The BlockwiseLinearReader assumes a normalized vector.
0u64
}
#[inline(always)]
fn max_value(&self) -> u64 {
self.normalized_header.max_value
}
#[inline(always)]
fn num_vals(&self) -> u32 {
fn num_vals(&self) -> u64 {
self.normalized_header.num_vals
}
}

View File

@@ -1,20 +1,25 @@
use std::marker::PhantomData;
use std::ops::{Range, RangeInclusive};
use std::ops::RangeInclusive;
use tantivy_bitpacker::minmax;
use crate::monotonic_mapping::StrictlyMonotonicFn;
pub trait Column<T: PartialOrd + Copy + 'static = u64>: Send + Sync {
/// Return a `ColumnReader`.
fn reader(&self) -> Box<dyn ColumnReader<T> + '_> {
Box::new(ColumnReaderAdapter::from(self))
}
/// `Column` provides columnar access on a field.
pub trait Column<T: PartialOrd = u64>: Send + Sync {
/// Return the value associated with the given idx.
/// Return the value associated to the given idx.
///
/// This accessor should return as fast as possible.
///
/// # Panics
///
/// May panic if `idx` is greater than or equal to the column length.
fn get_val(&self, idx: u32) -> T;
///
/// TODO remove to force people to use `.reader()`.
fn get_val(&self, idx: u64) -> T;
/// Fills an output buffer with the fast field values
/// associated with the `DocId` going from
@@ -27,27 +32,21 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
#[inline]
fn get_range(&self, start: u64, output: &mut [T]) {
for (out, idx) in output.iter_mut().zip(start..) {
*out = self.get_val(idx as u32);
*out = self.get_val(idx);
}
}
/// Get the positions of values which are in the provided value range.
///
/// Note that position == docid for single value fast fields
/// Return the positions of values which are in the provided range.
#[inline]
fn get_docids_for_value_range(
&self,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
let doc_id_range = doc_id_range.start..doc_id_range.end.min(self.num_vals());
for idx in doc_id_range {
fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64> {
let mut vals = Vec::new();
for idx in 0..self.num_vals() {
let val = self.get_val(idx);
if value_range.contains(&val) {
positions.push(idx);
if range.contains(&val) {
vals.push(idx);
}
}
vals
}
/// Returns the minimum value for this fast field.
@@ -66,24 +65,84 @@ pub trait Column<T: PartialOrd = u64>: Send + Sync {
/// `.max_value()`.
fn max_value(&self) -> T;
/// The number of values in the column.
fn num_vals(&self) -> u32;
fn num_vals(&self) -> u64;
}
/// Returns an iterator over the data
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
/// `ColumnReader` makes it possible to read forward through a column.
pub trait ColumnReader<T = u64> {
/// Advance the reader to the target_idx.
///
/// After a successful call to `seek`,
/// `.get()` should return `column.get_val(target_idx)`.
fn seek(&mut self, target_idx: u64) -> T;
fn advance(&mut self) -> bool;
/// Get the current value without advancing the reader
fn get(&self) -> T;
}
pub fn iter_from_reader<'a, T: 'static>(
mut column_reader: Box<dyn ColumnReader<T> + 'a>,
) -> impl Iterator<Item = T> + 'a {
std::iter::from_fn(move || {
if !column_reader.advance() {
return None;
}
Some(column_reader.get())
})
}
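// A minimal usage sketch of the cursor API (hypothetical helper, not part of the
// crate): sum a column by advancing through its reader once.
fn sum_column_example(column: &dyn Column<u64>) -> u64 {
let mut reader = column.reader();
let mut total = 0u64;
while reader.advance() {
total += reader.get();
}
total
}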
pub(crate) struct ColumnReaderAdapter<'a, C: ?Sized, T> {
column: &'a C,
idx: u64,
len: u64,
_phantom: PhantomData<T>,
}
impl<'a, C: Column<T> + ?Sized, T: Copy + PartialOrd + 'static> From<&'a C>
for ColumnReaderAdapter<'a, C, T>
{
fn from(column: &'a C) -> Self {
ColumnReaderAdapter {
column,
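// Start one position before index 0: the first call to `advance()` wraps to 0.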
idx: u64::MAX,
len: column.num_vals(),
_phantom: PhantomData,
}
}
}
impl<'a, T, C: ?Sized> ColumnReader<T> for ColumnReaderAdapter<'a, C, T>
where
C: Column<T>,
T: PartialOrd<T> + Copy + 'static,
{
fn seek(&mut self, idx: u64) -> T {
self.idx = idx;
self.get()
}
fn advance(&mut self) -> bool {
self.idx = self.idx.wrapping_add(1);
self.idx < self.len
}
fn get(&self) -> T {
self.column.get_val(self.idx)
}
}
/// VecColumn provides `Column` over a slice.
pub struct VecColumn<'a, T = u64> {
values: &'a [T],
min_value: T,
max_value: T,
}
impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
fn get_val(&self, idx: u32) -> T {
impl<'a, C: Column<T>, T> Column<T> for &'a C
where T: Copy + PartialOrd + 'static
{
fn get_val(&self, idx: u64) -> T {
(*self).get_val(idx)
}
@@ -95,12 +154,12 @@ impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
(*self).max_value()
}
fn num_vals(&self) -> u32 {
fn num_vals(&self) -> u64 {
(*self).num_vals()
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
(*self).iter()
fn reader(&self) -> Box<dyn ColumnReader<T> + '_> {
(*self).reader()
}
fn get_range(&self, start: u64, output: &mut [T]) {
@@ -108,15 +167,11 @@ impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
}
}
impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
fn get_val(&self, position: u32) -> T {
impl<'a, T: Copy + PartialOrd + Send + Sync + 'static> Column<T> for VecColumn<'a, T> {
fn get_val(&self, position: u64) -> T {
self.values[position as usize]
}
fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
Box::new(self.values.iter().copied())
}
fn min_value(&self) -> T {
self.min_value
}
@@ -125,8 +180,8 @@ impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
self.max_value
}
fn num_vals(&self) -> u32 {
self.values.len() as u32
fn num_vals(&self) -> u64 {
self.values.len() as u64
}
fn get_range(&self, start: u64, output: &mut [T]) {
@@ -134,7 +189,7 @@ impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
}
}
impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
impl<'a, T: Copy + Ord + Default, V> From<&'a V> for VecColumn<'a, T>
where V: AsRef<[T]> + ?Sized
{
fn from(values: &'a V) -> Self {
@@ -154,30 +209,16 @@ struct MonotonicMappingColumn<C, T, Input> {
_phantom: PhantomData<Input>,
}
/// Creates a view of a column transformed by a strictly monotonic mapping. See
/// [`StrictlyMonotonicFn`].
///
/// E.g. apply a gcd monotonic_mapping([100, 200, 300]) == [1, 2, 3]
/// monotonic_mapping.mapping() is expected to be injective, and we should always have
/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
///
/// The inverse of the mapping is required for:
/// `fn get_positions_for_value_range(&self, range: RangeInclusive<T>) -> Vec<u64> `
/// The user provides the original value range and we need to monotonic map them in the same way the
/// serialization does before calling the underlying column.
///
/// Note that when opening a codec, the monotonic_mapping should be the inverse of the mapping
/// during serialization. And therefore the monotonic_mapping_inv when opening is the same as
/// monotonic_mapping during serialization.
pub fn monotonic_map_column<C, T, Input, Output>(
/// Creates a view of a column transformed by a monotonic mapping.
pub fn monotonic_map_column<C, T, Input: PartialOrd + Copy, Output: PartialOrd + Copy>(
from_column: C,
monotonic_mapping: T,
) -> impl Column<Output>
where
C: Column<Input>,
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
Input: PartialOrd + Send + Sync + Clone,
Output: PartialOrd + Send + Sync + Clone,
T: Fn(Input) -> Output + Send + Sync,
Input: Send + Sync + 'static,
Output: Send + Sync + 'static,
{
MonotonicMappingColumn {
from_column,
@@ -186,60 +227,72 @@ where
}
}
impl<C, T, Input, Output> Column<Output> for MonotonicMappingColumn<C, T, Input>
impl<C, T, Input: PartialOrd + Copy, Output: PartialOrd + Copy> Column<Output>
for MonotonicMappingColumn<C, T, Input>
where
C: Column<Input>,
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
Input: PartialOrd + Send + Sync + Clone,
Output: PartialOrd + Send + Sync + Clone,
T: Fn(Input) -> Output + Send + Sync,
Input: Send + Sync + 'static,
Output: Send + Sync + 'static,
{
#[inline]
fn get_val(&self, idx: u32) -> Output {
fn get_val(&self, idx: u64) -> Output {
let from_val = self.from_column.get_val(idx);
self.monotonic_mapping.mapping(from_val)
(self.monotonic_mapping)(from_val)
}
fn min_value(&self) -> Output {
let from_min_value = self.from_column.min_value();
self.monotonic_mapping.mapping(from_min_value)
(self.monotonic_mapping)(from_min_value)
}
fn max_value(&self) -> Output {
let from_max_value = self.from_column.max_value();
self.monotonic_mapping.mapping(from_max_value)
(self.monotonic_mapping)(from_max_value)
}
fn num_vals(&self) -> u32 {
fn num_vals(&self) -> u64 {
self.from_column.num_vals()
}
fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
Box::new(
self.from_column
.iter()
.map(|el| self.monotonic_mapping.mapping(el)),
)
}
fn get_docids_for_value_range(
&self,
range: RangeInclusive<Output>,
doc_id_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.from_column.get_docids_for_value_range(
self.monotonic_mapping.inverse(range.start().clone())
..=self.monotonic_mapping.inverse(range.end().clone()),
doc_id_range,
positions,
)
fn reader(&self) -> Box<dyn ColumnReader<Output> + '_> {
Box::new(MonotonicMappingColumnReader {
col_reader: self.from_column.reader(),
monotonic_mapping: &self.monotonic_mapping,
intermediary_type: PhantomData,
})
}
// We deliberately do not implement `get_range`, as it yields a regression,
// and we do not have any specialized implementation anyway.
}
/// Wraps an iterator into a `Column`.
struct MonotonicMappingColumnReader<'a, Transform, U> {
col_reader: Box<dyn ColumnReader<U> + 'a>,
monotonic_mapping: &'a Transform,
intermediary_type: PhantomData<U>,
}
impl<'a, U, V, Transform> ColumnReader<V> for MonotonicMappingColumnReader<'a, Transform, U>
where
U: Copy,
V: Copy,
Transform: Fn(U) -> V,
{
fn seek(&mut self, idx: u64) -> V {
let intermediary_value = self.col_reader.seek(idx);
(*self.monotonic_mapping)(intermediary_value)
}
fn advance(&mut self) -> bool {
self.col_reader.advance()
}
fn get(&self) -> V {
(*self.monotonic_mapping)(self.col_reader.get())
}
}
pub struct IterColumn<T>(T);
impl<T> From<T> for IterColumn<T>
@@ -253,9 +306,9 @@ where T: Iterator + Clone + ExactSizeIterator
impl<T> Column<T::Item> for IterColumn<T>
where
T: Iterator + Clone + ExactSizeIterator + Send + Sync,
T::Item: PartialOrd,
T::Item: PartialOrd + Copy + 'static,
{
fn get_val(&self, idx: u32) -> T::Item {
fn get_val(&self, idx: u64) -> T::Item {
self.0.clone().nth(idx as usize).unwrap()
}
@@ -267,34 +320,27 @@ where
self.0.clone().last().unwrap()
}
fn num_vals(&self) -> u32 {
self.0.len() as u32
}
fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
Box::new(self.0.clone())
fn num_vals(&self) -> u64 {
self.0.len() as u64
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::monotonic_mapping::{
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternalBaseval,
StrictlyMonotonicMappingToInternalGCDBaseval,
};
use crate::MonotonicallyMappableToU64;
#[test]
fn test_monotonic_mapping() {
let vals = &[3u64, 5u64][..];
let vals = &[1u64, 3u64][..];
let col = VecColumn::from(vals);
let mapped = monotonic_map_column(col, StrictlyMonotonicMappingToInternalBaseval::new(2));
assert_eq!(mapped.min_value(), 1u64);
assert_eq!(mapped.max_value(), 3u64);
let mapped = monotonic_map_column(col, |el| el + 4);
assert_eq!(mapped.min_value(), 5u64);
assert_eq!(mapped.max_value(), 7u64);
assert_eq!(mapped.num_vals(), 2);
assert_eq!(mapped.num_vals(), 2);
assert_eq!(mapped.get_val(0), 1);
assert_eq!(mapped.get_val(1), 3);
assert_eq!(mapped.get_val(0), 5);
assert_eq!(mapped.get_val(1), 7);
}
#[test]
@@ -306,15 +352,10 @@ mod tests {
#[test]
fn test_monotonic_mapping_iter() {
let vals: Vec<u64> = (10..110u64).map(|el| el * 10).collect();
let vals: Vec<u64> = (-1..99).map(i64::to_u64).collect();
let col = VecColumn::from(&vals);
let mapped = monotonic_map_column(
col,
StrictlyMonotonicMappingInverter::from(
StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100),
),
);
let val_i64s: Vec<u64> = mapped.iter().collect();
let mapped = monotonic_map_column(col, |el| i64::from_u64(el) * 10i64);
let val_i64s: Vec<i64> = crate::iter_from_reader(mapped.reader()).collect();
for i in 0..100 {
assert_eq!(val_i64s[i as usize], mapped.get_val(i));
}
@@ -322,26 +363,20 @@ mod tests {
#[test]
fn test_monotonic_mapping_get_range() {
let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
let vals: Vec<u64> = (-1..99).map(i64::to_u64).collect();
let col = VecColumn::from(&vals);
let mapped = monotonic_map_column(
col,
StrictlyMonotonicMappingInverter::from(
StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 0),
),
);
assert_eq!(mapped.min_value(), 0u64);
assert_eq!(mapped.max_value(), 9900u64);
let mapped = monotonic_map_column(col, |el| i64::from_u64(el) * 10i64);
assert_eq!(mapped.min_value(), -10i64);
assert_eq!(mapped.max_value(), 980i64);
assert_eq!(mapped.num_vals(), 100);
let val_u64s: Vec<u64> = mapped.iter().collect();
assert_eq!(val_u64s.len(), 100);
let val_i64s: Vec<i64> = crate::iter_from_reader(mapped.reader()).collect();
assert_eq!(val_i64s.len(), 100);
for i in 0..100 {
assert_eq!(val_u64s[i as usize], mapped.get_val(i));
assert_eq!(val_u64s[i as usize], vals[i as usize] * 10);
assert_eq!(val_i64s[i as usize], mapped.get_val(i));
assert_eq!(val_i64s[i as usize], i64::from_u64(vals[i as usize]) * 10);
}
let mut buf = [0u64; 20];
let mut buf = [0i64; 20];
mapped.get_range(7, &mut buf[..]);
assert_eq!(&val_u64s[7..][..20], &buf);
assert_eq!(&val_i64s[7..][..20], &buf);
}
}

View File

@@ -57,7 +57,7 @@ fn num_bits(val: u128) -> u8 {
/// metadata.
pub fn get_compact_space(
values_deduped_sorted: &BTreeSet<u128>,
total_num_values: u32,
total_num_values: u64,
cost_per_blank: usize,
) -> CompactSpace {
let mut compact_space_builder = CompactSpaceBuilder::new();
@@ -208,7 +208,7 @@ impl CompactSpaceBuilder {
};
let covered_range_len = range_mapping.range_length();
ranges_mapping.push(range_mapping);
compact_start += covered_range_len;
compact_start += covered_range_len as u64;
}
// println!("num ranges {}", ranges_mapping.len());
CompactSpace { ranges_mapping }

View File

@@ -14,14 +14,15 @@ use std::{
cmp::Ordering,
collections::BTreeSet,
io::{self, Write},
ops::{Range, RangeInclusive},
ops::RangeInclusive,
};
use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
use common::{BinarySerializable, CountingWriter, VInt, VIntU128};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
use crate::compact_space::build_compact_space::get_compact_space;
use crate::Column;
use crate::{iter_from_reader, Column, ColumnReader};
mod blank_range;
mod build_compact_space;
@@ -96,7 +97,7 @@ impl BinarySerializable for CompactSpace {
};
let range_length = range_mapping.range_length();
ranges_mapping.push(range_mapping);
compact_start += range_length;
compact_start += range_length as u64;
}
Ok(Self { ranges_mapping })
@@ -164,19 +165,22 @@ pub struct IPCodecParams {
bit_unpacker: BitUnpacker,
min_value: u128,
max_value: u128,
num_vals: u32,
num_vals: u64,
num_bits: u8,
}
impl CompactSpaceCompressor {
/// Training buffers all of the values in memory in order to sort them, which may be costly.
pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u32) -> Self {
pub fn train_from(column: &impl Column<u128>) -> Self {
let mut values_sorted = BTreeSet::new();
values_sorted.extend(iter);
let total_num_values = num_vals;
let total_num_values = column.num_vals();
values_sorted.extend(iter_from_reader(column.reader()));
let compact_space =
get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
let amplitude_compact_space = compact_space.amplitude_compact_space();
assert!(
@@ -199,7 +203,7 @@ impl CompactSpaceCompressor {
bit_unpacker: BitUnpacker::new(num_bits),
min_value,
max_value,
num_vals: total_num_values,
num_vals: total_num_values as u64,
num_bits,
},
}
@@ -217,11 +221,12 @@ impl CompactSpaceCompressor {
pub fn compress_into(
self,
vals: impl Iterator<Item = u128>,
mut vals: Box<dyn ColumnReader<u128> + '_>,
write: &mut impl Write,
) -> io::Result<()> {
let mut bitpacker = BitPacker::default();
for val in vals {
while vals.advance() {
let val = vals.get();
let compact = self
.params
.compact_space
@@ -266,7 +271,7 @@ impl BinarySerializable for IPCodecParams {
let _header_flags = u64::deserialize(reader)?;
let min_value = VIntU128::deserialize(reader)?.0;
let max_value = VIntU128::deserialize(reader)?.0;
let num_vals = VIntU128::deserialize(reader)?.0 as u32;
let num_vals = VIntU128::deserialize(reader)?.0 as u64;
let num_bits = u8::deserialize(reader)?;
let compact_space = CompactSpace::deserialize(reader)?;
@@ -283,7 +288,7 @@ impl BinarySerializable for IPCodecParams {
impl Column<u128> for CompactSpaceDecompressor {
#[inline]
fn get_val(&self, doc: u32) -> u128 {
fn get_val(&self, doc: u64) -> u128 {
self.get(doc)
}
@@ -295,23 +300,16 @@ impl Column<u128> for CompactSpaceDecompressor {
self.max_value()
}
fn num_vals(&self) -> u32 {
fn num_vals(&self) -> u64 {
self.params.num_vals
}
#[inline]
fn iter(&self) -> Box<dyn Iterator<Item = u128> + '_> {
Box::new(self.iter())
fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
self.get_between_vals(range)
}
#[inline]
fn get_docids_for_value_range(
&self,
value_range: RangeInclusive<u128>,
positions_range: Range<u32>,
positions: &mut Vec<u32>,
) {
self.get_positions_for_value_range(value_range, positions_range, positions)
fn reader(&self) -> Box<dyn ColumnReader<u128> + '_> {
Box::new(self.specialized_reader())
}
}
@@ -346,19 +344,12 @@ impl CompactSpaceDecompressor {
/// Comparing on compact space: Real dataset 1.08 GElements/s
///
/// Comparing on original space: Real dataset 0.06 GElements/s (not completely optimized)
#[inline]
pub fn get_positions_for_value_range(
&self,
value_range: RangeInclusive<u128>,
position_range: Range<u32>,
positions: &mut Vec<u32>,
) {
if value_range.start() > value_range.end() {
return;
pub fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
if range.start() > range.end() {
return Vec::new();
}
let position_range = position_range.start..position_range.end.min(self.num_vals());
let from_value = *value_range.start();
let to_value = *value_range.end();
let from_value = *range.start();
let to_value = *range.end();
assert!(to_value >= from_value);
let compact_from = self.u128_to_compact(from_value);
let compact_to = self.u128_to_compact(to_value);
@@ -366,7 +357,7 @@ impl CompactSpaceDecompressor {
// Quick return: if both range bounds fall into the same non-mapped space, the range
// can't cover any values, so we can exit early.
match (compact_to, compact_from) {
(Err(pos1), Err(pos2)) if pos1 == pos2 => return,
(Err(pos1), Err(pos2)) if pos1 == pos2 => return Vec::new(),
_ => {}
}
@@ -388,20 +379,19 @@ impl CompactSpaceDecompressor {
});
let range = compact_from..=compact_to;
let scan_num_docs = position_range.end - position_range.start;
let mut positions = Vec::new();
let step_size = 4;
let cutoff = position_range.start + scan_num_docs - scan_num_docs % step_size;
let cutoff = self.params.num_vals - self.params.num_vals % step_size;
let mut push_if_in_range = |idx, val| {
if range.contains(&val) {
positions.push(idx);
}
};
let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
let get_val = |idx| self.params.bit_unpacker.get(idx as u64, &self.data);
// unrolled loop
for idx in (position_range.start..cutoff).step_by(step_size as usize) {
for idx in (0..cutoff).step_by(step_size as usize) {
let idx1 = idx;
let idx2 = idx + 1;
let idx3 = idx + 2;
@@ -417,26 +407,24 @@ impl CompactSpaceDecompressor {
}
// handle rest
for idx in cutoff..position_range.end {
for idx in cutoff..self.params.num_vals {
push_if_in_range(idx, get_val(idx));
}
positions
}
fn specialized_reader(&self) -> CompactSpaceReader<'_> {
CompactSpaceReader {
data: self.data.as_slice(),
params: &self.params,
// The cursor starts one position before the first element (advance-then-get
// contract, matching `compress_into` above): the first `advance()` wraps
// u64::MAX back to index 0.
idx: u64::MAX,
len: self.params.num_vals,
}
}
#[inline]
fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
(0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
}
#[inline]
fn iter(&self) -> impl Iterator<Item = u128> + '_ {
// TODO: Performance. It would be better to iterate on the ranges and check existence via
// the bit_unpacker.
self.iter_compact()
.map(|compact| self.compact_to_u128(compact))
}
#[inline]
pub fn get(&self, idx: u32) -> u128 {
pub fn get(&self, idx: u64) -> u128 {
let compact = self.params.bit_unpacker.get(idx, &self.data);
self.compact_to_u128(compact)
}
@@ -450,14 +438,35 @@ impl CompactSpaceDecompressor {
}
}
pub struct CompactSpaceReader<'a> {
data: &'a [u8],
params: &'a IPCodecParams,
idx: u64,
len: u64,
}
impl<'a> ColumnReader<u128> for CompactSpaceReader<'a> {
fn seek(&mut self, target_idx: u64) -> u128 {
self.idx = target_idx;
self.get()
}
fn advance(&mut self) -> bool {
self.idx = self.idx.wrapping_add(1);
self.idx < self.len
}
fn get(&self) -> u128 {
let compact_code = self.params.bit_unpacker.get(self.idx, self.data);
self.params.compact_space.compact_to_u128(compact_code)
}
}
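// Editor's sketch (not part of the diff): how a cursor-style `ColumnReader`
// is consumed. Matching `compress_into` above, the contract is
// advance-then-get: a fresh reader sits just before its first element.
// `sum_reader` is a hypothetical helper, not an API of this crate.
#[allow(dead_code)]
fn sum_reader(mut reader: Box<dyn ColumnReader<u128> + '_>) -> u128 {
    let mut sum = 0u128;
    // `advance()` moves the cursor and reports whether it still points
    // at a value; `get()` reads the value under the cursor.
    while reader.advance() {
        sum = sum.wrapping_add(reader.get());
    }
    sum
}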
#[cfg(test)]
mod tests {
use super::*;
use crate::format_version::read_format_version;
use crate::null_index_footer::read_null_index_footer;
use crate::serialize::U128Header;
use crate::{open_u128, serialize_u128};
use crate::{open_u128, serialize_u128, VecColumn};
#[test]
fn compact_space_test() {
@@ -466,7 +475,7 @@ mod tests {
]
.into_iter()
.collect();
let compact_space = get_compact_space(ips, ips.len() as u32, 11);
let compact_space = get_compact_space(ips, ips.len() as u64, 11);
let amplitude = compact_space.amplitude_compact_space();
assert_eq!(amplitude, 17);
assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
@@ -497,30 +506,24 @@ mod tests {
#[test]
fn compact_space_amplitude_test() {
let ips = &[100000u128, 1000000].into_iter().collect();
let compact_space = get_compact_space(ips, ips.len() as u32, 1);
let compact_space = get_compact_space(ips, ips.len() as u64, 1);
let amplitude = compact_space.amplitude_compact_space();
assert_eq!(amplitude, 2);
}
fn test_all(mut data: OwnedBytes, expected: &[u128]) {
let _header = U128Header::deserialize(&mut data);
fn test_all(data: OwnedBytes, expected: &[u128]) {
let decompressor = CompactSpaceDecompressor::open(data).unwrap();
for (idx, expected_val) in expected.iter().cloned().enumerate() {
let val = decompressor.get(idx as u32);
let val = decompressor.get(idx as u64);
assert_eq!(val, expected_val);
let test_range = |range: RangeInclusive<u128>| {
let expected_positions = expected
.iter()
.positions(|val| range.contains(val))
.map(|pos| pos as u32)
.map(|pos| pos as u64)
.collect::<Vec<_>>();
let mut positions = Vec::new();
decompressor.get_positions_for_value_range(
range,
0..decompressor.num_vals(),
&mut positions,
);
let positions = decompressor.get_between_vals(range);
assert_eq!(positions, expected_positions);
};
@@ -533,18 +536,10 @@ mod tests {
fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
let mut out = Vec::new();
serialize_u128(
|| u128_vals.iter().cloned(),
u128_vals.len() as u32,
&mut out,
)
.unwrap();
serialize_u128(VecColumn::from(u128_vals), &mut out).unwrap();
let data = OwnedBytes::new(out);
let (data, _format_version) = read_format_version(data).unwrap();
let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
test_all(data.clone(), u128_vals);
data
}
@@ -561,111 +556,26 @@ mod tests {
4_000_211_222u128,
333u128,
];
let mut data = test_aux_vals(vals);
let _header = U128Header::deserialize(&mut data);
let data = test_aux_vals(vals);
let decomp = CompactSpaceDecompressor::open(data).unwrap();
let complete_range = 0..vals.len() as u32;
for (pos, val) in vals.iter().enumerate() {
let val = *val;
let pos = pos as u32;
let mut positions = Vec::new();
decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
assert_eq!(positions, vec![pos]);
}
// handle docid range out of bounds
let positions = get_positions_for_value_range_helper(&decomp, 0..=1, 1..u32::MAX);
assert_eq!(positions, vec![]);
let positions =
get_positions_for_value_range_helper(&decomp, 0..=1, complete_range.clone());
let positions = decomp.get_between_vals(0..=1);
assert_eq!(positions, vec![0]);
let positions =
get_positions_for_value_range_helper(&decomp, 0..=2, complete_range.clone());
let positions = decomp.get_between_vals(0..=2);
assert_eq!(positions, vec![0]);
let positions =
get_positions_for_value_range_helper(&decomp, 0..=3, complete_range.clone());
let positions = decomp.get_between_vals(0..=3);
assert_eq!(positions, vec![0, 2]);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99999u128..=99999u128,
complete_range.clone()
),
vec![3]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99999u128..=100000u128,
complete_range.clone()
),
vec![3, 4]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99998u128..=100000u128,
complete_range.clone()
),
vec![3, 4]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99998u128..=99999u128,
complete_range.clone()
),
vec![3]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
99998u128..=99998u128,
complete_range.clone()
),
vec![]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
333u128..=333u128,
complete_range.clone()
),
vec![8]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
332u128..=333u128,
complete_range.clone()
),
vec![8]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
332u128..=334u128,
complete_range.clone()
),
vec![8]
);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
333u128..=334u128,
complete_range.clone()
),
vec![8]
);
assert_eq!(decomp.get_between_vals(99999u128..=99999u128), vec![3]);
assert_eq!(decomp.get_between_vals(99999u128..=100000u128), vec![3, 4]);
assert_eq!(decomp.get_between_vals(99998u128..=100000u128), vec![3, 4]);
assert_eq!(decomp.get_between_vals(99998u128..=99999u128), vec![3]);
assert_eq!(decomp.get_between_vals(99998u128..=99998u128), vec![]);
assert_eq!(decomp.get_between_vals(333u128..=333u128), vec![8]);
assert_eq!(decomp.get_between_vals(332u128..=333u128), vec![8]);
assert_eq!(decomp.get_between_vals(332u128..=334u128), vec![8]);
assert_eq!(decomp.get_between_vals(333u128..=334u128), vec![8]);
assert_eq!(
get_positions_for_value_range_helper(
&decomp,
4_000_211_221u128..=5_000_000_000u128,
complete_range
),
decomp.get_between_vals(4_000_211_221u128..=5_000_000_000u128),
vec![6, 7]
);
}
@@ -688,32 +598,14 @@ mod tests {
4_000_211_222u128,
333u128,
];
let mut data = test_aux_vals(vals);
let _header = U128Header::deserialize(&mut data);
let data = test_aux_vals(vals);
let decomp = CompactSpaceDecompressor::open(data).unwrap();
let complete_range = 0..vals.len() as u32;
assert_eq!(
get_positions_for_value_range_helper(&decomp, 0..=5, complete_range.clone()),
vec![]
);
assert_eq!(
get_positions_for_value_range_helper(&decomp, 0..=100, complete_range.clone()),
vec![0]
);
assert_eq!(
get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
vec![0]
);
}
fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
column: &C,
value_range: RangeInclusive<T>,
doc_id_range: Range<u32>,
) -> Vec<u32> {
let mut positions = Vec::new();
column.get_docids_for_value_range(value_range, doc_id_range, &mut positions);
positions
let positions = decomp.get_between_vals(0..=5);
assert_eq!(positions, vec![]);
let positions = decomp.get_between_vals(0..=100);
assert_eq!(positions, vec![0]);
let positions = decomp.get_between_vals(0..=105);
assert_eq!(positions, vec![0]);
}
#[test]
@@ -734,29 +626,13 @@ mod tests {
5_000_000_000,
];
let mut out = Vec::new();
serialize_u128(|| vals.iter().cloned(), vals.len() as u32, &mut out).unwrap();
let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();
let complete_range = 0..vals.len() as u32;
serialize_u128(VecColumn::from(vals), &mut out).unwrap();
let decomp = open_u128(OwnedBytes::new(out)).unwrap();
assert_eq!(
get_positions_for_value_range_helper(&*decomp, 199..=200, complete_range.clone()),
vec![0]
);
assert_eq!(
get_positions_for_value_range_helper(&*decomp, 199..=201, complete_range.clone()),
vec![0, 1]
);
assert_eq!(
get_positions_for_value_range_helper(&*decomp, 200..=200, complete_range.clone()),
vec![0]
);
assert_eq!(
get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
vec![11]
);
assert_eq!(decomp.get_between_vals(199..=200), vec![0]);
assert_eq!(decomp.get_between_vals(199..=201), vec![0, 1]);
assert_eq!(decomp.get_between_vals(200..=200), vec![0]);
assert_eq!(decomp.get_between_vals(1_000_000..=1_000_000), vec![11]);
}
#[test]

View File

@@ -1,38 +0,0 @@
use std::io;
use common::{BinarySerializable, OwnedBytes};
const MAGIC_NUMBER: u16 = 4335u16;
const FASTFIELD_FORMAT_VERSION: u8 = 1;
pub(crate) fn append_format_version(output: &mut impl io::Write) -> io::Result<()> {
FASTFIELD_FORMAT_VERSION.serialize(output)?;
MAGIC_NUMBER.serialize(output)?;
Ok(())
}
pub(crate) fn read_format_version(data: OwnedBytes) -> io::Result<(OwnedBytes, u8)> {
let (data, magic_number_bytes) = data.rsplit(2);
let magic_number = u16::deserialize(&mut magic_number_bytes.as_slice())?;
if magic_number != MAGIC_NUMBER {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("magic number mismatch {} != {}", magic_number, MAGIC_NUMBER),
));
}
let (data, format_version_bytes) = data.rsplit(1);
let format_version = u8::deserialize(&mut format_version_bytes.as_slice())?;
if format_version > FASTFIELD_FORMAT_VERSION {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Unsupported fastfield format version: {}. Max supported version: {}",
format_version, FASTFIELD_FORMAT_VERSION
),
));
}
Ok((data, format_version))
}
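// Editor's sketch (not part of the diff): the two functions above give a
// fastfield file the tail layout [payload][format_version: u8][magic: u16],
// which `read_format_version` peels off from the end via `rsplit`.
// A hypothetical round trip:
#[cfg(test)]
mod format_version_sketch {
    use common::OwnedBytes;

    #[test]
    fn footer_round_trip() {
        let mut out = Vec::new();
        out.extend_from_slice(b"payload");
        super::append_format_version(&mut out).unwrap();
        // 1 version byte + 2 magic bytes are appended after the payload.
        assert_eq!(out.len(), b"payload".len() + 3);
        let (data, version) = super::read_format_version(OwnedBytes::new(out)).unwrap();
        assert_eq!(version, super::FASTFIELD_FORMAT_VERSION);
        assert_eq!(data.as_slice(), b"payload");
    }
}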

View File

@@ -45,7 +45,7 @@ mod tests {
use std::io;
use std::num::NonZeroU64;
use common::OwnedBytes;
use ownedbytes::OwnedBytes;
use crate::gcd::{compute_gcd, find_gcd};
use crate::{FastFieldCodecType, VecColumn};

View File

@@ -1,12 +1,5 @@
#![warn(missing_docs)]
#![cfg_attr(all(feature = "unstable", test), feature(test))]
//! # `fastfield_codecs`
//!
//! - Columnar storage of data for tantivy [`Column`].
//! - Encode data in different codecs.
//! - Monotonically map values to u64/u128
#[cfg(test)]
#[macro_use]
extern crate more_asserts;
@@ -18,54 +11,36 @@ use std::io;
use std::io::Write;
use std::sync::Arc;
use common::{BinarySerializable, OwnedBytes};
use common::BinarySerializable;
use compact_space::CompactSpaceDecompressor;
use format_version::read_format_version;
use monotonic_mapping::{
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
StrictlyMonotonicMappingToInternalBaseval, StrictlyMonotonicMappingToInternalGCDBaseval,
};
use null_index_footer::read_null_index_footer;
use serialize::{Header, U128Header};
use ownedbytes::OwnedBytes;
use serialize::Header;
mod bitpacked;
mod blockwise_linear;
mod compact_space;
mod format_version;
mod line;
mod linear;
mod monotonic_mapping;
mod monotonic_mapping_u128;
#[allow(dead_code)]
mod null_index;
mod null_index_footer;
mod column;
mod gcd;
pub mod serialize;
mod serialize;
use self::bitpacked::BitpackedCodec;
use self::blockwise_linear::BlockwiseLinearCodec;
pub use self::column::{monotonic_map_column, Column, IterColumn, VecColumn};
pub use self::column::{iter_from_reader, monotonic_map_column, Column, ColumnReader, VecColumn};
use self::linear::LinearCodec;
pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
pub use self::monotonic_mapping::MonotonicallyMappableToU64;
pub use self::serialize::{
estimate, serialize, serialize_and_load, serialize_u128, NormalizedHeader,
};
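// Editor's sketch (not part of the diff): `iter_from_reader` (re-exported
// above) can be read as a small adapter from the cursor API back to an
// iterator. A possible shape, assuming the advance-then-get contract used
// by the codecs; `iter_from_reader_sketch` is a hypothetical name.
#[allow(dead_code)]
fn iter_from_reader_sketch<'a, T: 'a>(
    mut reader: Box<dyn ColumnReader<T> + 'a>,
) -> impl Iterator<Item = T> + 'a {
    std::iter::from_fn(move || {
        if reader.advance() {
            Some(reader.get())
        } else {
            None
        }
    })
}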
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)]
/// Available codecs to use to encode the u64 (via [`MonotonicallyMappableToU64`]) converted data.
pub enum FastFieldCodecType {
/// Bitpack all values in the value range. The number of bits is defined by the amplitude
/// `column.max_value() - column.min_value()`
Bitpacked = 1,
/// Linear interpolation puts a line between the first and last value and then bitpacks the
/// values by the offset from the line. The number of bits is defined by the max deviation from
/// the line.
Linear = 2,
/// Same as [`FastFieldCodecType::Linear`], but encodes in blocks of 512 elements.
BlockwiseLinear = 3,
}
@@ -83,11 +58,11 @@ impl BinarySerializable for FastFieldCodecType {
}
impl FastFieldCodecType {
pub(crate) fn to_code(self) -> u8 {
pub fn to_code(self) -> u8 {
self as u8
}
pub(crate) fn from_code(code: u8) -> Option<Self> {
pub fn from_code(code: u8) -> Option<Self> {
match code {
1 => Some(Self::Bitpacked),
2 => Some(Self::Linear),
@@ -97,59 +72,15 @@ impl FastFieldCodecType {
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)]
/// Available codecs to use to encode the u128 (via [`MonotonicallyMappableToU128`]) converted data.
pub enum U128FastFieldCodecType {
/// This codec takes a large number space (u128) and reduces it to a compact number space, by
/// removing the holes.
CompactSpace = 1,
}
impl BinarySerializable for U128FastFieldCodecType {
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
self.to_code().serialize(wrt)
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let code = u8::deserialize(reader)?;
let codec_type: Self = Self::from_code(code)
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`.")))?;
Ok(codec_type)
}
}
impl U128FastFieldCodecType {
pub(crate) fn to_code(self) -> u8 {
self as u8
}
pub(crate) fn from_code(code: u8) -> Option<Self> {
match code {
1 => Some(Self::CompactSpace),
_ => None,
}
}
/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open_u128(bytes: OwnedBytes) -> io::Result<Arc<dyn Column<u128>>> {
Ok(Arc::new(CompactSpaceDecompressor::open(bytes)?))
}
/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open_u128<Item: MonotonicallyMappableToU128>(
bytes: OwnedBytes,
) -> io::Result<Arc<dyn Column<Item>>> {
let (bytes, _format_version) = read_format_version(bytes)?;
let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
let header = U128Header::deserialize(&mut bytes)?;
assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
let reader = CompactSpaceDecompressor::open(bytes)?;
let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<Item>> =
StrictlyMonotonicMappingToInternal::<Item>::new().into();
Ok(Arc::new(monotonic_map_column(reader, inverted)))
}
/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::Result<Arc<dyn Column<T>>> {
let (bytes, _format_version) = read_format_version(bytes)?;
let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
pub fn open<T: MonotonicallyMappableToU64>(
mut bytes: OwnedBytes,
) -> io::Result<Arc<dyn Column<T>>> {
let header = Header::deserialize(&mut bytes)?;
match header.codec_type {
FastFieldCodecType::Bitpacked => open_specific_codec::<BitpackedCodec, _>(bytes, &header),
@@ -168,15 +99,11 @@ fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64>(
let reader = C::open_from_bytes(bytes, normalized_header)?;
let min_value = header.min_value;
if let Some(gcd) = header.gcd {
let mapping = StrictlyMonotonicMappingInverter::from(
StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd.get(), min_value),
);
Ok(Arc::new(monotonic_map_column(reader, mapping)))
let monotonic_mapping = move |val: u64| Item::from_u64(min_value + val * gcd.get());
Ok(Arc::new(monotonic_map_column(reader, monotonic_mapping)))
} else {
let mapping = StrictlyMonotonicMappingInverter::from(
StrictlyMonotonicMappingToInternalBaseval::new(min_value),
);
Ok(Arc::new(monotonic_map_column(reader, mapping)))
let monotonic_mapping = move |val: u64| Item::from_u64(min_value + val);
Ok(Arc::new(monotonic_map_column(reader, monotonic_mapping)))
}
}
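// Editor's note (not part of the diff): the closures above undo the
// normalization assumed to be applied at serialization time, i.e.
// stored = (original - min_value) / gcd. A minimal round-trip sketch:
#[cfg(test)]
mod gcd_mapping_sketch {
    #[test]
    fn mapping_restores_original_values() {
        let (min_value, gcd) = (100u64, 10u64);
        let normalize = |val: u64| (val - min_value) / gcd;
        let monotonic_mapping = |val: u64| min_value + val * gcd;
        for original in [100u64, 110, 250] {
            assert_eq!(monotonic_mapping(normalize(original)), original);
        }
    }
}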
@@ -196,7 +123,7 @@ trait FastFieldCodec: 'static {
///
/// The column iterator should be preferred over using column `get_val` method for
/// performance reasons.
fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()>;
fn serialize(column: &dyn Column<u64>, write: &mut impl Write) -> io::Result<()>;
/// Returns an estimate of the compression ratio.
/// If the codec is not applicable, returns `None`.
@@ -205,10 +132,9 @@ trait FastFieldCodec: 'static {
///
/// It could make sense to also return a value representing
/// computational complexity.
fn estimate(column: &dyn Column) -> Option<f32>;
fn estimate(column: &impl Column) -> Option<f32>;
}
/// The list of all available codecs for u64 convertible data.
pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
FastFieldCodecType::Bitpacked,
FastFieldCodecType::BlockwiseLinear,
@@ -217,7 +143,6 @@ pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
#[cfg(test)]
mod tests {
use proptest::prelude::*;
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};
@@ -243,32 +168,15 @@ mod tests {
let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
let reader = crate::open::<u64>(OwnedBytes::new(out)).unwrap();
assert_eq!(reader.num_vals(), data.len() as u32);
assert_eq!(reader.num_vals(), data.len() as u64);
for (doc, orig_val) in data.iter().copied().enumerate() {
let val = reader.get_val(doc as u32);
let val = reader.get_val(doc as u64);
assert_eq!(
val, orig_val,
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
`{data:?}`",
);
}
if !data.is_empty() {
let test_rand_idx = rand::thread_rng().gen_range(0..=data.len() - 1);
let expected_positions: Vec<u32> = data
.iter()
.enumerate()
.filter(|(_, el)| **el == data[test_rand_idx])
.map(|(pos, _)| pos as u32)
.collect();
let mut positions = Vec::new();
reader.get_docids_for_value_range(
data[test_rand_idx]..=data[test_rand_idx],
0..data.len() as u32,
&mut positions,
);
assert_eq!(expected_positions, positions);
}
Some((estimation, actual_compression))
}
@@ -404,7 +312,7 @@ mod tests {
#[test]
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
let mut data: Vec<u64> = (201..=20000_u64).collect();
let mut data: Vec<u64> = (200..=20000_u64).collect();
data.push(1_000_000);
let data: VecColumn = data.as_slice().into();
@@ -435,7 +343,7 @@ mod tests {
mod bench {
use std::sync::Arc;
use common::OwnedBytes;
use ownedbytes::OwnedBytes;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use test::{self, Bencher};
@@ -478,7 +386,7 @@ mod bench {
b.iter(|| {
let mut sum = 0u64;
for pos in value_iter() {
let val = col.get_val(pos as u32);
let val = col.get_val(pos as u64);
sum = sum.wrapping_add(val);
}
sum
@@ -490,7 +398,7 @@ mod bench {
b.iter(|| {
let mut sum = 0u64;
for pos in value_iter() {
let val = col.get_val(pos as u32);
let val = col.get_val(pos as u64);
sum = sum.wrapping_add(val);
}
sum

View File

@@ -1,5 +1,5 @@
use std::io;
use std::num::NonZeroU32;
use std::num::NonZeroU64;
use common::{BinarySerializable, VInt};
@@ -29,7 +29,7 @@ pub struct Line {
/// compute_slope(y0, y1)
/// = compute_slope(y0 + X % 2^64, y1 + X % 2^64)
/// `
fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU32) -> u64 {
fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU64) -> u64 {
let dy = y1.wrapping_sub(y0);
let sign = dy <= (1 << 63);
let abs_dy = if sign {
@@ -43,7 +43,7 @@ fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU32) -> u64 {
return 0u64;
}
let abs_slope = (abs_dy << 32) / num_vals.get() as u64;
let abs_slope = (abs_dy << 32) / num_vals.get();
if sign {
abs_slope
} else {
@@ -62,43 +62,30 @@ fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU32) -> u64 {
impl Line {
#[inline(always)]
pub fn eval(&self, x: u32) -> u64 {
let linear_part = ((x as u64).wrapping_mul(self.slope) >> 32) as i32 as u64;
pub fn eval(&self, x: u64) -> u64 {
let linear_part = (x.wrapping_mul(self.slope) >> 32) as i32 as u64;
self.intercept.wrapping_add(linear_part)
}
// Same as train, but the intercept is only estimated from provided sample positions
pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
let first_val = sample_positions_and_values[0].1;
let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
Self::train_from(
first_val,
last_val,
num_vals as u32,
sample_positions_and_values.iter().cloned(),
)
pub fn estimate(ys: &dyn Column, sample_positions: &[u64]) -> Self {
Self::train_from(ys, sample_positions.iter().cloned())
}
// Intercept is only computed from provided positions
fn train_from(
first_val: u64,
last_val: u64,
num_vals: u32,
positions_and_values: impl Iterator<Item = (u64, u64)>,
) -> Self {
// TODO replace with let else
let idx_last_val = if let Some(idx_last_val) = NonZeroU32::new(num_vals - 1) {
idx_last_val
fn train_from(ys: &dyn Column, positions: impl Iterator<Item = u64>) -> Self {
let last_idx = if let Some(last_idx) = NonZeroU64::new(ys.num_vals() - 1) {
last_idx
} else {
return Line::default();
};
let y0 = first_val;
let y1 = last_val;
let mut ys_reader = ys.reader();
let y0 = ys_reader.seek(0);
let y1 = ys_reader.seek(last_idx.get());
// We first independently pick our slope.
let slope = compute_slope(y0, y1, idx_last_val);
let slope = compute_slope(y0, y1, last_idx);
// We picked our slope. Note that it does not have to be perfect.
// Now we need to compute the best intercept.
@@ -128,8 +115,12 @@ impl Line {
intercept: 0,
};
let heuristic_shift = y0.wrapping_sub(MID_POINT);
line.intercept = positions_and_values
.map(|(pos, y)| y.wrapping_sub(line.eval(pos as u32)))
let mut ys_reader = ys.reader();
line.intercept = positions
.map(|pos| {
let y = ys_reader.seek(pos);
y.wrapping_sub(line.eval(pos))
})
.min_by_key(|&val| val.wrapping_sub(heuristic_shift))
.unwrap_or(0u64); //< Never happens.
line
@@ -146,14 +137,7 @@ impl Line {
/// This function is only invariant under translation if all of the
/// `ys` are packed into half of the space. (See heuristic below.)
pub fn train(ys: &dyn Column) -> Self {
let first_val = ys.iter().next().unwrap();
let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
Self::train_from(
first_val,
last_val,
ys.num_vals(),
ys.iter().enumerate().map(|(pos, val)| (pos as u64, val)),
)
Self::train_from(ys, 0..ys.num_vals())
}
}
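// Editor's sketch (not part of the diff): `slope` is a 32.32 fixed-point
// value, so `eval` computes intercept + trunc(x * slope / 2^32) in wrapping
// u64 arithmetic. A minimal self-check of that reading, assuming `Line` has
// only the `slope` and `intercept` fields used above:
#[cfg(test)]
mod fixed_point_sketch {
    use super::Line;

    #[test]
    fn slope_is_32_32_fixed_point() {
        let mut line = Line::default();
        line.slope = 3u64 << 32; // encodes a slope of exactly 3 per step
        line.intercept = 10;
        assert_eq!(line.eval(0), 10);
        assert_eq!(line.eval(4), 22);
    }
}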
@@ -199,7 +183,7 @@ mod tests {
let line = Line::train(&VecColumn::from(&ys));
ys.iter()
.enumerate()
.map(|(x, y)| y.wrapping_sub(line.eval(x as u32)))
.map(|(x, y)| y.wrapping_sub(line.eval(x as u64)))
.max()
}

View File

@@ -1,6 +1,7 @@
use std::io::{self, Write};
use common::{BinarySerializable, OwnedBytes};
use common::BinarySerializable;
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
use crate::line::Line;
@@ -18,25 +19,25 @@ pub struct LinearReader {
impl Column for LinearReader {
#[inline]
fn get_val(&self, doc: u32) -> u64 {
fn get_val(&self, doc: u64) -> u64 {
let interpolated_val: u64 = self.linear_params.line.eval(doc);
let bitpacked_diff = self.linear_params.bit_unpacker.get(doc, &self.data);
interpolated_val.wrapping_add(bitpacked_diff)
}
#[inline(always)]
#[inline]
fn min_value(&self) -> u64 {
// The LinearReader assumes a normalized vector.
0u64
}
#[inline(always)]
#[inline]
fn max_value(&self) -> u64 {
self.header.max_value
}
#[inline]
fn num_vals(&self) -> u32 {
fn num_vals(&self) -> u64 {
self.header.num_vals
}
}
@@ -88,11 +89,10 @@ impl FastFieldCodec for LinearCodec {
assert_eq!(column.min_value(), 0);
let line = Line::train(column);
let max_offset_from_line = column
.iter()
let max_offset_from_line = crate::iter_from_reader(column.reader())
.enumerate()
.map(|(pos, actual_value)| {
let calculated_value = line.eval(pos as u32);
let calculated_value = line.eval(pos as u64);
actual_value.wrapping_sub(calculated_value)
})
.max()
@@ -106,8 +106,13 @@ impl FastFieldCodec for LinearCodec {
linear_params.serialize(write)?;
let mut bit_packer = BitPacker::new();
for (pos, actual_value) in column.iter().enumerate() {
let calculated_value = line.eval(pos as u32);
let mut col_reader = column.reader();
for pos in 0.. {
if !col_reader.advance() {
break;
}
let actual_value = col_reader.get();
let calculated_value = line.eval(pos as u64);
let offset = actual_value.wrapping_sub(calculated_value);
bit_packer.write(offset, num_bits, write)?;
}
@@ -120,26 +125,25 @@ impl FastFieldCodec for LinearCodec {
/// where the local maxima for the deviation of the calculated value are and
/// the offset to shift all values to >=0 is also unknown.
#[allow(clippy::question_mark)]
fn estimate(column: &dyn Column) -> Option<f32> {
fn estimate(column: &impl Column) -> Option<f32> {
if column.num_vals() < 3 {
return None; // disable compressor for this case
}
let limit_num_vals = column.num_vals().min(100_000);
// let's sample at 0%, 5%, 10%, ..., 95%
let num_vals = column.num_vals() as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as u64)
.collect::<Vec<_>>();
let num_samples = 100;
let step_size = (limit_num_vals / num_samples).max(1); // 20 samples
let mut sample_positions_and_values: Vec<_> = Vec::new();
for (pos, val) in column.iter().enumerate().step_by(step_size as usize) {
sample_positions_and_values.push((pos as u64, val));
}
let line = Line::estimate(column, &sample_positions);
let line = Line::estimate(&sample_positions_and_values);
let estimated_bit_width = sample_positions_and_values
let mut column_reader = column.reader();
let estimated_bit_width = sample_positions
.into_iter()
.map(|(pos, actual_value)| {
let interpolated_val = line.eval(pos as u32);
.map(|pos| {
let actual_value = column_reader.seek(pos);
let interpolated_val = line.eval(pos as u64);
actual_value.wrapping_sub(interpolated_val)
})
.map(|diff| ((diff as f32 * 1.5) * 2.0) as u64)
@@ -147,7 +151,6 @@ impl FastFieldCodec for LinearCodec {
.max()
.unwrap_or(0);
// Extrapolate to whole column
let num_bits = (estimated_bit_width as u64 * column.num_vals() as u64) + 64;
let num_bits_uncompressed = 64 * column.num_vals();
Some(num_bits as f32 / num_bits_uncompressed as f32)

View File

@@ -6,10 +6,10 @@ use std::io::BufRead;
use std::net::{IpAddr, Ipv6Addr};
use std::str::FromStr;
use common::OwnedBytes;
use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
use itertools::Itertools;
use measure_time::print_time;
use ownedbytes::OwnedBytes;
use prettytable::{Cell, Row, Table};
fn print_set_stats(ip_addrs: &[u128]) {
@@ -90,7 +90,7 @@ fn bench_ip() {
{
let mut data = vec![];
for dataset in dataset.chunks(500_000) {
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
serialize_u128(VecColumn::from(dataset), &mut data).unwrap();
}
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
println!("Compression 50_000 chunks {:.4}", compression);
@@ -101,10 +101,7 @@ fn bench_ip() {
}
let mut data = vec![];
{
print_time!("creation");
serialize_u128(|| dataset.iter().cloned(), dataset.len() as u32, &mut data).unwrap();
}
serialize_u128(VecColumn::from(&dataset), &mut data).unwrap();
let compression = data.len() as f64 / (dataset.len() * 16) as f64;
println!("Compression {:.2}", compression);
@@ -113,17 +110,11 @@ fn bench_ip() {
(data.len() * 8) as f32 / dataset.len() as f32
);
let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
let decompressor = open_u128(OwnedBytes::new(data)).unwrap();
// Sample some ranges
let mut doc_values = Vec::new();
for value in dataset.iter().take(1110).skip(1100).cloned() {
doc_values.clear();
print_time!("get range");
decompressor.get_docids_for_value_range(
value..=value,
0..decompressor.num_vals(),
&mut doc_values,
);
let doc_values = decompressor.get_between_vals(value..=value);
println!("{:?}", doc_values.len());
}
}

View File

@@ -1,11 +1,3 @@
use std::marker::PhantomData;
use fastdivide::DividerU64;
use crate::MonotonicallyMappableToU128;
/// Monotonically maps a value to the u64 value space.
/// Monotonic mapping enables `PartialOrd` on u64 space without conversion to original space.
pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync {
/// Converts a value to u64.
///
@@ -19,163 +11,11 @@ pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync
fn from_u64(val: u64) -> Self;
}
/// Values need to be strictly monotonic mapped to a `Internal` value (u64 or u128) that can be
/// used in fast field codecs.
///
/// The monotonic mapping is required so that `PartialOrd` can be used on `Internal` without
/// converting to `External`.
///
/// All strictly monotonic functions are invertible because they are guaranteed to have a one-to-one
/// mapping from their range to their domain. The `inverse` method is required when opening a codec,
/// so a value can be converted back to its original domain (e.g. ip address or f64) from its
/// internal representation.
pub trait StrictlyMonotonicFn<External, Internal> {
/// Strictly monotonically maps the value from External to Internal.
fn mapping(&self, inp: External) -> Internal;
/// Inverse of `mapping`. Maps the value from Internal to External.
fn inverse(&self, out: Internal) -> External;
}
/// Inverts a strictly monotonic mapping from `StrictlyMonotonicFn<A, B>` to
/// `StrictlyMonotonicFn<B, A>`.
///
/// # Warning
///
/// This type comes with a footgun. A type being strictly monotonic does not impose that the inverse
/// mapping is strictly monotonic over the entire space External. e.g. a -> a * 2. Use at your own
/// risks.
pub(crate) struct StrictlyMonotonicMappingInverter<T> {
orig_mapping: T,
}
impl<T> From<T> for StrictlyMonotonicMappingInverter<T> {
fn from(orig_mapping: T) -> Self {
Self { orig_mapping }
}
}
impl<From, To, T> StrictlyMonotonicFn<To, From> for StrictlyMonotonicMappingInverter<T>
where T: StrictlyMonotonicFn<From, To>
{
#[inline(always)]
fn mapping(&self, val: To) -> From {
self.orig_mapping.inverse(val)
}
#[inline(always)]
fn inverse(&self, val: From) -> To {
self.orig_mapping.mapping(val)
}
}
/// Applies the strictly monotonic mapping from `T` without any additional changes.
pub(crate) struct StrictlyMonotonicMappingToInternal<T> {
_phantom: PhantomData<T>,
}
impl<T> StrictlyMonotonicMappingToInternal<T> {
pub(crate) fn new() -> StrictlyMonotonicMappingToInternal<T> {
Self {
_phantom: PhantomData,
}
}
}
impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU128
{
#[inline(always)]
fn mapping(&self, inp: External) -> u128 {
External::to_u128(inp)
}
#[inline(always)]
fn inverse(&self, out: u128) -> External {
External::from_u128(out)
}
}
impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU64
{
#[inline(always)]
fn mapping(&self, inp: External) -> u64 {
External::to_u64(inp)
}
#[inline(always)]
fn inverse(&self, out: u64) -> External {
External::from_u64(out)
}
}
/// Mapping dividing by gcd and a base value.
///
/// The function is assumed to be only called on values divided by passed
/// gcd value. (It is necessary for the function to be monotonic.)
pub(crate) struct StrictlyMonotonicMappingToInternalGCDBaseval {
gcd_divider: DividerU64,
gcd: u64,
min_value: u64,
}
impl StrictlyMonotonicMappingToInternalGCDBaseval {
pub(crate) fn new(gcd: u64, min_value: u64) -> Self {
let gcd_divider = DividerU64::divide_by(gcd);
Self {
gcd_divider,
gcd,
min_value,
}
}
}
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
for StrictlyMonotonicMappingToInternalGCDBaseval
{
#[inline(always)]
fn mapping(&self, inp: External) -> u64 {
self.gcd_divider
.divide(External::to_u64(inp) - self.min_value)
}
#[inline(always)]
fn inverse(&self, out: u64) -> External {
External::from_u64(self.min_value + out * self.gcd)
}
}
/// Strictly monotonic mapping with a base value.
pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
min_value: u64,
}
impl StrictlyMonotonicMappingToInternalBaseval {
#[inline(always)]
pub(crate) fn new(min_value: u64) -> Self {
Self { min_value }
}
}
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
for StrictlyMonotonicMappingToInternalBaseval
{
#[inline(always)]
fn mapping(&self, val: External) -> u64 {
External::to_u64(val) - self.min_value
}
#[inline(always)]
fn inverse(&self, val: u64) -> External {
External::from_u64(self.min_value + val)
}
}
impl MonotonicallyMappableToU64 for u64 {
#[inline(always)]
fn to_u64(self) -> u64 {
self
}
#[inline(always)]
fn from_u64(val: u64) -> Self {
val
}
@@ -196,7 +36,11 @@ impl MonotonicallyMappableToU64 for i64 {
impl MonotonicallyMappableToU64 for bool {
#[inline(always)]
fn to_u64(self) -> u64 {
u64::from(self)
if self {
1
} else {
0
}
}
#[inline(always)]
@@ -205,46 +49,12 @@ impl MonotonicallyMappableToU64 for bool {
}
}
// TODO remove me.
// Tantivy should refuse NaN values and work with NotNaN internally.
impl MonotonicallyMappableToU64 for f64 {
#[inline(always)]
fn to_u64(self) -> u64 {
common::f64_to_u64(self)
}
#[inline(always)]
fn from_u64(val: u64) -> Self {
common::u64_to_f64(val)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn strictly_monotonic_test() {
// identity mapping
test_round_trip(&StrictlyMonotonicMappingToInternal::<u64>::new(), 100u64);
// round trip to i64
test_round_trip(&StrictlyMonotonicMappingToInternal::<i64>::new(), 100u64);
// identity mapping
test_round_trip(&StrictlyMonotonicMappingToInternal::<u128>::new(), 100u128);
// base value to i64 round trip
let mapping = StrictlyMonotonicMappingToInternalBaseval::new(100);
test_round_trip::<_, _, u64>(&mapping, 100i64);
// base value and gcd to u64 round trip
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100);
test_round_trip::<_, _, u64>(&mapping, 100u64);
}
fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L>(
mapping: &T,
test_val: K,
) {
assert_eq!(mapping.inverse(mapping.mapping(test_val)), test_val);
}
}

View File

@@ -1,40 +0,0 @@
use std::net::Ipv6Addr;
/// Monotonically maps a value to the u128 value space.
/// Monotonic mapping enables `PartialOrd` on u128 space without conversion to original space.
pub trait MonotonicallyMappableToU128: 'static + PartialOrd + Copy + Send + Sync {
/// Converts a value to u128.
///
/// Internally all u128-based fast field values are encoded as u128.
fn to_u128(self) -> u128;
/// Converts a value from u128.
///
/// Internally all u128-based fast field values are encoded as u128.
/// **Note: To be used for converting encoded Term, Posting values.**
fn from_u128(val: u128) -> Self;
}
impl MonotonicallyMappableToU128 for u128 {
fn to_u128(self) -> u128 {
self
}
fn from_u128(val: u128) -> Self {
val
}
}
impl MonotonicallyMappableToU128 for Ipv6Addr {
fn to_u128(self) -> u128 {
ip_to_u128(self)
}
fn from_u128(val: u128) -> Self {
Ipv6Addr::from(val.to_be_bytes())
}
}
fn ip_to_u128(ip_addr: Ipv6Addr) -> u128 {
u128::from_be_bytes(ip_addr.octets())
}
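// Editor's sketch (not part of the diff): interpreting the octets as
// big-endian preserves address ordering, so comparing the u128 values
// compares addresses. A hypothetical round trip:
#[cfg(test)]
mod ip_mapping_sketch {
    use std::net::Ipv6Addr;

    use super::MonotonicallyMappableToU128;

    #[test]
    fn ip_round_trip_and_order() {
        let low = Ipv6Addr::LOCALHOST; // ::1
        let high: Ipv6Addr = "2001:db8::1".parse().unwrap();
        assert_eq!(Ipv6Addr::from_u128(low.to_u128()), low);
        assert!(low.to_u128() < high.to_u128());
    }
}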

View File

@@ -1,453 +0,0 @@
use std::convert::TryInto;
use std::io::{self, Write};
use common::{BinarySerializable, OwnedBytes};
use itertools::Itertools;
use super::{get_bit_at, set_bit_at};
/// For the `DenseCodec`, `data` contains the encoded blocks.
/// Each block consists of [u8; 12]. The first 8 bytes are a bitvec for 64 elements.
/// The last 4 bytes are the offset, the number of set bits so far.
///
/// When translating the original index to a dense index, the correct block can be computed
/// directly `orig_idx/64`. Inside the block the position is `orig_idx%64`.
///
/// When translating a dense index to the original index, we can use the offset to find the correct
/// block. Direct computation is not possible, but we can employ a linear or binary search.
#[derive(Clone)]
pub struct DenseCodec {
// data consists of blocks of 64 bits.
//
// The format is &[(u64, u32)]
// u64 is the bitvec
// u32 is the offset of the block, the number of set bits so far.
//
// At the end one block is appended, to store the number of values in the index in offset.
data: OwnedBytes,
}
const ELEMENTS_PER_BLOCK: u32 = 64;
const BLOCK_BITVEC_SIZE: usize = 8;
const BLOCK_OFFSET_SIZE: usize = 4;
const SERIALIZED_BLOCK_SIZE: usize = BLOCK_BITVEC_SIZE + BLOCK_OFFSET_SIZE;
#[inline]
fn count_ones(bitvec: u64, pos_in_bitvec: u32) -> u32 {
if pos_in_bitvec == 63 {
bitvec.count_ones()
} else {
let mask = (1u64 << (pos_in_bitvec + 1)) - 1;
let masked_bitvec = bitvec & mask;
masked_bitvec.count_ones()
}
}
#[derive(Clone, Copy)]
struct DenseIndexBlock {
bitvec: u64,
offset: u32,
}
impl From<[u8; SERIALIZED_BLOCK_SIZE]> for DenseIndexBlock {
fn from(data: [u8; SERIALIZED_BLOCK_SIZE]) -> Self {
let bitvec = u64::from_le_bytes(data[..BLOCK_BITVEC_SIZE].try_into().unwrap());
let offset = u32::from_le_bytes(data[BLOCK_BITVEC_SIZE..].try_into().unwrap());
Self { bitvec, offset }
}
}
impl DenseCodec {
/// Open the DenseCodec from OwnedBytes
pub fn open(data: OwnedBytes) -> Self {
Self { data }
}
#[inline]
/// Check if value at position is not null.
pub fn exists(&self, idx: u32) -> bool {
let block_pos = idx / ELEMENTS_PER_BLOCK;
let bitvec = self.dense_index_block(block_pos).bitvec;
let pos_in_bitvec = idx % ELEMENTS_PER_BLOCK;
get_bit_at(bitvec, pos_in_bitvec)
}
#[inline]
fn dense_index_block(&self, block_pos: u32) -> DenseIndexBlock {
dense_index_block(&self.data, block_pos)
}
/// Return the number of non-null values in an index
pub fn num_non_nulls(&self) -> u32 {
let last_block = (self.data.len() / SERIALIZED_BLOCK_SIZE) - 1;
self.dense_index_block(last_block as u32).offset
}
#[inline]
/// Translate from the original index to the codec index.
pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
let block_pos = idx / ELEMENTS_PER_BLOCK;
let index_block = self.dense_index_block(block_pos);
let pos_in_block_bit_vec = idx % ELEMENTS_PER_BLOCK;
let ones_in_block = count_ones(index_block.bitvec, pos_in_block_bit_vec);
if get_bit_at(index_block.bitvec, pos_in_block_bit_vec) {
// -1 is ok, since idx does exist, so there's at least one
Some(index_block.offset + ones_in_block - 1)
} else {
None
}
}
/// Translate positions from the codec index to the original index.
///
/// # Panics
///
/// May panic if any `idx` is greater than the max codec index.
pub fn translate_codec_idx_to_original_idx<'a>(
&'a self,
iter: impl Iterator<Item = u32> + 'a,
) -> impl Iterator<Item = u32> + 'a {
let mut block_pos = 0u32;
iter.map(move |dense_idx| {
// update block_pos to limit search scope
block_pos = find_block(dense_idx, block_pos, &self.data);
let index_block = self.dense_index_block(block_pos);
// The next offset is higher than dense_idx and therefore:
// dense_idx <= offset + num_set_bits in block
let mut num_set_bits = 0;
for idx_in_bitvec in 0..ELEMENTS_PER_BLOCK {
if get_bit_at(index_block.bitvec, idx_in_bitvec) {
num_set_bits += 1;
}
if num_set_bits == (dense_idx - index_block.offset + 1) {
let orig_idx = block_pos * ELEMENTS_PER_BLOCK + idx_in_bitvec;
return orig_idx;
}
}
panic!("Internal Error: Offset calculation in dense idx seems to be wrong.");
})
}
}
#[inline]
fn dense_index_block(data: &[u8], block_pos: u32) -> DenseIndexBlock {
let data_start_pos = block_pos as usize * SERIALIZED_BLOCK_SIZE;
let block_data: [u8; SERIALIZED_BLOCK_SIZE] = data[data_start_pos..][..SERIALIZED_BLOCK_SIZE]
.try_into()
.unwrap();
block_data.into()
}
#[inline]
/// Finds the block position containing the dense_idx.
///
/// # Correctness
/// dense_idx needs to be smaller than the number of values in the index
///
/// The last offset number is equal to the number of values in the index.
fn find_block(dense_idx: u32, mut block_pos: u32, data: &[u8]) -> u32 {
loop {
let offset = dense_index_block(data, block_pos).offset;
if offset > dense_idx {
return block_pos - 1;
}
block_pos += 1;
}
}
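// Editor's sketch (not part of the diff): `find_block` walks forward until a
// block's offset exceeds the target, then steps back one block. The sentinel
// block appended at serialization time guarantees the loop terminates. On a
// standalone model with offsets [0, 3, 10], dense indices 0..3 live in
// block 0 and 3..10 in block 1:
#[cfg(test)]
mod find_block_sketch {
    #[test]
    fn offsets_bound_their_blocks() {
        let offsets = [0u32, 3, 10];
        let find = |dense_idx: u32| {
            let mut block_pos = 0usize;
            while offsets[block_pos + 1] <= dense_idx {
                block_pos += 1;
            }
            block_pos
        };
        assert_eq!(find(0), 0);
        assert_eq!(find(2), 0);
        assert_eq!(find(3), 1);
        assert_eq!(find(9), 1);
    }
}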
/// Serializes an iterator over all values: true if set, otherwise false.
pub fn serialize_dense_codec(
iter: impl Iterator<Item = bool>,
mut out: impl Write,
) -> io::Result<()> {
let mut offset: u32 = 0;
for chunk in &iter.chunks(ELEMENTS_PER_BLOCK as usize) {
let mut block: u64 = 0;
for (pos, is_bit_set) in chunk.enumerate() {
if is_bit_set {
set_bit_at(&mut block, pos as u64);
}
}
block.serialize(&mut out)?;
offset.serialize(&mut out)?;
offset += block.count_ones();
}
// Add sentinel block for the offset
let block: u64 = 0;
block.serialize(&mut out)?;
offset.serialize(&mut out)?;
Ok(())
}
#[cfg(test)]
mod tests {
use proptest::prelude::{any, prop, *};
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};
use super::*;
fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
prop_oneof![
1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..64),
1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..100),
1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..64),
8 => vec![any::<bool>()],
2 => prop::collection::vec(any::<bool>(), 0..50),
]
.boxed()
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
let mut bitvec = Vec::new();
bitvec.extend_from_slice(&bitvec1);
bitvec.extend_from_slice(&bitvec2);
bitvec.extend_from_slice(&bitvec3);
test_null_index(bitvec);
}
}
#[test]
fn dense_codec_test_one_block_false() {
let mut iter = vec![false; 64];
iter.push(true);
test_null_index(iter);
}
fn test_null_index(data: Vec<bool>) {
let mut out = vec![];
serialize_dense_codec(data.iter().cloned(), &mut out).unwrap();
let null_index = DenseCodec::open(OwnedBytes::new(out));
let orig_idx_with_value: Vec<u32> = data
.iter()
.enumerate()
.filter(|(_pos, val)| **val)
.map(|(pos, _val)| pos as u32)
.collect();
assert_eq!(
null_index
.translate_codec_idx_to_original_idx(0..orig_idx_with_value.len() as u32)
.collect_vec(),
orig_idx_with_value
);
for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate() {
assert_eq!(
null_index.translate_to_codec_idx(*orig_idx),
Some(dense_idx as u32)
);
}
for (pos, value) in data.iter().enumerate() {
assert_eq!(null_index.exists(pos as u32), *value);
}
}
#[test]
fn dense_codec_test_translation() {
let mut out = vec![];
let iter = ([true, false, true, false]).iter().cloned();
serialize_dense_codec(iter, &mut out).unwrap();
let null_index = DenseCodec::open(OwnedBytes::new(out));
assert_eq!(
null_index
.translate_codec_idx_to_original_idx(0..2)
.collect_vec(),
vec![0, 2]
);
}
#[test]
fn dense_codec_translate() {
let mut out = vec![];
let iter = ([true, false, true, false]).iter().cloned();
serialize_dense_codec(iter, &mut out).unwrap();
let null_index = DenseCodec::open(OwnedBytes::new(out));
assert_eq!(null_index.translate_to_codec_idx(0), Some(0));
assert_eq!(null_index.translate_to_codec_idx(2), Some(1));
}
#[test]
fn dense_codec_test_small() {
let mut out = vec![];
let iter = ([true, false, true, false]).iter().cloned();
serialize_dense_codec(iter, &mut out).unwrap();
let null_index = DenseCodec::open(OwnedBytes::new(out));
assert!(null_index.exists(0));
assert!(!null_index.exists(1));
assert!(null_index.exists(2));
assert!(!null_index.exists(3));
}
#[test]
fn dense_codec_test_large() {
let mut docs = vec![];
docs.extend((0..1000).map(|_idx| false));
docs.extend((0..=1000).map(|_idx| true));
let iter = docs.iter().cloned();
let mut out = vec![];
serialize_dense_codec(iter, &mut out).unwrap();
let null_index = DenseCodec::open(OwnedBytes::new(out));
assert!(!null_index.exists(0));
assert!(!null_index.exists(100));
assert!(!null_index.exists(999));
assert!(null_index.exists(1000));
assert!(null_index.exists(1999));
assert!(null_index.exists(2000));
assert!(!null_index.exists(2001));
}
#[test]
fn test_count_ones() {
let mut block = 0;
set_bit_at(&mut block, 0);
set_bit_at(&mut block, 2);
assert_eq!(count_ones(block, 0), 1);
assert_eq!(count_ones(block, 1), 1);
assert_eq!(count_ones(block, 2), 2);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use test::Bencher;
use super::*;
const TOTAL_NUM_VALUES: u32 = 1_000_000;
fn gen_bools(fill_ratio: f64) -> DenseCodec {
let mut out = Vec::new();
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let bools: Vec<_> = (0..TOTAL_NUM_VALUES)
.map(|_| rng.gen_bool(fill_ratio))
.collect();
serialize_dense_codec(bools.into_iter(), &mut out).unwrap();
let codec = DenseCodec::open(OwnedBytes::new(out));
codec
}
fn random_range_iterator(start: u32, end: u32, step_size: u32) -> impl Iterator<Item = u32> {
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let mut current = start;
std::iter::from_fn(move || {
current += rng.gen_range(1..step_size + 1);
if current >= end {
None
} else {
Some(current)
}
})
}
fn walk_over_data(codec: &DenseCodec, max_step_size: u32) -> Option<u32> {
walk_over_data_from_positions(
codec,
random_range_iterator(0, TOTAL_NUM_VALUES, max_step_size),
)
}
fn walk_over_data_from_positions(
codec: &DenseCodec,
positions: impl Iterator<Item = u32>,
) -> Option<u32> {
let mut dense_idx: Option<u32> = None;
for idx in positions {
dense_idx = dense_idx.or(codec.translate_to_codec_idx(idx));
}
dense_idx
}
#[bench]
fn bench_dense_codec_translate_orig_to_codec_90percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.9f64);
bench.iter(|| walk_over_data(&codec, 100));
}
#[bench]
fn bench_dense_codec_translate_orig_to_codec_50percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.5f64);
bench.iter(|| walk_over_data(&codec, 100));
}
#[bench]
fn bench_dense_codec_translate_orig_to_codec_full_scan_10percent(bench: &mut Bencher) {
let codec = gen_bools(0.1f64);
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
}
#[bench]
fn bench_dense_codec_translate_orig_to_codec_full_scan_90percent(bench: &mut Bencher) {
let codec = gen_bools(0.9f64);
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
}
#[bench]
fn bench_dense_codec_translate_orig_to_codec_10percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.1f64);
bench.iter(|| walk_over_data(&codec, 100));
}
#[bench]
fn bench_dense_codec_translate_codec_to_orig_90percent_filled_random_stride_big_step(
bench: &mut Bencher,
) {
let codec = gen_bools(0.9f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 50_000))
.last()
});
}
#[bench]
fn bench_dense_codec_translate_codec_to_orig_90percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.9f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 100))
.last()
});
}
#[bench]
fn bench_dense_codec_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
let codec = gen_bools(0.9f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(0..num_vals)
.last()
});
}
}

View File

@@ -1,14 +0,0 @@
pub use dense::{serialize_dense_codec, DenseCodec};
mod dense;
mod sparse;
#[inline]
fn get_bit_at(input: u64, n: u32) -> bool {
input & (1 << n) != 0
}
#[inline]
fn set_bit_at(input: &mut u64, n: u64) {
*input |= 1 << n;
}

View File

@@ -1,752 +0,0 @@
use std::io::{self, Write};
use common::{BitSet, OwnedBytes};
use super::{serialize_dense_codec, DenseCodec};
/// `SparseCodec` is the codec for data where only few documents have values.
/// In contrast to `DenseCodec`, opening a `SparseCodec` produces runtime data
/// for faster access.
///
/// The lower 16 bits of doc ids are stored as u16 while the upper 16 bits are given by the block
/// id. Each block contains 1<<16 docids.
///
/// # Serialized Data Layout
/// The data starts with the block data. Each block is either dense or sparse encoded, depending on
/// the number of values in the block. A block is sparse when it contains less than
/// DENSE_BLOCK_THRESHOLD (6144) values.
/// [Sparse data block | dense data block, .. #repeat*; Desc: Either a sparse or dense encoded
/// block]
/// ### Sparse block data
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block]
/// ### Dense block data
/// [Dense codec for the whole block; Desc: Similar to a bitvec(0..ELEMENTS_PER_BLOCK) + Metadata
/// for faster lookups. See dense.rs]
///
/// The data is followed by block metadata, to know which area of the raw block data belongs to
/// which block. Only metadata for blocks with elements is recorded to
/// keep the overhead low for scenarios with many very sparse columns. The block metadata consists
/// of the block index and the number of values in the block. Since we don't store empty blocks,
/// the stored num_vals is offset by one, e.g. a stored 0 means 1 value.
///
/// The last u16 stores the number of blocks that contain values.
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block][(u16 LE, u16 LE), .. #repeat*;
/// Desc: (Block Id u16, Num Elements u16)][u16 LE; Desc: num blocks with values u16]
///
/// # Opening
/// When opening the data layout, the data is expanded to `Vec<SparseCodecBlockVariant>`, where the
/// index is the block index. For each block `byte_start` and `offset` is computed.
pub struct SparseCodec {
data: OwnedBytes,
blocks: Vec<SparseCodecBlockVariant>,
}
/// The threshold for the number of elements at which we switch to dense block encoding
const DENSE_BLOCK_THRESHOLD: u32 = 6144;
const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;
/// 1.5 bits per element + 12 bytes for the sentinel block
const NUM_BYTES_DENSE_BLOCK: u32 = (ELEMENTS_PER_BLOCK + ELEMENTS_PER_BLOCK / 2 + 64 + 32) / 8;
#[derive(Clone)]
enum SparseCodecBlockVariant {
Empty { offset: u32 },
Dense(DenseBlock),
Sparse(SparseBlock),
}
impl SparseCodecBlockVariant {
/// The number of non-null values that preceded that block.
#[inline]
fn offset(&self) -> u32 {
match self {
SparseCodecBlockVariant::Empty { offset } => *offset,
SparseCodecBlockVariant::Dense(dense) => dense.offset,
SparseCodecBlockVariant::Sparse(sparse) => sparse.offset,
}
}
}
/// A block consists of max u16 values
#[derive(Clone)]
struct DenseBlock {
/// The number of values set before the block
offset: u32,
/// The data for the dense encoding
codec: DenseCodec,
}
impl DenseBlock {
pub fn exists(&self, idx: u32) -> bool {
self.codec.exists(idx)
}
pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
self.codec.translate_to_codec_idx(idx)
}
pub fn translate_codec_idx_to_original_idx(&self, idx: u32) -> u32 {
self.codec
.translate_codec_idx_to_original_idx(idx..=idx)
.next()
.unwrap()
}
}
/// A block consists of max u16 values
#[derive(Debug, Copy, Clone)]
struct SparseBlock {
/// The number of values in the block
num_vals: u32,
/// The number of values set before the block
offset: u32,
/// The start position of the data for the block
byte_start: u32,
}
impl SparseBlock {
fn empty_block(offset: u32) -> Self {
Self {
num_vals: 0,
byte_start: 0,
offset,
}
}
#[inline]
fn value_at_idx(&self, data: &[u8], idx: u16) -> u16 {
let start_offset: usize = self.byte_start as usize + (idx as u32 as usize * 2);
get_u16(data, start_offset)
}
#[inline]
#[allow(clippy::comparison_chain)]
// Looks for the element in the block. Returns the positions if found.
fn binary_search(&self, data: &[u8], target: u16) -> Option<u16> {
let mut size = self.num_vals as u16;
let mut left = 0;
let mut right = size;
// TODO try different implementations,
// e.g. exponential search narrowing into binary search
while left < right {
let mid = left + size / 2;
// TODO do boundary check only once, and then use an
// unsafe `value_at_idx`
let mid_val = self.value_at_idx(data, mid);
if target > mid_val {
left = mid + 1;
} else if target < mid_val {
right = mid;
} else {
return Some(mid);
}
size = right - left;
}
None
}
}
#[inline]
fn get_u16(data: &[u8], byte_position: usize) -> u16 {
let bytes: [u8; 2] = data[byte_position..byte_position + 2].try_into().unwrap();
u16::from_le_bytes(bytes)
}
const SERIALIZED_BLOCK_METADATA_SIZE: usize = 4;
fn deserialize_sparse_codec_block(data: &OwnedBytes) -> Vec<SparseCodecBlockVariant> {
// The number of vals so far
let mut offset = 0;
let mut sparse_codec_blocks = Vec::new();
let num_blocks = get_u16(data, data.len() - 2);
let block_data_index_start =
data.len() - 2 - num_blocks as usize * SERIALIZED_BLOCK_METADATA_SIZE;
let mut byte_start = 0;
for block_num in 0..num_blocks as usize {
let block_data_index = block_data_index_start + SERIALIZED_BLOCK_METADATA_SIZE * block_num;
let block_idx = get_u16(data, block_data_index);
let num_vals = get_u16(data, block_data_index + 2) as u32 + 1;
sparse_codec_blocks.resize(
block_idx as usize,
SparseCodecBlockVariant::Empty { offset },
);
if is_sparse(num_vals) {
let block = SparseBlock {
num_vals,
offset,
byte_start,
};
sparse_codec_blocks.push(SparseCodecBlockVariant::Sparse(block));
byte_start += 2 * num_vals;
} else {
let block = DenseBlock {
offset,
codec: DenseCodec::open(data.slice(byte_start as usize..data.len()).clone()),
};
sparse_codec_blocks.push(SparseCodecBlockVariant::Dense(block));
// Dense blocks have a fixed size spanning ELEMENTS_PER_BLOCK.
byte_start += NUM_BYTES_DENSE_BLOCK;
}
offset += num_vals;
}
sparse_codec_blocks.push(SparseCodecBlockVariant::Empty { offset });
sparse_codec_blocks
}
/// Splits a value address into lower and upper 16 bits.
/// The lower 16 bits are the value in the block.
/// The upper 16 bits are the block index.
#[derive(Debug, Clone, Copy)]
struct ValueAddr {
block_idx: u16,
value_in_block: u16,
}
/// Splits an idx into the block index and the value in the block
fn value_addr(idx: u32) -> ValueAddr {
/// Static assert on the number of elements per block this method expects
#[allow(clippy::assertions_on_constants)]
const _: () = assert!(ELEMENTS_PER_BLOCK == (1 << 16));
let value_in_block = idx as u16;
let block_idx = (idx >> 16) as u16;
ValueAddr {
block_idx,
value_in_block,
}
}
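// Editor's sketch (not part of the diff): the split is a plain 16/16 bit
// decomposition of the index.
#[cfg(test)]
mod value_addr_sketch {
    use super::value_addr;

    #[test]
    fn splits_idx_into_block_and_offset() {
        // idx = 3 * 65536 + 7 lands at position 7 of block 3.
        let addr = value_addr(3 * (1 << 16) + 7);
        assert_eq!(addr.block_idx, 3);
        assert_eq!(addr.value_in_block, 7);
    }
}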
impl SparseCodec {
/// Open the SparseCodec from OwnedBytes
pub fn open(data: OwnedBytes) -> Self {
let blocks = deserialize_sparse_codec_block(&data);
Self { data, blocks }
}
#[inline]
/// Check if value at position is not null.
pub fn exists(&self, idx: u32) -> bool {
let value_addr = value_addr(idx);
// There may be trailing nulls without data; those are not stored as blocks. It would be
// possible to create empty blocks, but for that we would need to serialize the number of
// values or pass it when opening.
if let Some(block) = self.blocks.get(value_addr.block_idx as usize) {
match block {
SparseCodecBlockVariant::Empty { offset: _ } => false,
SparseCodecBlockVariant::Dense(block) => {
block.exists(value_addr.value_in_block as u32)
}
SparseCodecBlockVariant::Sparse(block) => block
.binary_search(&self.data, value_addr.value_in_block)
.is_some(),
}
} else {
false
}
}
/// Return the number of non-null values in an index
pub fn num_non_nulls(&self) -> u32 {
self.blocks.last().map(|block| block.offset()).unwrap_or(0)
}
#[inline]
/// Translate from the original index to the codec index.
pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
let value_addr = value_addr(idx);
let block = self.blocks.get(value_addr.block_idx as usize)?;
match block {
SparseCodecBlockVariant::Empty { offset: _ } => None,
SparseCodecBlockVariant::Dense(block) => block
.translate_to_codec_idx(value_addr.value_in_block as u32)
.map(|pos_in_block| pos_in_block + block.offset),
SparseCodecBlockVariant::Sparse(block) => {
let pos_in_block = block.binary_search(&self.data, value_addr.value_in_block);
pos_in_block.map(|pos_in_block: u16| block.offset + pos_in_block as u32)
}
}
}
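/// Returns the position of the block containing the `dense_idx`-th non-null value, scanning
/// forward from `block_pos`. Callers are expected to pass monotonically increasing indices,
/// so each lookup resumes where the previous one stopped.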
fn find_block(&self, dense_idx: u32, mut block_pos: u32) -> u32 {
loop {
let offset = self.blocks[block_pos as usize].offset();
if offset > dense_idx {
return block_pos - 1;
}
block_pos += 1;
}
}
/// Translate positions from the codec index to the original index.
///
/// # Panics
///
/// May panic if any `idx` is greater than the max codec index.
pub fn translate_codec_idx_to_original_idx<'a>(
&'a self,
iter: impl Iterator<Item = u32> + 'a,
) -> impl Iterator<Item = u32> + 'a {
// TODO: There's a big potential performance gain in using iterators per block instead of
// random access for each element in a block.
// itertools' group_by won't help though, since it requires a temporary local variable.
let mut block_pos = 0u32;
iter.map(move |codec_idx| {
// update block_pos to limit search scope
block_pos = self.find_block(codec_idx, block_pos);
let block_doc_idx_start = block_pos * ELEMENTS_PER_BLOCK;
let block = &self.blocks[block_pos as usize];
let idx_in_block = codec_idx - block.offset();
match block {
SparseCodecBlockVariant::Empty { offset: _ } => {
panic!(
"invalid input, cannot translate to original index. associated empty \
block with dense idx. block_pos {}, idx_in_block {}",
block_pos, idx_in_block
)
}
SparseCodecBlockVariant::Dense(dense) => {
dense.translate_codec_idx_to_original_idx(idx_in_block) + block_doc_idx_start
}
SparseCodecBlockVariant::Sparse(block) => {
block.value_at_idx(&self.data, idx_in_block as u16) as u32 + block_doc_idx_start
}
}
})
}
}
fn is_sparse(num_elem_in_block: u32) -> bool {
num_elem_in_block < DENSE_BLOCK_THRESHOLD
}
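// A note on the threshold (not part of this diff; DENSE_BLOCK_THRESHOLD is defined elsewhere
// in this crate): a sparse block costs 2 bytes per value, while a dense block costs a fixed
// NUM_BYTES_DENSE_BLOCK bytes. Assuming ELEMENTS_PER_BLOCK == 1 << 16, a plain bitset is
// 8192 bytes, which equals the sparse cost at 4096 values, so the break-even point sits near
// 4096 values per block.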
#[derive(Default)]
struct BlockDataSerialized {
block_idx: u16,
num_vals: u32,
}
/// Serializes the sparse codec. `iter` must yield the positions of set values in increasing
/// order.
pub fn serialize_sparse_codec<W: Write>(
mut iter: impl Iterator<Item = u32>,
mut out: W,
) -> io::Result<()> {
let mut block_metadata: Vec<BlockDataSerialized> = Vec::new();
let mut current_block = Vec::new();
// This if-statement for the first element ensures that
// `block_metadata` is not empty in the loop below.
if let Some(idx) = iter.next() {
let value_addr = value_addr(idx);
block_metadata.push(BlockDataSerialized {
block_idx: value_addr.block_idx,
num_vals: 1,
});
current_block.push(value_addr.value_in_block);
}
let flush_block = |current_block: &mut Vec<u16>, out: &mut W| -> io::Result<()> {
let is_sparse = is_sparse(current_block.len() as u32);
if is_sparse {
for val_in_block in current_block.iter() {
out.write_all(val_in_block.to_le_bytes().as_ref())?;
}
} else {
let mut bitset = BitSet::with_max_value(ELEMENTS_PER_BLOCK + 1);
for val_in_block in current_block.iter() {
bitset.insert(*val_in_block as u32);
}
let iter = (0..ELEMENTS_PER_BLOCK).map(|idx| bitset.contains(idx));
serialize_dense_codec(iter, out)?;
}
current_block.clear();
Ok(())
};
for idx in iter {
let value_addr = value_addr(idx);
if block_metadata[block_metadata.len() - 1].block_idx == value_addr.block_idx {
let last_idx_metadata = block_metadata.len() - 1;
block_metadata[last_idx_metadata].num_vals += 1;
} else {
// flush prev block
flush_block(&mut current_block, &mut out)?;
block_metadata.push(BlockDataSerialized {
block_idx: value_addr.block_idx,
num_vals: 1,
});
}
current_block.push(value_addr.value_in_block);
}
// handle last block
flush_block(&mut current_block, &mut out)?;
for block in &block_metadata {
out.write_all(block.block_idx.to_le_bytes().as_ref())?;
// We don't store empty blocks, therefore we can subtract 1.
// This way a u16 still fits even when the number of elements is 1 << 16 (u16::MAX + 1).
out.write_all(((block.num_vals - 1) as u16).to_le_bytes().as_ref())?;
}
out.write_all((block_metadata.len() as u16).to_le_bytes().as_ref())?;
Ok(())
}
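// Minimal round-trip sketch (not part of this diff), mirroring what the tests below do:
// serialize the positions of set values, reopen the codec, and query it.
//
//     let mut out = Vec::new();
//     serialize_sparse_codec([0u32, 2, 70_000].into_iter(), &mut out).unwrap();
//     let codec = SparseCodec::open(OwnedBytes::new(out));
//     assert!(codec.exists(2));
//     assert!(!codec.exists(1));
//     assert_eq!(codec.translate_to_codec_idx(70_000), Some(2));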
#[cfg(test)]
mod tests {
use itertools::Itertools;
use proptest::prelude::{any, prop, *};
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};
use super::*;
fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
prop_oneof![
1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
1 => prop::collection::vec(proptest::bool::weighted(0.00), 0..(ELEMENTS_PER_BLOCK as usize * 3)), // empty blocks
1 => prop::collection::vec(proptest::bool::weighted(1.00), 0..(ELEMENTS_PER_BLOCK as usize + 10)), // full block
1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..100),
1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..u16::MAX as usize),
8 => vec![any::<bool>()],
]
.boxed()
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(50))]
#[test]
fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
let mut bitvec = Vec::new();
bitvec.extend_from_slice(&bitvec1);
bitvec.extend_from_slice(&bitvec2);
bitvec.extend_from_slice(&bitvec3);
test_null_index(bitvec);
}
}
#[test]
fn sparse_codec_test_one_block_false() {
let mut iter = vec![false; ELEMENTS_PER_BLOCK as usize];
iter.push(true);
test_null_index(iter);
}
#[test]
fn sparse_codec_test_one_block_true() {
let mut iter = vec![true; ELEMENTS_PER_BLOCK as usize];
iter.push(true);
test_null_index(iter);
}
fn test_null_index(data: Vec<bool>) {
let mut out = vec![];
serialize_sparse_codec(
data.iter()
.cloned()
.enumerate()
.filter(|(_pos, val)| *val)
.map(|(pos, _val)| pos as u32),
&mut out,
)
.unwrap();
let null_index = SparseCodec::open(OwnedBytes::new(out));
let orig_idx_with_value: Vec<u32> = data
.iter()
.enumerate()
.filter(|(_pos, val)| **val)
.map(|(pos, _val)| pos as u32)
.collect();
assert_eq!(
null_index
.translate_codec_idx_to_original_idx(0..orig_idx_with_value.len() as u32)
.collect_vec(),
orig_idx_with_value
);
let step_size = (orig_idx_with_value.len() / 100).max(1);
for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate().step_by(step_size) {
assert_eq!(
null_index.translate_to_codec_idx(*orig_idx),
Some(dense_idx as u32)
);
}
// 100 samples
let step_size = (data.len() / 100).max(1);
for (pos, value) in data.iter().enumerate().step_by(step_size) {
assert_eq!(null_index.exists(pos as u32), *value);
}
}
#[test]
fn sparse_codec_test_translation() {
let mut out = vec![];
let iter = ([true, false, true, false]).iter().cloned();
serialize_sparse_codec(
iter.enumerate()
.filter(|(_pos, val)| *val)
.map(|(pos, _val)| pos as u32),
&mut out,
)
.unwrap();
let null_index = SparseCodec::open(OwnedBytes::new(out));
assert_eq!(
null_index
.translate_codec_idx_to_original_idx(0..2)
.collect_vec(),
vec![0, 2]
);
}
#[test]
fn sparse_codec_translate() {
let mut out = vec![];
let iter = ([true, false, true, false]).iter().cloned();
serialize_sparse_codec(
iter.enumerate()
.filter(|(_pos, val)| *val)
.map(|(pos, _val)| pos as u32),
&mut out,
)
.unwrap();
let null_index = SparseCodec::open(OwnedBytes::new(out));
assert_eq!(null_index.translate_to_codec_idx(0), Some(0));
assert_eq!(null_index.translate_to_codec_idx(2), Some(1));
}
#[test]
fn sparse_codec_test_small() {
let mut out = vec![];
let iter = ([true, false, true, false]).iter().cloned();
serialize_sparse_codec(
iter.enumerate()
.filter(|(_pos, val)| *val)
.map(|(pos, _val)| pos as u32),
&mut out,
)
.unwrap();
let null_index = SparseCodec::open(OwnedBytes::new(out));
assert!(null_index.exists(0));
assert!(!null_index.exists(1));
assert!(null_index.exists(2));
assert!(!null_index.exists(3));
}
#[test]
fn sparse_codec_test_large() {
let mut docs = vec![];
docs.extend((0..ELEMENTS_PER_BLOCK).map(|_idx| false));
docs.extend((0..=1).map(|_idx| true));
let iter = docs.iter().cloned();
let mut out = vec![];
serialize_sparse_codec(
iter.enumerate()
.filter(|(_pos, val)| *val)
.map(|(pos, _val)| pos as u32),
&mut out,
)
.unwrap();
let null_index = SparseCodec::open(OwnedBytes::new(out));
assert!(!null_index.exists(0));
assert!(!null_index.exists(100));
assert!(!null_index.exists(ELEMENTS_PER_BLOCK - 1));
assert!(null_index.exists(ELEMENTS_PER_BLOCK));
assert!(null_index.exists(ELEMENTS_PER_BLOCK + 1));
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use test::Bencher;
use super::*;
const TOTAL_NUM_VALUES: u32 = 1_000_000;
fn gen_bools(fill_ratio: f64) -> SparseCodec {
let mut out = Vec::new();
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
serialize_sparse_codec(
(0..TOTAL_NUM_VALUES)
.map(|_| rng.gen_bool(fill_ratio))
.enumerate()
.filter(|(_pos, val)| *val)
.map(|(pos, _val)| pos as u32),
&mut out,
)
.unwrap();
let codec = SparseCodec::open(OwnedBytes::new(out));
codec
}
fn random_range_iterator(start: u32, end: u32, step_size: u32) -> impl Iterator<Item = u32> {
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let mut current = start;
std::iter::from_fn(move || {
current += rng.gen_range(1..step_size + 1);
if current >= end {
None
} else {
Some(current)
}
})
}
fn walk_over_data(codec: &SparseCodec, max_step_size: u32) -> Option<u32> {
walk_over_data_from_positions(
codec,
random_range_iterator(0, TOTAL_NUM_VALUES, max_step_size),
)
}
fn walk_over_data_from_positions(
codec: &SparseCodec,
positions: impl Iterator<Item = u32>,
) -> Option<u32> {
let mut dense_idx: Option<u32> = None;
for idx in positions {
dense_idx = dense_idx.or(codec.translate_to_codec_idx(idx));
}
dense_idx
}
#[bench]
fn bench_sparse_codec_translate_orig_to_codec_1percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.01f64);
bench.iter(|| walk_over_data(&codec, 100));
}
#[bench]
fn bench_sparse_codec_translate_orig_to_codec_5percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.05f64);
bench.iter(|| walk_over_data(&codec, 100));
}
#[bench]
fn bench_sparse_codec_translate_orig_to_codec_full_scan_10percent(bench: &mut Bencher) {
let codec = gen_bools(0.1f64);
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
}
#[bench]
fn bench_sparse_codec_translate_orig_to_codec_full_scan_90percent(bench: &mut Bencher) {
let codec = gen_bools(0.9f64);
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
}
#[bench]
fn bench_sparse_codec_translate_orig_to_codec_full_scan_1percent(bench: &mut Bencher) {
let codec = gen_bools(0.01f64);
bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
}
#[bench]
fn bench_sparse_codec_translate_orig_to_codec_10percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.1f64);
bench.iter(|| walk_over_data(&codec, 100));
}
#[bench]
fn bench_sparse_codec_translate_orig_to_codec_90percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.9f64);
bench.iter(|| walk_over_data(&codec, 100));
}
#[bench]
fn bench_sparse_codec_translate_codec_to_orig_1percent_filled_random_stride_big_step(
bench: &mut Bencher,
) {
let codec = gen_bools(0.01f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 50_000))
.last()
});
}
#[bench]
fn bench_sparse_codec_translate_codec_to_orig_1percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.01f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 100))
.last()
});
}
#[bench]
fn bench_sparse_codec_translate_codec_to_orig_1percent_filled_full_scan(bench: &mut Bencher) {
let codec = gen_bools(0.01f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(0..num_vals)
.last()
});
}
#[bench]
fn bench_sparse_codec_translate_codec_to_orig_90percent_filled_random_stride_big_step(
bench: &mut Bencher,
) {
let codec = gen_bools(0.90f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 50_000))
.last()
});
}
#[bench]
fn bench_sparse_codec_translate_codec_to_orig_90percent_filled_random_stride(
bench: &mut Bencher,
) {
let codec = gen_bools(0.9f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(random_range_iterator(0, num_vals, 100))
.last()
});
}
#[bench]
fn bench_sparse_codec_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
let codec = gen_bools(0.9f64);
let num_vals = codec.num_non_nulls();
bench.iter(|| {
codec
.translate_codec_idx_to_original_idx(0..num_vals)
.last()
});
}
}

View File

@@ -1,145 +0,0 @@
use std::io::{self, Write};
use std::ops::Range;
use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt};
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum FastFieldCardinality {
Single = 1,
Multi = 2,
}
impl BinarySerializable for FastFieldCardinality {
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
self.to_code().serialize(wrt)
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let code = u8::deserialize(reader)?;
let codec_type: Self = Self::from_code(code)
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Unknown code `{code}.`"))?;
Ok(codec_type)
}
}
impl FastFieldCardinality {
pub(crate) fn to_code(self) -> u8 {
self as u8
}
pub(crate) fn from_code(code: u8) -> Option<Self> {
match code {
1 => Some(Self::Single),
2 => Some(Self::Multi),
_ => None,
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum NullIndexCodec {
Full = 1,
}
impl BinarySerializable for NullIndexCodec {
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
self.to_code().serialize(wrt)
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let code = u8::deserialize(reader)?;
let codec_type: Self = Self::from_code(code)
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Unknown code `{code}.`"))?;
Ok(codec_type)
}
}
impl NullIndexCodec {
pub(crate) fn to_code(self) -> u8 {
self as u8
}
pub(crate) fn from_code(code: u8) -> Option<Self> {
match code {
1 => Some(Self::Full),
_ => None,
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub(crate) struct NullIndexFooter {
pub(crate) cardinality: FastFieldCardinality,
pub(crate) null_index_codec: NullIndexCodec,
// Unused for NullIndexCodec::Full
pub(crate) null_index_byte_range: Range<u64>,
}
impl BinarySerializable for NullIndexFooter {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.cardinality.serialize(writer)?;
self.null_index_codec.serialize(writer)?;
VInt(self.null_index_byte_range.start).serialize(writer)?;
VInt(self.null_index_byte_range.end - self.null_index_byte_range.start)
.serialize(writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let cardinality = FastFieldCardinality::deserialize(reader)?;
let null_index_codec = NullIndexCodec::deserialize(reader)?;
let null_index_byte_range_start = VInt::deserialize(reader)?.0;
let null_index_byte_range_end = VInt::deserialize(reader)?.0 + null_index_byte_range_start;
Ok(Self {
cardinality,
null_index_codec,
null_index_byte_range: null_index_byte_range_start..null_index_byte_range_end,
})
}
}
pub(crate) fn append_null_index_footer(
output: &mut impl io::Write,
null_index_footer: NullIndexFooter,
) -> io::Result<()> {
let mut counting_write = CountingWriter::wrap(output);
null_index_footer.serialize(&mut counting_write)?;
let footer_payload_len = counting_write.written_bytes();
BinarySerializable::serialize(&(footer_payload_len as u16), &mut counting_write)?;
Ok(())
}
pub(crate) fn read_null_index_footer(
data: OwnedBytes,
) -> io::Result<(OwnedBytes, NullIndexFooter)> {
let (data, null_footer_length_bytes) = data.rsplit(2);
let footer_length = u16::deserialize(&mut null_footer_length_bytes.as_slice())?;
let (data, null_index_footer_bytes) = data.rsplit(footer_length as usize);
let null_index_footer = NullIndexFooter::deserialize(&mut null_index_footer_bytes.as_ref())?;
Ok((data, null_index_footer))
}
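// Illustrative layout (not part of this diff), as implied by the append/read pair above:
//
// [column data][null index footer bytes][footer_payload_len: u16 LE]
//
// `read_null_index_footer` first rsplits the trailing two length bytes, then rsplits
// `footer_length` bytes for the footer itself.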
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn null_index_footer_deser_test() {
let null_index_footer = NullIndexFooter {
cardinality: FastFieldCardinality::Single,
null_index_codec: NullIndexCodec::Full,
null_index_byte_range: 100..120,
};
let mut out = vec![];
null_index_footer.serialize(&mut out).unwrap();
assert_eq!(
null_index_footer,
NullIndexFooter::deserialize(&mut &out[..]).unwrap()
);
}
}

View File

@@ -21,42 +21,34 @@ use std::io;
use std::num::NonZeroU64;
use std::sync::Arc;
use common::{BinarySerializable, OwnedBytes, VInt};
use common::{BinarySerializable, VInt};
use fastdivide::DividerU64;
use log::warn;
use ownedbytes::OwnedBytes;
use crate::bitpacked::BitpackedCodec;
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::compact_space::CompactSpaceCompressor;
use crate::format_version::append_format_version;
use crate::linear::LinearCodec;
use crate::monotonic_mapping::{
StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
StrictlyMonotonicMappingToInternalGCDBaseval,
};
use crate::null_index_footer::{
append_null_index_footer, FastFieldCardinality, NullIndexCodec, NullIndexFooter,
};
use crate::{
monotonic_map_column, Column, FastFieldCodec, FastFieldCodecType, MonotonicallyMappableToU64,
U128FastFieldCodecType, VecColumn, ALL_CODEC_TYPES,
iter_from_reader, monotonic_map_column, Column, FastFieldCodec, FastFieldCodecType,
MonotonicallyMappableToU64, VecColumn, ALL_CODEC_TYPES,
};
/// The normalized header gives some parameters after applying the following
/// normalization of the vector:
/// `val -> (val - min_value) / gcd`
/// val -> (val - min_value) / gcd
///
/// By design, after normalization, `min_value = 0` and `gcd = 1`.
#[derive(Debug, Copy, Clone)]
pub struct NormalizedHeader {
/// The number of values in the underlying column.
pub num_vals: u32,
/// The max value of the underlying column.
pub num_vals: u64,
pub max_value: u64,
}
#[derive(Debug, Copy, Clone)]
pub(crate) struct Header {
pub num_vals: u32,
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
pub gcd: Option<NonZeroU64>,
@@ -65,11 +57,8 @@ pub(crate) struct Header {
impl Header {
pub fn normalized(self) -> NormalizedHeader {
let gcd = self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
let gcd_min_val_mapping =
StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, self.min_value);
let max_value = gcd_min_val_mapping.mapping(self.max_value);
let max_value =
(self.max_value - self.min_value) / self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
NormalizedHeader {
num_vals: self.num_vals,
max_value,
@@ -77,7 +66,10 @@ impl Header {
}
pub fn normalize_column<C: Column>(&self, from_column: C) -> impl Column {
normalize_column(from_column, self.min_value, self.gcd)
let min_value = self.min_value;
let gcd = self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
let divider = DividerU64::divide_by(gcd);
monotonic_map_column(from_column, move |val| divider.divide(val - min_value))
}
pub fn compute_header(
@@ -87,10 +79,12 @@ impl Header {
let num_vals = column.num_vals();
let min_value = column.min_value();
let max_value = column.max_value();
let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
.filter(|gcd| gcd.get() > 1u64);
let normalized_column = normalize_column(column, min_value, gcd);
let codec_type = detect_codec(normalized_column, codecs)?;
let gcd =
crate::gcd::find_gcd(iter_from_reader(column.reader()).map(|val| val - min_value))
.filter(|gcd| gcd.get() > 1u64);
let divider = DividerU64::divide_by(gcd.map(|gcd| gcd.get()).unwrap_or(1u64));
let shifted_column = monotonic_map_column(&column, |val| divider.divide(val - min_value));
let codec_type = detect_codec(shifted_column, codecs)?;
Some(Header {
num_vals,
min_value,
@@ -101,42 +95,9 @@ impl Header {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) struct U128Header {
pub num_vals: u32,
pub codec_type: U128FastFieldCodecType,
}
impl BinarySerializable for U128Header {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
VInt(self.num_vals as u64).serialize(writer)?;
self.codec_type.serialize(writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let num_vals = VInt::deserialize(reader)?.0 as u32;
let codec_type = U128FastFieldCodecType::deserialize(reader)?;
Ok(U128Header {
num_vals,
codec_type,
})
}
}
pub fn normalize_column<C: Column>(
from_column: C,
min_value: u64,
gcd: Option<NonZeroU64>,
) -> impl Column {
let gcd = gcd.map(|gcd| gcd.get()).unwrap_or(1);
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, min_value);
monotonic_map_column(from_column, mapping)
}
impl BinarySerializable for Header {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
VInt(self.num_vals as u64).serialize(writer)?;
VInt(self.num_vals).serialize(writer)?;
VInt(self.min_value).serialize(writer)?;
VInt(self.max_value - self.min_value).serialize(writer)?;
if let Some(gcd) = self.gcd {
@@ -149,7 +110,7 @@ impl BinarySerializable for Header {
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let num_vals = VInt::deserialize(reader)?.0 as u32;
let num_vals = VInt::deserialize(reader)?.0;
let min_value = VInt::deserialize(reader)?.0;
let amplitude = VInt::deserialize(reader)?.0;
let max_value = min_value + amplitude;
@@ -165,21 +126,16 @@ impl BinarySerializable for Header {
}
}
/// Return estimated compression for given codec in the value range [0.0..1.0], where 1.0 means no
/// compression.
pub fn estimate<T: MonotonicallyMappableToU64>(
typed_column: impl Column<T>,
codec_type: FastFieldCodecType,
) -> Option<f32> {
let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
let column = monotonic_map_column(typed_column, T::to_u64);
let min_value = column.min_value();
let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
let gcd = crate::gcd::find_gcd(iter_from_reader(column.reader()).map(|val| val - min_value))
.filter(|gcd| gcd.get() > 1u64);
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(
gcd.map(|gcd| gcd.get()).unwrap_or(1u64),
min_value,
);
let normalized_column = monotonic_map_column(&column, mapping);
let divider = DividerU64::divide_by(gcd.map(|gcd| gcd.get()).unwrap_or(1u64));
let normalized_column = monotonic_map_column(&column, |val| divider.divide(val - min_value));
match codec_type {
FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&normalized_column),
FastFieldCodecType::Linear => LinearCodec::estimate(&normalized_column),
@@ -187,111 +143,25 @@ pub fn estimate<T: MonotonicallyMappableToU64>(
}
}
/// Serializes u128 values with the compact space codec.
pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
iter_gen: F,
num_vals: u32,
pub fn serialize_u128(
typed_column: impl Column<u128>,
output: &mut impl io::Write,
) -> io::Result<()> {
serialize_u128_new(ValueIndexInfo::default(), iter_gen, num_vals, output)
}
#[allow(dead_code)]
pub enum ValueIndexInfo<'a> {
MultiValue(Box<dyn MultiValueIndexInfo + 'a>),
SingleValue(Box<dyn SingleValueIndexInfo + 'a>),
}
// TODO Remove me
impl Default for ValueIndexInfo<'static> {
fn default() -> Self {
struct Dummy {}
impl SingleValueIndexInfo for Dummy {
fn num_vals(&self) -> u32 {
todo!()
}
fn num_non_nulls(&self) -> u32 {
todo!()
}
fn iter(&self) -> Box<dyn Iterator<Item = u32>> {
todo!()
}
}
Self::SingleValue(Box::new(Dummy {}))
}
}
impl<'a> ValueIndexInfo<'a> {
fn get_cardinality(&self) -> FastFieldCardinality {
match self {
ValueIndexInfo::MultiValue(_) => FastFieldCardinality::Multi,
ValueIndexInfo::SingleValue(_) => FastFieldCardinality::Single,
}
}
}
pub trait MultiValueIndexInfo {
/// The number of docs in the column.
fn num_docs(&self) -> u32;
/// The number of values in the column.
fn num_vals(&self) -> u32;
/// Return the start index of the values for each doc
fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_>;
}
pub trait SingleValueIndexInfo {
/// The number of values including nulls in the column.
fn num_vals(&self) -> u32;
/// The number of non-null values in the column.
fn num_non_nulls(&self) -> u32;
/// Return a iterator of the positions of docs with a value
fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_>;
}
/// Serializes u128 values with the compact space codec.
pub fn serialize_u128_new<F: Fn() -> I, I: Iterator<Item = u128>>(
value_index: ValueIndexInfo,
iter_gen: F,
num_vals: u32,
output: &mut impl io::Write,
) -> io::Result<()> {
let header = U128Header {
num_vals,
codec_type: U128FastFieldCodecType::CompactSpace,
};
header.serialize(output)?;
let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
compressor.compress_into(iter_gen(), output).unwrap();
let null_index_footer = NullIndexFooter {
cardinality: value_index.get_cardinality(),
null_index_codec: NullIndexCodec::Full,
null_index_byte_range: 0..0,
};
append_null_index_footer(output, null_index_footer)?;
append_format_version(output)?;
// TODO write header, to later support more codecs
let compressor = CompactSpaceCompressor::train_from(&typed_column);
compressor
.compress_into(typed_column.reader(), output)
.unwrap();
Ok(())
}
/// Serializes the column with the codec with the best estimate on the data.
pub fn serialize<T: MonotonicallyMappableToU64>(
typed_column: impl Column<T>,
output: &mut impl io::Write,
codecs: &[FastFieldCodecType],
) -> io::Result<()> {
serialize_new(ValueIndexInfo::default(), typed_column, output, codecs)
}
/// Serializes the column with the codec with the best estimate on the data.
pub fn serialize_new<T: MonotonicallyMappableToU64>(
value_index: ValueIndexInfo,
typed_column: impl Column<T>,
output: &mut impl io::Write,
codecs: &[FastFieldCodecType],
) -> io::Result<()> {
let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
let column = monotonic_map_column(typed_column, T::to_u64);
let header = Header::compute_header(&column, codecs).ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
@@ -305,15 +175,6 @@ pub fn serialize_new<T: MonotonicallyMappableToU64>(
let normalized_column = header.normalize_column(column);
assert_eq!(normalized_column.min_value(), 0u64);
serialize_given_codec(normalized_column, header.codec_type, output)?;
let null_index_footer = NullIndexFooter {
cardinality: value_index.get_cardinality(),
null_index_codec: NullIndexCodec::Full,
null_index_byte_range: 0..0,
};
append_null_index_footer(output, null_index_footer)?;
append_format_version(output)?;
Ok(())
}
@@ -365,7 +226,6 @@ fn serialize_given_codec(
Ok(())
}
/// Helper function to serialize a column (autodetect from all codecs) and then open it
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
column: &[T],
) -> Arc<dyn Column<T>> {
@@ -378,22 +238,11 @@ pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
mod tests {
use super::*;
#[test]
fn test_serialize_deserialize_u128_header() {
let original = U128Header {
num_vals: 11,
codec_type: U128FastFieldCodecType::CompactSpace,
};
let mut out = Vec::new();
original.serialize(&mut out).unwrap();
let restored = U128Header::deserialize(&mut &out[..]).unwrap();
assert_eq!(restored, original);
}
#[test]
fn test_serialize_deserialize() {
let original = [1u64, 5u64, 10u64];
let restored: Vec<u64> = serialize_and_load(&original[..]).iter().collect();
let restored: Vec<u64> =
crate::iter_from_reader(serialize_and_load(&original[..]).reader()).collect();
assert_eq!(&restored, &original[..]);
}
@@ -403,7 +252,7 @@ mod tests {
let col = VecColumn::from(&[false, true][..]);
serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
// 5 bytes of header, 1 byte of value, 7 bytes of padding.
assert_eq!(buffer.len(), 3 + 5 + 8 + 4 + 2);
assert_eq!(buffer.len(), 5 + 8);
}
#[test]
@@ -412,7 +261,7 @@ mod tests {
let col = VecColumn::from(&[true][..]);
serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
// 5 bytes of header, 0 bytes of value, 7 bytes of padding.
assert_eq!(buffer.len(), 3 + 5 + 7 + 4 + 2);
assert_eq!(buffer.len(), 5 + 7);
}
#[test]
@@ -422,6 +271,6 @@ mod tests {
let col = VecColumn::from(&vals[..]);
serialize(col, &mut buffer, &[FastFieldCodecType::Bitpacked]).unwrap();
// Values are stored over 3 bits.
assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 7 + 4 + 2);
assert_eq!(buffer.len(), 7 + (3 * 80 / 8) + 7);
}
}

View File

@@ -1,14 +1,10 @@
[package]
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
version = "0.5.0"
version = "0.3.0"
edition = "2021"
description = "Expose data as static slice"
license = "MIT"
documentation = "https://docs.rs/ownedbytes/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

View File

@@ -3,7 +3,7 @@ use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io, mem};
pub use stable_deref_trait::StableDeref;
use stable_deref_trait::StableDeref;
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a slice.
@@ -80,21 +80,6 @@ impl OwnedBytes {
(left, right)
}
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
///
/// Right will hold `split_len` bytes.
///
/// This operation is cheap and does not require to copy any memory.
/// On the other hand, both `left` and `right` retain a handle over
/// the entire slice of memory. In other words, the memory will only
/// be released when both left and right are dropped.
#[inline]
#[must_use]
pub fn rsplit(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
let data_len = self.data.len();
self.split(data_len - split_len)
}
/// Splits the right part of the `OwnedBytes` at the given offset.
///
/// `self` is truncated to `split_len`, left with the remaining bytes.

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
version = "0.19.0"
version = "0.18.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]

View File

@@ -5,8 +5,7 @@ use combine::parser::range::{take_while, take_while1};
use combine::parser::repeat::escaped;
use combine::parser::Parser;
use combine::{
attempt, between, choice, eof, many, many1, one_of, optional, parser, satisfy, sep_by,
skip_many1, value,
attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
};
use once_cell::sync::Lazy;
use regex::Regex;
@@ -63,20 +62,6 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
})
}
// word variant that allows more characters, e.g. for range queries that don't allow field
// specifier
fn relaxed_word<'a>() -> impl Parser<&'a str, Output = String> {
(
satisfy(|c: char| {
!c.is_whitespace() && !['`', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
}),
many(satisfy(|c: char| {
!c.is_whitespace() && !['{', '}', '"', '[', ']', '(', ')'].contains(&c)
})),
)
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}
/// Parses a date time according to rfc3339
/// 2015-08-02T18:54:42+02
/// 2021-04-13T19:46:26.266051969+00:00
@@ -196,8 +181,8 @@ fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
let range_term_val = || {
attempt(date_time())
.or(word())
.or(negative_number())
.or(relaxed_word())
.or(char('*').with(value("*".to_string())))
};
@@ -265,17 +250,6 @@ fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
})
}
/// Function that parses a set out of a Stream
/// Supports ranges like: `IN [val1 val2 val3]`
fn set<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
let term_list = between(char('['), char(']'), sep_by(term_val(), spaces()));
let set_content = ((string("IN"), spaces()), term_list).map(|(_, elements)| elements);
(optional(attempt(field_name().skip(spaces()))), set_content)
.map(|(field, elements)| UserInputLeaf::Set { field, elements })
}
fn negate(expr: UserInputAst) -> UserInputAst {
expr.unary(Occur::MustNot)
}
@@ -290,7 +264,6 @@ fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
string("NOT").skip(spaces1()).with(leaf()).map(negate),
))
.or(attempt(range().map(UserInputAst::from)))
.or(attempt(set().map(UserInputAst::from)))
.or(literal().map(UserInputAst::from))
.parse_stream(input)
.into_result()
@@ -676,34 +649,6 @@ mod test {
.expect("Cannot parse date range")
.0;
assert_eq!(res6, expected_flexible_dates);
// IP Range Unbounded
let expected_weight = UserInputLeaf::Range {
field: Some("ip".to_string()),
lower: UserInputBound::Inclusive("::1".to_string()),
upper: UserInputBound::Unbounded,
};
let res1 = range()
.parse("ip: >=::1")
.expect("Cannot parse ip v6 format")
.0;
let res2 = range()
.parse("ip:[::1 TO *}")
.expect("Cannot parse ip v6 format")
.0;
assert_eq!(res1, expected_weight);
assert_eq!(res2, expected_weight);
// IP Range Bounded
let expected_weight = UserInputLeaf::Range {
field: Some("ip".to_string()),
lower: UserInputBound::Inclusive("::0.0.0.50".to_string()),
upper: UserInputBound::Exclusive("::0.0.0.52".to_string()),
};
let res1 = range()
.parse("ip:[::0.0.0.50 TO ::0.0.0.52}")
.expect("Cannot parse ip v6 format")
.0;
assert_eq!(res1, expected_weight);
}
#[test]
@@ -760,14 +705,6 @@ mod test {
test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
}
#[test]
fn test_parse_test_query_set() {
test_parse_query_to_ast_helper("abc: IN [a b c]", r#""abc": IN ["a" "b" "c"]"#);
test_parse_query_to_ast_helper("abc: IN [1]", r#""abc": IN ["1"]"#);
test_parse_query_to_ast_helper("abc: IN []", r#""abc": IN []"#);
test_parse_query_to_ast_helper("IN [1 2]", r#"IN ["1" "2"]"#);
}
#[test]
fn test_parse_test_query_other() {
test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");

View File

@@ -12,10 +12,6 @@ pub enum UserInputLeaf {
lower: UserInputBound,
upper: UserInputBound,
},
Set {
field: Option<String>,
elements: Vec<String>,
},
}
impl Debug for UserInputLeaf {
@@ -35,19 +31,6 @@ impl Debug for UserInputLeaf {
upper.display_upper(formatter)?;
Ok(())
}
UserInputLeaf::Set { field, elements } => {
if let Some(ref field) = field {
write!(formatter, "\"{}\": ", field)?;
}
write!(formatter, "IN [")?;
for (i, element) in elements.iter().enumerate() {
if i != 0 {
write!(formatter, " ")?;
}
write!(formatter, "\"{}\"", element)?;
}
write!(formatter, "]")
}
UserInputLeaf::All => write!(formatter, "*"),
}
}

View File

@@ -11,7 +11,7 @@ use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::fastfield::{type_and_cardinality, MultiValuedFastFieldReader};
use crate::fastfield::{type_and_cardinality, FastType, MultiValuedFastFieldReader};
use crate::schema::{Cardinality, Type};
use crate::{InvertedIndexReader, SegmentReader, TantivyError};
@@ -194,7 +194,13 @@ fn get_ff_reader_and_validate(
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
let field_type = reader.schema().get_field_entry(field).field_type();
if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) {
if ff_type == FastType::Date {
return Err(TantivyError::InvalidArgument(
"Unsupported field type date in aggregation".to_string(),
));
}
if cardinality != field_cardinality {
return Err(TantivyError::InvalidArgument(format!(
"Invalid field cardinality on field {} expected {:?}, but got {:?}",

View File

@@ -4,7 +4,9 @@
//! intermediate average results, which is the sum and the number of values. The actual average is
//! calculated on the step from intermediate to final aggregation result tree.
use rustc_hash::FxHashMap;
use std::collections::HashMap;
use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};
use super::agg_req::BucketAggregationInternal;
@@ -12,12 +14,11 @@ use super::bucket::GetDocCount;
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
use super::metric::{SingleMetricResult, Stats};
use super::Key;
use crate::schema::Schema;
use crate::TantivyError;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// The final aggegation result.
pub struct AggregationResults(pub FxHashMap<String, AggregationResult>);
pub struct AggregationResults(pub HashMap<String, AggregationResult>);
impl AggregationResults {
pub(crate) fn get_value_from_aggregation(
@@ -130,12 +131,9 @@ pub enum BucketResult {
}
impl BucketResult {
pub(crate) fn empty_from_req(
req: &BucketAggregationInternal,
schema: &Schema,
) -> crate::Result<Self> {
pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
empty_bucket.into_final_bucket_result(req, schema)
empty_bucket.into_final_bucket_result(req)
}
}
@@ -147,7 +145,7 @@ pub enum BucketEntries<T> {
/// Vector format bucket entries
Vec(Vec<T>),
/// HashMap format bucket entries
HashMap(FxHashMap<String, T>),
HashMap(FnvHashMap<String, T>),
}
/// This is the default entry for a bucket, which contains a key, count, and optionally
@@ -178,9 +176,6 @@ pub enum BucketEntries<T> {
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BucketEntry {
#[serde(skip_serializing_if = "Option::is_none")]
/// The string representation of the bucket.
pub key_as_string: Option<String>,
/// The identifier of the bucket.
pub key: Key,
/// Number of documents in the bucket.
@@ -245,10 +240,4 @@ pub struct RangeBucketEntry {
/// The to range of the bucket. Equals `f64::MAX` when `None`.
#[serde(skip_serializing_if = "Option::is_none")]
pub to: Option<f64>,
/// The optional string representation for the `from` range.
#[serde(skip_serializing_if = "Option::is_none")]
pub from_as_string: Option<String>,
/// The optional string representation for the `to` range.
#[serde(skip_serializing_if = "Option::is_none")]
pub to_as_string: Option<String>,
}

View File

@@ -10,12 +10,12 @@ use crate::aggregation::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor,
};
use crate::aggregation::agg_result::BucketEntry;
use crate::aggregation::f64_from_fastfield_u64;
use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::{f64_from_fastfield_u64, format_date};
use crate::schema::{Schema, Type};
use crate::schema::Type;
use crate::{DocId, TantivyError};
/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
@@ -206,7 +206,6 @@ pub struct SegmentHistogramCollector {
field_type: Type,
interval: f64,
offset: f64,
min_doc_count: u64,
first_bucket_num: i64,
bounds: HistogramBounds,
}
@@ -216,30 +215,6 @@ impl SegmentHistogramCollector {
self,
agg_with_accessor: &BucketAggregationWithAccessor,
) -> crate::Result<IntermediateBucketResult> {
// Compute the number of buckets to validate against max num buckets
// Note: We use min_doc_count here, but it's only a lower bound, since we are on the
// intermediate level, and after merging, the number of documents in a bucket could exceed
// `min_doc_count`.
{
let cut_off_buckets_front = self
.buckets
.iter()
.take_while(|bucket| bucket.doc_count <= self.min_doc_count)
.count();
let cut_off_buckets_back = self.buckets[cut_off_buckets_front..]
.iter()
.rev()
.take_while(|bucket| bucket.doc_count <= self.min_doc_count)
.count();
let estimate_num_buckets =
self.buckets.len() - cut_off_buckets_front - cut_off_buckets_back;
agg_with_accessor
.bucket_count
.add_count(estimate_num_buckets as u32);
agg_with_accessor.bucket_count.validate_bucket_count()?;
}
let mut buckets = Vec::with_capacity(
self.buckets
.iter()
@@ -276,6 +251,11 @@ impl SegmentHistogramCollector {
);
};
agg_with_accessor
.bucket_count
.add_count(buckets.len() as u32);
agg_with_accessor.bucket_count.validate_bucket_count()?;
Ok(IntermediateBucketResult::Histogram { buckets })
}
@@ -328,7 +308,6 @@ impl SegmentHistogramCollector {
first_bucket_num,
bounds,
sub_aggregations,
min_doc_count: req.min_doc_count(),
})
}
@@ -352,10 +331,10 @@ impl SegmentHistogramCollector {
.expect("unexpected fast field cardinatility");
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0]));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1]));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2]));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3]));
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0] as u64));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1] as u64));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2] as u64));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3] as u64));
let bucket_pos0 = get_bucket_num(val0);
let bucket_pos1 = get_bucket_num(val1);
@@ -392,7 +371,7 @@ impl SegmentHistogramCollector {
)?;
}
for &doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get_val(doc), &self.field_type);
let val = f64_from_fastfield_u64(accessor.get_val(doc as u64), &self.field_type);
if !bounds.contains(val) {
continue;
}
@@ -401,7 +380,7 @@ impl SegmentHistogramCollector {
debug_assert_eq!(
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset)
get_bucket_val(val, self.interval, self.offset) as f64
);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
@@ -428,7 +407,7 @@ impl SegmentHistogramCollector {
if bounds.contains(val) {
debug_assert_eq!(
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset)
get_bucket_val(val, self.interval, self.offset) as f64
);
self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;
@@ -446,7 +425,7 @@ impl SegmentHistogramCollector {
let bucket = &mut self.buckets[bucket_pos];
bucket.doc_count += 1;
if let Some(sub_aggregation) = self.sub_aggregations.as_mut() {
sub_aggregation[bucket_pos].collect(doc, bucket_with_accessor)?;
(&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor)?;
}
Ok(())
}
@@ -472,9 +451,8 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
buckets: Vec<IntermediateHistogramBucketEntry>,
histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal,
schema: &Schema,
) -> crate::Result<Vec<BucketEntry>> {
// Generate the full list of buckets without gaps.
// Generate the the full list of buckets without gaps.
//
// The bounds are the min max from the current buckets, optionally extended by
// extended_bounds from the request
@@ -513,9 +491,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
sub_aggregation: empty_sub_aggregation.clone(),
},
})
.map(|intermediate_bucket| {
intermediate_bucket.into_final_bucket_entry(sub_aggregation, schema)
})
.map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
.collect::<crate::Result<Vec<_>>>()
}
@@ -524,43 +500,20 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
buckets: Vec<IntermediateHistogramBucketEntry>,
histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal,
schema: &Schema,
) -> crate::Result<Vec<BucketEntry>> {
let mut buckets = if histogram_req.min_doc_count() == 0 {
if histogram_req.min_doc_count() == 0 {
// With min_doc_count != 0, we may need to add buckets, so that there are no
// gaps, since intermediate result does not contain empty buckets (filtered to
// reduce serialization size).
intermediate_buckets_to_final_buckets_fill_gaps(
buckets,
histogram_req,
sub_aggregation,
schema,
)?
intermediate_buckets_to_final_buckets_fill_gaps(buckets, histogram_req, sub_aggregation)
} else {
buckets
.into_iter()
.filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
.map(|histogram_bucket| {
histogram_bucket.into_final_bucket_entry(sub_aggregation, schema)
})
.collect::<crate::Result<Vec<_>>>()?
};
// If we have a date type on the histogram buckets, we add the `key_as_string` field as rfc339
let field = schema
.get_field(&histogram_req.field)
.ok_or_else(|| TantivyError::FieldNotFound(histogram_req.field.to_string()))?;
if schema.get_field_entry(field).field_type().is_date() {
for bucket in buckets.iter_mut() {
if let crate::aggregation::Key::F64(val) = bucket.key {
let key_as_string = format_date(val as i64)?;
bucket.key_as_string = Some(key_as_string);
}
}
.map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
.collect::<crate::Result<Vec<_>>>()
}
Ok(buckets)
}
/// Applies req extended_bounds/hard_bounds on the min_max value
@@ -1419,63 +1372,6 @@ mod tests {
Ok(())
}
#[test]
fn histogram_date_test_single_segment() -> crate::Result<()> {
histogram_date_test_with_opt(true)
}
#[test]
fn histogram_date_test_multi_segment() -> crate::Result<()> {
histogram_date_test_with_opt(false)
}
fn histogram_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
let index = get_test_index_2_segments(merge_segments)?;
let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "date".to_string(),
interval: 86400000000.0, // one day in microseconds
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let agg_res = exec_request(agg_req, &index)?;
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
assert_eq!(res["histogram"]["buckets"][0]["key"], 1546300800000000.0);
assert_eq!(
res["histogram"]["buckets"][0]["key_as_string"],
"2019-01-01T00:00:00Z"
);
assert_eq!(res["histogram"]["buckets"][0]["doc_count"], 1);
assert_eq!(res["histogram"]["buckets"][1]["key"], 1546387200000000.0);
assert_eq!(
res["histogram"]["buckets"][1]["key_as_string"],
"2019-01-02T00:00:00Z"
);
assert_eq!(res["histogram"]["buckets"][1]["doc_count"], 5);
assert_eq!(res["histogram"]["buckets"][2]["key"], 1546473600000000.0);
assert_eq!(
res["histogram"]["buckets"][2]["key_as_string"],
"2019-01-03T00:00:00Z"
);
assert_eq!(res["histogram"]["buckets"][3], Value::Null);
Ok(())
}
#[test]
fn histogram_invalid_request() -> crate::Result<()> {
let index = get_test_index_2_segments(true)?;
@@ -1542,36 +1438,4 @@ mod tests {
Ok(())
}
#[test]
fn histogram_test_max_buckets_segments() -> crate::Result<()> {
let values = vec![0.0, 70000.0];
let index = get_test_index_from_values(true, &values)?;
let agg_req: Aggregations = vec![(
"my_interval".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 1.0,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request(agg_req, &index);
assert_eq!(
res.unwrap_err().to_string(),
"An invalid argument was passed: 'Aborting aggregation because too many buckets were \
created'"
.to_string()
);
Ok(())
}
}

View File

@@ -1,8 +1,7 @@
use std::fmt::Debug;
use std::ops::Range;
use fastfield_codecs::MonotonicallyMappableToU64;
use rustc_hash::FxHashMap;
use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};
use crate::aggregation::agg_req_with_accessor::{
@@ -12,9 +11,7 @@ use crate::aggregation::intermediate_agg_result::{
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::{
f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
use crate::schema::Type;
use crate::{DocId, TantivyError};
@@ -179,12 +176,12 @@ impl SegmentRangeCollector {
) -> crate::Result<IntermediateBucketResult> {
let field_type = self.field_type;
let buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
.buckets
.into_iter()
.map(move |range_bucket| {
Ok((
range_to_string(&range_bucket.range, &field_type)?,
range_to_string(&range_bucket.range, &field_type),
range_bucket
.bucket
.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
@@ -212,8 +209,8 @@ impl SegmentRangeCollector {
let key = range
.key
.clone()
.map(|key| Ok(Key::Str(key)))
.unwrap_or_else(|| range_to_key(&range.range, &field_type))?;
.map(Key::Str)
.unwrap_or_else(|| range_to_key(&range.range, &field_type));
let to = if range.range.end == u64::MAX {
None
} else {
@@ -231,7 +228,6 @@ impl SegmentRangeCollector {
sub_aggregation,
)?)
};
Ok(SegmentRangeAndBucketEntry {
range: range.range.clone(),
bucket: SegmentRangeBucketEntry {
@@ -267,10 +263,10 @@ impl SegmentRangeCollector {
.as_single()
.expect("unexpected fast field cardinality");
for docs in iter.by_ref() {
let val1 = accessor.get_val(docs[0]);
let val2 = accessor.get_val(docs[1]);
let val3 = accessor.get_val(docs[2]);
let val4 = accessor.get_val(docs[3]);
let val1 = accessor.get_val(docs[0] as u64);
let val2 = accessor.get_val(docs[1] as u64);
let val3 = accessor.get_val(docs[2] as u64);
let val4 = accessor.get_val(docs[3] as u64);
let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3);
@@ -282,7 +278,7 @@ impl SegmentRangeCollector {
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
}
for &doc in iter.remainder() {
let val = accessor.get_val(doc);
let val = accessor.get_val(doc as u64);
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
@@ -327,8 +323,8 @@ impl SegmentRangeCollector {
/// Converts the user provided f64 range value to fast field value space.
///
/// Internally fast field values are always stored as u64.
/// If the fast field has u64 `[1, 2, 5]`, these values are stored as is in the fast field.
/// A fast field with f64 `[1.0, 2.0, 5.0]` is converted to u64 space, using a
/// If the fast field has u64 [1,2,5], these values are stored as is in the fast field.
/// A fast field with f64 [1.0, 2.0, 5.0] is converted to u64 space, using a
/// monotonic mapping function, so the order is preserved.
///
/// Consequently, a f64 user range 1.0..3.0 needs to be converted to fast field value space using
@@ -406,45 +402,34 @@ fn extend_validate_ranges(
Ok(converted_buckets)
}
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> crate::Result<String> {
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
// is_start is there for malformed requests, e.g. if the user passes the range u64::MIN..0.0,
// it should be rendered as "*-0" and not "*-*"
let to_str = |val: u64, is_start: bool| {
if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
Ok("*".to_string())
} else if *field_type == Type::Date {
let val = i64::from_u64(val);
format_date(val)
"*".to_string()
} else {
Ok(f64_from_fastfield_u64(val, field_type).to_string())
f64_from_fastfield_u64(val, field_type).to_string()
}
};
Ok(format!(
"{}-{}",
to_str(range.start, true)?,
to_str(range.end, false)?
))
format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
}
pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Result<Key> {
Ok(Key::Str(range_to_string(range, field_type)?))
pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
Key::Str(range_to_string(range, field_type))
}
#[cfg(test)]
mod tests {
use fastfield_codecs::MonotonicallyMappableToU64;
use serde_json::Value;
use super::*;
use crate::aggregation::agg_req::{
Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
};
use crate::aggregation::tests::{
exec_request, exec_request_with_query, get_test_index_2_segments,
get_test_index_with_num_docs,
};
use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};
pub fn get_collector_from_ranges(
ranges: Vec<RangeAggregationRange>,
@@ -582,77 +567,6 @@ mod tests {
Ok(())
}
#[test]
fn range_date_test_single_segment() -> crate::Result<()> {
range_date_test_with_opt(true)
}
#[test]
fn range_date_test_multi_segment() -> crate::Result<()> {
range_date_test_with_opt(false)
}
fn range_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
let index = get_test_index_2_segments(merge_segments)?;
let agg_req: Aggregations = vec![(
"date_ranges".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "date".to_string(),
ranges: vec![
RangeAggregationRange {
key: None,
from: None,
to: Some(1546300800000000.0f64),
},
RangeAggregationRange {
key: None,
from: Some(1546300800000000.0f64),
to: Some(1546387200000000.0f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let agg_res = exec_request(agg_req, &index)?;
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
assert_eq!(
res["date_ranges"]["buckets"][0]["from_as_string"],
Value::Null
);
assert_eq!(
res["date_ranges"]["buckets"][0]["key"],
"*-2019-01-01T00:00:00Z"
);
assert_eq!(
res["date_ranges"]["buckets"][1]["from_as_string"],
"2019-01-01T00:00:00Z"
);
assert_eq!(
res["date_ranges"]["buckets"][1]["to_as_string"],
"2019-01-02T00:00:00Z"
);
assert_eq!(
res["date_ranges"]["buckets"][2]["from_as_string"],
"2019-01-02T00:00:00Z"
);
assert_eq!(
res["date_ranges"]["buckets"][2]["to_as_string"],
Value::Null
);
Ok(())
}
#[test]
fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;

View File

@@ -1,7 +1,7 @@
use std::fmt::Debug;
use fnv::FnvHashMap;
use itertools::Itertools;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use super::{CustomOrder, Order, OrderTarget};
@@ -17,11 +17,7 @@ use crate::fastfield::MultiValuedFastFieldReader;
use crate::schema::Type;
use crate::{DocId, TantivyError};
/// Creates a bucket for every unique term and counts the number of occurrences.
/// Note that doc_count in the response buckets equals term count here.
///
/// If the text is untokenized and single-valued, that means one term per document, and
/// therefore it is in fact the doc count.
/// Creates a bucket for every unique term
///
/// ### Terminology
/// Shard parameters are supposed to be equivalent to elasticsearch shard parameter.
@@ -68,25 +64,6 @@ use crate::{DocId, TantivyError};
/// }
/// }
/// ```
///
/// /// # Response JSON Format
/// ```json
/// {
/// ...
/// "aggregations": {
/// "genres": {
/// "doc_count_error_upper_bound": 0,
/// "sum_other_doc_count": 0,
/// "buckets": [
/// { "key": "drumnbass", "doc_count": 6 },
/// { "key": "raggae", "doc_count": 4 },
/// { "key": "jazz", "doc_count": 2 }
/// ]
/// }
/// }
/// }
/// ```
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct TermsAggregation {
/// The field to aggregate on.
@@ -199,7 +176,7 @@ impl TermsAggregationInternal {
#[derive(Clone, Debug, PartialEq)]
/// Container to store term_ids and their buckets.
struct TermBuckets {
pub(crate) entries: FxHashMap<u32, TermBucketEntry>,
pub(crate) entries: FnvHashMap<u32, TermBucketEntry>,
blueprint: Option<SegmentAggregationResultsCollector>,
}
@@ -397,7 +374,7 @@ impl SegmentTermCollector {
.expect("internal error: inverted index not loaded for term aggregation");
let term_dict = inverted_index.terms();
let mut dict: FxHashMap<String, IntermediateTermBucketEntry> = Default::default();
let mut dict: FnvHashMap<String, IntermediateTermBucketEntry> = Default::default();
let mut buffer = vec![];
for (term_id, entry) in entries {
term_dict
@@ -1129,9 +1106,9 @@ mod tests {
assert_eq!(res["my_texts"]["buckets"][0]["key"], "terma");
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 4);
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termc");
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termb");
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 0);
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termb");
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termc");
assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 0);
assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
assert_eq!(res["my_texts"]["doc_count_error_upper_bound"], 0);
@@ -1229,43 +1206,11 @@ mod tests {
.collect();
let res = exec_request_with_query(agg_req, &index, None);
assert!(res.is_err());
Ok(())
}
#[test]
fn terms_aggregation_multi_token_per_doc() -> crate::Result<()> {
let terms = vec!["Hello Hello", "Hallo Hallo"];
let index = get_test_index_from_terms(true, &[terms])?;
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_id".to_string(),
min_doc_count: Some(0),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();
let res = exec_request_with_query(agg_req, &index, None).unwrap();
assert_eq!(res["my_texts"]["buckets"][0]["key"], "hello");
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
assert_eq!(res["my_texts"]["buckets"][1]["key"], "hallo");
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 2);
Ok(())
}
#[test]
fn test_json_format() -> crate::Result<()> {
let agg_req: Aggregations = vec![(

View File

@@ -7,7 +7,6 @@ use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::schema::Schema;
use crate::{SegmentReader, TantivyError};
/// The default max bucket count, before the aggregation fails.
@@ -17,7 +16,6 @@ pub const MAX_BUCKET_COUNT: u32 = 65000;
///
/// The collector collects all aggregations by the underlying aggregation request.
pub struct AggregationCollector {
schema: Schema,
agg: Aggregations,
max_bucket_count: u32,
}
@@ -27,9 +25,8 @@ impl AggregationCollector {
///
/// Aggregation fails when the total bucket count is higher than max_bucket_count.
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>, schema: Schema) -> Self {
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
schema,
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
@@ -116,7 +113,7 @@ impl Collector for AggregationCollector {
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let res = merge_fruits(segment_fruits)?;
res.into_final_bucket_result(self.agg.clone(), &self.schema)
res.into_final_bucket_result(self.agg.clone())
}
}

View File

@@ -1,18 +0,0 @@
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::TantivyError;
pub(crate) fn format_date(val: i64) -> crate::Result<String> {
let datetime =
OffsetDateTime::from_unix_timestamp_nanos(1_000 * (val as i128)).map_err(|err| {
TantivyError::InvalidArgument(format!(
"Could not convert {:?} to OffsetDateTime, err {:?}",
val, err
))
})?;
let key_as_string = datetime
.format(&Rfc3339)
.map_err(|_err| TantivyError::InvalidArgument("Could not serialize date".to_string()))?;
Ok(key_as_string)
}
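For context, a hedged usage sketch of the removed helper: `val` is a microsecond timestamp, widened to nanoseconds for the `time` crate. The `1_546_300_800` constant matches the test data used elsewhere in this diff:

```rust
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

fn main() {
    // 2019-01-01T00:00:00Z, expressed in microseconds since the Unix epoch.
    let val: i64 = 1_546_300_800 * 1_000_000;
    let datetime = OffsetDateTime::from_unix_timestamp_nanos(1_000 * (val as i128)).unwrap();
    assert_eq!(datetime.format(&Rfc3339).unwrap(), "2019-01-01T00:00:00Z");
}
```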

View File

@@ -3,14 +3,15 @@
//! indices.
use std::cmp::Ordering;
use std::collections::HashMap;
use fnv::FnvHashMap;
use itertools::Itertools;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use super::agg_req::{
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
MetricAggregation, RangeAggregation,
MetricAggregation,
};
use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
use super::bucket::{
@@ -19,11 +20,9 @@ use super::bucket::{
};
use super::metric::{IntermediateAverage, IntermediateStats};
use super::segment_agg_result::SegmentMetricResultCollector;
use super::{format_date, Key, SerializedKey, VecWithNames};
use super::{Key, SerializedKey, VecWithNames};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::bucket::TermsAggregationInternal;
use crate::schema::Schema;
use crate::TantivyError;
/// Contains the intermediate aggregation result, which is optimized to be merged with other
/// intermediate results.
@@ -37,12 +36,8 @@ pub struct IntermediateAggregationResults {
impl IntermediateAggregationResults {
/// Convert intermediate result and its aggregation request to the final result.
pub fn into_final_bucket_result(
self,
req: Aggregations,
schema: &Schema,
) -> crate::Result<AggregationResults> {
self.into_final_bucket_result_internal(&(req.into()), schema)
pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
self.into_final_bucket_result_internal(&(req.into()))
}
/// Convert intermediate result and its aggregation request to the final result.
@@ -52,19 +47,18 @@ impl IntermediateAggregationResults {
pub(crate) fn into_final_bucket_result_internal(
self,
req: &AggregationsInternal,
schema: &Schema,
) -> crate::Result<AggregationResults> {
// Important assumption:
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
// request
let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();
let mut results: HashMap<String, AggregationResult> = HashMap::new();
if let Some(buckets) = self.buckets {
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets, schema)?
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
} else {
// When there are no buckets, we create empty buckets, so that the serialized json
// format is constant
add_empty_final_buckets_to_result(&mut results, &req.buckets, schema)?
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
};
if let Some(metrics) = self.metrics {
@@ -138,7 +132,7 @@ impl IntermediateAggregationResults {
}
fn convert_and_add_final_metrics_to_result(
results: &mut FxHashMap<String, AggregationResult>,
results: &mut HashMap<String, AggregationResult>,
metrics: VecWithNames<IntermediateMetricResult>,
) {
results.extend(
@@ -149,7 +143,7 @@ fn convert_and_add_final_metrics_to_result(
}
fn add_empty_final_metrics_to_result(
results: &mut FxHashMap<String, AggregationResult>,
results: &mut HashMap<String, AggregationResult>,
req_metrics: &VecWithNames<MetricAggregation>,
) -> crate::Result<()> {
results.extend(req_metrics.iter().map(|(key, req)| {
@@ -163,30 +157,27 @@ fn add_empty_final_metrics_to_result(
}
fn add_empty_final_buckets_to_result(
results: &mut FxHashMap<String, AggregationResult>,
results: &mut HashMap<String, AggregationResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
schema: &Schema,
) -> crate::Result<()> {
let requested_buckets = req_buckets.iter();
for (key, req) in requested_buckets {
let empty_bucket =
AggregationResult::BucketResult(BucketResult::empty_from_req(req, schema)?);
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
results.insert(key.to_string(), empty_bucket);
}
Ok(())
}
fn convert_and_add_final_buckets_to_result(
results: &mut FxHashMap<String, AggregationResult>,
results: &mut HashMap<String, AggregationResult>,
buckets: VecWithNames<IntermediateBucketResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
schema: &Schema,
) -> crate::Result<()> {
assert_eq!(buckets.len(), req_buckets.len());
let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
for ((key, bucket), req) in buckets_with_request {
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req, schema)?);
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
results.insert(key, result);
}
Ok(())
@@ -276,21 +267,13 @@ impl IntermediateBucketResult {
pub(crate) fn into_final_bucket_result(
self,
req: &BucketAggregationInternal,
schema: &Schema,
) -> crate::Result<BucketResult> {
match self {
IntermediateBucketResult::Range(range_res) => {
let mut buckets: Vec<RangeBucketEntry> = range_res
.buckets
.into_values()
.map(|bucket| {
bucket.into_final_bucket_entry(
&req.sub_aggregation,
schema,
req.as_range()
.expect("unexpected aggregation, expected histogram aggregation"),
)
})
.into_iter()
.map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
.collect::<crate::Result<Vec<_>>>()?;
buckets.sort_by(|left, right| {
@@ -305,7 +288,7 @@ impl IntermediateBucketResult {
.keyed;
let buckets = if is_keyed {
let mut bucket_map =
FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
@@ -321,12 +304,11 @@ impl IntermediateBucketResult {
req.as_histogram()
.expect("unexpected aggregation, expected histogram aggregation"),
&req.sub_aggregation,
schema,
)?;
let buckets = if req.as_histogram().unwrap().keyed {
let mut bucket_map =
FxHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
@@ -340,7 +322,6 @@ impl IntermediateBucketResult {
req.as_term()
.expect("unexpected aggregation, expected term aggregation"),
&req.sub_aggregation,
schema,
),
}
}
@@ -415,13 +396,13 @@ impl IntermediateBucketResult {
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Range aggregation including error counts
pub struct IntermediateRangeBucketResult {
pub(crate) buckets: FxHashMap<SerializedKey, IntermediateRangeBucketEntry>,
pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
}
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Term aggregation including error counts
pub struct IntermediateTermBucketResult {
pub(crate) entries: FxHashMap<String, IntermediateTermBucketEntry>,
pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
pub(crate) sum_other_doc_count: u64,
pub(crate) doc_count_error_upper_bound: u64,
}
@@ -431,7 +412,6 @@ impl IntermediateTermBucketResult {
self,
req: &TermsAggregation,
sub_aggregation_req: &AggregationsInternal,
schema: &Schema,
) -> crate::Result<BucketResult> {
let req = TermsAggregationInternal::from_req(req);
let mut buckets: Vec<BucketEntry> = self
@@ -440,12 +420,11 @@ impl IntermediateTermBucketResult {
.filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
.map(|(key, entry)| {
Ok(BucketEntry {
key_as_string: None,
key: Key::Str(key),
doc_count: entry.doc_count,
sub_aggregation: entry
.sub_aggregation
.into_final_bucket_result_internal(sub_aggregation_req, schema)?,
.into_final_bucket_result_internal(sub_aggregation_req)?,
})
})
.collect::<crate::Result<_>>()?;
@@ -520,8 +499,8 @@ trait MergeFruits {
}
fn merge_maps<V: MergeFruits + Clone>(
entries_left: &mut FxHashMap<SerializedKey, V>,
mut entries_right: FxHashMap<SerializedKey, V>,
entries_left: &mut FnvHashMap<SerializedKey, V>,
mut entries_right: FnvHashMap<SerializedKey, V>,
) {
for (name, entry_left) in entries_left.iter_mut() {
if let Some(entry_right) = entries_right.remove(name) {
@@ -550,15 +529,13 @@ impl IntermediateHistogramBucketEntry {
pub(crate) fn into_final_bucket_entry(
self,
req: &AggregationsInternal,
schema: &Schema,
) -> crate::Result<BucketEntry> {
Ok(BucketEntry {
key_as_string: None,
key: Key::F64(self.key),
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req, schema)?,
.into_final_bucket_result_internal(req)?,
})
}
}
@@ -595,38 +572,16 @@ impl IntermediateRangeBucketEntry {
pub(crate) fn into_final_bucket_entry(
self,
req: &AggregationsInternal,
schema: &Schema,
range_req: &RangeAggregation,
) -> crate::Result<RangeBucketEntry> {
let mut range_bucket_entry = RangeBucketEntry {
Ok(RangeBucketEntry {
key: self.key,
doc_count: self.doc_count,
sub_aggregation: self
.sub_aggregation
.into_final_bucket_result_internal(req, schema)?,
.into_final_bucket_result_internal(req)?,
to: self.to,
from: self.from,
to_as_string: None,
from_as_string: None,
};
// If we have a date type on the range buckets, we add the `key_as_string` field formatted
// as RFC 3339
let field = schema
.get_field(&range_req.field)
.ok_or_else(|| TantivyError::FieldNotFound(range_req.field.to_string()))?;
if schema.get_field_entry(field).field_type().is_date() {
if let Some(val) = range_bucket_entry.to {
let key_as_string = format_date(val as i64)?;
range_bucket_entry.to_as_string = Some(key_as_string);
}
if let Some(val) = range_bucket_entry.from {
let key_as_string = format_date(val as i64)?;
range_bucket_entry.from_as_string = Some(key_as_string);
}
}
Ok(range_bucket_entry)
})
}
}
@@ -671,7 +626,7 @@ mod tests {
fn get_sub_test_tree(data: &[(String, u64)]) -> IntermediateAggregationResults {
let mut map = HashMap::new();
let mut buckets = FxHashMap::default();
let mut buckets = FnvHashMap::default();
for (key, doc_count) in data {
buckets.insert(
key.to_string(),
@@ -698,7 +653,7 @@ mod tests {
data: &[(String, u64, String, u64)],
) -> IntermediateAggregationResults {
let mut map = HashMap::new();
let mut buckets: FxHashMap<_, _> = Default::default();
let mut buckets: FnvHashMap<_, _> = Default::default();
for (key, doc_count, sub_aggregation_key, sub_aggregation_count) in data {
buckets.insert(
key.to_string(),

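The `merge_maps` helper above is the heart of merging distributed intermediate results: entries present on both sides are merged pairwise, entries present only on the right are moved over. A self-contained sketch with `u64` counts standing in for `MergeFruits`:

```rust
use std::collections::HashMap;

fn merge_maps(left: &mut HashMap<String, u64>, mut right: HashMap<String, u64>) {
    for (key, left_count) in left.iter_mut() {
        if let Some(right_count) = right.remove(key) {
            *left_count += right_count; // stand-in for MergeFruits::merge_fruits
        }
    }
    // Whatever remains on the right was absent on the left: move it over.
    left.extend(right);
}

fn main() {
    let mut left = HashMap::from([("a".to_string(), 2), ("b".to_string(), 1)]);
    let right = HashMap::from([("b".to_string(), 3), ("c".to_string(), 5)]);
    merge_maps(&mut left, right);
    assert_eq!((left["a"], left["b"], left["c"]), (2, 4, 5));
}
```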
View File

@@ -60,10 +60,10 @@ impl SegmentAverageCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get_val(docs[0]);
let val2 = field.get_val(docs[1]);
let val3 = field.get_val(docs[2]);
let val4 = field.get_val(docs[3]);
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -74,7 +74,7 @@ impl SegmentAverageCollector {
self.data.collect(val4);
}
for &doc in iter.remainder() {
let val = field.get_val(doc);
let val = field.get_val(doc as u64);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.data.collect(val);
}
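The `collect_block` loop above uses a 4-way manual unroll: `chunks_exact(4)` yields only full chunks, and `remainder()` exposes the tail. A minimal sketch of the pattern outside tantivy:

```rust
fn sum(vals: &[u64]) -> u64 {
    let mut total = 0;
    let mut iter = vals.chunks_exact(4);
    for chunk in iter.by_ref() {
        // Four independent loads per iteration give the optimizer room to
        // pipeline or vectorize the fast-field reads.
        total += chunk[0] + chunk[1] + chunk[2] + chunk[3];
    }
    for &val in iter.remainder() {
        total += val;
    }
    total
}

fn main() {
    assert_eq!(sum(&[1, 2, 3, 4, 5, 6]), 21);
}
```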

View File

@@ -166,10 +166,10 @@ impl SegmentStatsCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get_val(docs[0]);
let val2 = field.get_val(docs[1]);
let val3 = field.get_val(docs[2]);
let val4 = field.get_val(docs[3]);
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -180,7 +180,7 @@ impl SegmentStatsCollector {
self.stats.collect(val4);
}
for &doc in iter.remainder() {
let val = field.get_val(doc);
let val = field.get_val(doc as u64);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val);
}
@@ -222,7 +222,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let reader = index.reader()?;
let searcher = reader.searcher();
@@ -300,7 +300,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();

View File

@@ -10,19 +10,21 @@
//!
//! There are two categories: [Metrics](metric) and [Buckets](bucket).
//!
//! ## Prerequisite
//! Currently aggregations work only on [fast fields](`crate::fastfield`): single value fast
//! fields of type `u64`, `f64`, `i64`, `date`, and fast fields on text fields.
//! # Usage
//!
//!
//! ## Usage
//! To use aggregations, build an aggregation request by constructing
//! [`Aggregations`](agg_req::Aggregations).
//! Create an [`AggregationCollector`] from this request. `AggregationCollector` implements the
//! [`Collector`](crate::collector::Collector) trait and can be passed as collector into
//! [`Searcher::search()`](crate::Searcher::search).
//!
//! #### Limitations
//!
//! ## JSON Format
//! Currently aggregations work only on single value fast fields of type `u64`, `f64`, `i64` and
//! fast fields on text fields.
//!
//! # JSON Format
//! Aggregations request and result structures de/serialize into elasticsearch compatible JSON.
//!
//! ```verbatim
@@ -33,7 +35,7 @@
//! let json_response_string: String = &serde_json::to_string(&agg_res)?;
//! ```
//!
//! ## Supported Aggregations
//! # Supported Aggregations
//! - [Bucket](bucket)
//! - [Histogram](bucket::HistogramAggregation)
//! - [Range](bucket::RangeAggregation)
@@ -53,10 +55,9 @@
//! use tantivy::query::AllQuery;
//! use tantivy::aggregation::agg_result::AggregationResults;
//! use tantivy::IndexReader;
//! use tantivy::schema::Schema;
//!
//! # #[allow(dead_code)]
//! fn aggregate_on_index(reader: &IndexReader, schema: Schema) {
//! fn aggregate_on_index(reader: &IndexReader) {
//! let agg_req: Aggregations = vec![
//! (
//! "average".to_string(),
@@ -68,7 +69,7 @@
//! .into_iter()
//! .collect();
//!
//! let collector = AggregationCollector::from_aggs(agg_req, None, schema);
//! let collector = AggregationCollector::from_aggs(agg_req, None);
//!
//! let searcher = reader.searcher();
//! let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
@@ -158,7 +159,6 @@ mod agg_req_with_accessor;
pub mod agg_result;
pub mod bucket;
mod collector;
mod date;
pub mod intermediate_agg_result;
pub mod metric;
mod segment_agg_result;
@@ -169,7 +169,6 @@ pub use collector::{
AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
MAX_BUCKET_COUNT,
};
pub(crate) use date::format_date;
use fastfield_codecs::MonotonicallyMappableToU64;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
@@ -286,11 +285,11 @@ impl Display for Key {
/// Inverse of `to_fastfield_u64`. Used to convert to `f64` for metrics.
///
/// # Panics
/// Only `u64`, `f64`, `date`, and `i64` are supported.
/// Only `u64`, `f64`, and `i64` are supported.
pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
match field_type {
Type::U64 => val as f64,
Type::I64 | Type::Date => i64::from_u64(val) as f64,
Type::I64 => i64::from_u64(val) as f64,
Type::F64 => f64::from_u64(val),
_ => {
panic!("unexpected type {:?}. This should not happen", field_type)
@@ -298,9 +297,10 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
}
}
/// Converts the `f64` value to fast field value space, which is always u64.
/// Converts the `f64` value to fast field value space.
///
/// If the fast field has `u64`, values are stored unchanged as `u64` in the fast field.
/// If the fast field has `u64`, values are stored as `u64` in the fast field.
/// A `f64` value of e.g. `2.0` therefore needs to be converted to `2u64`.
///
/// If the fast field has `f64` values are converted and stored to `u64` using a
/// monotonic mapping.
@@ -310,7 +310,7 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
match field_type {
Type::U64 => Some(val as u64),
Type::I64 | Type::Date => Some((val as i64).to_u64()),
Type::I64 => Some((val as i64).to_u64()),
Type::F64 => Some(val.to_u64()),
_ => None,
}
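The `Type::I64` and `Type::F64` arms rely on the monotonic u64 mappings behind `MonotonicallyMappableToU64`. A hedged sketch of the standard mappings (the real implementations live in tantivy's fastfield codecs):

```rust
fn i64_to_u64(val: i64) -> u64 {
    // Flipping the sign bit makes the unsigned order match the signed order.
    (val as u64) ^ (1 << 63)
}

fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if bits >> 63 == 0 {
        bits | (1 << 63) // positive floats: set the sign bit
    } else {
        !bits // negative floats: flip all bits to reverse their order
    }
}

fn main() {
    assert!(i64_to_u64(-1) < i64_to_u64(0));
    assert!(f64_to_u64(-2.0) < f64_to_u64(0.5));
    assert!(f64_to_u64(0.5) < f64_to_u64(2.0));
}
```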
@@ -319,7 +319,6 @@ pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
#[cfg(test)]
mod tests {
use serde_json::Value;
use time::OffsetDateTime;
use super::agg_req::{Aggregation, Aggregations, BucketAggregation};
use super::bucket::RangeAggregation;
@@ -335,7 +334,7 @@ mod tests {
use crate::aggregation::DistributedAggregationCollector;
use crate::query::{AllQuery, TermQuery};
use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use crate::{DateTime, Index, Term};
use crate::{Index, Term};
fn get_avg_req(field_name: &str) -> Aggregation {
Aggregation::Metric(MetricAggregation::Average(
@@ -361,7 +360,7 @@ mod tests {
index: &Index,
query: Option<(&str, &str)>,
) -> crate::Result<Value> {
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req, None);
let reader = index.reader()?;
let searcher = reader.searcher();
@@ -451,9 +450,9 @@ mod tests {
text_field_id => term.to_string(),
string_field_id => term.to_string(),
score_field => i as u64,
score_field_f64 => i,
score_field_f64 => i as f64,
score_field_i64 => i as i64,
fraction_field => i/100.0,
fraction_field => i as f64/100.0,
))?;
}
index_writer.commit()?;
@@ -555,10 +554,10 @@ mod tests {
let searcher = reader.searcher();
let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
intermediate_agg_result
.into_final_bucket_result(agg_req, &index.schema())
.into_final_bucket_result(agg_req)
.unwrap()
} else {
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req, None);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
@@ -651,7 +650,6 @@ mod tests {
.set_fast()
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let date_field = schema_builder.add_date_field("date", FAST);
schema_builder.add_text_field("dummy_text", STRING);
let score_fieldtype =
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
@@ -669,7 +667,6 @@ mod tests {
// writing the segment
index_writer.add_document(doc!(
text_field => "cool",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800).unwrap()),
score_field => 1u64,
score_field_f64 => 1f64,
score_field_i64 => 1i64,
@@ -678,7 +675,6 @@ mod tests {
))?;
index_writer.add_document(doc!(
text_field => "cool",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
score_field => 3u64,
score_field_f64 => 3f64,
score_field_i64 => 3i64,
@@ -687,21 +683,18 @@ mod tests {
))?;
index_writer.add_document(doc!(
text_field => "cool",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
score_field => 5u64,
score_field_f64 => 5f64,
score_field_i64 => 5i64,
))?;
index_writer.add_document(doc!(
text_field => "nohit",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
score_field => 6u64,
score_field_f64 => 6f64,
score_field_i64 => 6i64,
))?;
index_writer.add_document(doc!(
text_field => "cool",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
score_field => 7u64,
score_field_f64 => 7f64,
score_field_i64 => 7i64,
@@ -709,14 +702,12 @@ mod tests {
index_writer.commit()?;
index_writer.add_document(doc!(
text_field => "cool",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
score_field => 11u64,
score_field_f64 => 11f64,
score_field_i64 => 11i64,
))?;
index_writer.add_document(doc!(
text_field => "cool",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
score_field => 14u64,
score_field_f64 => 14f64,
score_field_i64 => 14i64,
@@ -724,7 +715,6 @@ mod tests {
index_writer.add_document(doc!(
text_field => "cool",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
score_field => 44u64,
score_field_f64 => 44.5f64,
score_field_i64 => 44i64,
@@ -735,7 +725,6 @@ mod tests {
// no hits segment
index_writer.add_document(doc!(
text_field => "nohit",
date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
score_field => 44u64,
score_field_f64 => 44.5f64,
score_field_i64 => 44i64,
@@ -808,7 +797,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -1008,10 +997,9 @@ mod tests {
// Test de/serialization roundtrip on intermediate_agg_result
let res: IntermediateAggregationResults =
serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap();
res.into_final_bucket_result(agg_req.clone(), &index.schema())
.unwrap()
res.into_final_bucket_result(agg_req.clone()).unwrap()
} else {
let collector = AggregationCollector::from_aggs(agg_req.clone(), None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req.clone(), None);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
@@ -1069,7 +1057,7 @@ mod tests {
);
// Test empty result set
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req, None);
let searcher = reader.searcher();
searcher.search(&query_with_no_hits, &collector).unwrap();
@@ -1134,7 +1122,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
@@ -1247,7 +1235,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1278,7 +1266,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1309,7 +1297,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1348,7 +1336,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1377,7 +1365,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1406,7 +1394,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1443,7 +1431,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1478,7 +1466,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1517,7 +1505,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1547,7 +1535,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1604,7 +1592,7 @@ mod tests {
.into_iter()
.collect();
let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());
let collector = AggregationCollector::from_aggs(agg_req_1, None);
let searcher = reader.searcher();
let agg_res: AggregationResults =

View File

@@ -305,7 +305,7 @@ impl BucketCount {
}
pub(crate) fn add_count(&self, count: u32) {
self.bucket_count
.fetch_add(count, std::sync::atomic::Ordering::Relaxed);
.fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
}
pub(crate) fn get_count(&self) -> u32 {
self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)

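A hedged sketch of the shared bucket budget this counter implements: segments bump a relaxed atomic, and the aggregation aborts once the configured limit (`MAX_BUCKET_COUNT` above) is exceeded. The separate validation step is folded into `add_count` here for brevity:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

struct BucketCount {
    bucket_count: AtomicU32,
    max_bucket_count: u32,
}

impl BucketCount {
    fn add_count(&self, count: u32) -> Result<(), String> {
        // Relaxed is enough: we only need an eventually-consistent budget,
        // not ordering with respect to other memory operations.
        let total = self.bucket_count.fetch_add(count, Ordering::Relaxed) + count;
        if total > self.max_bucket_count {
            return Err(format!("aggregation exceeds {} buckets", self.max_bucket_count));
        }
        Ok(())
    }
}

fn main() {
    let budget = BucketCount { bucket_count: AtomicU32::new(0), max_bucket_count: 3 };
    assert!(budget.add_count(2).is_ok());
    assert!(budget.add_count(2).is_err());
}
```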
View File

@@ -38,7 +38,7 @@ pub trait CustomSegmentScorer<TScore>: 'static {
pub trait CustomScorer<TScore>: Sync {
/// Type of the associated [`CustomSegmentScorer`].
type Child: CustomSegmentScorer<TScore>;
/// Builds a child scorer for a specific segment. The child scorer is associated with
/// Builds a child scorer for a specific segment. The child scorer is associated to
/// a specific segment.
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
}

View File

@@ -91,7 +91,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// // a document can be associated with any number of facets
/// // a document can be associated to any number of facets
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// facet => Facet::from("/lang/en"),
@@ -338,7 +338,11 @@ impl SegmentCollector for FacetSegmentCollector {
let mut previous_collapsed_ord: usize = usize::MAX;
for &facet_ord in &self.facet_ords_buf {
let collapsed_ord = self.collapse_mapping[facet_ord as usize];
self.counts[collapsed_ord] += u64::from(collapsed_ord != previous_collapsed_ord);
self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
0
} else {
1
};
previous_collapsed_ord = collapsed_ord;
}
}
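Both sides of this hunk count a collapsed ordinal once per run (the ords buffer is sorted, so duplicates are adjacent); the newer side just writes the comparison branchlessly via `u64::from(bool)`. A standalone sketch of the idiom:

```rust
fn count_runs(sorted_ords: &[usize], num_ords: usize) -> Vec<u64> {
    let mut counts = vec![0u64; num_ords];
    let mut previous = usize::MAX;
    for &ord in sorted_ords {
        // Adds 1 only at the start of each run of equal ordinals, without a branch.
        counts[ord] += u64::from(ord != previous);
        previous = ord;
    }
    counts
}

fn main() {
    // Ord 1 appears as one run of two, ord 2 as one run of one.
    assert_eq!(count_runs(&[1, 1, 2], 3), vec![0, 1, 1]);
}
```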
@@ -357,7 +361,7 @@ impl SegmentCollector for FacetSegmentCollector {
let mut facet = vec![];
let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
// TODO handle errors.
if facet_dict.ord_to_term(facet_ord, &mut facet).is_ok() {
if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
if let Ok(facet) = Facet::from_encoded(facet) {
facet_counts.insert(facet, count);
}
@@ -616,7 +620,7 @@ mod tests {
.map(|mut doc| {
doc.add_facet(
facet_field,
&format!("/facet/{}", thread_rng().sample(uniform)),
&format!("/facet/{}", thread_rng().sample(&uniform)),
);
doc
})

View File

@@ -177,7 +177,7 @@ where
type Fruit = TSegmentCollector::Fruit;
fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get_val(doc);
let value = self.fast_field_reader.get_val(doc as u64);
if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
}

View File

@@ -37,7 +37,7 @@ impl HistogramCollector {
/// The scale/range of the histogram is not dynamic. It is required to
/// define it by supplying the following parameters:
/// - `min_value`: the minimum value that can be recorded in the histogram.
/// - `bucket_width`: the length of the interval that is associated with each bucket.
/// - `bucket_width`: the length of the interval that is associated to each bucket.
/// - `num_buckets`: The overall number of buckets.
///
/// Together, these parameters define a partition of `[min_value, min_value + num_buckets *
@@ -94,7 +94,7 @@ impl SegmentCollector for SegmentHistogramCollector {
type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) {
let value = self.ff_reader.get_val(doc);
let value = self.ff_reader.get_val(doc as u64);
self.histogram_computer.add_value(value);
}
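A minimal sketch of the fixed partition those three parameters define: `num_buckets` intervals of width `bucket_width` starting at `min_value`, with out-of-range values dropped:

```rust
fn bucket_for(value: u64, min_value: u64, bucket_width: u64, num_buckets: usize) -> Option<usize> {
    if value < min_value {
        return None;
    }
    let bucket = ((value - min_value) / bucket_width) as usize;
    (bucket < num_buckets).then_some(bucket)
}

fn main() {
    // Partition of [10, 25) into [10,15), [15,20), [20,25).
    assert_eq!(bucket_for(12, 10, 5, 3), Some(0));
    assert_eq!(bucket_for(24, 10, 5, 3), Some(2));
    assert_eq!(bucket_for(25, 10, 5, 3), None);
}
```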

View File

@@ -142,7 +142,7 @@ pub trait Collector: Sync + Send {
/// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;
/// Type of the `SegmentCollector` associated with this collector.
/// Type of the `SegmentCollector` associated to this collector.
type Child: SegmentCollector;
/// `set_segment` is called before beginning to enumerate
@@ -156,7 +156,7 @@ pub trait Collector: Sync + Send {
/// Returns true iff the collector requires to compute scores for documents.
fn requires_scoring(&self) -> bool;
/// Combines the fruit associated with the collection of each segments
/// Combines the fruit associated to the collection of each segments
/// into one fruit.
fn merge_fruits(
&self,
@@ -170,35 +170,19 @@ pub trait Collector: Sync + Send {
segment_ord: u32,
reader: &SegmentReader,
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
let mut segment_collector = self.for_segment(segment_ord, reader)?;
let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
match (reader.alive_bitset(), self.requires_scoring()) {
(Some(alive_bitset), true) => {
weight.for_each(reader, &mut |doc, score| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, score);
}
})?;
}
(Some(alive_bitset), false) => {
weight.for_each_no_score(reader, &mut |doc| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, 0.0);
}
})?;
}
(None, true) => {
weight.for_each(reader, &mut |doc, score| {
if let Some(alive_bitset) = reader.alive_bitset() {
weight.for_each(reader, &mut |doc, score| {
if alive_bitset.is_alive(doc) {
segment_collector.collect(doc, score);
})?;
}
(None, false) => {
weight.for_each_no_score(reader, &mut |doc| {
segment_collector.collect(doc, 0.0);
})?;
}
}
})?;
} else {
weight.for_each(reader, &mut |doc, score| {
segment_collector.collect(doc, score);
})?;
}
Ok(segment_collector.harvest())
}
}

View File

@@ -201,7 +201,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
type Fruit = Vec<u64>;
fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.reader.get_val(doc);
let val = self.reader.get_val(doc as u64);
self.vals.push(val);
}

View File

@@ -137,7 +137,7 @@ struct ScorerByFastFieldReader {
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 {
self.ff_reader.get_val(doc)
self.ff_reader.get_val(doc as u64)
}
}
@@ -458,7 +458,7 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| {
/// let popularity: u64 = popularity_reader.get_val(doc);
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
@@ -567,8 +567,8 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId| {
/// let popularity: u64 = popularity_reader.get_val(doc);
/// let boosted: u64 = boosted_reader.get_val(doc);
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// let boosted: u64 = boosted_reader.get_val(doc as u64);
/// // Scores do not have to be `f64` in tantivy.
/// // Here we return a couple to get lexicographical order
/// // for free.
@@ -693,7 +693,7 @@ impl Collector for TopDocs {
}
}
/// Segment Collector associated with `TopDocs`.
/// Segment Collector associated to `TopDocs`.
pub struct TopScoreSegmentCollector(TopSegmentCollector<Score>);
impl SegmentCollector for TopScoreSegmentCollector {

View File

@@ -40,7 +40,7 @@ pub trait ScoreTweaker<TScore>: Sync {
/// Type of the associated [`ScoreSegmentTweaker`].
type Child: ScoreSegmentTweaker<TScore>;
/// Builds a child tweaker for a specific segment. The child scorer is associated with
/// Builds a child tweaker for a specific segment. The child scorer is associated to
/// a specific segment.
fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
}

View File

@@ -19,7 +19,7 @@ use crate::error::{DataCorruption, TantivyError};
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
use crate::indexer::segment_updater::save_metas;
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::schema::{Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter;
@@ -149,11 +149,12 @@ impl IndexBuilder {
/// Creates a new index using the [`RamDirectory`].
///
/// The index will be allocated in anonymous memory.
/// This is useful for indexing a small set of documents,
/// for instance in unit tests or a temporary in-memory index.
/// This should only be used for unit tests.
pub fn create_in_ram(self) -> Result<Index, TantivyError> {
let ram_directory = RamDirectory::create();
self.create(ram_directory)
Ok(self
.create(ram_directory)
.expect("Creating a RamDirectory should never fail"))
}
/// Creates a new index in a given filepath.
@@ -227,44 +228,10 @@ impl IndexBuilder {
))
}
}
fn validate(&self) -> crate::Result<()> {
if let Some(schema) = self.schema.as_ref() {
if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
let schema_field = schema.get_field(&sort_by_field.field).ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"Field to sort index {} not found in schema",
sort_by_field.field
))
})?;
let entry = schema.get_field_entry(schema_field);
if !entry.is_fast() {
return Err(TantivyError::InvalidArgument(format!(
"Field {} is no fast field. Field needs to be a single value fast field \
to be used to sort an index",
sort_by_field.field
)));
}
if entry.field_type().fastfield_cardinality() != Some(Cardinality::SingleValue) {
return Err(TantivyError::InvalidArgument(format!(
"Only single value fast field Cardinality supported for sorting index {}",
sort_by_field.field
)));
}
}
Ok(())
} else {
Err(TantivyError::InvalidArgument(
"no schema passed".to_string(),
))
}
}
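The removed `validate()` rejects sort settings whose field is not a single value fast field. A hedged sketch of a schema that would pass the check, assuming the tantivy APIs of this era:

```rust
use tantivy::schema::{Cardinality, NumericOptions, Schema};

fn main() {
    let mut schema_builder = Schema::builder();
    let opts = NumericOptions::default().set_fast(Cardinality::SingleValue);
    schema_builder.add_u64_field("timestamp", opts);
    let schema = schema_builder.build();
    // An IndexSettings sort_by_field of "timestamp" would validate against this
    // schema; a non-fast or multi-valued field would be rejected with
    // TantivyError::InvalidArgument.
    assert!(schema.get_field("timestamp").is_some());
}
```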
/// Creates a new index given an implementation of the trait `Directory`.
///
/// If a directory previously existed, it will be erased.
fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
self.validate()?;
let dir = dir.into();
let directory = ManagedDirectory::wrap(dir)?;
save_new_metas(
@@ -813,7 +780,7 @@ mod tests {
let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(tempdir_path, schema).unwrap();
let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)

View File

@@ -130,10 +130,10 @@ impl SegmentMeta {
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated with a segment component.
/// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(&match component {
path.push_str(&*match component {
SegmentComponent::Postings => ".idx".to_string(),
SegmentComponent::Positions => ".pos".to_string(),
SegmentComponent::Terms => ".term".to_string(),
@@ -326,13 +326,13 @@ pub struct IndexMeta {
/// `IndexSettings` to configure index options.
#[serde(default)]
pub index_settings: IndexSettings,
/// List of `SegmentMeta` information associated with each finalized segment of the index.
/// List of `SegmentMeta` information associated to each finalized segment of the index.
pub segments: Vec<SegmentMeta>,
/// Index `Schema`
pub schema: Schema,
/// Opstamp associated with the last `commit` operation.
/// Opstamp associated to the last `commit` operation.
pub opstamp: Opstamp,
/// Payload associated with the last commit.
/// Payload associated to the last commit.
///
/// Upon commit, clients can optionally add a small `String` payload to their commit
/// to help identify this commit.

View File

@@ -9,17 +9,18 @@ use crate::schema::{IndexRecordOption, Term};
use crate::termdict::TermDictionary;
/// The inverted index reader is in charge of accessing
/// the inverted index associated with a specific field.
/// the inverted index associated to a specific field.
///
/// # Note
///
/// It is safe to delete the segment associated with
/// It is safe to delete the segment associated to
/// an `InvertedIndexReader`. As long as it is open,
/// the [`FileSlice`] it is relying on should
/// the `FileSlice` it is relying on should
/// stay available.
///
///
/// `InvertedIndexReader` are created by calling
/// [`SegmentReader::inverted_index()`](crate::SegmentReader::inverted_index).
/// the `SegmentReader`'s [`.inverted_index(...)`] method
pub struct InvertedIndexReader {
termdict: TermDictionary,
postings_file_slice: FileSlice,
@@ -29,7 +30,7 @@ pub struct InvertedIndexReader {
}
impl InvertedIndexReader {
#[allow(clippy::needless_pass_by_value)] // for symmetry
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
pub(crate) fn new(
termdict: TermDictionary,
postings_file_slice: FileSlice,
@@ -74,7 +75,7 @@ impl InvertedIndexReader {
///
/// This is useful for enumerating through a list of terms,
/// and consuming the associated posting lists while avoiding
/// reallocating a [`BlockSegmentPostings`].
/// reallocating a `BlockSegmentPostings`.
///
/// # Warning
///
@@ -95,7 +96,7 @@ impl InvertedIndexReader {
/// Returns a block postings given a `Term`.
/// This method is for an advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
/// Most user should prefer using `read_postings` instead.
pub fn read_block_postings(
&self,
term: &Term,
@@ -109,7 +110,7 @@ impl InvertedIndexReader {
/// Returns a block postings given a `term_info`.
/// This method is for an advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
/// Most user should prefer using `read_postings` instead.
pub fn read_block_postings_from_terminfo(
&self,
term_info: &TermInfo,
@@ -129,7 +130,7 @@ impl InvertedIndexReader {
/// Returns a posting object given a `term_info`.
/// This method is for an advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
/// Most user should prefer using `read_postings` instead.
pub fn read_postings_from_terminfo(
&self,
term_info: &TermInfo,
@@ -163,12 +164,12 @@ impl InvertedIndexReader {
/// or `None` if the term has never been encountered and indexed.
///
/// If the field was not indexed with the indexing options that cover
/// the requested options, the method does not fail and returns a [`SegmentPostings`]
/// the requested options, the method does not fail and returns a `SegmentPostings`
/// with as much information as possible.
///
/// For instance, requesting [`IndexRecordOption::WithFreqs`] for a
/// [`TextOptions`](crate::schema::TextOptions) that does not index position
/// will return a [`SegmentPostings`] with `DocId`s and frequencies.
/// For instance, requesting `IndexRecordOption::Freq` for a
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
/// with `DocId`s and frequencies.
pub fn read_postings(
&self,
term: &Term,
@@ -200,16 +201,23 @@ impl InvertedIndexReader {
#[cfg(feature = "quickwit")]
impl InvertedIndexReader {
pub(crate) async fn get_term_info_async(&self, term: &Term) -> io::Result<Option<TermInfo>> {
pub(crate) async fn get_term_info_async(
&self,
term: &Term,
) -> crate::AsyncIoResult<Option<TermInfo>> {
self.termdict.get_async(term.value_bytes()).await
}
/// Returns a block postings given a `Term`.
/// This method is for an advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
pub async fn warm_postings(&self, term: &Term, with_positions: bool) -> io::Result<()> {
let term_info_opt: Option<TermInfo> = self.get_term_info_async(term).await?;
/// Most user should prefer using `read_postings` instead.
pub async fn warm_postings(
&self,
term: &Term,
with_positions: bool,
) -> crate::AsyncIoResult<()> {
let term_info_opt = self.get_term_info_async(term).await?;
if let Some(term_info) = term_info_opt {
self.postings_file_slice
.read_bytes_slice_async(term_info.postings_range.clone())
@@ -223,20 +231,8 @@ impl InvertedIndexReader {
Ok(())
}
/// Read the block postings for all terms.
/// This method is for an advanced usage only.
///
/// If you know which terms to pre-load, prefer using [`Self::warm_postings`] instead.
pub async fn warm_postings_full(&self, with_positions: bool) -> io::Result<()> {
self.postings_file_slice.read_bytes_async().await?;
if with_positions {
self.positions_file_slice.read_bytes_async().await?;
}
Ok(())
}
/// Returns the number of documents containing the term asynchronously.
pub async fn doc_freq_async(&self, term: &Term) -> io::Result<u32> {
pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
Ok(self
.get_term_info_async(term)
.await?

View File

@@ -4,7 +4,7 @@ use std::{fmt, io};
use crate::collector::Collector;
use crate::core::{Executor, SegmentReader};
use crate::query::{EnableScoring, Query};
use crate::query::Query;
use crate::schema::{Document, Schema, Term};
use crate::space_usage::SearcherSpaceUsage;
use crate::store::{CacheStats, StoreReader};
@@ -69,7 +69,7 @@ pub struct Searcher {
}
impl Searcher {
/// Returns the `Index` associated with the `Searcher`
/// Returns the `Index` associated to the `Searcher`
pub fn index(&self) -> &Index {
&self.inner.index
}
@@ -108,7 +108,7 @@ impl Searcher {
store_reader.get_async(doc_address.doc_id).await
}
/// Access the schema associated with the index of this searcher.
/// Access the schema associated to the index of this searcher.
pub fn schema(&self) -> &Schema {
&self.inner.schema
}
@@ -161,11 +161,11 @@ impl Searcher {
///
/// Search works as follows :
///
/// First the weight object associated with the query is created.
/// First the weight object associated to the query is created.
///
/// Then, the query loops over the segments and for each segment :
/// - setup the collector and informs it that the segment being processed has changed.
/// - creates a SegmentCollector for collecting documents associated with the segment
/// - creates a SegmentCollector for collecting documents associated to the segment
/// - creates a `Scorer` object associated for this segment
/// - iterate through the matched documents and push them to the segment collector.
///
@@ -199,12 +199,7 @@ impl Searcher {
executor: &Executor,
) -> crate::Result<C::Fruit> {
let scoring_enabled = collector.requires_scoring();
let enabled_scoring = if scoring_enabled {
EnableScoring::Enabled(self)
} else {
EnableScoring::Disabled(self.schema())
};
let weight = query.weight(enabled_scoring)?;
let weight = query.weight(self, scoring_enabled)?;
let segment_readers = self.segment_readers();
let fruits = executor.map(
|(segment_ord, segment_reader)| {

View File

@@ -70,7 +70,7 @@ impl Segment {
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated with a segment component.
/// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
self.meta.relative_path(component)
}

View File

@@ -6,7 +6,7 @@ use std::slice;
/// except the delete component that takes a `segment_uuid`.`delete_opstamp`.`component_extension`
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum SegmentComponent {
/// Postings (or inverted list). Sorted lists of document ids, associated with terms
/// Postings (or inverted list). Sorted lists of document ids, associated to terms
Postings,
/// Positions of terms in each document.
Positions,

View File

@@ -57,7 +57,7 @@ impl SegmentId {
/// Picking the first 8 chars is enough to identify
/// segments in a display message (e.g. a5c4dfcb).
pub fn short_uuid_string(&self) -> String {
self.0.as_simple().to_string()[..8].to_string()
(&self.0.as_simple().to_string()[..8]).to_string()
}
/// Returns a segment uuid string.

View File

@@ -89,7 +89,7 @@ impl SegmentReader {
&self.fast_fields_readers
}
/// Accessor to the `FacetReader` associated with a given `Field`.
/// Accessor to the `FacetReader` associated to a given `Field`.
pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
let field_entry = self.schema.get_field_entry(field);
@@ -208,18 +208,18 @@ impl SegmentReader {
})
}
/// Returns a field reader associated with the field given in argument.
/// Returns a field reader associated to the field given in argument.
/// If the field was not present in the index during indexing time,
/// the InvertedIndexReader is empty.
///
/// The field reader is in charge of iterating through the
/// term dictionary associated with a specific field,
/// and opening the posting list associated with any term.
/// term dictionary associated to a specific field,
/// and opening the posting list associated to any term.
///
/// If the field is not marked as index, a warning is logged and an empty `InvertedIndexReader`
/// If the field is not marked as index, a warn is logged and an empty `InvertedIndexReader`
/// is returned.
/// Similarly, if the field is marked as indexed but no term has been indexed for the given
/// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
/// Similarly if the field is marked as indexed but no term has been indexed for the given
/// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
if let Some(inv_idx_reader) = self
.inv_idx_reader_cache
@@ -241,7 +241,7 @@ impl SegmentReader {
if postings_file_opt.is_none() || record_option_opt.is_none() {
// no documents in the segment contained this field.
// As a result, no data is associated with the inverted index.
// As a result, no data is associated to the inverted index.
//
// Returns an empty inverted index.
let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);

View File

@@ -75,7 +75,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let mut prev_offset = 0;
for (file_addr, offset) in self.offsets {
VInt(offset - prev_offset).serialize(&mut self.write)?;
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
file_addr.serialize(&mut self.write)?;
prev_offset = offset;
}
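Since the offsets written above are monotonically increasing, storing VInt-encoded deltas keeps each entry to a byte or two. A sketch of the delta step (the VInt byte encoding itself is elided):

```rust
fn delta_encode(offsets: &[u64]) -> Vec<u64> {
    let mut prev = 0;
    offsets
        .iter()
        .map(|&offset| {
            let delta = offset - prev; // small, VInt-friendly values
            prev = offset;
            delta
        })
        .collect()
}

fn main() {
    assert_eq!(delta_encode(&[100, 150, 400]), vec![100, 50, 250]);
}
```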
@@ -154,14 +154,14 @@ impl CompositeFile {
}
}
/// Returns the `FileSlice` associated with
/// a given `Field` and stored in a `CompositeFile`.
/// Returns the `FileSlice` associated
/// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read(&self, field: Field) -> Option<FileSlice> {
self.open_read_with_idx(field, 0)
}
/// Returns the `FileSlice` associated with
/// a given `Field` and stored in a `CompositeFile`.
/// Returns the `FileSlice` associated
/// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
self.offsets_index
.get(&FileAddr { field, idx })

View File

@@ -39,7 +39,7 @@ impl RetryPolicy {
/// The `DirectoryLock` is an object that represents a file lock.
///
/// It is associated with a lock file, that gets deleted on `Drop.`
/// It is associated to a lock file, that gets deleted on `Drop.`
pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);
struct DirectoryLockGuard {
@@ -55,7 +55,7 @@ impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {
impl Drop for DirectoryLockGuard {
fn drop(&mut self) {
if let Err(e) = self.directory.delete(&self.path) {
if let Err(e) = self.directory.delete(&*self.path) {
error!("Failed to remove the lock file. {:?}", e);
}
}

View File

@@ -1,19 +1,23 @@
use std::ops::{Deref, Range, RangeBounds};
use std::sync::Arc;
use std::ops::{Deref, Range};
use std::sync::{Arc, Weak};
use std::{fmt, io};
use async_trait::async_trait;
use ownedbytes::{OwnedBytes, StableDeref};
use common::HasLen;
use stable_deref_trait::StableDeref;
use crate::HasLen;
use crate::directory::OwnedBytes;
pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Objects that represents files sections in tantivy.
///
/// By contract, whatever happens to the directory file, as long as a FileHandle
/// is alive, the data associated with it cannot be altered or destroyed.
///
/// The underlying behavior is therefore specific to the `Directory` that
/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
/// The underlying behavior is therefore specific to the `Directory` that created it.
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
/// on the filesystem.
#[async_trait]
@@ -23,12 +27,13 @@ pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
/// This method may panic if the range requested is invalid.
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes>;
#[cfg(feature = "quickwit")]
#[doc(hidden)]
async fn read_bytes_async(&self, _byte_range: Range<usize>) -> io::Result<OwnedBytes> {
Err(io::Error::new(
io::ErrorKind::Unsupported,
"Async read is not supported.",
))
async fn read_bytes_async(
&self,
_byte_range: Range<usize>,
) -> crate::AsyncIoResult<OwnedBytes> {
Err(crate::error::AsyncIoError::AsyncUnsupported)
}
}
@@ -39,7 +44,8 @@ impl FileHandle for &'static [u8] {
Ok(OwnedBytes::new(bytes))
}
async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
Ok(self.read_bytes(byte_range)?)
}
}
@@ -67,34 +73,6 @@ impl fmt::Debug for FileSlice {
}
}
/// Takes a range, a `RangeBounds` object, and returns
/// a `Range` that corresponds to the relative application of the
/// `RangeBounds` object to the original `Range`.
///
/// For instance, combine_ranges(`[2..11)`, `[5..7]`) returns `[7..10]`
/// as it reads, what is the sub-range that starts at the 5 element of
/// `[2..11)` and ends at the 9th element included.
///
/// This function panics, if the result would suggest something outside
/// of the bounds of the original range.
fn combine_ranges<R: RangeBounds<usize>>(orig_range: Range<usize>, rel_range: R) -> Range<usize> {
let start: usize = orig_range.start
+ match rel_range.start_bound().cloned() {
std::ops::Bound::Included(rel_start) => rel_start,
std::ops::Bound::Excluded(rel_start) => rel_start + 1,
std::ops::Bound::Unbounded => 0,
};
assert!(start <= orig_range.end);
let end: usize = match rel_range.end_bound().cloned() {
std::ops::Bound::Included(rel_end) => orig_range.start + rel_end + 1,
std::ops::Bound::Excluded(rel_end) => orig_range.start + rel_end,
std::ops::Bound::Unbounded => orig_range.end,
};
assert!(end >= start);
assert!(end <= orig_range.end);
start..end
}
impl FileSlice {
/// Wraps a FileHandle.
pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {
@@ -118,11 +96,11 @@ impl FileSlice {
///
/// Panics if `byte_range.end` exceeds the filesize.
#[must_use]
#[inline]
pub fn slice<R: RangeBounds<usize>>(&self, byte_range: R) -> FileSlice {
pub fn slice(&self, byte_range: Range<usize>) -> FileSlice {
assert!(byte_range.end <= self.len());
FileSlice {
data: self.data.clone(),
range: combine_ranges(self.range.clone(), byte_range),
range: self.range.start + byte_range.start..self.range.start + byte_range.end,
}
}
@@ -142,8 +120,9 @@ impl FileSlice {
self.data.read_bytes(self.range.clone())
}
#[cfg(feature = "quickwit")]
#[doc(hidden)]
pub async fn read_bytes_async(&self) -> io::Result<OwnedBytes> {
pub async fn read_bytes_async(&self) -> crate::AsyncIoResult<OwnedBytes> {
self.data.read_bytes_async(self.range.clone()).await
}
@@ -161,8 +140,12 @@ impl FileSlice {
.read_bytes(self.range.start + range.start..self.range.start + range.end)
}
#[cfg(feature = "quickwit")]
#[doc(hidden)]
pub async fn read_bytes_slice_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
pub async fn read_bytes_slice_async(
&self,
byte_range: Range<usize>,
) -> crate::AsyncIoResult<OwnedBytes> {
assert!(
self.range.start + byte_range.end <= self.range.end,
"`to` exceeds the fileslice length"
@@ -224,7 +207,8 @@ impl FileHandle for FileSlice {
self.read_bytes_slice(range)
}
async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
self.read_bytes_slice_async(byte_range).await
}
}
@@ -241,19 +225,21 @@ impl FileHandle for OwnedBytes {
Ok(self.slice(range))
}
async fn read_bytes_async(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
self.read_bytes(range)
#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
let bytes = self.read_bytes(range)?;
Ok(bytes)
}
}
#[cfg(test)]
mod tests {
use std::io;
use std::ops::Bound;
use std::sync::Arc;
use common::HasLen;
use super::{FileHandle, FileSlice};
use crate::{file_slice::combine_ranges, HasLen};
#[test]
fn test_file_slice() -> io::Result<()> {
@@ -324,23 +310,4 @@ mod tests {
b"bcd"
);
}
#[test]
fn test_combine_range() {
assert_eq!(combine_ranges(1..3, 0..1), 1..2);
assert_eq!(combine_ranges(1..3, 1..), 2..3);
assert_eq!(combine_ranges(1..4, ..2), 1..3);
assert_eq!(combine_ranges(3..10, 2..5), 5..8);
assert_eq!(combine_ranges(2..11, 5..=7), 7..10);
assert_eq!(
combine_ranges(2..11, (Bound::Excluded(5), Bound::Unbounded)),
8..11
);
}
#[test]
#[should_panic]
fn test_combine_range_panics() {
let _ = combine_ranges(3..5, 1..4);
}
}

View File

@@ -9,7 +9,7 @@ use crc32fast::Hasher;
use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
// Watches a file and executes registered callbacks when the file is modified.
pub struct FileWatcher {

View File

@@ -38,7 +38,7 @@ impl Footer {
counting_write.write_all(serde_json::to_string(&self)?.as_ref())?;
let footer_payload_len = counting_write.written_bytes();
BinarySerializable::serialize(&(footer_payload_len as u32), write)?;
BinarySerializable::serialize(&FOOTER_MAGIC_NUMBER, write)?;
BinarySerializable::serialize(&(FOOTER_MAGIC_NUMBER as u32), write)?;
Ok(())
}
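Read together, the two `serialize` calls above imply the following tail layout (a reconstruction from this hunk, not an authoritative spec):
// | body ... | footer JSON payload | footer_payload_len: u32 | FOOTER_MAGIC_NUMBER: u32 |
//
// So total_footer_size = footer_payload_len + footer_metadata_len, which is what
// the deserialization hunk below subtracts from file.len() to locate the payload.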
@@ -90,10 +90,9 @@ impl Footer {
));
}
let footer: Footer =
serde_json::from_slice(&file.read_bytes_slice(
file.len() - total_footer_size..file.len() - footer_metadata_len,
)?)?;
let footer: Footer = serde_json::from_slice(&file.read_bytes_slice(
file.len() - total_footer_size..file.len() - footer_metadata_len as usize,
)?)?;
let body = file.slice_to(file.len() - total_footer_size);
Ok((footer, body))

View File

@@ -388,7 +388,7 @@ mod tests_mmap_specific {
let tempdir_path = PathBuf::from(tempdir.path());
let living_files = HashSet::new();
let mmap_directory = MmapDirectory::open(tempdir_path).unwrap();
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(Box::new(mmap_directory)).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();

View File

@@ -3,13 +3,13 @@ use std::fs::{self, File, OpenOptions};
use std::io::{self, BufWriter, Read, Seek, Write};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock, Weak};
use std::sync::{Arc, RwLock};
use std::{fmt, result};
use common::StableDeref;
use fs2::FileExt;
use memmap2::Mmap;
use serde::{Deserialize, Serialize};
use stable_deref_trait::StableDeref;
use tempfile::TempDir;
use crate::core::META_FILEPATH;
@@ -18,13 +18,10 @@ use crate::directory::error::{
};
use crate::directory::file_watcher::FileWatcher;
use crate::directory::{
AntiCallToken, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes, TerminatingWrite,
WatchCallback, WatchHandle, WritePtr,
AntiCallToken, ArcBytes, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes,
TerminatingWrite, WatchCallback, WatchHandle, WeakArcBytes, WritePtr,
};
pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
@@ -304,7 +301,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
"Path {:?} does not have a parent directory.",
)
})?;
let mut tempfile = tempfile::Builder::new().tempfile_in(parent_path)?;
let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
tempfile.write_all(content)?;
tempfile.flush()?;
tempfile.as_file_mut().sync_data()?;
@@ -337,11 +334,11 @@ impl Directory for MmapDirectory {
Ok(Arc::new(owned_bytes))
}
/// Any entry associated with the path in the mmap will be
/// Any entry associated to the path in the mmap will be
/// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
let full_path = self.resolve_path(path);
fs::remove_file(full_path).map_err(|e| {
fs::remove_file(&full_path).map_err(|e| {
if e.kind() == io::ErrorKind::NotFound {
DeleteError::FileDoesNotExist(path.to_owned())
} else {
@@ -395,7 +392,7 @@ impl Directory for MmapDirectory {
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(full_path) {
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer).map_err(|io_error| {
OpenReadError::wrap_io_error(io_error, path.to_path_buf())
@@ -425,7 +422,7 @@ impl Directory for MmapDirectory {
let file: File = OpenOptions::new()
.write(true)
.create(true) //< if the file does not exist yet, create it.
.open(full_path)
.open(&full_path)
.map_err(LockError::wrap_io_error)?;
if lock.is_blocking {
file.lock_exclusive().map_err(LockError::wrap_io_error)?;
@@ -475,8 +472,6 @@ mod tests {
// There are more tests in directory/mod.rs
// The following tests are specific to the MmapDirectory
use std::time::Duration;
use common::HasLen;
use super::*;
@@ -571,21 +566,9 @@ mod tests {
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
}
fn assert_eventually<P: Fn() -> Option<String>>(predicate: P) {
for _ in 0..30 {
if predicate().is_none() {
break;
}
std::thread::sleep(Duration::from_millis(200));
}
if let Some(error_msg) = predicate() {
panic!("{}", error_msg);
}
}
#[test]
fn test_mmap_released() {
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
fn test_mmap_released() -> crate::Result<()> {
let mmap_directory = MmapDirectory::create_from_tempdir()?;
let mut schema_builder: SchemaBuilder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
@@ -594,56 +577,40 @@ mod tests {
let index =
Index::create(mmap_directory.clone(), schema, IndexSettings::default()).unwrap();
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_for_tests()?;
let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_num_segments(3);
index_writer.set_merge_policy(Box::new(log_merge_policy));
for _num_commits in 0..10 {
for _ in 0..10 {
index_writer.add_document(doc!(text_field=>"abc")).unwrap();
index_writer.add_document(doc!(text_field=>"abc"))?;
}
index_writer.commit().unwrap();
index_writer.commit()?;
}
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
.try_into()?;
for _ in 0..4 {
index_writer.add_document(doc!(text_field=>"abc")).unwrap();
index_writer.commit().unwrap();
reader.reload().unwrap();
index_writer.add_document(doc!(text_field=>"abc"))?;
index_writer.commit()?;
reader.reload()?;
}
index_writer.wait_merging_threads().unwrap();
index_writer.wait_merging_threads()?;
reader.reload().unwrap();
reader.reload()?;
let num_segments = reader.searcher().segment_readers().len();
assert!(num_segments <= 4);
let num_components_except_deletes_and_tempstore =
crate::core::SegmentComponent::iterator().len() - 2;
let max_num_mmapped = num_components_except_deletes_and_tempstore * num_segments;
assert_eventually(|| {
let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
if num_mmapped > max_num_mmapped {
Some(format!(
"Expected at most {max_num_mmapped} mmapped files, got {num_mmapped}"
))
} else {
None
}
});
assert_eq!(
num_segments * num_components_except_deletes_and_tempstore,
mmap_directory.get_cache_info().mmapped.len()
);
}
// This test has failed on CI: the last Mmap is dropped from the merging thread, so there
// might indeed be a race condition.
assert_eventually(|| {
let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
if num_mmapped > 0 {
Some(format!("Expected no mmapped files, got {num_mmapped}"))
} else {
None
}
});
assert!(mmap_directory.get_cache_info().mmapped.is_empty());
Ok(())
}
}

View File

@@ -5,6 +5,7 @@ mod mmap_directory;
mod directory;
mod directory_lock;
mod file_slice;
mod file_watcher;
mod footer;
mod managed_directory;
@@ -19,12 +20,14 @@ mod composite_file;
use std::io::BufWriter;
use std::path::PathBuf;
pub use common::file_slice::{FileHandle, FileSlice};
pub use common::{AntiCallToken, OwnedBytes, TerminatingWrite};
pub use common::{AntiCallToken, TerminatingWrite};
pub use ownedbytes::OwnedBytes;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
pub use self::file_slice::{FileHandle, FileSlice};
pub use self::ram_directory::RamDirectory;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};

View File

@@ -136,20 +136,6 @@ impl RamDirectory {
Self::default()
}
/// Deep clones the directory.
///
/// Subsequent writes to one of the copies
/// will not affect the other copy.
pub fn deep_clone(&self) -> RamDirectory {
let inner_clone = InnerDirectory {
fs: self.fs.read().unwrap().fs.clone(),
watch_router: Default::default(),
};
RamDirectory {
fs: Arc::new(RwLock::new(inner_clone)),
}
}
/// Returns the sum of the size of the different files
/// in the [`RamDirectory`].
pub fn total_mem_usage(&self) -> usize {
@@ -270,23 +256,4 @@ mod tests {
assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
}
#[test]
fn test_ram_directory_deep_clone() {
let dir = RamDirectory::default();
let test = Path::new("test");
let test2 = Path::new("test2");
dir.atomic_write(test, b"firstwrite").unwrap();
let dir_clone = dir.deep_clone();
assert_eq!(
dir_clone.atomic_read(test).unwrap(),
dir.atomic_read(test).unwrap()
);
dir.atomic_write(test, b"original").unwrap();
dir_clone.atomic_write(test, b"clone").unwrap();
dir_clone.atomic_write(test2, b"clone2").unwrap();
assert_eq!(dir.atomic_read(test).unwrap(), b"original");
assert_eq!(&dir_clone.atomic_read(test).unwrap(), b"clone");
assert_eq!(&dir_clone.atomic_read(test2).unwrap(), b"clone2");
}
}

View File

@@ -104,6 +104,28 @@ pub enum TantivyError {
InternalError(String),
}
#[cfg(feature = "quickwit")]
#[derive(Error, Debug)]
#[doc(hidden)]
pub enum AsyncIoError {
#[error("io::Error `{0}`")]
Io(#[from] io::Error),
#[error("Asynchronous API is unsupported by this directory")]
AsyncUnsupported,
}
#[cfg(feature = "quickwit")]
impl From<AsyncIoError> for TantivyError {
fn from(async_io_err: AsyncIoError) -> Self {
match async_io_err {
AsyncIoError::Io(io_err) => TantivyError::from(io_err),
AsyncIoError::AsyncUnsupported => {
TantivyError::SystemError(format!("{:?}", async_io_err))
}
}
}
}
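A minimal sketch of why this `From` impl matters, assuming a hypothetical call site returning `crate::Result`: the `?` operator promotes an `AsyncIoError` into a `TantivyError` transparently.
#[cfg(feature = "quickwit")]
async fn read_all(slice: &FileSlice) -> crate::Result<OwnedBytes> {
    // `read_bytes_async` returns `crate::AsyncIoResult<OwnedBytes>`; `?`
    // converts its `AsyncIoError` into `TantivyError` via the impl above.
    let bytes = slice.read_bytes_async().await?;
    Ok(bytes)
}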
impl From<io::Error> for TantivyError {
fn from(io_err: io::Error) -> TantivyError {
TantivyError::IoError(Arc::new(io_err))

View File

@@ -1,7 +1,8 @@
use std::io;
use std::io::Write;
use common::{intersect_bitsets, BitSet, OwnedBytes, ReadOnlyBitSet};
use common::{intersect_bitsets, BitSet, ReadOnlyBitSet};
use ownedbytes::OwnedBytes;
use crate::space_usage::ByteCount;
use crate::DocId;

View File

@@ -6,7 +6,7 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)]
mod tests {
use crate::query::{EnableScoring, TermQuery};
use crate::query::TermQuery;
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value, FAST, INDEXED, STORED};
use crate::{DocAddress, DocSet, Index, Searcher, Term};
@@ -82,7 +82,7 @@ mod tests {
let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight = term_query.specialized_weight(EnableScoring::Enabled(&searcher))?;
let term_weight = term_query.specialized_weight(&searcher, true)?;
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
assert_eq!(term_scorer.doc(), 0u32);
Ok(())
@@ -95,8 +95,7 @@ mod tests {
let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight_err =
term_query.specialized_weight(EnableScoring::Disabled(searcher.schema()));
let term_weight_err = term_query.specialized_weight(&searcher, false);
assert!(matches!(
term_weight_err,
Err(crate::TantivyError::SchemaError(_))

Some files were not shown because too many files have changed in this diff.