Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-28 21:12:54 +00:00)

Compare commits: optional_c...use_stats (58 commits)
- e07f1970ea
- 78273bfb0d
- 2650111b76
- 1176555eff
- f8d111a75e
- e17996f2fd
- f3621c0487
- 14222a47a3
- 8312c882a5
- 7a8fce0ae7
- 196e42f33e
- 82a183bc2d
- 3090d49615
- 7c6cc818ae
- 514d23a20c
- 4f9efe654c
- 1afa5bf3db
- 07a51eb7c8
- 2080c370c2
- b22f96624e
- b78dc5e313
- 3f915925af
- 9c5fef5af7
- 9948a84ebe
- 45156fd869
- bc959006fa
- 7385a8f80c
- 13b89cba17
- f4804ce2f5
- 2a6d1eaf78
- 540a9972bd
- bb48c3e488
- 3339a3ec05
- f39165e1e7
- 32cb1d22da
- 4a6bf50e78
- 2ac1cc2fc0
- f9171a3981
- a2cf6a79b4
- f6e87a5319
- f9971e15fe
- 3cdc8e7472
- fbb0f8b55d
- 136a8f4124
- 5d4535de83
- 2c50b02eb3
- 509adab79d
- 96c93a6ba3
- 495824361a
- 485a8f507e
- 1119e59eae
- ee1f2c1f28
- 600548fd26
- 9929c0c221
- f53e65648b
- 0281b22b77
- a05c184830
- 0b40a7fe43
CHANGELOG.md (17 changed lines)
@@ -1,26 +1,29 @@
Tantivy 0.19
================================
#### Bugfixes
- Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480) (@PSeitz @fulmicoton)

#### Features/Improvements
- Add support for `IN` in the query parser, e.g. `field: IN [val1 val2 val3]` [#1683](https://github.com/quickwit-oss/tantivy/pull/1683) (@trinity-1686a)
- Skip score calculation when no scoring is required [#1646](https://github.com/quickwit-oss/tantivy/pull/1646) (@PSeitz)
- Limit fast fields to u32 (`get_val(u32)`) [#1644](https://github.com/quickwit-oss/tantivy/pull/1644) (@PSeitz)
- Major bugfix: Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
  The `DateTime` type has been updated to hold timestamps with microseconds precision.
  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used as a hint for fast values compression. Otherwise, seconds precision is used everywhere else (i.e. terms, indexing). (@evanxg852000)
- The `DateTime` type has been updated to hold timestamps with microseconds precision.
  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used as a hint for fast values compression. Otherwise, seconds precision is used everywhere else (i.e. terms, indexing) [#1396](https://github.com/quickwit-oss/tantivy/pull/1396) (@evanxg852000)
- Add IP address field type [#1553](https://github.com/quickwit-oss/tantivy/pull/1553) (@PSeitz)
- Add boolean field type [#1382](https://github.com/quickwit-oss/tantivy/pull/1382) (@boraarslan)
- Remove Searcher pool and make `Searcher` cloneable. (@PSeitz)
- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570 (@PSeitz)
- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480 (@PSeitz @fulmicoton)
- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570) (@PSeitz)
- Detect and apply gcd on fastfield codecs [#1418](https://github.com/quickwit-oss/tantivy/pull/1418) (@PSeitz)
- Doc store
  - use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510 (@PSeitz @fulmicoton)
  - use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510) (@PSeitz @fulmicoton)
  - Expose doc store cache size [#1403](https://github.com/quickwit-oss/tantivy/pull/1403) (@PSeitz)
  - Enable compression levels for doc store [#1378](https://github.com/quickwit-oss/tantivy/pull/1378) (@PSeitz)
  - Make block size configurable [#1374](https://github.com/quickwit-oss/tantivy/pull/1374) (@kryesh)
- Make `tantivy::TantivyError` cloneable [#1402](https://github.com/quickwit-oss/tantivy/pull/1402) (@PSeitz)
- Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
- Aggregation
  - Add aggregation support for date type [#1693](https://github.com/quickwit-oss/tantivy/pull/1693) (@PSeitz)
  - Add support for keyed parameter in range and histogram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
  - Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
- Faster indexing
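The date entry above is the most API-visible change in this list. Below is a minimal sketch of configuring a date field with the `DateOptions`/`DatePrecision` types introduced in #1396; the exact builder method names (`set_stored`, `set_fast`, `set_precision`) follow tantivy's usual options builders and are an assumption, not something shown in this diff:

```
use tantivy::schema::{Cardinality, DateOptions, Schema, INDEXED};
use tantivy::DatePrecision;

// Hypothetical schema setup: index a date field, make it a fast field,
// and hint that seconds-level precision is enough, so the fast-field
// compression can drop the sub-second bits.
let mut schema_builder = Schema::builder();
let date_opts = DateOptions::from(INDEXED)
    .set_stored()
    .set_fast(Cardinality::SingleValue)
    .set_precision(DatePrecision::Seconds);
let occurred_at = schema_builder.add_date_field("occurred_at", date_opts);
let schema = schema_builder.build();
```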
Cargo.toml (29 changed lines)
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.19.0-dev"
version = "0.19.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -15,7 +15,7 @@ rust-version = "1.62"

[dependencies]
oneshot = "0.1.5"
base64 = "0.13.0"
base64 = "0.21.0"
byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
@@ -25,7 +25,7 @@ tantivy-fst = "0.4.0"
memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.11", optional = true, default-features = false }
zstd = { version = "0.12", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true }
tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"
@@ -36,12 +36,6 @@ fs2 = { version = "0.4.3", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
tantivy-bitpacker = { version="0.2", path="./bitpacker" }
common = { version = "0.3", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
ownedbytes = { version="0.3", path="./ownedbytes" }
stable_deref_trait = "1.2.0"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
@@ -54,14 +48,21 @@ murmurhash32 = "0.2.0"
time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.7.5"
lru = "0.9.0"
fastdivide = "0.4.0"
itertools = "0.10.3"
measure_time = "0.8.2"
ciborium = { version = "0.2", optional = true}
async-trait = "0.1.53"
arc-swap = "1.5.0"

sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
tantivy-bitpacker = { version= "0.3", path="./bitpacker" }
common = { version= "0.5", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version= "0.3", path="./fastfield_codecs", default-features = false }
tokenizer-api = { version="0.1", path="./tokenizer-api", package="tantivy-tokenizer-api" }

[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"

@@ -73,7 +74,7 @@ pretty_assertions = "1.2.1"
proptest = "1.0.0"
criterion = "0.4"
test-log = "0.2.10"
env_logger = "0.9.0"
env_logger = "0.10.0"
pprof = { version = "0.11.0", features = ["flamegraph", "criterion"] }
futures = "0.3.21"

@@ -103,10 +104,10 @@ zstd-compression = ["zstd"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.

quickwit = ["ciborium"]
quickwit = ["sstable"]

[workspace]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes", "stacker", "sstable", "columnar", "tokenizer-api"]

# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
README.md (10 changed lines)
@@ -29,7 +29,7 @@ Your mileage WILL vary depending on the nature of queries and their load.
# Features

- Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
- Configurable tokenizer (stemming available for 17 Latin languages) with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command-line tools
- BM25 scoring (the same as Lucene)
@@ -42,12 +42,12 @@ Your mileage WILL vary depending on the nature of queries and their load.
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields
- Text, i64, u64, f64, dates, and hierarchical facet fields
- LZ4 compressed document store
- Compressed document store (LZ4, Zstd, None, Brotli, Snap)
- Range queries
- Faceted search
- Configurable indexing (optional term frequency and position indexing)
- JSON Field
- Aggregation Collector: range buckets, average, and stats metrics
- Aggregation Collector: histogram, range buckets, average, and stats metrics
- LogMergePolicy with deletes
- Searcher Warmer API
- Cheesy logo with a horse
@@ -81,6 +81,10 @@ There are many ways to support this project.

We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.

## Tokenizer

When implementing a tokenizer for tantivy, depend on the `tantivy-tokenizer-api` crate.

## Minimum supported Rust version

Tantivy currently requires Rust 1.62 or later to compile.
bitpacker/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "tantivy-bitpacker"
version = "0.2.0"
version = "0.3.0"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
@@ -8,6 +8,8 @@ categories = []
description = """Tantivy-sub crate: bitpacking"""
repository = "https://github.com/quickwit-oss/tantivy"
keywords = []
documentation = "https://docs.rs/tantivy-bitpacker/latest/tantivy_bitpacker"
homepage = "https://github.com/quickwit-oss/tantivy"


# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
bitpacker/src/bitpacker.rs

@@ -25,15 +25,14 @@ impl BitPacker {
        num_bits: u8,
        output: &mut TWrite,
    ) -> io::Result<()> {
        let val_u64 = val as u64;
        let num_bits = num_bits as usize;
        if self.mini_buffer_written + num_bits > 64 {
            self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
            self.mini_buffer |= val.wrapping_shl(self.mini_buffer_written as u32);
            output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
            self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
            self.mini_buffer = val.wrapping_shr((64 - self.mini_buffer_written) as u32);
            self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
        } else {
            self.mini_buffer |= val_u64 << self.mini_buffer_written;
            self.mini_buffer |= val << self.mini_buffer_written;
            self.mini_buffer_written += num_bits;
            if self.mini_buffer_written == 64 {
                output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
@@ -92,17 +91,15 @@ impl BitUnpacker {
            return 0u64;
        }
        let addr_in_bits = idx * self.num_bits as u32;
        let addr = addr_in_bits >> 3;
        let addr = (addr_in_bits >> 3) as usize;
        let bit_shift = addr_in_bits & 7;
        debug_assert!(
            addr + 8 <= data.len() as u32,
            addr + 8 <= data.len(),
            "The fast field data should have been padded with 7 bytes."
        );
        let bytes: [u8; 8] = (&data[(addr as usize)..(addr as usize) + 8])
            .try_into()
            .unwrap();
        let bytes: [u8; 8] = (&data[addr..addr + 8]).try_into().unwrap();
        let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
        let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
        let val_shifted = val_unshifted_unmasked >> bit_shift;
        val_shifted & self.mask
    }
}
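To make the `BitPacker`/`BitUnpacker` contract in this hunk concrete, here is a minimal round-trip sketch. It assumes `BitPacker::new` and a `close` method that flushes the partially filled mini-buffer (both belong to this crate but sit outside the lines shown, so their exact signatures are an assumption); the 7 trailing zero bytes satisfy the padding requirement asserted in `get`:

```
use tantivy_bitpacker::{BitPacker, BitUnpacker};

fn roundtrip() -> std::io::Result<()> {
    let vals: [u64; 4] = [3, 7, 0, 5];
    let num_bits: u8 = 3; // enough for values in 0..8

    let mut buffer: Vec<u8> = Vec::new();
    let mut packer = BitPacker::new();
    for &val in &vals {
        packer.write(val, num_bits, &mut buffer)?;
    }
    packer.close(&mut buffer)?; // flush whatever is left in the mini-buffer

    // `get` always reads 8 bytes at once, so pad with 7 bytes as documented.
    buffer.extend_from_slice(&[0u8; 7]);

    let unpacker = BitUnpacker::new(num_bits);
    for (idx, &expected) in vals.iter().enumerate() {
        assert_eq!(unpacker.get(idx as u32, &buffer), expected);
    }
    Ok(())
}
```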
bitpacker/src/blocked_bitpacker.rs

@@ -84,7 +84,7 @@ impl BlockedBitpacker {
    #[inline]
    pub fn add(&mut self, val: u64) {
        self.buffer.push(val);
        if self.buffer.len() == BLOCK_SIZE as usize {
        if self.buffer.len() == BLOCK_SIZE {
            self.flush();
        }
    }
@@ -126,8 +126,8 @@ impl BlockedBitpacker {
    }
    #[inline]
    pub fn get(&self, idx: usize) -> u64 {
        let metadata_pos = idx / BLOCK_SIZE as usize;
        let pos_in_block = idx % BLOCK_SIZE as usize;
        let metadata_pos = idx / BLOCK_SIZE;
        let pos_in_block = idx % BLOCK_SIZE;
        if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
            let unpacked = BitUnpacker::new(metadata.num_bits()).get(
                pos_in_block as u32,
bitpacker/src/lib.rs

@@ -1,6 +1,8 @@
mod bitpacker;
mod blocked_bitpacker;

use std::cmp::Ordering;

pub use crate::bitpacker::{BitPacker, BitUnpacker};
pub use crate::blocked_bitpacker::BlockedBitpacker;

@@ -37,44 +39,104 @@ pub fn compute_num_bits(n: u64) -> u8 {
    }
}

/// Computes the (min, max) of an iterator of `PartialOrd` values.
///
/// For values implementing `Ord` (in a way consistent with their `PartialOrd` impl),
/// this function behaves as expected.
///
/// For values with partial ordering, the behavior is non-trivial and may
/// depend on the order of the values.
/// For floats however, it simply returns the same results as if NaN were
/// skipped.
pub fn minmax<I, T>(mut vals: I) -> Option<(T, T)>
where
    I: Iterator<Item = T>,
    T: Copy + Ord,
    T: Copy + PartialOrd,
{
    if let Some(first_el) = vals.next() {
        return Some(vals.fold((first_el, first_el), |(min_val, max_val), el| {
            (min_val.min(el), max_val.max(el))
        }));
    let first_el = vals.find(|val| {
        // We use this to make sure we skip all NaN values when
        // working with a float type.
        val.partial_cmp(val) == Some(Ordering::Equal)
    })?;
    let mut min_so_far: T = first_el;
    let mut max_so_far: T = first_el;
    for val in vals {
        if val.partial_cmp(&min_so_far) == Some(Ordering::Less) {
            min_so_far = val;
        }
        if val.partial_cmp(&max_so_far) == Some(Ordering::Greater) {
            max_so_far = val;
        }
    }
    None
    Some((min_so_far, max_so_far))
}

#[test]
fn test_compute_num_bits() {
    assert_eq!(compute_num_bits(1), 1u8);
    assert_eq!(compute_num_bits(0), 0u8);
    assert_eq!(compute_num_bits(2), 2u8);
    assert_eq!(compute_num_bits(3), 2u8);
    assert_eq!(compute_num_bits(4), 3u8);
    assert_eq!(compute_num_bits(255), 8u8);
    assert_eq!(compute_num_bits(256), 9u8);
    assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_minmax_empty() {
        let vals: Vec<u32> = vec![];
        assert_eq!(minmax(vals.into_iter()), None);
    }
    #[test]
    fn test_compute_num_bits() {
        assert_eq!(compute_num_bits(1), 1u8);
        assert_eq!(compute_num_bits(0), 0u8);
        assert_eq!(compute_num_bits(2), 2u8);
        assert_eq!(compute_num_bits(3), 2u8);
        assert_eq!(compute_num_bits(4), 3u8);
        assert_eq!(compute_num_bits(255), 8u8);
        assert_eq!(compute_num_bits(256), 9u8);
        assert_eq!(compute_num_bits(5_000_000_000), 33u8);
    }

    #[test]
    fn test_minmax_one() {
        assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
    }
    #[test]
    fn test_minmax_empty() {
        let vals: Vec<u32> = vec![];
        assert_eq!(minmax(vals.into_iter()), None);
    }

    #[test]
    fn test_minmax_two() {
        assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
        assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
    #[test]
    fn test_minmax_one() {
        assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
    }

    #[test]
    fn test_minmax_two() {
        assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
        assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
    }

    #[test]
    fn test_minmax_nan() {
        assert_eq!(
            minmax(vec![f64::NAN, 1f64, 2f64].into_iter()),
            Some((1f64, 2f64))
        );
        assert_eq!(
            minmax(vec![2f64, f64::NAN, 1f64].into_iter()),
            Some((1f64, 2f64))
        );
        assert_eq!(
            minmax(vec![2f64, 1f64, f64::NAN].into_iter()),
            Some((1f64, 2f64))
        );
    }

    #[test]
    fn test_minmax_inf() {
        assert_eq!(
            minmax(vec![f64::INFINITY, 1f64, 2f64].into_iter()),
            Some((1f64, f64::INFINITY))
        );
        assert_eq!(
            minmax(vec![-f64::INFINITY, 1f64, 2f64].into_iter()),
            Some((-f64::INFINITY, 2f64))
        );
        assert_eq!(
            minmax(vec![2f64, f64::INFINITY, 1f64].into_iter()),
            Some((1f64, f64::INFINITY))
        );
        assert_eq!(
            minmax(vec![2f64, 1f64, -f64::INFINITY].into_iter()),
            Some((-f64::INFINITY, 2f64))
        );
    }
}
columnar/Cargo.toml (new file, 18 lines)
@@ -0,0 +1,18 @@
[package]
name = "tantivy-columnar"
version = "0.1.0"
edition = "2021"
license = "MIT"

[dependencies]
stacker = { path = "../stacker", package="tantivy-stacker"}
serde_json = "1"
thiserror = "1"
fnv = "1"
sstable = { path = "../sstable", package = "tantivy-sstable" }
common = { path = "../common", package = "tantivy-common" }
fastfield_codecs = { path = "../fastfield_codecs"}
itertools = "0.10"

[dev-dependencies]
proptest = "1"
columnar/README.md (new file, 67 lines)
@@ -0,0 +1,67 @@
# Columnar format

This crate describes the columnar format used in tantivy.

## Goals

This format is special in the following ways:
- it needs to be compact
- it does not need to be fully loaded in memory.
- it is designed to fit well with quickwit's strange constraint:
  we need to be able to load columns rapidly.
- columns of several types can be associated with the same column name.
- it needs to support columns with different types `(str, u64, i64, f64)`
  and different cardinalities `(required, optional, multivalued)`.
- columns, once loaded, offer cheap random access.

# Coercion rules

Users can create a columnar by inserting rows into a `ColumnarWriter`,
and serializing it into a `Write` object.
Nothing prevents a user from recording values of different types under the same `column_name`.

In that case, `tantivy-columnar`'s behavior is as follows:
- JsonValues are grouped into 3 types (String, Number, bool).
  Values that correspond to different groups are mapped to different columns. For instance, String values are treated independently
  from Number or boolean values. `tantivy-columnar` will simply emit several columns associated to a given column_name.
- Only one column for a given json value type is emitted. If number values with different number types are recorded (e.g. u64, i64, f64),
  `tantivy-columnar` will pick the first type that can represent the set of appended values, with the following priority order (`i64`, `u64`, `f64`).
  `i64` is picked over `u64` as it is less likely to require a change of type later: most use cases that strictly require `u64` do so for
  roughly 50% of their values (e.g. a 64-bit hash), while many nominally signed use cases only rarely contain a negative value.
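A minimal sketch of the coercion behavior described above, using the `record_numerical`/`serialize` methods that appear in this PR's tests (the resulting column type is inferred from the priority order, not verified output):

```
use tantivy_columnar::{ColumnarWriter, NumericalValue};

// Record a u64 and an i64 under the same column name.
let mut writer = ColumnarWriter::default();
writer.record_numerical(0u32, "mixed.value", NumericalValue::U64(12u64));
writer.record_numerical(1u32, "mixed.value", NumericalValue::I64(-3i64));

let mut buffer: Vec<u8> = Vec::new();
writer.serialize(2, &mut buffer).unwrap();
// Both values fit in i64, so per the priority order (i64, u64, f64)
// a single i64 column should be emitted for "mixed.value".
```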

# Columnar format

This columnar format may have more than one column (with different types) associated to the same `column_name` (see [Coercion rules](#coercion-rules) above).
The `(column_name, column_type)` couple, however, uniquely identifies a column.
That couple is serialized as the column's `column_key`. The format of that key is:
`[column_name][ZERO_BYTE][column_type_header: u8]`

```
COLUMNAR:=
    [COLUMNAR_DATA]
    [COLUMNAR_KEY_TO_DATA_INDEX]
    [COLUMNAR_FOOTER];


# Columns are sorted by their column key.
COLUMNAR_DATA:=
    [COLUMN_DATA]+;

COLUMNAR_FOOTER := [RANGE_SSTABLE_BYTES_LEN: 8 bytes little endian]

```

The columnar file starts with the actual column data, concatenated one after the other,
sorted by column key.

An sstable associates
`(column_name, column_cardinality, column_type)` to a range of bytes.

Column names may not contain the zero byte `\0`.

Listing all columns associated to `column_name` can therefore
be done by listing all keys prefixed by
`[column_name][ZERO_BYTE]`.

The associated range of bytes refers to the location of the column's data within `COLUMNAR_DATA`.
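A sketch of how such a key could be assembled (a hypothetical helper for illustration, not an API from this PR; the header byte comes from `ColumnTypeAndCardinality::to_code` in `column_type_header.rs` below):

```
/// Hypothetical helper illustrating the key layout
/// `[column_name][ZERO_BYTE][column_type_header: u8]`.
fn column_key(column_name: &str, column_type_header: u8) -> Vec<u8> {
    assert!(!column_name.as_bytes().contains(&0u8)); // names may not contain `\0`
    let mut key = Vec::with_capacity(column_name.len() + 2);
    key.extend_from_slice(column_name.as_bytes());
    key.push(0u8); // ZERO_BYTE separator
    key.push(column_type_header); // ColumnTypeAndCardinality::to_code()
    key
}

// Listing all columns for a name is then a prefix scan over
// `[column_name][ZERO_BYTE]`, i.e. the key range `[name\0 .. name\x01)`.
```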
columnar/src/column_type_header.rs (new file, 201 lines)
@@ -0,0 +1,201 @@
use crate::utils::{place_bits, select_bits};
use crate::value::NumericalType;
use crate::InvalidData;

/// Enum describing the number of values that can exist per document
/// (or per row if you will).
///
/// The cardinality must fit on 2 bits.
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u8)]
pub enum Cardinality {
    /// All documents contain exactly one value.
    /// Required is the default for auto-detecting the Cardinality, since it is the most strict.
    #[default]
    Required = 0,
    /// All documents contain at most one value.
    Optional = 1,
    /// All documents may contain any number of values.
    Multivalued = 2,
}

impl Cardinality {
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn try_from_code(code: u8) -> Result<Cardinality, InvalidData> {
        match code {
            0 => Ok(Cardinality::Required),
            1 => Ok(Cardinality::Optional),
            2 => Ok(Cardinality::Multivalued),
            _ => Err(InvalidData),
        }
    }
}

/// The column type represents the column type and fits on 6 bits.
///
/// - bits[0..3]: Column category type.
/// - bits[3..6]: Numerical type if necessary.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)]
pub enum ColumnType {
    Bytes,
    Numerical(NumericalType),
    Bool,
}

impl ColumnType {
    /// Encoded over 6 bits.
    pub(crate) fn to_code(self) -> u8 {
        let column_type_category;
        let numerical_type_code: u8;
        match self {
            ColumnType::Bytes => {
                column_type_category = ColumnTypeCategory::Str;
                numerical_type_code = 0u8;
            }
            ColumnType::Numerical(numerical_type) => {
                column_type_category = ColumnTypeCategory::Numerical;
                numerical_type_code = numerical_type.to_code();
            }
            ColumnType::Bool => {
                column_type_category = ColumnTypeCategory::Bool;
                numerical_type_code = 0u8;
            }
        }
        place_bits::<0, 3>(column_type_category.to_code()) | place_bits::<3, 6>(numerical_type_code)
    }

    pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
        if select_bits::<6, 8>(code) != 0u8 {
            return Err(InvalidData);
        }
        let column_type_category_code = select_bits::<0, 3>(code);
        let numerical_type_code = select_bits::<3, 6>(code);
        let column_type_category = ColumnTypeCategory::try_from_code(column_type_category_code)?;
        match column_type_category {
            ColumnTypeCategory::Bool => {
                if numerical_type_code != 0u8 {
                    return Err(InvalidData);
                }
                Ok(ColumnType::Bool)
            }
            ColumnTypeCategory::Str => {
                if numerical_type_code != 0u8 {
                    return Err(InvalidData);
                }
                Ok(ColumnType::Bytes)
            }
            ColumnTypeCategory::Numerical => {
                let numerical_type = NumericalType::try_from_code(numerical_type_code)?;
                Ok(ColumnType::Numerical(numerical_type))
            }
        }
    }
}

/// Column types are grouped into different categories that
/// correspond to the different types of `JsonValue`.
///
/// The columnar writer will apply coercion rules to make sure that
/// at most one column exists per `ColumnTypeCategory`.
///
/// See also [README.md].
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
#[repr(u8)]
pub(crate) enum ColumnTypeCategory {
    Bool = 0u8,
    Str = 1u8,
    Numerical = 2u8,
}

impl ColumnTypeCategory {
    pub fn to_code(self) -> u8 {
        self as u8
    }

    pub fn try_from_code(code: u8) -> Result<Self, InvalidData> {
        match code {
            0u8 => Ok(Self::Bool),
            1u8 => Ok(Self::Str),
            2u8 => Ok(Self::Numerical),
            _ => Err(InvalidData),
        }
    }
}

/// Represents the type and cardinality of a column.
/// This is encoded over one byte and added to a column key in the
/// columnar sstable.
///
/// - [0..6] bits: encodes the column type
/// - [6..8] bits: encodes the cardinality
#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone)]
pub struct ColumnTypeAndCardinality {
    pub typ: ColumnType,
    pub cardinality: Cardinality,
}

impl ColumnTypeAndCardinality {
    pub fn to_code(self) -> u8 {
        place_bits::<0, 6>(self.typ.to_code()) | place_bits::<6, 8>(self.cardinality.to_code())
    }

    pub fn try_from_code(code: u8) -> Result<ColumnTypeAndCardinality, InvalidData> {
        let typ_code = select_bits::<0, 6>(code);
        let cardinality_code = select_bits::<6, 8>(code);
        let cardinality = Cardinality::try_from_code(cardinality_code)?;
        let typ = ColumnType::try_from_code(typ_code)?;
        assert_eq!(typ.to_code(), typ_code);
        Ok(ColumnTypeAndCardinality { cardinality, typ })
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashSet;

    use super::ColumnTypeAndCardinality;
    use crate::column_type_header::{Cardinality, ColumnType};

    #[test]
    fn test_column_type_header_to_code() {
        let mut column_type_header_set: HashSet<ColumnTypeAndCardinality> = HashSet::new();
        for code in u8::MIN..=u8::MAX {
            if let Ok(column_type_header) = ColumnTypeAndCardinality::try_from_code(code) {
                assert_eq!(column_type_header.to_code(), code);
                assert!(column_type_header_set.insert(column_type_header));
            }
        }
        assert_eq!(
            column_type_header_set.len(),
            3 /* cardinality */ *
            (1 + 1 + 3) // column_types (str, bool, numerical x 3)
        );
    }

    #[test]
    fn test_column_type_to_code() {
        let mut column_type_set: HashSet<ColumnType> = HashSet::new();
        for code in u8::MIN..=u8::MAX {
            if let Ok(column_type) = ColumnType::try_from_code(code) {
                assert_eq!(column_type.to_code(), code);
                assert!(column_type_set.insert(column_type));
            }
        }
        assert_eq!(column_type_set.len(), 2 + 3);
    }

    #[test]
    fn test_cardinality_to_code() {
        let mut num_cardinality = 0;
        for code in u8::MIN..=u8::MAX {
            if let Ok(cardinality) = Cardinality::try_from_code(code) {
                assert_eq!(cardinality.to_code(), code);
                num_cardinality += 1;
            }
        }
        assert_eq!(num_cardinality, 3);
    }
}
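As a worked example of the two packing layers defined above (derived directly from this file's encodings, usable inside the crate's own tests): a required `u64` numerical column combines `ColumnTypeCategory::Numerical = 2` in bits [0..3), `NumericalType::U64 = 1` in bits [3..6), and `Cardinality::Required = 0` in bits [6..8):

```
let header = ColumnTypeAndCardinality {
    typ: ColumnType::Numerical(NumericalType::U64),
    cardinality: Cardinality::Required,
};
// 2 | (1 << 3) | (0 << 6) == 0b00_001_010 == 10
assert_eq!(header.to_code(), 10u8);
```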
columnar/src/dictionary.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
use std::io;

use fnv::FnvHashMap;
use sstable::SSTable;

pub(crate) struct TermIdMapping {
    unordered_to_ord: Vec<OrderedId>,
}

impl TermIdMapping {
    pub fn to_ord(&self, unordered: UnorderedId) -> OrderedId {
        self.unordered_to_ord[unordered.0 as usize]
    }
}

/// When we add values, we cannot know their ordered id yet.
/// For this reason, we temporarily assign them a `UnorderedId`
/// that will be mapped to an `OrderedId` upon serialization.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct UnorderedId(pub u32);

#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
pub struct OrderedId(pub u32);

/// `DictionaryBuilder` for dictionary encoding.
///
/// It stores the different terms encountered and assigns them a temporary value
/// we call an unordered id.
///
/// Upon serialization, we will sort the ids and hence build a `UnorderedId -> Term ordinal`
/// mapping.
#[derive(Default)]
pub(crate) struct DictionaryBuilder {
    dict: FnvHashMap<Vec<u8>, UnorderedId>,
}

impl DictionaryBuilder {
    /// Get or allocate an unordered id.
    /// (This ID is simply an auto-incremented id.)
    pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
        if let Some(term_id) = self.dict.get(term) {
            return *term_id;
        }
        let new_id = UnorderedId(self.dict.len() as u32);
        self.dict.insert(term.to_vec(), new_id);
        new_id
    }

    /// Serialize the dictionary into an sstable, and returns the
    /// `UnorderedId -> TermOrdinal` map.
    pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<TermIdMapping> {
        let mut terms: Vec<(&[u8], UnorderedId)> =
            self.dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
        terms.sort_unstable_by_key(|(key, _)| *key);
        // TODO Remove the allocation.
        let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
        let mut sstable_builder = sstable::VoidSSTable::writer(wrt);
        for (ord, (key, unordered_id)) in terms.into_iter().enumerate() {
            let ordered_id = OrderedId(ord as u32);
            sstable_builder.insert(key, &())?;
            unordered_to_ord[unordered_id.0 as usize] = ordered_id;
        }
        sstable_builder.finish()?;
        Ok(TermIdMapping { unordered_to_ord })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_dictionary_builder() {
        let mut dictionary_builder = DictionaryBuilder::default();
        let hello_uid = dictionary_builder.get_or_allocate_id(b"hello");
        let happy_uid = dictionary_builder.get_or_allocate_id(b"happy");
        let tax_uid = dictionary_builder.get_or_allocate_id(b"tax");
        let mut buffer = Vec::new();
        let id_mapping = dictionary_builder.serialize(&mut buffer).unwrap();
        assert_eq!(id_mapping.to_ord(hello_uid), OrderedId(1));
        assert_eq!(id_mapping.to_ord(happy_uid), OrderedId(0));
        assert_eq!(id_mapping.to_ord(tax_uid), OrderedId(2));
    }
}
columnar/src/lib.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
mod column_type_header;
mod dictionary;
mod reader;
pub(crate) mod utils;
mod value;
mod writer;

pub use column_type_header::Cardinality;
pub use reader::ColumnarReader;
pub use value::{NumericalType, NumericalValue};
pub use writer::ColumnarWriter;

pub type DocId = u32;

#[derive(Copy, Clone, Debug)]
pub struct InvalidData;

#[cfg(test)]
mod tests {
    use std::ops::Range;

    use common::file_slice::FileSlice;

    use crate::column_type_header::{ColumnType, ColumnTypeAndCardinality};
    use crate::reader::ColumnarReader;
    use crate::value::NumericalValue;
    use crate::{Cardinality, ColumnarWriter};

    #[test]
    fn test_dataframe_writer_bytes() {
        let mut dataframe_writer = ColumnarWriter::default();
        dataframe_writer.record_str(1u32, "my_string", "hello");
        dataframe_writer.record_str(3u32, "my_string", "helloeee");
        let mut buffer: Vec<u8> = Vec::new();
        dataframe_writer.serialize(5, &mut buffer).unwrap();
        let columnar_fileslice = FileSlice::from(buffer);
        let columnar = ColumnarReader::open(columnar_fileslice).unwrap();
        assert_eq!(columnar.num_columns(), 1);
        let cols: Vec<(ColumnTypeAndCardinality, Range<u64>)> =
            columnar.read_columns("my_string").unwrap();
        assert_eq!(cols.len(), 1);
        assert_eq!(cols[0].1, 0..158);
    }

    #[test]
    fn test_dataframe_writer_bool() {
        let mut dataframe_writer = ColumnarWriter::default();
        dataframe_writer.record_bool(1u32, "bool.value", false);
        let mut buffer: Vec<u8> = Vec::new();
        dataframe_writer.serialize(5, &mut buffer).unwrap();
        let columnar_fileslice = FileSlice::from(buffer);
        let columnar = ColumnarReader::open(columnar_fileslice).unwrap();
        assert_eq!(columnar.num_columns(), 1);
        let cols: Vec<(ColumnTypeAndCardinality, Range<u64>)> =
            columnar.read_columns("bool.value").unwrap();
        assert_eq!(cols.len(), 1);
        assert_eq!(
            cols[0].0,
            ColumnTypeAndCardinality {
                cardinality: Cardinality::Optional,
                typ: ColumnType::Bool
            }
        );
        assert_eq!(cols[0].1, 0..21);
    }

    #[test]
    fn test_dataframe_writer_numerical() {
        let mut dataframe_writer = ColumnarWriter::default();
        dataframe_writer.record_numerical(1u32, "srical.value", NumericalValue::U64(12u64));
        dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
        dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
        let mut buffer: Vec<u8> = Vec::new();
        dataframe_writer.serialize(5, &mut buffer).unwrap();
        let columnar_fileslice = FileSlice::from(buffer);
        let columnar = ColumnarReader::open(columnar_fileslice).unwrap();
        assert_eq!(columnar.num_columns(), 1);
        let cols: Vec<(ColumnTypeAndCardinality, Range<u64>)> =
            columnar.read_columns("srical.value").unwrap();
        assert_eq!(cols.len(), 1);
        // Right now these 31 bytes are spent as follows:
        //
        // - header 14 bytes
        // - vals 8 //< due to padding? could have been 1 byte?
        // - null footer 6 bytes
        // - version footer 3 bytes // Should be file-wide
        assert_eq!(cols[0].1, 0..31);
    }
}
columnar/src/reader/mod.rs (new file, 110 lines)
@@ -0,0 +1,110 @@
use std::ops::Range;
use std::{io, mem};

use common::file_slice::FileSlice;
use common::BinarySerializable;
use sstable::{Dictionary, RangeSSTable};

use crate::column_type_header::ColumnTypeAndCardinality;

fn io_invalid_data(msg: String) -> io::Error {
    io::Error::new(io::ErrorKind::InvalidData, msg)
}

/// The ColumnarReader makes it possible to access a set of columns
/// associated to field names.
pub struct ColumnarReader {
    column_dictionary: Dictionary<RangeSSTable>,
    column_data: FileSlice,
}

impl ColumnarReader {
    /// Opens a new Columnar file.
    pub fn open<F>(file_slice: F) -> io::Result<ColumnarReader>
    where FileSlice: From<F> {
        Self::open_inner(file_slice.into())
    }

    fn open_inner(file_slice: FileSlice) -> io::Result<ColumnarReader> {
        let (file_slice_without_sstable_len, sstable_len_bytes) =
            file_slice.split_from_end(mem::size_of::<u64>());
        let mut sstable_len_bytes = sstable_len_bytes.read_bytes()?;
        let sstable_len = u64::deserialize(&mut sstable_len_bytes)?;
        let (column_data, sstable) =
            file_slice_without_sstable_len.split_from_end(sstable_len as usize);
        let column_dictionary = Dictionary::open(sstable)?;
        Ok(ColumnarReader {
            column_dictionary,
            column_data,
        })
    }

    // TODO fix ugly API
    pub fn list_columns(
        &self,
    ) -> io::Result<Vec<(String, ColumnTypeAndCardinality, Range<u64>, u64)>> {
        let mut stream = self.column_dictionary.stream()?;
        let mut results = Vec::new();
        while stream.advance() {
            let key_bytes: &[u8] = stream.key();
            let column_code: u8 = key_bytes.last().cloned().unwrap();
            let column_type_and_cardinality = ColumnTypeAndCardinality::try_from_code(column_code)
                .map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
            let range = stream.value().clone();
            let column_name = String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 1]);
            let range_len = range.end - range.start;
            results.push((
                column_name.to_string(),
                column_type_and_cardinality,
                range,
                range_len,
            ));
        }
        Ok(results)
    }

    /// Get all columns for the given column name.
    ///
    /// There can be more than one column associated to a given column name, provided they have
    /// different types.
    // TODO fix ugly API
    pub fn read_columns(
        &self,
        column_name: &str,
    ) -> io::Result<Vec<(ColumnTypeAndCardinality, Range<u64>)>> {
        // Each column is associated with a given `column_key`,
        // that starts with `column_name\0column_header`.
        //
        // Listing the columns associated to the given column name is therefore equivalent to
        // listing `column_key` with the prefix `column_name\0`.
        //
        // This is in turn equivalent to searching for the range
        // `[column_name\0 .. column_name\1)`.
        let mut start_key = column_name.to_string();
        start_key.push('\0');
        let mut end_key = column_name.to_string();
        end_key.push(1u8 as char);
        let mut stream = self
            .column_dictionary
            .range()
            .ge(start_key.as_bytes())
            .lt(end_key.as_bytes())
            .into_stream()?;
        let mut results = Vec::new();
        while stream.advance() {
            let key_bytes: &[u8] = stream.key();
            assert!(key_bytes.starts_with(start_key.as_bytes()));
            let column_code: u8 = key_bytes.last().cloned().unwrap();
            let column_type_and_cardinality = ColumnTypeAndCardinality::try_from_code(column_code)
                .map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
            let range = stream.value().clone();
            results.push((column_type_and_cardinality, range));
        }
        Ok(results)
    }

    /// Return the number of columns in the columnar.
    pub fn num_columns(&self) -> usize {
        self.column_dictionary.num_terms()
    }
}
columnar/src/utils.rs (new file, 76 lines)
@@ -0,0 +1,76 @@
const fn compute_mask(num_bits: u8) -> u8 {
    if num_bits == 8 {
        u8::MAX
    } else {
        (1u8 << num_bits) - 1
    }
}

#[inline(always)]
#[must_use]
pub(crate) fn select_bits<const START: u8, const END: u8>(code: u8) -> u8 {
    assert!(START <= END);
    assert!(END <= 8);
    let num_bits: u8 = END - START;
    let mask: u8 = compute_mask(num_bits);
    (code >> START) & mask
}

#[inline(always)]
#[must_use]
pub(crate) fn place_bits<const START: u8, const END: u8>(code: u8) -> u8 {
    assert!(START <= END);
    assert!(END <= 8);
    let num_bits: u8 = END - START;
    let mask: u8 = compute_mask(num_bits);
    assert!(code <= mask);
    code << START
}

/// Pops the first byte off a slice of bytes, advancing the slice.
#[inline(always)]
pub fn pop_first_byte(bytes: &mut &[u8]) -> Option<u8> {
    if bytes.is_empty() {
        return None;
    }
    let first_byte = bytes[0];
    *bytes = &bytes[1..];
    Some(first_byte)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_select_bits() {
        assert_eq!(255u8, select_bits::<0, 8>(255u8));
        assert_eq!(0u8, select_bits::<0, 0>(255u8));
        assert_eq!(8u8, select_bits::<0, 4>(8u8));
        assert_eq!(4u8, select_bits::<1, 4>(8u8));
        assert_eq!(0u8, select_bits::<1, 3>(8u8));
    }

    #[test]
    fn test_place_bits() {
        assert_eq!(255u8, place_bits::<0, 8>(255u8));
        assert_eq!(4u8, place_bits::<2, 3>(1u8));
        assert_eq!(0u8, place_bits::<2, 2>(0u8));
    }

    #[test]
    #[should_panic]
    fn test_place_bits_overflows() {
        let _ = place_bits::<1, 4>(8u8);
    }

    #[test]
    fn test_pop_first_byte() {
        let mut cursor: &[u8] = &b"abcd"[..];
        assert_eq!(pop_first_byte(&mut cursor), Some(b'a'));
        assert_eq!(pop_first_byte(&mut cursor), Some(b'b'));
        assert_eq!(pop_first_byte(&mut cursor), Some(b'c'));
        assert_eq!(pop_first_byte(&mut cursor), Some(b'd'));
        assert_eq!(pop_first_byte(&mut cursor), None);
    }
}
columnar/src/value.rs (new file, 124 lines)
@@ -0,0 +1,124 @@
use crate::InvalidData;

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum NumericalValue {
    I64(i64),
    U64(u64),
    F64(f64),
}

impl From<u64> for NumericalValue {
    fn from(val: u64) -> NumericalValue {
        NumericalValue::U64(val)
    }
}

impl From<i64> for NumericalValue {
    fn from(val: i64) -> Self {
        NumericalValue::I64(val)
    }
}

impl From<f64> for NumericalValue {
    fn from(val: f64) -> Self {
        NumericalValue::F64(val)
    }
}

impl NumericalValue {
    pub fn numerical_type(&self) -> NumericalType {
        match self {
            NumericalValue::F64(_) => NumericalType::F64,
            NumericalValue::I64(_) => NumericalType::I64,
            NumericalValue::U64(_) => NumericalType::U64,
        }
    }
}

impl Eq for NumericalValue {}

#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]
#[repr(u8)]
pub enum NumericalType {
    #[default]
    I64 = 0,
    U64 = 1,
    F64 = 2,
}

impl NumericalType {
    pub fn to_code(self) -> u8 {
        self as u8
    }

    pub fn try_from_code(code: u8) -> Result<NumericalType, InvalidData> {
        match code {
            0 => Ok(NumericalType::I64),
            1 => Ok(NumericalType::U64),
            2 => Ok(NumericalType::F64),
            _ => Err(InvalidData),
        }
    }
}

/// We voluntarily avoid using `Into` here to keep this
/// implementation quirk as private as possible.
///
/// # Panics
/// This coercion trait actually panics if it is used
/// to convert a loose type to a stricter type.
///
/// The level of strictness is somewhat arbitrary:
/// - i64
/// - u64
/// - f64.
pub(crate) trait Coerce {
    fn coerce(numerical_value: NumericalValue) -> Self;
}

impl Coerce for i64 {
    fn coerce(value: NumericalValue) -> Self {
        match value {
            NumericalValue::I64(val) => val,
            NumericalValue::U64(val) => val as i64,
            NumericalValue::F64(_) => unreachable!(),
        }
    }
}

impl Coerce for u64 {
    fn coerce(value: NumericalValue) -> Self {
        match value {
            NumericalValue::I64(val) => val as u64,
            NumericalValue::U64(val) => val,
            NumericalValue::F64(_) => unreachable!(),
        }
    }
}

impl Coerce for f64 {
    fn coerce(value: NumericalValue) -> Self {
        match value {
            NumericalValue::I64(val) => val as f64,
            NumericalValue::U64(val) => val as f64,
            NumericalValue::F64(val) => val,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::NumericalType;

    #[test]
    fn test_numerical_type_code() {
        let mut num_numerical_type = 0;
        for code in u8::MIN..=u8::MAX {
            if let Ok(numerical_type) = NumericalType::try_from_code(code) {
                assert_eq!(numerical_type.to_code(), code);
                num_numerical_type += 1;
            }
        }
        assert_eq!(num_numerical_type, 3);
    }
}
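A short in-crate usage sketch of the `Coerce` trait above (it is `pub(crate)`, so this is only meaningful inside the crate; values follow the strictness order stated in the doc comment):

```
// Coercing towards a looser type always succeeds:
let as_u64: u64 = u64::coerce(NumericalValue::I64(5));  // -> 5
let as_f64: f64 = f64::coerce(NumericalValue::U64(12)); // -> 12.0
// Coercing a float into an integer type hits the unreachable!() arm;
// the writer only calls Coerce after picking a type that can represent
// every appended value, so this never happens in practice:
// i64::coerce(NumericalValue::F64(1.5)) would panic.
```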
columnar/src/writer/column_operation.rs (new file, 346 lines)
@@ -0,0 +1,346 @@
use crate::dictionary::UnorderedId;
use crate::utils::{place_bits, pop_first_byte, select_bits};
use crate::value::NumericalValue;
use crate::{DocId, InvalidData, NumericalType};

/// When we build a columnar dataframe, we first just group
/// all mutations per column, and append them to an append-only buffer
/// in the stacker.
///
/// These `ColumnOperation<T>` are therefore serialized/deserialized
/// in memory.
///
/// We represent all of these operations as `ColumnOperation`.
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
pub(super) enum ColumnOperation<T> {
    NewDoc(DocId),
    Value(T),
}

#[derive(Copy, Clone, Eq, PartialEq, Debug)]
struct ColumnOperationMetadata {
    op_type: ColumnOperationType,
    len: u8,
}

impl ColumnOperationMetadata {
    fn to_code(self) -> u8 {
        place_bits::<0, 4>(self.len) | place_bits::<4, 8>(self.op_type.to_code())
    }

    fn try_from_code(code: u8) -> Result<Self, InvalidData> {
        let len = select_bits::<0, 4>(code);
        let typ_code = select_bits::<4, 8>(code);
        let column_type = ColumnOperationType::try_from_code(typ_code)?;
        Ok(ColumnOperationMetadata {
            op_type: column_type,
            len,
        })
    }
}

#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[repr(u8)]
enum ColumnOperationType {
    NewDoc = 0u8,
    AddValue = 1u8,
}

impl ColumnOperationType {
    pub fn to_code(self) -> u8 {
        self as u8
    }

    pub fn try_from_code(code: u8) -> Result<Self, InvalidData> {
        match code {
            0 => Ok(Self::NewDoc),
            1 => Ok(Self::AddValue),
            _ => Err(InvalidData),
        }
    }
}

impl<V: SymbolValue> ColumnOperation<V> {
    pub(super) fn serialize(self) -> impl AsRef<[u8]> {
        let mut minibuf = MiniBuffer::default();
        let column_op_metadata = match self {
            ColumnOperation::NewDoc(new_doc) => {
                let symbol_len = new_doc.serialize(&mut minibuf.bytes[1..]);
                ColumnOperationMetadata {
                    op_type: ColumnOperationType::NewDoc,
                    len: symbol_len,
                }
            }
            ColumnOperation::Value(val) => {
                let symbol_len = val.serialize(&mut minibuf.bytes[1..]);
                ColumnOperationMetadata {
                    op_type: ColumnOperationType::AddValue,
                    len: symbol_len,
                }
            }
        };
        minibuf.bytes[0] = column_op_metadata.to_code();
        // +1 for the metadata
        minibuf.len = 1 + column_op_metadata.len;
        minibuf
    }

    /// Deserialize a column operation.
    /// Returns None if the buffer is empty.
    ///
    /// Panics if the payload is invalid:
    /// this deserialize method is meant for in-memory data.
    pub(super) fn deserialize(bytes: &mut &[u8]) -> Option<Self> {
        let column_op_metadata_byte = pop_first_byte(bytes)?;
        let column_op_metadata = ColumnOperationMetadata::try_from_code(column_op_metadata_byte)
            .expect("Invalid op metadata byte");
        let symbol_bytes: &[u8];
        (symbol_bytes, *bytes) = bytes.split_at(column_op_metadata.len as usize);
        match column_op_metadata.op_type {
            ColumnOperationType::NewDoc => {
                let new_doc = u32::deserialize(symbol_bytes);
                Some(ColumnOperation::NewDoc(new_doc))
            }
            ColumnOperationType::AddValue => {
                let value = V::deserialize(symbol_bytes);
                Some(ColumnOperation::Value(value))
            }
        }
    }
}

impl<T> From<T> for ColumnOperation<T> {
    fn from(value: T) -> Self {
        ColumnOperation::Value(value)
    }
}

// Serialization trait very local to the writer.
// As we write fast fields, we accumulate them in memory.
// In order to limit memory usage, and in order
// to benefit from the stacker, we do this by serializing our data
// as "Symbols".
#[allow(clippy::from_over_into)]
pub(super) trait SymbolValue: Clone + Copy {
    // Serializes the symbol into the given buffer.
    // Returns the number of bytes written into the buffer.
    /// # Panics
    /// May not exceed 9 bytes.
    fn serialize(self, buffer: &mut [u8]) -> u8;
    // Panics if invalid.
    fn deserialize(bytes: &[u8]) -> Self;
}

impl SymbolValue for bool {
    fn serialize(self, buffer: &mut [u8]) -> u8 {
        buffer[0] = u8::from(self);
        1u8
    }

    fn deserialize(bytes: &[u8]) -> Self {
        bytes[0] == 1u8
    }
}

#[derive(Default)]
struct MiniBuffer {
    pub bytes: [u8; 10],
    pub len: u8,
}

impl AsRef<[u8]> for MiniBuffer {
    fn as_ref(&self) -> &[u8] {
        &self.bytes[..self.len as usize]
    }
}

impl SymbolValue for NumericalValue {
    fn deserialize(mut bytes: &[u8]) -> Self {
        let type_code = pop_first_byte(&mut bytes).unwrap();
        let symbol_type = NumericalType::try_from_code(type_code).unwrap();
        let mut octet: [u8; 8] = [0u8; 8];
        octet[..bytes.len()].copy_from_slice(bytes);
        match symbol_type {
            NumericalType::U64 => {
                let val: u64 = u64::from_le_bytes(octet);
                NumericalValue::U64(val)
            }
            NumericalType::I64 => {
                let encoded: u64 = u64::from_le_bytes(octet);
                let val: i64 = decode_zig_zag(encoded);
                NumericalValue::I64(val)
            }
            NumericalType::F64 => {
                debug_assert_eq!(bytes.len(), 8);
                let val: f64 = f64::from_le_bytes(octet);
                NumericalValue::F64(val)
            }
        }
    }

    /// F64: Serialized with a fixed size of 9 bytes.
    /// U64: Serialized without leading zeroes.
    /// I64: ZigZag encoded and serialized without leading zeroes.
    fn serialize(self, output: &mut [u8]) -> u8 {
        match self {
            NumericalValue::F64(val) => {
                output[0] = NumericalType::F64 as u8;
                output[1..9].copy_from_slice(&val.to_le_bytes());
                9u8
            }
            NumericalValue::U64(val) => {
                let len = compute_num_bytes_for_u64(val) as u8;
                output[0] = NumericalType::U64 as u8;
                output[1..9].copy_from_slice(&val.to_le_bytes());
                len + 1u8
            }
            NumericalValue::I64(val) => {
                let zig_zag_encoded = encode_zig_zag(val);
                let len = compute_num_bytes_for_u64(zig_zag_encoded) as u8;
                output[0] = NumericalType::I64 as u8;
                output[1..9].copy_from_slice(&zig_zag_encoded.to_le_bytes());
                len + 1u8
            }
        }
    }
}

impl SymbolValue for u32 {
    fn serialize(self, output: &mut [u8]) -> u8 {
        let len = compute_num_bytes_for_u64(self as u64);
        output[0..4].copy_from_slice(&self.to_le_bytes());
        len as u8
    }

    fn deserialize(bytes: &[u8]) -> Self {
        let mut quartet: [u8; 4] = [0u8; 4];
        quartet[..bytes.len()].copy_from_slice(bytes);
        u32::from_le_bytes(quartet)
    }
}

impl SymbolValue for UnorderedId {
    fn serialize(self, output: &mut [u8]) -> u8 {
        self.0.serialize(output)
    }

    fn deserialize(bytes: &[u8]) -> Self {
        UnorderedId(u32::deserialize(bytes))
    }
}

/// Number of bytes needed to store `val`, with leading zero bytes trimmed.
fn compute_num_bytes_for_u64(val: u64) -> usize {
    let msb = (64u32 - val.leading_zeros()) as usize;
    (msb + 7) / 8
}

/// Maps i64 to u64 so that values of small magnitude get small codes:
/// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
fn encode_zig_zag(n: i64) -> u64 {
    ((n << 1) ^ (n >> 63)) as u64
}

/// Inverse of `encode_zig_zag`.
fn decode_zig_zag(n: u64) -> i64 {
    ((n >> 1) as i64) ^ (-((n & 1) as i64))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[track_caller]
    fn test_zig_zag_aux(val: i64) {
        let encoded = super::encode_zig_zag(val);
        assert_eq!(decode_zig_zag(encoded), val);
        if let Some(abs_val) = val.checked_abs() {
            let abs_val = abs_val as u64;
            assert!(encoded <= abs_val * 2);
        }
    }

    #[test]
    fn test_zig_zag() {
        assert_eq!(encode_zig_zag(0i64), 0u64);
        assert_eq!(encode_zig_zag(-1i64), 1u64);
        assert_eq!(encode_zig_zag(1i64), 2u64);
        test_zig_zag_aux(0i64);
        test_zig_zag_aux(i64::MIN);
        test_zig_zag_aux(i64::MAX);
    }

    use proptest::prelude::any;
    use proptest::proptest;

    proptest! {
        #[test]
        fn test_proptest_zig_zag(val in any::<i64>()) {
            test_zig_zag_aux(val);
        }
    }

    #[test]
    fn test_column_op_metadata_byte_serialization() {
        for len in 0..=15 {
            for op_type in [ColumnOperationType::AddValue, ColumnOperationType::NewDoc] {
                let column_op_metadata = ColumnOperationMetadata { op_type, len };
                let column_op_metadata_code = column_op_metadata.to_code();
                let serdeser_metadata =
                    ColumnOperationMetadata::try_from_code(column_op_metadata_code).unwrap();
                assert_eq!(column_op_metadata, serdeser_metadata);
            }
        }
    }

    #[track_caller]
    fn ser_deser_symbol(column_op: ColumnOperation<NumericalValue>) {
        let buf = column_op.serialize();
        let mut buffer = buf.as_ref().to_vec();
        buffer.extend_from_slice(b"234234");
        let mut bytes = &buffer[..];
        let serdeser_symbol = ColumnOperation::deserialize(&mut bytes).unwrap();
        assert_eq!(bytes.len() + buf.as_ref().len(), buffer.len());
        assert_eq!(column_op, serdeser_symbol);
    }

    #[test]
    fn test_compute_num_bytes_for_u64() {
        assert_eq!(compute_num_bytes_for_u64(0), 0);
        assert_eq!(compute_num_bytes_for_u64(1), 1);
        assert_eq!(compute_num_bytes_for_u64(255), 1);
        assert_eq!(compute_num_bytes_for_u64(256), 2);
        assert_eq!(compute_num_bytes_for_u64((1 << 16) - 1), 2);
        assert_eq!(compute_num_bytes_for_u64(1 << 16), 3);
    }

    #[test]
    fn test_symbol_serialization() {
        ser_deser_symbol(ColumnOperation::NewDoc(0));
        ser_deser_symbol(ColumnOperation::NewDoc(3));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(0i64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(1i64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(257u64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(-257i64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::I64(i64::MIN)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(0u64)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MIN)));
        ser_deser_symbol(ColumnOperation::Value(NumericalValue::U64(u64::MAX)));
    }

    fn test_column_operation_unordered_aux(val: u32, expected_len: usize) {
        let column_op = ColumnOperation::Value(UnorderedId(val));
        let minibuf = column_op.serialize();
        assert_eq!(minibuf.as_ref().len(), expected_len);
        let mut buf = minibuf.as_ref().to_vec();
        buf.extend_from_slice(&[2, 2, 2, 2, 2, 2]);
        let mut cursor = &buf[..];
        let column_op_serdeser: ColumnOperation<UnorderedId> =
            ColumnOperation::deserialize(&mut cursor).unwrap();
        assert_eq!(column_op_serdeser, ColumnOperation::Value(UnorderedId(val)));
        assert_eq!(cursor.len() + expected_len, buf.len());
    }

    #[test]
    fn test_column_operation_unordered() {
        test_column_operation_unordered_aux(300u32, 3);
        test_column_operation_unordered_aux(1u32, 2);
        test_column_operation_unordered_aux(0u32, 1);
    }
}

columnar/src/writer/column_writers.rs (new file, 265 lines)
@@ -0,0 +1,265 @@
use std::cmp::Ordering;

use stacker::{ExpUnrolledLinkedList, MemoryArena};

use crate::dictionary::{DictionaryBuilder, UnorderedId};
use crate::writer::column_operation::{ColumnOperation, SymbolValue};
use crate::{Cardinality, DocId, NumericalType, NumericalValue};

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
enum DocumentStep {
    Same = 0,
    Next = 1,
    Skipped = 2,
}

#[inline(always)]
fn delta_with_last_doc(last_doc_opt: Option<u32>, doc: u32) -> DocumentStep {
    let expected_next_doc = last_doc_opt.map(|last_doc| last_doc + 1).unwrap_or(0u32);
    match doc.cmp(&expected_next_doc) {
        Ordering::Less => DocumentStep::Same,
        Ordering::Equal => DocumentStep::Next,
        Ordering::Greater => DocumentStep::Skipped,
    }
}
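// Added note (illustrative, not in the original diff): `Same` means the value
// belongs to the same doc as the previous one (evidence of a multivalued
// column), `Next` means docs are advancing densely, and `Skipped` means at
// least one doc in between carried no value, so the column is at least
// `Optional`. E.g. recording docs 0, 0, 3 yields Next, Same, Skipped.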

#[derive(Copy, Clone, Default)]
pub struct ColumnWriter {
    // Detected cardinality of the column so far.
    cardinality: Cardinality,
    // Last document inserted.
    // None if no doc has been added yet.
    last_doc_opt: Option<u32>,
    // Buffer containing the serialized values.
    values: ExpUnrolledLinkedList,
}

impl ColumnWriter {
    /// Returns an iterator over the symbols that have been recorded
    /// for the given column.
    pub(super) fn operation_iterator<'a, V: SymbolValue>(
        &self,
        arena: &MemoryArena,
        buffer: &'a mut Vec<u8>,
    ) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
        buffer.clear();
        self.values.read_to_end(arena, buffer);
        let mut cursor: &[u8] = &buffer[..];
        std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
    }

    /// Records a value for the given document.
    ///
    /// This function will also update the cardinality of the column
    /// if necessary.
    pub(super) fn record<S: SymbolValue>(&mut self, doc: DocId, value: S, arena: &mut MemoryArena) {
        // Difference between `doc` and the last doc.
        match delta_with_last_doc(self.last_doc_opt, doc) {
            DocumentStep::Same => {
                // This doc is the same as the last encountered document:
                // the column is multivalued.
                self.cardinality = Cardinality::Multivalued;
            }
            DocumentStep::Next => {
                self.last_doc_opt = Some(doc);
                self.write_symbol::<S>(ColumnOperation::NewDoc(doc), arena);
            }
            DocumentStep::Skipped => {
                self.cardinality = self.cardinality.max(Cardinality::Optional);
                self.last_doc_opt = Some(doc);
                self.write_symbol::<S>(ColumnOperation::NewDoc(doc), arena);
            }
        }
        self.write_symbol(ColumnOperation::Value(value), arena);
    }

    // Returns the cardinality of the column.
    // The overall number of docs in the column is necessary to
    // deal with the case where all docs contain exactly one value, except for
    // some docs missing at the end of the column.
    pub(crate) fn get_cardinality(&self, num_docs: DocId) -> Cardinality {
        match delta_with_last_doc(self.last_doc_opt, num_docs) {
            DocumentStep::Same | DocumentStep::Next => self.cardinality,
            DocumentStep::Skipped => self.cardinality.max(Cardinality::Optional),
        }
    }
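    // Added note (illustrative, not in the original diff): passing `num_docs`
    // here acts like probing a virtual "one past the end" doc. If only doc 0
    // received a value but `num_docs` is 2, the step is `Skipped` and the
    // cardinality is bumped to `Optional`, as exercised by
    // `test_column_writer_optional_cardinality_missing_last` below.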

    /// Appends a new symbol to the `ColumnWriter`.
    fn write_symbol<V: SymbolValue>(
        &mut self,
        column_operation: ColumnOperation<V>,
        arena: &mut MemoryArena,
    ) {
        self.values
            .writer(arena)
            .extend_from_slice(column_operation.serialize().as_ref());
    }
}

#[derive(Clone, Copy, Default)]
pub(crate) struct NumericalColumnWriter {
    compatible_numerical_types: CompatibleNumericalTypes,
    column_writer: ColumnWriter,
}

/// State used to store which numerical types are still acceptable
/// after having seen a set of numerical values.
#[derive(Clone, Copy)]
struct CompatibleNumericalTypes {
    all_values_within_i64_range: bool,
    all_values_within_u64_range: bool,
    // f64 is always acceptable.
}

impl Default for CompatibleNumericalTypes {
    fn default() -> CompatibleNumericalTypes {
        CompatibleNumericalTypes {
            all_values_within_i64_range: true,
            all_values_within_u64_range: true,
        }
    }
}

impl CompatibleNumericalTypes {
    fn accept_value(&mut self, numerical_value: NumericalValue) {
        match numerical_value {
            NumericalValue::I64(val_i64) => {
                let value_within_u64_range = val_i64 >= 0i64;
                self.all_values_within_u64_range &= value_within_u64_range;
            }
            NumericalValue::U64(val_u64) => {
                let value_within_i64_range = val_u64 < i64::MAX as u64;
                self.all_values_within_i64_range &= value_within_i64_range;
            }
            NumericalValue::F64(_) => {
                self.all_values_within_i64_range = false;
                self.all_values_within_u64_range = false;
            }
        }
    }

    pub fn to_numerical_type(self) -> NumericalType {
        if self.all_values_within_i64_range {
            NumericalType::I64
        } else if self.all_values_within_u64_range {
            NumericalType::U64
        } else {
            NumericalType::F64
        }
    }
}
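// A minimal sketch (added for illustration, not part of the original diff) of
// the resulting coercion rules; the cases mirror `test_column_writer_coercion`
// below: I64 is preferred, U64 is used once a value above the i64 range has
// been seen, and F64 is the fallback when neither integer type fits all values.
#[cfg(test)]
mod coercion_sketch {
    use super::CompatibleNumericalTypes;
    use crate::{NumericalType, NumericalValue};

    #[test]
    fn mixing_a_huge_u64_with_a_negative_i64_degrades_to_f64() {
        let mut compatible = CompatibleNumericalTypes::default();
        compatible.accept_value(NumericalValue::U64(u64::MAX));
        compatible.accept_value(NumericalValue::I64(-1i64));
        assert_eq!(compatible.to_numerical_type(), NumericalType::F64);
    }
}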

impl NumericalColumnWriter {
    pub fn column_type_and_cardinality(&self, num_docs: DocId) -> (NumericalType, Cardinality) {
        let numerical_type = self.compatible_numerical_types.to_numerical_type();
        let cardinality = self.column_writer.get_cardinality(num_docs);
        (numerical_type, cardinality)
    }

    pub fn record_numerical_value(
        &mut self,
        doc: DocId,
        value: NumericalValue,
        arena: &mut MemoryArena,
    ) {
        self.compatible_numerical_types.accept_value(value);
        self.column_writer.record(doc, value, arena);
    }

    pub(super) fn operation_iterator<'a>(
        self,
        arena: &MemoryArena,
        buffer: &'a mut Vec<u8>,
    ) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
        self.column_writer.operation_iterator(arena, buffer)
    }
}

#[derive(Copy, Clone, Default)]
pub(crate) struct StrColumnWriter {
    pub(crate) dictionary_id: u32,
    pub(crate) column_writer: ColumnWriter,
}

impl StrColumnWriter {
    pub(crate) fn with_dictionary_id(dictionary_id: u32) -> StrColumnWriter {
        StrColumnWriter {
            dictionary_id,
            column_writer: Default::default(),
        }
    }

    pub(crate) fn record_bytes(
        &mut self,
        doc: DocId,
        bytes: &[u8],
        dictionaries: &mut [DictionaryBuilder],
        arena: &mut MemoryArena,
    ) {
        let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
        self.column_writer.record(doc, unordered_id, arena);
    }

    pub(super) fn operation_iterator<'a>(
        &self,
        arena: &MemoryArena,
        byte_buffer: &'a mut Vec<u8>,
    ) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
        self.column_writer.operation_iterator(arena, byte_buffer)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_delta_with_last_doc() {
        assert_eq!(delta_with_last_doc(None, 0u32), DocumentStep::Next);
        assert_eq!(delta_with_last_doc(None, 1u32), DocumentStep::Skipped);
        assert_eq!(delta_with_last_doc(None, 2u32), DocumentStep::Skipped);
        assert_eq!(delta_with_last_doc(Some(0u32), 0u32), DocumentStep::Same);
        assert_eq!(delta_with_last_doc(Some(1u32), 1u32), DocumentStep::Same);
        assert_eq!(delta_with_last_doc(Some(1u32), 2u32), DocumentStep::Next);
        assert_eq!(delta_with_last_doc(Some(1u32), 3u32), DocumentStep::Skipped);
        assert_eq!(delta_with_last_doc(Some(1u32), 4u32), DocumentStep::Skipped);
    }

    #[track_caller]
    fn test_column_writer_coercion_iter_aux(
        values: impl Iterator<Item = NumericalValue>,
        expected_numerical_type: NumericalType,
    ) {
        let mut compatible_numerical_types = CompatibleNumericalTypes::default();
        for value in values {
            compatible_numerical_types.accept_value(value);
        }
        assert_eq!(
            compatible_numerical_types.to_numerical_type(),
            expected_numerical_type
        );
    }

    #[track_caller]
    fn test_column_writer_coercion_aux(
        values: &[NumericalValue],
        expected_numerical_type: NumericalType,
    ) {
        test_column_writer_coercion_iter_aux(values.iter().copied(), expected_numerical_type);
        test_column_writer_coercion_iter_aux(values.iter().rev().copied(), expected_numerical_type);
    }

    #[test]
    fn test_column_writer_coercion() {
        test_column_writer_coercion_aux(&[], NumericalType::I64);
        test_column_writer_coercion_aux(&[1i64.into()], NumericalType::I64);
        test_column_writer_coercion_aux(&[1u64.into()], NumericalType::I64);
        // We don't detect exact integers at the moment. We could!
        test_column_writer_coercion_aux(&[1f64.into()], NumericalType::F64);
        test_column_writer_coercion_aux(&[u64::MAX.into()], NumericalType::U64);
        test_column_writer_coercion_aux(&[(i64::MAX as u64).into()], NumericalType::U64);
        test_column_writer_coercion_aux(&[(1u64 << 63).into()], NumericalType::U64);
        test_column_writer_coercion_aux(&[1i64.into(), 1u64.into()], NumericalType::I64);
        test_column_writer_coercion_aux(&[u64::MAX.into(), (-1i64).into()], NumericalType::F64);
    }
}

columnar/src/writer/mod.rs (new file, 516 lines)
@@ -0,0 +1,516 @@
mod column_operation;
mod column_writers;
mod serializer;
mod value_index;

use std::io;

use column_operation::ColumnOperation;
use common::CountingWriter;
use fastfield_codecs::serialize::ValueIndexInfo;
use fastfield_codecs::{Column, MonotonicallyMappableToU64, VecColumn};
use serializer::ColumnarSerializer;
use stacker::{Addr, ArenaHashMap, MemoryArena};

use crate::column_type_header::{ColumnType, ColumnTypeAndCardinality, ColumnTypeCategory};
use crate::dictionary::{DictionaryBuilder, TermIdMapping, UnorderedId};
use crate::value::{Coerce, NumericalType, NumericalValue};
use crate::writer::column_writers::{ColumnWriter, NumericalColumnWriter, StrColumnWriter};
use crate::writer::value_index::{IndexBuilder, SpareIndexBuilders};
use crate::{Cardinality, DocId};

/// A set of buffers used to temporarily write values into, before passing them
/// to the fast field codecs.
#[derive(Default)]
struct SpareBuffers {
    value_index_builders: SpareIndexBuilders,
    i64_values: Vec<i64>,
    u64_values: Vec<u64>,
    f64_values: Vec<f64>,
    bool_values: Vec<bool>,
}

/// Makes it possible to create a new columnar.
///
/// ```rust
/// use tantivy_columnar::ColumnarWriter;
///
/// let mut columnar_writer = ColumnarWriter::default();
/// columnar_writer.record_str(0u32 /* doc id */, "product_name", "Red backpack");
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10u64);
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh, we ended up mixing integers and floats.
/// let mut wrt: Vec<u8> = Vec::new();
/// columnar_writer.serialize(2u32, &mut wrt).unwrap();
/// ```
pub struct ColumnarWriter {
    numerical_field_hash_map: ArenaHashMap,
    bool_field_hash_map: ArenaHashMap,
    bytes_field_hash_map: ArenaHashMap,
    arena: MemoryArena,
    // Dictionaries used to store dictionary-encoded values.
    dictionaries: Vec<DictionaryBuilder>,
    buffers: SpareBuffers,
}

impl Default for ColumnarWriter {
    fn default() -> Self {
        ColumnarWriter {
            numerical_field_hash_map: ArenaHashMap::new(10_000),
            bool_field_hash_map: ArenaHashMap::new(10_000),
            bytes_field_hash_map: ArenaHashMap::new(10_000),
            dictionaries: Vec::new(),
            arena: MemoryArena::default(),
            buffers: SpareBuffers::default(),
        }
    }
}

impl ColumnarWriter {
    pub fn record_numerical<T: Into<NumericalValue> + Copy>(
        &mut self,
        doc: DocId,
        column_name: &str,
        numerical_value: T,
    ) {
        assert!(
            !column_name.as_bytes().contains(&0u8),
            "key may not contain the 0 byte"
        );
        let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
        hash_map.mutate_or_create(
            column_name.as_bytes(),
            |column_opt: Option<NumericalColumnWriter>| {
                let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
                column.record_numerical_value(doc, numerical_value.into(), arena);
                column
            },
        );
    }

    pub fn record_bool(&mut self, doc: DocId, column_name: &str, val: bool) {
        assert!(
            !column_name.as_bytes().contains(&0u8),
            "key may not contain the 0 byte"
        );
        let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
        hash_map.mutate_or_create(
            column_name.as_bytes(),
            |column_opt: Option<ColumnWriter>| {
                let mut column: ColumnWriter = column_opt.unwrap_or_default();
                column.record(doc, val, arena);
                column
            },
        );
    }

    pub fn record_str(&mut self, doc: DocId, column_name: &str, value: &str) {
        assert!(
            !column_name.as_bytes().contains(&0u8),
            "key may not contain the 0 byte"
        );
        let (hash_map, arena, dictionaries) = (
            &mut self.bytes_field_hash_map,
            &mut self.arena,
            &mut self.dictionaries,
        );
        hash_map.mutate_or_create(
            column_name.as_bytes(),
            |column_opt: Option<StrColumnWriter>| {
                let mut column: StrColumnWriter = column_opt.unwrap_or_else(|| {
                    // Each column has its own dictionary.
                    let dictionary_id = dictionaries.len() as u32;
                    dictionaries.push(DictionaryBuilder::default());
                    StrColumnWriter::with_dictionary_id(dictionary_id)
                });
                column.record_bytes(doc, value.as_bytes(), dictionaries, arena);
                column
            },
        );
    }
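    // Added note (illustrative, not in the original diff): each column writer
    // lives in an arena hash map keyed by the column name. `mutate_or_create`
    // either updates the existing writer for that name or creates a fresh one;
    // for string columns, the first call also allocates the column's own
    // `DictionaryBuilder`.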

    pub fn serialize(&mut self, num_docs: DocId, wrt: &mut dyn io::Write) -> io::Result<()> {
        let mut serializer = ColumnarSerializer::new(wrt);
        let mut field_columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
            .numerical_field_hash_map
            .iter()
            .map(|(term, addr, _)| (term, ColumnTypeCategory::Numerical, addr))
            .collect();
        field_columns.extend(
            self.bytes_field_hash_map
                .iter()
                .map(|(term, addr, _)| (term, ColumnTypeCategory::Str, addr)),
        );
        field_columns.extend(
            self.bool_field_hash_map
                .iter()
                .map(|(term, addr, _)| (term, ColumnTypeCategory::Bool, addr)),
        );
        field_columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
        let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
        let mut symbol_byte_buffer: Vec<u8> = Vec::new();
        for (column_name, bytes_or_numerical, addr) in field_columns {
            match bytes_or_numerical {
                ColumnTypeCategory::Bool => {
                    let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
                    let cardinality = column_writer.get_cardinality(num_docs);
                    let column_type_and_cardinality = ColumnTypeAndCardinality {
                        cardinality,
                        typ: ColumnType::Bool,
                    };
                    let mut column_serializer =
                        serializer.serialize_column(column_name, column_type_and_cardinality);
                    serialize_bool_column(
                        cardinality,
                        num_docs,
                        column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
                        buffers,
                        &mut column_serializer,
                    )?;
                }
                ColumnTypeCategory::Str => {
                    let str_column_writer: StrColumnWriter = self.bytes_field_hash_map.read(addr);
                    let dictionary_builder =
                        &dictionaries[str_column_writer.dictionary_id as usize];
                    let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
                    let column_type_and_cardinality = ColumnTypeAndCardinality {
                        cardinality,
                        typ: ColumnType::Bytes,
                    };
                    let mut column_serializer =
                        serializer.serialize_column(column_name, column_type_and_cardinality);
                    serialize_bytes_column(
                        cardinality,
                        num_docs,
                        dictionary_builder,
                        str_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
                        buffers,
                        &mut column_serializer,
                    )?;
                }
                ColumnTypeCategory::Numerical => {
                    let numerical_column_writer: NumericalColumnWriter =
                        self.numerical_field_hash_map.read(addr);
                    let (numerical_type, cardinality) =
                        numerical_column_writer.column_type_and_cardinality(num_docs);
                    let column_type_and_cardinality = ColumnTypeAndCardinality {
                        cardinality,
                        typ: ColumnType::Numerical(numerical_type),
                    };
                    let mut column_serializer =
                        serializer.serialize_column(column_name, column_type_and_cardinality);
                    serialize_numerical_column(
                        cardinality,
                        num_docs,
                        numerical_type,
                        numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
                        buffers,
                        &mut column_serializer,
                    )?;
                }
            };
        }
        serializer.finalize()?;
        Ok(())
    }
}

fn serialize_bytes_column(
    cardinality: Cardinality,
    num_docs: DocId,
    dictionary_builder: &DictionaryBuilder,
    operation_it: impl Iterator<Item = ColumnOperation<UnorderedId>>,
    buffers: &mut SpareBuffers,
    wrt: impl io::Write,
) -> io::Result<()> {
    let SpareBuffers {
        value_index_builders,
        u64_values,
        ..
    } = buffers;
    let mut counting_writer = CountingWriter::wrap(wrt);
    let term_id_mapping: TermIdMapping = dictionary_builder.serialize(&mut counting_writer)?;
    let dictionary_num_bytes: u32 = counting_writer.written_bytes() as u32;
    let mut wrt = counting_writer.finish();
    let operation_iterator = operation_it.map(|symbol: ColumnOperation<UnorderedId>| {
        // We map unordered ids to ordered ids.
        match symbol {
            ColumnOperation::Value(unordered_id) => {
                let ordered_id = term_id_mapping.to_ord(unordered_id);
                ColumnOperation::Value(ordered_id.0 as u64)
            }
            ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
        }
    });
    serialize_column(
        operation_iterator,
        cardinality,
        num_docs,
        value_index_builders,
        u64_values,
        &mut wrt,
    )?;
    wrt.write_all(&dictionary_num_bytes.to_le_bytes()[..])?;
    Ok(())
}
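// Added note (illustrative, not in the original diff): the resulting byte
// layout for a bytes column is [dictionary | column body | dictionary_num_bytes
// (u32, little-endian)]. Storing the dictionary length last lets a reader
// locate the dictionary/column boundary without a separate header.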

fn serialize_numerical_column(
    cardinality: Cardinality,
    num_docs: DocId,
    numerical_type: NumericalType,
    op_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
    buffers: &mut SpareBuffers,
    wrt: &mut impl io::Write,
) -> io::Result<()> {
    let SpareBuffers {
        value_index_builders,
        u64_values,
        i64_values,
        f64_values,
        ..
    } = buffers;
    match numerical_type {
        NumericalType::I64 => {
            serialize_column(
                coerce_numerical_symbol::<i64>(op_iterator),
                cardinality,
                num_docs,
                value_index_builders,
                i64_values,
                wrt,
            )?;
        }
        NumericalType::U64 => {
            serialize_column(
                coerce_numerical_symbol::<u64>(op_iterator),
                cardinality,
                num_docs,
                value_index_builders,
                u64_values,
                wrt,
            )?;
        }
        NumericalType::F64 => {
            serialize_column(
                coerce_numerical_symbol::<f64>(op_iterator),
                cardinality,
                num_docs,
                value_index_builders,
                f64_values,
                wrt,
            )?;
        }
    };
    Ok(())
}

fn serialize_bool_column(
    cardinality: Cardinality,
    num_docs: DocId,
    column_operations_it: impl Iterator<Item = ColumnOperation<bool>>,
    buffers: &mut SpareBuffers,
    wrt: &mut impl io::Write,
) -> io::Result<()> {
    let SpareBuffers {
        value_index_builders,
        bool_values,
        ..
    } = buffers;
    serialize_column(
        column_operations_it,
        cardinality,
        num_docs,
        value_index_builders,
        bool_values,
        wrt,
    )?;
    Ok(())
}

fn serialize_column<
    T: Copy + Default + std::fmt::Debug + Send + Sync + MonotonicallyMappableToU64 + PartialOrd,
>(
    op_iterator: impl Iterator<Item = ColumnOperation<T>>,
    cardinality: Cardinality,
    num_docs: DocId,
    value_index_builders: &mut SpareIndexBuilders,
    values: &mut Vec<T>,
    mut wrt: impl io::Write,
) -> io::Result<()>
where
    for<'a> VecColumn<'a, T>: Column<T>,
{
    values.clear();
    match cardinality {
        Cardinality::Required => {
            consume_operation_iterator(
                op_iterator,
                value_index_builders.borrow_required_index_builder(),
                values,
            );
            fastfield_codecs::serialize(
                VecColumn::from(&values[..]),
                &mut wrt,
                &fastfield_codecs::ALL_CODEC_TYPES[..],
            )?;
        }
        Cardinality::Optional => {
            let optional_index_builder = value_index_builders.borrow_optional_index_builder();
            consume_operation_iterator(op_iterator, optional_index_builder, values);
            let optional_index = optional_index_builder.finish(num_docs);
            fastfield_codecs::serialize::serialize_new(
                ValueIndexInfo::SingleValue(Box::new(optional_index)),
                VecColumn::from(&values[..]),
                &mut wrt,
                &fastfield_codecs::ALL_CODEC_TYPES[..],
            )?;
        }
        Cardinality::Multivalued => {
            let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
            consume_operation_iterator(op_iterator, multivalued_index_builder, values);
            let multivalued_index = multivalued_index_builder.finish(num_docs);
            fastfield_codecs::serialize::serialize_new(
                ValueIndexInfo::MultiValue(Box::new(multivalued_index)),
                VecColumn::from(&values[..]),
                &mut wrt,
                &fastfield_codecs::ALL_CODEC_TYPES[..],
            )?;
        }
    }
    Ok(())
}
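// Added note (illustrative, not in the original diff): the cardinality decides
// which value index is serialized next to the values: `Required` needs no
// index at all, `Optional` stores the sorted doc ids that carry a value, and
// `Multivalued` stores one end offset per doc (see `value_index.rs` below).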

fn coerce_numerical_symbol<T>(
    operation_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
) -> impl Iterator<Item = ColumnOperation<T>>
where
    T: Coerce,
{
    operation_iterator.map(|symbol| match symbol {
        ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
        ColumnOperation::Value(numerical_value) => {
            ColumnOperation::Value(Coerce::coerce(numerical_value))
        }
    })
}

fn consume_operation_iterator<T: std::fmt::Debug, TIndexBuilder: IndexBuilder>(
    operation_iterator: impl Iterator<Item = ColumnOperation<T>>,
    index_builder: &mut TIndexBuilder,
    values: &mut Vec<T>,
) {
    for symbol in operation_iterator {
        match symbol {
            ColumnOperation::NewDoc(doc) => {
                index_builder.record_doc(doc);
            }
            ColumnOperation::Value(value) => {
                index_builder.record_value();
                values.push(value);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use column_operation::ColumnOperation;
    use stacker::MemoryArena;

    use super::*;
    use crate::value::NumericalValue;
    use crate::Cardinality;

    #[test]
    fn test_column_writer_required_simple() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(0u32, NumericalValue::from(14i64), &mut arena);
        column_writer.record(1u32, NumericalValue::from(15i64), &mut arena);
        column_writer.record(2u32, NumericalValue::from(-16i64), &mut arena);
        assert_eq!(column_writer.get_cardinality(3), Cardinality::Required);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .operation_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 6);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(14i64))
        ));
        assert!(matches!(symbols[2], ColumnOperation::NewDoc(1u32)));
        assert!(matches!(
            symbols[3],
            ColumnOperation::Value(NumericalValue::I64(15i64))
        ));
        assert!(matches!(symbols[4], ColumnOperation::NewDoc(2u32)));
        assert!(matches!(
            symbols[5],
            ColumnOperation::Value(NumericalValue::I64(-16i64))
        ));
    }

    #[test]
    fn test_column_writer_optional_cardinality_missing_first() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(1u32, NumericalValue::from(15i64), &mut arena);
        column_writer.record(2u32, NumericalValue::from(-16i64), &mut arena);
        assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .operation_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 4);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(15i64))
        ));
        assert!(matches!(symbols[2], ColumnOperation::NewDoc(2u32)));
        assert!(matches!(
            symbols[3],
            ColumnOperation::Value(NumericalValue::I64(-16i64))
        ));
    }

    #[test]
    fn test_column_writer_optional_cardinality_missing_last() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(0u32, NumericalValue::from(15i64), &mut arena);
        assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .operation_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 2);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(15i64))
        ));
    }

    #[test]
    fn test_column_writer_multivalued() {
        let mut arena = MemoryArena::default();
        let mut column_writer = super::ColumnWriter::default();
        column_writer.record(0u32, NumericalValue::from(16i64), &mut arena);
        column_writer.record(0u32, NumericalValue::from(17i64), &mut arena);
        assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
        let mut buffer = Vec::new();
        let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
            .operation_iterator(&mut arena, &mut buffer)
            .collect();
        assert_eq!(symbols.len(), 3);
        assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
        assert!(matches!(
            symbols[1],
            ColumnOperation::Value(NumericalValue::I64(16i64))
        ));
        assert!(matches!(
            symbols[2],
            ColumnOperation::Value(NumericalValue::I64(17i64))
        ));
    }
}

columnar/src/writer/serializer.rs (new file, 116 lines)
@@ -0,0 +1,116 @@
use std::io;
use std::io::Write;

use common::CountingWriter;
use sstable::value::RangeValueWriter;
use sstable::RangeSSTable;

use crate::column_type_header::ColumnTypeAndCardinality;

pub struct ColumnarSerializer<W: io::Write> {
    wrt: CountingWriter<W>,
    sstable_range: sstable::Writer<Vec<u8>, RangeValueWriter>,
    prepare_key_buffer: Vec<u8>,
}

/// Writes into `buffer` a key consisting of the column name, a `0u8` separator,
/// and the column_type_and_cardinality code.
fn prepare_key(
    key: &[u8],
    column_type_cardinality: ColumnTypeAndCardinality,
    buffer: &mut Vec<u8>,
) {
    buffer.clear();
    buffer.extend_from_slice(key);
    buffer.push(0u8);
    buffer.push(column_type_cardinality.to_code());
}
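// Added note (illustrative, not in the original diff): for a column named
// "price" whose type/cardinality code happened to be 17, the prepared key
// would be the bytes b"price\x00\x11" (the code value here is hypothetical).
// The 0 byte works as a separator because `ColumnarWriter::record_*`
// rejects column names containing a 0 byte.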

impl<W: io::Write> ColumnarSerializer<W> {
    pub(crate) fn new(wrt: W) -> ColumnarSerializer<W> {
        let sstable_range: sstable::Writer<Vec<u8>, RangeValueWriter> =
            sstable::Dictionary::<RangeSSTable>::builder(Vec::with_capacity(100_000)).unwrap();
        ColumnarSerializer {
            wrt: CountingWriter::wrap(wrt),
            sstable_range,
            prepare_key_buffer: Vec::new(),
        }
    }

    pub fn serialize_column<'a>(
        &'a mut self,
        column_name: &[u8],
        column_type_cardinality: ColumnTypeAndCardinality,
    ) -> impl io::Write + 'a {
        let start_offset = self.wrt.written_bytes();
        prepare_key(
            column_name,
            column_type_cardinality,
            &mut self.prepare_key_buffer,
        );
        ColumnSerializer {
            columnar_serializer: self,
            start_offset,
        }
    }

    pub(crate) fn finalize(mut self) -> io::Result<()> {
        let sstable_bytes: Vec<u8> = self.sstable_range.finish()?;
        let sstable_num_bytes: u64 = sstable_bytes.len() as u64;
        self.wrt.write_all(&sstable_bytes)?;
        self.wrt.write_all(&sstable_num_bytes.to_le_bytes()[..])?;
        Ok(())
    }
}

struct ColumnSerializer<'a, W: io::Write> {
    columnar_serializer: &'a mut ColumnarSerializer<W>,
    start_offset: u64,
}

impl<'a, W: io::Write> Drop for ColumnSerializer<'a, W> {
    fn drop(&mut self) {
        let end_offset: u64 = self.columnar_serializer.wrt.written_bytes();
        let byte_range = self.start_offset..end_offset;
        self.columnar_serializer.sstable_range.insert_cannot_fail(
            &self.columnar_serializer.prepare_key_buffer[..],
            &byte_range,
        );
        self.columnar_serializer.prepare_key_buffer.clear();
    }
}
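// Added note (illustrative, not in the original diff): the column is
// registered in the range sstable from `Drop`, so simply letting the
// `ColumnSerializer` go out of scope after writing the column body records
// its `start_offset..end_offset` byte range under the prepared key.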

impl<'a, W: io::Write> io::Write for ColumnSerializer<'a, W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.columnar_serializer.wrt.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.columnar_serializer.wrt.flush()
    }

    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.columnar_serializer.wrt.write_all(buf)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::column_type_header::ColumnType;
    use crate::Cardinality;

    #[test]
    fn test_prepare_key_bytes() {
        let mut buffer: Vec<u8> = b"somegarbage".to_vec();
        let column_type_and_cardinality = ColumnTypeAndCardinality {
            typ: ColumnType::Bytes,
            cardinality: Cardinality::Optional,
        };
        prepare_key(b"root\0child", column_type_and_cardinality, &mut buffer);
        assert_eq!(buffer.len(), 12);
        assert_eq!(&buffer[..10], b"root\0child");
        assert_eq!(buffer[10], 0u8);
        assert_eq!(buffer[11], column_type_and_cardinality.to_code());
    }
}

columnar/src/writer/value_index.rs (new file, 220 lines)
@@ -0,0 +1,220 @@
use fastfield_codecs::serialize::{MultiValueIndexInfo, SingleValueIndexInfo};

use crate::DocId;

/// The `IndexBuilder` interprets a sequence of
/// calls of the form:
/// (record_doc, record_value+)*
/// and can then serialize the results into an index to associate docids with their value[s].
///
/// It has different implementations depending on whether the
/// cardinality is required, optional, or multivalued.
pub(crate) trait IndexBuilder {
    fn record_doc(&mut self, doc: DocId);
    #[inline]
    fn record_value(&mut self) {}
}

/// The `RequiredIndexBuilder` does nothing.
#[derive(Default)]
pub struct RequiredIndexBuilder;

impl IndexBuilder for RequiredIndexBuilder {
    #[inline(always)]
    fn record_doc(&mut self, _doc: DocId) {}
}

#[derive(Default)]
pub struct OptionalIndexBuilder {
    docs: Vec<DocId>,
}

struct SingleValueArrayIndex<'a> {
    // DocIds with a value. DocIds are strictly increasing.
    docs: &'a [DocId],
    num_docs: DocId,
}

impl<'a> SingleValueIndexInfo for SingleValueArrayIndex<'a> {
    fn num_vals(&self) -> u32 {
        self.num_docs as u32
    }

    fn num_non_nulls(&self) -> u32 {
        self.docs.len() as u32
    }

    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        Box::new(self.docs.iter().copied())
    }
}

impl OptionalIndexBuilder {
    pub fn finish(&mut self, num_docs: DocId) -> impl SingleValueIndexInfo + '_ {
        debug_assert!(self
            .docs
            .last()
            .copied()
            .map(|last_doc| last_doc < num_docs)
            .unwrap_or(true));
        SingleValueArrayIndex {
            docs: &self.docs[..],
            num_docs,
        }
    }

    fn reset(&mut self) {
        self.docs.clear();
    }
}

impl IndexBuilder for OptionalIndexBuilder {
    #[inline(always)]
    fn record_doc(&mut self, doc: DocId) {
        debug_assert!(self
            .docs
            .last()
            .copied()
            .map(|prev_doc| doc > prev_doc)
            .unwrap_or(true));
        self.docs.push(doc);
    }
}

#[derive(Default)]
pub struct MultivaluedIndexBuilder {
    // TODO should we switch to `start_offset`?
    // Contains the number of values seen so far for each `DocId`.
    end_offsets: Vec<DocId>,
    total_num_vals_seen: u32,
}

pub struct MultivaluedValueArrayIndex<'a> {
    end_offsets: &'a [DocId],
}

impl<'a> MultiValueIndexInfo for MultivaluedValueArrayIndex<'a> {
    fn num_docs(&self) -> u32 {
        self.end_offsets.len() as u32
    }

    fn num_vals(&self) -> u32 {
        self.end_offsets.last().copied().unwrap_or(0u32)
    }

    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
        if self.end_offsets.is_empty() {
            return Box::new(std::iter::empty());
        }
        let n = self.end_offsets.len();
        Box::new(std::iter::once(0u32).chain(self.end_offsets[..n - 1].iter().copied()))
    }
}

impl MultivaluedIndexBuilder {
    pub fn finish(&mut self, num_docs: DocId) -> impl MultiValueIndexInfo + '_ {
        self.end_offsets
            .resize(num_docs as usize, self.total_num_vals_seen);
        MultivaluedValueArrayIndex {
            end_offsets: &self.end_offsets[..],
        }
    }

    fn reset(&mut self) {
        self.end_offsets.clear();
        self.total_num_vals_seen = 0;
    }
}

impl IndexBuilder for MultivaluedIndexBuilder {
    fn record_doc(&mut self, doc: DocId) {
        self.end_offsets
            .resize(doc as usize, self.total_num_vals_seen);
    }

    fn record_value(&mut self) {
        self.total_num_vals_seen += 1;
    }
}
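// Added note (illustrative, not in the original diff): with the builder above,
// record_doc(1); record_value(); record_value(); record_doc(2); record_value();
// finish(4) yields the start offsets [0, 0, 2, 3]: doc 0 has no value, doc 1
// has two, doc 2 has one, and doc 3 none, matching
// `test_multivalued_value_index_builder` below.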

/// The `SpareIndexBuilders` is there to avoid allocating a
/// new index builder for every single column.
#[derive(Default)]
pub struct SpareIndexBuilders {
    required_index_builder: RequiredIndexBuilder,
    optional_index_builder: OptionalIndexBuilder,
    multivalued_index_builder: MultivaluedIndexBuilder,
}

impl SpareIndexBuilders {
    pub fn borrow_required_index_builder(&mut self) -> &mut RequiredIndexBuilder {
        &mut self.required_index_builder
    }

    pub fn borrow_optional_index_builder(&mut self) -> &mut OptionalIndexBuilder {
        self.optional_index_builder.reset();
        &mut self.optional_index_builder
    }

    pub fn borrow_multivalued_index_builder(&mut self) -> &mut MultivaluedIndexBuilder {
        self.multivalued_index_builder.reset();
        &mut self.multivalued_index_builder
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_optional_value_index_builder() {
        let mut opt_value_index_builder = OptionalIndexBuilder::default();
        opt_value_index_builder.record_doc(0u32);
        opt_value_index_builder.record_value();
        assert_eq!(
            &opt_value_index_builder
                .finish(1u32)
                .iter()
                .collect::<Vec<u32>>(),
            &[0]
        );
        opt_value_index_builder.reset();
        opt_value_index_builder.record_doc(1u32);
        opt_value_index_builder.record_value();
        assert_eq!(
            &opt_value_index_builder
                .finish(2u32)
                .iter()
                .collect::<Vec<u32>>(),
            &[1]
        );
    }

    #[test]
    fn test_multivalued_value_index_builder() {
        let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
        multivalued_value_index_builder.record_doc(1u32);
        multivalued_value_index_builder.record_value();
        multivalued_value_index_builder.record_value();
        multivalued_value_index_builder.record_doc(2u32);
        multivalued_value_index_builder.record_value();
        assert_eq!(
            multivalued_value_index_builder
                .finish(4u32)
                .iter()
                .collect::<Vec<u32>>(),
            vec![0, 0, 2, 3]
        );
        multivalued_value_index_builder.reset();
        multivalued_value_index_builder.record_doc(2u32);
        multivalued_value_index_builder.record_value();
        multivalued_value_index_builder.record_value();
        assert_eq!(
            multivalued_value_index_builder
                .finish(4u32)
                .iter()
                .collect::<Vec<u32>>(),
            vec![0, 0, 0, 2]
        );
    }
}

common/Cargo.toml
@@ -1,16 +1,21 @@
[package]
name = "tantivy-common"
version = "0.3.0"
version = "0.5.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "common traits and utility functions used by multiple tantivy subcrates"
documentation = "https://docs.rs/tantivy_common/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
byteorder = "1.4.3"
ownedbytes = { version="0.3", path="../ownedbytes" }
ownedbytes = { version= "0.5", path="../ownedbytes" }
async-trait = "0.1"

[dev-dependencies]
proptest = "1.0.0"

common/src/bitset.rs
@@ -151,7 +151,7 @@ impl TinySet {
if self.is_empty() {
    None
} else {
    let lowest = self.0.trailing_zeros() as u32;
    let lowest = self.0.trailing_zeros();
    self.0 ^= TinySet::singleton(lowest).0;
    Some(lowest)
}
@@ -421,7 +421,7 @@ mod tests {
bitset.serialize(&mut out).unwrap();

let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len() as usize, i as usize);
assert_eq!(bitset.len(), i as usize);
}
}

@@ -432,7 +432,7 @@ mod tests {
bitset.serialize(&mut out).unwrap();

let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len() as usize, 64);
assert_eq!(bitset.len(), 64);
}

#[test]

common/src/file_slice.rs
@@ -1,19 +1,18 @@
use std::ops::{Deref, Range};
use std::ops::{Deref, Range, RangeBounds};
use std::sync::Arc;
use std::{fmt, io};

use async_trait::async_trait;
use common::HasLen;
use stable_deref_trait::StableDeref;
use ownedbytes::{OwnedBytes, StableDeref};

use crate::directory::OwnedBytes;
use crate::HasLen;

/// Objects that represents files sections in tantivy.
///
/// By contract, whatever happens to the directory file, as long as a FileHandle
/// is alive, the data associated with it cannot be altered or destroyed.
///
/// The underlying behavior is therefore specific to the [`Directory`](crate::Directory) that
/// The underlying behavior is therefore specific to the `Directory` that
/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
/// on the filesystem.

@@ -24,13 +23,12 @@ pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
/// This method may panic if the range requested is invalid.
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes>;

#[cfg(feature = "quickwit")]
#[doc(hidden)]
async fn read_bytes_async(
    &self,
    _byte_range: Range<usize>,
) -> crate::AsyncIoResult<OwnedBytes> {
    Err(crate::error::AsyncIoError::AsyncUnsupported)
async fn read_bytes_async(&self, _byte_range: Range<usize>) -> io::Result<OwnedBytes> {
    Err(io::Error::new(
        io::ErrorKind::Unsupported,
        "Async read is not supported.",
    ))
}
}

@@ -41,8 +39,7 @@ impl FileHandle for &'static [u8] {
    Ok(OwnedBytes::new(bytes))
}

#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
    Ok(self.read_bytes(byte_range)?)
}
}
@@ -70,6 +67,34 @@ impl fmt::Debug for FileSlice {
}
}
/// Takes an original `Range` and a `RangeBounds` object, and returns
/// the `Range` obtained by applying the `RangeBounds` relative to the
/// original `Range`.
///
/// For instance, `combine_ranges([2..11), [5..=7])` returns `[7..10)`:
/// the sub-range that starts at the 5th element of `[2..11)` and ends at
/// its 7th element, included.
///
/// This function panics if the result would fall outside
/// of the bounds of the original range.
fn combine_ranges<R: RangeBounds<usize>>(orig_range: Range<usize>, rel_range: R) -> Range<usize> {
    let start: usize = orig_range.start
        + match rel_range.start_bound().cloned() {
            std::ops::Bound::Included(rel_start) => rel_start,
            std::ops::Bound::Excluded(rel_start) => rel_start + 1,
            std::ops::Bound::Unbounded => 0,
        };
    assert!(start <= orig_range.end);
    let end: usize = match rel_range.end_bound().cloned() {
        std::ops::Bound::Included(rel_end) => orig_range.start + rel_end + 1,
        std::ops::Bound::Excluded(rel_end) => orig_range.start + rel_end,
        std::ops::Bound::Unbounded => orig_range.end,
    };
    assert!(end >= start);
    assert!(end <= orig_range.end);
    start..end
}
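// Added note (illustrative, not in the original diff): for
// `combine_ranges(2..11, 5..=7)`, start = 2 + 5 = 7 and, since the relative
// end bound is inclusive, end = 2 + 7 + 1 = 10, hence `7..10`, matching
// `test_combine_range` below.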

impl FileSlice {
/// Wraps a FileHandle.
pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {
@@ -93,11 +118,11 @@ impl FileSlice {
///
/// Panics if `byte_range.end` exceeds the filesize.
#[must_use]
pub fn slice(&self, byte_range: Range<usize>) -> FileSlice {
    assert!(byte_range.end <= self.len());
#[inline]
pub fn slice<R: RangeBounds<usize>>(&self, byte_range: R) -> FileSlice {
    FileSlice {
        data: self.data.clone(),
        range: self.range.start + byte_range.start..self.range.start + byte_range.end,
        range: combine_ranges(self.range.clone(), byte_range),
    }
}

@@ -117,9 +142,8 @@ impl FileSlice {
    self.data.read_bytes(self.range.clone())
}

#[cfg(feature = "quickwit")]
#[doc(hidden)]
pub async fn read_bytes_async(&self) -> crate::AsyncIoResult<OwnedBytes> {
pub async fn read_bytes_async(&self) -> io::Result<OwnedBytes> {
    self.data.read_bytes_async(self.range.clone()).await
}

@@ -137,12 +161,8 @@ impl FileSlice {
        .read_bytes(self.range.start + range.start..self.range.start + range.end)
}

#[cfg(feature = "quickwit")]
#[doc(hidden)]
pub async fn read_bytes_slice_async(
    &self,
    byte_range: Range<usize>,
) -> crate::AsyncIoResult<OwnedBytes> {
pub async fn read_bytes_slice_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
    assert!(
        self.range.start + byte_range.end <= self.range.end,
        "`to` exceeds the fileslice length"
@@ -204,8 +224,7 @@ impl FileHandle for FileSlice {
    self.read_bytes_slice(range)
}

#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
async fn read_bytes_async(&self, byte_range: Range<usize>) -> io::Result<OwnedBytes> {
    self.read_bytes_slice_async(byte_range).await
}
}
@@ -222,21 +241,20 @@ impl FileHandle for OwnedBytes {
    Ok(self.slice(range))
}

#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
    let bytes = self.read_bytes(range)?;
    Ok(bytes)
async fn read_bytes_async(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
    self.read_bytes(range)
}
}

#[cfg(test)]
mod tests {
use std::io;
use std::ops::Bound;
use std::sync::Arc;

use common::HasLen;

use super::{FileHandle, FileSlice};
use crate::file_slice::combine_ranges;
use crate::HasLen;

#[test]
fn test_file_slice() -> io::Result<()> {
@@ -307,4 +325,23 @@ mod tests {
    b"bcd"
);
}

#[test]
fn test_combine_range() {
    assert_eq!(combine_ranges(1..3, 0..1), 1..2);
    assert_eq!(combine_ranges(1..3, 1..), 2..3);
    assert_eq!(combine_ranges(1..4, ..2), 1..3);
    assert_eq!(combine_ranges(3..10, 2..5), 5..8);
    assert_eq!(combine_ranges(2..11, 5..=7), 7..10);
    assert_eq!(
        combine_ranges(2..11, (Bound::Excluded(5), Bound::Unbounded)),
        8..11
    );
}

#[test]
#[should_panic]
fn test_combine_range_panics() {
    let _ = combine_ranges(3..5, 1..4);
}
}

common/src/group_by.rs (new file, 166 lines)
@@ -0,0 +1,166 @@
use std::cell::RefCell;
use std::iter::Peekable;
use std::rc::Rc;

pub trait GroupByIteratorExtended: Iterator {
    /// Return an `Iterator` that groups iterator elements. Consecutive elements that map to the
    /// same key are assigned to the same group.
    ///
    /// The returned iterator item is `(K, impl Iterator)`, where the inner iterator yields the
    /// items of the group.
    ///
    /// ```
    /// use tantivy_common::GroupByIteratorExtended;
    ///
    /// // Group data into blocks depending on whether the values are non-negative or not.
    /// let data: Vec<i32> = vec![1, 3, -2, -2, 1, 0, 1, 2];
    /// // groups: |---->|------>|--------->|
    ///
    /// let mut data_grouped = Vec::new();
    /// // Note: `group` is an iterator.
    /// for (key, group) in data.into_iter().group_by(|val| *val >= 0) {
    ///     data_grouped.push((key, group.collect()));
    /// }
    /// assert_eq!(data_grouped, vec![(true, vec![1, 3]), (false, vec![-2, -2]), (true, vec![1, 0, 1, 2])]);
    /// ```
    fn group_by<K, F>(self, key: F) -> GroupByIterator<Self, F, K>
    where
        Self: Sized,
        F: FnMut(&Self::Item) -> K,
        K: PartialEq + Copy,
        Self::Item: Copy,
    {
        GroupByIterator::new(self, key)
    }
}
impl<I: Iterator> GroupByIteratorExtended for I {}

pub struct GroupByIterator<I, F, K: Copy>
where
    I: Iterator,
    F: FnMut(&I::Item) -> K,
{
    // I really would like to avoid the Rc<RefCell>, but the iterator is shared between
    // `GroupByIterator` and `GroupIterator`. In practice they are used consecutively, and the
    // `GroupIterator` is finished before calling next on the `GroupByIterator`. I'm not sure
    // there is a solution with lifetimes for that, because we would need to enforce it in the
    // usage somehow.
    //
    // One potential solution would be to replace the iterator approach with something similar.
    inner: Rc<RefCell<GroupByShared<I, F, K>>>,
}

struct GroupByShared<I, F, K: Copy>
where
    I: Iterator,
    F: FnMut(&I::Item) -> K,
{
    iter: Peekable<I>,
    group_by_fn: F,
}

impl<I, F, K> GroupByIterator<I, F, K>
where
    I: Iterator,
    F: FnMut(&I::Item) -> K,
    K: Copy,
{
    fn new(inner: I, group_by_fn: F) -> Self {
        let inner = GroupByShared {
            iter: inner.peekable(),
            group_by_fn,
        };

        Self {
            inner: Rc::new(RefCell::new(inner)),
        }
    }
}

impl<I, F, K> Iterator for GroupByIterator<I, F, K>
where
    I: Iterator,
    I::Item: Copy,
    F: FnMut(&I::Item) -> K,
    K: Copy,
{
    type Item = (K, GroupIterator<I, F, K>);

    fn next(&mut self) -> Option<Self::Item> {
        let mut inner = self.inner.borrow_mut();
        let value = *inner.iter.peek()?;
        let key = (inner.group_by_fn)(&value);

        let inner = self.inner.clone();

        let group_iter = GroupIterator {
            inner,
            group_key: key,
        };
        Some((key, group_iter))
    }
}

pub struct GroupIterator<I, F, K: Copy>
where
    I: Iterator,
    F: FnMut(&I::Item) -> K,
{
    inner: Rc<RefCell<GroupByShared<I, F, K>>>,
    group_key: K,
}

impl<I, F, K: PartialEq + Copy> Iterator for GroupIterator<I, F, K>
where
    I: Iterator,
    I::Item: Copy,
    F: FnMut(&I::Item) -> K,
{
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        let mut inner = self.inner.borrow_mut();
        // peek if next value is in group
        let peek_val = *inner.iter.peek()?;
        if (inner.group_by_fn)(&peek_val) == self.group_key {
            inner.iter.next()
        } else {
            None
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn group_by_collect<I: Iterator<Item = u32>>(iter: I) -> Vec<(I::Item, Vec<I::Item>)> {
        iter.group_by(|val| val / 10)
            .map(|(el, iter)| (el, iter.collect::<Vec<_>>()))
            .collect::<Vec<_>>()
    }

    #[test]
    fn group_by_two_groups() {
        let vals = vec![1u32, 4, 15];
        let grouped_vals = group_by_collect(vals.into_iter());
        assert_eq!(grouped_vals, vec![(0, vec![1, 4]), (1, vec![15])]);
    }

    #[test]
    fn group_by_test_empty() {
        let vals = vec![];
        let grouped_vals = group_by_collect(vals.into_iter());
        assert_eq!(grouped_vals, vec![]);
    }

    #[test]
    fn group_by_three_groups() {
        let vals = vec![1u32, 4, 15, 1];
        let grouped_vals = group_by_collect(vals.into_iter());
        assert_eq!(
            grouped_vals,
            vec![(0, vec![1, 4]), (1, vec![15]), (0, vec![1])]
        );
    }
}
@@ -5,11 +5,14 @@ use std::ops::Deref;
|
||||
pub use byteorder::LittleEndian as Endianness;
|
||||
|
||||
mod bitset;
|
||||
pub mod file_slice;
|
||||
mod group_by;
|
||||
mod serialize;
|
||||
mod vint;
|
||||
mod writer;
|
||||
|
||||
pub use bitset::*;
|
||||
pub use group_by::GroupByIteratorExtended;
|
||||
pub use ownedbytes::{OwnedBytes, StableDeref};
|
||||
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
|
||||
pub use vint::{
|
||||
deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,
|
||||
|
||||
@@ -94,6 +94,20 @@ impl FixedSize for u32 {
|
||||
const SIZE_IN_BYTES: usize = 4;
|
||||
}
|
||||
|
||||
impl BinarySerializable for u16 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u16::<Endianness>(*self)
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<u16> {
|
||||
reader.read_u16::<Endianness>()
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for u16 {
|
||||
const SIZE_IN_BYTES: usize = 2;
|
||||
}
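
// A quick round-trip sketch for the new `u16` impl (illustration only, not part of the diff):
//
//     let mut buf: Vec<u8> = Vec::new();
//     42u16.serialize(&mut buf)?;                 // 2 bytes, little-endian per `Endianness`
//     assert_eq!(buf.len(), u16::SIZE_IN_BYTES);
//     assert_eq!(u16::deserialize(&mut &buf[..])?, 42u16);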

impl BinarySerializable for u64 {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u64::<Endianness>(*self)

@@ -118,7 +118,7 @@ fn main() -> tantivy::Result<()> {
        .into_iter()
        .collect();

    let collector = AggregationCollector::from_aggs(agg_req_1, None);
    let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

    let searcher = reader.searcher();
    let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();

@@ -1,15 +1,17 @@
// # Basic Example
// # Faceted Search
//
// This example covers the basic functionalities of
// This example covers the faceted search functionalities of
// tantivy.
//
// We will:
// - define our schema
// - create an index in a directory
// - index a few documents in our index
// - search for the best documents matching "sea whale"
// - retrieve the best document's original content.

// - define a text field "name" in our schema
// - define a facet field "classification" in our schema
// - create an index in memory
// - index a few documents with their respective facets in our index
// - search and count the number of documents whose classification starts with the facet "/Felidae"
// - search the facet "/Felidae/Pantherinae" and count the number of documents whose
//   classification includes the facet.
//
// ---
// Importing tantivy...
use tantivy::collector::FacetCollector;

@@ -21,7 +23,7 @@ fn main() -> tantivy::Result<()> {
    // Let's create a temporary directory for the sake of this example
    let mut schema_builder = Schema::builder();

    let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
    let name = schema_builder.add_text_field("name", TEXT | STORED);
    // this is our faceted field: its scientific classification
    let classification = schema_builder.add_facet_field("classification", FacetOptions::default());

@@ -1,18 +1,20 @@
[package]
name = "fastfield_codecs"
version = "0.2.0"
version = "0.3.0"
authors = ["Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
description = "Fast field codecs used by tantivy"
documentation = "https://docs.rs/fastfield_codecs/"
homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version = "0.2", path = "../bitpacker/" }
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
prettytable-rs = { version = "0.9.0", optional = true }
common = { version = "0.5", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version = "0.3", path = "../bitpacker/" }
prettytable-rs = { version = "0.10.0", optional = true }
rand = { version = "0.8.3", optional = true }
fastdivide = "0.4"
log = "0.4"

@@ -4,11 +4,11 @@ extern crate test;

#[cfg(test)]
mod tests {
    use std::iter;
    use std::ops::RangeInclusive;
    use std::sync::Arc;

    use common::OwnedBytes;
    use fastfield_codecs::*;
    use ownedbytes::OwnedBytes;
    use rand::prelude::*;
    use test::Bencher;

@@ -71,27 +71,24 @@ mod tests {
        });
    }

    fn get_exp_data() -> Vec<u64> {
    const FIFTY_PERCENT_RANGE: RangeInclusive<u64> = 1..=50;
    const SINGLE_ITEM: u64 = 90;
    const SINGLE_ITEM_RANGE: RangeInclusive<u64> = 90..=90;
    const ONE_PERCENT_ITEM_RANGE: RangeInclusive<u64> = 49..=49;
    fn get_data_50percent_item() -> Vec<u128> {
        let mut rng = StdRng::from_seed([1u8; 32]);

        let mut data = vec![];
        for i in 0..100 {
            let num = i * i;
            data.extend(iter::repeat(i as u64).take(num));
        for _ in 0..300_000 {
            let val = rng.gen_range(1..=100);
            data.push(val);
        }
        data.shuffle(&mut StdRng::from_seed([1u8; 32]));
        data.push(SINGLE_ITEM);

        // length = 328350
        data.shuffle(&mut rng);
        let data = data.iter().map(|el| *el as u128).collect::<Vec<_>>();
        data
    }

    fn get_data_50percent_item() -> (u128, u128, Vec<u128>) {
        let mut permutation = get_exp_data();
        let major_item = 20;
        let minor_item = 10;
        permutation.extend(iter::repeat(major_item).take(permutation.len()));
        permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
        let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
        (major_item as u128, minor_item as u128, permutation)
    }
    fn get_u128_column_random() -> Arc<dyn Column<u128>> {
        let permutation = generate_random();
        let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();

@@ -106,15 +103,82 @@ mod tests {
        open_u128::<u128>(out).unwrap()
    }

    // U64 RANGE START
    #[bench]
    fn bench_intfastfield_getrange_u64_50percent_hit(b: &mut Bencher) {
        let data = get_data_50percent_item();
        let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
        let column: Arc<dyn Column<u64>> = serialize_and_load(&data);

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(
                FIFTY_PERCENT_RANGE,
                0..data.len() as u32,
                &mut positions,
            );
            positions
        });
    }

    #[bench]
    fn bench_intfastfield_getrange_u64_1percent_hit(b: &mut Bencher) {
        let data = get_data_50percent_item();
        let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
        let column: Arc<dyn Column<u64>> = serialize_and_load(&data);

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(
                ONE_PERCENT_ITEM_RANGE,
                0..data.len() as u32,
                &mut positions,
            );
            positions
        });
    }

    #[bench]
    fn bench_intfastfield_getrange_u64_single_hit(b: &mut Bencher) {
        let data = get_data_50percent_item();
        let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
        let column: Arc<dyn Column<u64>> = serialize_and_load(&data);

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(
                SINGLE_ITEM_RANGE,
                0..data.len() as u32,
                &mut positions,
            );
            positions
        });
    }

    #[bench]
    fn bench_intfastfield_getrange_u64_hit_all(b: &mut Bencher) {
        let data = get_data_50percent_item();
        let data = data.iter().map(|el| *el as u64).collect::<Vec<_>>();
        let column: Arc<dyn Column<u64>> = serialize_and_load(&data);

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(0..=u64::MAX, 0..data.len() as u32, &mut positions);
            positions
        });
    }
    // U64 RANGE END

    // U128 RANGE START
    #[bench]
    fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
        let (major_item, _minor_item, data) = get_data_50percent_item();
        let data = get_data_50percent_item();
        let column = get_u128_column_from_data(&data);

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(
                major_item..=major_item,
                *FIFTY_PERCENT_RANGE.start() as u128..=*FIFTY_PERCENT_RANGE.end() as u128,
                0..data.len() as u32,
                &mut positions,
            );

@@ -124,13 +188,13 @@ mod tests {

    #[bench]
    fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
        let (_major_item, minor_item, data) = get_data_50percent_item();
        let data = get_data_50percent_item();
        let column = get_u128_column_from_data(&data);

        b.iter(|| {
            let mut positions = Vec::new();
            column.get_docids_for_value_range(
                minor_item..=minor_item,
                *SINGLE_ITEM_RANGE.start() as u128..=*SINGLE_ITEM_RANGE.end() as u128,
                0..data.len() as u32,
                &mut positions,
            );

@@ -140,7 +204,7 @@ mod tests {

    #[bench]
    fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
        let (_major_item, _minor_item, data) = get_data_50percent_item();
        let data = get_data_50percent_item();
        let column = get_u128_column_from_data(&data);

        b.iter(|| {

@@ -149,6 +213,7 @@ mod tests {
            positions
        });
    }
    // U128 RANGE END

    #[bench]
    fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {

@@ -1,6 +1,6 @@
use std::io::{self, Write};

use ownedbytes::OwnedBytes;
use common::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::serialize::NormalizedHeader;

@@ -1,8 +1,7 @@
use std::sync::Arc;
use std::{io, iter};

use common::{BinarySerializable, CountingWriter, DeserializeFrom};
use ownedbytes::OwnedBytes;
use common::{BinarySerializable, CountingWriter, DeserializeFrom, OwnedBytes};
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::line::Line;

@@ -47,7 +46,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
    type Reader = BlockwiseLinearReader;

    fn open_from_bytes(
        bytes: ownedbytes::OwnedBytes,
        bytes: common::OwnedBytes,
        normalized_header: NormalizedHeader,
    ) -> io::Result<Self::Reader> {
        let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;

@@ -75,7 +74,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
        if column.num_vals() < 10 * CHUNK_SIZE as u32 {
            return None;
        }
        let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE as usize).collect();
        let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE).collect();
        let line = Line::train(&VecColumn::from(&first_chunk));
        for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
            let interpolated_val = line.eval(i as u32);

@@ -171,15 +170,18 @@ impl Column for BlockwiseLinearReader {
        interpoled_val.wrapping_add(bitpacked_diff)
    }

    #[inline(always)]
    fn min_value(&self) -> u64 {
        // The BlockwiseLinearReader assumes a normalized vector.
        0u64
    }

    #[inline(always)]
    fn max_value(&self) -> u64 {
        self.normalized_header.max_value
    }

    #[inline(always)]
    fn num_vals(&self) -> u32 {
        self.normalized_header.num_vals
    }

@@ -1,3 +1,4 @@
use std::fmt::{self, Debug};
use std::marker::PhantomData;
use std::ops::{Range, RangeInclusive};

@@ -6,7 +7,7 @@ use tantivy_bitpacker::minmax;
use crate::monotonic_mapping::StrictlyMonotonicFn;

/// `Column` provides columnar access on a field.
pub trait Column<T: PartialOrd = u64>: Send + Sync {
pub trait Column<T: PartialOrd + Debug = u64>: Send + Sync {
    /// Return the value associated with the given idx.
    ///
    /// This accessor should return as fast as possible.

@@ -83,7 +84,7 @@ pub struct VecColumn<'a, T = u64> {
    max_value: T,
}

impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
impl<'a, C: Column<T>, T: Copy + PartialOrd + fmt::Debug> Column<T> for &'a C {
    fn get_val(&self, idx: u32) -> T {
        (*self).get_val(idx)
    }

@@ -109,7 +110,7 @@ impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
    }
}

impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> Column<T> for VecColumn<'a, T> {
    fn get_val(&self, position: u32) -> T {
        self.values[position as usize]
    }

@@ -135,7 +136,7 @@ impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
    }
}

impl<'a, T: Copy + Ord + Default, V> From<&'a V> for VecColumn<'a, T>
impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
where V: AsRef<[T]> + ?Sized
{
    fn from(values: &'a V) -> Self {

@@ -177,8 +178,8 @@ pub fn monotonic_map_column<C, T, Input, Output>(
where
    C: Column<Input>,
    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
    Input: PartialOrd + Send + Sync + Clone,
    Output: PartialOrd + Send + Sync + Clone,
    Input: PartialOrd + Send + Sync + Copy + Debug,
    Output: PartialOrd + Send + Sync + Copy + Debug,
{
    MonotonicMappingColumn {
        from_column,

@@ -191,8 +192,8 @@ impl<C, T, Input, Output> Column<Output> for MonotonicMappingColumn<C, T, Input>
where
    C: Column<Input>,
    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
    Input: PartialOrd + Send + Sync + Clone,
    Output: PartialOrd + Send + Sync + Clone,
    Input: PartialOrd + Send + Sync + Copy + Debug,
    Output: PartialOrd + Send + Sync + Copy + Debug,
{
    #[inline]
    fn get_val(&self, idx: u32) -> Output {

@@ -228,12 +229,15 @@ where
        doc_id_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        self.from_column.get_docids_for_value_range(
            self.monotonic_mapping.inverse(range.start().clone())
                ..=self.monotonic_mapping.inverse(range.end().clone()),
            doc_id_range,
            positions,
        )
        if range.start() > &self.max_value() || range.end() < &self.min_value() {
            return;
        }
        let range = self.monotonic_mapping.inverse_coerce(range);
        if range.start() > range.end() {
            return;
        }
        self.from_column
            .get_docids_for_value_range(range, doc_id_range, positions)
    }

    // We voluntarily do not implement get_range as it yields a regression,

@@ -254,7 +258,7 @@ where T: Iterator + Clone + ExactSizeIterator
impl<T> Column<T::Item> for IterColumn<T>
where
    T: Iterator + Clone + ExactSizeIterator + Send + Sync,
    T::Item: PartialOrd,
    T::Item: PartialOrd + fmt::Debug,
{
    fn get_val(&self, idx: u32) -> T::Item {
        self.0.clone().nth(idx as usize).unwrap()

@@ -208,7 +208,7 @@ impl CompactSpaceBuilder {
        };
        let covered_range_len = range_mapping.range_length();
        ranges_mapping.push(range_mapping);
        compact_start += covered_range_len as u64;
        compact_start += covered_range_len;
    }
    // println!("num ranges {}", ranges_mapping.len());
    CompactSpace { ranges_mapping }

@@ -17,8 +17,7 @@ use std::{
    ops::{Range, RangeInclusive},
};

use common::{BinarySerializable, CountingWriter, VInt, VIntU128};
use ownedbytes::OwnedBytes;
use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};

use crate::compact_space::build_compact_space::get_compact_space;

@@ -97,7 +96,7 @@ impl BinarySerializable for CompactSpace {
        };
        let range_length = range_mapping.range_length();
        ranges_mapping.push(range_mapping);
        compact_start += range_length as u64;
        compact_start += range_length;
    }

    Ok(Self { ranges_mapping })

@@ -407,10 +406,10 @@ impl CompactSpaceDecompressor {
        let idx2 = idx + 1;
        let idx3 = idx + 2;
        let idx4 = idx + 3;
        let val1 = get_val(idx1 as u32);
        let val2 = get_val(idx2 as u32);
        let val3 = get_val(idx3 as u32);
        let val4 = get_val(idx4 as u32);
        let val1 = get_val(idx1);
        let val2 = get_val(idx2);
        let val3 = get_val(idx3);
        let val4 = get_val(idx4);
        push_if_in_range(idx1, val1);
        push_if_in_range(idx2, val2);
        push_if_in_range(idx3, val3);

@@ -419,14 +418,13 @@ impl CompactSpaceDecompressor {

        // handle rest
        for idx in cutoff..position_range.end {
            push_if_in_range(idx, get_val(idx as u32));
            push_if_in_range(idx, get_val(idx));
        }
    }

    #[inline]
    fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
        (0..self.params.num_vals)
            .map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u64)
        (0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
    }

    #[inline]

@@ -455,7 +453,11 @@ impl CompactSpaceDecompressor {
#[cfg(test)]
mod tests {

    use std::fmt;

    use super::*;
    use crate::format_version::read_format_version;
    use crate::null_index_footer::read_null_index_footer;
    use crate::serialize::U128Header;
    use crate::{open_u128, serialize_u128};

@@ -541,7 +543,10 @@ mod tests {
        .unwrap();

    let data = OwnedBytes::new(out);
    let (data, _format_version) = read_format_version(data).unwrap();
    let (data, _null_index_footer) = read_null_index_footer(data).unwrap();
    test_all(data.clone(), u128_vals);

    data
}

@@ -559,11 +564,12 @@ mod tests {
        333u128,
    ];
    let mut data = test_aux_vals(vals);

    let _header = U128Header::deserialize(&mut data);
    let decomp = CompactSpaceDecompressor::open(data).unwrap();
    let complete_range = 0..vals.len() as u32;
    for (pos, val) in vals.iter().enumerate() {
        let val = *val as u128;
        let val = *val;
        let pos = pos as u32;
        let mut positions = Vec::new();
        decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);

@@ -660,7 +666,7 @@ mod tests {
        get_positions_for_value_range_helper(
            &decomp,
            4_000_211_221u128..=5_000_000_000u128,
            complete_range.clone()
            complete_range
        ),
        vec![6, 7]
    );

@@ -697,12 +703,12 @@ mod tests {
        vec![0]
    );
    assert_eq!(
        get_positions_for_value_range_helper(&decomp, 0..=105, complete_range.clone()),
        get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
        vec![0]
    );
}

fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd>(
fn get_positions_for_value_range_helper<C: Column<T> + ?Sized, T: PartialOrd + fmt::Debug>(
    column: &C,
    value_range: RangeInclusive<T>,
    doc_id_range: Range<u32>,

@@ -750,11 +756,7 @@ mod tests {
    );

    assert_eq!(
        get_positions_for_value_range_helper(
            &*decomp,
            1_000_000..=1_000_000,
            complete_range.clone()
        ),
        get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
        vec![11]
    );
}

fastfield_codecs/src/format_version.rs (new file, 38 lines)
@@ -0,0 +1,38 @@
use std::io;

use common::{BinarySerializable, OwnedBytes};

const MAGIC_NUMBER: u16 = 4335u16;
const FASTFIELD_FORMAT_VERSION: u8 = 1;

pub(crate) fn append_format_version(output: &mut impl io::Write) -> io::Result<()> {
    FASTFIELD_FORMAT_VERSION.serialize(output)?;
    MAGIC_NUMBER.serialize(output)?;

    Ok(())
}

pub(crate) fn read_format_version(data: OwnedBytes) -> io::Result<(OwnedBytes, u8)> {
    let (data, magic_number_bytes) = data.rsplit(2);

    let magic_number = u16::deserialize(&mut magic_number_bytes.as_slice())?;
    if magic_number != MAGIC_NUMBER {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("magic number mismatch {} != {}", magic_number, MAGIC_NUMBER),
        ));
    }
    let (data, format_version_bytes) = data.rsplit(1);
    let format_version = u8::deserialize(&mut format_version_bytes.as_slice())?;
    if format_version > FASTFIELD_FORMAT_VERSION {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!(
                "Unsupported fastfield format version: {}. Max supported version: {}",
                format_version, FASTFIELD_FORMAT_VERSION
            ),
        ));
    }

    Ok((data, format_version))
}
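
// A round-trip sketch of the footer logic above (a crate-internal illustration,
// not part of the diff):
//
//     let mut out: Vec<u8> = b"payload".to_vec();
//     append_format_version(&mut out)?;            // appends version byte + magic u16
//     let (data, version) = read_format_version(OwnedBytes::new(out))?;
//     assert_eq!(version, FASTFIELD_FORMAT_VERSION);
//     assert_eq!(data.as_slice(), b"payload");     // footer stripped from the end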

@@ -45,7 +45,7 @@ mod tests {
    use std::io;
    use std::num::NonZeroU64;

    use ownedbytes::OwnedBytes;
    use common::OwnedBytes;

    use crate::gcd::{compute_gcd, find_gcd};
    use crate::{FastFieldCodecType, VecColumn};

@@ -14,30 +14,35 @@ extern crate more_asserts;
#[cfg(all(test, feature = "unstable"))]
extern crate test;

use std::io;
use std::io::Write;
use std::sync::Arc;
use std::{fmt, io};

use common::BinarySerializable;
use common::{BinarySerializable, OwnedBytes};
use compact_space::CompactSpaceDecompressor;
use format_version::read_format_version;
use monotonic_mapping::{
    StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
    StrictlyMonotonicMappingToInternalBaseval, StrictlyMonotonicMappingToInternalGCDBaseval,
};
use ownedbytes::OwnedBytes;
use null_index_footer::read_null_index_footer;
use serialize::{Header, U128Header};

mod bitpacked;
mod blockwise_linear;
mod compact_space;
mod format_version;
mod line;
mod linear;
mod monotonic_mapping;
mod monotonic_mapping_u128;
#[allow(dead_code)]
mod null_index;
mod null_index_footer;

mod column;
mod gcd;
mod serialize;
pub mod serialize;

use self::bitpacked::BitpackedCodec;
use self::blockwise_linear::BlockwiseLinearCodec;

@@ -128,9 +133,11 @@ impl U128FastFieldCodecType {
}

/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open_u128<Item: MonotonicallyMappableToU128>(
    mut bytes: OwnedBytes,
pub fn open_u128<Item: MonotonicallyMappableToU128 + fmt::Debug>(
    bytes: OwnedBytes,
) -> io::Result<Arc<dyn Column<Item>>> {
    let (bytes, _format_version) = read_format_version(bytes)?;
    let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
    let header = U128Header::deserialize(&mut bytes)?;
    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
    let reader = CompactSpaceDecompressor::open(bytes)?;

@@ -140,9 +147,11 @@ pub fn open_u128<Item: MonotonicallyMappableToU128>(
}

/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open<T: MonotonicallyMappableToU64>(
    mut bytes: OwnedBytes,
pub fn open<T: MonotonicallyMappableToU64 + fmt::Debug>(
    bytes: OwnedBytes,
) -> io::Result<Arc<dyn Column<T>>> {
    let (bytes, _format_version) = read_format_version(bytes)?;
    let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
    let header = Header::deserialize(&mut bytes)?;
    match header.codec_type {
        FastFieldCodecType::Bitpacked => open_specific_codec::<BitpackedCodec, _>(bytes, &header),

@@ -153,7 +162,7 @@ pub fn open<T: MonotonicallyMappableToU64>(
    }
}

fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64>(
fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64 + fmt::Debug>(
    bytes: OwnedBytes,
    header: &Header,
) -> io::Result<Arc<dyn Column<Item>>> {

@@ -314,6 +323,9 @@ mod tests {
    pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
        let mut data_and_names = vec![];

        let data = vec![10];
        data_and_names.push((data, "minimal test"));

        let data = (10..=10_000_u64).collect::<Vec<_>>();
        data_and_names.push((data, "simple monotonically increasing"));

@@ -321,6 +333,9 @@ mod tests {
            vec![5, 6, 7, 8, 9, 10, 99, 100],
            "offset in linear interpol",
        ));

        data_and_names.push((vec![3, 18446744073709551613, 5], "docid range regression"));

        data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
        data_and_names.push((vec![10], "single value"));

@@ -428,7 +443,7 @@ mod tests {
mod bench {
    use std::sync::Arc;

    use ownedbytes::OwnedBytes;
    use common::OwnedBytes;
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::{self, Bencher};

@@ -1,7 +1,6 @@
use std::io::{self, Write};

use common::BinarySerializable;
use ownedbytes::OwnedBytes;
use common::{BinarySerializable, OwnedBytes};
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::line::Line;

@@ -25,13 +24,13 @@ impl Column for LinearReader {
        interpoled_val.wrapping_add(bitpacked_diff)
    }

    #[inline]
    #[inline(always)]
    fn min_value(&self) -> u64 {
        // The LinearReader assumes a normalized vector.
        0u64
    }

    #[inline]
    #[inline(always)]
    fn max_value(&self) -> u64 {
        self.header.max_value
    }

@@ -6,10 +6,10 @@ use std::io::BufRead;
use std::net::{IpAddr, Ipv6Addr};
use std::str::FromStr;

use common::OwnedBytes;
use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
use itertools::Itertools;
use measure_time::print_time;
use ownedbytes::OwnedBytes;
use prettytable::{Cell, Row, Table};

fn print_set_stats(ip_addrs: &[u128]) {

@@ -1,4 +1,6 @@
use std::fmt;
use std::marker::PhantomData;
use std::ops::RangeInclusive;

use fastdivide::DividerU64;

@@ -6,7 +8,9 @@ use crate::MonotonicallyMappableToU128;

/// Monotonically maps a value to the u64 value space.
/// Monotonic mapping enables `PartialOrd` on u64 space without conversion to original space.
pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync {
pub trait MonotonicallyMappableToU64:
    'static + PartialOrd + Copy + Send + Sync + fmt::Debug
{
    /// Converts a value to u64.
    ///
    /// Internally all fast field values are encoded as u64.

@@ -29,11 +33,29 @@ pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync
/// mapping from their range to their domain. The `inverse` method is required when opening a codec,
/// so a value can be converted back to its original domain (e.g. ip address or f64) from its
/// internal representation.
pub trait StrictlyMonotonicFn<External, Internal> {
pub trait StrictlyMonotonicFn<External: Copy, Internal: Copy> {
    /// Strictly monotonically maps the value from External to Internal.
    fn mapping(&self, inp: External) -> Internal;
    /// Inverse of `mapping`. Maps the value from Internal to External.
    fn inverse(&self, out: Internal) -> External;

    /// Maps a user-provided value from External to Internal.
    /// It may be necessary to coerce the value if it is outside the value space.
    /// In that case it tries to find the next greater value in the value space.
    ///
    /// Returns a bool to mark if a value was outside the value space and had to be coerced _up_.
    /// With that information we can detect if two values in a range both map outside the same value
    /// space.
    ///
    /// coerce_up means the next valid upper value in the value space will be chosen if the value
    /// has to be coerced.
    fn mapping_coerce(&self, inp: RangeInclusive<External>) -> RangeInclusive<Internal> {
        self.mapping(*inp.start())..=self.mapping(*inp.end())
    }
    /// Inverse of `mapping_coerce`.
    fn inverse_coerce(&self, out: RangeInclusive<Internal>) -> RangeInclusive<External> {
        self.inverse(*out.start())..=self.inverse(*out.end())
    }
}
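
// To make the coercion semantics concrete (hypothetical numbers): for the base-value
// mapping defined below with `min_value = 100`, `mapping_coerce(90..=150)` clamps the
// out-of-space start 90 up to 100 and yields `0..=50`, while `mapping_coerce(10..=20)`
// (entirely below the value space) yields the empty range `1..=0`.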

/// Inverts a strictly monotonic mapping from `StrictlyMonotonicFn<A, B>` to

@@ -54,15 +76,29 @@ impl<T> From<T> for StrictlyMonotonicMappingInverter<T> {
}

impl<From, To, T> StrictlyMonotonicFn<To, From> for StrictlyMonotonicMappingInverter<T>
where T: StrictlyMonotonicFn<From, To>
where
    T: StrictlyMonotonicFn<From, To>,
    From: Copy,
    To: Copy,
{
    #[inline(always)]
    fn mapping(&self, val: To) -> From {
        self.orig_mapping.inverse(val)
    }

    #[inline(always)]
    fn inverse(&self, val: From) -> To {
        self.orig_mapping.mapping(val)
    }

    #[inline]
    fn mapping_coerce(&self, inp: RangeInclusive<To>) -> RangeInclusive<From> {
        self.orig_mapping.inverse_coerce(inp)
    }
    #[inline]
    fn inverse_coerce(&self, out: RangeInclusive<From>) -> RangeInclusive<To> {
        self.orig_mapping.mapping_coerce(out)
    }
}

/// Applies the strictly monotonic mapping from `T` without any additional changes.

@@ -82,10 +118,12 @@ impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
    StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU128
{
    #[inline(always)]
    fn mapping(&self, inp: External) -> u128 {
        External::to_u128(inp)
    }

    #[inline(always)]
    fn inverse(&self, out: u128) -> External {
        External::from_u128(out)
    }

@@ -95,10 +133,12 @@ impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
    StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU64
{
    #[inline(always)]
    fn mapping(&self, inp: External) -> u64 {
        External::to_u64(inp)
    }

    #[inline(always)]
    fn inverse(&self, out: u64) -> External {
        External::from_u64(out)
    }

@@ -126,14 +166,41 @@ impl StrictlyMonotonicMappingToInternalGCDBaseval {
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
    for StrictlyMonotonicMappingToInternalGCDBaseval
{
    #[inline(always)]
    fn mapping(&self, inp: External) -> u64 {
        self.gcd_divider
            .divide(External::to_u64(inp) - self.min_value)
    }

    #[inline(always)]
    fn inverse(&self, out: u64) -> External {
        External::from_u64(self.min_value + out * self.gcd)
    }

    #[inline]
    #[allow(clippy::reversed_empty_ranges)]
    fn mapping_coerce(&self, inp: RangeInclusive<External>) -> RangeInclusive<u64> {
        let end = External::to_u64(*inp.end());
        if end < self.min_value || inp.end() < inp.start() {
            return 1..=0;
        }
        let map_coerce = |mut inp, coerce_up| {
            let inp_lower_bound = self.inverse(0);
            if inp < inp_lower_bound {
                inp = inp_lower_bound;
            }
            let val = External::to_u64(inp);
            let need_coercion = coerce_up && (val - self.min_value) % self.gcd != 0;
            let mut mapped_val = self.mapping(inp);
            if need_coercion {
                mapped_val += 1;
            }
            mapped_val
        };
        let start = map_coerce(*inp.start(), true);
        let end = map_coerce(*inp.end(), false);
        start..=end
    }
}
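
// Worked example (hypothetical numbers): with `min_value = 10` and `gcd = 5`, the
// representable values are 10, 15, 20, ... `mapping_coerce(12..=23)` coerces the
// off-grid start 12 up to 15 (mapped index 1) and maps the end 23 down to index
// (23 - 10) / 5 = 2, so the result is `1..=2`.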

/// Strictly monotonic mapping with a base value.

@@ -141,6 +208,7 @@ pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
    min_value: u64,
}
impl StrictlyMonotonicMappingToInternalBaseval {
    #[inline(always)]
    pub(crate) fn new(min_value: u64) -> Self {
        Self { min_value }
    }

@@ -149,20 +217,35 @@ impl StrictlyMonotonicMappingToInternalBaseval {
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
    for StrictlyMonotonicMappingToInternalBaseval
{
    #[inline]
    #[allow(clippy::reversed_empty_ranges)]
    fn mapping_coerce(&self, inp: RangeInclusive<External>) -> RangeInclusive<u64> {
        if External::to_u64(*inp.end()) < self.min_value {
            return 1..=0;
        }
        let start = self.mapping(External::to_u64(*inp.start()).max(self.min_value));
        let end = self.mapping(External::to_u64(*inp.end()));
        start..=end
    }

    #[inline(always)]
    fn mapping(&self, val: External) -> u64 {
        External::to_u64(val) - self.min_value
    }

    #[inline(always)]
    fn inverse(&self, val: u64) -> External {
        External::from_u64(self.min_value + val)
    }
}

impl MonotonicallyMappableToU64 for u64 {
    #[inline(always)]
    fn to_u64(self) -> u64 {
        self
    }

    #[inline(always)]
    fn from_u64(val: u64) -> Self {
        val
    }

@@ -192,11 +275,15 @@ impl MonotonicallyMappableToU64 for bool {
    }
}

// TODO remove me.
// Tantivy should refuse NaN values and work with NotNaN internally.
impl MonotonicallyMappableToU64 for f64 {
    #[inline(always)]
    fn to_u64(self) -> u64 {
        common::f64_to_u64(self)
    }

    #[inline(always)]
    fn from_u64(val: u64) -> Self {
        common::u64_to_f64(val)
    }

@@ -224,7 +311,7 @@ mod tests {
        test_round_trip::<_, _, u64>(&mapping, 100u64);
    }

    fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L>(
    fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L: Copy>(
        mapping: &T,
        test_val: K,
    ) {

@@ -1,8 +1,11 @@
use std::fmt;
use std::net::Ipv6Addr;

/// Monotonically maps a value to the u128 value space.
/// Monotonic mapping enables `PartialOrd` on u128 space without conversion to original space.
pub trait MonotonicallyMappableToU128: 'static + PartialOrd + Copy + Send + Sync {
pub trait MonotonicallyMappableToU128:
    'static + PartialOrd + Copy + Send + Sync + fmt::Debug
{
    /// Converts a value to u128.
    ///
    /// Internally all fast field values are encoded as u128.

fastfield_codecs/src/null_index/dense.rs (new file, 500 lines)
@@ -0,0 +1,500 @@
use std::convert::TryInto;
use std::io::{self, Write};

use common::{BinarySerializable, OwnedBytes};
use itertools::Itertools;

use super::{get_bit_at, set_bit_at};

/// For the `DenseCodec`, `data` contains the encoded blocks.
/// Each block consists of [u8; 12]. The first 8 bytes are a bitvec for 64 elements.
/// The last 4 bytes are the offset, i.e. the number of set bits so far.
///
/// When translating the original index to a dense index, the correct block can be computed
/// directly via `orig_idx/64`. Inside the block the position is `orig_idx%64`.
///
/// When translating a dense index to the original index, we can use the offset to find the correct
/// block. Direct computation is not possible, but we can employ a linear or binary search.
#[derive(Clone)]
pub struct DenseCodec {
    // data consists of blocks of 64 bits.
    //
    // The format is &[(u64, u32)]
    // u64 is the bitvec
    // u32 is the offset of the block, the number of set bits so far.
    //
    // At the end one block is appended, to store the number of values in the index in offset.
    data: OwnedBytes,
}
const ELEMENTS_PER_BLOCK: u32 = 64;
const BLOCK_BITVEC_SIZE: usize = 8;
const BLOCK_OFFSET_SIZE: usize = 4;
const SERIALIZED_BLOCK_SIZE: usize = BLOCK_BITVEC_SIZE + BLOCK_OFFSET_SIZE;
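
// Example of the block arithmetic described above: for original index 130, the block
// is 130 / 64 = 2, the position inside that block's bitvec is 130 % 64 = 2, and the
// block's 12 serialized bytes start at 2 * SERIALIZED_BLOCK_SIZE = 24.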

/// Interpreting the bitvec as a list of 64 bits from the low weight to the
/// high weight.
///
/// This function returns the number of bits set to 1 within
/// `[0..pos_in_bitvec)`.
#[inline]
fn count_ones(bitvec: u64, pos_in_bitvec: u32) -> u32 {
    let mask = (1u64 << pos_in_bitvec) - 1;
    let masked_bitvec = bitvec & mask;
    masked_bitvec.count_ones()
}

#[derive(Clone, Copy)]
struct DenseIndexBlock {
    bitvec: u64,
    offset: u32,
}

impl From<[u8; SERIALIZED_BLOCK_SIZE]> for DenseIndexBlock {
    fn from(data: [u8; SERIALIZED_BLOCK_SIZE]) -> Self {
        let bitvec = u64::from_le_bytes(data[..BLOCK_BITVEC_SIZE].try_into().unwrap());
        let offset = u32::from_le_bytes(data[BLOCK_BITVEC_SIZE..].try_into().unwrap());
        Self { bitvec, offset }
    }
}

impl DenseCodec {
    /// Open the DenseCodec from OwnedBytes
    pub fn open(data: OwnedBytes) -> Self {
        Self { data }
    }
    #[inline]
    /// Check if the value at the position is not null.
    pub fn exists(&self, idx: u32) -> bool {
        let block_pos = idx / ELEMENTS_PER_BLOCK;
        let bitvec = self.dense_index_block(block_pos).bitvec;
        let pos_in_bitvec = idx % ELEMENTS_PER_BLOCK;
        get_bit_at(bitvec, pos_in_bitvec)
    }
    #[inline]
    fn dense_index_block(&self, block_pos: u32) -> DenseIndexBlock {
        dense_index_block(&self.data, block_pos)
    }

    /// Return the number of non-null values in the index.
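    /// (This reads the offset of the sentinel block appended at the end of
    /// serialization, which stores the total number of set bits.)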
    pub fn num_non_nulls(&self) -> u32 {
        let last_block = (self.data.len() / SERIALIZED_BLOCK_SIZE) - 1;
        self.dense_index_block(last_block as u32).offset
    }

    #[inline]
    /// Translate from the original index to the codec index.
    pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
        let block_pos = idx / ELEMENTS_PER_BLOCK;
        let index_block = self.dense_index_block(block_pos);
        let pos_in_block_bit_vec = idx % ELEMENTS_PER_BLOCK;
        let ones_in_block = count_ones(index_block.bitvec, pos_in_block_bit_vec);
        if get_bit_at(index_block.bitvec, pos_in_block_bit_vec) {
            Some(index_block.offset + ones_in_block)
        } else {
            None
        }
    }

    /// Translate positions from the codec index to the original index.
    ///
    /// # Panics
    ///
    /// May panic if any `idx` is greater than the max codec index.
    pub fn translate_codec_idx_to_original_idx<'a>(
        &'a self,
        iter: impl Iterator<Item = u32> + 'a,
    ) -> impl Iterator<Item = u32> + 'a {
        let mut block_pos = 0u32;
        iter.map(move |dense_idx| {
            // update block_pos to limit search scope
            block_pos = find_block(dense_idx, block_pos, &self.data);
            let index_block = self.dense_index_block(block_pos);

            // The next offset is higher than dense_idx and therefore:
            // dense_idx <= offset + num_set_bits in block
            let mut num_set_bits = 0;
            for idx_in_bitvec in 0..ELEMENTS_PER_BLOCK {
                if get_bit_at(index_block.bitvec, idx_in_bitvec) {
                    num_set_bits += 1;
                }
                if num_set_bits == (dense_idx - index_block.offset + 1) {
                    let orig_idx = block_pos * ELEMENTS_PER_BLOCK + idx_in_bitvec;
                    return orig_idx;
                }
            }
            panic!("Internal Error: Offset calculation in dense idx seems to be wrong.");
        })
    }
}

#[inline]
fn dense_index_block(data: &[u8], block_pos: u32) -> DenseIndexBlock {
    let data_start_pos = block_pos as usize * SERIALIZED_BLOCK_SIZE;
    let block_data: [u8; SERIALIZED_BLOCK_SIZE] = data[data_start_pos..][..SERIALIZED_BLOCK_SIZE]
        .try_into()
        .unwrap();
    block_data.into()
}

#[inline]
/// Finds the block position containing the dense_idx.
///
/// # Correctness
/// dense_idx needs to be smaller than the number of values in the index.
///
/// The last offset number is equal to the number of values in the index.
fn find_block(dense_idx: u32, mut block_pos: u32, data: &[u8]) -> u32 {
    loop {
        let offset = dense_index_block(data, block_pos).offset;
        if offset > dense_idx {
            return block_pos - 1;
        }
        block_pos += 1;
    }
}

/// Serializes the dense codec; the iterator yields all values, `true` if set, otherwise `false`.
pub fn serialize_dense_codec(
    iter: impl Iterator<Item = bool>,
    mut out: impl Write,
) -> io::Result<()> {
    let mut offset: u32 = 0;

    for chunk in &iter.chunks(ELEMENTS_PER_BLOCK as usize) {
        let mut block: u64 = 0;
        for (pos, is_bit_set) in chunk.enumerate() {
            if is_bit_set {
                set_bit_at(&mut block, pos as u64);
            }
        }

        block.serialize(&mut out)?;
        offset.serialize(&mut out)?;

        offset += block.count_ones();
    }
    // Add a sentinel block for the offset
    let block: u64 = 0;
    block.serialize(&mut out)?;
    offset.serialize(&mut out)?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use proptest::prelude::{any, prop, *};
    use proptest::strategy::Strategy;
    use proptest::{prop_oneof, proptest};

    use super::*;

    fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
        prop_oneof![
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..64),
            1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(0.0), 0..64),
            8 => vec![any::<bool>()],
            2 => prop::collection::vec(any::<bool>(), 0..50),
        ]
        .boxed()
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(500))]
        #[test]
        fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
            let mut bitvec = Vec::new();
            bitvec.extend_from_slice(&bitvec1);
            bitvec.extend_from_slice(&bitvec2);
            bitvec.extend_from_slice(&bitvec3);
            test_null_index(bitvec);
        }
    }

    #[test]
    fn dense_codec_test_one_block_false() {
        let mut iter = vec![false; 64];
        iter.push(true);
        test_null_index(iter);
    }

    fn test_null_index(data: Vec<bool>) {
        let mut out = vec![];

        serialize_dense_codec(data.iter().cloned(), &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));

        let orig_idx_with_value: Vec<u32> = data
            .iter()
            .enumerate()
            .filter(|(_pos, val)| **val)
            .map(|(pos, _val)| pos as u32)
            .collect();

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..orig_idx_with_value.len() as u32)
                .collect_vec(),
            orig_idx_with_value
        );

        for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate() {
            assert_eq!(
                null_index.translate_to_codec_idx(*orig_idx),
                Some(dense_idx as u32)
            );
        }

        for (pos, value) in data.iter().enumerate() {
            assert_eq!(null_index.exists(pos as u32), *value);
        }
    }

    #[test]
    fn dense_codec_test_translation() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..2)
                .collect_vec(),
            vec![0, 2]
        );
    }

    #[test]
    fn dense_codec_translate() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert_eq!(null_index.translate_to_codec_idx(0), Some(0));
        assert_eq!(null_index.translate_to_codec_idx(2), Some(1));
    }

    #[test]
    fn dense_codec_test_small() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert!(null_index.exists(0));
        assert!(!null_index.exists(1));
        assert!(null_index.exists(2));
        assert!(!null_index.exists(3));
    }

    #[test]
    fn dense_codec_test_large() {
        let mut docs = vec![];
        docs.extend((0..1000).map(|_idx| false));
        docs.extend((0..=1000).map(|_idx| true));

        let iter = docs.iter().cloned();
        let mut out = vec![];
        serialize_dense_codec(iter, &mut out).unwrap();
        let null_index = DenseCodec::open(OwnedBytes::new(out));
        assert!(!null_index.exists(0));
        assert!(!null_index.exists(100));
        assert!(!null_index.exists(999));
        assert!(null_index.exists(1000));
        assert!(null_index.exists(1999));
        assert!(null_index.exists(2000));
        assert!(!null_index.exists(2001));
    }

    #[test]
    fn test_count_ones() {
        let mut block = 0;
        set_bit_at(&mut block, 0);
        set_bit_at(&mut block, 2);

        assert_eq!(count_ones(block, 0), 0);
        assert_eq!(count_ones(block, 1), 1);
        assert_eq!(count_ones(block, 2), 1);
        assert_eq!(count_ones(block, 3), 2);
    }
}

#[cfg(all(test, feature = "unstable"))]
mod bench {

    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::Bencher;

    use super::*;

    const TOTAL_NUM_VALUES: u32 = 1_000_000;
    fn gen_bools(fill_ratio: f64) -> DenseCodec {
        let mut out = Vec::new();
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let bools: Vec<_> = (0..TOTAL_NUM_VALUES)
            .map(|_| rng.gen_bool(fill_ratio))
            .collect();
        serialize_dense_codec(bools.into_iter(), &mut out).unwrap();

        let codec = DenseCodec::open(OwnedBytes::new(out));
        codec
    }

    fn random_range_iterator(
        start: u32,
        end: u32,
        avg_step_size: u32,
        avg_deviation: u32,
    ) -> impl Iterator<Item = u32> {
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let mut current = start;
        std::iter::from_fn(move || {
            current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
            if current >= end {
                None
            } else {
                Some(current)
            }
        })
    }

    fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
        let ratio = percent as f32 / 100.0;
        let step_size = (1f32 / ratio) as u32;
        let deviation = step_size - 1;
        random_range_iterator(0, num_values, step_size, deviation)
    }

    fn walk_over_data(codec: &DenseCodec, avg_step_size: u32) -> Option<u32> {
        walk_over_data_from_positions(
            codec,
            random_range_iterator(0, TOTAL_NUM_VALUES, avg_step_size, 0),
        )
    }

    fn walk_over_data_from_positions(
        codec: &DenseCodec,
        positions: impl Iterator<Item = u32>,
    ) -> Option<u32> {
        let mut dense_idx: Option<u32> = None;
        for idx in positions {
            dense_idx = dense_idx.or(codec.translate_to_codec_idx(idx));
        }
        dense_idx
    }

    #[bench]
    fn bench_translate_orig_to_codec_1percent_filled_10percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_5percent_filled_10percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.05f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_5percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.05f64);
        bench.iter(|| walk_over_data(&codec, 1000));
    }

    #[bench]
    fn bench_translate_orig_to_codec_full_scan_1percent_filled(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_translate_orig_to_codec_full_scan_10percent_filled(bench: &mut Bencher) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_translate_orig_to_codec_full_scan_90percent_filled(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_translate_orig_to_codec_10percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_50percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.5f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_90percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_codec_to_orig_1percent_filled_0comma005percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        let num_non_nulls = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(n_percent_step_iterator(0.005, num_non_nulls))
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_1percent_filled_10percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        let num_non_nulls = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(n_percent_step_iterator(10.0, num_non_nulls))
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_1percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_90percent_filled_0comma005percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.90f64);
        let num_non_nulls = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(n_percent_step_iterator(0.005, num_non_nulls))
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }
}

fastfield_codecs/src/null_index/mod.rs (new file, 14 lines)
@@ -0,0 +1,14 @@
pub use dense::{serialize_dense_codec, DenseCodec};

mod dense;
mod sparse;

#[inline]
fn get_bit_at(input: u64, n: u32) -> bool {
    input & (1 << n) != 0
}

#[inline]
fn set_bit_at(input: &mut u64, n: u64) {
    *input |= 1 << n;
}

fastfield_codecs/src/null_index/sparse.rs (new file, 768 lines)
@@ -0,0 +1,768 @@
use std::io::{self, Write};

use common::{BitSet, GroupByIteratorExtended, OwnedBytes};

use super::{serialize_dense_codec, DenseCodec};

/// `SparseCodec` is the codec for data where only few documents have values.
/// In contrast to `DenseCodec`, opening a `SparseCodec` causes runtime data to be produced, for
/// faster access.
///
/// The lower 16 bits of doc ids are stored as u16, while the upper 16 bits are given by the block
/// id. Each block contains 1<<16 docids.
///
/// # Serialized Data Layout
/// The data starts with the block data. Each block is either dense or sparse encoded, depending on
/// the number of values in the block. A block is sparse when it contains fewer than
/// DENSE_BLOCK_THRESHOLD (6144) values.
/// [Sparse data block | dense data block, .. #repeat*; Desc: Either a sparse or dense encoded
/// block]
/// ### Sparse block data
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block]
/// ### Dense block data
/// [Dense codec for the whole block; Desc: Similar to a bitvec(0..ELEMENTS_PER_BLOCK) + Metadata
/// for faster lookups. See dense.rs]
///
/// The data is followed by block metadata, to know which area of the raw block data belongs to
/// which block. Only metadata for blocks with elements is recorded, to
/// keep the overhead low for scenarios with many very sparse columns. The block metadata consists
/// of the block index and the number of values in the block. Since we don't store empty blocks,
/// num_vals is incremented by 1, e.g. 0 means 1 value.
///
/// The last u16 stores the number of metadata blocks.
/// [u16 LE, .. #repeat*; Desc: Positions with values in a block][(u16 LE, u16 LE), .. #repeat*;
/// Desc: (Block Id u16, Num Elements u16)][u16 LE; Desc: num blocks with values u16]
///
/// # Opening
/// When opening the data layout, the data is expanded to `Vec<SparseCodecBlockVariant>`, where the
/// index is the block index. For each block, `byte_start` and `offset` are computed.
pub struct SparseCodec {
    data: OwnedBytes,
    blocks: Vec<SparseCodecBlockVariant>,
}
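
// Example of the block addressing described above: doc id 65541 (0x0001_0005) lives
// in block 1 (its upper 16 bits) and is stored inside that block as the u16 value
// 5 (its lower 16 bits).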

/// The threshold for the number of elements after which we switch to dense block encoding
const DENSE_BLOCK_THRESHOLD: u32 = 6144;

const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;

/// 1.5 bits per element + 12 bytes (64 + 32 bits) for the sentinel block
const NUM_BYTES_DENSE_BLOCK: u32 = (ELEMENTS_PER_BLOCK + ELEMENTS_PER_BLOCK / 2 + 64 + 32) / 8;

#[derive(Clone)]
enum SparseCodecBlockVariant {
    Empty { offset: u32 },
    Dense(DenseBlock),
    Sparse(SparseBlock),
}

impl SparseCodecBlockVariant {
    /// The number of non-null values that precede this block.
    #[inline]
    fn offset(&self) -> u32 {
        match self {
            SparseCodecBlockVariant::Empty { offset } => *offset,
            SparseCodecBlockVariant::Dense(dense) => dense.offset,
            SparseCodecBlockVariant::Sparse(sparse) => sparse.offset,
        }
    }
}

/// A block covers at most `ELEMENTS_PER_BLOCK` values.
#[derive(Clone)]
struct DenseBlock {
    /// The number of values set before the block
    offset: u32,
    /// The data for the dense encoding
    codec: DenseCodec,
}

impl DenseBlock {
    #[inline]
    pub fn exists(&self, idx: u32) -> bool {
        self.codec.exists(idx)
    }
    #[inline]
    pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
        self.codec.translate_to_codec_idx(idx)
    }
    #[inline]
    pub fn translate_codec_idx_to_original_idx_iter<'a>(
        &'a self,
        iter: impl Iterator<Item = u32> + 'a,
    ) -> impl Iterator<Item = u32> + 'a {
        self.codec.translate_codec_idx_to_original_idx(iter)
    }
    #[inline]
    pub fn translate_codec_idx_to_original_idx(&self, idx: u32) -> u32 {
        self.codec
            .translate_codec_idx_to_original_idx(idx..=idx)
            .next()
            .unwrap()
    }
}

/// A block covers at most `ELEMENTS_PER_BLOCK` values.
#[derive(Debug, Copy, Clone)]
struct SparseBlock {
    /// The number of values in the block
    num_vals: u32,
    /// The number of values set before the block
    offset: u32,
    /// The start position of the data for the block
    byte_start: u32,
}

impl SparseBlock {
    fn empty_block(offset: u32) -> Self {
        Self {
            num_vals: 0,
            byte_start: 0,
            offset,
        }
    }

    #[inline]
    fn value_at_idx(&self, data: &[u8], idx: u16) -> u16 {
        let start_offset: usize = self.byte_start as usize + (idx as u32 as usize * 2);
        get_u16(data, start_offset)
    }

    #[inline]
    #[allow(clippy::comparison_chain)]
    // Looks for the element in the block. Returns its position if found.
    fn binary_search(&self, data: &[u8], target: u16) -> Option<u16> {
        let mut size = self.num_vals as u16;
        let mut left = 0;
        let mut right = size;
        // TODO try different implementations,
        // e.g. exponential search into binary search
        while left < right {
            let mid = left + size / 2;

            // TODO do the boundary check only once, and then use an
            // unsafe `value_at_idx`
            let mid_val = self.value_at_idx(data, mid);

            if target > mid_val {
                left = mid + 1;
            } else if target < mid_val {
                right = mid;
            } else {
                return Some(mid);
            }

            size = right - left;
        }
        None
    }
}

#[inline]
fn get_u16(data: &[u8], byte_position: usize) -> u16 {
    let bytes: [u8; 2] = data[byte_position..byte_position + 2].try_into().unwrap();
    u16::from_le_bytes(bytes)
}

const SERIALIZED_BLOCK_METADATA_SIZE: usize = 4;

fn deserialize_sparse_codec_block(data: &OwnedBytes) -> Vec<SparseCodecBlockVariant> {
    // The number of vals so far
    let mut offset = 0;
    let mut sparse_codec_blocks = Vec::new();
    let num_blocks = get_u16(data, data.len() - 2);
    let block_data_index_start =
        data.len() - 2 - num_blocks as usize * SERIALIZED_BLOCK_METADATA_SIZE;
    let mut byte_start = 0;
    for block_num in 0..num_blocks as usize {
        let block_data_index = block_data_index_start + SERIALIZED_BLOCK_METADATA_SIZE * block_num;
        let block_idx = get_u16(data, block_data_index);
        let num_vals = get_u16(data, block_data_index + 2) as u32 + 1;
        sparse_codec_blocks.resize(
            block_idx as usize,
            SparseCodecBlockVariant::Empty { offset },
        );

        if is_sparse(num_vals) {
            let block = SparseBlock {
                num_vals,
                offset,
                byte_start,
            };
            sparse_codec_blocks.push(SparseCodecBlockVariant::Sparse(block));
            byte_start += 2 * num_vals;
        } else {
            let block = DenseBlock {
                offset,
                codec: DenseCodec::open(data.slice(byte_start as usize..data.len()).clone()),
            };
            sparse_codec_blocks.push(SparseCodecBlockVariant::Dense(block));
            // Dense blocks have a fixed size spanning ELEMENTS_PER_BLOCK.
            byte_start += NUM_BYTES_DENSE_BLOCK;
        }

        offset += num_vals;
    }
    sparse_codec_blocks.push(SparseCodecBlockVariant::Empty { offset });
    sparse_codec_blocks
}

/// Splits a value address into lower and upper 16 bits.
/// The lower 16 bits are the value in the block.
/// The upper 16 bits are the block index.
#[derive(Debug, Clone, Copy)]
struct ValueAddr {
    block_idx: u16,
    value_in_block: u16,
}

/// Splits an idx into the block index and the value in the block
#[inline]
fn value_addr(idx: u32) -> ValueAddr {
    /// Static assert on the number of elements per block this method expects
    #[allow(clippy::assertions_on_constants)]
    const _: () = assert!(ELEMENTS_PER_BLOCK == (1 << 16));

    let value_in_block = idx as u16;
    let block_idx = (idx >> 16) as u16;
    ValueAddr {
        block_idx,
        value_in_block,
    }
}
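
// Worked example of the split above: value_addr(70_000) yields
// ValueAddr { block_idx: 1, value_in_block: 4_464 }, since 70_000 = 1 * 65_536 + 4_464.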

impl SparseCodec {
    /// Open the SparseCodec from OwnedBytes
    pub fn open(data: OwnedBytes) -> Self {
        let blocks = deserialize_sparse_codec_block(&data);
        Self { data, blocks }
    }

    #[inline]
    /// Check whether the value at the position is not null.
    pub fn exists(&self, idx: u32) -> bool {
        let value_addr = value_addr(idx);
        // There may be trailing nulls without data; those are not stored as blocks. It would be
        // possible to create empty blocks, but for that we would need to serialize the number of
        // values or pass them when opening.

        if let Some(block) = self.blocks.get(value_addr.block_idx as usize) {
            match block {
                SparseCodecBlockVariant::Empty { offset: _ } => false,
                SparseCodecBlockVariant::Dense(block) => {
                    block.exists(value_addr.value_in_block as u32)
                }
                SparseCodecBlockVariant::Sparse(block) => block
                    .binary_search(&self.data, value_addr.value_in_block)
                    .is_some(),
            }
        } else {
            false
        }
    }

    /// Return the number of non-null values in the index
    pub fn num_non_nulls(&self) -> u32 {
        self.blocks.last().map(|block| block.offset()).unwrap_or(0)
    }

    #[inline]
    /// Translate from the original index to the codec index.
    pub fn translate_to_codec_idx(&self, idx: u32) -> Option<u32> {
        let value_addr = value_addr(idx);
        let block = self.blocks.get(value_addr.block_idx as usize)?;

        match block {
            SparseCodecBlockVariant::Empty { offset: _ } => None,
            SparseCodecBlockVariant::Dense(block) => block
                .translate_to_codec_idx(value_addr.value_in_block as u32)
                .map(|pos_in_block| pos_in_block + block.offset),
            SparseCodecBlockVariant::Sparse(block) => {
                let pos_in_block = block.binary_search(&self.data, value_addr.value_in_block);
                pos_in_block.map(|pos_in_block: u16| block.offset + pos_in_block as u32)
            }
        }
    }

    #[inline]
    fn find_block(&self, dense_idx: u32, mut block_pos: u32) -> u32 {
        loop {
            let offset = self.blocks[block_pos as usize].offset();
            if offset > dense_idx {
                return block_pos - 1;
            }
            block_pos += 1;
        }
    }

    /// Translate positions from the codec index to the original index.
    /// Correctness: The provided values must be in increasing order.
    ///
    /// # Panics
    ///
    /// May panic if any `idx` is greater than the max codec index.
    pub fn translate_codec_idx_to_original_idx<'a>(
        &'a self,
        iter: impl Iterator<Item = u32> + 'a,
    ) -> impl Iterator<Item = u32> + 'a {
        let mut block_pos = 0u32;
        iter.group_by(move |codec_idx| {
            block_pos = self.find_block(*codec_idx, block_pos);
            block_pos
        })
        .flat_map(move |(block_pos, block_iter)| {
            let block_doc_idx_start = block_pos * ELEMENTS_PER_BLOCK;
            let block = &self.blocks[block_pos as usize];
            let offset = block.offset();
            let indexes_in_block_iter = block_iter.map(move |codec_idx| codec_idx - offset);
            match block {
                SparseCodecBlockVariant::Empty { offset: _ } => {
                    panic!(
                        "invalid input, cannot translate to original index. associated empty \
                         block with dense idx. block_pos {}, idx_in_block {:?}",
                        block_pos,
                        indexes_in_block_iter.collect::<Vec<_>>()
                    )
                }
                SparseCodecBlockVariant::Dense(dense) => {
                    Box::new(dense.translate_codec_idx_to_original_idx_iter(indexes_in_block_iter))
                        as Box<dyn Iterator<Item = u32>>
                }
                SparseCodecBlockVariant::Sparse(block) => {
                    Box::new(indexes_in_block_iter.map(move |idx_in_block| {
                        block.value_at_idx(&self.data, idx_in_block as u16) as u32
                    }))
                }
            }
            .map(move |idx| idx + block_doc_idx_start)
        })
    }
}
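
// Sketch of a round trip through the translation above (inputs must be increasing):
// with values at docs [0, 2, 65_536],
//     codec.translate_codec_idx_to_original_idx(0..3).collect::<Vec<_>>() == vec![0, 2, 65_536]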

#[inline]
fn is_sparse(num_elem_in_block: u32) -> bool {
    num_elem_in_block < DENSE_BLOCK_THRESHOLD
}

#[derive(Default)]
struct BlockDataSerialized {
    block_idx: u16,
    num_vals: u32,
}

/// Serialize the sparse codec. `iter` is an iterator over the positions of the set values.
pub fn serialize_sparse_codec<W: Write>(
    mut iter: impl Iterator<Item = u32>,
    mut out: W,
) -> io::Result<()> {
    let mut block_metadata: Vec<BlockDataSerialized> = Vec::new();
    let mut current_block = Vec::new();
    // This if-statement for the first element ensures that
    // `block_metadata` is not empty in the loop below.
    if let Some(idx) = iter.next() {
        let value_addr = value_addr(idx);
        block_metadata.push(BlockDataSerialized {
            block_idx: value_addr.block_idx,
            num_vals: 1,
        });
        current_block.push(value_addr.value_in_block);
    }
    let flush_block = |current_block: &mut Vec<u16>, out: &mut W| -> io::Result<()> {
        let is_sparse = is_sparse(current_block.len() as u32);
        if is_sparse {
            for val_in_block in current_block.iter() {
                out.write_all(val_in_block.to_le_bytes().as_ref())?;
            }
        } else {
            let mut bitset = BitSet::with_max_value(ELEMENTS_PER_BLOCK + 1);
            for val_in_block in current_block.iter() {
                bitset.insert(*val_in_block as u32);
            }

            let iter = (0..ELEMENTS_PER_BLOCK).map(|idx| bitset.contains(idx));
            serialize_dense_codec(iter, out)?;
        }
        current_block.clear();
        Ok(())
    };
    for idx in iter {
        let value_addr = value_addr(idx);
        if block_metadata[block_metadata.len() - 1].block_idx == value_addr.block_idx {
            let last_idx_metadata = block_metadata.len() - 1;
            block_metadata[last_idx_metadata].num_vals += 1;
        } else {
            // flush the previous block
            flush_block(&mut current_block, &mut out)?;

            block_metadata.push(BlockDataSerialized {
                block_idx: value_addr.block_idx,
                num_vals: 1,
            });
        }
        current_block.push(value_addr.value_in_block);
    }
    // handle the last block
    flush_block(&mut current_block, &mut out)?;

    for block in &block_metadata {
        out.write_all(block.block_idx.to_le_bytes().as_ref())?;
        // We don't store empty blocks, therefore we can subtract 1.
        // This way we can still use a u16 when the number of elements is 1 << 16, i.e. u16::MAX + 1.
        out.write_all(((block.num_vals - 1) as u16).to_le_bytes().as_ref())?;
    }
    out.write_all((block_metadata.len() as u16).to_le_bytes().as_ref())?;

    Ok(())
}
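
// Illustrative output for the positions [0, 2, 65_536] (all u16s little endian):
// block 0 data: 0x0000 0x0002; block 1 data: 0x0000
// metadata:     (block_idx 0, num_vals - 1 = 1), (block_idx 1, num_vals - 1 = 0)
// trailing u16: 2 (the number of metadata entries)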

#[cfg(test)]
mod tests {
    use itertools::Itertools;
    use proptest::prelude::{any, prop, *};
    use proptest::strategy::Strategy;
    use proptest::{prop_oneof, proptest};

    use super::*;

    fn random_bitvec() -> BoxedStrategy<Vec<bool>> {
        prop_oneof![
            1 => prop::collection::vec(proptest::bool::weighted(1.0), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(0.00), 0..(ELEMENTS_PER_BLOCK as usize * 3)), // empty blocks
            1 => prop::collection::vec(proptest::bool::weighted(1.00), 0..(ELEMENTS_PER_BLOCK as usize + 10)), // full block
            1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..100),
            1 => prop::collection::vec(proptest::bool::weighted(0.01), 0..u16::MAX as usize),
            8 => vec![any::<bool>()],
        ]
        .boxed()
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(50))]
        #[test]
        fn test_with_random_bitvecs(bitvec1 in random_bitvec(), bitvec2 in random_bitvec(), bitvec3 in random_bitvec()) {
            let mut bitvec = Vec::new();
            bitvec.extend_from_slice(&bitvec1);
            bitvec.extend_from_slice(&bitvec2);
            bitvec.extend_from_slice(&bitvec3);
            test_null_index(bitvec);
        }
    }

    #[test]
    fn sparse_codec_test_one_block_false() {
        let mut iter = vec![false; ELEMENTS_PER_BLOCK as usize];
        iter.push(true);
        test_null_index(iter);
    }

    #[test]
    fn sparse_codec_test_one_block_true() {
        let mut iter = vec![true; ELEMENTS_PER_BLOCK as usize];
        iter.push(true);
        test_null_index(iter);
    }

    fn test_null_index(data: Vec<bool>) {
        let mut out = vec![];

        serialize_sparse_codec(
            data.iter()
                .cloned()
                .enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));

        let orig_idx_with_value: Vec<u32> = data
            .iter()
            .enumerate()
            .filter(|(_pos, val)| **val)
            .map(|(pos, _val)| pos as u32)
            .collect();

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..orig_idx_with_value.len() as u32)
                .collect_vec(),
            orig_idx_with_value
        );

        let step_size = (orig_idx_with_value.len() / 100).max(1);
        for (dense_idx, orig_idx) in orig_idx_with_value.iter().enumerate().step_by(step_size) {
            assert_eq!(
                null_index.translate_to_codec_idx(*orig_idx),
                Some(dense_idx as u32)
            );
        }

        // 100 samples
        let step_size = (data.len() / 100).max(1);
        for (pos, value) in data.iter().enumerate().step_by(step_size) {
            assert_eq!(null_index.exists(pos as u32), *value);
        }
    }

    #[test]
    fn sparse_codec_test_translation() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));

        assert_eq!(
            null_index
                .translate_codec_idx_to_original_idx(0..2)
                .collect_vec(),
            vec![0, 2]
        );
    }

    #[test]
    fn sparse_codec_translate() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));
        assert_eq!(null_index.translate_to_codec_idx(0), Some(0));
        assert_eq!(null_index.translate_to_codec_idx(2), Some(1));
    }

    #[test]
    fn sparse_codec_test_small() {
        let mut out = vec![];

        let iter = ([true, false, true, false]).iter().cloned();
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));
        assert!(null_index.exists(0));
        assert!(!null_index.exists(1));
        assert!(null_index.exists(2));
        assert!(!null_index.exists(3));
    }

    #[test]
    fn sparse_codec_test_large() {
        let mut docs = vec![];
        docs.extend((0..ELEMENTS_PER_BLOCK).map(|_idx| false));
        docs.extend((0..=1).map(|_idx| true));

        let iter = docs.iter().cloned();
        let mut out = vec![];
        serialize_sparse_codec(
            iter.enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();
        let null_index = SparseCodec::open(OwnedBytes::new(out));
        assert!(!null_index.exists(0));
        assert!(!null_index.exists(100));
        assert!(!null_index.exists(ELEMENTS_PER_BLOCK - 1));
        assert!(null_index.exists(ELEMENTS_PER_BLOCK));
        assert!(null_index.exists(ELEMENTS_PER_BLOCK + 1));
    }
}

#[cfg(all(test, feature = "unstable"))]
mod bench {

    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::Bencher;

    use super::*;

    const TOTAL_NUM_VALUES: u32 = 1_000_000;
    fn gen_bools(fill_ratio: f64) -> SparseCodec {
        let mut out = Vec::new();
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        serialize_sparse_codec(
            (0..TOTAL_NUM_VALUES)
                .map(|_| rng.gen_bool(fill_ratio))
                .enumerate()
                .filter(|(_pos, val)| *val)
                .map(|(pos, _val)| pos as u32),
            &mut out,
        )
        .unwrap();

        let codec = SparseCodec::open(OwnedBytes::new(out));
        codec
    }

    fn random_range_iterator(
        start: u32,
        end: u32,
        avg_step_size: u32,
        avg_deviation: u32,
    ) -> impl Iterator<Item = u32> {
        let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
        let mut current = start;
        std::iter::from_fn(move || {
            current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
            if current >= end {
                None
            } else {
                Some(current)
            }
        })
    }

    fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
        let ratio = percent / 100.0;
        let step_size = (1f32 / ratio) as u32;
        let deviation = step_size - 1;
        random_range_iterator(0, num_values, step_size, deviation)
    }

    fn walk_over_data(codec: &SparseCodec, avg_step_size: u32) -> Option<u32> {
        walk_over_data_from_positions(
            codec,
            random_range_iterator(0, TOTAL_NUM_VALUES, avg_step_size, 0),
        )
    }

    fn walk_over_data_from_positions(
        codec: &SparseCodec,
        positions: impl Iterator<Item = u32>,
    ) -> Option<u32> {
        let mut dense_idx: Option<u32> = None;
        for idx in positions {
            dense_idx = dense_idx.or(codec.translate_to_codec_idx(idx));
        }
        dense_idx
    }

    #[bench]
    fn bench_translate_orig_to_codec_1percent_filled_10percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_5percent_filled_10percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.05f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_5percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.05f64);
        bench.iter(|| walk_over_data(&codec, 1000));
    }

    #[bench]
    fn bench_translate_orig_to_codec_full_scan_1percent_filled(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_translate_orig_to_codec_full_scan_10percent_filled(bench: &mut Bencher) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_translate_orig_to_codec_full_scan_90percent_filled(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data_from_positions(&codec, 0..TOTAL_NUM_VALUES));
    }

    #[bench]
    fn bench_translate_orig_to_codec_10percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.1f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_50percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.5f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_orig_to_codec_90percent_filled_1percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        bench.iter(|| walk_over_data(&codec, 100));
    }

    #[bench]
    fn bench_translate_codec_to_orig_1percent_filled_0comma005percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        let num_non_nulls = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(n_percent_step_iterator(0.005, num_non_nulls))
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_1percent_filled_10percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        let num_non_nulls = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(n_percent_step_iterator(10.0, num_non_nulls))
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_1percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.01f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_90percent_filled_0comma005percent_hit(bench: &mut Bencher) {
        let codec = gen_bools(0.90f64);
        let num_non_nulls = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(n_percent_step_iterator(0.005, num_non_nulls))
                .last()
        });
    }

    #[bench]
    fn bench_translate_codec_to_orig_90percent_filled_full_scan(bench: &mut Bencher) {
        let codec = gen_bools(0.9f64);
        let num_vals = codec.num_non_nulls();
        bench.iter(|| {
            codec
                .translate_codec_idx_to_original_idx(0..num_vals)
                .last()
        });
    }
}

145  fastfield_codecs/src/null_index_footer.rs  (new file)
@@ -0,0 +1,145 @@
use std::io::{self, Write};
use std::ops::Range;

use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt};

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum FastFieldCardinality {
    Single = 1,
    Multi = 2,
}

impl BinarySerializable for FastFieldCardinality {
    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
        self.to_code().serialize(wrt)
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let code = u8::deserialize(reader)?;
        let codec_type: Self = Self::from_code(code).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`"))
        })?;
        Ok(codec_type)
    }
}

impl FastFieldCardinality {
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn from_code(code: u8) -> Option<Self> {
        match code {
            1 => Some(Self::Single),
            2 => Some(Self::Multi),
            _ => None,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum NullIndexCodec {
    Full = 1,
}

impl BinarySerializable for NullIndexCodec {
    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
        self.to_code().serialize(wrt)
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let code = u8::deserialize(reader)?;
        let codec_type: Self = Self::from_code(code).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`"))
        })?;
        Ok(codec_type)
    }
}

impl NullIndexCodec {
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

    pub(crate) fn from_code(code: u8) -> Option<Self> {
        match code {
            1 => Some(Self::Full),
            _ => None,
        }
    }
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub(crate) struct NullIndexFooter {
    pub(crate) cardinality: FastFieldCardinality,
    pub(crate) null_index_codec: NullIndexCodec,
    // Unused for NullIndexCodec::Full
    pub(crate) null_index_byte_range: Range<u64>,
}

impl BinarySerializable for NullIndexFooter {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        self.cardinality.serialize(writer)?;
        self.null_index_codec.serialize(writer)?;
        VInt(self.null_index_byte_range.start).serialize(writer)?;
        VInt(self.null_index_byte_range.end - self.null_index_byte_range.start)
            .serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let cardinality = FastFieldCardinality::deserialize(reader)?;
        let null_index_codec = NullIndexCodec::deserialize(reader)?;
        let null_index_byte_range_start = VInt::deserialize(reader)?.0;
        let null_index_byte_range_end = VInt::deserialize(reader)?.0 + null_index_byte_range_start;
        Ok(Self {
            cardinality,
            null_index_codec,
            null_index_byte_range: null_index_byte_range_start..null_index_byte_range_end,
        })
    }
}
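
// Wire-format sketch (cf. the round-trip test below): the byte range is stored as
// VInt(start) followed by VInt(len), so the range 100..120 serializes as VInt(100), VInt(20).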

pub(crate) fn append_null_index_footer(
    output: &mut impl io::Write,
    null_index_footer: NullIndexFooter,
) -> io::Result<()> {
    let mut counting_write = CountingWriter::wrap(output);
    null_index_footer.serialize(&mut counting_write)?;
    let footer_payload_len = counting_write.written_bytes();
    BinarySerializable::serialize(&(footer_payload_len as u16), &mut counting_write)?;

    Ok(())
}

pub(crate) fn read_null_index_footer(
    data: OwnedBytes,
) -> io::Result<(OwnedBytes, NullIndexFooter)> {
    let (data, null_footer_length_bytes) = data.rsplit(2);

    let footer_length = u16::deserialize(&mut null_footer_length_bytes.as_slice())?;
    let (data, null_index_footer_bytes) = data.rsplit(footer_length as usize);
    let null_index_footer = NullIndexFooter::deserialize(&mut null_index_footer_bytes.as_ref())?;

    Ok((data, null_index_footer))
}
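
// On-disk trailer layout produced by `append_null_index_footer` and consumed above:
// [payload | NullIndexFooter bytes | u16 LE footer length]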

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn null_index_footer_deser_test() {
        let null_index_footer = NullIndexFooter {
            cardinality: FastFieldCardinality::Single,
            null_index_codec: NullIndexCodec::Full,
            null_index_byte_range: 100..120,
        };

        let mut out = vec![];
        null_index_footer.serialize(&mut out).unwrap();

        assert_eq!(
            null_index_footer,
            NullIndexFooter::deserialize(&mut &out[..]).unwrap()
        );
    }
}
@@ -17,22 +17,25 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use std::num::NonZeroU64;
use std::sync::Arc;
use std::{fmt, io};

use common::{BinarySerializable, VInt};
use common::{BinarySerializable, OwnedBytes, VInt};
use log::warn;
use ownedbytes::OwnedBytes;

use crate::bitpacked::BitpackedCodec;
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::compact_space::CompactSpaceCompressor;
use crate::format_version::append_format_version;
use crate::linear::LinearCodec;
use crate::monotonic_mapping::{
    StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
    StrictlyMonotonicMappingToInternalGCDBaseval,
};
use crate::null_index_footer::{
    append_null_index_footer, FastFieldCardinality, NullIndexCodec, NullIndexFooter,
};
use crate::{
    monotonic_map_column, Column, FastFieldCodec, FastFieldCodecType, MonotonicallyMappableToU64,
    U128FastFieldCodecType, VecColumn, ALL_CODEC_TYPES,
@@ -164,7 +167,7 @@ impl BinarySerializable for Header {

/// Return the estimated compression for the given codec in the value range [0.0..1.0], where 1.0
/// means no compression.
pub fn estimate<T: MonotonicallyMappableToU64>(
pub fn estimate<T: MonotonicallyMappableToU64 + fmt::Debug>(
    typed_column: impl Column<T>,
    codec_type: FastFieldCodecType,
) -> Option<f32> {
@@ -189,6 +192,69 @@ pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
    iter_gen: F,
    num_vals: u32,
    output: &mut impl io::Write,
) -> io::Result<()> {
    serialize_u128_new(ValueIndexInfo::default(), iter_gen, num_vals, output)
}

#[allow(dead_code)]
pub enum ValueIndexInfo<'a> {
    MultiValue(Box<dyn MultiValueIndexInfo + 'a>),
    SingleValue(Box<dyn SingleValueIndexInfo + 'a>),
}

// TODO Remove me
impl Default for ValueIndexInfo<'static> {
    fn default() -> Self {
        struct Dummy {}
        impl SingleValueIndexInfo for Dummy {
            fn num_vals(&self) -> u32 {
                todo!()
            }
            fn num_non_nulls(&self) -> u32 {
                todo!()
            }
            fn iter(&self) -> Box<dyn Iterator<Item = u32>> {
                todo!()
            }
        }

        Self::SingleValue(Box::new(Dummy {}))
    }
}

impl<'a> ValueIndexInfo<'a> {
    fn get_cardinality(&self) -> FastFieldCardinality {
        match self {
            ValueIndexInfo::MultiValue(_) => FastFieldCardinality::Multi,
            ValueIndexInfo::SingleValue(_) => FastFieldCardinality::Single,
        }
    }
}

pub trait MultiValueIndexInfo {
    /// The number of docs in the column.
    fn num_docs(&self) -> u32;
    /// The number of values in the column.
    fn num_vals(&self) -> u32;
    /// Return the start index of the values for each doc.
    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_>;
}

pub trait SingleValueIndexInfo {
    /// The number of values including nulls in the column.
    fn num_vals(&self) -> u32;
    /// The number of non-null values in the column.
    fn num_non_nulls(&self) -> u32;
    /// Return an iterator over the positions of the docs with a value.
    fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_>;
}

/// Serializes u128 values with the compact space codec.
pub fn serialize_u128_new<F: Fn() -> I, I: Iterator<Item = u128>>(
    value_index: ValueIndexInfo,
    iter_gen: F,
    num_vals: u32,
    output: &mut impl io::Write,
) -> io::Result<()> {
    let header = U128Header {
        num_vals,
@@ -198,11 +264,29 @@ pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
    let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
    compressor.compress_into(iter_gen(), output).unwrap();

    let null_index_footer = NullIndexFooter {
        cardinality: value_index.get_cardinality(),
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 0..0,
    };
    append_null_index_footer(output, null_index_footer)?;
    append_format_version(output)?;

    Ok(())
}

/// Serializes the column with the codec with the best estimate on the data.
pub fn serialize<T: MonotonicallyMappableToU64>(
pub fn serialize<T: MonotonicallyMappableToU64 + fmt::Debug>(
    typed_column: impl Column<T>,
    output: &mut impl io::Write,
    codecs: &[FastFieldCodecType],
) -> io::Result<()> {
    serialize_new(ValueIndexInfo::default(), typed_column, output, codecs)
}

/// Serializes the column with the codec with the best estimate on the data.
pub fn serialize_new<T: MonotonicallyMappableToU64 + fmt::Debug>(
    value_index: ValueIndexInfo,
    typed_column: impl Column<T>,
    output: &mut impl io::Write,
    codecs: &[FastFieldCodecType],
@@ -221,6 +305,15 @@ pub fn serialize<T: MonotonicallyMappableToU64>(
    let normalized_column = header.normalize_column(column);
    assert_eq!(normalized_column.min_value(), 0u64);
    serialize_given_codec(normalized_column, header.codec_type, output)?;

    let null_index_footer = NullIndexFooter {
        cardinality: value_index.get_cardinality(),
        null_index_codec: NullIndexCodec::Full,
        null_index_byte_range: 0..0,
    };
    append_null_index_footer(output, null_index_footer)?;
    append_format_version(output)?;

    Ok(())
}

@@ -273,7 +366,7 @@ fn serialize_given_codec(
}

/// Helper function to serialize a column (autodetect from all codecs) and then open it
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default + fmt::Debug>(
    column: &[T],
) -> Arc<dyn Column<T>> {
    let mut buffer = Vec::new();
@@ -310,7 +403,7 @@ mod tests {
        let col = VecColumn::from(&[false, true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 1 byte of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 5 + 8);
        assert_eq!(buffer.len(), 3 + 5 + 8 + 4 + 2);
    }

    #[test]
@@ -319,7 +412,7 @@ mod tests {
        let col = VecColumn::from(&[true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 0 bytes of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 5 + 7);
        assert_eq!(buffer.len(), 3 + 5 + 7 + 4 + 2);
    }

    #[test]
@@ -329,6 +422,6 @@ mod tests {
        let col = VecColumn::from(&vals[..]);
        serialize(col, &mut buffer, &[FastFieldCodecType::Bitpacked]).unwrap();
        // Values are stored over 3 bits.
        assert_eq!(buffer.len(), 7 + (3 * 80 / 8) + 7);
        assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 7 + 4 + 2);
    }
}
@@ -1,10 +1,14 @@
|
||||
[package]
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
name = "ownedbytes"
|
||||
version = "0.3.0"
|
||||
version = "0.5.0"
|
||||
edition = "2021"
|
||||
description = "Expose data as static slice"
|
||||
license = "MIT"
|
||||
documentation = "https://docs.rs/ownedbytes/"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
repository = "https://github.com/quickwit-oss/tantivy"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io, mem};

use stable_deref_trait::StableDeref;
pub use stable_deref_trait::StableDeref;

/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a slice.
@@ -80,6 +80,21 @@ impl OwnedBytes {
        (left, right)
    }

    /// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
    ///
    /// Right will hold `split_len` bytes.
    ///
    /// This operation is cheap and does not require copying any memory.
    /// On the other hand, both `left` and `right` retain a handle over
    /// the entire slice of memory. In other words, the memory will only
    /// be released when both left and right are dropped.
    #[inline]
    #[must_use]
    pub fn rsplit(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
        let data_len = self.data.len();
        self.split(data_len - split_len)
    }
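
    // Usage sketch for `rsplit` (OwnedBytes derefs to `[u8]`):
    //     let bytes = OwnedBytes::new(vec![1u8, 2, 3, 4]);
    //     let (left, right) = bytes.rsplit(1);
    //     assert_eq!(&*left, &[1, 2, 3]);
    //     assert_eq!(&*right, &[4]);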

    /// Splits the right part of the `OwnedBytes` at the given offset.
    ///
    /// `self` is truncated to `split_len` and left with the remaining bytes.
@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
version = "0.18.0"
version = "0.19.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -11,7 +11,7 @@ use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::fastfield::{type_and_cardinality, FastType, MultiValuedFastFieldReader};
use crate::fastfield::{type_and_cardinality, MultiValuedFastFieldReader};
use crate::schema::{Cardinality, Type};
use crate::{InvertedIndexReader, SegmentReader, TantivyError};

@@ -194,13 +194,7 @@ fn get_ff_reader_and_validate(
        .ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
    let field_type = reader.schema().get_field_entry(field).field_type();

    if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) {
        if ff_type == FastType::Date {
            return Err(TantivyError::InvalidArgument(
                "Unsupported field type date in aggregation".to_string(),
            ));
        }

    if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
        if cardinality != field_cardinality {
            return Err(TantivyError::InvalidArgument(format!(
                "Invalid field cardinality on field {} expected {:?}, but got {:?}",
@@ -12,6 +12,7 @@ use super::bucket::GetDocCount;
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
use super::metric::{SingleMetricResult, Stats};
use super::Key;
use crate::schema::Schema;
use crate::TantivyError;

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
@@ -129,9 +130,12 @@ pub enum BucketResult {
}

impl BucketResult {
    pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
    pub(crate) fn empty_from_req(
        req: &BucketAggregationInternal,
        schema: &Schema,
    ) -> crate::Result<Self> {
        let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
        empty_bucket.into_final_bucket_result(req)
        empty_bucket.into_final_bucket_result(req, schema)
    }
}

@@ -174,6 +178,9 @@ pub enum BucketEntries<T> {
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BucketEntry {
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The string representation of the bucket.
    pub key_as_string: Option<String>,
    /// The identifier of the bucket.
    pub key: Key,
    /// Number of documents in the bucket.
@@ -238,4 +245,10 @@ pub struct RangeBucketEntry {
    /// The to range of the bucket. Equals `f64::MAX` when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<f64>,
    /// The optional string representation for the `from` range.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from_as_string: Option<String>,
    /// The optional string representation for the `to` range.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to_as_string: Option<String>,
}
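
// With these fields, a date histogram bucket can now serialize with both keys, e.g.
// (values taken from the histogram date test further below):
//     {"key": 1546300800000000.0, "key_as_string": "2019-01-01T00:00:00Z", "doc_count": 1}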

@@ -10,12 +10,12 @@ use crate::aggregation::agg_req_with_accessor::{
    AggregationsWithAccessor, BucketAggregationWithAccessor,
};
use crate::aggregation::agg_result::BucketEntry;
use crate::aggregation::f64_from_fastfield_u64;
use crate::aggregation::intermediate_agg_result::{
    IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::schema::Type;
use crate::aggregation::{f64_from_fastfield_u64, format_date};
use crate::schema::{Schema, Type};
use crate::{DocId, TantivyError};

/// Histogram is a bucket aggregation, where buckets are created dynamically for the given
/// `interval`.
@@ -206,6 +206,7 @@ pub struct SegmentHistogramCollector {
    field_type: Type,
    interval: f64,
    offset: f64,
    min_doc_count: u64,
    first_bucket_num: i64,
    bounds: HistogramBounds,
}
@@ -215,6 +216,30 @@ impl SegmentHistogramCollector {
        self,
        agg_with_accessor: &BucketAggregationWithAccessor,
    ) -> crate::Result<IntermediateBucketResult> {
        // Compute the number of buckets to validate against the max number of buckets.
        // Note: We use min_doc_count here, but it's only a lower bound here, since we are on the
        // intermediate level and after merging the number of documents of a bucket could exceed
        // `min_doc_count`.
        {
            let cut_off_buckets_front = self
                .buckets
                .iter()
                .take_while(|bucket| bucket.doc_count <= self.min_doc_count)
                .count();
            let cut_off_buckets_back = self.buckets[cut_off_buckets_front..]
                .iter()
                .rev()
                .take_while(|bucket| bucket.doc_count <= self.min_doc_count)
                .count();
            let estimate_num_buckets =
                self.buckets.len() - cut_off_buckets_front - cut_off_buckets_back;

            agg_with_accessor
                .bucket_count
                .add_count(estimate_num_buckets as u32);
            agg_with_accessor.bucket_count.validate_bucket_count()?;
        }

        let mut buckets = Vec::with_capacity(
            self.buckets
                .iter()
@@ -251,11 +276,6 @@ impl SegmentHistogramCollector {
            );
        };

        agg_with_accessor
            .bucket_count
            .add_count(buckets.len() as u32);
        agg_with_accessor.bucket_count.validate_bucket_count()?;

        Ok(IntermediateBucketResult::Histogram { buckets })
    }

@@ -308,6 +328,7 @@ impl SegmentHistogramCollector {
            first_bucket_num,
            bounds,
            sub_aggregations,
            min_doc_count: req.min_doc_count(),
        })
    }

@@ -380,7 +401,7 @@ impl SegmentHistogramCollector {

            debug_assert_eq!(
                self.buckets[bucket_pos].key,
                get_bucket_val(val, self.interval, self.offset) as f64
                get_bucket_val(val, self.interval, self.offset)
            );
            self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
        }
@@ -407,7 +428,7 @@ impl SegmentHistogramCollector {
            if bounds.contains(val) {
                debug_assert_eq!(
                    self.buckets[bucket_pos].key,
                    get_bucket_val(val, self.interval, self.offset) as f64
                    get_bucket_val(val, self.interval, self.offset)
                );

                self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;
@@ -451,6 +472,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
    buckets: Vec<IntermediateHistogramBucketEntry>,
    histogram_req: &HistogramAggregation,
    sub_aggregation: &AggregationsInternal,
    schema: &Schema,
) -> crate::Result<Vec<BucketEntry>> {
    // Generate the full list of buckets without gaps.
    //
@@ -491,7 +513,9 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
            sub_aggregation: empty_sub_aggregation.clone(),
        },
    })
    .map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
    .map(|intermediate_bucket| {
        intermediate_bucket.into_final_bucket_entry(sub_aggregation, schema)
    })
    .collect::<crate::Result<Vec<_>>>()
}

@@ -500,20 +524,43 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
    buckets: Vec<IntermediateHistogramBucketEntry>,
    histogram_req: &HistogramAggregation,
    sub_aggregation: &AggregationsInternal,
    schema: &Schema,
) -> crate::Result<Vec<BucketEntry>> {
    if histogram_req.min_doc_count() == 0 {
    let mut buckets = if histogram_req.min_doc_count() == 0 {
        // With min_doc_count == 0, we may need to add buckets, so that there are no
        // gaps, since the intermediate result does not contain empty buckets (filtered to
        // reduce the serialization size).

        intermediate_buckets_to_final_buckets_fill_gaps(buckets, histogram_req, sub_aggregation)
        intermediate_buckets_to_final_buckets_fill_gaps(
            buckets,
            histogram_req,
            sub_aggregation,
            schema,
        )?
    } else {
        buckets
            .into_iter()
            .filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
            .map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
            .collect::<crate::Result<Vec<_>>>()
            .map(|histogram_bucket| {
                histogram_bucket.into_final_bucket_entry(sub_aggregation, schema)
            })
            .collect::<crate::Result<Vec<_>>>()?
    };

    // If we have a date type on the histogram buckets, we add the `key_as_string` field as rfc3339
    let field = schema
        .get_field(&histogram_req.field)
        .ok_or_else(|| TantivyError::FieldNotFound(histogram_req.field.to_string()))?;
    if schema.get_field_entry(field).field_type().is_date() {
        for bucket in buckets.iter_mut() {
            if let crate::aggregation::Key::F64(val) = bucket.key {
                let key_as_string = format_date(val as i64)?;
                bucket.key_as_string = Some(key_as_string);
            }
        }
    }

    Ok(buckets)
}

/// Applies req extended_bounds/hard_bounds on the min_max value
@@ -1372,6 +1419,63 @@ mod tests {
        Ok(())
    }

    #[test]
    fn histogram_date_test_single_segment() -> crate::Result<()> {
        histogram_date_test_with_opt(true)
    }

    #[test]
    fn histogram_date_test_multi_segment() -> crate::Result<()> {
        histogram_date_test_with_opt(false)
    }

    fn histogram_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
        let index = get_test_index_2_segments(merge_segments)?;

        let agg_req: Aggregations = vec![(
            "histogram".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
                    field: "date".to_string(),
                    interval: 86400000000.0, // one day in microseconds
                    ..Default::default()
                }),
                sub_aggregation: Default::default(),
            }),
        )]
        .into_iter()
        .collect();

        let agg_res = exec_request(agg_req, &index)?;

        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;

        assert_eq!(res["histogram"]["buckets"][0]["key"], 1546300800000000.0);
        assert_eq!(
            res["histogram"]["buckets"][0]["key_as_string"],
            "2019-01-01T00:00:00Z"
        );
        assert_eq!(res["histogram"]["buckets"][0]["doc_count"], 1);

        assert_eq!(res["histogram"]["buckets"][1]["key"], 1546387200000000.0);
        assert_eq!(
            res["histogram"]["buckets"][1]["key_as_string"],
            "2019-01-02T00:00:00Z"
        );

        assert_eq!(res["histogram"]["buckets"][1]["doc_count"], 5);

        assert_eq!(res["histogram"]["buckets"][2]["key"], 1546473600000000.0);
        assert_eq!(
            res["histogram"]["buckets"][2]["key_as_string"],
            "2019-01-03T00:00:00Z"
        );

        assert_eq!(res["histogram"]["buckets"][3], Value::Null);

        Ok(())
    }

    #[test]
    fn histogram_invalid_request() -> crate::Result<()> {
        let index = get_test_index_2_segments(true)?;
@@ -1438,4 +1542,36 @@ mod tests {

        Ok(())
    }

    #[test]
    fn histogram_test_max_buckets_segments() -> crate::Result<()> {
        let values = vec![0.0, 70000.0];

        let index = get_test_index_from_values(true, &values)?;

        let agg_req: Aggregations = vec![(
            "my_interval".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
                    field: "score_f64".to_string(),
                    interval: 1.0,
                    ..Default::default()
                }),
                sub_aggregation: Default::default(),
            }),
        )]
        .into_iter()
        .collect();

        let res = exec_request(agg_req, &index);

        assert_eq!(
            res.unwrap_err().to_string(),
            "An invalid argument was passed: 'Aborting aggregation because too many buckets were \
             created'"
                .to_string()
        );

        Ok(())
    }
}
@@ -1,6 +1,7 @@
use std::fmt::Debug;
use std::ops::Range;

use fastfield_codecs::MonotonicallyMappableToU64;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

@@ -11,7 +12,9 @@ use crate::aggregation::intermediate_agg_result::{
    IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
use crate::aggregation::{
    f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
};
use crate::schema::Type;
use crate::{DocId, TantivyError};

@@ -181,7 +184,7 @@ impl SegmentRangeCollector {
    .into_iter()
    .map(move |range_bucket| {
        Ok((
            range_to_string(&range_bucket.range, &field_type),
            range_to_string(&range_bucket.range, &field_type)?,
            range_bucket
                .bucket
                .into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
@@ -209,8 +212,8 @@ impl SegmentRangeCollector {
    let key = range
        .key
        .clone()
        .map(Key::Str)
        .unwrap_or_else(|| range_to_key(&range.range, &field_type));
        .map(|key| Ok(Key::Str(key)))
        .unwrap_or_else(|| range_to_key(&range.range, &field_type))?;
    let to = if range.range.end == u64::MAX {
        None
    } else {
@@ -228,6 +231,7 @@ impl SegmentRangeCollector {
            sub_aggregation,
        )?)
    };

    Ok(SegmentRangeAndBucketEntry {
        range: range.range.clone(),
        bucket: SegmentRangeBucketEntry {
@@ -402,34 +406,45 @@ fn extend_validate_ranges(
    Ok(converted_buckets)
}

pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> crate::Result<String> {
    // is_start is there for malformed requests, e.g. if the user passes the range u64::MIN..0.0,
    // it should be rendered as "*-0" and not "*-*"
    let to_str = |val: u64, is_start: bool| {
        if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
            "*".to_string()
            Ok("*".to_string())
        } else if *field_type == Type::Date {
            let val = i64::from_u64(val);
            format_date(val)
        } else {
            f64_from_fastfield_u64(val, field_type).to_string()
            Ok(f64_from_fastfield_u64(val, field_type).to_string())
        }
    };

    format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
    Ok(format!(
        "{}-{}",
        to_str(range.start, true)?,
        to_str(range.end, false)?
    ))
}
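
// Example output (derived from the rules above, matching the range date test below):
// for a date field, the range u64::MIN..1546300800000000 renders as "*-2019-01-01T00:00:00Z".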

pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
    Key::Str(range_to_string(range, field_type))
pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> crate::Result<Key> {
    Ok(Key::Str(range_to_string(range, field_type)?))
}

#[cfg(test)]
mod tests {

    use fastfield_codecs::MonotonicallyMappableToU64;
    use serde_json::Value;

    use super::*;
    use crate::aggregation::agg_req::{
        Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
    };
    use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};
    use crate::aggregation::tests::{
        exec_request, exec_request_with_query, get_test_index_2_segments,
        get_test_index_with_num_docs,
    };

    pub fn get_collector_from_ranges(
        ranges: Vec<RangeAggregationRange>,
@@ -567,6 +582,77 @@ mod tests {
        Ok(())
    }

    #[test]
    fn range_date_test_single_segment() -> crate::Result<()> {
        range_date_test_with_opt(true)
    }

    #[test]
    fn range_date_test_multi_segment() -> crate::Result<()> {
        range_date_test_with_opt(false)
    }

    fn range_date_test_with_opt(merge_segments: bool) -> crate::Result<()> {
        let index = get_test_index_2_segments(merge_segments)?;

        let agg_req: Aggregations = vec![(
            "date_ranges".to_string(),
            Aggregation::Bucket(BucketAggregation {
                bucket_agg: BucketAggregationType::Range(RangeAggregation {
                    field: "date".to_string(),
                    ranges: vec![
                        RangeAggregationRange {
                            key: None,
                            from: None,
                            to: Some(1546300800000000.0f64),
                        },
                        RangeAggregationRange {
                            key: None,
                            from: Some(1546300800000000.0f64),
                            to: Some(1546387200000000.0f64),
                        },
                    ],
                    keyed: false,
                }),
                sub_aggregation: Default::default(),
            }),
        )]
        .into_iter()
        .collect();

        let agg_res = exec_request(agg_req, &index)?;

        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;

        assert_eq!(
            res["date_ranges"]["buckets"][0]["from_as_string"],
            Value::Null
        );
        assert_eq!(
            res["date_ranges"]["buckets"][0]["key"],
            "*-2019-01-01T00:00:00Z"
        );
        assert_eq!(
            res["date_ranges"]["buckets"][1]["from_as_string"],
            "2019-01-01T00:00:00Z"
        );
        assert_eq!(
            res["date_ranges"]["buckets"][1]["to_as_string"],
            "2019-01-02T00:00:00Z"
        );

        assert_eq!(
            res["date_ranges"]["buckets"][2]["from_as_string"],
            "2019-01-02T00:00:00Z"
        );
        assert_eq!(
            res["date_ranges"]["buckets"][2]["to_as_string"],
            Value::Null
        );

        Ok(())
    }

    #[test]
    fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
        let index = get_test_index_with_num_docs(false, 100)?;
@@ -7,6 +7,7 @@ use super::intermediate_agg_result::IntermediateAggregationResults;
 use super::segment_agg_result::SegmentAggregationResultsCollector;
 use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
 use crate::collector::{Collector, SegmentCollector};
+use crate::schema::Schema;
 use crate::{SegmentReader, TantivyError};

 /// The default max bucket count, before the aggregation fails.
@@ -16,6 +17,7 @@ pub const MAX_BUCKET_COUNT: u32 = 65000;
 ///
 /// The collector collects all aggregations by the underlying aggregation request.
 pub struct AggregationCollector {
+    schema: Schema,
     agg: Aggregations,
     max_bucket_count: u32,
 }
@@ -25,8 +27,9 @@ impl AggregationCollector {
     ///
     /// Aggregation fails when the total bucket count is higher than max_bucket_count.
     /// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
-    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
+    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>, schema: Schema) -> Self {
         Self {
+            schema,
             agg,
             max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
         }
@@ -113,7 +116,7 @@ impl Collector for AggregationCollector {
         segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
     ) -> crate::Result<Self::Fruit> {
         let res = merge_fruits(segment_fruits)?;
-        res.into_final_bucket_result(self.agg.clone())
+        res.into_final_bucket_result(self.agg.clone(), &self.schema)
     }
 }

src/aggregation/date.rs (new file, 18 lines)
@@ -0,0 +1,18 @@
+use time::format_description::well_known::Rfc3339;
+use time::OffsetDateTime;
+
+use crate::TantivyError;
+
+pub(crate) fn format_date(val: i64) -> crate::Result<String> {
+    let datetime =
+        OffsetDateTime::from_unix_timestamp_nanos(1_000 * (val as i128)).map_err(|err| {
+            TantivyError::InvalidArgument(format!(
+                "Could not convert {:?} to OffsetDateTime, err {:?}",
+                val, err
+            ))
+        })?;
+    let key_as_string = datetime
+        .format(&Rfc3339)
+        .map_err(|_err| TantivyError::InvalidArgument("Could not serialize date".to_string()))?;
+    Ok(key_as_string)
+}
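For orientation (not part of the diff): `format_date` receives tantivy's microsecond timestamps. A minimal standalone sketch of the same conversion, assuming only the `time` crate, matching the range bound used in the date-range test above:

```rust
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

fn main() {
    // 1_546_300_800_000_000 microseconds since the Unix epoch.
    let micros: i64 = 1_546_300_800_000_000;
    // format_date multiplies by 1_000 to get nanoseconds for the `time` API.
    let datetime = OffsetDateTime::from_unix_timestamp_nanos(1_000 * micros as i128).unwrap();
    assert_eq!(datetime.format(&Rfc3339).unwrap(), "2019-01-01T00:00:00Z");
}
```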
@@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize};

 use super::agg_req::{
     Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
-    MetricAggregation,
+    MetricAggregation, RangeAggregation,
 };
 use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
 use super::bucket::{
@@ -19,9 +19,11 @@ use super::bucket::{
 };
 use super::metric::{IntermediateAverage, IntermediateStats};
 use super::segment_agg_result::SegmentMetricResultCollector;
-use super::{Key, SerializedKey, VecWithNames};
+use super::{format_date, Key, SerializedKey, VecWithNames};
 use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
 use crate::aggregation::bucket::TermsAggregationInternal;
+use crate::schema::Schema;
+use crate::TantivyError;

 /// Contains the intermediate aggregation result, which is optimized to be merged with other
 /// intermediate results.
@@ -35,8 +37,12 @@ pub struct IntermediateAggregationResults {

 impl IntermediateAggregationResults {
     /// Convert intermediate result and its aggregation request to the final result.
-    pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
-        self.into_final_bucket_result_internal(&(req.into()))
+    pub fn into_final_bucket_result(
+        self,
+        req: Aggregations,
+        schema: &Schema,
+    ) -> crate::Result<AggregationResults> {
+        self.into_final_bucket_result_internal(&(req.into()), schema)
     }

     /// Convert intermediate result and its aggregation request to the final result.
@@ -46,6 +52,7 @@ impl IntermediateAggregationResults {
     pub(crate) fn into_final_bucket_result_internal(
         self,
         req: &AggregationsInternal,
+        schema: &Schema,
     ) -> crate::Result<AggregationResults> {
         // Important assumption:
         // When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
@@ -53,11 +60,11 @@ impl IntermediateAggregationResults {
         let mut results: FxHashMap<String, AggregationResult> = FxHashMap::default();

         if let Some(buckets) = self.buckets {
-            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
+            convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets, schema)?
         } else {
             // When there are no buckets, we create empty buckets, so that the serialized json
             // format is constant
-            add_empty_final_buckets_to_result(&mut results, &req.buckets)?
+            add_empty_final_buckets_to_result(&mut results, &req.buckets, schema)?
         };

         if let Some(metrics) = self.metrics {
@@ -158,10 +165,12 @@ fn add_empty_final_metrics_to_result(
 fn add_empty_final_buckets_to_result(
     results: &mut FxHashMap<String, AggregationResult>,
     req_buckets: &VecWithNames<BucketAggregationInternal>,
+    schema: &Schema,
 ) -> crate::Result<()> {
     let requested_buckets = req_buckets.iter();
     for (key, req) in requested_buckets {
-        let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
+        let empty_bucket =
+            AggregationResult::BucketResult(BucketResult::empty_from_req(req, schema)?);
         results.insert(key.to_string(), empty_bucket);
     }
     Ok(())
@@ -171,12 +180,13 @@ fn convert_and_add_final_buckets_to_result(
     results: &mut FxHashMap<String, AggregationResult>,
     buckets: VecWithNames<IntermediateBucketResult>,
     req_buckets: &VecWithNames<BucketAggregationInternal>,
+    schema: &Schema,
 ) -> crate::Result<()> {
     assert_eq!(buckets.len(), req_buckets.len());

     let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
     for ((key, bucket), req) in buckets_with_request {
-        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
+        let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req, schema)?);
         results.insert(key, result);
     }
     Ok(())
@@ -194,21 +204,23 @@ pub enum IntermediateAggregationResult {
 /// Holds the intermediate data for metric results
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub enum IntermediateMetricResult {
-    /// Average containing intermediate average data result
+    /// Intermediate average result
     Average(IntermediateAverage),
-    /// AverageData variant
+    /// Intermediate stats result
     Stats(IntermediateStats),
 }

 impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
     fn from(tree: SegmentMetricResultCollector) -> Self {
         match tree {
-            SegmentMetricResultCollector::Average(collector) => {
-                IntermediateMetricResult::Average(IntermediateAverage::from_collector(collector))
-            }
-            SegmentMetricResultCollector::Stats(collector) => {
-                IntermediateMetricResult::Stats(collector.stats)
-            }
+            SegmentMetricResultCollector::Stats(collector) => match collector.collecting_for {
+                super::metric::SegmentStatsType::Stats => {
+                    IntermediateMetricResult::Stats(collector.stats)
+                }
+                super::metric::SegmentStatsType::Avg => IntermediateMetricResult::Average(
+                    IntermediateAverage::from_collector(collector),
+                ),
+            },
         }
     }
 }
@@ -266,13 +278,21 @@ impl IntermediateBucketResult {
     pub(crate) fn into_final_bucket_result(
         self,
         req: &BucketAggregationInternal,
+        schema: &Schema,
     ) -> crate::Result<BucketResult> {
         match self {
             IntermediateBucketResult::Range(range_res) => {
                 let mut buckets: Vec<RangeBucketEntry> = range_res
                     .buckets
-                    .into_iter()
-                    .map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
+                    .into_values()
+                    .map(|bucket| {
+                        bucket.into_final_bucket_entry(
+                            &req.sub_aggregation,
+                            schema,
+                            req.as_range()
+                                .expect("unexpected aggregation, expected range aggregation"),
+                        )
+                    })
                     .collect::<crate::Result<Vec<_>>>()?;

                 buckets.sort_by(|left, right| {
@@ -303,6 +323,7 @@ impl IntermediateBucketResult {
                     req.as_histogram()
                         .expect("unexpected aggregation, expected histogram aggregation"),
                     &req.sub_aggregation,
+                    schema,
                 )?;

                 let buckets = if req.as_histogram().unwrap().keyed {
@@ -321,6 +342,7 @@ impl IntermediateBucketResult {
                 req.as_term()
                     .expect("unexpected aggregation, expected term aggregation"),
                 &req.sub_aggregation,
+                schema,
             ),
         }
     }
@@ -411,6 +433,7 @@ impl IntermediateTermBucketResult {
         self,
         req: &TermsAggregation,
         sub_aggregation_req: &AggregationsInternal,
+        schema: &Schema,
     ) -> crate::Result<BucketResult> {
         let req = TermsAggregationInternal::from_req(req);
         let mut buckets: Vec<BucketEntry> = self
@@ -419,11 +442,12 @@ impl IntermediateTermBucketResult {
             .filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
             .map(|(key, entry)| {
                 Ok(BucketEntry {
+                    key_as_string: None,
                     key: Key::Str(key),
                     doc_count: entry.doc_count,
                     sub_aggregation: entry
                         .sub_aggregation
-                        .into_final_bucket_result_internal(sub_aggregation_req)?,
+                        .into_final_bucket_result_internal(sub_aggregation_req, schema)?,
                 })
             })
             .collect::<crate::Result<_>>()?;
@@ -528,13 +552,15 @@ impl IntermediateHistogramBucketEntry {
     pub(crate) fn into_final_bucket_entry(
         self,
         req: &AggregationsInternal,
+        schema: &Schema,
     ) -> crate::Result<BucketEntry> {
         Ok(BucketEntry {
+            key_as_string: None,
             key: Key::F64(self.key),
             doc_count: self.doc_count,
             sub_aggregation: self
                 .sub_aggregation
-                .into_final_bucket_result_internal(req)?,
+                .into_final_bucket_result_internal(req, schema)?,
         })
     }
 }
@@ -571,16 +597,38 @@ impl IntermediateRangeBucketEntry {
     pub(crate) fn into_final_bucket_entry(
         self,
         req: &AggregationsInternal,
+        schema: &Schema,
+        range_req: &RangeAggregation,
     ) -> crate::Result<RangeBucketEntry> {
-        Ok(RangeBucketEntry {
+        let mut range_bucket_entry = RangeBucketEntry {
             key: self.key,
             doc_count: self.doc_count,
             sub_aggregation: self
                 .sub_aggregation
-                .into_final_bucket_result_internal(req)?,
+                .into_final_bucket_result_internal(req, schema)?,
             to: self.to,
             from: self.from,
-        })
+            to_as_string: None,
+            from_as_string: None,
+        };
+
+        // If the range aggregation runs on a date field, we additionally emit
+        // the bucket bounds as RFC 3339 strings (`from_as_string`/`to_as_string`).
+        let field = schema
+            .get_field(&range_req.field)
+            .ok_or_else(|| TantivyError::FieldNotFound(range_req.field.to_string()))?;
+        if schema.get_field_entry(field).field_type().is_date() {
+            if let Some(val) = range_bucket_entry.to {
+                let key_as_string = format_date(val as i64)?;
+                range_bucket_entry.to_as_string = Some(key_as_string);
+            }
+            if let Some(val) = range_bucket_entry.from {
+                let key_as_string = format_date(val as i64)?;
+                range_bucket_entry.from_as_string = Some(key_as_string);
+            }
+        }
+
+        Ok(range_bucket_entry)
     }
 }

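The schema lookup above is the crux of the change: before formatting bounds, the aggregation resolves the request's field name and checks for a date type. A minimal sketch of that check (assuming the tantivy schema API as used in this diff, where `get_field` returns an `Option`; the helper name is hypothetical):

```rust
use tantivy::schema::{Schema, FAST};

// Hypothetical helper mirroring the check above: does `field_name`
// resolve to a date field in this schema?
fn is_date_field(schema: &Schema, field_name: &str) -> bool {
    schema
        .get_field(field_name)
        .map(|field| schema.get_field_entry(field).field_type().is_date())
        .unwrap_or(false)
}

fn main() {
    let mut builder = Schema::builder();
    builder.add_date_field("date", FAST);
    let schema = builder.build();
    assert!(is_date_field(&schema, "date"));
    assert!(!is_date_field(&schema, "missing_field"));
}
```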
@@ -1,11 +1,8 @@
 use std::fmt::Debug;

-use fastfield_codecs::Column;
 use serde::{Deserialize, Serialize};

-use crate::aggregation::f64_from_fastfield_u64;
-use crate::schema::Type;
-use crate::DocId;
+use super::SegmentStatsCollector;

 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 /// A single-value metric aggregation that computes the average of numeric values that are
@@ -36,51 +33,6 @@ impl AverageAggregation {
     }
 }
-#[derive(Clone, PartialEq)]
-pub(crate) struct SegmentAverageCollector {
-    pub data: IntermediateAverage,
-    field_type: Type,
-}
-
-impl Debug for SegmentAverageCollector {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("AverageCollector")
-            .field("data", &self.data)
-            .finish()
-    }
-}
-
-impl SegmentAverageCollector {
-    pub fn from_req(field_type: Type) -> Self {
-        Self {
-            field_type,
-            data: Default::default(),
-        }
-    }
-    pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
-        let mut iter = doc.chunks_exact(4);
-        for docs in iter.by_ref() {
-            let val1 = field.get_val(docs[0]);
-            let val2 = field.get_val(docs[1]);
-            let val3 = field.get_val(docs[2]);
-            let val4 = field.get_val(docs[3]);
-            let val1 = f64_from_fastfield_u64(val1, &self.field_type);
-            let val2 = f64_from_fastfield_u64(val2, &self.field_type);
-            let val3 = f64_from_fastfield_u64(val3, &self.field_type);
-            let val4 = f64_from_fastfield_u64(val4, &self.field_type);
-            self.data.collect(val1);
-            self.data.collect(val2);
-            self.data.collect(val3);
-            self.data.collect(val4);
-        }
-        for &doc in iter.remainder() {
-            let val = field.get_val(doc);
-            let val = f64_from_fastfield_u64(val, &self.field_type);
-            self.data.collect(val);
-        }
-    }
-}

 /// Contains mergeable version of average data.
 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct IntermediateAverage {
@@ -89,8 +41,11 @@ pub struct IntermediateAverage {
 }

 impl IntermediateAverage {
-    pub(crate) fn from_collector(collector: SegmentAverageCollector) -> Self {
-        collector.data
+    pub(crate) fn from_collector(collector: SegmentStatsCollector) -> Self {
+        Self {
+            sum: collector.stats.sum,
+            doc_count: collector.stats.count,
+        }
     }

     /// Merge average data into this instance.
@@ -106,9 +61,4 @@ impl IntermediateAverage {
         Some(self.sum / self.doc_count as f64)
     }
-
-    #[inline]
-    fn collect(&mut self, val: f64) {
-        self.doc_count += 1;
-        self.sum += val;
-    }
 }

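The net effect of the two hunks above: the average no longer keeps its own collector; it is derived from the stats collector's `(sum, count)` pair and merged by addition. A self-contained sketch of that mergeable-average idea (field and method names follow the diff; everything else is illustrative):

```rust
// Keep (sum, doc_count) per segment, merge by addition, divide at the end.
#[derive(Default, Clone, Copy, Debug, PartialEq)]
struct IntermediateAverage {
    sum: f64,
    doc_count: u64,
}

impl IntermediateAverage {
    fn merge_fruits(&mut self, other: IntermediateAverage) {
        self.sum += other.sum;
        self.doc_count += other.doc_count;
    }

    fn finalize(&self) -> Option<f64> {
        if self.doc_count == 0 {
            None // no documents, no average
        } else {
            Some(self.sum / self.doc_count as f64)
        }
    }
}

fn main() {
    let mut left = IntermediateAverage { sum: 6.0, doc_count: 2 };
    let right = IntermediateAverage { sum: 4.0, doc_count: 2 };
    left.merge_fruits(right);
    assert_eq!(left.finalize(), Some(2.5));
}
```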
@@ -40,7 +40,7 @@ impl StatsAggregation {
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct Stats {
     /// The number of documents.
-    pub count: usize,
+    pub count: u64,
     /// The sum of the fast field values.
     pub sum: f64,
     /// The standard deviation of the fast field values. `None` for count == 0.
@@ -73,11 +73,16 @@ impl Stats {
 /// `IntermediateStats` contains the mergeable version for stats.
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct IntermediateStats {
-    count: usize,
-    sum: f64,
-    squared_sum: f64,
-    min: f64,
-    max: f64,
+    /// the number of values
+    pub count: u64,
+    /// the sum of the values
+    pub sum: f64,
+    /// the squared sum of the values
+    pub squared_sum: f64,
+    /// the min value of the values
+    pub min: f64,
+    /// the max value of the values
+    pub max: f64,
 }
 impl Default for IntermediateStats {
     fn default() -> Self {
@@ -150,17 +155,25 @@ impl IntermediateStats {
     }
 }

+#[derive(Clone, Debug, PartialEq)]
+pub(crate) enum SegmentStatsType {
+    Stats,
+    Avg,
+}
+
 #[derive(Clone, Debug, PartialEq)]
 pub(crate) struct SegmentStatsCollector {
     pub(crate) stats: IntermediateStats,
     field_type: Type,
+    pub(crate) collecting_for: SegmentStatsType,
 }

 impl SegmentStatsCollector {
-    pub fn from_req(field_type: Type) -> Self {
+    pub fn from_req(field_type: Type, collecting_for: SegmentStatsType) -> Self {
         Self {
             field_type,
             stats: IntermediateStats::default(),
+            collecting_for,
         }
     }
     pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
@@ -222,7 +235,7 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let reader = index.reader()?;
         let searcher = reader.searcher();
@@ -300,7 +313,7 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
         let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();

@@ -12,7 +12,7 @@
 //!
 //! ## Prerequisite
 //! Currently aggregations work only on [fast fields](`crate::fastfield`). Single value fast fields
-//! of type `u64`, `f64`, `i64` and fast fields on text fields.
+//! of type `u64`, `f64`, `i64`, `date` and fast fields on text fields.
 //!
 //! ## Usage
 //! To use aggregations, build an aggregation request by constructing
@@ -53,9 +53,10 @@
 //! use tantivy::query::AllQuery;
 //! use tantivy::aggregation::agg_result::AggregationResults;
 //! use tantivy::IndexReader;
+//! use tantivy::schema::Schema;
 //!
 //! # #[allow(dead_code)]
-//! fn aggregate_on_index(reader: &IndexReader) {
+//! fn aggregate_on_index(reader: &IndexReader, schema: Schema) {
 //!     let agg_req: Aggregations = vec![
 //!         (
 //!             "average".to_string(),
@@ -67,7 +68,7 @@
 //!     .into_iter()
 //!     .collect();
 //!
-//!     let collector = AggregationCollector::from_aggs(agg_req, None);
+//!     let collector = AggregationCollector::from_aggs(agg_req, None, schema);
 //!
 //!     let searcher = reader.searcher();
 //!     let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
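Pieced together, the updated doc example now reads roughly like this (a sketch assuming the 0.19-era API shown in this diff; `AverageAggregation::from_field_name` and the `"score"` fast field are taken from the surrounding module and are illustrative):

```rust
use tantivy::aggregation::agg_req::{Aggregation, Aggregations, MetricAggregation};
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::metric::AverageAggregation;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::schema::Schema;
use tantivy::IndexReader;

fn aggregate_on_index(
    reader: &IndexReader,
    schema: Schema,
) -> tantivy::Result<AggregationResults> {
    // Request a single "average" metric over the "score" fast field.
    let agg_req: Aggregations = vec![(
        "average".to_string(),
        Aggregation::Metric(MetricAggregation::Average(
            AverageAggregation::from_field_name("score".to_string()),
        )),
    )]
    .into_iter()
    .collect();

    // The collector now takes the schema as its third argument.
    let collector = AggregationCollector::from_aggs(agg_req, None, schema);
    let searcher = reader.searcher();
    searcher.search(&AllQuery, &collector)
}
```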
@@ -157,6 +158,7 @@ mod agg_req_with_accessor;
 pub mod agg_result;
 pub mod bucket;
 mod collector;
+mod date;
 pub mod intermediate_agg_result;
 pub mod metric;
 mod segment_agg_result;
@@ -167,6 +169,7 @@ pub use collector::{
     AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
     MAX_BUCKET_COUNT,
 };
+pub(crate) use date::format_date;
 use fastfield_codecs::MonotonicallyMappableToU64;
 use itertools::Itertools;
 use serde::{Deserialize, Serialize};
@@ -283,11 +286,11 @@ impl Display for Key {
 /// Inverse of `to_fastfield_u64`. Used to convert to `f64` for metrics.
 ///
 /// # Panics
-/// Only `u64`, `f64`, and `i64` are supported.
+/// Only `u64`, `f64`, `date`, and `i64` are supported.
 pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
     match field_type {
         Type::U64 => val as f64,
-        Type::I64 => i64::from_u64(val) as f64,
+        Type::I64 | Type::Date => i64::from_u64(val) as f64,
         Type::F64 => f64::from_u64(val),
         _ => {
             panic!("unexpected type {:?}. This should not happen", field_type)
@@ -295,10 +298,9 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
     }
 }

-/// Converts the `f64` value to fast field value space.
+/// Converts the `f64` value to fast field value space, which is always u64.
 ///
-/// If the fast field has `u64`, values are stored as `u64` in the fast field.
-/// A `f64` value of e.g. `2.0` therefore needs to be converted to `2u64`.
+/// If the fast field has `u64`, values are stored unchanged as `u64` in the fast field.
 ///
 /// If the fast field has `f64` values are converted and stored to `u64` using a
 /// monotonic mapping.
@@ -308,7 +310,7 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
 pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
     match field_type {
         Type::U64 => Some(val as u64),
-        Type::I64 => Some((val as i64).to_u64()),
+        Type::I64 | Type::Date => Some((val as i64).to_u64()),
         Type::F64 => Some(val.to_u64()),
         _ => None,
     }
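For intuition (not tantivy's exact code, which lives in `fastfield_codecs`): the fast-field u64 space relies on order-preserving bijections per type. For `f64` the classic trick looks like this standalone sketch:

```rust
// An order-preserving f64 <-> u64 mapping of the kind
// f64_to_fastfield_u64 / f64_from_fastfield_u64 rely on.
const SIGN: u64 = 1 << 63;

fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if bits & SIGN == 0 {
        bits | SIGN // positive floats keep their order above the midpoint
    } else {
        !bits // negative floats are flipped so larger floats map higher
    }
}

fn u64_to_f64(val: u64) -> f64 {
    if val & SIGN != 0 {
        f64::from_bits(val & !SIGN)
    } else {
        f64::from_bits(!val)
    }
}

fn main() {
    for v in [-2.5f64, -1.0, 0.0, 1.0, 2.0] {
        assert_eq!(u64_to_f64(f64_to_u64(v)), v); // lossless round trip
    }
    // Ordering is preserved, so comparisons can run in u64 space.
    assert!(f64_to_u64(-1.0) < f64_to_u64(1.0));
    assert!(f64_to_u64(1.0) < f64_to_u64(2.0));
}
```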
@@ -317,6 +319,7 @@ pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
 #[cfg(test)]
 mod tests {
     use serde_json::Value;
+    use time::OffsetDateTime;

     use super::agg_req::{Aggregation, Aggregations, BucketAggregation};
     use super::bucket::RangeAggregation;
@@ -332,7 +335,7 @@ mod tests {
     use crate::aggregation::DistributedAggregationCollector;
     use crate::query::{AllQuery, TermQuery};
     use crate::schema::{Cardinality, IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
-    use crate::{Index, Term};
+    use crate::{DateTime, Index, Term};

     fn get_avg_req(field_name: &str) -> Aggregation {
         Aggregation::Metric(MetricAggregation::Average(
@@ -358,7 +361,7 @@ mod tests {
         index: &Index,
         query: Option<(&str, &str)>,
     ) -> crate::Result<Value> {
-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

         let reader = index.reader()?;
         let searcher = reader.searcher();
@@ -448,9 +451,9 @@ mod tests {
             text_field_id => term.to_string(),
             string_field_id => term.to_string(),
             score_field => i as u64,
-            score_field_f64 => i as f64,
+            score_field_f64 => i,
             score_field_i64 => i as i64,
-            fraction_field => i as f64/100.0,
+            fraction_field => i/100.0,
         ))?;
     }
     index_writer.commit()?;
@@ -552,10 +555,10 @@ mod tests {
         let searcher = reader.searcher();
         let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
         intermediate_agg_result
-            .into_final_bucket_result(agg_req)
+            .into_final_bucket_result(agg_req, &index.schema())
             .unwrap()
     } else {
-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

         let searcher = reader.searcher();
         searcher.search(&AllQuery, &collector).unwrap()
@@ -648,6 +651,7 @@ mod tests {
             .set_fast()
             .set_stored();
         let text_field = schema_builder.add_text_field("text", text_fieldtype);
+        let date_field = schema_builder.add_date_field("date", FAST);
         schema_builder.add_text_field("dummy_text", STRING);
         let score_fieldtype =
             crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
@@ -665,6 +669,7 @@ mod tests {
         // writing the segment
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800).unwrap()),
             score_field => 1u64,
             score_field_f64 => 1f64,
             score_field_i64 => 1i64,
@@ -673,6 +678,7 @@ mod tests {
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 3u64,
             score_field_f64 => 3f64,
             score_field_i64 => 3i64,
@@ -681,18 +687,21 @@ mod tests {
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 5u64,
             score_field_f64 => 5f64,
             score_field_i64 => 5i64,
         ))?;
         index_writer.add_document(doc!(
             text_field => "nohit",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 6u64,
             score_field_f64 => 6f64,
             score_field_i64 => 6i64,
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 7u64,
             score_field_f64 => 7f64,
             score_field_i64 => 7i64,
@@ -700,12 +709,14 @@ mod tests {
         index_writer.commit()?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400).unwrap()),
             score_field => 11u64,
             score_field_f64 => 11f64,
             score_field_i64 => 11i64,
         ))?;
         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
             score_field => 14u64,
             score_field_f64 => 14f64,
             score_field_i64 => 14i64,
@@ -713,6 +724,7 @@ mod tests {

         index_writer.add_document(doc!(
             text_field => "cool",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
             score_field => 44u64,
             score_field_f64 => 44.5f64,
             score_field_i64 => 44i64,
@@ -723,6 +735,7 @@ mod tests {
         // no hits segment
         index_writer.add_document(doc!(
             text_field => "nohit",
+            date_field => DateTime::from_utc(OffsetDateTime::from_unix_timestamp(1_546_300_800 + 86400 + 86400).unwrap()),
             score_field => 44u64,
             score_field_f64 => 44.5f64,
             score_field_i64 => 44i64,
@@ -795,7 +808,7 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
         let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -995,9 +1008,10 @@ mod tests {
             // Test de/serialization roundtrip on intermediate_agg_result
             let res: IntermediateAggregationResults =
                 serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap();
-            res.into_final_bucket_result(agg_req.clone()).unwrap()
+            res.into_final_bucket_result(agg_req.clone(), &index.schema())
+                .unwrap()
         } else {
-            let collector = AggregationCollector::from_aggs(agg_req.clone(), None);
+            let collector = AggregationCollector::from_aggs(agg_req.clone(), None, index.schema());

             let searcher = reader.searcher();
             searcher.search(&term_query, &collector).unwrap()
@@ -1055,7 +1069,7 @@ mod tests {
         );

         // Test empty result set
-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());
         let searcher = reader.searcher();
         searcher.search(&query_with_no_hits, &collector).unwrap();

@@ -1120,7 +1134,7 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();

@@ -1194,7 +1208,7 @@ mod tests {
             text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
             text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
             score_field => val as u64,
-            score_field_f64 => val as f64,
+            score_field_f64 => val,
             score_field_i64 => val as i64,
         ))?;
     }
@@ -1233,13 +1247,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&term_query, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&term_query, &collector).unwrap()
     });
 }

@@ -1264,13 +1275,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&term_query, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&term_query, &collector).unwrap()
     });
 }

@@ -1295,13 +1303,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&term_query, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&term_query, &collector).unwrap()
     });
 }

@@ -1334,13 +1339,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&term_query, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&term_query, &collector).unwrap()
     });
 }

@@ -1363,13 +1365,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&AllQuery, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&AllQuery, &collector).unwrap()
     });
 }

@@ -1392,13 +1391,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req, None);
+        let collector = AggregationCollector::from_aggs(agg_req, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&AllQuery, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&AllQuery, &collector).unwrap()
     });
 }

@@ -1429,13 +1425,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&AllQuery, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&AllQuery, &collector).unwrap()
     });
 }

@@ -1464,13 +1457,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&AllQuery, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&AllQuery, &collector).unwrap()
     });
 }

@@ -1503,13 +1493,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&AllQuery, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&AllQuery, &collector).unwrap()
     });
 }

@@ -1533,13 +1520,10 @@ mod tests {
         .into_iter()
         .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&AllQuery, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&AllQuery, &collector).unwrap()
    });
 }

@@ -1583,20 +1567,17 @@ mod tests {
                     ],
                     ..Default::default()
                 }),
-                sub_aggregation: sub_agg_req_1.clone(),
+                sub_aggregation: sub_agg_req_1,
             }),
         ),
     ]
     .into_iter()
     .collect();

-        let collector = AggregationCollector::from_aggs(agg_req_1, None);
+        let collector = AggregationCollector::from_aggs(agg_req_1, None, index.schema());

         let searcher = reader.searcher();
-        let agg_res: AggregationResults =
-            searcher.search(&term_query, &collector).unwrap().into();
-
-        agg_res
+        searcher.search(&term_query, &collector).unwrap()
     });
 }
}

@@ -15,7 +15,7 @@ use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTer
 use super::collector::MAX_BUCKET_COUNT;
 use super::intermediate_agg_result::{IntermediateAggregationResults, IntermediateBucketResult};
 use super::metric::{
-    AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation,
+    AverageAggregation, SegmentStatsCollector, SegmentStatsType, StatsAggregation,
 };
 use super::VecWithNames;
 use crate::aggregation::agg_req::BucketAggregationType;
@@ -163,7 +163,6 @@ impl SegmentAggregationResultsCollector {

 #[derive(Clone, Debug, PartialEq)]
 pub(crate) enum SegmentMetricResultCollector {
-    Average(SegmentAverageCollector),
     Stats(SegmentStatsCollector),
 }

@@ -171,22 +170,19 @@ impl SegmentMetricResultCollector {
     pub fn from_req_and_validate(req: &MetricAggregationWithAccessor) -> crate::Result<Self> {
         match &req.metric {
             MetricAggregation::Average(AverageAggregation { field: _ }) => {
-                Ok(SegmentMetricResultCollector::Average(
-                    SegmentAverageCollector::from_req(req.field_type),
+                Ok(SegmentMetricResultCollector::Stats(
+                    SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Avg),
                 ))
             }
             MetricAggregation::Stats(StatsAggregation { field: _ }) => {
                 Ok(SegmentMetricResultCollector::Stats(
-                    SegmentStatsCollector::from_req(req.field_type),
+                    SegmentStatsCollector::from_req(req.field_type, SegmentStatsType::Stats),
                 ))
             }
         }
     }
     pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) {
         match self {
-            SegmentMetricResultCollector::Average(avg_collector) => {
-                avg_collector.collect_block(doc, &*metric.accessor);
-            }
             SegmentMetricResultCollector::Stats(stats_collector) => {
                 stats_collector.collect_block(doc, &*metric.accessor);
             }
@@ -305,7 +301,7 @@ impl BucketCount {
     }
     pub(crate) fn add_count(&self, count: u32) {
         self.bucket_count
-            .fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
+            .fetch_add(count, std::sync::atomic::Ordering::Relaxed);
     }
     pub(crate) fn get_count(&self) -> u32 {
         self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)

|
||||
let mut facet = vec![];
|
||||
let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
|
||||
// TODO handle errors.
|
||||
if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
|
||||
if facet_dict.ord_to_term(facet_ord, &mut facet).is_ok() {
|
||||
if let Ok(facet) = Facet::from_encoded(facet) {
|
||||
facet_counts.insert(facet, count);
|
||||
}
|
||||
|
||||
@@ -170,7 +170,7 @@ pub trait Collector: Sync + Send {
|
||||
segment_ord: u32,
|
||||
reader: &SegmentReader,
|
||||
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
|
||||
let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
|
||||
let mut segment_collector = self.for_segment(segment_ord, reader)?;
|
||||
|
||||
match (reader.alive_bitset(), self.requires_scoring()) {
|
||||
(Some(alive_bitset), true) => {
|
||||
|
||||
@@ -149,7 +149,8 @@ impl IndexBuilder {
     /// Creates a new index using the [`RamDirectory`].
     ///
     /// The index will be allocated in anonymous memory.
-    /// This should only be used for unit tests.
+    /// This is useful for indexing a small set of documents,
+    /// e.g. for unit tests or a temporary in-memory index.
     pub fn create_in_ram(self) -> Result<Index, TantivyError> {
         let ram_directory = RamDirectory::create();
         self.create(ram_directory)
@@ -812,7 +813,7 @@ mod tests {
         let field = schema.get_field("num_likes").unwrap();
         let tempdir = TempDir::new().unwrap();
         let tempdir_path = PathBuf::from(tempdir.path());
-        let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
+        let index = Index::create_in_dir(tempdir_path, schema).unwrap();
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::OnCommit)

@@ -200,10 +200,7 @@ impl InvertedIndexReader {

 #[cfg(feature = "quickwit")]
 impl InvertedIndexReader {
-    pub(crate) async fn get_term_info_async(
-        &self,
-        term: &Term,
-    ) -> crate::AsyncIoResult<Option<TermInfo>> {
+    pub(crate) async fn get_term_info_async(&self, term: &Term) -> io::Result<Option<TermInfo>> {
         self.termdict.get_async(term.value_bytes()).await
     }

@@ -211,12 +208,8 @@ impl InvertedIndexReader {
     /// This method is for an advanced usage only.
     ///
     /// Most users should prefer using [`Self::read_postings()`] instead.
-    pub async fn warm_postings(
-        &self,
-        term: &Term,
-        with_positions: bool,
-    ) -> crate::AsyncIoResult<()> {
-        let term_info_opt = self.get_term_info_async(term).await?;
+    pub async fn warm_postings(&self, term: &Term, with_positions: bool) -> io::Result<()> {
+        let term_info_opt: Option<TermInfo> = self.get_term_info_async(term).await?;
         if let Some(term_info) = term_info_opt {
             self.postings_file_slice
                 .read_bytes_slice_async(term_info.postings_range.clone())
@@ -234,7 +227,7 @@ impl InvertedIndexReader {
     /// This method is for an advanced usage only.
     ///
     /// If you know which terms to pre-load, prefer using [`Self::warm_postings`] instead.
-    pub async fn warm_postings_full(&self, with_positions: bool) -> crate::AsyncIoResult<()> {
+    pub async fn warm_postings_full(&self, with_positions: bool) -> io::Result<()> {
         self.postings_file_slice.read_bytes_async().await?;
         if with_positions {
             self.positions_file_slice.read_bytes_async().await?;
@@ -243,7 +236,7 @@ impl InvertedIndexReader {
     }

     /// Returns the number of documents containing the term asynchronously.
-    pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
+    pub async fn doc_freq_async(&self, term: &Term) -> io::Result<u32> {
         Ok(self
             .get_term_info_async(term)
             .await?

@@ -198,11 +198,10 @@ impl Searcher {
         collector: &C,
         executor: &Executor,
     ) -> crate::Result<C::Fruit> {
-        let scoring_enabled = collector.requires_scoring();
-        let enabled_scoring = if scoring_enabled {
-            EnableScoring::Enabled(self)
+        let enabled_scoring = if collector.requires_scoring() {
+            EnableScoring::enabled_from_searcher(self)
         } else {
-            EnableScoring::Disabled(self.schema())
+            EnableScoring::disabled_from_searcher(self)
         };
         let weight = query.weight(enabled_scoring)?;
         let segment_readers = self.segment_readers();

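The two constructors replace the old `Enabled`/`Disabled` enum variants. A short sketch of using them outside `Searcher` (assuming the `EnableScoring` API as introduced here; the helper is hypothetical):

```rust
use tantivy::query::{EnableScoring, Query, Weight};
use tantivy::Searcher;

// Build a Weight, skipping score computation when the caller
// does not need scores.
fn weight_for(
    query: &dyn Query,
    searcher: &Searcher,
    requires_scoring: bool,
) -> tantivy::Result<Box<dyn Weight>> {
    let enable_scoring = if requires_scoring {
        EnableScoring::enabled_from_searcher(searcher)
    } else {
        EnableScoring::disabled_from_searcher(searcher)
    };
    query.weight(enable_scoring)
}
```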
@@ -75,7 +75,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {

         let mut prev_offset = 0;
         for (file_addr, offset) in self.offsets {
-            VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
+            VInt(offset - prev_offset).serialize(&mut self.write)?;
             file_addr.serialize(&mut self.write)?;
             prev_offset = offset;
         }

@@ -32,7 +32,7 @@ impl LockError {
 /// Error that may occur when opening a directory
 #[derive(Debug, Clone, Error)]
 pub enum OpenDirectoryError {
-    /// The underlying directory does not exists.
+    /// The underlying directory does not exist.
     #[error("Directory does not exist: '{0}'.")]
     DoesNotExist(PathBuf),
     /// The path exists but is not a directory.
@@ -151,8 +151,8 @@ impl fmt::Debug for Incompatibility {
 /// Error that may occur when accessing a file read
 #[derive(Debug, Clone, Error)]
 pub enum OpenReadError {
-    /// The file does not exists.
-    #[error("Files does not exists: {0:?}")]
+    /// The file does not exist.
+    #[error("File does not exist: {0:?}")]
     FileDoesNotExist(PathBuf),
     /// Any kind of io::Error.
     #[error(
@@ -181,8 +181,8 @@ impl OpenReadError {
 /// Error that may occur when trying to delete a file
 #[derive(Debug, Clone, Error)]
 pub enum DeleteError {
-    /// The file does not exists.
-    #[error("File does not exists: '{0}'.")]
+    /// The file does not exist.
+    #[error("File does not exist: '{0}'.")]
     FileDoesNotExist(PathBuf),
     /// Any kind of IO error that happens when
     /// interacting with the underlying IO device.

@@ -38,7 +38,7 @@ impl Footer {
         counting_write.write_all(serde_json::to_string(&self)?.as_ref())?;
         let footer_payload_len = counting_write.written_bytes();
         BinarySerializable::serialize(&(footer_payload_len as u32), write)?;
-        BinarySerializable::serialize(&(FOOTER_MAGIC_NUMBER as u32), write)?;
+        BinarySerializable::serialize(&FOOTER_MAGIC_NUMBER, write)?;
         Ok(())
     }

@@ -90,9 +90,10 @@ impl Footer {
             ));
         }

-        let footer: Footer = serde_json::from_slice(&file.read_bytes_slice(
-            file.len() - total_footer_size..file.len() - footer_metadata_len as usize,
-        )?)?;
+        let footer: Footer =
+            serde_json::from_slice(&file.read_bytes_slice(
+                file.len() - total_footer_size..file.len() - footer_metadata_len,
+            )?)?;

         let body = file.slice_to(file.len() - total_footer_size);
         Ok((footer, body))

@@ -388,7 +388,7 @@ mod tests_mmap_specific {
         let tempdir_path = PathBuf::from(tempdir.path());
         let living_files = HashSet::new();

-        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
+        let mmap_directory = MmapDirectory::open(tempdir_path).unwrap();
         let mut managed_directory = ManagedDirectory::wrap(Box::new(mmap_directory)).unwrap();
         let mut write = managed_directory.open_write(test_path1).unwrap();
         write.write_all(&[0u8, 1u8]).unwrap();

@@ -6,10 +6,10 @@ use std::path::{Path, PathBuf};
 use std::sync::{Arc, RwLock, Weak};
 use std::{fmt, result};

+use common::StableDeref;
 use fs2::FileExt;
 use memmap2::Mmap;
 use serde::{Deserialize, Serialize};
-use stable_deref_trait::StableDeref;
 use tempfile::TempDir;

 use crate::core::META_FILEPATH;
@@ -341,7 +341,7 @@ impl Directory for MmapDirectory {
     /// removed before the file is deleted.
     fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
         let full_path = self.resolve_path(path);
-        fs::remove_file(&full_path).map_err(|e| {
+        fs::remove_file(full_path).map_err(|e| {
             if e.kind() == io::ErrorKind::NotFound {
                 DeleteError::FileDoesNotExist(path.to_owned())
             } else {
@@ -395,7 +395,7 @@ impl Directory for MmapDirectory {
     fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
         let full_path = self.resolve_path(path);
         let mut buffer = Vec::new();
-        match File::open(&full_path) {
+        match File::open(full_path) {
             Ok(mut file) => {
                 file.read_to_end(&mut buffer).map_err(|io_error| {
                     OpenReadError::wrap_io_error(io_error, path.to_path_buf())
@@ -425,7 +425,7 @@ impl Directory for MmapDirectory {
         let file: File = OpenOptions::new()
             .write(true)
             .create(true) //< if the file does not exist yet, create it.
-            .open(&full_path)
+            .open(full_path)
             .map_err(LockError::wrap_io_error)?;
         if lock.is_blocking {
             file.lock_exclusive().map_err(LockError::wrap_io_error)?;

@@ -5,7 +5,6 @@ mod mmap_directory;

 mod directory;
 mod directory_lock;
-mod file_slice;
 mod file_watcher;
 mod footer;
 mod managed_directory;
@@ -20,13 +19,12 @@ mod composite_file;
 use std::io::BufWriter;
 use std::path::PathBuf;

-pub use common::{AntiCallToken, TerminatingWrite};
-pub use ownedbytes::OwnedBytes;
+pub use common::file_slice::{FileHandle, FileSlice};
+pub use common::{AntiCallToken, OwnedBytes, TerminatingWrite};

 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
-pub use self::file_slice::{FileHandle, FileSlice};
 pub use self::ram_directory::RamDirectory;
 pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};

@@ -232,7 +232,7 @@ impl Directory for RamDirectory {
         let path_buf = PathBuf::from(path);
         self.fs.write().unwrap().write(path_buf, data);
         if path == *META_FILEPATH {
-            let _ = self.fs.write().unwrap().watch_router.broadcast();
+            drop(self.fs.write().unwrap().watch_router.broadcast());
         }
         Ok(())
     }

@@ -168,7 +168,7 @@ mod tests {
         watch_event_router.broadcast().wait().unwrap();
         assert_eq!(2, counter.load(Ordering::SeqCst));
         mem::drop(handle_a);
-        let _ = watch_event_router.broadcast();
+        drop(watch_event_router.broadcast());
         watch_event_router.broadcast().wait().unwrap();
         assert_eq!(2, counter.load(Ordering::SeqCst));
     }

src/error.rs (22 lines changed)
@@ -104,28 +104,6 @@ pub enum TantivyError {
     InternalError(String),
 }

-#[cfg(feature = "quickwit")]
-#[derive(Error, Debug)]
-#[doc(hidden)]
-pub enum AsyncIoError {
-    #[error("io::Error `{0}`")]
-    Io(#[from] io::Error),
-    #[error("Asynchronous API is unsupported by this directory")]
-    AsyncUnsupported,
-}
-
-#[cfg(feature = "quickwit")]
-impl From<AsyncIoError> for TantivyError {
-    fn from(async_io_err: AsyncIoError) -> Self {
-        match async_io_err {
-            AsyncIoError::Io(io_err) => TantivyError::from(io_err),
-            AsyncIoError::AsyncUnsupported => {
-                TantivyError::SystemError(format!("{:?}", async_io_err))
-            }
-        }
-    }
-}
-
 impl From<io::Error> for TantivyError {
     fn from(io_err: io::Error) -> TantivyError {
         TantivyError::IoError(Arc::new(io_err))

@@ -1,8 +1,7 @@
 use std::io;
 use std::io::Write;

-use common::{intersect_bitsets, BitSet, ReadOnlyBitSet};
-use ownedbytes::OwnedBytes;
+use common::{intersect_bitsets, BitSet, OwnedBytes, ReadOnlyBitSet};

 use crate::space_usage::ByteCount;
 use crate::DocId;
@@ -176,7 +175,7 @@ mod bench {

     fn get_alive() -> Vec<u32> {
         let mut data = (0..1_000_000_u32).collect::<Vec<u32>>();
-        for _ in 0..(1_000_000) * 1 / 8 {
+        for _ in 0..1_000_000 / 8 {
             remove_rand(&mut data);
         }
         data

@@ -96,7 +96,7 @@ mod tests {
         let term = Term::from_field_bytes(field, b"lucene".as_ref());
         let term_query = TermQuery::new(term, IndexRecordOption::Basic);
         let term_weight_err =
-            term_query.specialized_weight(EnableScoring::Disabled(searcher.schema()));
+            term_query.specialized_weight(EnableScoring::disabled_from_schema(searcher.schema()));
         assert!(matches!(
             term_weight_err,
             Err(crate::TantivyError::SchemaError(_))

@@ -64,9 +64,7 @@ impl FacetReader {
         facet_ord: TermOrdinal,
         output: &mut Facet,
     ) -> crate::Result<()> {
-        let found_term = self
-            .term_dict
-            .ord_to_term(facet_ord as u64, &mut self.buffer)?;
+        let found_term = self.term_dict.ord_to_term(facet_ord, &mut self.buffer)?;
         assert!(found_term, "Term ordinal {} not found.", facet_ord);
         let facet_str = str::from_utf8(&self.buffer[..])
             .map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;

@@ -12,13 +12,15 @@
 //!
 //!
 //! Fields have to be declared as `FAST` in the schema.
-//! Currently supported fields are: u64, i64, f64, bytes and text.
+//! Currently supported fields are: u64, i64, f64, bytes, ip and text.
 //!
 //! Fast fields are stored in with [different codecs](fastfield_codecs). The best codec is detected
 //! automatically, when serializing.
 //!
 //! Read access performance is comparable to that of an array lookup.

+use std::net::Ipv6Addr;
+
 use fastfield_codecs::MonotonicallyMappableToU64;

 pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
@@ -28,10 +30,10 @@ pub use self::facet_reader::FacetReader;
 pub(crate) use self::multivalued::{get_fastfield_codecs_for_multivalue, MultivalueStartIndex};
 pub use self::multivalued::{
     MultiValueIndex, MultiValueU128FastFieldWriter, MultiValuedFastFieldReader,
-    MultiValuedFastFieldWriter, MultiValuedU128FastFieldReader,
+    MultiValuedFastFieldWriter,
 };
-pub(crate) use self::readers::type_and_cardinality;
 pub use self::readers::FastFieldReaders;
+pub(crate) use self::readers::{type_and_cardinality, FastType};
 pub use self::serializer::{Column, CompositeFastFieldSerializer};
 use self::writer::unexpected_value;
 pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
@@ -47,6 +49,33 @@ mod readers;
 mod serializer;
 mod writer;

+/// Trait for types that provide a zero value.
+///
+/// The resulting value is never used, just as placeholder, e.g. for `vec.resize()`.
+pub trait MakeZero {
+    /// Build a default value. This default value is never used, so the value does not
+    /// really matter.
+    fn make_zero() -> Self;
+}
+
+impl<T: FastValue> MakeZero for T {
+    fn make_zero() -> Self {
+        T::from_u64(0)
+    }
+}
+
+impl MakeZero for u128 {
+    fn make_zero() -> Self {
+        0
+    }
+}
+
+impl MakeZero for Ipv6Addr {
+    fn make_zero() -> Self {
+        Ipv6Addr::from(0u128.to_be_bytes())
+    }
+}
+
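A small usage sketch of the `MakeZero` idea (standalone; the trait is re-declared here just so the snippet compiles on its own):

```rust
use std::net::Ipv6Addr;

// A placeholder value for buffer sizing; the value itself is never read.
pub trait MakeZero {
    fn make_zero() -> Self;
}

impl MakeZero for u64 {
    fn make_zero() -> Self {
        0
    }
}

impl MakeZero for Ipv6Addr {
    fn make_zero() -> Self {
        Ipv6Addr::from(0u128.to_be_bytes())
    }
}

// Generic over any fast-field value type, including Ipv6Addr,
// which FastValue alone could not cover.
fn resize_buffer<T: MakeZero + Clone>(buf: &mut Vec<T>, new_len: usize) {
    buf.resize(new_len, T::make_zero());
}

fn main() {
    let mut docs: Vec<u64> = Vec::new();
    resize_buffer(&mut docs, 4);
    assert_eq!(docs, vec![0, 0, 0, 0]);
}
```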
 /// Trait for types that are allowed for fast fields:
 /// (u64, i64 and f64, bool, DateTime).
 pub trait FastValue:
@@ -54,12 +83,6 @@ pub trait FastValue:
 {
     /// Returns the `schema::Type` for this FastValue.
     fn to_type() -> Type;
-
-    /// Build a default value. This default value is never used, so the value does not
-    /// really matter.
-    fn make_zero() -> Self {
-        Self::from_u64(0u64)
-    }
 }

 impl FastValue for u64 {
@@ -101,12 +124,6 @@ impl FastValue for DateTime {
     fn to_type() -> Type {
         Type::Date
     }
-
-    fn make_zero() -> Self {
-        DateTime {
-            timestamp_micros: 0,
-        }
-    }
 }

 fn value_to_u64(value: &Value) -> crate::Result<u64> {
@@ -145,7 +162,7 @@ impl FastFieldType {
 mod tests {

     use std::collections::HashMap;
-    use std::ops::Range;
+    use std::ops::{Range, RangeInclusive};
     use std::path::Path;
     use std::sync::Arc;

@@ -159,7 +176,9 @@ mod tests {
     use super::*;
     use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
     use crate::merge_policy::NoMergePolicy;
-    use crate::schema::{Cardinality, Document, Field, Schema, SchemaBuilder, FAST, STRING, TEXT};
+    use crate::schema::{
+        Cardinality, Document, Field, Schema, SchemaBuilder, FAST, INDEXED, STRING, TEXT,
+    };
     use crate::time::OffsetDateTime;
     use crate::{DateOptions, DatePrecision, Index, SegmentId, SegmentReader};

@@ -207,7 +226,7 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 25);
+    assert_eq!(file.len(), 34);
     let composite_file = CompositeFile::open(&file)?;
     let fast_field_bytes = composite_file.open_read(*FIELD).unwrap().read_bytes()?;
     let fast_field_reader = open::<u64>(fast_field_bytes)?;
@@ -256,7 +275,7 @@ mod tests {
         serializer.close()?;
     }
     let file = directory.open_read(path)?;
-    assert_eq!(file.len(), 53);
+    assert_eq!(file.len(), 62);
     {
         let fast_fields_composite = CompositeFile::open(&file)?;
         let data = fast_fields_composite
@@ -297,7 +316,7 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 26);
+    assert_eq!(file.len(), 35);
     {
         let fast_fields_composite = CompositeFile::open(&file).unwrap();
         let data = fast_fields_composite
@@ -336,7 +355,7 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 80040);
+    assert_eq!(file.len(), 80049);
     {
         let fast_fields_composite = CompositeFile::open(&file)?;
         let data = fast_fields_composite
@@ -378,7 +397,7 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 40_usize);
+    assert_eq!(file.len(), 49_usize);

     {
         let fast_fields_composite = CompositeFile::open(&file)?;
@@ -473,7 +492,7 @@ mod tests {
         let fast_field_reader = open::<u64>(data)?;

         for a in 0..n {
-            assert_eq!(fast_field_reader.get_val(a as u32), permutation[a as usize]);
+            assert_eq!(fast_field_reader.get_val(a as u32), permutation[a]);
         }
     }
     Ok(())
@@ -520,11 +539,6 @@ mod tests {
         Ok(())
     }

-    #[test]
-    fn test_default_date() {
-        assert_eq!(0, DateTime::make_zero().into_timestamp_secs());
-    }
-
     fn get_vals_for_docs(ff: &MultiValuedFastFieldReader<u64>, docs: Range<u32>) -> Vec<u64> {
         let mut all = vec![];

@@ -822,7 +836,7 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 24);
+    assert_eq!(file.len(), 33);
     let composite_file = CompositeFile::open(&file)?;
     let data = composite_file.open_read(field).unwrap().read_bytes()?;
     let fast_field_reader = open::<bool>(data)?;
@@ -860,7 +874,7 @@ mod tests {
         serializer.close().unwrap();
     }
     let file = directory.open_read(path).unwrap();
-    assert_eq!(file.len(), 36);
+    assert_eq!(file.len(), 45);
     let composite_file = CompositeFile::open(&file)?;
     let data = composite_file.open_read(field).unwrap().read_bytes()?;
     let fast_field_reader = open::<bool>(data)?;
@@ -892,7 +906,7 @@ mod tests {
     }
     let file = directory.open_read(path).unwrap();
     let composite_file = CompositeFile::open(&file)?;
-    assert_eq!(file.len(), 23);
+    assert_eq!(file.len(), 32);
     let data = composite_file.open_read(field).unwrap().read_bytes()?;
     let fast_field_reader = open::<bool>(data)?;
     assert_eq!(fast_field_reader.get_val(0), false);
@@ -926,10 +940,10 @@ mod tests {
    pub fn test_gcd_date() -> crate::Result<()> {
        let size_prec_sec =
            test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
        assert_eq!(size_prec_sec, 28 + (1_000 * 13) / 8); // 13 bits per val = ceil(log2(number of seconds in 2 hours))
        assert_eq!(size_prec_sec, 5 + 4 + 28 + (1_000 * 13) / 8); // 13 bits per val = ceil(log2(number of seconds in 2 hours))
        let size_prec_micro =
            test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
        assert_eq!(size_prec_micro, 26 + (1_000 * 33) / 8); // 33 bits per val = ceil(log2(number of microseconds in 2 hours))
        assert_eq!(size_prec_micro, 5 + 4 + 26 + (1_000 * 33) / 8); // 33 bits per val = ceil(log2(number of microseconds in 2 hours))
        Ok(())
    }
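A quick sanity check of the bit widths asserted above: two hours span 7 200 seconds, or 7 200 000 000 microseconds. The `bits_needed` helper below is hypothetical (not part of tantivy), just illustrating the arithmetic:

```rust
/// Bits needed to represent any value in `0..n` (i.e. ceil(log2(n)) for n > 1).
fn bits_needed(n: u64) -> u32 {
    64 - (n - 1).leading_zeros()
}

fn main() {
    assert_eq!(bits_needed(7_200), 13); // seconds in 2 hours
    assert_eq!(bits_needed(7_200_000_000), 33); // microseconds in 2 hours
}
```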
@@ -969,4 +983,117 @@ mod tests {
        }
        Ok(len)
    }

    #[test]
    fn test_gcd_bug_regression_1757() {
        let mut schema_builder = Schema::builder();
        let num_field = schema_builder.add_u64_field("url_norm_hash", FAST | INDEXED);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        {
            let mut writer = index.writer_for_tests().unwrap();
            writer
                .add_document(doc! {
                    num_field => 100u64,
                })
                .unwrap();
            writer
                .add_document(doc! {
                    num_field => 200u64,
                })
                .unwrap();
            writer
                .add_document(doc! {
                    num_field => 300u64,
                })
                .unwrap();

            writer.commit().unwrap();
        }

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let segment = &searcher.segment_readers()[0];
        let field = segment.fast_fields().u64(num_field).unwrap();

        let numbers = vec![100, 200, 300];
        let test_range = |range: RangeInclusive<u64>| {
            let expected_count = numbers.iter().filter(|num| range.contains(num)).count();
            let mut vec = vec![];
            field.get_docids_for_value_range(range, 0..u32::MAX, &mut vec);
            assert_eq!(vec.len(), expected_count);
        };
        test_range(50..=50);
        test_range(150..=150);
        test_range(350..=350);
        test_range(100..=250);
        test_range(101..=200);
        test_range(101..=199);
        test_range(100..=300);
        test_range(100..=299);
    }

    #[test]
    fn test_mapping_bug_docids_for_value_range() {
        let mut schema_builder = Schema::builder();
        let num_field = schema_builder.add_u64_field("url_norm_hash", FAST | INDEXED);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        {
            // Values without gcd, but with min_value
            let mut writer = index.writer_for_tests().unwrap();
            writer
                .add_document(doc! {
                    num_field => 1000u64,
                })
                .unwrap();
            writer
                .add_document(doc! {
                    num_field => 1001u64,
                })
                .unwrap();
            writer
                .add_document(doc! {
                    num_field => 1003u64,
                })
                .unwrap();
            writer.commit().unwrap();
        }

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let segment = &searcher.segment_readers()[0];
        let field = segment.fast_fields().u64(num_field).unwrap();

        let numbers = vec![1000, 1001, 1003];
        let test_range = |range: RangeInclusive<u64>| {
            let expected_count = numbers.iter().filter(|num| range.contains(num)).count();
            let mut vec = vec![];
            field.get_docids_for_value_range(range, 0..u32::MAX, &mut vec);
            assert_eq!(vec.len(), expected_count);
        };
        let test_range_variant = |start, stop| {
            let start_range = start..=stop;
            test_range(start_range);
            let start_range = start..=(stop - 1);
            test_range(start_range);
            let start_range = start..=(stop + 1);
            test_range(start_range);
            let start_range = (start - 1)..=stop;
            test_range(start_range);
            let start_range = (start - 1)..=(stop - 1);
            test_range(start_range);
            let start_range = (start - 1)..=(stop + 1);
            test_range(start_range);
            let start_range = (start + 1)..=stop;
            test_range(start_range);
            let start_range = (start + 1)..=(stop - 1);
            test_range(start_range);
            let start_range = (start + 1)..=(stop + 1);
            test_range(start_range);
        };
        test_range_variant(50, 50);
        test_range_variant(1000, 1000);
        test_range_variant(1000, 1002);
    }
}

@@ -80,6 +80,7 @@ impl MultiValueIndex {
    ///
    /// TODO: Instead of a linear scan we can employ an exponential search into binary search to
    /// match a docid to its value position.
    #[allow(clippy::bool_to_int_with_if)]
    pub(crate) fn positions_to_docids(&self, doc_id_range: Range<u32>, positions: &mut Vec<u32>) {
        if positions.is_empty() {
            return;
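For reference, the strategy the TODO above has in mind could look roughly like this: grow a window exponentially from a hint, then binary-search inside it. This is a minimal sketch over a sorted offsets slice; `first_greater` and its signature are hypothetical, not tantivy's API:

```rust
/// Returns the first index `i >= hint` with `offsets[i] > pos`,
/// assuming `offsets` is sorted ascending.
fn first_greater(offsets: &[u64], pos: u64, hint: usize) -> usize {
    // Exponential phase: double the window until it passes `pos`.
    let mut bound = 1;
    while hint + bound < offsets.len() && offsets[hint + bound] <= pos {
        bound *= 2;
    }
    // Binary phase: search inside the bracketed window.
    let start = hint + bound / 2;
    let end = (hint + bound).min(offsets.len());
    start + offsets[start..end].partition_point(|&off| off <= pos)
}

fn main() {
    let offsets = [0u64, 2, 2, 5, 9];
    assert_eq!(first_greater(&offsets, 3, 0), 3);
    assert_eq!(first_greater(&offsets, 0, 0), 1);
}
```

Passing the previous result back in as `hint` keeps each lookup close to the last hit, which is what makes this attractive for walking sorted positions.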
@@ -5,7 +5,7 @@ mod writer
use fastfield_codecs::FastFieldCodecType;
pub use index::MultiValueIndex;

pub use self::reader::{MultiValuedFastFieldReader, MultiValuedU128FastFieldReader};
pub use self::reader::MultiValuedFastFieldReader;
pub(crate) use self::writer::MultivalueStartIndex;
pub use self::writer::{MultiValueU128FastFieldWriter, MultiValuedFastFieldWriter};

@@ -525,7 +525,7 @@ mod bench {
        serializer.close().unwrap();
        field
    };
    let file = directory.open_read(&path).unwrap();
    let file = directory.open_read(path).unwrap();
    {
        let fast_fields_composite = CompositeFile::open(&file).unwrap();
        let data_idx = fast_fields_composite

@@ -1,107 +1,31 @@
use core::fmt;
use std::ops::{Range, RangeInclusive};
use std::sync::Arc;

use fastfield_codecs::{Column, MonotonicallyMappableToU128};
use fastfield_codecs::Column;

use super::MultiValueIndex;
use crate::fastfield::FastValue;
use crate::fastfield::MakeZero;
use crate::DocId;

/// Reader for a multivalued `u64` fast field.
/// Reader for a multivalued fast field.
///
/// The reader is implemented as two `u64` fast field.
/// The reader is implemented as two fast fields, one u64 fast field for the index and one for the
/// values.
///
/// The `vals_reader` will access the concatenated list of all
/// values for all reader.
/// The `idx_reader` associated, for each document, the index of its first value.
/// Stores the start position for each document.
/// The `vals_reader` will access the concatenated list of all values.
/// The `idx_reader` associates, for each document, the index of its first value.
#[derive(Clone)]
pub struct MultiValuedFastFieldReader<Item: FastValue> {
    idx_reader: MultiValueIndex,
    vals_reader: Arc<dyn Column<Item>>,
}

impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
    pub(crate) fn open(
        idx_reader: Arc<dyn Column<u64>>,
        vals_reader: Arc<dyn Column<Item>>,
    ) -> MultiValuedFastFieldReader<Item> {
        MultiValuedFastFieldReader {
            idx_reader: MultiValueIndex::new(idx_reader),
            vals_reader,
        }
    }

    /// Returns the array of values associated with the given `doc`.
    #[inline]
    fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<Item>) {
        let len = (range.end - range.start) as usize;
        vals.resize(len, Item::make_zero());
        self.vals_reader
            .get_range(range.start as u64, &mut vals[..]);
    }

    /// Returns the array of values associated with the given `doc`.
    #[inline]
    pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
        let range = self.idx_reader.range(doc);
        self.get_vals_for_range(range, vals);
    }

    /// returns the multivalue index
    pub fn get_index_reader(&self) -> &MultiValueIndex {
        &self.idx_reader
    }

    /// Returns the minimum value for this fast field.
    ///
    /// The min value does not take in account of possible
    /// deleted document, and should be considered as a lower bound
    /// of the actual minimum value.
    pub fn min_value(&self) -> Item {
        self.vals_reader.min_value()
    }

    /// Returns the maximum value for this fast field.
    ///
    /// The max value does not take in account of possible
    /// deleted document, and should be considered as an upper bound
    /// of the actual maximum value.
    pub fn max_value(&self) -> Item {
        self.vals_reader.max_value()
    }

    /// Returns the number of values associated with the document `DocId`.
    #[inline]
    pub fn num_vals(&self, doc: DocId) -> u32 {
        self.idx_reader.num_vals_for_doc(doc)
    }

    /// Returns the overall number of values in this field.
    #[inline]
    pub fn total_num_vals(&self) -> u32 {
        self.idx_reader.total_num_vals()
    }
}
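As context for the unified reader above, here is a minimal end-to-end sketch of the multi-valued u64 fast field API as used in this branch. The field name `scores` is made up, and the `15_000_000` writer heap size is an arbitrary small value:

```rust
use tantivy::schema::{Cardinality, NumericOptions, Schema};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let scores = schema_builder.add_u64_field(
        "scores",
        NumericOptions::default().set_fast(Cardinality::MultiValues),
    );
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(15_000_000)?;
    // Two values for doc 0: they end up concatenated in `vals_reader`,
    // with `idx_reader` recording where doc 0's values start.
    writer.add_document(doc!(scores => 3u64, scores => 7u64))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let ff = searcher.segment_reader(0).fast_fields().u64s(scores)?;
    let mut vals = Vec::new();
    ff.get_vals(0, &mut vals);
    assert_eq!(vals, vec![3, 7]);
    Ok(())
}
```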
/// Reader for a multivalued `u128` fast field.
///
/// The reader is implemented as a `u64` fast field for the index and a `u128` fast field.
///
/// The `vals_reader` will access the concatenated list of all
/// values for all reader.
/// The `idx_reader` associated, for each document, the index of its first value.
#[derive(Clone)]
pub struct MultiValuedU128FastFieldReader<T: MonotonicallyMappableToU128> {
pub struct MultiValuedFastFieldReader<T> {
    idx_reader: MultiValueIndex,
    vals_reader: Arc<dyn Column<T>>,
}

impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
impl<T: PartialOrd + MakeZero + Copy + fmt::Debug> MultiValuedFastFieldReader<T> {
    pub(crate) fn open(
        idx_reader: Arc<dyn Column<u64>>,
        vals_reader: Arc<dyn Column<T>>,
    ) -> MultiValuedU128FastFieldReader<T> {
    ) -> MultiValuedFastFieldReader<T> {
        Self {
            idx_reader: MultiValueIndex::new(idx_reader),
            vals_reader,

@@ -122,7 +46,7 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
    #[inline]
    fn get_vals_for_range(&self, range: Range<u32>, vals: &mut Vec<T>) {
        let len = (range.end - range.start) as usize;
        vals.resize(len, T::from_u128(0));
        vals.resize(len, T::make_zero());
        self.vals_reader
            .get_range(range.start as u64, &mut vals[..]);
    }

@@ -199,8 +123,131 @@ impl<T: MonotonicallyMappableToU128> MultiValuedU128FastFieldReader<T> {
#[cfg(test)]
mod tests {

    use time::{Duration, OffsetDateTime};

    use crate::collector::Count;
    use crate::core::Index;
    use crate::query::RangeQuery;
    use crate::schema::{Cardinality, Facet, FacetOptions, NumericOptions, Schema};
    use crate::{DateOptions, DatePrecision, DateTime};

    #[test]
    fn test_multivalued_date_docids_for_value_range_1() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field(
            "multi_date_field",
            DateOptions::default()
                .set_fast(Cardinality::MultiValues)
                .set_indexed()
                .set_fieldnorm()
                .set_precision(DatePrecision::Microseconds)
                .set_stored(),
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests()?;
        let first_time_stamp = OffsetDateTime::now_utc();
        index_writer.add_document(doc!(
            date_field => DateTime::from_utc(first_time_stamp),
            date_field => DateTime::from_utc(first_time_stamp),
        ))?;
        // add another second
        let two_secs_ahead = first_time_stamp + Duration::seconds(2);
        index_writer.commit()?;

        let reader = index.reader()?;
        let searcher = reader.searcher();
        let reader = searcher.segment_reader(0);

        let date_ff_reader = reader.fast_fields().dates(date_field).unwrap();
        let mut docids = vec![];
        date_ff_reader.get_docids_for_value_range(
            DateTime::from_utc(first_time_stamp)..=DateTime::from_utc(two_secs_ahead),
            0..5,
            &mut docids,
        );
        assert_eq!(docids, vec![0]);

        let count_multiples =
            |range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();

        assert_eq!(
            count_multiples(RangeQuery::new_date(
                date_field,
                DateTime::from_utc(first_time_stamp)..DateTime::from_utc(two_secs_ahead)
            )),
            1
        );

        Ok(())
    }

    #[test]
    fn test_multivalued_date_docids_for_value_range_2() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field(
            "multi_date_field",
            DateOptions::default()
                .set_fast(Cardinality::MultiValues)
                // TODO: Test different precision after fixing https://github.com/quickwit-oss/tantivy/issues/1783
                .set_precision(DatePrecision::Microseconds)
                .set_indexed()
                .set_fieldnorm()
                .set_stored(),
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests()?;
        let first_time_stamp = OffsetDateTime::now_utc();
        index_writer.add_document(doc!(
            date_field => DateTime::from_utc(first_time_stamp),
            date_field => DateTime::from_utc(first_time_stamp),
        ))?;
        index_writer.add_document(doc!())?;
        // add one second
        index_writer.add_document(doc!(
            date_field => DateTime::from_utc(first_time_stamp + Duration::seconds(1)),
        ))?;
        // add another second
        let two_secs_ahead = first_time_stamp + Duration::seconds(2);
        index_writer.add_document(doc!(
            date_field => DateTime::from_utc(two_secs_ahead),
            date_field => DateTime::from_utc(two_secs_ahead),
            date_field => DateTime::from_utc(two_secs_ahead),
        ))?;
        // add three seconds
        index_writer.add_document(doc!(
            date_field => DateTime::from_utc(first_time_stamp + Duration::seconds(3)),
        ))?;
        index_writer.commit()?;

        let reader = index.reader()?;
        let searcher = reader.searcher();
        let reader = searcher.segment_reader(0);
        assert_eq!(reader.num_docs(), 5);

        let date_ff_reader = reader.fast_fields().dates(date_field).unwrap();
        let mut docids = vec![];
        date_ff_reader.get_docids_for_value_range(
            DateTime::from_utc(first_time_stamp)..=DateTime::from_utc(two_secs_ahead),
            0..5,
            &mut docids,
        );
        assert_eq!(docids, vec![0, 2, 3]);

        let count_multiples =
            |range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();

        assert_eq!(
            count_multiples(RangeQuery::new_date(
                date_field,
                DateTime::from_utc(first_time_stamp)..DateTime::from_utc(two_secs_ahead)
            )),
            2
        );

        Ok(())
    }

    #[test]
    fn test_multifastfield_reader() -> crate::Result<()> {

@@ -264,7 +264,7 @@ fn iter_remapped_multivalue_index<'a, C: Column>(
    std::iter::once(0).chain(doc_id_map.iter_old_doc_ids().map(move |old_doc| {
        let num_vals_for_doc = column.get_val(old_doc + 1) - column.get_val(old_doc);
        offset += num_vals_for_doc;
        offset as u64
        offset
    }))
}
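The remapping above rebuilds the start-offset index as a running prefix sum over per-document value counts. A tiny self-contained illustration of that shape:

```rust
fn main() {
    // Per-document value counts after remapping: doc0 has 2 values,
    // doc1 has none, doc2 has 3.
    let num_vals_per_doc = [2u64, 0, 3];
    let mut offset = 0u64;
    let offsets: Vec<u64> = std::iter::once(0)
        .chain(num_vals_per_doc.iter().map(|n| {
            offset += n;
            offset // running start offset, one entry per document
        }))
        .collect();
    assert_eq!(offsets, vec![0, 2, 2, 5]);
}
```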
@@ -3,11 +3,9 @@ use std::sync::Arc;

use fastfield_codecs::{open, open_u128, Column};

use super::multivalued::MultiValuedU128FastFieldReader;
use super::multivalued::MultiValuedFastFieldReader;
use crate::directory::{CompositeFile, FileSlice};
use crate::fastfield::{
    BytesFastFieldReader, FastFieldNotAvailableError, FastValue, MultiValuedFastFieldReader,
};
use crate::fastfield::{BytesFastFieldReader, FastFieldNotAvailableError, FastValue};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage;
use crate::{DateTime, TantivyError};

@@ -161,20 +159,14 @@ impl FastFieldReaders {
    /// Returns the `ip` fast field reader associated to `field`.
    ///
    /// If `field` is not a u128 fast field, this method returns an Error.
    pub fn ip_addrs(
        &self,
        field: Field,
    ) -> crate::Result<MultiValuedU128FastFieldReader<Ipv6Addr>> {
    pub fn ip_addrs(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<Ipv6Addr>> {
        self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
        let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;

        let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
        let vals_reader = open_u128::<Ipv6Addr>(bytes)?;

        Ok(MultiValuedU128FastFieldReader::open(
            idx_reader,
            vals_reader,
        ))
        Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
    }

    /// Returns the `u128` fast field reader associated to `field`.

@@ -189,17 +181,14 @@ impl FastFieldReaders {
    /// Returns the `u128` multi-valued fast field reader associated to `field`.
    ///
    /// If `field` is not a u128 multi-valued fast field, this method returns an Error.
    pub fn u128s(&self, field: Field) -> crate::Result<MultiValuedU128FastFieldReader<u128>> {
    pub fn u128s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u128>> {
        self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
        let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;

        let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
        let vals_reader = open_u128::<u128>(bytes)?;

        Ok(MultiValuedU128FastFieldReader::open(
            idx_reader,
            vals_reader,
        ))
        Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
    }

    /// Returns the `u64` fast field reader associated with `field`, regardless of whether
@@ -1,3 +1,4 @@
use std::fmt;
use std::io::{self, Write};

pub use fastfield_codecs::Column;

@@ -49,7 +50,7 @@ impl CompositeFastFieldSerializer {

    /// Serialize data into a new u64 fast field. The best compression codec will be chosen
    /// automatically.
    pub fn create_auto_detect_u64_fast_field<T: MonotonicallyMappableToU64>(
    pub fn create_auto_detect_u64_fast_field<T: MonotonicallyMappableToU64 + fmt::Debug>(
        &mut self,
        field: Field,
        fastfield_accessor: impl Column<T>,

@@ -59,7 +60,9 @@ impl CompositeFastFieldSerializer {

    /// Serialize data into a new u64 fast field. The best compression codec will be chosen
    /// automatically.
    pub fn create_auto_detect_u64_fast_field_with_idx<T: MonotonicallyMappableToU64>(
    pub fn create_auto_detect_u64_fast_field_with_idx<
        T: MonotonicallyMappableToU64 + fmt::Debug,
    >(
        &mut self,
        field: Field,
        fastfield_accessor: impl Column<T>,

@@ -72,7 +75,9 @@ impl CompositeFastFieldSerializer {

    /// Serialize data into a new u64 fast field. The best compression codec of the provided
    /// ones will be chosen.
    pub fn create_auto_detect_u64_fast_field_with_idx_and_codecs<T: MonotonicallyMappableToU64>(
    pub fn create_auto_detect_u64_fast_field_with_idx_and_codecs<
        T: MonotonicallyMappableToU64 + fmt::Debug,
    >(
        &mut self,
        field: Field,
        fastfield_accessor: impl Column<T>,
@@ -360,20 +360,10 @@ impl U128FastFieldWriter {
                    .map(|idx| self.vals[idx as usize])
            };

            serializer.create_u128_fast_field_with_idx(
                self.field,
                iter_gen,
                self.val_count as u32,
                0,
            )?;
            serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
        } else {
            let iter_gen = || self.vals.iter().cloned();
            serializer.create_u128_fast_field_with_idx(
                self.field,
                iter_gen,
                self.val_count as u32,
                0,
            )?;
            serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
        }

        Ok(())
@@ -252,8 +252,8 @@ mod tests {
            &demux_mapping,
            target_settings,
            vec![
                Box::new(RamDirectory::default()),
                Box::new(RamDirectory::default()),
                Box::<RamDirectory>::default(),
                Box::<RamDirectory>::default(),
            ],
        )?;
@@ -152,7 +152,7 @@ pub(crate) fn advance_deletes(
    let num_deleted_docs = max_doc - num_alive_docs;
    if num_deleted_docs > num_deleted_docs_before {
        // There are new deletes. We need to write a new delete file.
        segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
        segment = segment.with_delete_meta(num_deleted_docs, target_opstamp);
        let mut alive_doc_file = segment.open_write(SegmentComponent::Delete)?;
        write_alive_bitset(&alive_bitset, &mut alive_doc_file)?;
        alive_doc_file.terminate()?;
@@ -678,7 +678,7 @@ impl IndexWriter {
    /// only after calling `commit()`.
    #[doc(hidden)]
    pub fn delete_query(&self, query: Box<dyn Query>) -> crate::Result<Opstamp> {
        let weight = query.weight(EnableScoring::Disabled(&self.index.schema()))?;
        let weight = query.weight(EnableScoring::disabled_from_schema(&self.index.schema()))?;
        let opstamp = self.stamper.stamp();
        let delete_operation = DeleteOperation {
            opstamp,

@@ -759,7 +759,8 @@ impl IndexWriter {
    match user_op {
        UserOperation::Delete(term) => {
            let query = TermQuery::new(term, IndexRecordOption::Basic);
            let weight = query.weight(EnableScoring::Disabled(&self.index.schema()))?;
            let weight =
                query.weight(EnableScoring::disabled_from_schema(&self.index.schema()))?;
            let delete_operation = DeleteOperation {
                opstamp,
                target: weight,

@@ -984,7 +985,7 @@ mod tests {
        "LogMergePolicy { min_num_segments: 8, max_docs_before_merge: 10000000, \
         min_layer_size: 10000, level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }"
    );
    let merge_policy = Box::new(NoMergePolicy::default());
    let merge_policy = Box::<NoMergePolicy>::default();
    index_writer.set_merge_policy(merge_policy);
    assert_eq!(
        format!("{:?}", index_writer.get_merge_policy()),

@@ -1813,8 +1814,8 @@ mod tests {
    }

    let num_docs_expected = expected_ids_and_num_occurrences
        .iter()
        .map(|(_, id_occurrences)| *id_occurrences as usize)
        .values()
        .map(|id_occurrences| *id_occurrences as usize)
        .sum::<usize>();
    assert_eq!(searcher.num_docs() as usize, num_docs_expected);
    assert_eq!(old_searcher.num_docs() as usize, num_docs_expected);
@@ -67,11 +67,12 @@ pub(crate) fn index_json_values<'a>(
    doc: DocId,
    json_values: impl Iterator<Item = crate::Result<&'a serde_json::Map<String, serde_json::Value>>>,
    text_analyzer: &TextAnalyzer,
    expand_dots_enabled: bool,
    term_buffer: &mut Term,
    postings_writer: &mut dyn PostingsWriter,
    ctx: &mut IndexingContext,
) -> crate::Result<()> {
    let mut json_term_writer = JsonTermWriter::wrap(term_buffer);
    let mut json_term_writer = JsonTermWriter::wrap(term_buffer, expand_dots_enabled);
    let mut positions_per_path: IndexingPositionsPerPath = Default::default();
    for json_value_res in json_values {
        let json_value = json_value_res?;

@@ -88,11 +89,11 @@ pub(crate) fn index_json_values<'a>(
    Ok(())
}

fn index_json_object<'a>(
fn index_json_object(
    doc: DocId,
    json_value: &serde_json::Map<String, serde_json::Value>,
    text_analyzer: &TextAnalyzer,
    json_term_writer: &mut JsonTermWriter<'a>,
    json_term_writer: &mut JsonTermWriter,
    postings_writer: &mut dyn PostingsWriter,
    ctx: &mut IndexingContext,
    positions_per_path: &mut IndexingPositionsPerPath,

@@ -112,11 +113,11 @@ fn index_json_object<'a>(
    }
}

fn index_json_value<'a>(
fn index_json_value(
    doc: DocId,
    json_value: &serde_json::Value,
    text_analyzer: &TextAnalyzer,
    json_term_writer: &mut JsonTermWriter<'a>,
    json_term_writer: &mut JsonTermWriter,
    postings_writer: &mut dyn PostingsWriter,
    ctx: &mut IndexingContext,
    positions_per_path: &mut IndexingPositionsPerPath,

@@ -259,6 +260,7 @@ pub(crate) fn set_string_and_get_terms(
pub struct JsonTermWriter<'a> {
    term_buffer: &'a mut Term,
    path_stack: Vec<usize>,
    expand_dots_enabled: bool,
}

/// Splits a json path supplied to the query parser in such a way that

@@ -298,23 +300,25 @@ impl<'a> JsonTermWriter<'a> {
    pub fn from_field_and_json_path(
        field: Field,
        json_path: &str,
        expand_dots_enabled: bool,
        term_buffer: &'a mut Term,
    ) -> Self {
        term_buffer.set_field_and_type(field, Type::Json);
        let mut json_term_writer = Self::wrap(term_buffer);
        let mut json_term_writer = Self::wrap(term_buffer, expand_dots_enabled);
        for segment in split_json_path(json_path) {
            json_term_writer.push_path_segment(&segment);
        }
        json_term_writer
    }

    pub fn wrap(term_buffer: &'a mut Term) -> Self {
    pub fn wrap(term_buffer: &'a mut Term, expand_dots_enabled: bool) -> Self {
        term_buffer.clear_with_type(Type::Json);
        let mut path_stack = Vec::with_capacity(10);
        path_stack.push(0);
        Self {
            term_buffer,
            path_stack,
            expand_dots_enabled,
        }
    }

@@ -336,11 +340,24 @@ impl<'a> JsonTermWriter<'a> {
        self.trim_to_end_of_path();
        let buffer = self.term_buffer.value_bytes_mut();
        let buffer_len = buffer.len();

        if self.path_stack.len() > 1 {
            buffer[buffer_len - 1] = JSON_PATH_SEGMENT_SEP;
        }
        self.term_buffer.append_bytes(segment.as_bytes());
        self.term_buffer.append_bytes(&[JSON_PATH_SEGMENT_SEP]);
        if self.expand_dots_enabled && segment.as_bytes().contains(&b'.') {
            // We need to replace `.` by JSON_PATH_SEGMENT_SEP.
            self.term_buffer
                .append_bytes(segment.as_bytes())
                .iter_mut()
                .for_each(|byte| {
                    if *byte == b'.' {
                        *byte = JSON_PATH_SEGMENT_SEP;
                    }
                });
        } else {
            self.term_buffer.append_bytes(segment.as_bytes());
        }
        self.term_buffer.push_byte(JSON_PATH_SEGMENT_SEP);
        self.path_stack.push(self.term_buffer.len_bytes());
    }
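The effect of the dot expansion above, reduced to plain bytes (JSON_PATH_SEGMENT_SEP is `0x01`, as the tests below confirm):

```rust
fn main() {
    const JSON_PATH_SEGMENT_SEP: u8 = 0x01;
    let mut path = b"color.hue".to_vec();
    // With expand_dots enabled, every '.' inside a segment becomes a path separator.
    for byte in path.iter_mut() {
        if *byte == b'.' {
            *byte = JSON_PATH_SEGMENT_SEP;
        }
    }
    assert_eq!(path, b"color\x01hue");
}
```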
@@ -391,7 +408,7 @@ mod tests {
    fn test_json_writer() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("attributes");
        json_writer.push_path_segment("color");
        json_writer.set_str("red");

@@ -425,7 +442,7 @@ mod tests {
    fn test_string_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color");
        json_writer.set_str("red");
        assert_eq!(

@@ -438,7 +455,7 @@ mod tests {
    fn test_i64_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color");
        json_writer.set_fast_value(-4i64);
        assert_eq!(

@@ -451,7 +468,7 @@ mod tests {
    fn test_u64_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color");
        json_writer.set_fast_value(4u64);
        assert_eq!(

@@ -464,7 +481,7 @@ mod tests {
    fn test_f64_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color");
        json_writer.set_fast_value(4.0f64);
        assert_eq!(

@@ -477,7 +494,7 @@ mod tests {
    fn test_bool_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color");
        json_writer.set_fast_value(true);
        assert_eq!(

@@ -490,7 +507,7 @@ mod tests {
    fn test_push_after_set_path_segment() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("attribute");
        json_writer.set_str("something");
        json_writer.push_path_segment("color");

@@ -505,7 +522,7 @@ mod tests {
    fn test_pop_segment() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color");
        json_writer.push_path_segment("hue");
        json_writer.pop_path_segment();

@@ -520,7 +537,7 @@ mod tests {
    fn test_json_writer_path() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color");
        assert_eq!(json_writer.path(), b"color");
        json_writer.push_path_segment("hue");

@@ -529,6 +546,37 @@ mod tests {
        assert_eq!(json_writer.path(), b"color\x01hue");
    }

    #[test]
    fn test_json_path_expand_dots_disabled() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
        json_writer.push_path_segment("color.hue");
        assert_eq!(json_writer.path(), b"color.hue");
    }

    #[test]
    fn test_json_path_expand_dots_enabled() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term, true);
        json_writer.push_path_segment("color.hue");
        assert_eq!(json_writer.path(), b"color\x01hue");
    }

    #[test]
    fn test_json_path_expand_dots_enabled_pop_segment() {
        let field = Field::from_field_id(1);
        let mut term = Term::with_type_and_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term, true);
        json_writer.push_path_segment("hello");
        assert_eq!(json_writer.path(), b"hello");
        json_writer.push_path_segment("color.hue");
        assert_eq!(json_writer.path(), b"hello\x01color\x01hue");
        json_writer.pop_path_segment();
        assert_eq!(json_writer.path(), b"hello");
    }

    #[test]
    fn test_split_json_path_simple() {
        let json_path = split_json_path("titi.toto");
@@ -13,7 +13,7 @@ use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{
    get_fastfield_codecs_for_multivalue, AliveBitSet, Column, CompositeFastFieldSerializer,
    MultiValueIndex, MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
    MultiValueIndex, MultiValuedFastFieldReader,
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};

@@ -331,18 +331,18 @@ impl IndexMerger {
        fast_field_serializer: &mut CompositeFastFieldSerializer,
        doc_id_mapping: &SegmentDocIdMapping,
    ) -> crate::Result<()> {
        let segment_and_ff_readers: Vec<(&SegmentReader, MultiValuedU128FastFieldReader<u128>)> =
            self.readers
                .iter()
                .map(|segment_reader| {
                    let ff_reader: MultiValuedU128FastFieldReader<u128> =
                        segment_reader.fast_fields().u128s(field).expect(
                            "Failed to find index for multivalued field. This is a bug in \
                             tantivy, please report.",
                        );
                    (segment_reader, ff_reader)
                })
                .collect::<Vec<_>>();
        let segment_and_ff_readers: Vec<(&SegmentReader, MultiValuedFastFieldReader<u128>)> = self
            .readers
            .iter()
            .map(|segment_reader| {
                let ff_reader: MultiValuedFastFieldReader<u128> =
                    segment_reader.fast_fields().u128s(field).expect(
                        "Failed to find index for multivalued field. This is a bug in tantivy, \
                         please report.",
                    );
                (segment_reader, ff_reader)
            })
            .collect::<Vec<_>>();

        Self::write_1_n_fast_field_idx_generic(
            field,

@@ -366,7 +366,7 @@ impl IndexMerger {
                .map(|doc| reader.num_vals(doc))
                .sum()
        } else {
            reader.total_num_vals() as u32
            reader.total_num_vals()
        }
    })
    .sum();

@@ -968,7 +968,7 @@ impl IndexMerger {
        let doc_bytes = doc_bytes_res?;
        store_writer.store_bytes(&doc_bytes)?;
    } else {
        return Err(DataCorruption::comment_only(&format!(
        return Err(DataCorruption::comment_only(format!(
            "unexpected missing document in docstore on merge, doc address \
             {old_doc_addr:?}",
        ))
@@ -60,7 +60,7 @@ type AddBatchReceiver = channel::Receiver<AddBatch>;
mod tests_mmap {
    use crate::collector::Count;
    use crate::query::QueryParser;
    use crate::schema::{Schema, STORED, TEXT};
    use crate::schema::{JsonObjectOptions, Schema, TEXT};
    use crate::{Index, Term};

    #[test]

@@ -81,9 +81,9 @@ mod tests_mmap {
    }

    #[test]
    fn test_json_field_espace() {
    fn test_json_field_expand_dots_disabled_dot_escaped_required() {
        let mut schema_builder = Schema::builder();
        let json_field = schema_builder.add_json_field("json", TEXT | STORED);
        let json_field = schema_builder.add_json_field("json", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut index_writer = index.writer_for_tests().unwrap();
        let json = serde_json::json!({"k8s.container.name": "prometheus", "val": "hello"});

@@ -99,4 +99,26 @@ mod tests_mmap {
        let num_docs = searcher.search(&query, &Count).unwrap();
        assert_eq!(num_docs, 1);
    }

    #[test]
    fn test_json_field_expand_dots_enabled_dot_escape_not_required() {
        let mut schema_builder = Schema::builder();
        let json_options: JsonObjectOptions =
            JsonObjectOptions::from(TEXT).set_expand_dots_enabled();
        let json_field = schema_builder.add_json_field("json", json_options);
        let index = Index::create_in_ram(schema_builder.build());
        let mut index_writer = index.writer_for_tests().unwrap();
        let json = serde_json::json!({"k8s.container.name": "prometheus", "val": "hello"});
        index_writer.add_document(doc!(json_field=>json)).unwrap();
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        assert_eq!(searcher.num_docs(), 1);
        let parse_query = QueryParser::for_index(&index, Vec::new());
        let query = parse_query
            .parse_query(r#"json.k8s.container.name:prometheus"#)
            .unwrap();
        let num_docs = searcher.search(&query, &Count).unwrap();
        assert_eq!(num_docs, 1);
    }
}
@@ -577,7 +577,7 @@ impl SegmentUpdater {
        for merge_operation in merge_candidates {
            // If a merge cannot be started this is not a fatal error.
            // We do log a warning in `start_merge`.
            let _ = self.start_merge(merge_operation);
            drop(self.start_merge(merge_operation));
        }
    }

@@ -866,7 +866,7 @@ mod tests {
    }

    assert_eq!(indices.len(), 3);
    let output_directory: Box<dyn Directory> = Box::new(RamDirectory::default());
    let output_directory: Box<dyn Directory> = Box::<RamDirectory>::default();
    let index = merge_indices(&indices, output_directory)?;
    assert_eq!(index.schema(), schema);
@@ -180,7 +180,7 @@ impl SegmentWriter {
        self.per_field_postings_writers.get_for_field_mut(field);
    term_buffer.clear_with_field_and_type(field_entry.field_type().value_type(), field);

    match *field_entry.field_type() {
    match field_entry.field_type() {
        FieldType::Facet(_) => {
            for value in values {
                let facet = value.as_facet().ok_or_else(make_schema_error)?;

@@ -307,7 +307,7 @@ impl SegmentWriter {
                self.fieldnorms_writer.record(doc_id, field, num_vals);
            }
        }
        FieldType::JsonObject(_) => {
        FieldType::JsonObject(json_options) => {
            let text_analyzer = &self.per_field_text_analyzers[field.field_id() as usize];
            let json_values_it =
                values.map(|value| value.as_json().ok_or_else(make_schema_error));

@@ -315,6 +315,7 @@ impl SegmentWriter {
                doc_id,
                json_values_it,
                text_analyzer,
                json_options.is_expand_dots_enabled(),
                term_buffer,
                postings_writer,
                ctx,

@@ -557,7 +558,7 @@ mod tests {
        let mut term = Term::with_type_and_field(Type::Json, json_field);
        let mut term_stream = term_dict.stream().unwrap();

        let mut json_term_writer = JsonTermWriter::wrap(&mut term);
        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);

        json_term_writer.push_path_segment("bool");
        json_term_writer.set_fast_value(true);

@@ -648,7 +649,7 @@ mod tests {
        let segment_reader = searcher.segment_reader(0u32);
        let inv_index = segment_reader.inverted_index(json_field).unwrap();
        let mut term = Term::with_type_and_field(Type::Json, json_field);
        let mut json_term_writer = JsonTermWriter::wrap(&mut term);
        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
        json_term_writer.push_path_segment("mykey");
        json_term_writer.set_str("token");
        let term_info = inv_index

@@ -692,7 +693,7 @@ mod tests {
        let segment_reader = searcher.segment_reader(0u32);
        let inv_index = segment_reader.inverted_index(json_field).unwrap();
        let mut term = Term::with_type_and_field(Type::Json, json_field);
        let mut json_term_writer = JsonTermWriter::wrap(&mut term);
        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
        json_term_writer.push_path_segment("mykey");
        json_term_writer.set_str("two tokens");
        let term_info = inv_index

@@ -737,7 +738,7 @@ mod tests {
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let mut term = Term::with_type_and_field(Type::Json, json_field);
        let mut json_term_writer = JsonTermWriter::wrap(&mut term);
        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
        json_term_writer.push_path_segment("mykey");
        json_term_writer.push_path_segment("field");
        json_term_writer.set_str("hello");
@@ -16,11 +16,11 @@ mod atomic_impl {

    impl AtomicU64Wrapper {
        pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
            AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
            AtomicU64Wrapper(AtomicU64::new(first_opstamp))
        }

        pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
            self.0.fetch_add(val as u64, order) as u64
            self.0.fetch_add(val, order)
        }

        pub fn revert(&self, val: u64, order: Ordering) -> u64 {

@@ -77,7 +77,7 @@ impl Stamper {
    }

    pub fn stamp(&self) -> Opstamp {
        self.0.fetch_add(1u64, Ordering::SeqCst) as u64
        self.0.fetch_add(1u64, Ordering::SeqCst)
    }

    /// Given a desired count `n`, `stamps` returns an iterator that
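Worth recalling for the stamper change above: std's `fetch_add` returns the value *before* the increment, so the first opstamp handed out equals the initial value. A standalone illustration using only the standard library:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    let stamper = AtomicU64::new(41);
    // `fetch_add` returns the previous value, then stores the incremented one.
    assert_eq!(stamper.fetch_add(1, Ordering::SeqCst), 41);
    assert_eq!(stamper.load(Ordering::SeqCst), 42);
}
```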
src/lib.rs

@@ -1,17 +1,14 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#![cfg_attr(
    feature = "cargo-clippy",
    allow(
        clippy::module_inception,
        clippy::needless_range_loop,
        clippy::bool_assert_comparison
    )
)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![warn(missing_docs)]
#![allow(clippy::len_without_is_empty)]
#![allow(clippy::derive_partial_eq_without_eq)]
#![allow(
    clippy::len_without_is_empty,
    clippy::derive_partial_eq_without_eq,
    clippy::module_inception,
    clippy::needless_range_loop,
    clippy::bool_assert_comparison
)]

//! # `tantivy`
//!

@@ -144,7 +141,7 @@ use crate::time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};
/// All constructors and conversions are provided as explicit
/// functions and not by implementing any `From`/`Into` traits
/// to prevent unintended usage.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DateTime {
    // Timestamp in microseconds.
    pub(crate) timestamp_micros: i64,

@@ -177,7 +174,7 @@ impl DateTime {
    /// The given date/time is converted to UTC and the actual
    /// time zone is discarded.
    pub const fn from_utc(dt: OffsetDateTime) -> Self {
        let timestamp_micros = dt.unix_timestamp() as i64 * 1_000_000 + dt.microsecond() as i64;
        let timestamp_micros = dt.unix_timestamp() * 1_000_000 + dt.microsecond() as i64;
        Self { timestamp_micros }
    }
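For the microsecond arithmetic above, a small standalone check (using the `time` crate that tantivy re-exports; the sample timestamp is arbitrary):

```rust
use time::OffsetDateTime;

fn main() {
    // Unix second 1_000_000_000 is 2001-09-09T01:46:40Z; no sub-second part.
    let dt = OffsetDateTime::from_unix_timestamp(1_000_000_000).unwrap();
    let timestamp_micros = dt.unix_timestamp() * 1_000_000 + dt.microsecond() as i64;
    assert_eq!(timestamp_micros, 1_000_000_000_000_000);
}
```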
@@ -259,10 +256,6 @@ pub use crate::future_result::FutureResult;
/// and instead, refer to this as `crate::Result<T>`.
pub type Result<T> = std::result::Result<T, TantivyError>;

/// Result for an Async io operation.
#[cfg(feature = "quickwit")]
pub type AsyncIoResult<T> = std::result::Result<T, crate::error::AsyncIoError>;

mod core;
mod indexer;

@@ -71,7 +71,7 @@ impl PositionReader {
        .map(|num_bits| num_bits as usize)
        .sum();
    let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
    self.bit_widths.advance(num_blocks as usize);
    self.bit_widths.advance(num_blocks);
    self.positions.advance(num_bytes_to_skip);
    self.anchor_offset += (num_blocks * COMPRESSION_BLOCK_SIZE) as u64;
}
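A quick check of the byte-skipping arithmetic above; the block size of 128 values is an assumption about tantivy's `COMPRESSION_BLOCK_SIZE`, the rest is plain arithmetic:

```rust
fn main() {
    const COMPRESSION_BLOCK_SIZE: usize = 128; // assumed block size, in values
    // Skipping three blocks bit-packed at 5, 7 and 4 bits per value:
    let num_bits: usize = [5, 7, 4].iter().sum();
    let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
    assert_eq!(num_bytes_to_skip, 256);
}
```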
@@ -291,7 +291,7 @@ pub mod tests {
    const PADDING_VALUE: u32 = 234_234_345u32;
    let expected_length = 154;
    let mut encoder = BlockEncoder::new();
    let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
    let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).collect();
    for offset in &[0u32, 1u32, 2u32] {
        let encoded_data = encoder.compress_vint_sorted(&input, *offset);
        assert!(encoded_data.len() <= expected_length);

@@ -1,11 +1,11 @@
use crate::postings::stacker::{MemoryArena, TermHashMap};
use stacker::{ArenaHashMap, MemoryArena};

/// IndexingContext contains all of the transient memory arenas
/// required for building the inverted index.
pub(crate) struct IndexingContext {
    /// The term index is an adhoc hashmap,
    /// itself backed by a dedicated memory arena.
    pub term_index: TermHashMap,
    pub term_index: ArenaHashMap,
    /// Arena is a memory arena that stores posting lists / term frequencies / positions.
    pub arena: MemoryArena,
}

@@ -13,9 +13,9 @@ pub(crate) struct IndexingContext {
impl IndexingContext {
    /// Create a new IndexingContext given the size of the term hash map.
    pub(crate) fn new(table_size: usize) -> IndexingContext {
        let term_index = TermHashMap::new(table_size);
        let term_index = ArenaHashMap::new(table_size);
        IndexingContext {
            arena: MemoryArena::new(),
            arena: MemoryArena::default(),
            term_index,
        }
    }

@@ -1,10 +1,11 @@
use std::io;

use stacker::Addr;

use crate::fastfield::MultiValuedFastFieldWriter;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::postings_writer::SpecializedPostingsWriter;
use crate::postings::recorder::{BufferLender, DocIdRecorder, Recorder};
use crate::postings::stacker::Addr;
use crate::postings::{
    FieldSerializer, IndexingContext, IndexingPosition, PostingsWriter, UnorderedTermId,
};

@@ -15,9 +15,10 @@ mod recorder;
mod segment_postings;
mod serializer;
mod skip;
mod stacker;
mod term_info;

pub(crate) use stacker::compute_table_size;

pub use self::block_segment_postings::BlockSegmentPostings;
pub(crate) use self::indexing_context::IndexingContext;
pub(crate) use self::per_field_postings_writer::PerFieldPostingsWriter;

@@ -26,10 +27,9 @@ pub(crate) use self::postings_writer::{serialize_postings, IndexingPosition, Pos
pub use self::segment_postings::SegmentPostings;
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
pub(crate) use self::skip::{BlockInfo, SkipReader};
pub(crate) use self::stacker::compute_table_size;
pub use self::term_info::TermInfo;

pub(crate) type UnorderedTermId = u64;
pub(crate) type UnorderedTermId = stacker::UnorderedId;

#[allow(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Clone, Copy, Eq)]

@@ -631,7 +631,7 @@ mod bench {
    let mut segment_postings = segment_reader
        .inverted_index(TERM_A.field())
        .unwrap()
        .read_postings(&*TERM_A, IndexRecordOption::Basic)
        .read_postings(&TERM_A, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();
    while segment_postings.advance() != TERMINATED {}

@@ -647,25 +647,25 @@ mod bench {
    let segment_postings_a = segment_reader
        .inverted_index(TERM_A.field())
        .unwrap()
        .read_postings(&*TERM_A, IndexRecordOption::Basic)
        .read_postings(&TERM_A, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();
    let segment_postings_b = segment_reader
        .inverted_index(TERM_B.field())
        .unwrap()
        .read_postings(&*TERM_B, IndexRecordOption::Basic)
        .read_postings(&TERM_B, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();
    let segment_postings_c = segment_reader
        .inverted_index(TERM_C.field())
        .unwrap()
        .read_postings(&*TERM_C, IndexRecordOption::Basic)
        .read_postings(&TERM_C, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();
    let segment_postings_d = segment_reader
        .inverted_index(TERM_D.field())
        .unwrap()
        .read_postings(&*TERM_D, IndexRecordOption::Basic)
        .read_postings(&TERM_D, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();
    let mut intersection = Intersection::new(vec![

@@ -687,7 +687,7 @@ mod bench {
    let mut segment_postings = segment_reader
        .inverted_index(TERM_A.field())
        .unwrap()
        .read_postings(&*TERM_A, IndexRecordOption::Basic)
        .read_postings(&TERM_A, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();

@@ -705,7 +705,7 @@ mod bench {
    let mut segment_postings = segment_reader
        .inverted_index(TERM_A.field())
        .unwrap()
        .read_postings(&*TERM_A, IndexRecordOption::Basic)
        .read_postings(&TERM_A, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();
    for doc in &existing_docs {

@@ -746,7 +746,7 @@ mod bench {
    let mut segment_postings = segment_reader
        .inverted_index(TERM_A.field())
        .unwrap()
        .read_postings(&*TERM_A, IndexRecordOption::Basic)
        .read_postings(&TERM_A, IndexRecordOption::Basic)
        .unwrap()
        .unwrap();
    let mut s = 0u32;
Some files were not shown because too many files have changed in this diff.