Compare commits

...

31 Commits

Author SHA1 Message Date
PSeitz
7ce8a65619 fix: doc store for files larger than 4GB (#1856)
Fixes an issue in the skip list deserialization, which incorrectly deserialized the byte start offset as a u32.
`get_doc` will fail for any doc that lives in a block with a start offset larger than u32::MAX (~4GB).
This causes index corruption if a segment with a doc store larger than 4GB is merged (a sketch of the failure mode follows this entry).

tantivy version 0.19 is affected
2023-03-13 15:07:55 +09:00
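A minimal sketch of the failure mode described above, with hypothetical names (this is not tantivy's actual skip list code): an offset written as a u64 but read back as a u32 is silently truncated once the doc store grows past u32::MAX.

```rust
// Hypothetical sketch of the bug class, not tantivy's actual code:
// a block's byte start offset is written as a u64 but read back as a u32.
fn read_offset_truncated(bytes: &[u8; 8]) -> u64 {
    // Only the low 4 bytes are read: offsets above u32::MAX wrap around.
    u32::from_le_bytes(bytes[..4].try_into().unwrap()) as u64
}

fn read_offset_correct(bytes: &[u8; 8]) -> u64 {
    u64::from_le_bytes(*bytes)
}

fn main() {
    let offset: u64 = u32::MAX as u64 + 1; // a block starting just past 4GB
    let encoded = offset.to_le_bytes();
    assert_eq!(read_offset_correct(&encoded), offset);
    assert_ne!(read_offset_truncated(&encoded), offset); // silently reads 0
}
```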
PSeitz
7bf0a14041 fix: auto downgrade index record option, instead of vint error (#1857)
Prev: thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: IoError(Custom { kind: InvalidData, error: "Reach end of buffer while reading VInt" })', src/main.rs:46:14
Now: automatic downgrade to the next available level (sketched after this entry)
2023-03-13 15:07:28 +09:00
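A minimal sketch of the behavior change, using hypothetical types rather than tantivy's actual `IndexRecordOption`: instead of failing at read time when a higher record level is requested than was indexed, the request is capped at the next available level.

```rust
// Hypothetical record levels, ordered from least to most detailed.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum RecordOption {
    Basic,                 // doc ids only
    WithFreqs,             // + term frequencies
    WithFreqsAndPositions, // + positions
}

// Cap the requested level at whatever was actually indexed,
// instead of failing with a deserialization error at read time.
fn downgrade(requested: RecordOption, available: RecordOption) -> RecordOption {
    requested.min(available)
}

fn main() {
    // Positions were requested, but only frequencies were indexed.
    let effective = downgrade(
        RecordOption::WithFreqsAndPositions,
        RecordOption::WithFreqs,
    );
    assert_eq!(effective, RecordOption::WithFreqs);
}
```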
PSeitz
c91d4e4e65 fix sort order test for term aggregation (#1858)
fix sort order test for term aggregation
fix invalid request test
2023-03-13 13:49:10 +08:00
PSeitz
6f6f639170 fmt code, update lz4_flex (#1838)
formatting on nightly changed
2023-03-13 14:14:15 +09:00
Paul Masurel
a022e97dc2 Bumped tantivy version 2023-03-13 14:10:41 +09:00
Paul Masurel
6474a0f58e Created branch specifically for Quickwit 0.5 2023-03-11 12:27:20 +09:00
PSeitz
0f20787917 fix doc store cache docs (#1821)
* fix doc store cache docs

addresses an issue reported in #1820

* rename doc_store_cache_size
2023-01-23 07:06:49 +01:00
Paul Masurel
2874554ee4 Removed the sorting logic that forced column type to be sorted like (#1816)
* Removed the sorting logic that forced column type to be sorted like
ColumnTypes.

* add comments

Co-authored-by: PSeitz <PSeitz@users.noreply.github.com>
2023-01-20 12:43:28 +01:00
PSeitz
cbc70a9eae Cargo.toml cleanup (#1817) 2023-01-20 12:30:35 +01:00
PSeitz
226d0f88bc add columnar to workspace (#1808) 2023-01-20 11:47:10 +01:00
Paul Masurel
9548570e88 Fixing broken test build 2023-01-20 18:18:32 +09:00
Paul Masurel
9a296b29b7 Renamed dense file to dense.rs 2023-01-20 17:22:25 +09:00
PSeitz
b31fd389d8 collect columns for merge (#1812)
* collect columns for merge

* return column_type from, fix visibility

* fix

Co-authored-by: Paul Masurel <paul@quickwit.io>
2023-01-20 07:58:29 +01:00
Paul Masurel
89cec79813 Make it possible to force a column type and intricate bugfix. (#1815) 2023-01-20 14:30:56 +09:00
PSeitz
d09d91a856 fix tests (#1813) 2023-01-19 23:41:21 +09:00
PSeitz
50d8a8bc32 Update README (#1804)
Some parts are outdated

As for the debugging tutorial: debugging is really easy now with VSCode, and there are plenty of other sources on debugging Rust
2023-01-19 18:09:45 +09:00
Paul Masurel
08919a2900 Improvement on the scalar / random bitpacker code. (#1781)
* Improvement on the scalar / random bitpacker code.

Added proptesting
Added a simple benchmark
Added asserts and comments on the very non-trivial hidden contract
Removed the need for extra padding.

The last point introduces a small performance regression (~10%); a sketch of the tradeoff follows this entry.

* Fixing unit tests
2023-01-19 18:09:13 +09:00
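A sketch of the tradeoff, with hypothetical names (not the crate's actual code): with 7 bytes of trailing padding, every bit-packed value can be fetched with one unconditional 8-byte load; without padding, reads near the end of the buffer need a bounds check and a byte-by-byte fallback, which is where the regression comes from.

```rust
// A sketch of the padded vs. unpadded read, with hypothetical names.
// With 7 trailing padding bytes, the `else` branch would never be needed.
fn get_unpadded(data: &[u8], addr: usize, bit_shift: u32, mask: u64) -> u64 {
    let mut bytes = [0u8; 8];
    if addr + 8 <= data.len() {
        // Fast path: one unconditional 8-byte load.
        bytes.copy_from_slice(&data[addr..addr + 8]);
    } else {
        // Slow path near the end of the buffer: byte-by-byte copy.
        let available = data.len() - addr;
        bytes[..available].copy_from_slice(&data[addr..]);
    }
    (u64::from_le_bytes(bytes) >> bit_shift) & mask
}

fn main() {
    // Two 3-bit values (0b101 then 0b011) packed into a single byte.
    let data = [0b0001_1101u8];
    assert_eq!(get_unpadded(&data, 0, 0, 0b111), 0b101);
    assert_eq!(get_unpadded(&data, 0, 3, 0b111), 0b011);
}
```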
Lonre Wang
8ba333f1b4 Typo fix (#1803)
* Update text_options.rs

* Update src/schema/text_options.rs

Co-authored-by: Paul Masurel <paul@quickwit.io>
2023-01-19 17:56:05 +09:00
PSeitz
a2ca12995e update aggregation docs (#1807) 2023-01-19 09:52:47 +01:00
Paul Masurel
e3d504d833 Minor code cleanup (#1810) 2023-01-19 17:47:26 +09:00
Paul Masurel
5a42c5aae9 Add support for multivalues (#1809) 2023-01-19 16:55:01 +09:00
Paul Masurel
a86b104a40 Differentiating between str and bytes, + unit test 2023-01-19 14:38:12 +09:00
PSeitz
f9abd256b7 add ip addr to columnar (#1805) 2023-01-19 05:36:06 +01:00
Paul Masurel
9f42b6440a Completed unit test for dictionary encoded column 2023-01-19 12:15:27 +09:00
Paul Masurel
c723ed3f0b Columnar merge (#1806) 2023-01-19 11:52:27 +09:00
trinity-1686a
d72ea7d353 modify getters for sstable metadata (#1793)
* add way to get up to `limit` terms from sstable

* make some functions of sstable load less data

* add some tests to sstable

* add tests on sstable dictionary

* fix some bugs with sstable
2023-01-18 14:42:55 +01:00
Paul Masurel
5180b612ef Removing the demuxer code (#1799) 2023-01-18 16:12:35 +09:00
PSeitz
f687b3a5aa start migrate Field to &str (#1772)
start migrating Field to &str in preparation for columnar
return Result for get_field
2023-01-18 16:12:07 +09:00
PSeitz
c4af63e588 add rename (#1797) 2023-01-18 13:28:37 +09:00
Adrien Guillo
4b343b3189 Merge pull request #1802 from quickwit-oss/guilload/clippy-fixes
Fix some Clippy warnings
2023-01-17 10:39:55 -05:00
Adrien Guillo
c51d9f9f83 Fix some Clippy warnings 2023-01-17 10:17:51 -05:00
94 changed files with 2358 additions and 1354 deletions

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.19.0"
version = "0.19.1-quickwit"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -23,7 +23,7 @@ regex = { version = "1.5.5", default-features = false, features = ["std", "unico
aho-corasick = "0.7"
tantivy-fst = "0.4.0"
memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
lz4_flex = { version = "0.10", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.12", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true }
@@ -55,6 +55,7 @@ measure_time = "0.8.2"
async-trait = "0.1.53"
arc-swap = "1.5.0"
#columnar = { version="0.1", path="./columnar", package ="tantivy-columnar" }
sstable = { version="0.1", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version="0.1", path="./stacker", package ="tantivy-stacker" }
tantivy-query-grammar = { version= "0.19.0", path="./query-grammar" }
@@ -107,7 +108,7 @@ unstable = [] # useful for benches.
quickwit = ["sstable"]
[workspace]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes", "stacker", "sstable", "tokenizer-api"]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
# Following the "fail" crate best practices, we isolate
# tests that define specific behavior in fail check points

View File

@@ -41,7 +41,7 @@ Your mileage WILL vary depending on the nature of queries and their load.
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields
- Text, i64, u64, f64, dates, and hierarchical facet fields
- Text, i64, u64, f64, dates, ip, bool, and hierarchical facet fields
- Compressed document store (LZ4, Zstd, None, Brotli, Snap)
- Range queries
- Faceted search
@@ -80,56 +80,21 @@ There are many ways to support this project.
# Contributing code
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
Feel free to update CHANGELOG.md with your contribution.
## Tokenizer
When implementing a tokenizer for tantivy, depend on the `tantivy-tokenizer-api` crate.
## Minimum supported Rust version
Tantivy currently requires Rust 1.62 or later to compile.
## Clone and build locally
Tantivy compiles on stable Rust.
To check out and run tests, you can simply run:
```bash
git clone https://github.com/quickwit-oss/tantivy.git
cd tantivy
cargo build
```
## Run tests
Some tests will not run with just `cargo test` because of `fail-rs`.
To run the tests exhaustively, run `./run-tests.sh`.
## Debug
You might find it useful to step through the programme with a debugger.
### A failing test
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`:
```bash
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
```
Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:
```bash
$gdb run --test-threads 1 --test $NAME_OF_TEST
```
### An example
By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:
```bash
rust-gdb target/debug/examples/$EXAMPLE_NAME
$ gdb run
git clone https://github.com/quickwit-oss/tantivy.git
cd tantivy
cargo test
```
# Companies Using Tantivy

View File

@@ -34,7 +34,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema.clone());
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
for doc_json in HDFS_LOGS.trim().split('\n') {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -46,7 +46,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
for doc_json in HDFS_LOGS.trim().split('\n') {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -59,7 +59,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema_with_store.clone());
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
for doc_json in HDFS_LOGS.trim().split('\n') {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -71,7 +71,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let index = Index::create_in_ram(schema_with_store.clone());
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
for doc_json in HDFS_LOGS.trim().split('\n') {
let doc = schema.parse_document(doc_json).unwrap();
index_writer.add_document(doc).unwrap();
}
@@ -85,7 +85,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let json_field = dynamic_schema.get_field("json").unwrap();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
for doc_json in HDFS_LOGS.trim().split('\n') {
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
let doc = tantivy::doc!(json_field=>json_val);
@@ -101,7 +101,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
let json_field = dynamic_schema.get_field("json").unwrap();
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
for _ in 0..NUM_REPEATS {
for doc_json in HDFS_LOGS.trim().split("\n") {
for doc_json in HDFS_LOGS.trim().split('\n') {
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
let doc = tantivy::doc!(json_field=>json_val);

View File

@@ -15,3 +15,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
[dev-dependencies]
rand = "0.8"
proptest = "1"

View File

@@ -4,9 +4,39 @@ extern crate test;
#[cfg(test)]
mod tests {
use tantivy_bitpacker::BlockedBitpacker;
use rand::seq::IteratorRandom;
use rand::thread_rng;
use tantivy_bitpacker::{BitPacker, BitUnpacker, BlockedBitpacker};
use test::Bencher;
#[inline(never)]
fn create_bitpacked_data(bit_width: u8, num_els: u32) -> Vec<u8> {
let mut bitpacker = BitPacker::new();
let mut buffer = Vec::new();
for _ in 0..num_els {
// the values do not matter.
bitpacker.write(0u64, bit_width, &mut buffer).unwrap();
}
bitpacker.flush(&mut buffer).unwrap();
buffer
}
#[bench]
fn bench_bitpacking_read(b: &mut Bencher) {
let bit_width = 3;
let num_els = 1_000_000u32;
let bit_unpacker = BitUnpacker::new(bit_width);
let data = create_bitpacked_data(bit_width, num_els);
let idxs: Vec<u32> = (0..num_els).choose_multiple(&mut thread_rng(), 100_000);
b.iter(|| {
let mut out = 0u64;
for &idx in &idxs {
out = out.wrapping_add(bit_unpacker.get(idx, &data[..]));
}
out
});
}
#[bench]
fn bench_blockedbitp_read(b: &mut Bencher) {
let mut blocked_bitpacker = BlockedBitpacker::new();
@@ -14,9 +44,9 @@ mod tests {
blocked_bitpacker.add(val * val);
}
b.iter(|| {
let mut out = 0;
let mut out = 0u64;
for val in 0..=21500 {
out = blocked_bitpacker.get(val);
out = out.wrapping_add(blocked_bitpacker.get(val));
}
out
});

View File

@@ -56,27 +56,31 @@ impl BitPacker {
pub fn close<TWrite: io::Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
self.flush(output)?;
// Padding the write file to simplify reads.
output.write_all(&[0u8; 7])?;
Ok(())
}
}
#[derive(Clone, Debug, Default)]
#[derive(Clone, Debug, Default, Copy)]
pub struct BitUnpacker {
num_bits: u64,
num_bits: u32,
mask: u64,
}
impl BitUnpacker {
/// Creates a bit unpacker that assumes the same bit width for all values.
///
/// The bitunpacker works by doing an unaligned read of 8 bytes.
/// For this reason, values of `num_bits` between
/// [57..63] are forbidden.
pub fn new(num_bits: u8) -> BitUnpacker {
assert!(num_bits <= 7 * 8 || num_bits == 64);
let mask: u64 = if num_bits == 64 {
!0u64
} else {
(1u64 << num_bits) - 1u64
};
BitUnpacker {
num_bits: u64::from(num_bits),
num_bits: u32::from(num_bits),
mask,
}
}
@@ -87,28 +91,40 @@ impl BitUnpacker {
#[inline]
pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
if self.num_bits == 0 {
return 0u64;
}
let addr_in_bits = idx * self.num_bits as u32;
let addr_in_bits = idx * self.num_bits;
let addr = (addr_in_bits >> 3) as usize;
if addr + 8 > data.len() {
if self.num_bits == 0 {
return 0;
}
let bit_shift = addr_in_bits & 7;
return self.get_slow_path(addr, bit_shift, data);
}
let bit_shift = addr_in_bits & 7;
debug_assert!(
addr + 8 <= data.len(),
"The fast field field should have been padded with 7 bytes."
);
let bytes: [u8; 8] = (&data[addr..addr + 8]).try_into().unwrap();
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = val_unshifted_unmasked >> bit_shift;
val_shifted & self.mask
}
#[inline(never)]
fn get_slow_path(&self, addr: usize, bit_shift: u32, data: &[u8]) -> u64 {
let mut bytes: [u8; 8] = [0u8; 8];
let available_bytes = data.len() - addr;
// This function is meant to only be called if we did not have 8 bytes to load.
debug_assert!(available_bytes < 8);
bytes[..available_bytes].copy_from_slice(&data[addr..]);
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
let val_shifted = val_unshifted_unmasked >> bit_shift;
val_shifted & self.mask
}
}
#[cfg(test)]
mod test {
use super::{BitPacker, BitUnpacker};
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>, Vec<u8>) {
fn create_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>, Vec<u8>) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new();
let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
@@ -119,13 +135,13 @@ mod test {
bitpacker.write(val, num_bits, &mut data).unwrap();
}
bitpacker.close(&mut data).unwrap();
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8);
let bitunpacker = BitUnpacker::new(num_bits);
(bitunpacker, vals, data)
}
fn test_bitpacker_util(len: usize, num_bits: u8) {
let (bitunpacker, vals, data) = create_fastfield_bitpacker(len, num_bits);
let (bitunpacker, vals, data) = create_bitpacker(len, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i as u32, &data), *val);
}
@@ -139,4 +155,49 @@ mod test {
test_bitpacker_util(6, 14);
test_bitpacker_util(1000, 14);
}
use proptest::prelude::*;
fn num_bits_strategy() -> impl Strategy<Value = u8> {
prop_oneof!(Just(0), Just(1), 2u8..56u8, Just(56), Just(64),)
}
fn vals_strategy() -> impl Strategy<Value = (u8, Vec<u64>)> {
(num_bits_strategy(), 0usize..100usize).prop_flat_map(|(num_bits, len)| {
let max_val = if num_bits == 64 {
u64::MAX
} else {
(1u64 << num_bits as u32) - 1
};
let vals = proptest::collection::vec(0..=max_val, len);
vals.prop_map(move |vals| (num_bits, vals))
})
}
fn test_bitpacker_aux(num_bits: u8, vals: &[u64]) {
let mut buffer: Vec<u8> = Vec::new();
let mut bitpacker = BitPacker::new();
for &val in vals {
bitpacker.write(val, num_bits, &mut buffer).unwrap();
}
bitpacker.flush(&mut buffer).unwrap();
assert_eq!(buffer.len(), (vals.len() * num_bits as usize + 7) / 8);
let bitunpacker = BitUnpacker::new(num_bits);
let max_val = if num_bits == 64 {
u64::MAX
} else {
(1u64 << num_bits) - 1
};
for (i, val) in vals.iter().copied().enumerate() {
assert!(val <= max_val);
assert_eq!(bitunpacker.get(i as u32, &buffer), val);
}
}
proptest::proptest! {
#[test]
fn test_bitpacker_proptest((num_bits, vals) in vals_strategy()) {
test_bitpacker_aux(num_bits, &vals);
}
}
}
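The `[57..63]` restriction in the `BitUnpacker::new` doc comment above follows from simple arithmetic: a value may start at a bit offset of up to 7 within its first byte, so a single unaligned 8-byte load covers it only if `bit_shift + num_bits` stays within 64. A standalone check of that reasoning (hypothetical helper, mirroring the assert `num_bits <= 7 * 8 || num_bits == 64`):

```rust
// Which bit widths can always be served by a single unaligned 8-byte load?
fn single_load_suffices(num_bits: u32) -> bool {
    if num_bits == 64 {
        // 64-bit values always start byte-aligned, so bit_shift is 0.
        return true;
    }
    let worst_case_bit_shift = 7;
    worst_case_bit_shift + num_bits <= 64
}

fn main() {
    assert!(single_load_suffices(56)); // 7 + 56 = 63 bits fit in one load
    assert!(single_load_suffices(64)); // aligned special case
    assert!(!single_load_suffices(58)); // 7 + 58 = 65 bits would need 9 bytes
    // num_bits = 57 would fit exactly (7 + 57 = 64); the assert's cap at
    // 7 * 8 = 56 is slightly conservative.
}
```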

View File

@@ -5,28 +5,23 @@ edition = "2021"
license = "MIT"
[dependencies]
itertools = "0.10.5"
log = "0.4.17"
fnv = "1.0.7"
fastdivide = "0.4.0"
rand = { version = "0.8.5", optional = true }
measure_time = { version = "0.8.2", optional = true }
prettytable-rs = { version = "0.10.0", optional = true }
stacker = { path = "../stacker", package="tantivy-stacker"}
serde_json = "1"
thiserror = "1"
fnv = "1"
sstable = { path = "../sstable", package = "tantivy-sstable" }
common = { path = "../common", package = "tantivy-common" }
itertools = "0.10"
log = "0.4"
tantivy-bitpacker = { version= "0.3", path = "../bitpacker/" }
prettytable-rs = {version="0.10.0", optional= true}
rand = {version="0.8.3", optional= true}
fastdivide = "0.4"
measure_time = { version="0.8.2", optional=true}
[dev-dependencies]
proptest = "1"
more-asserts = "0.3.0"
rand = "0.8.3"
# temporary
[workspace]
members = []
proptest = "1.0.0"
more-asserts = "0.3.1"
rand = "0.8.5"
[features]
unstable = []

View File

@@ -35,6 +35,7 @@ remove all doc_id occurrences -> row_id
use the rank & select naming in unit tests branch.
multi-linear -> blockwise
linear codec -> simply a multiplication for the index column
rename columnar to something more explicit, like column_dictionary or columnar_table
# Other
fix/enhance column-cli

View File

@@ -5,9 +5,16 @@ use std::sync::Arc;
use sstable::{Dictionary, VoidSSTable};
use crate::column::Column;
use crate::column_index::ColumnIndex;
use crate::RowId;
/// Dictionary encoded column.
///
/// The column simply gives access to a regular u64 column, in
/// which the values are term ordinals.
///
/// These ordinals are ids that uniquely identify the bytes stored in
/// the column. They are small, and sorted in the same order
/// as the term_ord_column.
#[derive(Clone)]
pub struct BytesColumn {
pub(crate) dictionary: Arc<Dictionary<VoidSSTable>>,
@@ -15,26 +22,57 @@ pub struct BytesColumn {
}
impl BytesColumn {
/// Fills the given `output` buffer with the term associated to the ordinal `ord`.
///
/// Returns `false` if the term does not exist (e.g. `term_ord` is greater than or equal to
/// the overall number of terms).
pub fn term_ord_to_str(&self, term_ord: u64, output: &mut Vec<u8>) -> io::Result<bool> {
self.dictionary.ord_to_term(term_ord, output)
pub fn ord_to_bytes(&self, ord: u64, output: &mut Vec<u8>) -> io::Result<bool> {
self.dictionary.ord_to_term(ord, output)
}
pub fn term_ords(&self) -> &Column<u64> {
/// Returns the number of rows in the column.
pub fn num_rows(&self) -> RowId {
self.term_ord_column.num_rows()
}
/// Returns the column of ordinals
pub fn ords(&self) -> &Column<u64> {
&self.term_ord_column
}
}
impl Deref for BytesColumn {
type Target = ColumnIndex<'static>;
#[derive(Clone)]
pub struct StrColumn(BytesColumn);
fn deref(&self) -> &Self::Target {
&**self.term_ords()
impl From<BytesColumn> for StrColumn {
fn from(bytes_col: BytesColumn) -> Self {
StrColumn(bytes_col)
}
}
#[cfg(test)]
mod tests {
use crate::{ColumnarReader, ColumnarWriter};
impl StrColumn {
/// Fills the given `output` string with the term associated with the ordinal `term_ord`.
pub fn ord_to_str(&self, term_ord: u64, output: &mut String) -> io::Result<bool> {
unsafe {
let buf = output.as_mut_vec();
self.0.dictionary.ord_to_term(term_ord, buf)?;
// TODO consider removing the checks if they hurt performance.
if std::str::from_utf8(buf.as_slice()).is_err() {
buf.clear();
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Not valid utf-8",
));
}
}
Ok(true)
}
}
impl Deref for StrColumn {
type Target = BytesColumn;
fn deref(&self) -> &Self::Target {
&self.0
}
}
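The dictionary-encoded layout described in the doc comments above can be pictured with plain vectors. A minimal sketch, with hypothetical names standing in for the sstable-backed `Dictionary` and the u64 ordinal column:

```rust
// Dictionary encoding in miniature: plain Vecs stand in for the
// sstable-backed Dictionary and the u64 ordinal column (hypothetical names).
fn main() {
    // Sorted, deduplicated terms; the ordinal is the index into this table.
    let dictionary: Vec<&[u8]> = vec![b"apple", b"banana", b"cherry"];
    // One small ordinal per row instead of the full byte string.
    let term_ord_column: Vec<u64> = vec![1, 0, 0, 2, 1];

    // Resolve row 3 back to its bytes, in the spirit of `ord_to_bytes`.
    let mut output: Vec<u8> = Vec::new();
    let ord = term_ord_column[3] as usize;
    output.extend_from_slice(dictionary[ord]);
    assert_eq!(output, b"cherry");
}
```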

View File

@@ -5,8 +5,11 @@ use std::ops::Deref;
use std::sync::Arc;
use common::BinarySerializable;
pub use dictionary_encoded::BytesColumn;
pub use serialize::{open_column_bytes, open_column_u64, serialize_column_u64};
pub use dictionary_encoded::{BytesColumn, StrColumn};
pub use serialize::{
open_column_bytes, open_column_u128, open_column_u64, serialize_column_mappable_to_u128,
serialize_column_mappable_to_u64,
};
use crate::column_index::ColumnIndex;
use crate::column_values::ColumnValues;
@@ -18,21 +21,43 @@ pub struct Column<T> {
pub values: Arc<dyn ColumnValues<T>>,
}
use crate::column_index::Set;
impl<T: PartialOrd> Column<T> {
pub fn first(&self, row_id: RowId) -> Option<T> {
pub fn num_rows(&self) -> RowId {
match &self.idx {
ColumnIndex::Full => Some(self.values.get_val(row_id)),
ColumnIndex::Optional(opt_idx) => {
let value_row_idx = opt_idx.rank_if_exists(row_id)?;
Some(self.values.get_val(value_row_idx))
}
ColumnIndex::Multivalued(_multivalued_index) => {
todo!();
ColumnIndex::Full => self.values.num_vals() as u32,
ColumnIndex::Optional(optional_index) => optional_index.num_rows(),
ColumnIndex::Multivalued(col_index) => {
// The multivalued index contains, for each row_id, the start index of its values,
// and one extra value at the end containing the overall number of values.
col_index.num_vals() - 1
}
}
}
pub fn min_value(&self) -> T {
self.values.min_value()
}
pub fn max_value(&self) -> T {
self.values.max_value()
}
}
impl<T: PartialOrd + Copy + Send + Sync + 'static> Column<T> {
pub fn first(&self, row_id: RowId) -> Option<T> {
self.values(row_id).next()
}
pub fn values(&self, row_id: RowId) -> impl Iterator<Item = T> + '_ {
self.value_row_ids(row_id)
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
}
pub fn first_or_default_col(self, default_value: T) -> Arc<dyn ColumnValues<T>> {
Arc::new(FirstValueWithDefault {
column: self,
default_value,
})
}
}
impl<T> Deref for Column<T> {
@@ -54,3 +79,31 @@ impl BinarySerializable for Cardinality {
Ok(cardinality)
}
}
// TODO simplify or optimize
struct FirstValueWithDefault<T: Copy> {
column: Column<T>,
default_value: T,
}
impl<T: PartialOrd + Send + Sync + Copy + 'static> ColumnValues<T> for FirstValueWithDefault<T> {
fn get_val(&self, idx: u32) -> T {
self.column.first(idx).unwrap_or(self.default_value)
}
fn min_value(&self) -> T {
self.column.values.min_value()
}
fn max_value(&self) -> T {
self.column.values.max_value()
}
fn num_vals(&self) -> u32 {
match &self.column.idx {
ColumnIndex::Full => self.column.values.num_vals(),
ColumnIndex::Optional(optional_idx) => optional_idx.num_rows(),
ColumnIndex::Multivalued(_) => todo!(),
}
}
}

View File

@@ -2,24 +2,51 @@ use std::io;
use std::io::Write;
use std::sync::Arc;
use common::{CountingWriter, OwnedBytes};
use common::OwnedBytes;
use sstable::Dictionary;
use crate::column::{BytesColumn, Column};
use crate::column_index::{serialize_column_index, SerializableColumnIndex};
use crate::column_values::serialize::serialize_column_values_u128;
use crate::column_values::{
serialize_column_values, ColumnValues, MonotonicallyMappableToU64, ALL_CODEC_TYPES,
serialize_column_values, ColumnValues, FastFieldCodecType, MonotonicallyMappableToU128,
MonotonicallyMappableToU64,
};
pub fn serialize_column_u64<T: MonotonicallyMappableToU64>(
pub fn serialize_column_mappable_to_u128<
F: Fn() -> I,
I: Iterator<Item = T>,
T: MonotonicallyMappableToU128,
>(
column_index: SerializableColumnIndex<'_>,
column_values: F,
num_vals: u32,
output: &mut impl Write,
) -> io::Result<()> {
let column_index_num_bytes = serialize_column_index(column_index, output)?;
serialize_column_values_u128(
|| column_values().map(|val| val.to_u128()),
num_vals,
output,
)?;
output.write_all(&column_index_num_bytes.to_le_bytes())?;
Ok(())
}
pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64>(
column_index: SerializableColumnIndex<'_>,
column_values: &impl ColumnValues<T>,
output: &mut impl Write,
) -> io::Result<()> {
let mut counting_writer = CountingWriter::wrap(output);
serialize_column_index(column_index, &mut counting_writer)?;
let column_index_num_bytes = counting_writer.written_bytes() as u32;
let output = counting_writer.finish();
serialize_column_values(column_values, &ALL_CODEC_TYPES[..], output)?;
let column_index_num_bytes = serialize_column_index(column_index, output)?;
serialize_column_values(
column_values,
&[
FastFieldCodecType::Bitpacked,
FastFieldCodecType::BlockwiseLinear,
],
output,
)?;
output.write_all(&column_index_num_bytes.to_le_bytes())?;
Ok(())
}
@@ -41,14 +68,34 @@ pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::
})
}
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
pub fn open_column_u128<T: MonotonicallyMappableToU128>(
bytes: OwnedBytes,
) -> io::Result<Column<T>> {
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
let column_index_num_bytes = u32::from_le_bytes(
column_index_num_bytes_payload
.as_slice()
.try_into()
.unwrap(),
);
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
let column_index = crate::column_index::open_column_index(column_index_data)?;
let column_values = crate::column_values::open_u128_mapped(column_values_data)?;
Ok(Column {
idx: column_index,
values: column_values,
})
}
pub fn open_column_bytes<T: From<BytesColumn>>(data: OwnedBytes) -> io::Result<T> {
let (body, dictionary_len_bytes) = data.rsplit(4);
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
Ok(BytesColumn {
let bytes_column = BytesColumn {
dictionary,
term_ord_column,
})
};
Ok(bytes_column.into())
}

View File

@@ -2,6 +2,7 @@ mod multivalued_index;
mod optional_index;
mod serialize;
use std::ops::Range;
use std::sync::Arc;
pub use optional_index::{OptionalIndex, SerializableOptionalIndex, Set};
@@ -14,8 +15,12 @@ use crate::{Cardinality, RowId};
pub enum ColumnIndex<'a> {
Full,
Optional(OptionalIndex),
// TODO remove the Arc<dyn> apart from serialization this is not
// dynamic at all.
// TODO Remove the static by fixing the codec if possible.
/// The enclosed column values contain, for each row_id,
/// the value start_index.
///
/// In addition, at index num_rows, an extra value is added
/// containing the overall number of values.
Multivalued(Arc<dyn ColumnValues<RowId> + 'a>),
}
@@ -28,13 +33,22 @@ impl<'a> ColumnIndex<'a> {
}
}
pub fn num_rows(&self) -> RowId {
pub fn value_row_ids(&self, row_id: RowId) -> Range<RowId> {
match self {
ColumnIndex::Full => {
todo!()
ColumnIndex::Full => row_id..row_id + 1,
ColumnIndex::Optional(optional_index) => {
if let Some(val) = optional_index.rank_if_exists(row_id) {
val..val + 1
} else {
0..0
}
}
ColumnIndex::Multivalued(multivalued_index) => {
let multivalued_index_ref = &**multivalued_index;
let start: u32 = multivalued_index_ref.get_val(row_id);
let end: u32 = multivalued_index_ref.get_val(row_id + 1);
start..end
}
ColumnIndex::Optional(optional_index) => optional_index.num_rows(),
ColumnIndex::Multivalued(multivalued_index) => multivalued_index.num_vals() - 1,
}
}
}
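The start-offset encoding described in the comments above is easy to verify by hand. A minimal sketch with a plain `Vec<u32>` standing in for the `ColumnValues` codec (hypothetical helper, not the crate's API):

```rust
// Row i owns the value positions starts[i]..starts[i + 1].
fn value_range(starts: &[u32], row_id: usize) -> std::ops::Range<u32> {
    starts[row_id]..starts[row_id + 1]
}

fn main() {
    // Three rows with values [[10, 11], [], [12, 13, 14]]:
    // one start index per row, plus a trailing entry = total value count.
    let starts = vec![0u32, 2, 2, 5];
    let num_rows = starts.len() - 1; // matches `num_vals() - 1` above
    assert_eq!(num_rows, 3);
    assert_eq!(value_range(&starts, 0), 0..2); // row 0 owns values 0 and 1
    assert_eq!(value_range(&starts, 1), 2..2); // row 1 is empty
    assert_eq!(value_range(&starts, 2), 2..5); // row 2 owns values 2..4
}
```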

View File

@@ -11,11 +11,11 @@ use crate::RowId;
pub struct MultivaluedIndex(Arc<dyn ColumnValues<RowId>>);
pub fn serialize_multivalued_index(
multivalued_index: MultivaluedIndex,
multivalued_index: &dyn ColumnValues<RowId>,
output: &mut impl Write,
) -> io::Result<()> {
crate::column_values::serialize_column_values(
&*multivalued_index.0,
&*multivalued_index,
&[FastFieldCodecType::Bitpacked, FastFieldCodecType::Linear],
output,
)?;
@@ -23,5 +23,7 @@ pub fn serialize_multivalued_index(
}
pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<Arc<dyn ColumnValues<RowId>>> {
todo!();
let start_index_column: Arc<dyn ColumnValues<RowId>> =
crate::column_values::open_u64_mapped(bytes)?;
Ok(start_index_column)
}

View File

@@ -1,7 +1,7 @@
mod set_block;
mod dense;
mod sparse;
pub use set_block::{DenseBlock, DenseBlockCodec, DENSE_BLOCK_NUM_BYTES};
pub use dense::{DenseBlock, DenseBlockCodec, DENSE_BLOCK_NUM_BYTES};
pub use sparse::{SparseBlock, SparseBlockCodec};
#[cfg(test)]

View File

@@ -1,7 +1,8 @@
use std::collections::HashMap;
use crate::column_index::optional_index::set_block::set_block::DENSE_BLOCK_NUM_BYTES;
use crate::column_index::optional_index::set_block::{DenseBlockCodec, SparseBlockCodec};
use crate::column_index::optional_index::set_block::{
DenseBlockCodec, SparseBlockCodec, DENSE_BLOCK_NUM_BYTES,
};
use crate::column_index::optional_index::{Set, SetCodec};
fn test_set_helper<C: SetCodec<Item = u16>>(vals: &[u16]) -> usize {
@@ -51,6 +52,7 @@ fn test_sparse_block_set_u16_max() {
use proptest::prelude::*;
proptest! {
#![proptest_config(ProptestConfig::with_cases(1))]
#[test]
fn test_prop_test_dense(els in proptest::collection::btree_set(0..=u16::MAX, 0..=u16::MAX as usize)) {
let vals: Vec<u16> = els.into_iter().collect();

View File

@@ -1,19 +1,20 @@
use std::io;
use std::io::Write;
use common::OwnedBytes;
use common::{CountingWriter, OwnedBytes};
use crate::column_index::multivalued_index::{serialize_multivalued_index, MultivaluedIndex};
use crate::column_index::multivalued_index::serialize_multivalued_index;
use crate::column_index::optional_index::serialize_optional_index;
use crate::column_index::{ColumnIndex, SerializableOptionalIndex};
use crate::Cardinality;
use crate::column_values::ColumnValues;
use crate::{Cardinality, RowId};
pub enum SerializableColumnIndex<'a> {
Full,
Optional(Box<dyn SerializableOptionalIndex<'a> + 'a>),
// TODO remove the Arc<dyn> apart from serialization this is not
// dynamic at all.
Multivalued(MultivaluedIndex),
Multivalued(Box<dyn ColumnValues<RowId> + 'a>),
}
impl<'a> SerializableColumnIndex<'a> {
@@ -29,19 +30,21 @@ impl<'a> SerializableColumnIndex<'a> {
pub fn serialize_column_index(
column_index: SerializableColumnIndex,
output: &mut impl Write,
) -> io::Result<()> {
) -> io::Result<u32> {
let mut output = CountingWriter::wrap(output);
let cardinality = column_index.get_cardinality().to_code();
output.write_all(&[cardinality])?;
match column_index {
SerializableColumnIndex::Full => {}
SerializableColumnIndex::Optional(optional_index) => {
serialize_optional_index(&*optional_index, output)?
serialize_optional_index(&*optional_index, &mut output)?
}
SerializableColumnIndex::Multivalued(multivalued_index) => {
serialize_multivalued_index(multivalued_index, output)?
serialize_multivalued_index(&*multivalued_index, &mut output)?
}
}
Ok(())
let column_index_num_bytes = output.written_bytes() as u32;
Ok(column_index_num_bytes)
}
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex<'static>> {

View File

@@ -78,6 +78,32 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
}
}
impl<T: Copy + PartialOrd> ColumnValues<T> for std::sync::Arc<dyn ColumnValues<T>> {
fn get_val(&self, idx: u32) -> T {
self.as_ref().get_val(idx)
}
fn min_value(&self) -> T {
self.as_ref().min_value()
}
fn max_value(&self) -> T {
self.as_ref().max_value()
}
fn num_vals(&self) -> u32 {
self.as_ref().num_vals()
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
self.as_ref().iter()
}
fn get_range(&self, start: u64, output: &mut [T]) {
self.as_ref().get_range(start, output)
}
}
impl<'a, C: ColumnValues<T> + ?Sized, T: Copy + PartialOrd> ColumnValues<T> for &'a C {
fn get_val(&self, idx: u32) -> T {
(*self).get_val(idx)

View File

@@ -28,7 +28,7 @@ mod compact_space;
mod line;
mod linear;
pub(crate) mod monotonic_mapping;
// mod monotonic_mapping_u128;
pub(crate) mod monotonic_mapping_u128;
mod column;
mod column_with_cardinality;
@@ -37,8 +37,10 @@ pub mod serialize;
pub use self::column::{monotonic_map_column, ColumnValues, IterColumn, VecColumn};
pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
// pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
pub use self::serialize::{serialize_and_load, serialize_column_values, NormalizedHeader};
pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
#[cfg(test)]
pub use self::serialize::tests::serialize_and_load;
pub use self::serialize::{serialize_column_values, NormalizedHeader};
use crate::column_values::bitpacked::BitpackedCodec;
use crate::column_values::blockwise_linear::BlockwiseLinearCodec;
use crate::column_values::linear::LinearCodec;
@@ -122,19 +124,17 @@ impl U128FastFieldCodecType {
}
/// Returns the correct codec reader wrapped in the `Arc` for the data.
// pub fn open_u128<Item: MonotonicallyMappableToU128>(
// bytes: OwnedBytes,
// ) -> io::Result<Arc<dyn Column<Item>>> {
// todo!();
// // let (bytes, _format_version) = read_format_version(bytes)?;
// // let (mut bytes, _null_index_footer) = read_null_index_footer(bytes)?;
// // let header = U128Header::deserialize(&mut bytes)?;
// // assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
// // let reader = CompactSpaceDecompressor::open(bytes)?;
// // let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<Item>> =
// // StrictlyMonotonicMappingToInternal::<Item>::new().into();
// // Ok(Arc::new(monotonic_map_column(reader, inverted)))
// }
pub fn open_u128_mapped<T: MonotonicallyMappableToU128>(
mut bytes: OwnedBytes,
) -> io::Result<Arc<dyn ColumnValues<T>>> {
let header = U128Header::deserialize(&mut bytes)?;
assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
let reader = CompactSpaceDecompressor::open(bytes)?;
let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<T>> =
StrictlyMonotonicMappingToInternal::<T>::new().into();
Ok(Arc::new(monotonic_map_column(reader, inverted)))
}
/// Returns the correct codec reader wrapped in the `Arc` for the data.
pub fn open_u64_mapped<T: MonotonicallyMappableToU64>(
@@ -198,13 +198,6 @@ pub(crate) trait FastFieldCodec: 'static {
fn estimate(column: &dyn ColumnValues) -> Option<f32>;
}
/// The list of all available codecs for u64 convertible data.
pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
FastFieldCodecType::Bitpacked,
FastFieldCodecType::BlockwiseLinear,
FastFieldCodecType::Linear,
];
#[cfg(all(test, feature = "unstable"))]
mod bench {
use std::sync::Arc;

View File

@@ -2,6 +2,7 @@ use std::marker::PhantomData;
use fastdivide::DividerU64;
use super::MonotonicallyMappableToU128;
use crate::RowId;
/// Monotonic maps a value to u64 value space.
@@ -80,21 +81,20 @@ impl<T> StrictlyMonotonicMappingToInternal<T> {
}
}
// TODO
// impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
// StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
// where T: MonotonicallyMappableToU128
// {
// #[inline(always)]
// fn mapping(&self, inp: External) -> u128 {
// External::to_u128(inp)
// }
impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU128
{
#[inline(always)]
fn mapping(&self, inp: External) -> u128 {
External::to_u128(inp)
}
// #[inline(always)]
// fn inverse(&self, out: u128) -> External {
// External::from_u128(out)
// }
// }
#[inline(always)]
fn inverse(&self, out: u128) -> External {
External::from_u128(out)
}
}
impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
@@ -194,6 +194,20 @@ impl MonotonicallyMappableToU64 for i64 {
}
}
impl MonotonicallyMappableToU64 for crate::DateTime {
#[inline(always)]
fn to_u64(self) -> u64 {
common::i64_to_u64(self.timestamp_micros)
}
#[inline(always)]
fn from_u64(val: u64) -> Self {
crate::DateTime {
timestamp_micros: common::u64_to_i64(val),
}
}
}
impl MonotonicallyMappableToU64 for bool {
#[inline(always)]
fn to_u64(self) -> u64 {

View File

@@ -19,9 +19,8 @@
use std::io;
use std::num::NonZeroU64;
use std::sync::Arc;
use common::{BinarySerializable, OwnedBytes, VInt};
use common::{BinarySerializable, VInt};
use log::warn;
use super::bitpacked::BitpackedCodec;
@@ -33,8 +32,9 @@ use super::monotonic_mapping::{
};
use super::{
monotonic_map_column, ColumnValues, FastFieldCodec, FastFieldCodecType,
MonotonicallyMappableToU64, U128FastFieldCodecType, VecColumn, ALL_CODEC_TYPES,
MonotonicallyMappableToU64, U128FastFieldCodecType,
};
use crate::column_values::compact_space::CompactSpaceCompressor;
/// The normalized header gives some parameters after applying the following
/// normalization of the vector:
@@ -160,54 +160,22 @@ impl BinarySerializable for Header {
}
}
/// Return estimated compression for given codec in the value range [0.0..1.0], where 1.0 means no
/// compression.
pub(crate) fn estimate<T: MonotonicallyMappableToU64>(
typed_column: impl ColumnValues<T>,
codec_type: FastFieldCodecType,
) -> Option<f32> {
let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
let min_value = column.min_value();
let gcd = super::gcd::find_gcd(column.iter().map(|val| val - min_value))
.filter(|gcd| gcd.get() > 1u64);
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(
gcd.map(|gcd| gcd.get()).unwrap_or(1u64),
min_value,
);
let normalized_column = monotonic_map_column(&column, mapping);
match codec_type {
FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&normalized_column),
FastFieldCodecType::Linear => LinearCodec::estimate(&normalized_column),
FastFieldCodecType::BlockwiseLinear => BlockwiseLinearCodec::estimate(&normalized_column),
}
}
// TODO
/// Serializes u128 values with the compact space codec.
// pub fn serialize_u128_new<F: Fn() -> I, I: Iterator<Item = u128>>(
// value_index: ColumnIndex,
// iter_gen: F,
// num_vals: u32,
// output: &mut impl io::Write,
// ) -> io::Result<()> {
// let header = U128Header {
// num_vals,
// codec_type: U128FastFieldCodecType::CompactSpace,
// };
// header.serialize(output)?;
// let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
// compressor.compress_into(iter_gen(), output).unwrap();
pub fn serialize_column_values_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
iter_gen: F,
num_vals: u32,
output: &mut impl io::Write,
) -> io::Result<()> {
let header = U128Header {
num_vals,
codec_type: U128FastFieldCodecType::CompactSpace,
};
header.serialize(output)?;
let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
compressor.compress_into(iter_gen(), output)?;
// let null_index_footer = ColumnFooter {
// cardinality: value_index.get_cardinality(),
// null_index_codec: NullIndexCodec::Full,
// null_index_byte_range: 0..0,
// };
// append_null_index_footer(output, null_index_footer)?;
// append_format_version(output)?;
// Ok(())
// }
Ok(())
}
/// Serializes the column with the codec that has the best estimate for the data.
pub fn serialize_column_values<T: MonotonicallyMappableToU64>(
@@ -279,20 +247,29 @@ pub(crate) fn serialize_given_codec(
Ok(())
}
/// Helper function to serialize a column (autodetect from all codecs) and then open it
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
column: &[T],
) -> Arc<dyn ColumnValues<T>> {
let mut buffer = Vec::new();
super::serialize_column_values(&VecColumn::from(&column), &ALL_CODEC_TYPES, &mut buffer)
.unwrap();
super::open_u64_mapped(OwnedBytes::new(buffer)).unwrap()
}
#[cfg(test)]
mod tests {
use super::*;
pub mod tests {
use std::sync::Arc;
use common::OwnedBytes;
use super::*;
use crate::column_values::{open_u64_mapped, VecColumn};
const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
FastFieldCodecType::Bitpacked,
FastFieldCodecType::Linear,
FastFieldCodecType::BlockwiseLinear,
];
/// Helper function to serialize a column (autodetect from all codecs) and then open it
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
column: &[T],
) -> Arc<dyn ColumnValues<T>> {
let mut buffer = Vec::new();
serialize_column_values(&VecColumn::from(&column), &ALL_CODEC_TYPES, &mut buffer).unwrap();
open_u64_mapped(OwnedBytes::new(buffer)).unwrap()
}
#[test]
fn test_serialize_deserialize_u128_header() {
let original = U128Header {
@@ -319,7 +296,7 @@ mod tests {
serialize_column_values(&col, &ALL_CODEC_TYPES, &mut buffer).unwrap();
// TODO put the header as a footer so that it serves as a padding.
// 5 bytes of header, 1 byte of value, 7 bytes of padding.
assert_eq!(buffer.len(), 5 + 1 + 7);
assert_eq!(buffer.len(), 5 + 1);
}
#[test]
@@ -328,7 +305,7 @@ mod tests {
let col = VecColumn::from(&[true][..]);
serialize_column_values(&col, &ALL_CODEC_TYPES, &mut buffer).unwrap();
// 5 bytes of header, 0 bytes of value, 7 bytes of padding.
assert_eq!(buffer.len(), 5 + 7);
assert_eq!(buffer.len(), 5);
}
#[test]
@@ -338,6 +315,6 @@ mod tests {
let col = VecColumn::from(&vals[..]);
serialize_column_values(&col, &[FastFieldCodecType::Bitpacked], &mut buffer).unwrap();
// Values are stored over 3 bits.
assert_eq!(buffer.len(), 7 + (3 * 80 / 8) + 7);
assert_eq!(buffer.len(), 7 + (3 * 80 / 8));
}
}

View File

@@ -1,115 +1,149 @@
use crate::utils::{place_bits, select_bits};
use std::net::Ipv6Addr;
use crate::value::NumericalType;
use crate::InvalidData;
/// The column type represents the column type and can fit on 6-bits.
///
/// - bits[0..3]: Column category type.
/// - bits[3..6]: Numerical type if necessary.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)]
/// The column type represents the column type.
/// Any changes need to be propagated to `COLUMN_TYPES`.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy, Ord, PartialOrd)]
#[repr(u8)]
pub enum ColumnType {
Bytes,
Numerical(NumericalType),
Bool,
I64 = 0u8,
U64 = 1u8,
F64 = 2u8,
Bytes = 3u8,
Str = 4u8,
Bool = 5u8,
IpAddr = 6u8,
DateTime = 7u8,
}
// The order needs to match _exactly_ the order in the enum
const COLUMN_TYPES: [ColumnType; 8] = [
ColumnType::I64,
ColumnType::U64,
ColumnType::F64,
ColumnType::Bytes,
ColumnType::Str,
ColumnType::Bool,
ColumnType::IpAddr,
ColumnType::DateTime,
];
impl ColumnType {
/// Encoded over 6 bits.
pub(crate) fn to_code(self) -> u8 {
let column_type_category;
let numerical_type_code: u8;
match self {
ColumnType::Bytes => {
column_type_category = ColumnTypeCategory::Str;
numerical_type_code = 0u8;
}
ColumnType::Numerical(numerical_type) => {
column_type_category = ColumnTypeCategory::Numerical;
numerical_type_code = numerical_type.to_code();
}
ColumnType::Bool => {
column_type_category = ColumnTypeCategory::Bool;
numerical_type_code = 0u8;
}
}
place_bits::<0, 3>(column_type_category.to_code()) | place_bits::<3, 6>(numerical_type_code)
}
pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
if select_bits::<6, 8>(code) != 0u8 {
return Err(InvalidData);
}
let column_type_category_code = select_bits::<0, 3>(code);
let numerical_type_code = select_bits::<3, 6>(code);
let column_type_category = ColumnTypeCategory::try_from_code(column_type_category_code)?;
match column_type_category {
ColumnTypeCategory::Bool => {
if numerical_type_code != 0u8 {
return Err(InvalidData);
}
Ok(ColumnType::Bool)
}
ColumnTypeCategory::Str => {
if numerical_type_code != 0u8 {
return Err(InvalidData);
}
Ok(ColumnType::Bytes)
}
ColumnTypeCategory::Numerical => {
let numerical_type = NumericalType::try_from_code(numerical_type_code)?;
Ok(ColumnType::Numerical(numerical_type))
}
}
}
}
/// Column types are grouped into different categories that
/// corresponds to the different types of `JsonValue` types.
///
/// The columnar writer will apply coercion rules to make sure that
/// at most one column exist per `ColumnTypeCategory`.
///
/// See also [README.md].
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
#[repr(u8)]
pub(crate) enum ColumnTypeCategory {
Bool = 0u8,
Str = 1u8,
Numerical = 2u8,
}
impl ColumnTypeCategory {
pub fn to_code(self) -> u8 {
self as u8
}
pub fn try_from_code(code: u8) -> Result<Self, InvalidData> {
match code {
0u8 => Ok(Self::Bool),
1u8 => Ok(Self::Str),
2u8 => Ok(Self::Numerical),
_ => Err(InvalidData),
pub(crate) fn try_from_code(code: u8) -> Result<ColumnType, InvalidData> {
COLUMN_TYPES.get(code as usize).copied().ok_or(InvalidData)
}
}
impl From<NumericalType> for ColumnType {
fn from(numerical_type: NumericalType) -> Self {
match numerical_type {
NumericalType::I64 => ColumnType::I64,
NumericalType::U64 => ColumnType::U64,
NumericalType::F64 => ColumnType::F64,
}
}
}
impl ColumnType {
pub fn numerical_type(&self) -> Option<NumericalType> {
match self {
ColumnType::I64 => Some(NumericalType::I64),
ColumnType::U64 => Some(NumericalType::U64),
ColumnType::F64 => Some(NumericalType::F64),
ColumnType::Bytes
| ColumnType::Str
| ColumnType::Bool
| ColumnType::IpAddr
| ColumnType::DateTime => None,
}
}
}
// TODO remove if possible
pub trait HasAssociatedColumnType: 'static + Send + Sync + Copy + PartialOrd {
fn column_type() -> ColumnType;
fn default_value() -> Self;
}
impl HasAssociatedColumnType for u64 {
fn column_type() -> ColumnType {
ColumnType::U64
}
fn default_value() -> Self {
0u64
}
}
impl HasAssociatedColumnType for i64 {
fn column_type() -> ColumnType {
ColumnType::I64
}
fn default_value() -> Self {
0i64
}
}
impl HasAssociatedColumnType for f64 {
fn column_type() -> ColumnType {
ColumnType::F64
}
fn default_value() -> Self {
Default::default()
}
}
impl HasAssociatedColumnType for bool {
fn column_type() -> ColumnType {
ColumnType::Bool
}
fn default_value() -> Self {
Default::default()
}
}
impl HasAssociatedColumnType for crate::DateTime {
fn column_type() -> ColumnType {
ColumnType::DateTime
}
fn default_value() -> Self {
Default::default()
}
}
impl HasAssociatedColumnType for Ipv6Addr {
fn column_type() -> ColumnType {
ColumnType::IpAddr
}
fn default_value() -> Self {
Ipv6Addr::from([0u8; 16])
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
use crate::Cardinality;
#[test]
fn test_column_type_to_code() {
let mut column_type_set: HashSet<ColumnType> = HashSet::new();
for code in u8::MIN..=u8::MAX {
if let Ok(column_type) = ColumnType::try_from_code(code) {
assert_eq!(column_type.to_code(), code);
assert!(column_type_set.insert(column_type));
for (code, expected_column_type) in super::COLUMN_TYPES.iter().copied().enumerate() {
if let Ok(column_type) = ColumnType::try_from_code(code as u8) {
assert_eq!(column_type, expected_column_type);
}
}
assert_eq!(column_type_set.len(), 2 + 3);
for code in COLUMN_TYPES.len() as u8..=u8::MAX {
assert!(ColumnType::try_from_code(code as u8).is_err());
}
}
#[test]

View File

@@ -0,0 +1,208 @@
use std::collections::HashMap;
use std::io;
use crate::columnar::ColumnarReader;
use crate::dynamic_column::DynamicColumn;
use crate::ColumnType;
pub enum MergeDocOrder {
/// Columnar tables are simply stacked one above the other.
/// If the i-th columnar_readers has n_rows_i rows, then
/// in the resulting columnar,
/// rows [r0..n_row_0) contains the row of columnar_readers[0], in ordder
/// rows [n_row_0..n_row_0 + n_row_1 contains the row of columnar_readers[1], in order.
/// ..
Stack,
/// Some more complex mapping that can interleave rows from the different readers and
/// possibly drop rows.
Complex(()),
}
pub fn merge_columnar(
_columnar_readers: &[ColumnarReader],
mapping: MergeDocOrder,
_output: &mut impl io::Write,
) -> io::Result<()> {
match mapping {
MergeDocOrder::Stack => {
// implement me :)
todo!();
}
MergeDocOrder::Complex(_) => {
// for later
todo!();
}
}
}
/// Column types are grouped into different categories.
/// After merge, all columns belonging to the same category are coerced to
/// the same column type.
///
/// In practice, only numerical columns are coerced into one type today.
///
/// See also [README.md].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
#[repr(u8)]
enum ColumnTypeCategory {
Bool,
Str,
Numerical,
DateTime,
Bytes,
IpAddr,
}
impl From<ColumnType> for ColumnTypeCategory {
fn from(column_type: ColumnType) -> Self {
match column_type {
ColumnType::I64 => ColumnTypeCategory::Numerical,
ColumnType::U64 => ColumnTypeCategory::Numerical,
ColumnType::F64 => ColumnTypeCategory::Numerical,
ColumnType::Bytes => ColumnTypeCategory::Bytes,
ColumnType::Str => ColumnTypeCategory::Str,
ColumnType::Bool => ColumnTypeCategory::Bool,
ColumnType::IpAddr => ColumnTypeCategory::IpAddr,
ColumnType::DateTime => ColumnTypeCategory::DateTime,
}
}
}
fn collect_columns(
columnar_readers: &[&ColumnarReader],
) -> io::Result<HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>>> {
// Each column name may have multiple types of column associated.
// For merging we are interested in the same column type category since they can be merged.
let mut field_name_to_group: HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>> =
HashMap::new();
for columnar_reader in columnar_readers {
let column_name_and_handle = columnar_reader.list_columns()?;
for (column_name, handle) in column_name_and_handle {
let column_type_to_handles = field_name_to_group
.entry(column_name.to_string())
.or_default();
let columns = column_type_to_handles
.entry(handle.column_type().into())
.or_default();
columns.push(handle.open()?);
}
}
normalize_columns(&mut field_name_to_group);
Ok(field_name_to_group)
}
/// Coerce numerical type columns to the same type
/// TODO rename to `coerce_columns`
fn normalize_columns(map: &mut HashMap<String, HashMap<ColumnTypeCategory, Vec<DynamicColumn>>>) {
for (_field_name, type_category_to_columns) in map.iter_mut() {
for (type_category, columns) in type_category_to_columns {
if type_category == &ColumnTypeCategory::Numerical {
let casted_columns = cast_to_common_numerical_column(&columns);
*columns = casted_columns;
}
}
}
}
/// Receives a list of columns of numerical types (u64, i64, f64)
///
/// Returns a list of `DynamicColumn` which are all of the same numerical type
fn cast_to_common_numerical_column(columns: &[DynamicColumn]) -> Vec<DynamicColumn> {
assert!(columns
.iter()
.all(|column| column.column_type().numerical_type().is_some()));
let coerce_to_i64: Vec<_> = columns
.iter()
.map(|column| column.clone().coerce_to_i64())
.collect();
if coerce_to_i64.iter().all(|column| column.is_some()) {
return coerce_to_i64
.into_iter()
.map(|column| column.unwrap())
.collect();
}
let coerce_to_u64: Vec<_> = columns
.iter()
.map(|column| column.clone().coerce_to_u64())
.collect();
if coerce_to_u64.iter().all(|column| column.is_some()) {
return coerce_to_u64
.into_iter()
.map(|column| column.unwrap())
.collect();
}
columns
.iter()
.map(|column| {
column
.clone()
.coerce_to_f64()
.expect("couldn't cast column to f64")
})
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ColumnarWriter;
#[test]
fn test_column_coercion() {
// i64 type
let columnar1 = {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "numbers", 1i64);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(2, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
};
// u64 type
let columnar2 = {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "numbers", u64::MAX - 100);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(2, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
};
// f64 type
let columnar3 = {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "numbers", 30.5);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(2, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
};
let column_map = collect_columns(&[&columnar1, &columnar2, &columnar3]).unwrap();
assert_eq!(column_map.len(), 1);
let cat_to_columns = column_map.get("numbers").unwrap();
assert_eq!(cat_to_columns.len(), 1);
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
assert!(numerical.iter().all(|column| column.is_f64()));
let column_map = collect_columns(&[&columnar1, &columnar1]).unwrap();
assert_eq!(column_map.len(), 1);
let cat_to_columns = column_map.get("numbers").unwrap();
assert_eq!(cat_to_columns.len(), 1);
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
assert!(numerical.iter().all(|column| column.is_i64()));
let column_map = collect_columns(&[&columnar2, &columnar2]).unwrap();
assert_eq!(column_map.len(), 1);
let cat_to_columns = column_map.get("numbers").unwrap();
assert_eq!(cat_to_columns.len(), 1);
let numerical = cat_to_columns.get(&ColumnTypeCategory::Numerical).unwrap();
assert!(numerical.iter().all(|column| column.is_u64()));
}
}
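The coercion order implemented by `cast_to_common_numerical_column` above (keep i64 if every column fits, else u64, else fall back to f64) can be sketched independently of the `DynamicColumn` machinery. A minimal, self-contained approximation with hypothetical types:

```rust
// A standalone approximation (hypothetical types) of the coercion order:
// keep i64 if every value fits, else u64, else fall back to f64.
enum Num {
    I64(i64),
    U64(u64),
    F64(f64),
}

fn pick_type(values: &[Num]) -> &'static str {
    let mut all_i64 = true;
    let mut all_u64 = true;
    for v in values {
        match v {
            Num::I64(x) => all_u64 &= *x >= 0, // negative values break u64
            Num::U64(x) => all_i64 &= *x < i64::MAX as u64, // large values break i64
            Num::F64(_) => {
                all_i64 = false;
                all_u64 = false;
            }
        }
    }
    if all_i64 {
        "i64"
    } else if all_u64 {
        "u64"
    } else {
        "f64"
    }
}

fn main() {
    assert_eq!(pick_type(&[Num::I64(-1), Num::U64(3)]), "i64");
    assert_eq!(pick_type(&[Num::I64(1), Num::U64(u64::MAX - 100)]), "u64");
    assert_eq!(pick_type(&[Num::I64(-1), Num::U64(u64::MAX - 100)]), "f64");
    assert_eq!(pick_type(&[Num::F64(30.5)]), "f64");
}
```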

View File

@@ -1,28 +1,10 @@
// Copyright (C) 2022 Quickwit, Inc.
//
// Quickwit is offered under the AGPL v3.0 and as commercial software.
// For commercial licensing, contact us at hello@quickwit.io.
//
// AGPL:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
mod column_type;
mod format_version;
mod merge;
mod reader;
mod writer;
pub use column_type::ColumnType;
pub use column_type::{ColumnType, HasAssociatedColumnType};
pub use merge::{merge_columnar, MergeDocOrder};
pub use reader::ColumnarReader;
pub use writer::ColumnarWriter;

View File

@@ -44,7 +44,7 @@ impl ColumnarReader {
})
}
// TODO fix ugly API
// TODO Add unit tests
pub fn list_columns(&self) -> io::Result<Vec<(String, DynamicColumnHandle)>> {
let mut stream = self.column_dictionary.stream()?;
let mut results = Vec::new();
@@ -55,7 +55,8 @@ impl ColumnarReader {
.map_err(|_| io_invalid_data(format!("Unknown column code `{column_code}`")))?;
let range = stream.value().clone();
let column_name =
String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 1]).to_string();
// The last two bytes are respectively the 0u8 separator and the column_type.
String::from_utf8_lossy(&key_bytes[..key_bytes.len() - 2]).to_string();
let file_slice = self
.column_data
.slice(range.start as usize..range.end as usize);

View File

@@ -1,3 +1,5 @@
use std::net::Ipv6Addr;
use crate::dictionary::UnorderedId;
use crate::utils::{place_bits, pop_first_byte, select_bits};
use crate::value::NumericalValue;
@@ -25,12 +27,12 @@ struct ColumnOperationMetadata {
impl ColumnOperationMetadata {
fn to_code(self) -> u8 {
place_bits::<0, 4>(self.len) | place_bits::<4, 8>(self.op_type.to_code())
place_bits::<0, 6>(self.len) | place_bits::<6, 8>(self.op_type.to_code())
}
fn try_from_code(code: u8) -> Result<Self, InvalidData> {
let len = select_bits::<0, 4>(code);
let typ_code = select_bits::<4, 8>(code);
let len = select_bits::<0, 6>(code);
let typ_code = select_bits::<6, 8>(code);
let column_type = ColumnOperationType::try_from_code(typ_code)?;
Ok(ColumnOperationMetadata {
op_type: column_type,
@@ -142,9 +144,21 @@ impl SymbolValue for bool {
}
}
impl SymbolValue for Ipv6Addr {
fn serialize(self, buffer: &mut [u8]) -> u8 {
buffer[0..16].copy_from_slice(&self.octets());
16
}
fn deserialize(bytes: &[u8]) -> Self {
let octets: [u8; 16] = bytes[0..16].try_into().unwrap();
Ipv6Addr::from(octets)
}
}
#[derive(Default)]
struct MiniBuffer {
pub bytes: [u8; 10],
pub bytes: [u8; 17],
pub len: u8,
}
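The metadata byte layout changed above (the length field grows from 4 to 6 bits) can be written out with plain shifts. A sketch assuming, per the code, that the low bits hold the payload length and the high bits the op-type code; the wider field matters because a 16-byte `Ipv6Addr` payload no longer fits in a 4-bit length (max 15):

```rust
// Hypothetical helpers mirroring what place_bits/select_bits appear to do:
// bits [0..6) hold the payload length, bits [6..8) hold the op-type code.
fn pack(len: u8, op_type: u8) -> u8 {
    assert!(len < 64); // 6 bits: enough for a 16-byte Ipv6Addr payload
    assert!(op_type < 4); // 2 bits
    len | (op_type << 6)
}

fn unpack(code: u8) -> (u8, u8) {
    (code & 0b0011_1111, code >> 6)
}

fn main() {
    // 16 octets would not fit in the old 4-bit length field (max 15).
    let code = pack(16, 2);
    assert_eq!(unpack(code), (16, 2));
}
```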

View File

@@ -102,18 +102,29 @@ pub(crate) struct NumericalColumnWriter {
column_writer: ColumnWriter,
}
impl NumericalColumnWriter {
pub fn force_numerical_type(&mut self, numerical_type: NumericalType) {
assert!(self
.compatible_numerical_types
.is_type_accepted(numerical_type));
self.compatible_numerical_types = CompatibleNumericalTypes::StaticType(numerical_type);
}
}
/// State used to store what types are still acceptable
/// after having seen a set of numerical values.
#[derive(Clone, Copy)]
struct CompatibleNumericalTypes {
all_values_within_i64_range: bool,
all_values_within_u64_range: bool,
// f64 is always acceptable.
enum CompatibleNumericalTypes {
Dynamic {
all_values_within_i64_range: bool,
all_values_within_u64_range: bool,
},
StaticType(NumericalType),
}
impl Default for CompatibleNumericalTypes {
fn default() -> CompatibleNumericalTypes {
CompatibleNumericalTypes {
CompatibleNumericalTypes::Dynamic {
all_values_within_i64_range: true,
all_values_within_u64_range: true,
}
@@ -121,39 +132,64 @@ impl Default for CompatibleNumericalTypes {
}
impl CompatibleNumericalTypes {
fn is_type_accepted(&self, numerical_type: NumericalType) -> bool {
match self {
CompatibleNumericalTypes::Dynamic {
all_values_within_i64_range,
all_values_within_u64_range,
} => match numerical_type {
NumericalType::I64 => *all_values_within_i64_range,
NumericalType::U64 => *all_values_within_u64_range,
NumericalType::F64 => true,
},
CompatibleNumericalTypes::StaticType(static_numerical_type) => {
*static_numerical_type == numerical_type
}
}
}
fn accept_value(&mut self, numerical_value: NumericalValue) {
match numerical_value {
NumericalValue::I64(val_i64) => {
let value_within_u64_range = val_i64 >= 0i64;
self.all_values_within_u64_range &= value_within_u64_range;
}
NumericalValue::U64(val_u64) => {
let value_within_i64_range = val_u64 < i64::MAX as u64;
self.all_values_within_i64_range &= value_within_i64_range;
}
NumericalValue::F64(_) => {
self.all_values_within_i64_range = false;
self.all_values_within_u64_range = false;
match self {
CompatibleNumericalTypes::Dynamic {
all_values_within_i64_range,
all_values_within_u64_range,
} => match numerical_value {
NumericalValue::I64(val_i64) => {
let value_within_u64_range = val_i64 >= 0i64;
*all_values_within_u64_range &= value_within_u64_range;
}
NumericalValue::U64(val_u64) => {
let value_within_i64_range = val_u64 < i64::MAX as u64;
*all_values_within_i64_range &= value_within_i64_range;
}
NumericalValue::F64(_) => {
*all_values_within_i64_range = false;
*all_values_within_u64_range = false;
}
},
CompatibleNumericalTypes::StaticType(typ) => {
assert_eq!(numerical_value.numerical_type(), *typ);
}
}
}
pub fn to_numerical_type(self) -> NumericalType {
if self.all_values_within_i64_range {
NumericalType::I64
} else if self.all_values_within_u64_range {
NumericalType::U64
} else {
NumericalType::F64
for numerical_type in [NumericalType::I64, NumericalType::U64] {
if self.is_type_accepted(numerical_type) {
return numerical_type;
}
}
NumericalType::F64
}
}
impl NumericalColumnWriter {
pub fn column_type_and_cardinality(&self, num_docs: RowId) -> (NumericalType, Cardinality) {
let numerical_type = self.compatible_numerical_types.to_numerical_type();
let cardinality = self.column_writer.get_cardinality(num_docs);
(numerical_type, cardinality)
pub fn numerical_type(&self) -> NumericalType {
self.compatible_numerical_types.to_numerical_type()
}
pub fn cardinality(&self, num_docs: RowId) -> Cardinality {
self.column_writer.get_cardinality(num_docs)
}
pub fn record_numerical_value(
@@ -175,15 +211,15 @@ impl NumericalColumnWriter {
}
}
#[derive(Copy, Clone, Default)]
pub(crate) struct StrColumnWriter {
#[derive(Copy, Clone)]
pub(crate) struct StrOrBytesColumnWriter {
pub(crate) dictionary_id: u32,
pub(crate) column_writer: ColumnWriter,
}
impl StrColumnWriter {
pub(crate) fn with_dictionary_id(dictionary_id: u32) -> StrColumnWriter {
StrColumnWriter {
impl StrOrBytesColumnWriter {
pub(crate) fn with_dictionary_id(dictionary_id: u32) -> StrOrBytesColumnWriter {
StrOrBytesColumnWriter {
dictionary_id,
column_writer: Default::default(),
}
@@ -262,4 +298,27 @@ mod tests {
test_column_writer_coercion_aux(&[1i64.into(), 1u64.into()], NumericalType::I64);
test_column_writer_coercion_aux(&[u64::MAX.into(), (-1i64).into()], NumericalType::F64);
}
#[test]
#[should_panic]
fn test_compatible_numerical_types_static_incompatible_type() {
let mut compatible_numerical_types =
CompatibleNumericalTypes::StaticType(NumericalType::U64);
compatible_numerical_types.accept_value(NumericalValue::I64(1i64));
}
#[test]
fn test_compatible_numerical_types_static_different_type_forbidden() {
let mut compatible_numerical_types =
CompatibleNumericalTypes::StaticType(NumericalType::U64);
compatible_numerical_types.accept_value(NumericalValue::U64(u64::MAX));
}
#[test]
fn test_compatible_numerical_types_static() {
for typ in [NumericalType::I64, NumericalType::U64, NumericalType::F64] {
let compatible_numerical_types = CompatibleNumericalTypes::StaticType(typ);
assert_eq!(compatible_numerical_types.to_numerical_type(), typ);
}
}
}
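The Dynamic arm above encodes a small coercion lattice: prefer i64, then u64, and fall back to f64 once both integer ranges are ruled out. A standalone restatement of that rule under the same semantics (a sketch mirroring the tests above, not the crate's API):

#[derive(Clone, Copy, Debug, PartialEq)]
enum NumericalType { I64, U64, F64 }

#[derive(Clone, Copy)]
enum NumericalValue { I64(i64), U64(u64), F64(f64) }

fn coerced_type(values: &[NumericalValue]) -> NumericalType {
    let (mut i64_ok, mut u64_ok) = (true, true);
    for value in values {
        match value {
            // A negative value rules out u64.
            NumericalValue::I64(v) => u64_ok &= *v >= 0,
            // A value above i64::MAX rules out i64.
            NumericalValue::U64(v) => i64_ok &= *v < i64::MAX as u64,
            // Any float rules out both integer types.
            NumericalValue::F64(_) => {
                i64_ok = false;
                u64_ok = false;
            }
        }
    }
    if i64_ok {
        NumericalType::I64
    } else if u64_ok {
        NumericalType::U64
    } else {
        NumericalType::F64
    }
}

#[test]
fn coercion_matches_the_tests_above() {
    use NumericalValue::*;
    assert_eq!(coerced_type(&[I64(1), U64(1)]), NumericalType::I64);
    assert_eq!(coerced_type(&[U64(u64::MAX), I64(-1)]), NumericalType::F64);
    assert_eq!(coerced_type(&[F64(1.5)]), NumericalType::F64);
}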


@@ -4,6 +4,7 @@ mod serializer;
mod value_index;
use std::io;
use std::net::Ipv6Addr;
use column_operation::ColumnOperation;
use common::CountingWriter;
@@ -11,10 +12,12 @@ use serializer::ColumnarSerializer;
use stacker::{Addr, ArenaHashMap, MemoryArena};
use crate::column_index::SerializableColumnIndex;
use crate::column_values::{ColumnValues, MonotonicallyMappableToU64, VecColumn};
use crate::columnar::column_type::{ColumnType, ColumnTypeCategory};
use crate::column_values::{
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
};
use crate::columnar::column_type::ColumnType;
use crate::columnar::writer::column_writers::{
ColumnWriter, NumericalColumnWriter, StrColumnWriter,
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
};
use crate::columnar::writer::value_index::{IndexBuilder, PreallocatedIndexBuilders};
use crate::dictionary::{DictionaryBuilder, TermIdMapping, UnorderedId};
@@ -30,6 +33,7 @@ struct SpareBuffers {
u64_values: Vec<u64>,
f64_values: Vec<f64>,
bool_values: Vec<bool>,
ip_addr_values: Vec<Ipv6Addr>,
}
/// Makes it possible to create a new columnar.
@@ -47,8 +51,11 @@ struct SpareBuffers {
/// ```
pub struct ColumnarWriter {
numerical_field_hash_map: ArenaHashMap,
datetime_field_hash_map: ArenaHashMap,
bool_field_hash_map: ArenaHashMap,
ip_addr_field_hash_map: ArenaHashMap,
bytes_field_hash_map: ArenaHashMap,
str_field_hash_map: ArenaHashMap,
arena: MemoryArena,
// Dictionaries used to store dictionary-encoded values.
dictionaries: Vec<DictionaryBuilder>,
@@ -60,7 +67,10 @@ impl Default for ColumnarWriter {
ColumnarWriter {
numerical_field_hash_map: ArenaHashMap::new(10_000),
bool_field_hash_map: ArenaHashMap::new(10_000),
ip_addr_field_hash_map: ArenaHashMap::new(10_000),
bytes_field_hash_map: ArenaHashMap::new(10_000),
str_field_hash_map: ArenaHashMap::new(10_000),
datetime_field_hash_map: ArenaHashMap::new(10_000),
dictionaries: Vec::new(),
arena: MemoryArena::default(),
buffers: SpareBuffers::default(),
@@ -68,20 +78,115 @@ impl Default for ColumnarWriter {
}
}
#[inline]
fn mutate_or_create_column<V, TMutator>(
arena_hash_map: &mut ArenaHashMap,
column_name: &str,
updater: TMutator,
) where
V: Copy + 'static,
TMutator: FnMut(Option<V>) -> V,
{
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
}
impl ColumnarWriter {
pub fn mem_usage(&self) -> usize {
// TODO add dictionary builders.
self.arena.mem_usage()
+ self.numerical_field_hash_map.mem_usage()
+ self.bool_field_hash_map.mem_usage()
+ self.bytes_field_hash_map.mem_usage()
+ self.str_field_hash_map.mem_usage()
+ self.ip_addr_field_hash_map.mem_usage()
+ self.datetime_field_hash_map.mem_usage()
}
pub fn record_column_type(&mut self, column_name: &str, column_type: ColumnType) {
match column_type {
ColumnType::Str | ColumnType::Bytes => {
let (hash_map, dictionaries) = (
if column_type == ColumnType::Str {
&mut self.str_field_hash_map
} else {
&mut self.bytes_field_hash_map
},
&mut self.dictionaries,
);
mutate_or_create_column(
hash_map,
column_name,
|column_opt: Option<StrOrBytesColumnWriter>| {
if let Some(column_writer) = column_opt {
column_writer
} else {
let dictionary_id = dictionaries.len() as u32;
dictionaries.push(DictionaryBuilder::default());
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
}
},
);
}
ColumnType::Bool => {
mutate_or_create_column(
&mut self.bool_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::DateTime => {
mutate_or_create_column(
&mut self.datetime_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
let numerical_type = column_type.numerical_type().unwrap();
mutate_or_create_column(
&mut self.numerical_field_hash_map,
column_name,
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.force_numerical_type(numerical_type);
column
},
);
}
ColumnType::IpAddr => mutate_or_create_column(
&mut self.ip_addr_field_hash_map,
column_name,
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
),
}
}
pub fn force_numerical_type(&mut self, column_name: &str, numerical_type: NumericalType) {
mutate_or_create_column(
&mut self.numerical_field_hash_map,
column_name,
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.force_numerical_type(numerical_type);
column
},
);
}
pub fn record_numerical<T: Into<NumericalValue> + Copy>(
&mut self,
doc: RowId,
column_name: &str,
numerical_value: T,
) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
mutate_or_create_column(
hash_map,
column_name,
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.record_numerical_value(doc, numerical_value.into(), arena);
@@ -90,23 +195,62 @@ impl ColumnarWriter {
);
}
pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
pub fn record_ip_addr(&mut self, doc: RowId, column_name: &str, ip_addr: Ipv6Addr) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
let (hash_map, arena) = (&mut self.ip_addr_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column.record(doc, ip_addr, arena);
column
},
);
}
pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
});
}
pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: crate::DateTime) {
let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, NumericalValue::I64(datetime.timestamp_micros), arena);
column
});
}
pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
let (hash_map, arena, dictionaries) = (
&mut self.str_field_hash_map,
&mut self.arena,
&mut self.dictionaries,
);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<StrOrBytesColumnWriter>| {
let mut column: StrOrBytesColumnWriter = column_opt.unwrap_or_else(|| {
// Each column has its own dictionary
let dictionary_id = dictionaries.len() as u32;
dictionaries.push(DictionaryBuilder::default());
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
});
column.record_bytes(doc, value.as_bytes(), dictionaries, arena);
column
},
);
}
pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
@@ -118,43 +262,67 @@ impl ColumnarWriter {
);
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<StrColumnWriter>| {
let mut column: StrColumnWriter = column_opt.unwrap_or_else(|| {
|column_opt: Option<StrOrBytesColumnWriter>| {
let mut column: StrOrBytesColumnWriter = column_opt.unwrap_or_else(|| {
// Each column has its own dictionary
let dictionary_id = dictionaries.len() as u32;
dictionaries.push(DictionaryBuilder::default());
StrColumnWriter::with_dictionary_id(dictionary_id)
StrOrBytesColumnWriter::with_dictionary_id(dictionary_id)
});
column.record_bytes(doc, value.as_bytes(), dictionaries, arena);
column.record_bytes(doc, value, dictionaries, arena);
column
},
);
}
pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(wrt);
let mut field_columns: Vec<(&[u8], ColumnTypeCategory, Addr)> = self
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
.numerical_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Numerical, addr))
.map(|(column_name, addr, _)| {
let numerical_column_writer: NumericalColumnWriter =
self.numerical_field_hash_map.read(addr);
let column_type = numerical_column_writer.numerical_type().into();
(column_name, column_type, addr)
})
.collect();
field_columns.extend(
columns.extend(
self.bytes_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Str, addr)),
.map(|(term, addr, _)| (term, ColumnType::Bytes, addr)),
);
field_columns.extend(
columns.extend(
self.str_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)),
);
columns.extend(
self.bool_field_hash_map
.iter()
.map(|(term, addr, _)| (term, ColumnTypeCategory::Bool, addr)),
.map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)),
);
field_columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
columns.extend(
self.ip_addr_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)),
);
columns.extend(
self.datetime_field_hash_map
.iter()
.map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)),
);
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
for (column_name, bytes_or_numerical, addr) in field_columns {
match bytes_or_numerical {
ColumnTypeCategory::Bool => {
let column_writer: ColumnWriter = self.bool_field_hash_map.read(addr);
for (column_name, column_type, addr) in columns {
match column_type {
ColumnType::Bool | ColumnType::DateTime => {
let column_writer: ColumnWriter = if column_type == ColumnType::Bool {
self.bool_field_hash_map.read(addr)
} else {
self.datetime_field_hash_map.read(addr)
};
let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, column_type);
@@ -166,29 +334,50 @@ impl ColumnarWriter {
&mut column_serializer,
)?;
}
ColumnTypeCategory::Str => {
let str_column_writer: StrColumnWriter = self.bytes_field_hash_map.read(addr);
let dictionary_builder =
&dictionaries[str_column_writer.dictionary_id as usize];
let cardinality = str_column_writer.column_writer.get_cardinality(num_docs);
ColumnType::IpAddr => {
let column_writer: ColumnWriter = self.ip_addr_field_hash_map.read(addr);
let cardinality = column_writer.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, ColumnType::Bytes);
serialize_bytes_column(
serializer.serialize_column(column_name, ColumnType::IpAddr);
serialize_ip_addr_column(
cardinality,
num_docs,
dictionary_builder,
str_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
}
ColumnTypeCategory::Numerical => {
ColumnType::Bytes | ColumnType::Str => {
let str_or_bytes_column_writer: StrOrBytesColumnWriter =
if column_type == ColumnType::Bytes {
self.bytes_field_hash_map.read(addr)
} else {
self.str_field_hash_map.read(addr)
};
let dictionary_builder =
&dictionaries[str_or_bytes_column_writer.dictionary_id as usize];
let cardinality = str_or_bytes_column_writer
.column_writer
.get_cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, column_type);
serialize_bytes_or_str_column(
cardinality,
num_docs,
dictionary_builder,
str_or_bytes_column_writer
.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
}
ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
let numerical_column_writer: NumericalColumnWriter =
self.numerical_field_hash_map.read(addr);
let (numerical_type, cardinality) =
numerical_column_writer.column_type_and_cardinality(num_docs);
let mut column_serializer = serializer
.serialize_column(column_name, ColumnType::Numerical(numerical_type));
let numerical_type = column_type.numerical_type().unwrap();
let cardinality = numerical_column_writer.cardinality(num_docs);
let mut column_serializer =
serializer.serialize_column(column_name, ColumnType::from(numerical_type));
serialize_numerical_column(
cardinality,
num_docs,
@@ -205,7 +394,7 @@ impl ColumnarWriter {
}
}
fn serialize_bytes_column(
fn serialize_bytes_or_str_column(
cardinality: Cardinality,
num_docs: RowId,
dictionary_builder: &DictionaryBuilder,
@@ -232,7 +421,7 @@ fn serialize_bytes_column(
ColumnOperation::NewDoc(doc) => ColumnOperation::NewDoc(doc),
}
});
serialize_column(
send_to_serialize_column_mappable_to_u64(
operation_iterator,
cardinality,
num_docs,
@@ -261,7 +450,7 @@ fn serialize_numerical_column(
} = buffers;
match numerical_type {
NumericalType::I64 => {
serialize_column(
send_to_serialize_column_mappable_to_u64(
coerce_numerical_symbol::<i64>(op_iterator),
cardinality,
num_docs,
@@ -271,7 +460,7 @@ fn serialize_numerical_column(
)?;
}
NumericalType::U64 => {
serialize_column(
send_to_serialize_column_mappable_to_u64(
coerce_numerical_symbol::<u64>(op_iterator),
cardinality,
num_docs,
@@ -281,7 +470,7 @@ fn serialize_numerical_column(
)?;
}
NumericalType::F64 => {
serialize_column(
send_to_serialize_column_mappable_to_u64(
coerce_numerical_symbol::<f64>(op_iterator),
cardinality,
num_docs,
@@ -306,7 +495,7 @@ fn serialize_bool_column(
bool_values,
..
} = buffers;
serialize_column(
send_to_serialize_column_mappable_to_u64(
column_operations_it,
cardinality,
num_docs,
@@ -317,7 +506,76 @@ fn serialize_bool_column(
Ok(())
}
fn serialize_column<
fn serialize_ip_addr_column(
cardinality: Cardinality,
num_docs: RowId,
column_operations_it: impl Iterator<Item = ColumnOperation<Ipv6Addr>>,
buffers: &mut SpareBuffers,
wrt: &mut impl io::Write,
) -> io::Result<()> {
let SpareBuffers {
value_index_builders,
ip_addr_values,
..
} = buffers;
send_to_serialize_column_mappable_to_u128(
column_operations_it,
cardinality,
num_docs,
value_index_builders,
ip_addr_values,
wrt,
)?;
Ok(())
}
fn send_to_serialize_column_mappable_to_u128<
T: Copy + std::fmt::Debug + Send + Sync + MonotonicallyMappableToU128 + PartialOrd,
>(
op_iterator: impl Iterator<Item = ColumnOperation<T>>,
cardinality: Cardinality,
num_docs: RowId,
value_index_builders: &mut PreallocatedIndexBuilders,
values: &mut Vec<T>,
mut wrt: impl io::Write,
) -> io::Result<()>
where
for<'a> VecColumn<'a, T>: ColumnValues<T>,
{
values.clear();
// TODO: split index and values
let serializable_column_index = match cardinality {
Cardinality::Full => {
consume_operation_iterator(
op_iterator,
value_index_builders.borrow_required_index_builder(),
values,
);
SerializableColumnIndex::Full
}
Cardinality::Optional => {
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
consume_operation_iterator(op_iterator, optional_index_builder, values);
let optional_index = optional_index_builder.finish(num_docs);
SerializableColumnIndex::Optional(Box::new(optional_index))
}
Cardinality::Multivalued => {
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
let multivalued_index = multivalued_index_builder.finish(num_docs);
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
}
};
crate::column::serialize_column_mappable_to_u128(
serializable_column_index,
|| values.iter().cloned(),
values.len() as u32,
&mut wrt,
)?;
Ok(())
}
fn send_to_serialize_column_mappable_to_u64<
T: Copy + Default + std::fmt::Debug + Send + Sync + MonotonicallyMappableToU64 + PartialOrd,
>(
op_iterator: impl Iterator<Item = ColumnOperation<T>>,
@@ -350,11 +608,10 @@ where
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
let multivalued_index = multivalued_index_builder.finish(num_docs);
todo!();
// SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
}
};
crate::column::serialize_column_u64(
crate::column::serialize_column_mappable_to_u64(
serializable_column_index,
&VecColumn::from(&values[..]),
&mut wrt,
@@ -392,59 +649,12 @@ fn consume_operation_iterator<T: std::fmt::Debug, TIndexBuilder: IndexBuilder>(
}
}
// /// Serializes the column with the codec with the best estimate on the data.
// fn serialize_numerical<T: MonotonicallyMappableToU64>(
// value_index: ValueIndexInfo,
// typed_column: impl Column<T>,
// output: &mut impl io::Write,
// codecs: &[FastFieldCodecType],
// ) -> io::Result<()> {
// let counting_writer = CountingWriter::wrap(output);
// serialize_value_index(value_index, output)?;
// let value_index_len = counting_writer.written_bytes();
// let output = counting_writer.finish();
// serialize_column(value_index, output)?;
// let column = monotonic_map_column(
// typed_column,
// crate::column::monotonic_mapping::StrictlyMonotonicMappingToInternal::<T>::new(),
// );
// let header = Header::compute_header(&column, codecs).ok_or_else(|| {
// io::Error::new(
// io::ErrorKind::InvalidInput,
// format!(
// "Data cannot be serialized with this list of codec. {:?}",
// codecs
// ),
// )
// })?;
// header.serialize(output)?;
// let normalized_column = header.normalize_column(column);
// assert_eq!(normalized_column.min_value(), 0u64);
// serialize_given_codec(normalized_column, header.codec_type, output)?;
// let column_header = ColumnFooter {
// value_index_len: todo!(),
// cardinality: todo!(),
// };
// let null_index_footer = NullIndexFooter {
// cardinality: value_index.get_cardinality(),
// null_index_codec: NullIndexCodec::Full,
// null_index_byte_range: 0..0,
// };
// append_null_index_footer(output, null_index_footer)?;
// Ok(())
// }
#[cfg(test)]
mod tests {
use column_operation::ColumnOperation;
use stacker::MemoryArena;
use super::*;
use crate::value::NumericalValue;
use crate::columnar::writer::column_operation::ColumnOperation;
use crate::{Cardinality, NumericalValue};
#[test]
fn test_column_writer_required_simple() {
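Taken together, the new record_* methods give ColumnarWriter one strongly-typed entry point per column type. A usage sketch, assuming the crate's types above are in scope and the method signatures shown in this diff:

use std::net::Ipv6Addr;

fn build_columnar() -> std::io::Result<Vec<u8>> {
    let mut writer = ColumnarWriter::default();
    writer.record_str(0, "city", "tokyo");
    writer.record_bytes(0, "city_raw", b"tokyo");
    writer.record_numerical(0, "price", 12u64);
    writer.record_bool(1, "in_stock", true);
    writer.record_ip_addr(2, "client_ip", Ipv6Addr::LOCALHOST);
    let mut buffer = Vec::new();
    // `3` is the total number of rows; rows without a value become nulls.
    writer.serialize(3, &mut buffer)?;
    Ok(buffer)
}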


@@ -97,10 +97,10 @@ mod tests {
#[test]
fn test_prepare_key_bytes() {
let mut buffer: Vec<u8> = b"somegarbage".to_vec();
prepare_key(b"root\0child", ColumnType::Bytes, &mut buffer);
prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
assert_eq!(buffer.len(), 12);
assert_eq!(&buffer[..10], b"root\0child");
assert_eq!(buffer[10], 0u8);
assert_eq!(buffer[11], ColumnType::Bytes.to_code());
assert_eq!(buffer[11], ColumnType::Str.to_code());
}
}


@@ -45,16 +45,6 @@ impl<'a> SerializableOptionalIndex<'a> for SingleValueArrayIndex<'a> {
}
}
impl OptionalIndexBuilder {
fn num_non_nulls(&self) -> u32 {
self.docs.len() as u32
}
fn iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
Box::new(self.docs.iter().copied())
}
}
impl OptionalIndexBuilder {
pub fn finish<'a>(&'a mut self, num_rows: RowId) -> impl SerializableOptionalIndex + 'a {
debug_assert!(self
@@ -96,7 +86,7 @@ pub struct MultivaluedIndexBuilder {
impl MultivaluedIndexBuilder {
pub fn finish(&mut self, num_docs: RowId) -> impl ColumnValues<u32> + '_ {
self.start_offsets
.resize(num_docs as usize, self.total_num_vals_seen);
.resize(num_docs as usize + 1, self.total_num_vals_seen);
VecColumn {
values: &&self.start_offsets[..],
min_value: 0,
@@ -188,7 +178,7 @@ mod tests {
.finish(4u32)
.iter()
.collect::<Vec<u32>>(),
vec![0, 0, 2, 3]
vec![0, 0, 2, 3, 3]
);
multivalued_value_index_builder.reset();
multivalued_value_index_builder.record_row(2u32);
@@ -199,7 +189,7 @@ mod tests {
.finish(4u32)
.iter()
.collect::<Vec<u32>>(),
vec![0, 0, 0, 2]
vec![0, 0, 0, 2, 2]
);
}
}
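The extra offset added by `num_docs as usize + 1` is the usual CSR convention: with num_docs + 1 start offsets, the values of row i are values[offsets[i]..offsets[i + 1]], so the last row needs a closing bound equal to the total value count. A minimal sketch built on the expected output of the test above:

fn values_for_row<'a, T>(offsets: &[u32], values: &'a [T], row: usize) -> &'a [T] {
    &values[offsets[row] as usize..offsets[row + 1] as usize]
}

#[test]
fn csr_offsets() {
    let offsets = [0u32, 0, 2, 3, 3]; // 4 rows => 5 offsets
    let values = ["a", "b", "c"];
    assert!(values_for_row(&offsets, &values, 0).is_empty());
    assert_eq!(values_for_row(&offsets, &values, 1), ["a", "b"]);
    assert_eq!(values_for_row(&offsets, &values, 2), ["c"]);
    assert!(values_for_row(&offsets, &values, 3).is_empty());
}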


@@ -1,12 +1,14 @@
use std::io;
use std::net::IpAddr;
use std::net::Ipv6Addr;
use std::sync::Arc;
use common::file_slice::FileSlice;
use common::{HasLen, OwnedBytes};
use crate::column::{BytesColumn, Column};
use crate::column::{BytesColumn, Column, StrColumn};
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
use crate::columnar::ColumnType;
use crate::DateTime;
use crate::{DateTime, NumericalType};
#[derive(Clone)]
pub enum DynamicColumn {
@@ -14,41 +16,163 @@ pub enum DynamicColumn {
I64(Column<i64>),
U64(Column<u64>),
F64(Column<f64>),
IpAddr(Column<IpAddr>),
IpAddr(Column<Ipv6Addr>),
DateTime(Column<DateTime>),
Str(BytesColumn),
Bytes(BytesColumn),
Str(StrColumn),
}
impl From<Column<i64>> for DynamicColumn {
fn from(column_i64: Column<i64>) -> Self {
DynamicColumn::I64(column_i64)
impl DynamicColumn {
pub fn column_type(&self) -> ColumnType {
match self {
DynamicColumn::Bool(_) => ColumnType::Bool,
DynamicColumn::I64(_) => ColumnType::I64,
DynamicColumn::U64(_) => ColumnType::U64,
DynamicColumn::F64(_) => ColumnType::F64,
DynamicColumn::IpAddr(_) => ColumnType::IpAddr,
DynamicColumn::DateTime(_) => ColumnType::DateTime,
DynamicColumn::Bytes(_) => ColumnType::Bytes,
DynamicColumn::Str(_) => ColumnType::Str,
}
}
pub fn is_numerical(&self) -> bool {
self.column_type().numerical_type().is_some()
}
pub fn is_f64(&self) -> bool {
self.column_type().numerical_type() == Some(NumericalType::F64)
}
pub fn is_i64(&self) -> bool {
self.column_type().numerical_type() == Some(NumericalType::I64)
}
pub fn is_u64(&self) -> bool {
self.column_type().numerical_type() == Some(NumericalType::U64)
}
pub fn coerce_to_f64(self) -> Option<DynamicColumn> {
match self {
DynamicColumn::I64(column) => Some(DynamicColumn::F64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapI64ToF64)),
})),
DynamicColumn::U64(column) => Some(DynamicColumn::F64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapU64ToF64)),
})),
DynamicColumn::F64(_) => Some(self),
_ => None,
}
}
pub fn coerce_to_i64(self) -> Option<DynamicColumn> {
match self {
DynamicColumn::U64(column) => {
if column.max_value() > i64::MAX as u64 {
return None;
}
Some(DynamicColumn::I64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapU64ToI64)),
}))
}
DynamicColumn::I64(_) => Some(self),
_ => None,
}
}
pub fn coerce_to_u64(self) -> Option<DynamicColumn> {
match self {
DynamicColumn::I64(column) => {
if column.min_value() < 0 {
return None;
}
Some(DynamicColumn::U64(Column {
idx: column.idx,
values: Arc::new(monotonic_map_column(column.values, MapI64ToU64)),
}))
}
DynamicColumn::U64(_) => Some(self),
_ => None,
}
}
}
impl From<Column<u64>> for DynamicColumn {
fn from(column_u64: Column<u64>) -> Self {
DynamicColumn::U64(column_u64)
struct MapI64ToF64;
impl StrictlyMonotonicFn<i64, f64> for MapI64ToF64 {
#[inline(always)]
fn mapping(&self, inp: i64) -> f64 {
inp as f64
}
#[inline(always)]
fn inverse(&self, out: f64) -> i64 {
out as i64
}
}
impl From<Column<f64>> for DynamicColumn {
fn from(column_f64: Column<f64>) -> Self {
DynamicColumn::F64(column_f64)
struct MapU64ToF64;
impl StrictlyMonotonicFn<u64, f64> for MapU64ToF64 {
#[inline(always)]
fn mapping(&self, inp: u64) -> f64 {
inp as f64
}
#[inline(always)]
fn inverse(&self, out: f64) -> u64 {
out as u64
}
}
impl From<Column<bool>> for DynamicColumn {
fn from(bool_column: Column<bool>) -> Self {
DynamicColumn::Bool(bool_column)
struct MapU64ToI64;
impl StrictlyMonotonicFn<u64, i64> for MapU64ToI64 {
#[inline(always)]
fn mapping(&self, inp: u64) -> i64 {
inp as i64
}
#[inline(always)]
fn inverse(&self, out: i64) -> u64 {
out as u64
}
}
impl From<BytesColumn> for DynamicColumn {
fn from(dictionary_encoded_col: BytesColumn) -> Self {
DynamicColumn::Str(dictionary_encoded_col)
struct MapI64ToU64;
impl StrictlyMonotonicFn<i64, u64> for MapI64ToU64 {
#[inline(always)]
fn mapping(&self, inp: i64) -> u64 {
inp as u64
}
#[inline(always)]
fn inverse(&self, out: u64) -> i64 {
out as i64
}
}
macro_rules! static_dynamic_conversions {
($typ:ty, $enum_name:ident) => {
impl Into<Option<$typ>> for DynamicColumn {
fn into(self) -> Option<$typ> {
if let DynamicColumn::$enum_name(col) = self {
Some(col)
} else {
None
}
}
}
impl From<$typ> for DynamicColumn {
fn from(typed_column: $typ) -> Self {
DynamicColumn::$enum_name(typed_column)
}
}
};
}
static_dynamic_conversions!(Column<bool>, Bool);
static_dynamic_conversions!(Column<u64>, U64);
static_dynamic_conversions!(Column<i64>, I64);
static_dynamic_conversions!(Column<f64>, F64);
static_dynamic_conversions!(Column<crate::DateTime>, DateTime);
static_dynamic_conversions!(StrColumn, Str);
static_dynamic_conversions!(BytesColumn, Bytes);
static_dynamic_conversions!(Column<Ipv6Addr>, IpAddr);
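The coerce_to_* methods above are the building blocks for reconciling columns of mismatched numerical types, e.g. before a merge: widen both sides to the narrowest type that accepts them, falling back to f64. A hypothetical helper sketching that use (unify is not part of this diff):

// Hypothetical: bring two numerical columns to a common type before merging.
fn unify(left: DynamicColumn, right: DynamicColumn) -> Option<(DynamicColumn, DynamicColumn)> {
    for coerce in [DynamicColumn::coerce_to_i64, DynamicColumn::coerce_to_u64] {
        if let (Some(l), Some(r)) = (coerce(left.clone()), coerce(right.clone())) {
            return Some((l, r));
        }
    }
    // f64 always works for numerical columns (possibly lossy above 2^53).
    Some((left.coerce_to_f64()?, right.coerce_to_f64()?))
}

The monotonic mapping wrappers keep this cheap: values are rewritten at read time rather than the column being copied.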
#[derive(Clone)]
pub struct DynamicColumnHandle {
pub(crate) file_slice: FileSlice,
@@ -56,31 +180,53 @@ pub struct DynamicColumnHandle {
}
impl DynamicColumnHandle {
// TODO rename load
pub fn open(&self) -> io::Result<DynamicColumn> {
let column_bytes: OwnedBytes = self.file_slice.read_bytes()?;
self.open_internal(column_bytes)
}
// TODO rename load_async
pub async fn open_async(&self) -> io::Result<DynamicColumn> {
let column_bytes: OwnedBytes = self.file_slice.read_bytes_async().await?;
self.open_internal(column_bytes)
}
/// Returns the `u64` fast field reader associated with columns of type
/// Str, Bytes, u64, i64, f64, or DateTime, and `None` for Bool and IpAddr
/// columns.
///
/// The reader exposes the raw `u64` representation of the stored value;
/// for dictionary-encoded (Str/Bytes) columns this is the term ordinal.
pub fn open_u64_lenient(&self) -> io::Result<Option<Column<u64>>> {
let column_bytes = self.file_slice.read_bytes()?;
match self.column_type {
ColumnType::Str | ColumnType::Bytes => {
let column: BytesColumn = crate::column::open_column_bytes(column_bytes)?;
Ok(Some(column.term_ord_column))
}
ColumnType::Bool => Ok(None),
ColumnType::IpAddr => Ok(None),
ColumnType::I64 | ColumnType::U64 | ColumnType::F64 | ColumnType::DateTime => {
let column = crate::column::open_column_u64::<u64>(column_bytes)?;
Ok(Some(column))
}
}
}
fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
let dynamic_column: DynamicColumn = match self.column_type {
ColumnType::Bytes => crate::column::open_column_bytes(column_bytes)?.into(),
ColumnType::Numerical(numerical_type) => match numerical_type {
crate::NumericalType::I64 => {
crate::column::open_column_u64::<i64>(column_bytes)?.into()
}
crate::NumericalType::U64 => {
crate::column::open_column_u64::<u64>(column_bytes)?.into()
}
crate::NumericalType::F64 => {
crate::column::open_column_u64::<f64>(column_bytes)?.into()
}
},
ColumnType::Bytes => {
crate::column::open_column_bytes::<BytesColumn>(column_bytes)?.into()
}
ColumnType::Str => crate::column::open_column_bytes::<StrColumn>(column_bytes)?.into(),
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),
ColumnType::Bool => crate::column::open_column_u64::<bool>(column_bytes)?.into(),
ColumnType::IpAddr => crate::column::open_column_u128::<Ipv6Addr>(column_bytes)?.into(),
ColumnType::DateTime => {
crate::column::open_column_u64::<crate::DateTime>(column_bytes)?.into()
}
};
Ok(dynamic_column)
}
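A usage sketch of the lenient accessor defined above, where `handle` is an assumed DynamicColumnHandle: every column type except Bool and IpAddr can be viewed through a single Column<u64>, which is what type-agnostic consumers such as aggregations rely on.

fn read_first_row(handle: &DynamicColumnHandle) -> std::io::Result<Option<u64>> {
    let Some(u64_col) = handle.open_u64_lenient()? else {
        return Ok(None); // Bool and IpAddr columns have no u64 view.
    };
    // For Str/Bytes columns this is a term ordinal, not the original value.
    Ok(u64_col.first(0))
}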


@@ -18,16 +18,21 @@ mod dynamic_column;
pub(crate) mod utils;
mod value;
pub use columnar::{ColumnarReader, ColumnarWriter};
pub use column::{BytesColumn, Column, StrColumn};
pub use column_values::ColumnValues;
pub use columnar::{
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
MergeDocOrder,
};
pub use value::{NumericalType, NumericalValue};
// pub use self::dynamic_column::DynamicColumnHandle;
pub use self::dynamic_column::{DynamicColumn, DynamicColumnHandle};
pub type RowId = u32;
#[derive(Clone, Copy)]
#[derive(Clone, Copy, PartialOrd, PartialEq, Default, Debug)]
pub struct DateTime {
timestamp_micros: i64,
pub timestamp_micros: i64,
}
#[derive(Copy, Clone, Debug)]


@@ -1,10 +1,13 @@
use std::net::Ipv6Addr;
use crate::column_values::MonotonicallyMappableToU128;
use crate::columnar::ColumnType;
use crate::dynamic_column::{DynamicColumn, DynamicColumnHandle};
use crate::value::NumericalValue;
use crate::{Cardinality, ColumnarReader, ColumnarWriter};
#[test]
fn test_dataframe_writer_bytes() {
fn test_dataframe_writer_str() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_str(1u32, "my_string", "hello");
dataframe_writer.record_str(3u32, "my_string", "helloeee");
@@ -14,7 +17,21 @@ fn test_dataframe_writer_bytes() {
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 165);
assert_eq!(cols[0].num_bytes(), 158);
}
#[test]
fn test_dataframe_writer_bytes() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_bytes(1u32, "my_string", b"hello");
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 158);
}
#[test]
@@ -28,7 +45,7 @@ fn test_dataframe_writer_bool() {
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 29);
assert_eq!(cols[0].num_bytes(), 22);
assert_eq!(cols[0].column_type(), ColumnType::Bool);
let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::Bool(bool_col) = dyn_bool_col else { panic!(); };
@@ -36,6 +53,59 @@ fn test_dataframe_writer_bool() {
assert_eq!(&vals, &[None, Some(false), None, Some(true), None,]);
}
#[test]
fn test_dataframe_writer_u64_multivalued() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(2u32, "divisor", 2u64);
dataframe_writer.record_numerical(3u32, "divisor", 3u64);
dataframe_writer.record_numerical(4u32, "divisor", 2u64);
dataframe_writer.record_numerical(5u32, "divisor", 5u64);
dataframe_writer.record_numerical(6u32, "divisor", 2u64);
dataframe_writer.record_numerical(6u32, "divisor", 3u64);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(7, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 29);
let dyn_i64_col = cols[0].open().unwrap();
let DynamicColumn::I64(divisor_col) = dyn_i64_col else { panic!(); };
assert_eq!(
divisor_col.get_cardinality(),
crate::Cardinality::Multivalued
);
assert_eq!(divisor_col.num_rows(), 7);
}
#[test]
fn test_dataframe_writer_ip_addr() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 42);
assert_eq!(cols[0].column_type(), ColumnType::IpAddr);
let dyn_bool_col = cols[0].open().unwrap();
let DynamicColumn::IpAddr(ip_col) = dyn_bool_col else { panic!(); };
let vals: Vec<Option<Ipv6Addr>> = (0..5).map(|row_id| ip_col.first(row_id)).collect();
assert_eq!(
&vals,
&[
None,
Some(Ipv6Addr::from_u128(1001)),
None,
Some(Ipv6Addr::from_u128(1050)),
None,
]
);
}
#[test]
fn test_dataframe_writer_numerical() {
let mut dataframe_writer = ColumnarWriter::default();
@@ -53,7 +123,7 @@ fn test_dataframe_writer_numerical() {
// - header 14 bytes
// - vals 8 //< due to padding? could have been 1byte?.
// - null footer 6 bytes
assert_eq!(cols[0].num_bytes(), 40);
assert_eq!(cols[0].num_bytes(), 33);
let column = cols[0].open().unwrap();
let DynamicColumn::I64(column_i64) = column else { panic!(); };
assert_eq!(column_i64.idx.get_cardinality(), Cardinality::Optional);
@@ -67,18 +137,76 @@ fn test_dataframe_writer_numerical() {
}
#[test]
fn test_dictionary_encoded() {
fn test_dictionary_encoded_str() {
let mut buffer = Vec::new();
let mut columnar_writer = ColumnarWriter::default();
columnar_writer.record_str(1, "my.column", "my.key");
columnar_writer.record_str(3, "my.column", "my.key2");
columnar_writer.record_str(1, "my.column", "a");
columnar_writer.record_str(3, "my.column", "c");
columnar_writer.record_str(3, "my.column2", "different_column!");
columnar_writer.record_str(4, "my.column", "b");
columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1);
let DynamicColumn::Str(str_col) = col_handles[0].open().unwrap() else { panic!(); };
let index: Vec<Option<u64>> = (0..5).map(|row_id| str_col.ords().first(row_id)).collect();
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
assert_eq!(str_col.num_rows(), 5);
let mut term_buffer = String::new();
let term_ords = str_col.ords();
assert_eq!(term_ords.first(0), None);
assert_eq!(term_ords.first(1), Some(0));
str_col.ord_to_str(0u64, &mut term_buffer).unwrap();
assert_eq!(term_buffer, "a");
assert_eq!(term_ords.first(2), None);
assert_eq!(term_ords.first(3), Some(2));
str_col.ord_to_str(2u64, &mut term_buffer).unwrap();
assert_eq!(term_buffer, "c");
assert_eq!(term_ords.first(4), Some(1));
str_col.ord_to_str(1u64, &mut term_buffer).unwrap();
assert_eq!(term_buffer, "b");
}
#[test]
fn test_dictionary_encoded_bytes() {
let mut buffer = Vec::new();
let mut columnar_writer = ColumnarWriter::default();
columnar_writer.record_bytes(1, "my.column", b"a");
columnar_writer.record_bytes(3, "my.column", b"c");
columnar_writer.record_bytes(3, "my.column2", b"different_column!");
columnar_writer.record_bytes(4, "my.column", b"b");
columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
assert_eq!(col_handles.len(), 1);
let DynamicColumn::Bytes(bytes_col) = col_handles[0].open().unwrap() else { panic!(); };
let index: Vec<Option<u64>> = (0..5)
.map(|row_id| bytes_col.ords().first(row_id))
.collect();
assert_eq!(index, &[None, Some(0), None, Some(2), Some(1)]);
assert_eq!(bytes_col.num_rows(), 5);
let mut term_buffer = Vec::new();
let term_ords = bytes_col.ords();
assert_eq!(term_ords.first(0), None);
assert_eq!(term_ords.first(1), Some(0));
bytes_col
.dictionary
.ord_to_term(0u64, &mut term_buffer)
.unwrap();
assert_eq!(term_buffer, b"a");
assert_eq!(term_ords.first(2), None);
assert_eq!(term_ords.first(3), Some(2));
bytes_col
.dictionary
.ord_to_term(2u64, &mut term_buffer)
.unwrap();
assert_eq!(term_buffer, b"c");
assert_eq!(term_ords.first(4), Some(1));
bytes_col
.dictionary
.ord_to_term(1u64, &mut term_buffer)
.unwrap();
assert_eq!(term_buffer, b"b");
}


@@ -1,12 +1,22 @@
use crate::InvalidData;
#[derive(Copy, Clone, Debug, PartialEq)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum NumericalValue {
I64(i64),
U64(u64),
F64(f64),
}
impl NumericalValue {
pub fn numerical_type(&self) -> NumericalType {
match self {
NumericalValue::I64(_) => NumericalType::I64,
NumericalValue::U64(_) => NumericalType::U64,
NumericalValue::F64(_) => NumericalType::F64,
}
}
}
impl From<u64> for NumericalValue {
fn from(val: u64) -> NumericalValue {
NumericalValue::U64(val)
@@ -25,18 +35,6 @@ impl From<f64> for NumericalValue {
}
}
impl NumericalValue {
pub fn numerical_type(&self) -> NumericalType {
match self {
NumericalValue::F64(_) => NumericalType::F64,
NumericalValue::I64(_) => NumericalType::I64,
NumericalValue::U64(_) => NumericalType::U64,
}
}
}
impl Eq for NumericalValue {}
#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]
#[repr(u8)]
pub enum NumericalType {
@@ -106,6 +104,13 @@ impl Coerce for f64 {
}
}
impl Coerce for crate::DateTime {
fn coerce(value: NumericalValue) -> Self {
let timestamp_micros = i64::coerce(value);
crate::DateTime { timestamp_micros }
}
}
#[cfg(test)]
mod tests {
use super::NumericalType;
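The new impl lets DateTime reuse the numerical coercion machinery: the value is first coerced to i64, then interpreted as a microsecond timestamp. A sketch of a call site (inside the crate, where Coerce and NumericalValue are in scope):

let dt = <crate::DateTime as Coerce>::coerce(NumericalValue::I64(1_000_000));
assert_eq!(dt.timestamp_micros, 1_000_000); // one second past the epoch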


@@ -27,7 +27,7 @@ fn main() -> tantivy::Result<()> {
let score_fieldtype =
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let highscore_field = schema_builder.add_f64_field("highscore", score_fieldtype.clone());
let price_field = schema_builder.add_f64_field("price", score_fieldtype.clone());
let price_field = schema_builder.add_f64_field("price", score_fieldtype);
let schema = schema_builder.build();
@@ -112,7 +112,7 @@ fn main() -> tantivy::Result<()> {
],
..Default::default()
}),
sub_aggregation: sub_agg_req_1.clone(),
sub_aggregation: sub_agg_req_1,
}),
)]
.into_iter()
@@ -123,7 +123,7 @@ fn main() -> tantivy::Result<()> {
let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
let res: Value = serde_json::to_value(&agg_res)?;
let res: Value = serde_json::to_value(agg_res)?;
println!("{}", serde_json::to_string_pretty(&res)?);
Ok(())


@@ -14,7 +14,7 @@ use fastfield_codecs::Column;
// Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Score, SegmentReader};
#[derive(Default)]
@@ -52,11 +52,11 @@ impl Stats {
}
struct StatsCollector {
field: Field,
field: String,
}
impl StatsCollector {
fn with_field(field: Field) -> StatsCollector {
fn with_field(field: String) -> StatsCollector {
StatsCollector { field }
}
}
@@ -73,7 +73,7 @@ impl Collector for StatsCollector {
_segment_local_id: u32,
segment_reader: &SegmentReader,
) -> tantivy::Result<StatsSegmentCollector> {
let fast_field_reader = segment_reader.fast_fields().u64(self.field)?;
let fast_field_reader = segment_reader.fast_fields().u64(&self.field)?;
Ok(StatsSegmentCollector {
fast_field_reader,
stats: Stats::default(),
@@ -171,7 +171,9 @@ fn main() -> tantivy::Result<()> {
// here we want to get a hit on the 'ken' in Frankenstein
let query = query_parser.parse_query("broom")?;
if let Some(stats) = searcher.search(&query, &StatsCollector::with_field(price))? {
if let Some(stats) =
searcher.search(&query, &StatsCollector::with_field("price".to_string()))?
{
println!("count: {}", stats.count());
println!("mean: {}", stats.mean());
println!("standard deviation: {}", stats.standard_deviation());


@@ -27,7 +27,7 @@ fn main() -> Result<()> {
reader.reload()?;
let searcher = reader.searcher();
// The end is excluded i.e. here we are searching up to 1969
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
let docs_in_the_sixties = RangeQuery::new_u64("year".to_string(), 1960..1970);
// Uses a Count collector to sum the total number of docs in the range
let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
assert_eq!(num_60s_books, 10);


@@ -4,7 +4,7 @@ use std::sync::{Arc, RwLock, Weak};
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, TEXT};
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{
doc, DocAddress, DocId, Index, IndexReader, Opstamp, Searcher, SearcherGeneration, SegmentId,
SegmentReader, Warmer,
@@ -25,13 +25,13 @@ pub trait PriceFetcher: Send + Sync + 'static {
}
struct DynamicPriceColumn {
field: Field,
field: String,
price_cache: RwLock<HashMap<(SegmentId, Option<Opstamp>), Arc<Vec<Price>>>>,
price_fetcher: Box<dyn PriceFetcher>,
}
impl DynamicPriceColumn {
pub fn with_product_id_field<T: PriceFetcher>(field: Field, price_fetcher: T) -> Self {
pub fn with_product_id_field<T: PriceFetcher>(field: String, price_fetcher: T) -> Self {
DynamicPriceColumn {
field,
price_cache: Default::default(),
@@ -48,7 +48,7 @@ impl Warmer for DynamicPriceColumn {
fn warm(&self, searcher: &Searcher) -> tantivy::Result<()> {
for segment in searcher.segment_readers() {
let key = (segment.segment_id(), segment.delete_opstamp());
let product_id_reader = segment.fast_fields().u64(self.field)?;
let product_id_reader = segment.fast_fields().u64(&self.field)?;
let product_ids: Vec<ProductId> = segment
.doc_ids_alive()
.map(|doc| product_id_reader.get_val(doc))
@@ -123,7 +123,7 @@ fn main() -> tantivy::Result<()> {
let price_table = ExternalPriceTable::default();
let price_dynamic_column = Arc::new(DynamicPriceColumn::with_product_id_field(
product_id,
"product_id".to_string(),
price_table.clone(),
));
price_table.update_price(OLIVE_OIL, 12);


@@ -402,8 +402,8 @@ mod tests {
let mut buffer = Vec::new();
let col = VecColumn::from(&[false, true][..]);
serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
// 5 bytes of header, 1 byte of value, 7 bytes of padding.
assert_eq!(buffer.len(), 3 + 5 + 8 + 4 + 2);
// 5 bytes of header, 1 byte of value
assert_eq!(buffer.len(), 3 + 5 + 1 + 4 + 2);
}
#[test]
@@ -411,8 +411,8 @@ mod tests {
let mut buffer = Vec::new();
let col = VecColumn::from(&[true][..]);
serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
// 5 bytes of header, 0 bytes of value, 7 bytes of padding.
assert_eq!(buffer.len(), 3 + 5 + 7 + 4 + 2);
// 5 bytes of header, 0 bytes of value
assert_eq!(buffer.len(), 3 + 5 + 4 + 2);
}
#[test]
@@ -422,6 +422,6 @@ mod tests {
let col = VecColumn::from(&vals[..]);
serialize(col, &mut buffer, &[FastFieldCodecType::Bitpacked]).unwrap();
// Values are stored over 3 bits.
assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 7 + 4 + 2);
assert_eq!(buffer.len(), 3 + 7 + (3 * 80 / 8) + 4 + 2);
}
}


@@ -1,2 +0,0 @@
#!/bin/bash
cargo test


@@ -94,10 +94,7 @@ impl BucketAggregationWithAccessor {
BucketAggregationType::Terms(TermsAggregation {
field: field_name, ..
}) => {
let field = reader
.schema()
.get_field(field_name)
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
let field = reader.schema().get_field(field_name)?;
inverted_index = Some(reader.inverted_index(field)?);
get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
}
@@ -195,10 +192,7 @@ fn get_ff_reader_and_validate(
field_name: &str,
cardinality: Cardinality,
) -> crate::Result<(FastFieldAccessor, Type)> {
let field = reader
.schema()
.get_field(field_name)
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
let field = reader.schema().get_field(field_name)?;
let field_type = reader.schema().get_field_entry(field).field_type();
if let Some((_ff_type, field_cardinality)) = type_and_cardinality(field_type) {
@@ -218,10 +212,10 @@ fn get_ff_reader_and_validate(
let ff_fields = reader.fast_fields();
match cardinality {
Cardinality::SingleValue => ff_fields
.u64_lenient(field)
.u64_lenient(field_name)
.map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
Cardinality::MultiValues => ff_fields
.u64s_lenient(field)
.u64s_lenient(field_name)
.map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
}
}


@@ -548,9 +548,7 @@ pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
};
// If we have a date type on the histogram buckets, we add the `key_as_string` field as rfc3339
let field = schema
.get_field(&histogram_req.field)
.ok_or_else(|| TantivyError::FieldNotFound(histogram_req.field.to_string()))?;
let field = schema.get_field(&histogram_req.field)?;
if schema.get_field_entry(field).field_type().is_date() {
for bucket in buckets.iter_mut() {
if let crate::aggregation::Key::F64(val) = bucket.key {


@@ -362,13 +362,19 @@ impl SegmentTermCollector {
let mut entries: Vec<(u32, TermBucketEntry)> =
self.term_buckets.entries.into_iter().collect();
let order_by_key = self.req.order.target == OrderTarget::Key;
let order_by_sub_aggregation =
matches!(self.req.order.target, OrderTarget::SubAggregation(_));
match self.req.order.target {
OrderTarget::Key => {
// Defer ordering and cut-off until after the texts are loaded from the dictionary.
// We rely on the fact that term ordinals match the order of the strings.
// TODO: We could have a special collector, that keeps only TOP n results at any
// time.
if self.req.order.order == Order::Desc {
entries.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.0));
} else {
entries.sort_unstable_by_key(|bucket| bucket.0);
}
}
OrderTarget::SubAggregation(_name) => {
// don't sort and cut off since it's hard to make assumptions on the quality of the
@@ -384,12 +390,11 @@ impl SegmentTermCollector {
}
}
let (term_doc_count_before_cutoff, mut sum_other_doc_count) =
if order_by_key || order_by_sub_aggregation {
(0, 0)
} else {
cut_off_buckets(&mut entries, self.req.segment_size as usize)
};
let (term_doc_count_before_cutoff, sum_other_doc_count) = if order_by_sub_aggregation {
(0, 0)
} else {
cut_off_buckets(&mut entries, self.req.segment_size as usize)
};
let inverted_index = agg_with_accessor
.inverted_index
@@ -412,6 +417,10 @@ impl SegmentTermCollector {
if self.req.min_doc_count == 0 {
let mut stream = term_dict.stream()?;
while let Some((key, _ord)) = stream.next() {
if dict.len() >= self.req.segment_size as usize {
break;
}
let key = std::str::from_utf8(key)
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
if !dict.contains_key(key) {
@@ -420,20 +429,6 @@ impl SegmentTermCollector {
}
}
if order_by_key {
let mut dict_entries = dict.into_iter().collect_vec();
if self.req.order.order == Order::Desc {
dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key1.cmp(key2));
} else {
dict_entries.sort_unstable_by(|(key1, _), (key2, _)| key2.cmp(key1));
}
let (_, sum_other_docs) =
cut_off_buckets(&mut dict_entries, self.req.segment_size as usize);
sum_other_doc_count += sum_other_docs;
dict = dict_entries.into_iter().collect();
}
Ok(IntermediateBucketResult::Terms(
IntermediateTermBucketResult {
entries: dict,
@@ -923,14 +918,14 @@ mod tests {
];
let index = get_test_index_from_values_and_terms(merge_segments, &segment_and_terms)?;
// key desc
// key asc
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(),
order: Some(CustomOrder {
order: Order::Desc,
order: Order::Asc,
target: OrderTarget::Key,
}),
..Default::default()
@@ -957,7 +952,7 @@ mod tests {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(),
order: Some(CustomOrder {
order: Order::Desc,
order: Order::Asc,
target: OrderTarget::Key,
}),
size: Some(2),
@@ -981,14 +976,14 @@ mod tests {
assert_eq!(res["my_texts"]["sum_other_doc_count"], 3);
// key desc and segment_size cut_off
// key asc and segment_size cut_off
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(),
order: Some(CustomOrder {
order: Order::Desc,
order: Order::Asc,
target: OrderTarget::Key,
}),
size: Some(2),
@@ -1011,14 +1006,14 @@ mod tests {
serde_json::Value::Null
);
// key asc
// key desc
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(),
order: Some(CustomOrder {
order: Order::Asc,
order: Order::Desc,
target: OrderTarget::Key,
}),
..Default::default()
@@ -1038,14 +1033,14 @@ mod tests {
assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 5);
assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
// key asc, size cut_off
// key desc, size cut_off
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(),
order: Some(CustomOrder {
order: Order::Asc,
order: Order::Desc,
target: OrderTarget::Key,
}),
size: Some(2),
@@ -1068,14 +1063,14 @@ mod tests {
);
assert_eq!(res["my_texts"]["sum_other_doc_count"], 5);
// key asc, segment_size cut_off
// key desc, segment_size cut_off
let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(),
order: Some(CustomOrder {
order: Order::Asc,
order: Order::Desc,
target: OrderTarget::Key,
}),
size: Some(2),
@@ -1352,68 +1347,3 @@ mod tests {
Ok(())
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use itertools::Itertools;
use rand::seq::SliceRandom;
use rand::thread_rng;
use super::*;
fn get_collector_with_buckets(num_docs: u64) -> TermBuckets {
TermBuckets::from_req_and_validate(&Default::default(), num_docs as usize).unwrap()
}
fn get_rand_terms(total_terms: u64, num_terms_returned: u64) -> Vec<u64> {
let mut rng = thread_rng();
let all_terms = (0..total_terms - 1).collect_vec();
let mut vals = vec![];
for _ in 0..num_terms_returned {
let val = all_terms.as_slice().choose(&mut rng).unwrap();
vals.push(*val);
}
vals
}
fn bench_term_buckets(b: &mut test::Bencher, num_terms: u64, total_terms: u64) {
let mut collector = get_collector_with_buckets(total_terms);
let vals = get_rand_terms(total_terms, num_terms);
let aggregations_with_accessor: AggregationsWithAccessor = Default::default();
let bucket_count: BucketCount = BucketCount {
bucket_count: Default::default(),
max_bucket_count: 1_000_001u32,
};
b.iter(|| {
for &val in &vals {
collector
.increment_bucket(&[val], 0, &aggregations_with_accessor, &bucket_count, &None)
.unwrap();
}
})
}
#[bench]
fn bench_term_buckets_500_of_1_000_000(b: &mut test::Bencher) {
bench_term_buckets(b, 500u64, 1_000_000u64)
}
#[bench]
fn bench_term_buckets_1_000_000_of_50_000(b: &mut test::Bencher) {
bench_term_buckets(b, 1_000_000u64, 50_000u64)
}
#[bench]
fn bench_term_buckets_1_000_000_of_50(b: &mut test::Bencher) {
bench_term_buckets(b, 1_000_000u64, 50u64)
}
#[bench]
fn bench_term_buckets_1_000_000_of_1_000_000(b: &mut test::Bencher) {
bench_term_buckets(b, 1_000_000u64, 1_000_000u64)
}
}
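Sorting by term ordinal before any text is loaded works because, as the comment above notes, the term dictionary assigns ordinals in the order of the terms; sorting (ordinal, bucket) pairs therefore sorts by key. A sketch of the two orderings used above:

let mut entries: Vec<(u32, u64)> = vec![(2, 10), (0, 3), (1, 7)]; // (term ordinal, doc count)
entries.sort_unstable_by_key(|&(ord, _)| ord); // key asc
assert_eq!(entries, [(0, 3), (1, 7), (2, 10)]);
entries.sort_unstable_by_key(|&(ord, _)| std::cmp::Reverse(ord)); // key desc
assert_eq!(entries, [(2, 10), (1, 7), (0, 3)]);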


@@ -26,7 +26,6 @@ use super::{format_date, Key, SerializedKey, VecWithNames};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::bucket::TermsAggregationInternal;
use crate::schema::Schema;
use crate::TantivyError;
/// Contains the intermediate aggregation result, which is optimized to be merged with other
/// intermediate results.
@@ -500,7 +499,7 @@ impl IntermediateTermBucketResult {
match req.order.target {
OrderTarget::Key => {
buckets.sort_by(|left, right| {
if req.order.order == Order::Desc {
if req.order.order == Order::Asc {
left.key.partial_cmp(&right.key)
} else {
right.key.partial_cmp(&left.key)
@@ -658,9 +657,7 @@ impl IntermediateRangeBucketEntry {
// If we have a date type on the range buckets, we add the `key_as_string` field as
// rfc3339
let field = schema
.get_field(&range_req.field)
.ok_or_else(|| TantivyError::FieldNotFound(range_req.field.to_string()))?;
let field = schema.get_field(&range_req.field)?;
if schema.get_field_entry(field).field_type().is_date() {
if let Some(val) = range_bucket_entry.to {
let key_as_string = format_date(val as i64)?;


@@ -6,14 +6,13 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that computes the average of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "avg": {
/// "field": "score",
/// "field": "score"
/// }
/// }
/// ```
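The dropped trailing comma matters because these doc examples are meant to be valid JSON: serde_json, which parses the aggregation requests, implements strict JSON and rejects trailing commas. A quick check, reusing the `score` field name from the example above:

```rust
fn main() {
    let with_comma = r#"{ "avg": { "field": "score", } }"#;
    let without_comma = r#"{ "avg": { "field": "score" } }"#;

    // Strict JSON: the trailing comma is a parse error.
    assert!(serde_json::from_str::<serde_json::Value>(with_comma).is_err());
    assert!(serde_json::from_str::<serde_json::Value>(without_comma).is_ok());
}
```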

View File

@@ -6,14 +6,13 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that counts the number of values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "value_count": {
/// "field": "score",
/// "field": "score"
/// }
/// }
/// ```

View File

@@ -6,14 +6,13 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that computes the maximum of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "max": {
/// "field": "score",
/// "field": "score"
/// }
/// }
/// ```

View File

@@ -6,14 +6,13 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that computes the minimum of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "min": {
/// "field": "score",
/// "field": "score"
/// }
/// }
/// ```

View File

@@ -80,12 +80,12 @@ mod tests {
"price_stats": { "stats": { "field": "price" } },
"price_sum": { "sum": { "field": "price" } }
}"#;
let aggregations: Aggregations = serde_json::from_str(&aggregations_json).unwrap();
let aggregations: Aggregations = serde_json::from_str(aggregations_json).unwrap();
let collector = AggregationCollector::from_aggs(aggregations, None, index.schema());
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let aggregations_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
let aggregations_res_json = serde_json::to_value(&aggregations_res).unwrap();
let aggregations_res_json = serde_json::to_value(aggregations_res).unwrap();
assert_eq!(aggregations_res_json["price_avg"]["value"], 2.5);
assert_eq!(aggregations_res_json["price_count"]["value"], 6.0);

View File

@@ -7,14 +7,13 @@ use crate::{DocId, TantivyError};
/// A multi-value metric aggregation that computes a collection of statistics on numeric values that
/// are extracted from the aggregated documents.
/// Supported field types are `u64`, `i64`, and `f64`.
/// See [`Stats`] for returned statistics.
///
/// # JSON Format
/// ```json
/// {
/// "stats": {
/// "field": "score",
/// "field": "score"
/// }
/// }
/// ```

View File

@@ -6,14 +6,13 @@ use super::{IntermediateStats, SegmentStatsCollector};
/// A single-value metric aggregation that sums up numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [super::SingleMetricResult] for return value.
///
/// # JSON Format
/// ```json
/// {
/// "sum": {
/// "field": "score",
/// "field": "score"
/// }
/// }
/// ```

View File

@@ -1,6 +1,5 @@
//! # Aggregations
//!
//!
//! An aggregation summarizes your data as statistics on buckets or metrics.
//!
//! Aggregations can provide answer to questions like:
@@ -41,6 +40,10 @@
//! - [Metric](metric)
//! - [Average](metric::AverageAggregation)
//! - [Stats](metric::StatsAggregation)
//! - [Min](metric::MinAggregation)
//! - [Max](metric::MaxAggregation)
//! - [Sum](metric::SumAggregation)
//! - [Count](metric::CountAggregation)
//!
//! # Example
//! Compute the average metric, by building [`agg_req::Aggregations`], which is built from an
@@ -75,7 +78,7 @@
//! }
//! ```
//! # Example JSON
//! Requests are compatible with the elasticsearch json request format.
//! Requests are compatible with the elasticsearch JSON request format.
//!
//! ```
//! use tantivy::aggregation::agg_req::Aggregations;
@@ -1153,12 +1156,6 @@ mod tests {
r#"FieldNotFound("not_exist_field")"#
);
let agg_res = avg_on_field("scores_i64");
assert_eq!(
format!("{:?}", agg_res),
r#"InvalidArgument("Invalid field cardinality on field scores_i64 expected SingleValue, but got MultiValues")"#
);
Ok(())
}
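Since the module docs above advertise elasticsearch-compatible JSON requests, here is a minimal sketch of deserializing one into `Aggregations`; the request name `avg_score` and the field `score` are illustrative, not taken from the diff:

```rust
use tantivy::aggregation::agg_req::Aggregations;

fn parse_request() -> serde_json::Result<Aggregations> {
    // An average over the fast field "score"; names are illustrative.
    serde_json::from_str(r#"{ "avg_score": { "avg": { "field": "score" } } }"#)
}
```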

View File

@@ -130,7 +130,7 @@ where
let fast_field_reader = segment_reader
.fast_fields()
.typed_fast_field_reader(self.field)?;
.typed_fast_field_reader(schema.get_field_name(self.field))?;
let segment_collector = self
.collector

View File

@@ -5,7 +5,7 @@ use fastfield_codecs::Column;
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::FastValue;
use crate::schema::{Field, Type};
use crate::schema::Type;
use crate::{DocId, Score};
/// Histogram builds a histogram of the values of a fastfield for the
@@ -28,7 +28,7 @@ pub struct HistogramCollector {
min_value: u64,
num_buckets: usize,
divider: DividerU64,
field: Field,
field: String,
}
impl HistogramCollector {
@@ -46,7 +46,7 @@ impl HistogramCollector {
/// # Disclaimer
/// This function panics if the field given is of type f64.
pub fn new<TFastValue: FastValue>(
field: Field,
field: String,
min_value: TFastValue,
bucket_width: u64,
num_buckets: usize,
@@ -112,7 +112,7 @@ impl Collector for HistogramCollector {
_segment_local_id: crate::SegmentOrdinal,
segment: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
let ff_reader = segment.fast_fields().u64_lenient(self.field)?;
let ff_reader = segment.fast_fields().u64_lenient(&self.field)?;
Ok(SegmentHistogramCollector {
histogram_computer: HistogramComputer {
counts: vec![0; self.num_buckets],
@@ -211,13 +211,13 @@ mod tests {
#[test]
fn test_no_segments() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let val_field = schema_builder.add_u64_field("val_field", FAST);
schema_builder.add_u64_field("val_field", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let reader = index.reader()?;
let searcher = reader.searcher();
let all_query = AllQuery;
let histogram_collector = HistogramCollector::new(val_field, 0u64, 2, 5);
let histogram_collector = HistogramCollector::new("val_field".to_string(), 0u64, 2, 5);
let histogram = searcher.search(&all_query, &histogram_collector)?;
assert_eq!(histogram, vec![0; 5]);
Ok(())
@@ -238,7 +238,8 @@ mod tests {
let reader = index.reader()?;
let searcher = reader.searcher();
let all_query = AllQuery;
let histogram_collector = HistogramCollector::new(val_field, -20i64, 10u64, 4);
let histogram_collector =
HistogramCollector::new("val_field".to_string(), -20i64, 10u64, 4);
let histogram = searcher.search(&all_query, &histogram_collector)?;
assert_eq!(histogram, vec![1, 1, 0, 1]);
Ok(())
@@ -262,7 +263,8 @@ mod tests {
let reader = index.reader()?;
let searcher = reader.searcher();
let all_query = AllQuery;
let histogram_collector = HistogramCollector::new(val_field, -20i64, 10u64, 4);
let histogram_collector =
HistogramCollector::new("val_field".to_string(), -20i64, 10u64, 4);
let histogram = searcher.search(&all_query, &histogram_collector)?;
assert_eq!(histogram, vec![1, 1, 0, 1]);
Ok(())
@@ -285,7 +287,7 @@ mod tests {
let searcher = reader.searcher();
let all_query = AllQuery;
let week_histogram_collector = HistogramCollector::new(
date_field,
"date_field".to_string(),
DateTime::from_primitive(
Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?,
),
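With this change `HistogramCollector` is constructed from the field name rather than a `Field` handle. A sketch modeled on the `test_no_segments` case above:

```rust
use tantivy::collector::HistogramCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST};
use tantivy::Index;

fn histogram_over_empty_index() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_u64_field("val_field", FAST);
    let index = Index::create_in_ram(schema_builder.build());
    let searcher = index.reader()?.searcher();

    // The collector now takes the field name as a String.
    let collector = HistogramCollector::new("val_field".to_string(), 0u64, 2, 5);
    let histogram = searcher.search(&AllQuery, &collector)?;
    assert_eq!(histogram, vec![0; 5]);
    Ok(())
}
```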

View File

@@ -155,7 +155,7 @@ impl SegmentCollector for TestSegmentCollector {
///
/// This collector is mainly useful for tests.
pub struct FastFieldTestCollector {
field: Field,
field: String,
}
pub struct FastFieldSegmentCollector {
@@ -164,7 +164,7 @@ pub struct FastFieldSegmentCollector {
}
impl FastFieldTestCollector {
pub fn for_field(field: Field) -> FastFieldTestCollector {
pub fn for_field(field: String) -> FastFieldTestCollector {
FastFieldTestCollector { field }
}
}
@@ -180,7 +180,7 @@ impl Collector for FastFieldTestCollector {
) -> crate::Result<FastFieldSegmentCollector> {
let reader = segment_reader
.fast_fields()
.u64(self.field)
.u64(&self.field)
.expect("Requested field is not a fast field.");
Ok(FastFieldSegmentCollector {
vals: Vec::new(),
@@ -238,7 +238,9 @@ impl Collector for BytesFastFieldTestCollector {
_segment_local_id: u32,
segment_reader: &SegmentReader,
) -> crate::Result<BytesFastFieldSegmentCollector> {
let reader = segment_reader.fast_fields().bytes(self.field)?;
let reader = segment_reader
.fast_fields()
.bytes(segment_reader.schema().get_field_name(self.field))?;
Ok(BytesFastFieldSegmentCollector {
vals: Vec::new(),
reader,

View File

@@ -156,7 +156,7 @@ impl CustomScorer<u64> for ScorerByField {
// The conversion will then happen only on the top-K docs.
let ff_reader = segment_reader
.fast_fields()
.typed_fast_field_reader(self.field)?;
.typed_fast_field_reader(segment_reader.schema().get_field_name(self.field))?;
Ok(ScorerByFastFieldReader { ff_reader })
}
}
@@ -454,7 +454,7 @@ impl TopDocs {
/// // In our case, we will get a reader for the popularity
/// // fast field.
/// let popularity_reader =
/// segment_reader.fast_fields().u64(popularity).unwrap();
/// segment_reader.fast_fields().u64("popularity").unwrap();
///
/// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| {
@@ -561,9 +561,9 @@ impl TopDocs {
/// // Note that this is implemented by using a `(u64, u64)`
/// // as a score.
/// let popularity_reader =
/// segment_reader.fast_fields().u64(popularity).unwrap();
/// segment_reader.fast_fields().u64("popularity").unwrap();
/// let boosted_reader =
/// segment_reader.fast_fields().u64(boosted).unwrap();
/// segment_reader.fast_fields().u64("boosted").unwrap();
///
/// // We can now define our actual scoring function
/// move |doc: DocId| {
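Both doc examples above now fetch fast field readers by name. A condensed sketch of the popularity-boosting scorer the first example describes (schema and indexing setup omitted; `popularity` is the u64 fast field from the example):

```rust
use tantivy::collector::{Collector, TopDocs};
use tantivy::{DocId, Score, SegmentReader};

fn popularity_boosted_top10() -> impl Collector {
    TopDocs::with_limit(10).tweak_score(move |segment_reader: &SegmentReader| {
        // Fast field readers are now fetched by field name.
        let popularity_reader = segment_reader.fast_fields().u64("popularity").unwrap();
        move |doc: DocId, original_score: Score| {
            let popularity = popularity_reader.get_val(doc);
            original_score * (1 + popularity) as Score
        }
    })
}
```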

View File

@@ -231,7 +231,7 @@ impl IndexBuilder {
fn validate(&self) -> crate::Result<()> {
if let Some(schema) = self.schema.as_ref() {
if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
let schema_field = schema.get_field(&sort_by_field.field).ok_or_else(|| {
let schema_field = schema.get_field(&sort_by_field.field).map_err(|_| {
TantivyError::InvalidArgument(format!(
"Field to sort index {} not found in schema",
sort_by_field.field
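`Schema::get_field` now returns a `Result` instead of an `Option`, which is why call sites like this one switch from `ok_or_else` to `map_err`. A sketch of the pattern in isolation (the function name is hypothetical):

```rust
use tantivy::schema::{Field, Schema};
use tantivy::TantivyError;

fn sort_field(schema: &Schema, field_name: &str) -> tantivy::Result<Field> {
    // get_field yields Err(TantivyError::FieldNotFound(..)) for unknown names,
    // which we map to the more specific error used above.
    schema.get_field(field_name).map_err(|_| {
        TantivyError::InvalidArgument(format!(
            "Field to sort index {} not found in schema",
            field_name
        ))
    })
}
```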

View File

@@ -135,6 +135,8 @@ impl InvertedIndexReader {
term_info: &TermInfo,
option: IndexRecordOption,
) -> io::Result<SegmentPostings> {
let option = option.downgrade(self.record_option);
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
let position_reader = {
if option.has_positions() {
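The inserted `downgrade` call caps the requested `IndexRecordOption` at what was actually indexed, which is what turns the former VInt panic into a graceful fallback. A self-contained sketch of the idea, using a stand-in enum rather than tantivy's type:

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum RecordOption {
    Basic,
    WithFreqs,
    WithFreqsAndPositions,
}

// Never return more detail than the index actually stores: the effective
// option is the weaker of the requested and the available one.
fn downgrade(requested: RecordOption, available: RecordOption) -> RecordOption {
    requested.min(available)
}
```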

View File

@@ -249,7 +249,7 @@ impl SearcherInner {
index: Index,
segment_readers: Vec<SegmentReader>,
generation: TrackedObject<SearcherGeneration>,
doc_store_cache_size: usize,
doc_store_cache_num_blocks: usize,
) -> io::Result<SearcherInner> {
assert_eq!(
&segment_readers
@@ -261,7 +261,7 @@ impl SearcherInner {
);
let store_readers: Vec<StoreReader> = segment_readers
.iter()
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_num_blocks))
.collect::<io::Result<Vec<_>>>()?;
Ok(SearcherInner {

View File

@@ -95,7 +95,8 @@ impl SegmentReader {
match field_entry.field_type() {
FieldType::Facet(_) => {
let term_ords_reader = self.fast_fields().u64s(field)?;
let term_ords_reader =
self.fast_fields().u64s(self.schema.get_field_name(field))?;
let termdict = self
.termdict_composite
.open_read(field)
@@ -133,9 +134,12 @@ impl SegmentReader {
&self.fieldnorm_readers
}
/// Accessor to the segment's `StoreReader`.
pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
StoreReader::open(self.store_file.clone(), cache_size)
/// Accessor to the segment's [`StoreReader`](crate::store::StoreReader).
///
/// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
/// The size of blocks is configurable; this should be reflected in the number of blocks chosen here.
pub fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader> {
StoreReader::open(self.store_file.clone(), cache_num_blocks)
}
/// Open a new segment for reading.
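A sketch of the renamed parameter in use; the value 32 is an arbitrary example, and each cache entry holds one decompressed doc store block:

```rust
use tantivy::{Document, SegmentReader};

fn first_stored_doc(segment_reader: &SegmentReader) -> tantivy::Result<Document> {
    // Keep up to 32 decompressed doc store blocks in the store reader's LRU.
    let store_reader = segment_reader.get_store_reader(32)?;
    store_reader.get(0)
}
```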

View File

@@ -25,7 +25,7 @@ mod tests {
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);
let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap();
let bytes_reader = segment_reader.fast_fields().bytes("bytesfield").unwrap();
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
assert!(bytes_reader.get_bytes(1).is_empty());
assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
@@ -109,8 +109,7 @@ mod tests {
let searcher = create_index_for_test(FAST)?;
assert_eq!(searcher.num_docs(), 1);
let fast_fields = searcher.segment_reader(0u32).fast_fields();
let field = searcher.schema().get_field("string_bytes").unwrap();
let fast_field_reader = fast_fields.bytes(field).unwrap();
let fast_field_reader = fast_fields.bytes("string_bytes").unwrap();
assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
Ok(())
}

View File

@@ -226,7 +226,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 34);
assert_eq!(file.len(), 27);
let composite_file = CompositeFile::open(&file)?;
let fast_field_bytes = composite_file.open_read(*FIELD).unwrap().read_bytes()?;
let fast_field_reader = open::<u64>(fast_field_bytes)?;
@@ -275,7 +275,7 @@ mod tests {
serializer.close()?;
}
let file = directory.open_read(path)?;
assert_eq!(file.len(), 62);
assert_eq!(file.len(), 55);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite
@@ -316,7 +316,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 35);
assert_eq!(file.len(), 28);
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite
@@ -355,7 +355,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 80049);
assert_eq!(file.len(), 80042);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite
@@ -397,7 +397,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 49_usize);
assert_eq!(file.len(), 42_usize);
{
let fast_fields_composite = CompositeFile::open(&file)?;
@@ -583,7 +583,7 @@ mod tests {
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
let text_fast_field = fast_fields.u64s("text").unwrap();
assert_eq!(
get_vals_for_docs(&text_fast_field, 0..5),
@@ -622,7 +622,7 @@ mod tests {
assert_eq!(searcher.segment_readers().len(), 2);
let segment_reader = searcher.segment_reader(1);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
let text_fast_field = fast_fields.u64s("text").unwrap();
assert_eq!(get_vals_for_docs(&text_fast_field, 0..3), vec![0, 1, 0]);
}
@@ -638,7 +638,7 @@ mod tests {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
let text_fast_field = fast_fields.u64s("text").unwrap();
assert_eq!(
get_vals_for_docs(&text_fast_field, 0..8),
@@ -681,7 +681,7 @@ mod tests {
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
let text_fast_field = fast_fields.u64s("text").unwrap();
assert_eq!(get_vals_for_docs(&text_fast_field, 0..6), vec![1, 0, 0, 2]);
@@ -712,7 +712,7 @@ mod tests {
assert_eq!(searcher.segment_readers().len(), 2);
let segment_reader = searcher.segment_reader(1);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
let text_fast_field = fast_fields.u64s("text").unwrap();
assert_eq!(get_vals_for_docs(&text_fast_field, 0..2), vec![0, 1]);
}
@@ -728,7 +728,7 @@ mod tests {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let text_fast_field = fast_fields.u64s(text_field).unwrap();
let text_fast_field = fast_fields.u64s("text").unwrap();
assert_eq!(
get_vals_for_docs(&text_fast_field, 0..9),
@@ -773,8 +773,8 @@ mod tests {
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let date_fast_field = fast_fields.date(date_field).unwrap();
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
let date_fast_field = fast_fields.date("date").unwrap();
let dates_fast_field = fast_fields.dates("multi_date").unwrap();
let mut dates = vec![];
{
assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
@@ -836,7 +836,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 33);
assert_eq!(file.len(), 26);
let composite_file = CompositeFile::open(&file)?;
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?;
@@ -874,7 +874,7 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 45);
assert_eq!(file.len(), 38);
let composite_file = CompositeFile::open(&file)?;
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?;
@@ -906,7 +906,7 @@ mod tests {
}
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
assert_eq!(file.len(), 32);
assert_eq!(file.len(), 25);
let data = composite_file.open_read(field).unwrap().read_bytes()?;
let fast_field_reader = open::<bool>(data)?;
assert_eq!(fast_field_reader.get_val(0), false);
@@ -940,10 +940,10 @@ mod tests {
pub fn test_gcd_date() -> crate::Result<()> {
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
assert_eq!(size_prec_sec, 5 + 4 + 28 + (1_000 * 13) / 8); // 13 bits per val = ceil(log_2(number of seconds in 2 hours))
assert_eq!(size_prec_sec, 5 + 4 + 21 + (1_000 * 13) / 8); // 13 bits per val = ceil(log_2(number of seconds in 2 hours))
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
assert_eq!(size_prec_micro, 5 + 4 + 26 + (1_000 * 33) / 8); // 33 bits per val = ceil(log_2(number of microseconds in 2 hours))
assert_eq!(size_prec_micro, 5 + 4 + 19 + (1_000 * 33) / 8); // 33 bits per val = ceil(log_2(number of microseconds in 2 hours))
Ok(())
}
@@ -1014,7 +1014,7 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment = &searcher.segment_readers()[0];
let field = segment.fast_fields().u64(num_field).unwrap();
let field = segment.fast_fields().u64("url_norm_hash").unwrap();
let numbers = vec![100, 200, 300];
let test_range = |range: RangeInclusive<u64>| {
@@ -1063,7 +1063,7 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment = &searcher.segment_readers()[0];
let field = segment.fast_fields().u64(num_field).unwrap();
let field = segment.fast_fields().u64("url_norm_hash").unwrap();
let numbers = vec![1000, 1001, 1003];
let test_range = |range: RangeInclusive<u64>| {

View File

@@ -52,7 +52,7 @@ mod tests {
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new();
let multi_value_reader = segment_reader.fast_fields().u64s(field)?;
let multi_value_reader = segment_reader.fast_fields().u64s("multifield")?;
{
multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[4u64]);
@@ -229,7 +229,7 @@ mod tests {
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new();
let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
let multi_value_reader = segment_reader.fast_fields().i64s("multifield").unwrap();
multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[-4i64]);
multi_value_reader.get_vals(0, &mut vals);
@@ -261,7 +261,7 @@ mod tests {
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new();
let multi_value_reader = segment_reader.fast_fields().bools(bool_field).unwrap();
let multi_value_reader = segment_reader.fast_fields().bools("multifield").unwrap();
multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[false]);
multi_value_reader.get_vals(0, &mut vals);

View File

@@ -159,7 +159,7 @@ mod tests {
let searcher = reader.searcher();
let reader = searcher.segment_reader(0);
let date_ff_reader = reader.fast_fields().dates(date_field).unwrap();
let date_ff_reader = reader.fast_fields().dates("multi_date_field").unwrap();
let mut docids = vec![];
date_ff_reader.get_docids_for_value_range(
DateTime::from_utc(first_time_stamp)..=DateTime::from_utc(two_secs_ahead),
@@ -173,7 +173,7 @@ mod tests {
assert_eq!(
count_multiples(RangeQuery::new_date(
date_field,
"multi_date_field".to_string(),
DateTime::from_utc(first_time_stamp)..DateTime::from_utc(two_secs_ahead)
)),
1
@@ -226,7 +226,7 @@ mod tests {
let reader = searcher.segment_reader(0);
assert_eq!(reader.num_docs(), 5);
let date_ff_reader = reader.fast_fields().dates(date_field).unwrap();
let date_ff_reader = reader.fast_fields().dates("multi_date_field").unwrap();
let mut docids = vec![];
date_ff_reader.get_docids_for_value_range(
DateTime::from_utc(first_time_stamp)..=DateTime::from_utc(two_secs_ahead),
@@ -240,7 +240,7 @@ mod tests {
assert_eq!(
count_multiples(RangeQuery::new_date(
date_field,
"multi_date_field".to_string(),
DateTime::from_utc(first_time_stamp)..DateTime::from_utc(two_secs_ahead)
)),
2
@@ -324,7 +324,7 @@ mod tests {
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);
let field_reader = segment_reader.fast_fields().i64s(item_field)?;
let field_reader = segment_reader.fast_fields().i64s("items")?;
assert_eq!(field_reader.min_value(), -2);
assert_eq!(field_reader.max_value(), 6);

View File

@@ -114,9 +114,11 @@ impl FastFieldReaders {
pub(crate) fn typed_fast_field_reader_with_idx<TFastValue: FastValue>(
&self,
field: Field,
field_name: &str,
index: usize,
) -> crate::Result<Arc<dyn Column<TFastValue>>> {
let field = self.schema.get_field(field_name)?;
let fast_field_slice = self.fast_field_data(field, index)?;
let bytes = fast_field_slice.read_bytes()?;
let column = fastfield_codecs::open(bytes)?;
@@ -125,32 +127,37 @@ impl FastFieldReaders {
pub(crate) fn typed_fast_field_reader<TFastValue: FastValue>(
&self,
field: Field,
field_name: &str,
) -> crate::Result<Arc<dyn Column<TFastValue>>> {
self.typed_fast_field_reader_with_idx(field, 0)
self.typed_fast_field_reader_with_idx(field_name, 0)
}
pub(crate) fn typed_fast_field_multi_reader<TFastValue: FastValue>(
&self,
field: Field,
field_name: &str,
) -> crate::Result<MultiValuedFastFieldReader<TFastValue>> {
let idx_reader = self.typed_fast_field_reader(field)?;
let vals_reader = self.typed_fast_field_reader_with_idx(field, 1)?;
let idx_reader = self.typed_fast_field_reader(field_name)?;
let vals_reader = self.typed_fast_field_reader_with_idx(field_name, 1)?;
Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
}
/// Returns the `u64` fast field reader associated with `field`.
///
/// If `field` is not a u64 fast field, this method returns an Error.
pub fn u64(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {
self.check_type(field, FastType::U64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
pub fn u64(&self, field_name: &str) -> crate::Result<Arc<dyn Column<u64>>> {
self.check_type(
self.schema.get_field(field_name)?,
FastType::U64,
Cardinality::SingleValue,
)?;
self.typed_fast_field_reader(field_name)
}
/// Returns the `ip` fast field reader associated with `field`.
///
/// If `field` is not a u128 fast field, this method returns an Error.
pub fn ip_addr(&self, field: Field) -> crate::Result<Arc<dyn Column<Ipv6Addr>>> {
pub fn ip_addr(&self, field_name: &str) -> crate::Result<Arc<dyn Column<Ipv6Addr>>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
let bytes = self.fast_field_data(field, 0)?.read_bytes()?;
Ok(open_u128::<Ipv6Addr>(bytes)?)
@@ -159,9 +166,13 @@ impl FastFieldReaders {
/// Returns the multi-valued `ip` fast field reader associated with `field`.
///
/// If `field` is not a u128 fast field, this method returns an Error.
pub fn ip_addrs(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<Ipv6Addr>> {
pub fn ip_addrs(
&self,
field_name: &str,
) -> crate::Result<MultiValuedFastFieldReader<Ipv6Addr>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;
let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field_name)?;
let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
let vals_reader = open_u128::<Ipv6Addr>(bytes)?;
@@ -172,7 +183,8 @@ impl FastFieldReaders {
/// Returns the `u128` fast field reader associated with `field`.
///
/// If `field` is not a u128 fast field, this method returns an Error.
pub(crate) fn u128(&self, field: Field) -> crate::Result<Arc<dyn Column<u128>>> {
pub(crate) fn u128(&self, field_name: &str) -> crate::Result<Arc<dyn Column<u128>>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
let bytes = self.fast_field_data(field, 0)?.read_bytes()?;
Ok(open_u128::<u128>(bytes)?)
@@ -181,9 +193,11 @@ impl FastFieldReaders {
/// Returns the `u128` multi-valued fast field reader associated with `field`.
///
/// If `field` is not a u128 multi-valued fast field, this method returns an Error.
pub fn u128s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u128>> {
pub fn u128s(&self, field_name: &str) -> crate::Result<MultiValuedFastFieldReader<u128>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
let idx_reader: Arc<dyn Column<u64>> = self.typed_fast_field_reader(field)?;
let idx_reader: Arc<dyn Column<u64>> =
self.typed_fast_field_reader(self.schema.get_field_name(field))?;
let bytes = self.fast_field_data(field, 1)?.read_bytes()?;
let vals_reader = open_u128::<u128>(bytes)?;
@@ -196,80 +210,88 @@ impl FastFieldReaders {
///
/// If not, the fastfield reader will return the u64 value associated with the original
/// FastValue.
pub fn u64_lenient(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {
self.typed_fast_field_reader(field)
pub fn u64_lenient(&self, field_name: &str) -> crate::Result<Arc<dyn Column<u64>>> {
self.typed_fast_field_reader(field_name)
}
/// Returns the `i64` fast field reader associated with `field`.
///
/// If `field` is not a i64 fast field, this method returns an Error.
pub fn i64(&self, field: Field) -> crate::Result<Arc<dyn Column<i64>>> {
pub fn i64(&self, field_name: &str) -> crate::Result<Arc<dyn Column<i64>>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::I64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
self.typed_fast_field_reader(self.schema.get_field_name(field))
}
/// Returns the `date` fast field reader associated with `field`.
///
/// If `field` is not a date fast field, this method returns an Error.
pub fn date(&self, field: Field) -> crate::Result<Arc<dyn Column<DateTime>>> {
pub fn date(&self, field_name: &str) -> crate::Result<Arc<dyn Column<DateTime>>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::Date, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
self.typed_fast_field_reader(field_name)
}
/// Returns the `f64` fast field reader associated with `field`.
///
/// If `field` is not a f64 fast field, this method returns an Error.
pub fn f64(&self, field: Field) -> crate::Result<Arc<dyn Column<f64>>> {
pub fn f64(&self, field_name: &str) -> crate::Result<Arc<dyn Column<f64>>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::F64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
self.typed_fast_field_reader(field_name)
}
/// Returns the `bool` fast field reader associated with `field`.
///
/// If `field` is not a bool fast field, this method returns an Error.
pub fn bool(&self, field: Field) -> crate::Result<Arc<dyn Column<bool>>> {
pub fn bool(&self, field_name: &str) -> crate::Result<Arc<dyn Column<bool>>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::Bool, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
self.typed_fast_field_reader(field_name)
}
/// Returns a `u64s` multi-valued fast field reader associated with `field`.
///
/// If `field` is not a u64 multi-valued fast field, this method returns an Error.
pub fn u64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
pub fn u64s(&self, field_name: &str) -> crate::Result<MultiValuedFastFieldReader<u64>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::U64, Cardinality::MultiValues)?;
self.typed_fast_field_multi_reader(field)
self.typed_fast_field_multi_reader(field_name)
}
/// Returns a `u64s` multi-valued fast field reader associated with `field`, regardless
/// of whether the given field is effectively of type `u64` or not.
///
/// If `field` is not a u64 multi-valued fast field, this method returns an Error.
pub fn u64s_lenient(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
self.typed_fast_field_multi_reader(field)
pub fn u64s_lenient(&self, field_name: &str) -> crate::Result<MultiValuedFastFieldReader<u64>> {
self.typed_fast_field_multi_reader(field_name)
}
/// Returns an `i64s` multi-valued fast field reader associated with `field`.
///
/// If `field` is not a i64 multi-valued fast field, this method returns an Error.
pub fn i64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<i64>> {
pub fn i64s(&self, field_name: &str) -> crate::Result<MultiValuedFastFieldReader<i64>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::I64, Cardinality::MultiValues)?;
self.typed_fast_field_multi_reader(field)
self.typed_fast_field_multi_reader(self.schema.get_field_name(field))
}
/// Returns an `f64s` multi-valued fast field reader associated with `field`.
///
/// If `field` is not a f64 multi-valued fast field, this method returns an Error.
pub fn f64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<f64>> {
pub fn f64s(&self, field_name: &str) -> crate::Result<MultiValuedFastFieldReader<f64>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::F64, Cardinality::MultiValues)?;
self.typed_fast_field_multi_reader(field)
self.typed_fast_field_multi_reader(self.schema.get_field_name(field))
}
/// Returns a `bools` multi-valued fast field reader associated with `field`.
///
/// If `field` is not a bool multi-valued fast field, this method returns an Error.
pub fn bools(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<bool>> {
pub fn bools(&self, field_name: &str) -> crate::Result<MultiValuedFastFieldReader<bool>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::Bool, Cardinality::MultiValues)?;
self.typed_fast_field_multi_reader(field)
self.typed_fast_field_multi_reader(self.schema.get_field_name(field))
}
/// Returns a `time::OffsetDateTime` multi-valued fast field reader associated with
@@ -277,15 +299,17 @@ impl FastFieldReaders {
///
/// If `field` is not a `time::OffsetDateTime` multi-valued fast field, this method returns an
/// Error.
pub fn dates(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<DateTime>> {
pub fn dates(&self, field_name: &str) -> crate::Result<MultiValuedFastFieldReader<DateTime>> {
let field = self.schema.get_field(field_name)?;
self.check_type(field, FastType::Date, Cardinality::MultiValues)?;
self.typed_fast_field_multi_reader(field)
self.typed_fast_field_multi_reader(self.schema.get_field_name(field))
}
/// Returns the `bytes` fast field reader associated with `field`.
///
/// If `field` is not a bytes fast field, returns an Error.
pub fn bytes(&self, field: Field) -> crate::Result<BytesFastFieldReader> {
pub fn bytes(&self, field_name: &str) -> crate::Result<BytesFastFieldReader> {
let field = self.schema.get_field(field_name)?;
let field_entry = self.schema.get_field_entry(field);
if let FieldType::Bytes(bytes_option) = field_entry.field_type() {
if !bytes_option.is_fast() {
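Taken together, these signatures move `FastFieldReaders` from `Field` handles to field names; resolution via `schema.get_field(field_name)?` now happens inside each accessor. A usage sketch with hypothetical fields `score` (single-valued) and `tags` (multi-valued):

```rust
use tantivy::SegmentReader;

fn read_by_name(segment_reader: &SegmentReader) -> tantivy::Result<()> {
    let fast_fields = segment_reader.fast_fields();

    // Single-valued u64 fast field, looked up by name.
    let score_reader = fast_fields.u64("score")?;
    let _first_score = score_reader.get_val(0);

    // Multi-valued u64 fast field, same name-based lookup.
    let tags_reader = fast_fields.u64s("tags")?;
    let mut vals = Vec::new();
    tags_reader.get_vals(0, &mut vals);
    Ok(())
}
```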

View File

@@ -1,322 +0,0 @@
use common::BitSet;
use itertools::Itertools;
use crate::fastfield::AliveBitSet;
use crate::{merge_filtered_segments, Directory, Index, IndexSettings, Segment, SegmentOrdinal};
/// DemuxMapping can be used to reorganize data from multiple segments.
///
/// DemuxMapping is useful in a multitenant setting, in which each document might actually belong
/// to a different tenant. It allows reorganizing documents as follows:
///
/// e.g. if you have two tenant ids TENANT_A and TENANT_B and two segments with
/// the documents (simplified)
/// Seg 1 [TENANT_A, TENANT_B]
/// Seg 2 [TENANT_A, TENANT_B]
///
/// You may want to group your documents to
/// Seg 1 [TENANT_A, TENANT_A]
/// Seg 2 [TENANT_B, TENANT_B]
///
/// Demuxing is the tool for that.
/// Semantically you can define a mapping from [old segment ordinal, old doc_id] -> [new segment
/// ordinal].
#[derive(Debug, Default)]
pub struct DemuxMapping {
/// [index old segment ordinal] -> [index doc_id] = new segment ordinal
mapping: Vec<DocIdToSegmentOrdinal>,
}
/// DocIdToSegmentOrdinal maps from doc_id within a segment to the new segment ordinal for demuxing.
///
/// For every source segment there is a `DocIdToSegmentOrdinal` to distribute its doc_ids.
#[derive(Debug, Default)]
pub struct DocIdToSegmentOrdinal {
doc_id_index_to_segment_ord: Vec<SegmentOrdinal>,
}
impl DocIdToSegmentOrdinal {
/// Creates a new DocIdToSegmentOrdinal sized to `max_doc` doc_ids.
/// Initially all doc_ids point to segment ordinal 0 and need to be set
/// via the `set` method.
pub fn with_max_doc(max_doc: usize) -> Self {
DocIdToSegmentOrdinal {
doc_id_index_to_segment_ord: vec![0; max_doc],
}
}
/// Returns the number of documents in this mapping.
/// It should be equal to the `max_doc` of the segment it targets.
pub fn max_doc(&self) -> u32 {
self.doc_id_index_to_segment_ord.len() as u32
}
/// Associates a doc_id with an output `SegmentOrdinal`.
pub fn set(&mut self, doc_id: u32, segment_ord: SegmentOrdinal) {
self.doc_id_index_to_segment_ord[doc_id as usize] = segment_ord;
}
/// Iterates over the new SegmentOrdinal in the order of the doc_id.
pub fn iter(&self) -> impl Iterator<Item = SegmentOrdinal> + '_ {
self.doc_id_index_to_segment_ord.iter().cloned()
}
}
impl DemuxMapping {
/// Adds a DocIdToSegmentOrdinal. The order of the push calls
/// defines the old segment ordinal. e.g. first push = ordinal 0.
pub fn add(&mut self, segment_mapping: DocIdToSegmentOrdinal) {
self.mapping.push(segment_mapping);
}
/// Returns the old number of segments.
pub fn get_old_num_segments(&self) -> usize {
self.mapping.len()
}
}
fn docs_for_segment_ord(
doc_id_to_segment_ord: &DocIdToSegmentOrdinal,
target_segment_ord: SegmentOrdinal,
) -> AliveBitSet {
let mut bitset = BitSet::with_max_value(doc_id_to_segment_ord.max_doc());
for doc_id in doc_id_to_segment_ord
.iter()
.enumerate()
.filter(|(_doc_id, new_segment_ord)| *new_segment_ord == target_segment_ord)
.map(|(doc_id, _)| doc_id)
{
// add document if segment ordinal = target segment ordinal
bitset.insert(doc_id as u32);
}
AliveBitSet::from_bitset(&bitset)
}
fn get_alive_bitsets(
demux_mapping: &DemuxMapping,
target_segment_ord: SegmentOrdinal,
) -> Vec<AliveBitSet> {
demux_mapping
.mapping
.iter()
.map(|doc_id_to_segment_ord| {
docs_for_segment_ord(doc_id_to_segment_ord, target_segment_ord)
})
.collect_vec()
}
/// Demux the segments according to `demux_mapping`. See `DemuxMapping`.
/// The number of output_directories needs to match the max new segment ordinal from `demux_mapping`.
///
/// The ordinals of `segments` need to match the ordinals provided in `demux_mapping`.
pub fn demux(
segments: &[Segment],
demux_mapping: &DemuxMapping,
target_settings: IndexSettings,
output_directories: Vec<Box<dyn Directory>>,
) -> crate::Result<Vec<Index>> {
let mut indices = vec![];
for (target_segment_ord, output_directory) in output_directories.into_iter().enumerate() {
let alive_bitset = get_alive_bitsets(demux_mapping, target_segment_ord as u32)
.into_iter()
.map(Some)
.collect_vec();
let index = merge_filtered_segments(
segments,
target_settings.clone(),
alive_bitset,
output_directory,
)?;
indices.push(index);
}
Ok(indices)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::collector::TopDocs;
use crate::directory::RamDirectory;
use crate::query::QueryParser;
use crate::schema::{Schema, TEXT};
use crate::{DocAddress, Term};
#[test]
fn test_demux_map_to_alive_bitset() {
let max_value = 2;
let mut demux_mapping = DemuxMapping::default();
// segment ordinal 0 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 0);
demux_mapping.add(doc_id_to_segment);
// segment ordinal 1 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 1);
demux_mapping.add(doc_id_to_segment);
{
let bit_sets_for_demuxing_to_segment_ord_0 = get_alive_bitsets(&demux_mapping, 0);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_0[0].is_deleted(0),
true
);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_0[0].is_deleted(1),
false
);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_0[1].is_deleted(0),
true
);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_0[1].is_deleted(1),
true
);
}
{
let bit_sets_for_demuxing_to_segment_ord_1 = get_alive_bitsets(&demux_mapping, 1);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_1[0].is_deleted(0),
false
);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_1[0].is_deleted(1),
true
);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_1[1].is_deleted(0),
false
);
assert_eq!(
bit_sets_for_demuxing_to_segment_ord_1[1].is_deleted(1),
false
);
}
}
#[test]
fn test_demux_segments() -> crate::Result<()> {
let first_index = {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"texto1"))?;
index_writer.add_document(doc!(text_field=>"texto2"))?;
index_writer.commit()?;
index
};
let second_index = {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"texto3"))?;
index_writer.add_document(doc!(text_field=>"texto4"))?;
index_writer.delete_term(Term::from_field_text(text_field, "4"));
index_writer.commit()?;
index
};
let mut segments: Vec<Segment> = Vec::new();
segments.extend(first_index.searchable_segments()?);
segments.extend(second_index.searchable_segments()?);
let target_settings = first_index.settings().clone();
let mut demux_mapping = DemuxMapping::default();
{
let max_value = 2;
// segment ordinal 0 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 0);
demux_mapping.add(doc_id_to_segment);
// segment ordinal 1 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 1);
demux_mapping.add(doc_id_to_segment);
}
assert_eq!(demux_mapping.get_old_num_segments(), 2);
let demuxed_indices = demux(
&segments,
&demux_mapping,
target_settings,
vec![
Box::<RamDirectory>::default(),
Box::<RamDirectory>::default(),
],
)?;
{
let index = &demuxed_indices[0];
let segments = index.searchable_segments()?;
assert_eq!(segments.len(), 1);
let segment_metas = segments[0].meta();
assert_eq!(segment_metas.num_deleted_docs(), 0);
assert_eq!(segment_metas.num_docs(), 1);
let searcher = index.reader().unwrap().searcher();
{
let text_field = index.schema().get_field("text").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(index, vec![text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
};
assert_eq!(do_search("texto1"), vec![] as Vec<u32>);
assert_eq!(do_search("texto2"), vec![0]);
}
}
{
let index = &demuxed_indices[1];
let segments = index.searchable_segments()?;
assert_eq!(segments.len(), 1);
let segment_metas = segments[0].meta();
assert_eq!(segment_metas.num_deleted_docs(), 0);
assert_eq!(segment_metas.num_docs(), 3);
let searcher = index.reader().unwrap().searcher();
{
let text_field = index.schema().get_field("text").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(index, vec![text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
};
assert_eq!(do_search("texto1"), vec![0]);
assert_eq!(do_search("texto2"), vec![] as Vec<u32>);
assert_eq!(do_search("texto3"), vec![1]);
assert_eq!(do_search("texto4"), vec![2]);
}
}
Ok(())
}
}
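For reference, this is the mapping the deleted tests build; the whole demuxer module is removed by this changeset, so the sketch only illustrates the old API. Two source segments of two docs each: doc 1 of segment 0 lands in output segment 0, everything else in output segment 1.

```rust
// Old, now-removed API: distribute 2x2 docs across two output segments.
fn build_demux_mapping() -> DemuxMapping {
    let mut demux_mapping = DemuxMapping::default();

    // Source segment 0: doc 0 -> output segment 1, doc 1 -> output segment 0.
    let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(2);
    doc_id_to_segment.set(0, 1);
    doc_id_to_segment.set(1, 0);
    demux_mapping.add(doc_id_to_segment);

    // Source segment 1: both docs -> output segment 1.
    let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(2);
    doc_id_to_segment.set(0, 1);
    doc_id_to_segment.set(1, 1);
    demux_mapping.add(doc_id_to_segment);

    assert_eq!(demux_mapping.get_old_num_segments(), 2);
    demux_mapping
}
```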

View File

@@ -99,7 +99,7 @@ pub(crate) fn expect_field_id_for_sort_field(
schema: &Schema,
sort_by_field: &IndexSortByField,
) -> crate::Result<Field> {
schema.get_field(&sort_by_field.field).ok_or_else(|| {
schema.get_field(&sort_by_field.field).map_err(|_| {
TantivyError::InvalidArgument(format!(
"field to sort index by not found: {:?}",
sort_by_field.field
@@ -462,15 +462,14 @@ mod tests_indexsorting {
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let my_number = index.schema().get_field("my_number").unwrap();
index.schema().get_field("my_number").unwrap();
let fast_field = fast_fields.u64(my_number).unwrap();
let fast_field = fast_fields.u64("my_number").unwrap();
assert_eq!(fast_field.get_val(0), 10u64);
assert_eq!(fast_field.get_val(1), 20u64);
assert_eq!(fast_field.get_val(2), 30u64);
let multi_numbers = index.schema().get_field("multi_numbers").unwrap();
let multifield = fast_fields.u64s(multi_numbers).unwrap();
let multifield = fast_fields.u64s("multi_numbers").unwrap();
let mut vals = vec![];
multifield.get_vals(0u32, &mut vals);
assert_eq!(vals, &[] as &[u64]);

View File

@@ -1465,7 +1465,7 @@ mod tests {
let segment_reader = searcher.segment_reader(0);
assert_eq!(segment_reader.num_docs(), 8);
assert_eq!(segment_reader.max_doc(), 10);
let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
let fast_field_reader = segment_reader.fast_fields().u64("id")?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.map(|doc| fast_field_reader.get_val(doc))
@@ -1526,7 +1526,7 @@ mod tests {
let segment_reader = searcher.segment_reader(0);
assert_eq!(segment_reader.num_docs(), 8);
assert_eq!(segment_reader.max_doc(), 10);
let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
let fast_field_reader = segment_reader.fast_fields().u64("id")?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.map(|doc| fast_field_reader.get_val(doc))
@@ -1778,7 +1778,7 @@ mod tests {
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
let ff_reader = segment_reader.fast_fields().u64("id").unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get_val(doc))
@@ -1789,7 +1789,7 @@ mod tests {
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
let ff_reader = segment_reader.fast_fields().u64("id").unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get_val(doc))
@@ -1804,7 +1804,7 @@ mod tests {
let mut all_ips = Vec::new();
let mut num_ips = 0;
for segment_reader in searcher.segment_readers().iter() {
let ip_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
let ip_reader = segment_reader.fast_fields().ip_addrs("ips").unwrap();
for doc in segment_reader.doc_ids_alive() {
let mut vals = vec![];
ip_reader.get_vals(doc, &mut vals);
@@ -1851,7 +1851,7 @@ mod tests {
.segment_readers()
.iter()
.map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
let ff_reader = segment_reader.fast_fields().ip_addrs("ips").unwrap();
ff_reader.get_index_reader().num_docs() as usize
})
.sum();
@@ -1863,7 +1863,7 @@ mod tests {
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().ip_addr(ip_field).unwrap();
let ff_reader = segment_reader.fast_fields().ip_addr("ip").unwrap();
segment_reader.doc_ids_alive().flat_map(move |doc| {
let val = ff_reader.get_val(doc);
if val == Ipv6Addr::from_u128(0) {
@@ -1902,7 +1902,7 @@ mod tests {
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
let ff_reader = segment_reader.fast_fields().ip_addrs("ips").unwrap();
segment_reader.doc_ids_alive().flat_map(move |doc| {
let mut vals = vec![];
ff_reader.get_vals(doc, &mut vals);
@@ -1914,9 +1914,9 @@ mod tests {
// multivalue fast field tests
for segment_reader in searcher.segment_readers().iter() {
let id_reader = segment_reader.fast_fields().u64(id_field).unwrap();
let ff_reader = segment_reader.fast_fields().u64s(multi_numbers).unwrap();
let bool_ff_reader = segment_reader.fast_fields().bools(multi_bools).unwrap();
let id_reader = segment_reader.fast_fields().u64("id").unwrap();
let ff_reader = segment_reader.fast_fields().u64s("multi_numbers").unwrap();
let bool_ff_reader = segment_reader.fast_fields().bools("multi_bools").unwrap();
for doc in segment_reader.doc_ids_alive() {
let mut vals = vec![];
ff_reader.get_vals(doc, &mut vals);
@@ -2109,7 +2109,7 @@ mod tests {
// test facets
for segment_reader in searcher.segment_readers().iter() {
let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
let ff_reader = segment_reader.fast_fields().u64("id").unwrap();
for doc_id in segment_reader.doc_ids_alive() {
let mut facet_ords = Vec::new();
facet_reader.facet_ords(doc_id, &mut facet_ords);

View File

@@ -16,7 +16,7 @@ use crate::fastfield::{
MultiValueIndex, MultiValuedFastFieldReader,
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::indexer::sorted_doc_id_column::RemappedDocIdColumn;
use crate::indexer::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueColumn;
use crate::indexer::SegmentSerializer;
@@ -335,8 +335,10 @@ impl IndexMerger {
.readers
.iter()
.map(|segment_reader| {
let ff_reader: MultiValuedFastFieldReader<u128> =
segment_reader.fast_fields().u128s(field).expect(
let ff_reader: MultiValuedFastFieldReader<u128> = segment_reader
.fast_fields()
.u128s(self.schema.get_field_name(field))
.expect(
"Failed to find index for multivalued field. This is a bug in tantivy, \
please report.",
);
@@ -401,10 +403,13 @@ impl IndexMerger {
.readers
.iter()
.map(|reader| {
let u128_reader: Arc<dyn Column<u128>> = reader.fast_fields().u128(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and it \
should never happen.",
);
let u128_reader: Arc<dyn Column<u128>> = reader
.fast_fields()
.u128(self.schema.get_field_name(field))
.expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);
u128_reader
})
.collect::<Vec<_>>();
@@ -431,7 +436,11 @@ impl IndexMerger {
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let fast_field_accessor = RemappedDocIdColumn::new(&self.readers, doc_id_mapping, field);
let fast_field_accessor = RemappedDocIdColumn::new(
&self.readers,
doc_id_mapping,
self.schema.get_field_name(field),
);
fast_field_serializer.create_auto_detect_u64_fast_field(field, fast_field_accessor)?;
Ok(())
@@ -464,8 +473,8 @@ impl IndexMerger {
reader: &SegmentReader,
sort_by_field: &IndexSortByField,
) -> crate::Result<Arc<dyn Column>> {
let field_id = expect_field_id_for_sort_field(reader.schema(), sort_by_field)?; // for now expect fastfield, but not strictly required
let value_accessor = reader.fast_fields().u64_lenient(field_id)?;
reader.schema().get_field(&sort_by_field.field)?;
let value_accessor = reader.fast_fields().u64_lenient(&sort_by_field.field)?;
Ok(value_accessor)
}
/// Collecting value_accessors into a vec to bind the lifetime.
@@ -569,7 +578,7 @@ impl IndexMerger {
.map(|reader| {
let u64s_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.typed_fast_field_multi_reader::<u64>(field)
.typed_fast_field_multi_reader::<u64>(self.schema.get_field_name(field))
.expect(
"Failed to find index for multivalued field. This is a bug in tantivy, \
please report.",
@@ -613,7 +622,7 @@ impl IndexMerger {
.map(|reader| {
let ff_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.u64s(field)
.u64s(self.schema.get_field_name(field))
.expect("Could not find multivalued u64 fast value reader.");
ff_reader
})
@@ -684,8 +693,11 @@ impl IndexMerger {
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
let fastfield_accessor =
RemappedDocIdMultiValueColumn::new(&self.readers, doc_id_mapping, field);
let fastfield_accessor = RemappedDocIdMultiValueColumn::new(
&self.readers,
doc_id_mapping,
self.schema.get_field_name(field),
);
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx_and_codecs(
field,
fastfield_accessor,
@@ -706,10 +718,13 @@ impl IndexMerger {
.readers
.iter()
.map(|reader| {
let bytes_reader = reader.fast_fields().bytes(field).expect(
"Failed to find index for bytes field. This is a bug in tantivy, please \
report.",
);
let bytes_reader = reader
.fast_fields()
.bytes(self.schema.get_field_name(field))
.expect(
"Failed to find index for bytes field. This is a bug in tantivy, please \
report.",
);
(reader, bytes_reader)
})
.collect::<Vec<_>>();
@@ -1206,7 +1221,10 @@ mod tests {
{
let get_fast_vals = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
searcher.search(&query, &FastFieldTestCollector::for_field(score_field))
searcher.search(
&query,
&FastFieldTestCollector::for_field("score".to_string()),
)
};
let get_fast_vals_bytes = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
@@ -1244,7 +1262,7 @@ mod tests {
let mut index_writer = index.writer_for_tests()?;
let reader = index.reader().unwrap();
let search_term = |searcher: &Searcher, term: Term| {
let collector = FastFieldTestCollector::for_field(score_field);
let collector = FastFieldTestCollector::for_field("score".to_string());
let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
searcher
@@ -1366,7 +1384,7 @@ mod tests {
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.u64("score")
.unwrap();
assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000);
@@ -1374,7 +1392,7 @@ mod tests {
let score_field_reader = searcher
.segment_reader(1)
.fast_fields()
.u64(score_field)
.u64("score")
.unwrap();
assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3);
@@ -1420,7 +1438,7 @@ mod tests {
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.u64("score")
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
@@ -1467,7 +1485,7 @@ mod tests {
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.u64("score")
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
@@ -1514,7 +1532,7 @@ mod tests {
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.u64("score")
.unwrap();
assert_eq!(score_field_reader.min_value(), 6000);
assert_eq!(score_field_reader.max_value(), 7000);
@@ -1836,7 +1854,7 @@ mod tests {
{
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s("intvals").unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]);
@@ -1862,7 +1880,7 @@ mod tests {
{
let segment = searcher.segment_reader(1u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s("intvals").unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[28, 27]);
@@ -1872,7 +1890,7 @@ mod tests {
{
let segment = searcher.segment_reader(2u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s("intvals").unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[20]);
}
@@ -1889,7 +1907,7 @@ mod tests {
{
let searcher = reader.searcher();
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
let ff_reader = segment.fast_fields().u64s("intvals").unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]);

View File

@@ -185,7 +185,7 @@ mod tests {
let segment_reader = searcher.segment_readers().last().unwrap();
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64(int_field).unwrap();
let fast_field = fast_fields.u64("intval").unwrap();
assert_eq!(fast_field.get_val(5), 1u64);
assert_eq!(fast_field.get_val(4), 2u64);
assert_eq!(fast_field.get_val(3), 3u64);
@@ -364,15 +364,13 @@ mod tests {
.unwrap();
let int_field = index.schema().get_field("intval").unwrap();
let multi_numbers = index.schema().get_field("multi_numbers").unwrap();
let bytes_field = index.schema().get_field("bytes").unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_readers().last().unwrap();
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64(int_field).unwrap();
let fast_field = fast_fields.u64("intval").unwrap();
assert_eq!(fast_field.get_val(0), 1u64);
assert_eq!(fast_field.get_val(1), 2u64);
assert_eq!(fast_field.get_val(2), 3u64);
@@ -386,7 +384,7 @@ mod tests {
vals
};
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64s(multi_numbers).unwrap();
let fast_field = fast_fields.u64s("multi_numbers").unwrap();
assert_eq!(&get_vals(&fast_field, 0), &[] as &[u64]);
assert_eq!(&get_vals(&fast_field, 1), &[2, 3]);
assert_eq!(&get_vals(&fast_field, 2), &[3, 4]);
@@ -394,7 +392,7 @@ mod tests {
assert_eq!(&get_vals(&fast_field, 4), &[20]);
assert_eq!(&get_vals(&fast_field, 5), &[1001, 1002]);
let fast_field = fast_fields.bytes(bytes_field).unwrap();
let fast_field = fast_fields.bytes("bytes").unwrap();
assert_eq!(fast_field.get_bytes(0), &[] as &[u8]);
assert_eq!(fast_field.get_bytes(2), &[1, 2, 3]);
assert_eq!(fast_field.get_bytes(5), &[5, 5]);
@@ -527,7 +525,6 @@ mod bench_sorted_index_merge {
order: Order::Desc,
};
let index = create_index(Some(sort_by_field.clone()));
let field = index.schema().get_field("intval").unwrap();
let segments = index.searchable_segments().unwrap();
let merger: IndexMerger =
IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
@@ -535,8 +532,10 @@ mod bench_sorted_index_merge {
b.iter(|| {
let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
let reader = &merger.readers[doc_addr.segment_ord as usize];
let u64_reader: Arc<dyn Column<u64>> =
reader.fast_fields().typed_fast_field_reader(field).expect(
let u64_reader: Arc<dyn Column<u64>> = reader
.fast_fields()
.typed_fast_field_reader("intval")
.expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);

View File

@@ -1,6 +1,5 @@
pub mod delete_queue;
pub mod demuxer;
pub mod doc_id_mapping;
mod doc_opstamp_mapping;
mod flat_map_with_buffer;

View File

@@ -834,20 +834,23 @@ mod tests {
// This is a bit of a contrived example.
let tokens = PreTokenizedString {
text: "contrived-example".to_string(), //< I can't think of a use case where this corner case happens in real life.
tokens: vec![Token { // Not the last token, yet ends after the last token.
offset_from: 0,
offset_to: 14,
position: 0,
text: "long_token".to_string(),
position_length: 3,
},
Token {
offset_from: 0,
offset_to: 14,
position: 1,
text: "short".to_string(),
position_length: 1,
}],
tokens: vec![
Token {
// Not the last token, yet ends after the last token.
offset_from: 0,
offset_to: 14,
position: 0,
text: "long_token".to_string(),
position_length: 3,
},
Token {
offset_from: 0,
offset_to: 14,
position: 1,
text: "short".to_string(),
position_length: 1,
},
],
};
doc.add_pre_tokenized_text(text, tokens);
doc.add_text(text, "hello");

View File

@@ -4,7 +4,6 @@ use fastfield_codecs::Column;
use itertools::Itertools;
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::SegmentReader;
pub(crate) struct RemappedDocIdColumn<'a> {
@@ -41,7 +40,7 @@ impl<'a> RemappedDocIdColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
field: Field,
field: &str,
) -> Self {
let (min_value, max_value) = readers
.iter()

View File

@@ -5,7 +5,6 @@ use fastfield_codecs::Column;
use super::flat_map_with_buffer::FlatMapWithBufferIter;
use crate::fastfield::{MultiValueIndex, MultiValuedFastFieldReader};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::{DocAddress, SegmentReader};
pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
@@ -20,7 +19,7 @@ impl<'a> RemappedDocIdMultiValueColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
field: Field,
field: &str,
) -> Self {
// Our values are bitpacked and we need to know what should be
// our bitwidth and our minimum value before serializing any values.

View File

@@ -299,7 +299,6 @@ pub use crate::core::{
SegmentReader, SingleSegmentIndexWriter,
};
pub use crate::directory::Directory;
pub use crate::indexer::demuxer::*;
pub use crate::indexer::operation::UserOperation;
pub use crate::indexer::{merge_filtered_segments, merge_indices, IndexWriter, PreparedCommit};
pub use crate::postings::Postings;
@@ -995,8 +994,8 @@ pub mod tests {
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
let fast_field_float = schema_builder.add_f64_field("float", FAST);
let text_field = schema_builder.add_text_field("text", TEXT);
let stored_int_field = schema_builder.add_u64_field("stored_int", STORED);
schema_builder.add_text_field("text", TEXT);
schema_builder.add_u64_field("stored_int", STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
@@ -1011,37 +1010,37 @@ pub mod tests {
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
{
let fast_field_reader_res = segment_reader.fast_fields().u64(text_field);
let fast_field_reader_res = segment_reader.fast_fields().u64("text");
assert!(fast_field_reader_res.is_err());
}
{
let fast_field_reader_opt = segment_reader.fast_fields().u64(stored_int_field);
let fast_field_reader_opt = segment_reader.fast_fields().u64("stored_int");
assert!(fast_field_reader_opt.is_err());
}
{
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_signed);
let fast_field_reader_opt = segment_reader.fast_fields().u64("signed");
assert!(fast_field_reader_opt.is_err());
}
{
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_float);
let fast_field_reader_opt = segment_reader.fast_fields().u64("float");
assert!(fast_field_reader_opt.is_err());
}
{
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_unsigned);
let fast_field_reader_opt = segment_reader.fast_fields().u64("unsigned");
assert!(fast_field_reader_opt.is_ok());
let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get_val(0), 4u64)
}
{
let fast_field_reader_res = segment_reader.fast_fields().i64(fast_field_signed);
let fast_field_reader_res = segment_reader.fast_fields().i64("signed");
assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_res.unwrap();
assert_eq!(fast_field_reader.get_val(0), 4i64)
}
{
let fast_field_reader_res = segment_reader.fast_fields().f64(fast_field_float);
let fast_field_reader_res = segment_reader.fast_fields().f64("float");
assert!(fast_field_reader_res.is_ok());
let fast_field_reader = fast_field_reader_res.unwrap();
assert_eq!(fast_field_reader.get_val(0), 4f64)

View File

@@ -10,7 +10,7 @@ pub enum LogicalLiteral {
Term(Term),
Phrase(Vec<(usize, Term)>, u32),
Range {
field: Field,
field: String,
value_type: Type,
lower: Bound<Term>,
upper: Bound<Term>,

View File

@@ -672,7 +672,7 @@ impl QueryParser {
let field_entry = self.schema.get_field_entry(field);
let value_type = field_entry.field_type().value_type();
let logical_ast = LogicalAst::Leaf(Box::new(LogicalLiteral::Range {
field,
field: self.schema.get_field_name(field).to_string(),
value_type,
lower: self.resolve_bound(field, json_path, &lower)?,
upper: self.resolve_bound(field, json_path, &upper)?,
@@ -964,7 +964,7 @@ mod test {
let query = make_query_parser().parse_query("title:[A TO B]").unwrap();
assert_eq!(
format!("{:?}", query),
"RangeQuery { field: Field(0), value_type: Str, left_bound: Included([97]), \
"RangeQuery { field: \"title\", value_type: Str, left_bound: Included([97]), \
right_bound: Included([98]) }"
);
}

View File

@@ -67,7 +67,7 @@ pub(crate) fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
/// let docs_in_the_sixties = RangeQuery::new_u64("year".to_string(), 1960..1970);
/// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
/// assert_eq!(num_60s_books, 2285);
/// Ok(())
@@ -76,7 +76,7 @@ pub(crate) fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// ```
#[derive(Clone, Debug)]
pub struct RangeQuery {
field: Field,
field: String,
value_type: Type,
left_bound: Bound<Vec<u8>>,
right_bound: Bound<Vec<u8>>,
@@ -88,15 +88,12 @@ impl RangeQuery {
/// If the value type is not correct, something may go terribly wrong when
/// the `Weight` object is created.
pub fn new_term_bounds(
field: Field,
field: String,
value_type: Type,
left_bound: &Bound<Term>,
right_bound: &Bound<Term>,
) -> RangeQuery {
let verify_and_unwrap_term = |val: &Term| {
assert_eq!(field, val.field());
val.value_bytes().to_owned()
};
let verify_and_unwrap_term = |val: &Term| val.value_bytes().to_owned();
RangeQuery {
field,
value_type,
@@ -109,7 +106,7 @@ impl RangeQuery {
///
/// If the field is not of the type `i64`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_i64(field: Field, range: Range<i64>) -> RangeQuery {
pub fn new_i64(field: String, range: Range<i64>) -> RangeQuery {
RangeQuery::new_i64_bounds(
field,
Bound::Included(range.start),
@@ -125,11 +122,15 @@ impl RangeQuery {
/// If the field is not of the type `i64`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_i64_bounds(
field: Field,
field: String,
left_bound: Bound<i64>,
right_bound: Bound<i64>,
) -> RangeQuery {
let make_term_val = |val: &i64| Term::from_field_i64(field, *val).value_bytes().to_owned();
let make_term_val = |val: &i64| {
Term::from_field_i64(Field::from_field_id(0), *val)
.value_bytes()
.to_owned()
};
RangeQuery {
field,
value_type: Type::I64,
@@ -142,7 +143,7 @@ impl RangeQuery {
///
/// If the field is not of the type `f64`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_f64(field: Field, range: Range<f64>) -> RangeQuery {
pub fn new_f64(field: String, range: Range<f64>) -> RangeQuery {
RangeQuery::new_f64_bounds(
field,
Bound::Included(range.start),
@@ -158,11 +159,15 @@ impl RangeQuery {
/// If the field is not of the type `f64`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_f64_bounds(
field: Field,
field: String,
left_bound: Bound<f64>,
right_bound: Bound<f64>,
) -> RangeQuery {
let make_term_val = |val: &f64| Term::from_field_f64(field, *val).value_bytes().to_owned();
let make_term_val = |val: &f64| {
Term::from_field_f64(Field::from_field_id(0), *val)
.value_bytes()
.to_owned()
};
RangeQuery {
field,
value_type: Type::F64,
@@ -179,11 +184,15 @@ impl RangeQuery {
/// If the field is not of the type `u64`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_u64_bounds(
field: Field,
field: String,
left_bound: Bound<u64>,
right_bound: Bound<u64>,
) -> RangeQuery {
let make_term_val = |val: &u64| Term::from_field_u64(field, *val).value_bytes().to_owned();
let make_term_val = |val: &u64| {
Term::from_field_u64(Field::from_field_id(0), *val)
.value_bytes()
.to_owned()
};
RangeQuery {
field,
value_type: Type::U64,
@@ -196,7 +205,7 @@ impl RangeQuery {
///
/// If the field is not of the type `u64`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_u64(field: Field, range: Range<u64>) -> RangeQuery {
pub fn new_u64(field: String, range: Range<u64>) -> RangeQuery {
RangeQuery::new_u64_bounds(
field,
Bound::Included(range.start),
@@ -212,12 +221,15 @@ impl RangeQuery {
/// If the field is not of the type `date`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_date_bounds(
field: Field,
field: String,
left_bound: Bound<DateTime>,
right_bound: Bound<DateTime>,
) -> RangeQuery {
let make_term_val =
|val: &DateTime| Term::from_field_date(field, *val).value_bytes().to_owned();
let make_term_val = |val: &DateTime| {
Term::from_field_date(Field::from_field_id(0), *val)
.value_bytes()
.to_owned()
};
RangeQuery {
field,
value_type: Type::Date,
@@ -230,7 +242,7 @@ impl RangeQuery {
///
/// If the field is not of the type `date`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_date(field: Field, range: Range<DateTime>) -> RangeQuery {
pub fn new_date(field: String, range: Range<DateTime>) -> RangeQuery {
RangeQuery::new_date_bounds(
field,
Bound::Included(range.start),
@@ -245,7 +257,7 @@ impl RangeQuery {
///
/// If the field is not of the type `Str`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_str_bounds(field: Field, left: Bound<&str>, right: Bound<&str>) -> RangeQuery {
pub fn new_str_bounds(field: String, left: Bound<&str>, right: Bound<&str>) -> RangeQuery {
let make_term_val = |val: &&str| val.as_bytes().to_vec();
RangeQuery {
field,
@@ -259,7 +271,7 @@ impl RangeQuery {
///
/// If the field is not of the type `Str`, tantivy
/// will panic when the `Weight` object is created.
pub fn new_str(field: Field, range: Range<&str>) -> RangeQuery {
pub fn new_str(field: String, range: Range<&str>) -> RangeQuery {
RangeQuery::new_str_bounds(
field,
Bound::Included(range.start),
@@ -268,22 +280,8 @@ impl RangeQuery {
}
/// Field to search over
pub fn field(&self) -> Field {
self.field
}
/// Lower bound of range
pub fn left_bound(&self) -> Bound<Term> {
map_bound(&self.left_bound, &|bytes| {
Term::from_field_bytes(self.field, bytes)
})
}
/// Upper bound of range
pub fn right_bound(&self) -> Bound<Term> {
map_bound(&self.right_bound, &|bytes| {
Term::from_field_bytes(self.field, bytes)
})
pub fn field(&self) -> &str {
&self.field
}
}
@@ -307,7 +305,9 @@ pub(crate) fn maps_to_u64_fastfield(typ: Type) -> bool {
impl Query for RangeQuery {
fn weight(&self, enable_scoring: EnableScoring<'_>) -> crate::Result<Box<dyn Weight>> {
let schema = enable_scoring.schema();
let field_type = schema.get_field_entry(self.field).field_type();
let field_type = schema
.get_field_entry(schema.get_field(&self.field)?)
.field_type();
let value_type = field_type.value_type();
if value_type != self.value_type {
let err_msg = format!(
@@ -320,7 +320,7 @@ impl Query for RangeQuery {
if field_type.is_fast() && is_type_valid_for_fastfield_range_query(self.value_type) {
if field_type.is_ip_addr() {
Ok(Box::new(IPFastFieldRangeWeight::new(
self.field,
self.field.to_string(),
&self.left_bound,
&self.right_bound,
)))
@@ -335,14 +335,14 @@ impl Query for RangeQuery {
let left_bound = map_bound(&self.left_bound, &parse_from_bytes);
let right_bound = map_bound(&self.right_bound, &parse_from_bytes);
Ok(Box::new(FastFieldRangeWeight::new(
self.field,
self.field.to_string(),
left_bound,
right_bound,
)))
}
} else {
Ok(Box::new(RangeWeight {
field: self.field,
field: self.field.to_string(),
left_bound: self.left_bound.clone(),
right_bound: self.right_bound.clone(),
}))
@@ -351,7 +351,7 @@ impl Query for RangeQuery {
}
pub struct RangeWeight {
field: Field,
field: String,
left_bound: Bound<Vec<u8>>,
right_bound: Bound<Vec<u8>>,
}
@@ -379,7 +379,7 @@ impl Weight for RangeWeight {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field)?;
let inverted_index = reader.inverted_index(reader.schema().get_field(&self.field)?)?;
let term_dict = inverted_index.terms();
let mut term_range = self.term_range(term_dict)?;
while term_range.advance() {
@@ -443,7 +443,7 @@ mod tests {
let reader = index.reader()?;
let searcher = reader.searcher();
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
let docs_in_the_sixties = RangeQuery::new_u64("year".to_string(), 1960u64..1970u64);
// ... or `1960..=1969` if inclusive range is enabled.
let count = searcher.search(&docs_in_the_sixties, &Count)?;
@@ -481,10 +481,13 @@ mod tests {
let count_multiples =
|range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();
assert_eq!(count_multiples(RangeQuery::new_i64(int_field, 10..11)), 9);
assert_eq!(
count_multiples(RangeQuery::new_i64("intfield".to_string(), 10..11)),
9
);
assert_eq!(
count_multiples(RangeQuery::new_i64_bounds(
int_field,
"intfield".to_string(),
Bound::Included(10),
Bound::Included(11)
)),
@@ -492,7 +495,7 @@ mod tests {
);
assert_eq!(
count_multiples(RangeQuery::new_i64_bounds(
int_field,
"intfield".to_string(),
Bound::Excluded(9),
Bound::Included(10)
)),
@@ -500,7 +503,7 @@ mod tests {
);
assert_eq!(
count_multiples(RangeQuery::new_i64_bounds(
int_field,
"intfield".to_string(),
Bound::Included(9),
Bound::Unbounded
)),
@@ -540,12 +543,12 @@ mod tests {
|range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();
assert_eq!(
count_multiples(RangeQuery::new_f64(float_field, 10.0..11.0)),
count_multiples(RangeQuery::new_f64("floatfield".to_string(), 10.0..11.0)),
9
);
assert_eq!(
count_multiples(RangeQuery::new_f64_bounds(
float_field,
"floatfield".to_string(),
Bound::Included(10.0),
Bound::Included(11.0)
)),
@@ -553,7 +556,7 @@ mod tests {
);
assert_eq!(
count_multiples(RangeQuery::new_f64_bounds(
float_field,
"floatfield".to_string(),
Bound::Excluded(9.0),
Bound::Included(10.0)
)),
@@ -561,7 +564,7 @@ mod tests {
);
assert_eq!(
count_multiples(RangeQuery::new_f64_bounds(
float_field,
"floatfield".to_string(),
Bound::Included(9.0),
Bound::Unbounded
)),
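
Taken together, the `RangeQuery` constructors now identify the field by name (a `String`) rather than by `Field` handle. A short sketch of the new call shape, with the index setup assumed:

use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let year = schema_builder.add_u64_field("year", INDEXED);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(year => 1965u64))?;
    writer.add_document(doc!(year => 1985u64))?;
    writer.commit()?;
    let searcher = index.reader()?.searcher();
    // Old: RangeQuery::new_u64(year_field, 1960..1970)
    let docs_in_the_sixties = RangeQuery::new_u64("year".to_string(), 1960..1970);
    assert_eq!(searcher.search(&docs_in_the_sixties, &Count)?, 1);
    Ok(())
}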

View File

@@ -11,18 +11,18 @@ use fastfield_codecs::MonotonicallyMappableToU128;
use super::fast_field_range_query::{FastFieldCardinality, RangeDocSet};
use super::range_query::map_bound;
use crate::query::{ConstScorer, Explanation, Scorer, Weight};
use crate::schema::{Cardinality, Field};
use crate::schema::Cardinality;
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError};
/// `IPFastFieldRangeWeight` uses the ip address fast field to execute range queries.
pub struct IPFastFieldRangeWeight {
field: Field,
field: String,
left_bound: Bound<Ipv6Addr>,
right_bound: Bound<Ipv6Addr>,
}
impl IPFastFieldRangeWeight {
pub fn new(field: Field, left_bound: &Bound<Vec<u8>>, right_bound: &Bound<Vec<u8>>) -> Self {
pub fn new(field: String, left_bound: &Bound<Vec<u8>>, right_bound: &Bound<Vec<u8>>) -> Self {
let parse_ip_from_bytes = |data: &Vec<u8>| {
let ip_u128: u128 =
u128::from_be(BinarySerializable::deserialize(&mut &data[..]).unwrap());
@@ -40,10 +40,13 @@ impl IPFastFieldRangeWeight {
impl Weight for IPFastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let field_type = reader.schema().get_field_entry(self.field).field_type();
let field_type = reader
.schema()
.get_field_entry(reader.schema().get_field(&self.field)?)
.field_type();
match field_type.fastfield_cardinality().unwrap() {
Cardinality::SingleValue => {
let ip_addr_fast_field = reader.fast_fields().ip_addr(self.field)?;
let ip_addr_fast_field = reader.fast_fields().ip_addr(&self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,
@@ -57,7 +60,7 @@ impl Weight for IPFastFieldRangeWeight {
Ok(Box::new(ConstScorer::new(docset, boost)))
}
Cardinality::MultiValues => {
let ip_addr_fast_field = reader.fast_fields().ip_addrs(self.field)?;
let ip_addr_fast_field = reader.fast_fields().ip_addrs(&self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,

View File

@@ -9,18 +9,18 @@ use fastfield_codecs::MonotonicallyMappableToU64;
use super::fast_field_range_query::{FastFieldCardinality, RangeDocSet};
use super::range_query::map_bound;
use crate::query::{ConstScorer, Explanation, Scorer, Weight};
use crate::schema::{Cardinality, Field};
use crate::schema::Cardinality;
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError};
/// `FastFieldRangeWeight` uses the fast field to execute range queries.
pub struct FastFieldRangeWeight {
field: Field,
field: String,
left_bound: Bound<u64>,
right_bound: Bound<u64>,
}
impl FastFieldRangeWeight {
pub fn new(field: Field, left_bound: Bound<u64>, right_bound: Bound<u64>) -> Self {
pub fn new(field: String, left_bound: Bound<u64>, right_bound: Bound<u64>) -> Self {
let left_bound = map_bound(&left_bound, &|val| *val);
let right_bound = map_bound(&right_bound, &|val| *val);
Self {
@@ -33,10 +33,13 @@ impl FastFieldRangeWeight {
impl Weight for FastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let field_type = reader.schema().get_field_entry(self.field).field_type();
let field_type = reader
.schema()
.get_field_entry(reader.schema().get_field(&self.field)?)
.field_type();
match field_type.fastfield_cardinality().unwrap() {
Cardinality::SingleValue => {
let fast_field = reader.fast_fields().u64_lenient(self.field)?;
let fast_field = reader.fast_fields().u64_lenient(&self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,
@@ -48,7 +51,7 @@ impl Weight for FastFieldRangeWeight {
Ok(Box::new(ConstScorer::new(docset, boost)))
}
Cardinality::MultiValues => {
let fast_field = reader.fast_fields().u64s_lenient(self.field)?;
let fast_field = reader.fast_fields().u64s_lenient(&self.field)?;
let value_range = bound_to_value_range(
&self.left_bound,
&self.right_bound,

View File

@@ -109,6 +109,7 @@ impl TermQuery {
} else {
IndexRecordOption::Basic
};
Ok(TermWeight::new(
self.term.clone(),
index_record_option,

View File

@@ -44,7 +44,7 @@ pub struct IndexReaderBuilder {
index: Index,
warmers: Vec<Weak<dyn Warmer>>,
num_warming_threads: usize,
doc_store_cache_size: usize,
doc_store_cache_num_blocks: usize,
}
impl IndexReaderBuilder {
@@ -55,7 +55,7 @@ impl IndexReaderBuilder {
index,
warmers: Vec::new(),
num_warming_threads: 1,
doc_store_cache_size: DOCSTORE_CACHE_CAPACITY,
doc_store_cache_num_blocks: DOCSTORE_CACHE_CAPACITY,
}
}
@@ -72,7 +72,7 @@ impl IndexReaderBuilder {
searcher_generation_inventory.clone(),
)?;
let inner_reader = InnerIndexReader::new(
self.doc_store_cache_size,
self.doc_store_cache_num_blocks,
self.index,
warming_state,
searcher_generation_inventory,
@@ -119,8 +119,11 @@ impl IndexReaderBuilder {
///
/// By default, the doc store readers cache up to `DOCSTORE_CACHE_CAPACITY` (100) decompressed blocks.

#[must_use]
pub fn doc_store_cache_size(mut self, doc_store_cache_size: usize) -> IndexReaderBuilder {
self.doc_store_cache_size = doc_store_cache_size;
pub fn doc_store_cache_num_blocks(
mut self,
doc_store_cache_num_blocks: usize,
) -> IndexReaderBuilder {
self.doc_store_cache_num_blocks = doc_store_cache_num_blocks;
self
}
@@ -151,7 +154,7 @@ impl TryInto<IndexReader> for IndexReaderBuilder {
}
struct InnerIndexReader {
doc_store_cache_size: usize,
doc_store_cache_num_blocks: usize,
index: Index,
warming_state: WarmingState,
searcher: arc_swap::ArcSwap<SearcherInner>,
@@ -161,7 +164,7 @@ struct InnerIndexReader {
impl InnerIndexReader {
fn new(
doc_store_cache_size: usize,
doc_store_cache_num_blocks: usize,
index: Index,
warming_state: WarmingState,
// The searcher_generation_inventory is not used as source, but as target to track the
@@ -172,13 +175,13 @@ impl InnerIndexReader {
let searcher = Self::create_searcher(
&index,
doc_store_cache_size,
doc_store_cache_num_blocks,
&warming_state,
&searcher_generation_counter,
&searcher_generation_inventory,
)?;
Ok(InnerIndexReader {
doc_store_cache_size,
doc_store_cache_num_blocks,
index,
warming_state,
searcher: ArcSwap::from(searcher),
@@ -214,7 +217,7 @@ impl InnerIndexReader {
fn create_searcher(
index: &Index,
doc_store_cache_size: usize,
doc_store_cache_num_blocks: usize,
warming_state: &WarmingState,
searcher_generation_counter: &Arc<AtomicU64>,
searcher_generation_inventory: &Inventory<SearcherGeneration>,
@@ -232,7 +235,7 @@ impl InnerIndexReader {
index.clone(),
segment_readers,
searcher_generation,
doc_store_cache_size,
doc_store_cache_num_blocks,
)?);
warming_state.warm_new_searcher_generation(&searcher.clone().into())?;
@@ -242,7 +245,7 @@ impl InnerIndexReader {
fn reload(&self) -> crate::Result<()> {
let searcher = Self::create_searcher(
&self.index,
self.doc_store_cache_size,
self.doc_store_cache_num_blocks,
&self.warming_state,
&self.searcher_generation_counter,
&self.searcher_generation_inventory,
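
The renamed setter makes the unit explicit: the cache holds a number of decompressed blocks, not a number of bytes. A sketch of the builder usage (a cache of 50 blocks is an arbitrary example):

use tantivy::{Index, IndexReader, ReloadPolicy};

fn open_reader(index: &Index) -> tantivy::Result<IndexReader> {
    index
        .reader_builder()
        // Formerly `doc_store_cache_size`; counts decompressed blocks, not bytes.
        .doc_store_cache_num_blocks(50)
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()
}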

View File

@@ -49,4 +49,17 @@ impl IndexRecordOption {
IndexRecordOption::WithFreqsAndPositions => true,
}
}
/// Downgrades to the next level if the provided `IndexRecordOption` is unavailable.
pub fn downgrade(&self, other: IndexRecordOption) -> IndexRecordOption {
use IndexRecordOption::*;
match (other, self) {
(WithFreqsAndPositions, WithFreqsAndPositions) => WithFreqsAndPositions,
(WithFreqs, WithFreqs) => WithFreqs,
(WithFreqsAndPositions, WithFreqs) => WithFreqs,
(WithFreqs, WithFreqsAndPositions) => WithFreqs,
_ => Basic,
}
}
}
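
In effect, `downgrade` resolves to the lowest level common to both options, falling back to `Basic` whenever the pair cannot agree on frequencies. A sketch of the behaviour implied by the match arms above:

use tantivy::schema::IndexRecordOption;

fn main() {
    // Requesting positions when only frequencies are available falls back to WithFreqs.
    let requested = IndexRecordOption::WithFreqsAndPositions;
    let available = IndexRecordOption::WithFreqs;
    assert_eq!(requested.downgrade(available), IndexRecordOption::WithFreqs);
    // Any combination involving Basic degrades to Basic.
    assert_eq!(
        IndexRecordOption::Basic.downgrade(IndexRecordOption::WithFreqsAndPositions),
        IndexRecordOption::Basic
    );
}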

View File

@@ -11,6 +11,7 @@ use super::ip_options::IpAddrOptions;
use super::*;
use crate::schema::bytes_options::BytesOptions;
use crate::schema::field_type::ValueParsingError;
use crate::TantivyError;
/// Tantivy has a very strict schema.
/// You need to specify in advance whether a field is indexed or not,
@@ -308,8 +309,12 @@ impl Schema {
}
/// Returns the field associated with a given name.
pub fn get_field(&self, field_name: &str) -> Option<Field> {
self.0.fields_map.get(field_name).cloned()
pub fn get_field(&self, field_name: &str) -> crate::Result<Field> {
self.0
.fields_map
.get(field_name)
.cloned()
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))
}
/// Create document from a named doc.
@@ -319,7 +324,7 @@ impl Schema {
) -> Result<Document, DocParsingError> {
let mut document = Document::new();
for (field_name, values) in named_doc.0 {
if let Some(field) = self.get_field(&field_name) {
if let Ok(field) = self.get_field(&field_name) {
for value in values {
document.add_field_value(field, value);
}
@@ -360,7 +365,7 @@ impl Schema {
) -> Result<Document, DocParsingError> {
let mut doc = Document::default();
for (field_name, json_value) in json_obj {
if let Some(field) = self.get_field(&field_name) {
if let Ok(field) = self.get_field(&field_name) {
let field_entry = self.get_field_entry(field);
let field_type = field_entry.field_type();
match json_value {
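
With `get_field` now returning `crate::Result<Field>`, callers can use `?` directly instead of unwrapping an `Option`. A minimal sketch:

use tantivy::schema::{Schema, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    // Known fields resolve as before.
    assert!(schema.get_field("title").is_ok());
    // Unknown fields now surface as TantivyError::FieldNotFound instead of None.
    assert!(schema.get_field("missing").is_err());
}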

View File

@@ -375,7 +375,8 @@ where B: AsRef<[u8]>
///
/// Do NOT rely on this byte representation in the index.
/// This value is likely to change in the future.
pub(crate) fn as_slice(&self) -> &[u8] {
#[inline(always)]
pub fn as_slice(&self) -> &[u8] {
self.0.as_ref()
}
}

View File

@@ -30,7 +30,7 @@ impl TextOptions {
self.stored
}
/// Returns true iff the value is a fast field.
/// Returns true if and only if the value is a fast field.
pub fn is_fast(&self) -> bool {
self.fast
}

View File

@@ -90,7 +90,7 @@ impl CheckpointBlock {
return Ok(());
}
let mut doc = read_u32_vint(data);
let mut start_offset = read_u32_vint(data) as usize;
let mut start_offset = VInt::deserialize_u64(data)? as usize;
for _ in 0..len {
let num_docs = read_u32_vint(data);
let block_num_bytes = read_u32_vint(data) as usize;
@@ -147,6 +147,15 @@ mod tests {
test_aux_ser_deser(&checkpoints)
}
#[test]
fn test_block_serialize_large_byte_range() -> io::Result<()> {
let checkpoints = vec![Checkpoint {
doc_range: 10..12,
byte_range: 8_000_000_000..9_000_000_000,
}];
test_aux_ser_deser(&checkpoints)
}
#[test]
fn test_block_serialize() -> io::Result<()> {
let offsets: Vec<usize> = (0..11).map(|i| i * i * i).collect();
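
The new test exercises a byte range beyond u32::MAX, which the old `read_u32_vint` path silently truncated. A tiny illustration of the failure mode this fixes:

fn main() {
    // Block start offsets in a doc store larger than 4GB exceed u32::MAX.
    let start_offset: u64 = 8_000_000_000;
    // The old deserialization read the offset back as a u32, losing the high bits.
    let truncated = start_offset as u32;
    assert_ne!(u64::from(truncated), start_offset);
}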

View File

@@ -4,8 +4,8 @@
//! order to be handled in the `Store`.
//!
//! Internally, documents (or rather their stored fields) are serialized to a buffer.
//! When the buffer exceeds 16K, the buffer is compressed using `brotli`, `LZ4` or `snappy`
//! and the resulting block is written to disk.
//! When the buffer exceeds `block_size` (defaults to 16K), the buffer is compressed using `brotli`,
//! `LZ4` or `snappy` and the resulting block is written to disk.
//!
//! One can then request a specific `DocId`.
//! A skip list helps navigate to the right block,
@@ -28,8 +28,6 @@
//! - at the segment level, the
//! [`SegmentReader`'s `doc` method](../struct.SegmentReader.html#method.doc)
//! - at the index level, the [`Searcher::doc()`](crate::Searcher::doc) method
//!
//! !
mod compressors;
mod decompressors;
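
The lookup path described above is what `Searcher::doc` drives end to end. A sketch, with the document address values purely illustrative:

use tantivy::schema::Document;
use tantivy::{DocAddress, Searcher};

fn fetch(searcher: &Searcher) -> tantivy::Result<Document> {
    // The skip list locates the block holding doc 5 of segment 0; the block
    // is then decompressed (or served from the LRU cache) and the doc read.
    searcher.doc(DocAddress::new(0, 5))
}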

View File

@@ -114,7 +114,10 @@ impl Sum for CacheStats {
impl StoreReader {
/// Opens a store reader
pub fn open(store_file: FileSlice, cache_size: usize) -> io::Result<StoreReader> {
///
/// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
/// The size of blocks is configurable; this should be reflected in the chosen cache size.
pub fn open(store_file: FileSlice, cache_num_blocks: usize) -> io::Result<StoreReader> {
let (footer, data_and_offset) = DocStoreFooter::extract_footer(store_file)?;
let (data_file, offset_index_file) = data_and_offset.split(footer.offset as usize);
@@ -125,8 +128,8 @@ impl StoreReader {
decompressor: footer.decompressor,
data: data_file,
cache: BlockCache {
cache: NonZeroUsize::new(cache_size)
.map(|cache_size| Mutex::new(LruCache::new(cache_size))),
cache: NonZeroUsize::new(cache_num_blocks)
.map(|cache_num_blocks| Mutex::new(LruCache::new(cache_num_blocks))),
cache_hits: Default::default(),
cache_misses: Default::default(),
},

View File

@@ -312,7 +312,7 @@ mod tests {
bitpack.write(51, 6, &mut buffer).unwrap();
assert_eq!(compute_num_bits(51), 6);
bitpack.close(&mut buffer).unwrap();
assert_eq!(buffer.len(), 3 + 7);
assert_eq!(buffer.len(), 3);
assert_eq!(extract_bits(&buffer[..], 0, 9), 321u64);
assert_eq!(extract_bits(&buffer[..], 9, 2), 2u64);
assert_eq!(extract_bits(&buffer[..], 11, 6), 51u64);
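
A quick sanity check for the updated expectation: the three writes pack 9 + 2 + 6 = 17 bits, and with the former 7-byte padding removed, the buffer only needs ceil(17/8) = 3 bytes:

fn main() {
    let total_bits = 9 + 2 + 6; // bits written in the test above
    let bytes_needed = (total_bits + 7) / 8; // round up to whole bytes
    assert_eq!(bytes_needed, 3);
}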

View File

@@ -69,14 +69,16 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
pub(crate) fn sstable_delta_reader_for_key_range(
&self,
key_range: impl RangeBounds<[u8]>,
limit: Option<u64>,
) -> io::Result<DeltaReader<'static, TSSTable::ValueReader>> {
let slice = self.file_slice_for_range(key_range);
let slice = self.file_slice_for_range(key_range, limit);
let data = slice.read_bytes()?;
Ok(TSSTable::delta_reader(data))
}
/// This function returns a file slice covering a set of sstable blocks
/// that include the key range passed in arguments.
/// that include the key range passed in arguments. Optionally returns
/// only blocks for up to `limit` matching terms.
///
/// It works by identifying
/// - `first_block`: the block containing the start boundary key
@@ -92,26 +94,56 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// In the rare edge case where a user asks for `(start_key, end_key]`
/// and `start_key` happens to be the last key of a block, we return a
/// slice whose first block was not strictly necessary.
pub fn file_slice_for_range(&self, key_range: impl RangeBounds<[u8]>) -> FileSlice {
let start_bound: Bound<usize> = match key_range.start_bound() {
pub fn file_slice_for_range(
&self,
key_range: impl RangeBounds<[u8]>,
limit: Option<u64>,
) -> FileSlice {
let first_block_id = match key_range.start_bound() {
Bound::Included(key) | Bound::Excluded(key) => {
let Some(first_block_addr) = self.sstable_index.search_block(key) else {
let Some(first_block_id) = self.sstable_index.locate_with_key(key) else {
return FileSlice::empty();
};
Bound::Included(first_block_addr.byte_range.start)
Some(first_block_id)
}
Bound::Unbounded => Bound::Unbounded,
Bound::Unbounded => None,
};
let end_bound: Bound<usize> = match key_range.end_bound() {
Bound::Included(key) | Bound::Excluded(key) => {
if let Some(block_addr) = self.sstable_index.search_block(key) {
Bound::Excluded(block_addr.byte_range.end)
let last_block_id = match key_range.end_bound() {
Bound::Included(key) | Bound::Excluded(key) => self.sstable_index.locate_with_key(key),
Bound::Unbounded => None,
};
let start_bound = if let Some(first_block_id) = first_block_id {
let Some(block_addr) = self.sstable_index.get_block(first_block_id) else {
return FileSlice::empty();
};
Bound::Included(block_addr.byte_range.start)
} else {
Bound::Unbounded
};
let last_block_id = if let Some(limit) = limit {
let second_block_id = first_block_id.map(|id| id + 1).unwrap_or(0);
if let Some(block_addr) = self.sstable_index.get_block(second_block_id) {
let ordinal_limit = block_addr.first_ordinal + limit;
let last_block_limit = self.sstable_index.locate_with_ord(ordinal_limit);
if let Some(last_block_id) = last_block_id {
Some(last_block_id.min(last_block_limit))
} else {
Bound::Unbounded
Some(last_block_limit)
}
} else {
last_block_id
}
Bound::Unbounded => Bound::Unbounded,
} else {
last_block_id
};
let end_bound = last_block_id
.and_then(|block_id| self.sstable_index.get_block(block_id))
.map(|block_addr| Bound::Excluded(block_addr.byte_range.end))
.unwrap_or(Bound::Unbounded);
self.sstable_slice.slice((start_bound, end_bound))
}
@@ -156,10 +188,15 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// Returns the ordinal associated with a given term.
pub fn term_ord<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TermOrdinal>> {
let mut term_ord = 0u64;
let key_bytes = key.as_ref();
let mut sstable_reader = self.sstable_reader()?;
while sstable_reader.advance().unwrap_or(false) {
let Some(block_addr) = self.sstable_index.get_block_with_key(key_bytes) else {
return Ok(None);
};
let mut term_ord = block_addr.first_ordinal;
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
while sstable_reader.advance()? {
if sstable_reader.key() == key_bytes {
return Ok(Some(term_ord));
}
@@ -178,22 +215,32 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// Regardless of whether the term is found or not,
/// the buffer may be modified.
pub fn ord_to_term(&self, ord: TermOrdinal, bytes: &mut Vec<u8>) -> io::Result<bool> {
let mut sstable_reader = self.sstable_reader()?;
bytes.clear();
for _ in 0..(ord + 1) {
if !sstable_reader.advance().unwrap_or(false) {
// find block in which the term would be
let block_addr = self.sstable_index.get_block_with_ord(ord);
let first_ordinal = block_addr.first_ordinal;
// then search inside that block only
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
for _ in first_ordinal..=ord {
if !sstable_reader.advance()? {
return Ok(false);
}
}
bytes.clear();
bytes.extend_from_slice(sstable_reader.key());
Ok(true)
}
/// Returns the value associated with the given term ordinal.
pub fn term_info_from_ord(&self, term_ord: TermOrdinal) -> io::Result<Option<TSSTable::Value>> {
let mut sstable_reader = self.sstable_reader()?;
for _ in 0..(term_ord + 1) {
if !sstable_reader.advance().unwrap_or(false) {
// find block in which the term would be
let block_addr = self.sstable_index.get_block_with_ord(term_ord);
let first_ordinal = block_addr.first_ordinal;
// then search inside that block only
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
for _ in first_ordinal..=term_ord {
if !sstable_reader.advance()? {
return Ok(None);
}
}
@@ -202,10 +249,10 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// Looks up the value corresponding to the key.
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TSSTable::Value>> {
if let Some(block_addr) = self.sstable_index.search_block(key.as_ref()) {
if let Some(block_addr) = self.sstable_index.get_block_with_key(key.as_ref()) {
let mut sstable_reader = self.sstable_reader_block(block_addr)?;
let key_bytes = key.as_ref();
while sstable_reader.advance().unwrap_or(false) {
while sstable_reader.advance()? {
if sstable_reader.key() == key_bytes {
let value = sstable_reader.value().clone();
return Ok(Some(value));
@@ -217,10 +264,10 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
/// Looks up the value corresponding to the key.
pub async fn get_async<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TSSTable::Value>> {
if let Some(block_addr) = self.sstable_index.search_block(key.as_ref()) {
if let Some(block_addr) = self.sstable_index.get_block_with_key(key.as_ref()) {
let mut sstable_reader = self.sstable_reader_block_async(block_addr).await?;
let key_bytes = key.as_ref();
while sstable_reader.advance().unwrap_or(false) {
while sstable_reader.advance()? {
if sstable_reader.key() == key_bytes {
let value = sstable_reader.value().clone();
return Ok(Some(value));
@@ -259,3 +306,192 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::ops::Range;
use std::sync::{Arc, Mutex};
use common::OwnedBytes;
use super::Dictionary;
use crate::MonotonicU64SSTable;
#[derive(Debug)]
struct PermissionedHandle {
bytes: OwnedBytes,
allowed_range: Mutex<Range<usize>>,
}
impl PermissionedHandle {
fn new(bytes: Vec<u8>) -> Self {
let bytes = OwnedBytes::new(bytes);
PermissionedHandle {
allowed_range: Mutex::new(0..bytes.len()),
bytes,
}
}
fn restrict(&self, range: Range<usize>) {
*self.allowed_range.lock().unwrap() = range;
}
}
impl common::HasLen for PermissionedHandle {
fn len(&self) -> usize {
self.bytes.len()
}
}
impl common::file_slice::FileHandle for PermissionedHandle {
fn read_bytes(&self, range: Range<usize>) -> std::io::Result<OwnedBytes> {
let allowed_range = self.allowed_range.lock().unwrap();
if !allowed_range.contains(&range.start) || !allowed_range.contains(&(range.end - 1)) {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("invalid range, allowed {allowed_range:?}, requested {range:?}"),
));
}
Ok(self.bytes.slice(range))
}
}
fn make_test_sstable() -> (Dictionary<MonotonicU64SSTable>, Arc<PermissionedHandle>) {
let mut builder = Dictionary::<MonotonicU64SSTable>::builder(Vec::new()).unwrap();
// this makes 256k keys, enough to fill multiple blocks.
for elem in 0..0x3ffff {
let key = format!("{elem:05X}").into_bytes();
builder.insert_cannot_fail(&key, &elem);
}
let table = builder.finish().unwrap();
let table = Arc::new(PermissionedHandle::new(table));
let slice = common::file_slice::FileSlice::new(table.clone());
let dictionary = Dictionary::<MonotonicU64SSTable>::open(slice).unwrap();
// if the last block is id 0, tests are meaningless
assert_ne!(dictionary.sstable_index.locate_with_ord(u64::MAX), 0);
assert_eq!(dictionary.num_terms(), 0x3ffff);
(dictionary, table)
}
#[test]
fn test_ord_term_conversion() {
let (dic, slice) = make_test_sstable();
let block = dic.sstable_index.get_block_with_ord(100_000);
slice.restrict(block.byte_range);
let mut res = Vec::new();
// middle of a block
assert!(dic.ord_to_term(100_000, &mut res).unwrap());
assert_eq!(res, format!("{:05X}", 100_000).into_bytes());
assert_eq!(dic.term_info_from_ord(100_000).unwrap().unwrap(), 100_000);
assert_eq!(dic.get(&res).unwrap().unwrap(), 100_000);
assert_eq!(dic.term_ord(&res).unwrap().unwrap(), 100_000);
// start of a block
assert!(dic.ord_to_term(block.first_ordinal, &mut res).unwrap());
assert_eq!(res, format!("{:05X}", block.first_ordinal).into_bytes());
assert_eq!(
dic.term_info_from_ord(block.first_ordinal)
.unwrap()
.unwrap(),
block.first_ordinal
);
assert_eq!(dic.get(&res).unwrap().unwrap(), block.first_ordinal);
assert_eq!(dic.term_ord(&res).unwrap().unwrap(), block.first_ordinal);
// end of a block
let ordinal = block.first_ordinal - 1;
let new_range = dic.sstable_index.get_block_with_ord(ordinal).byte_range;
slice.restrict(new_range);
assert!(dic.ord_to_term(ordinal, &mut res).unwrap());
assert_eq!(res, format!("{:05X}", ordinal).into_bytes());
assert_eq!(dic.term_info_from_ord(ordinal).unwrap().unwrap(), ordinal);
assert_eq!(dic.get(&res).unwrap().unwrap(), ordinal);
assert_eq!(dic.term_ord(&res).unwrap().unwrap(), ordinal);
// before first block
// 1st block must be loaded for key-related operations
let block = dic.sstable_index.get_block_with_ord(0);
slice.restrict(block.byte_range);
assert!(dic.get(&b"$$$").unwrap().is_none());
assert!(dic.term_ord(&b"$$$").unwrap().is_none());
// after last block
// last block must be loaded for ord related operations
let ordinal = 0x40000 + 10;
let new_range = dic.sstable_index.get_block_with_ord(ordinal).byte_range;
slice.restrict(new_range);
assert!(!dic.ord_to_term(ordinal, &mut res).unwrap());
assert!(dic.term_info_from_ord(ordinal).unwrap().is_none());
// last block isn't required to be loaded for key related operations
slice.restrict(0..0);
assert!(dic.get(&b"~~~").unwrap().is_none());
assert!(dic.term_ord(&b"~~~").unwrap().is_none());
}
#[test]
fn test_range() {
let (dic, slice) = make_test_sstable();
let start = dic
.sstable_index
.get_block_with_key(b"10000")
.unwrap()
.byte_range;
let end = dic
.sstable_index
.get_block_with_key(b"18000")
.unwrap()
.byte_range;
slice.restrict(start.start..end.end);
let mut stream = dic.range().ge(b"10000").lt(b"18000").into_stream().unwrap();
for i in 0x10000..0x18000 {
assert!(stream.advance());
assert_eq!(stream.term_ord(), i);
assert_eq!(stream.value(), &i);
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
}
assert!(!stream.advance());
// verify that limiting the number of results reduces the amount of data read
slice.restrict(start.start..(end.end - 1));
let mut stream = dic
.range()
.ge(b"10000")
.lt(b"18000")
.limit(0xfff)
.into_stream()
.unwrap();
for i in 0x10000..0x10fff {
assert!(stream.advance());
assert_eq!(stream.term_ord(), i);
assert_eq!(stream.value(), &i);
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
}
// more matching elements may follow; exactly how many is undefined
slice.restrict(0..slice.bytes.len());
let mut stream = dic.stream().unwrap();
for i in 0..0x3ffff {
assert!(stream.advance());
assert_eq!(stream.term_ord(), i);
assert_eq!(stream.value(), &i);
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
}
assert!(!stream.advance());
}
}

View File

@@ -3,7 +3,7 @@ use std::ops::Range;
use serde::{Deserialize, Serialize};
use crate::{common_prefix_len, SSTableDataCorruption};
use crate::{common_prefix_len, SSTableDataCorruption, TermOrdinal};
#[derive(Default, Debug, Serialize, Deserialize)]
pub struct SSTableIndex {
@@ -11,15 +11,61 @@ pub struct SSTableIndex {
}
impl SSTableIndex {
/// Load an index from its binary representation
pub fn load(data: &[u8]) -> Result<SSTableIndex, SSTableDataCorruption> {
ciborium::de::from_reader(data).map_err(|_| SSTableDataCorruption)
}
pub fn search_block(&self, key: &[u8]) -> Option<BlockAddr> {
/// Get the [`BlockAddr`] of the requested block.
pub(crate) fn get_block(&self, block_id: usize) -> Option<BlockAddr> {
self.blocks
.iter()
.find(|block| &block.last_key_or_greater[..] >= key)
.map(|block| block.block_addr.clone())
.get(block_id)
.map(|block_meta| block_meta.block_addr.clone())
}
/// Get the block id of the block that would contain `key`.
///
/// Returns None if `key` is lexicographically after the last key recorded.
pub(crate) fn locate_with_key(&self, key: &[u8]) -> Option<usize> {
let pos = self
.blocks
.binary_search_by_key(&key, |block| &block.last_key_or_greater);
match pos {
Ok(pos) => Some(pos),
Err(pos) => {
if pos < self.blocks.len() {
Some(pos)
} else {
// after end of last block: no block matches
None
}
}
}
}
/// Get the [`BlockAddr`] of the block that would contain `key`.
///
/// Returns None if `key` is lexicographically after the last key recorded.
pub fn get_block_with_key(&self, key: &[u8]) -> Option<BlockAddr> {
self.locate_with_key(key).and_then(|id| self.get_block(id))
}
pub(crate) fn locate_with_ord(&self, ord: TermOrdinal) -> usize {
let pos = self
.blocks
.binary_search_by_key(&ord, |block| block.block_addr.first_ordinal);
match pos {
Ok(pos) => pos,
// Err(0) can't happen as the sstable starts with ordinal zero
Err(pos) => pos - 1,
}
}
/// Get the [`BlockAddr`] of the block containing the `ord`-th term.
pub(crate) fn get_block_with_ord(&self, ord: TermOrdinal) -> BlockAddr {
// locate_with_ord always returns an index within range
self.get_block(self.locate_with_ord(ord)).unwrap()
}
}
@@ -30,7 +76,7 @@ pub struct BlockAddr {
}
#[derive(Debug, Serialize, Deserialize)]
struct BlockMeta {
pub(crate) struct BlockMeta {
/// Any byte string that is lexicographically greater or equal to
/// the last key in the block,
/// and yet strictly smaller than the first key in the next block.
@@ -98,26 +144,38 @@ mod tests {
fn test_sstable_index() {
let mut sstable_builder = SSTableIndexBuilder::default();
sstable_builder.add_block(b"aaa", 10..20, 0u64);
sstable_builder.add_block(b"bbbbbbb", 20..30, 564);
sstable_builder.add_block(b"bbbbbbb", 20..30, 5u64);
sstable_builder.add_block(b"ccc", 30..40, 10u64);
sstable_builder.add_block(b"dddd", 40..50, 15u64);
let mut buffer: Vec<u8> = Vec::new();
sstable_builder.serialize(&mut buffer).unwrap();
let sstable_index = SSTableIndex::load(&buffer[..]).unwrap();
assert_eq!(
sstable_index.search_block(b"bbbde"),
sstable_index.get_block_with_key(b"bbbde"),
Some(BlockAddr {
first_ordinal: 10u64,
byte_range: 30..40
})
);
assert_eq!(sstable_index.locate_with_key(b"aa").unwrap(), 0);
assert_eq!(sstable_index.locate_with_key(b"aaa").unwrap(), 0);
assert_eq!(sstable_index.locate_with_key(b"aab").unwrap(), 1);
assert_eq!(sstable_index.locate_with_key(b"ccc").unwrap(), 2);
assert!(sstable_index.locate_with_key(b"e").is_none());
assert_eq!(sstable_index.locate_with_ord(0), 0);
assert_eq!(sstable_index.locate_with_ord(1), 0);
assert_eq!(sstable_index.locate_with_ord(4), 0);
assert_eq!(sstable_index.locate_with_ord(5), 1);
assert_eq!(sstable_index.locate_with_ord(100), 3);
}
#[test]
fn test_sstable_with_corrupted_data() {
let mut sstable_builder = SSTableIndexBuilder::default();
sstable_builder.add_block(b"aaa", 10..20, 0u64);
sstable_builder.add_block(b"bbbbbbb", 20..30, 564);
sstable_builder.add_block(b"bbbbbbb", 20..30, 5u64);
sstable_builder.add_block(b"ccc", 30..40, 10u64);
sstable_builder.add_block(b"dddd", 40..50, 15u64);
let mut buffer: Vec<u8> = Vec::new();
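
The `locate_with_ord` asserts above follow directly from a binary search over each block's `first_ordinal`. A standalone sketch of that logic with the same block layout (first ordinals 0, 5, 10, 15):

fn locate_with_ord(first_ordinals: &[u64], ord: u64) -> usize {
    match first_ordinals.binary_search(&ord) {
        Ok(pos) => pos,
        // Err(0) cannot happen: the first block always starts at ordinal 0.
        Err(pos) => pos - 1,
    }
}

fn main() {
    let first_ordinals = [0u64, 5, 10, 15];
    assert_eq!(locate_with_ord(&first_ordinals, 4), 0);
    assert_eq!(locate_with_ord(&first_ordinals, 5), 1);
    assert_eq!(locate_with_ord(&first_ordinals, 100), 3);
}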

View File

@@ -19,6 +19,7 @@ where
automaton: A,
lower: Bound<Vec<u8>>,
upper: Bound<Vec<u8>>,
limit: Option<u64>,
}
fn bound_as_byte_slice(bound: &Bound<Vec<u8>>) -> Bound<&[u8]> {
@@ -41,6 +42,7 @@ where
automaton,
lower: Bound::Unbounded,
upper: Bound::Unbounded,
limit: None,
}
}
@@ -68,24 +70,46 @@ where
self
}
/// Load no more data than what's required to get `limit`
/// matching entries.
///
/// The resulting [`Streamer`] can still return marginally
/// more than `limit` elements.
pub fn limit(mut self, limit: u64) -> Self {
self.limit = Some(limit);
self
}
/// Creates the stream corresponding to the range
/// of terms defined using the `StreamerBuilder`.
pub fn into_stream(self) -> io::Result<Streamer<'a, TSSTable, A>> {
// TODO Optimize by skipping to the right first block.
let start_state = self.automaton.start();
let key_range = (
bound_as_byte_slice(&self.lower),
bound_as_byte_slice(&self.upper),
);
let first_term = match &key_range.0 {
Bound::Included(key) | Bound::Excluded(key) => self
.term_dict
.sstable_index
.get_block_with_key(key)
.map(|block| block.first_ordinal)
.unwrap_or(0),
Bound::Unbounded => 0,
};
let delta_reader = self
.term_dict
.sstable_delta_reader_for_key_range(key_range)?;
.sstable_delta_reader_for_key_range(key_range, self.limit)?;
Ok(Streamer {
automaton: self.automaton,
states: vec![start_state],
delta_reader,
key: Vec::new(),
term_ord: None,
term_ord: first_term.checked_sub(1),
lower_bound: self.lower,
upper_bound: self.upper,
})
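
A sketch of the new `limit` knob from a caller's perspective. The module paths and re-exports here are assumptions mirroring the dictionary tests above, and `dict` is a `Dictionary<MonotonicU64SSTable>` built as in those tests:

use std::io;

use crate::{Dictionary, MonotonicU64SSTable};

fn first_keys(dict: &Dictionary<MonotonicU64SSTable>) -> io::Result<Vec<Vec<u8>>> {
    // Only enough blocks to cover ~10 matching terms are loaded; the stream
    // may still yield marginally more than 10 entries.
    let mut stream = dict.range().ge(b"10000").limit(10).into_stream()?;
    let mut keys = Vec::new();
    while stream.advance() {
        keys.push(stream.key().to_vec());
    }
    Ok(keys)
}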

View File

@@ -16,7 +16,7 @@ pub trait ValueReader: Default {
/// Loads a block.
///
/// Returns the number of bytes that were written.
/// Returns the number of bytes that were read.
fn load(&mut self, data: &[u8]) -> io::Result<usize>;
}