Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-28 04:52:55 +00:00)

Compare commits: test_parse...0.24.2 (86 commits)

Commits (SHA1):
a3175f5341, 203b0eebf1, eb37dbee26, c6e77d27c6, db6587ed9b, 3fa90e70e2, 6ab4102253, 11c6329ca5,
ab8bb93928, 2b668bd2bf, 97a7137ef8, ffa7cdf397, caf1275e60, fb12b7be28, 6f77083493, cd7745da7a,
eb8304dee9, e5638112a9, 81110152fb, ae88a7ece5, bdd5f80fd9, 3f62ef22e5, 8102e19e48, 175c853ea7,
c992cf3f37, 83f6c2f265, 17bf8aa092, 6fc0e96ff8, 06d2dcf469, b681ec9335, da2ff5712a, 18da402e27,
18ae3ffe94, 0a37b7acaa, 1a9fd885dd, 3e660905a7, 0c2b984cb4, a69b1c609c, 8d4a6fcaba, feced4762f,
0149317c5a, 3fcb6f9597, 388fcd763b, e488f9e6a2, 9426d5be7b, d5d2d41264, 80f5f1ecd4, 519e5d2ed1,
df2d52a84e, 371dba9414, 0afabad494, 89b052cd42, c48c649436, 58c0739953, e7daf69de9, f060e86bc6,
0368162ef0, e843c71015, 5cea16ef9f, 4aa8cd2470, 4d4ee1b0ac, 43c89b4360, d281ca3e65, be17daf658,
6ca84a61fa, 037d12c9c9, 71cf19870b, 175a529c41, fe0c7c5408, 148594f0f9, 8edb439440, dfff5f3bcb,
ebf4d84553, 42efc7f7c8, 192395c311, a1447cc9c2, c39d91f827, 32b6e9711b, 24c5dc2398, 9e2ddec4b3,
1f6a8e74bb, 7e901f523b, 3c30a41c14, 0f99d4f420, 6e02c5cb25, 0bac391291
@@ -7,6 +7,8 @@ Tantivy 0.23 will be backwards compatible with indices created with v0.22 and v0
- fix bug that causes out-of-order sstable key. [#2445](https://github.com/quickwit-oss/tantivy/pull/2445)(@fulmicoton)
- fix ReferenceValue API flaw [#2372](https://github.com/quickwit-oss/tantivy/pull/2372)(@PSeitz)
- fix `OwnedBytes` debug panic [#2512](https://github.com/quickwit-oss/tantivy/pull/2512)(@b41sh)
- catch panics during merges [#2582](https://github.com/quickwit-oss/tantivy/pull/2582)(@rdettai)
- switch from u32 to usize in bitpacker. This enables multivalued columns larger than 4GB, which crashed during merge before. [#2581](https://github.com/quickwit-oss/tantivy/pull/2581) [#2586](https://github.com/quickwit-oss/tantivy/pull/2586)(@fulmicoton-dd @PSeitz)

#### Breaking API Changes
- remove index sorting [#2434](https://github.com/quickwit-oss/tantivy/pull/2434)(@PSeitz)
@@ -24,6 +26,7 @@ Tantivy 0.23 will be backwards compatible with indices created with v0.22 and v0
- reduce top hits memory consumption [#2426](https://github.com/quickwit-oss/tantivy/pull/2426)(@PSeitz)
- check unsupported parameters top_hits [#2351](https://github.com/quickwit-oss/tantivy/pull/2351)(@PSeitz)
- Change AggregationLimits to AggregationLimitsGuard [#2495](https://github.com/quickwit-oss/tantivy/pull/2495)(@PSeitz)
- add support for counting non integer in aggregation [#2547](https://github.com/quickwit-oss/tantivy/pull/2547)(@trinity-1686a)
- **Range Queries**
  - Support fast field range queries on json fields [#2456](https://github.com/quickwit-oss/tantivy/pull/2456)(@PSeitz)
  - Add support for str fast field range query [#2460](https://github.com/quickwit-oss/tantivy/pull/2460) [#2452](https://github.com/quickwit-oss/tantivy/pull/2452) [#2453](https://github.com/quickwit-oss/tantivy/pull/2453)(@PSeitz)
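The str and JSON fast-field range-query entries above can be exercised through the query parser's range syntax. Below is a minimal sketch against the public API; the field name `id` and the `STRING | FAST` option combination are illustrative assumptions, not taken from the PRs.

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, STRING};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // A raw (untokenized) string field, also stored as a fast field.
    let id = schema_builder.add_text_field("id", STRING | FAST);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(id => "aa"))?;
    writer.add_document(doc!(id => "bb"))?;
    writer.add_document(doc!(id => "zz"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // Range syntax on the string field: matches "aa" and "bb", not "zz".
    let query_parser = QueryParser::for_index(&index, vec![id]);
    let query = query_parser.parse_query("id:[aa TO bb]")?;
    let count = searcher.search(&query, &Count)?;
    assert_eq!(count, 2);
    Ok(())
}
```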
@@ -34,7 +37,8 @@ Tantivy 0.23 will be backwards compatible with indices created with v0.22 and v0
- add columnar format compatibility tests [#2433](https://github.com/quickwit-oss/tantivy/pull/2433)(@PSeitz)
- Improved snippet ranges algorithm [#2474](https://github.com/quickwit-oss/tantivy/pull/2474)(@gezihuzi)
- make find_field_with_default return json fields without path [#2476](https://github.com/quickwit-oss/tantivy/pull/2476)(@trinity-1686a)
- feat(query): Make `BooleanQuery` support `minimum_number_should_match` [#2405](https://github.com/quickwit-oss/tantivy/pull/2405)(@LebranceBW)
- Make `BooleanQuery` support `minimum_number_should_match` [#2405](https://github.com/quickwit-oss/tantivy/pull/2405)(@LebranceBW)
- Make `NUM_MERGE_THREADS` configurable [#2535](https://github.com/quickwit-oss/tantivy/pull/2535)(@Barre)

- **RegexPhraseQuery**
  `RegexPhraseQuery` supports phrase queries with regex. E.g. query "b.* b.* wolf" matches "big bad wolf". Slop is supported as well: "b.* wolf"~2 matches "big bad wolf" [#2516](https://github.com/quickwit-oss/tantivy/pull/2516)(@PSeitz)
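A minimal sketch of how the "b.* wolf"~2 example above could be built programmatically. The `RegexPhraseQuery::new` constructor and `set_slop` setter shown here are assumptions modeled on `PhraseQuery`; check the released API before relying on them.

```rust
use tantivy::query::RegexPhraseQuery;
use tantivy::schema::Schema;

// Assumed API: a `new(field, wildcard_terms)` constructor plus a `set_slop`
// setter analogous to `PhraseQuery`; the exact signatures may differ.
fn big_bad_wolf_query(schema: &Schema) -> tantivy::Result<RegexPhraseQuery> {
    let body = schema.get_field("body")?;
    let mut query = RegexPhraseQuery::new(body, vec!["b.*".to_string(), "wolf".to_string()]);
    // Allow up to two positions of slop between the two wildcard terms.
    query.set_slop(2);
    Ok(query)
}
```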
@@ -60,7 +64,9 @@ This will slightly increase space and access time. [#2439](https://github.com/qu
- fix de-escaping too much in query parser [#2427](https://github.com/quickwit-oss/tantivy/pull/2427)(@trinity-1686a)
- improve query parser [#2416](https://github.com/quickwit-oss/tantivy/pull/2416)(@trinity-1686a)
- Support field grouping `title:(return AND "pink panther")` [#2333](https://github.com/quickwit-oss/tantivy/pull/2333)(@trinity-1686a)
- allow term starting with wildcard [#2568](https://github.com/quickwit-oss/tantivy/pull/2568)(@trinity-1686a)

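The field-grouping entry above can be fed straight to the query parser; a small sketch follows, with illustrative field names (`title`, `body`).

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn parse_grouped_query() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // Grouping applies the `title:` prefix to every clause inside the parentheses,
    // i.e. this parses like `title:return AND title:"pink panther"`.
    let parser = QueryParser::for_index(&index, vec![title, body]);
    let _query = parser.parse_query(r#"title:(return AND "pink panther")"#)?;
    Ok(())
}
```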
- Exist queries match subpath fields [#2558](https://github.com/quickwit-oss/tantivy/pull/2558)(@rdettai)
- add access benchmark for columnar [#2432](https://github.com/quickwit-oss/tantivy/pull/2432)(@PSeitz)
- extend indexwriter proptests [#2342](https://github.com/quickwit-oss/tantivy/pull/2342)(@PSeitz)
- add bench & test for columnar merging [#2428](https://github.com/quickwit-oss/tantivy/pull/2428)(@PSeitz)

Cargo.toml (32 changed lines)

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.23.0"
version = "0.24.2"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -11,7 +11,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2021"
rust-version = "1.75"
rust-version = "1.81"
exclude = ["benches/*.json", "benches/*.txt"]

[dependencies]
@@ -31,14 +31,14 @@ lz4_flex = { version = "0.11", default-features = false, optional = true }
zstd = { version = "0.13", optional = true, default-features = false }
tempfile = { version = "3.12.0", optional = true }
log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
fs4 = { version = "0.8.0", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.1"
downcast-rs = "2.0.1"
bitpacking = { version = "0.9.2", default-features = false, features = [
"bitpacker4x",
] }
@@ -52,20 +52,22 @@ smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.12.0"
fastdivide = "0.4.0"
itertools = "0.13.0"
measure_time = "0.8.2"
itertools = "0.14.0"
measure_time = "0.9.0"
arc-swap = "1.5.0"
bon = "3.3.1"

columnar = { version = "0.3", path = "./columnar", package = "tantivy-columnar" }
sstable = { version = "0.3", path = "./sstable", package = "tantivy-sstable", optional = true }
stacker = { version = "0.3", path = "./stacker", package = "tantivy-stacker" }
query-grammar = { version = "0.22.0", path = "./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version = "0.6", path = "./bitpacker" }
common = { version = "0.7", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version = "0.3", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
columnar = { version = "0.5", path = "./columnar", package = "tantivy-columnar" }
sstable = { version = "0.5", path = "./sstable", package = "tantivy-sstable", optional = true }
stacker = { version = "0.5", path = "./stacker", package = "tantivy-stacker" }
query-grammar = { version = "0.24.0", path = "./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version = "0.8", path = "./bitpacker" }
common = { version = "0.9", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version = "0.5", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
sketches-ddsketch = { version = "0.3.0", features = ["use_serde"] }
hyperloglogplus = { version = "0.4.1", features = ["const-loop"] }
futures-util = { version = "0.3.28", optional = true }
futures-channel = { version = "0.3.28", optional = true }
fnv = "1.0.7"

[target.'cfg(windows)'.dependencies]
@@ -120,7 +122,7 @@ zstd-compression = ["zstd"]
failpoints = ["fail", "fail/failpoints"]
unstable = [] # useful for benches.

quickwit = ["sstable", "futures-util"]
quickwit = ["sstable", "futures-util", "futures-channel"]

# Compares only the hash of a string when indexing data.
# Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.

@@ -1,6 +1,6 @@
[package]
name = "tantivy-bitpacker"
version = "0.6.0"
version = "0.8.0"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"

@@ -65,7 +65,7 @@ impl BitPacker {

#[derive(Clone, Debug, Default, Copy)]
pub struct BitUnpacker {
    num_bits: u32,
    num_bits: usize,
    mask: u64,
}

@@ -83,7 +83,7 @@ impl BitUnpacker {
        (1u64 << num_bits) - 1u64
    };
    BitUnpacker {
        num_bits: u32::from(num_bits),
        num_bits: usize::from(num_bits),
        mask,
    }
}
@@ -94,14 +94,14 @@ impl BitUnpacker {

#[inline]
pub fn get(&self, idx: u32, data: &[u8]) -> u64 {
    let addr_in_bits = idx * self.num_bits;
    let addr = (addr_in_bits >> 3) as usize;
    let addr_in_bits = idx as usize * self.num_bits;
    let addr = addr_in_bits >> 3;
    if addr + 8 > data.len() {
        if self.num_bits == 0 {
            return 0;
        }
        let bit_shift = addr_in_bits & 7;
        return self.get_slow_path(addr, bit_shift, data);
        return self.get_slow_path(addr, bit_shift as u32, data);
    }
    let bit_shift = addr_in_bits & 7;
    let bytes: [u8; 8] = (&data[addr..addr + 8]).try_into().unwrap();
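The u32-to-usize switch in `get` matters because `addr_in_bits = idx * num_bits` is computed before dividing by 8, so the intermediate bit address can exceed `u32::MAX` well before the column reaches 4GB of data. A quick back-of-the-envelope check (the numbers are illustrative, not from the PR):

```rust
fn main() {
    // With num_bits = 17 and ~300 million values, the bit address of the last
    // element no longer fits in a u32:
    let idx: u64 = 300_000_000;
    let num_bits: u64 = 17;
    let addr_in_bits = idx * num_bits; // 5_100_000_000
    assert!(addr_in_bits > u64::from(u32::MAX)); // u32::MAX is 4_294_967_295
    // Computed with u32 arithmetic this multiplication would wrap around and
    // point `get` at the wrong byte offset, which is why the diff widens the
    // index math (and `num_bits`) to usize.
}
```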
@@ -134,12 +134,13 @@ impl BitUnpacker {
        "Bitwidth must be <= 32 to use this method."
    );

    let end_idx = start_idx + output.len() as u32;
    let end_idx: u32 = start_idx + output.len() as u32;

    let end_bit_read = end_idx * self.num_bits;
    // We use `usize` here to avoid overflow issues.
    let end_bit_read = (end_idx as usize) * self.num_bits;
    let end_byte_read = (end_bit_read + 7) / 8;
    assert!(
        end_byte_read as usize <= data.len(),
        end_byte_read <= data.len(),
        "Requested index is out of bounds."
    );

@@ -159,24 +160,24 @@ impl BitUnpacker {
    // We want the start of the fast track to start align with bytes.
    // A sufficient condition is to start with an idx that is a multiple of 8,
    // so highway start is the closest multiple of 8 that is >= start_idx.
    let entrance_ramp_len = 8 - (start_idx % 8) % 8;
    let entrance_ramp_len: u32 = 8 - (start_idx % 8) % 8;

    let highway_start: u32 = start_idx + entrance_ramp_len;

    if highway_start + BitPacker1x::BLOCK_LEN as u32 > end_idx {
    if highway_start + (BitPacker1x::BLOCK_LEN as u32) > end_idx {
        // We don't have enough values to have even a single block of highway.
        // Let's just supply the values the simple way.
        get_batch_ramp(start_idx, output);
        return;
    }

    let num_blocks: u32 = (end_idx - highway_start) / BitPacker1x::BLOCK_LEN as u32;
    let num_blocks: usize = (end_idx - highway_start) as usize / BitPacker1x::BLOCK_LEN;

    // Entrance ramp
    get_batch_ramp(start_idx, &mut output[..entrance_ramp_len as usize]);

    // Highway
    let mut offset = (highway_start * self.num_bits) as usize / 8;
    let mut offset = (highway_start as usize * self.num_bits) / 8;
    let mut output_cursor = (highway_start - start_idx) as usize;
    for _ in 0..num_blocks {
        offset += BitPacker1x.decompress(
@@ -188,7 +189,7 @@ impl BitUnpacker {
    }

    // Exit ramp
    let highway_end = highway_start + num_blocks * BitPacker1x::BLOCK_LEN as u32;
    let highway_end: u32 = highway_start + (num_blocks * BitPacker1x::BLOCK_LEN) as u32;
    get_batch_ramp(highway_end, &mut output[output_cursor..]);
}

@@ -34,7 +34,7 @@ struct BlockedBitpackerEntryMetaData {

impl BlockedBitpackerEntryMetaData {
    fn new(offset: u64, num_bits: u8, base_value: u64) -> Self {
        let encoded = offset | (num_bits as u64) << (64 - 8);
        let encoded = offset | (u64::from(num_bits) << (64 - 8));
        Self {
            encoded,
            base_value,
@@ -16,14 +16,14 @@ body = """

{%- if version %} in {{ version }}{%- endif -%}
{% for commit in commits %}
{% if commit.github.pr_title -%}
{%- set commit_message = commit.github.pr_title -%}
{% if commit.remote.pr_title -%}
{%- set commit_message = commit.remote.pr_title -%}
{%- else -%}
{%- set commit_message = commit.message -%}
{%- endif -%}
- {{ commit_message | split(pat="\n") | first | trim }}\
{% if commit.github.pr_number %} \
[#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}){% if commit.github.username %}(@{{ commit.github.username }}){%- endif -%} \
{% if commit.remote.pr_number %} \
[#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}){% if commit.remote.username %}(@{{ commit.remote.username }}){%- endif -%} \
{%- endif %}
{%- endfor -%}

@@ -1,6 +1,6 @@
[package]
name = "tantivy-columnar"
version = "0.3.0"
version = "0.5.0"
edition = "2021"
license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy"
@@ -9,15 +9,15 @@ description = "column oriented storage for tantivy"
categories = ["database-implementations", "data-structures", "compression"]

[dependencies]
itertools = "0.13.0"
itertools = "0.14.0"
fastdivide = "0.4.0"

stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.3", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.7", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.6", path = "../bitpacker/" }
stacker = { version= "0.5", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.5", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.9", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.8", path = "../bitpacker/" }
serde = "1.0.152"
downcast-rs = "1.2.0"
downcast-rs = "2.0.1"

[dev-dependencies]
proptest = "1"

columnar/columnar-cli-inspect/Cargo.toml (new file, 18 lines)

@@ -0,0 +1,18 @@
[package]
name = "tantivy-columnar-inspect"
version = "0.1.0"
edition = "2021"
license = "MIT"

[dependencies]
tantivy = {path="../..", package="tantivy"}
columnar = {path="../", package="tantivy-columnar"}
common = {path="../../common", package="tantivy-common"}

[workspace]
members = []

[profile.release]
debug = true
#debug-assertions = true
#overflow-checks = true
columnar/columnar-cli-inspect/src/main.rs (new file, 54 lines)

@@ -0,0 +1,54 @@
use columnar::ColumnarReader;
use common::file_slice::{FileSlice, WrapFile};
use std::io;
use std::path::Path;
use tantivy::directory::footer::Footer;

fn main() -> io::Result<()> {
    println!("Opens a columnar file written by tantivy and validates it.");
    let path = std::env::args().nth(1).unwrap();

    let path = Path::new(&path);
    println!("Reading {:?}", path);
    let _reader = open_and_validate_columnar(path.to_str().unwrap())?;

    Ok(())
}

pub fn validate_columnar_reader(reader: &ColumnarReader) {
    let num_rows = reader.num_rows();
    println!("num_rows: {}", num_rows);
    let columns = reader.list_columns().unwrap();
    println!("num columns: {:?}", columns.len());
    for (col_name, dynamic_column_handle) in columns {
        let col = dynamic_column_handle.open().unwrap();
        match col {
            columnar::DynamicColumn::Bool(_)
            | columnar::DynamicColumn::I64(_)
            | columnar::DynamicColumn::U64(_)
            | columnar::DynamicColumn::F64(_)
            | columnar::DynamicColumn::IpAddr(_)
            | columnar::DynamicColumn::DateTime(_)
            | columnar::DynamicColumn::Bytes(_) => {}
            columnar::DynamicColumn::Str(str_column) => {
                let num_vals = str_column.ords().values.num_vals();
                let num_terms_dict = str_column.num_terms() as u64;
                let max_ord = str_column.ords().values.iter().max().unwrap_or_default();
                println!("{col_name:35} num_vals {num_vals:10} \t num_terms_dict {num_terms_dict:8} max_ord: {max_ord:8}",);
                for ord in str_column.ords().values.iter() {
                    assert!(ord < num_terms_dict);
                }
            }
        }
    }
}

/// Opens a columnar file that was written by tantivy and validates it.
pub fn open_and_validate_columnar(path: &str) -> io::Result<ColumnarReader> {
    let wrap_file = WrapFile::new(std::fs::File::open(path)?)?;
    let slice = FileSlice::new(std::sync::Arc::new(wrap_file));
    let (_footer, slice) = Footer::extract_footer(slice.clone()).unwrap();
    let reader = ColumnarReader::open(slice).unwrap();
    validate_columnar_reader(&reader);
    Ok(reader)
}
@@ -139,7 +139,7 @@ mod tests {
        missing_docs.push(missing_doc);
    });

    assert_eq!(missing_docs, vec![]);
    assert_eq!(missing_docs, Vec::<u32>::new());
}

#[test]

@@ -56,7 +56,7 @@ fn get_doc_ids_with_values<'a>(
        ColumnIndex::Full => Box::new(doc_range),
        ColumnIndex::Optional(optional_index) => Box::new(
            optional_index
                .iter_rows()
                .iter_docs()
                .map(move |row| row + doc_range.start),
        ),
        ColumnIndex::Multivalued(multivalued_index) => match multivalued_index {
@@ -73,7 +73,7 @@ fn get_doc_ids_with_values<'a>(
            MultiValueIndex::MultiValueIndexV2(multivalued_index) => Box::new(
                multivalued_index
                    .optional_index
                    .iter_rows()
                    .iter_docs()
                    .map(move |row| row + doc_range.start),
            ),
        },
@@ -177,7 +177,7 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
        ColumnIndex::Full => Box::new(columnar_row_range),
        ColumnIndex::Optional(optional_index) => Box::new(
            optional_index
                .iter_rows()
                .iter_docs()
                .map(move |row_id: RowId| columnar_row_range.start + row_id),
        ),
        ColumnIndex::Multivalued(_) => {

@@ -80,23 +80,23 @@ impl BlockVariant {
|
||||
/// index is the block index. For each block `byte_start` and `offset` is computed.
|
||||
#[derive(Clone)]
|
||||
pub struct OptionalIndex {
|
||||
num_rows: RowId,
|
||||
num_non_null_rows: RowId,
|
||||
num_docs: RowId,
|
||||
num_non_null_docs: RowId,
|
||||
block_data: OwnedBytes,
|
||||
block_metas: Arc<[BlockMeta]>,
|
||||
}
|
||||
|
||||
impl Iterable<u32> for &OptionalIndex {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
|
||||
Box::new(self.iter_rows())
|
||||
Box::new(self.iter_docs())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for OptionalIndex {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
f.debug_struct("OptionalIndex")
|
||||
.field("num_rows", &self.num_rows)
|
||||
.field("num_non_null_rows", &self.num_non_null_rows)
|
||||
.field("num_docs", &self.num_docs)
|
||||
.field("num_non_null_docs", &self.num_non_null_docs)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
@@ -271,17 +271,17 @@ impl OptionalIndex {
|
||||
}
|
||||
|
||||
pub fn num_docs(&self) -> RowId {
|
||||
self.num_rows
|
||||
self.num_docs
|
||||
}
|
||||
|
||||
pub fn num_non_nulls(&self) -> RowId {
|
||||
self.num_non_null_rows
|
||||
self.num_non_null_docs
|
||||
}
|
||||
|
||||
pub fn iter_rows(&self) -> impl Iterator<Item = RowId> + '_ {
|
||||
pub fn iter_docs(&self) -> impl Iterator<Item = RowId> + '_ {
|
||||
// TODO optimize
|
||||
let mut select_batch = self.select_cursor();
|
||||
(0..self.num_non_null_rows).map(move |rank| select_batch.select(rank))
|
||||
(0..self.num_non_null_docs).map(move |rank| select_batch.select(rank))
|
||||
}
|
||||
pub fn select_batch(&self, ranks: &mut [RowId]) {
|
||||
let mut select_cursor = self.select_cursor();
|
||||
@@ -519,15 +519,15 @@ pub fn open_optional_index(bytes: OwnedBytes) -> io::Result<OptionalIndex> {
|
||||
let (mut bytes, num_non_empty_blocks_bytes) = bytes.rsplit(2);
|
||||
let num_non_empty_block_bytes =
|
||||
u16::from_le_bytes(num_non_empty_blocks_bytes.as_slice().try_into().unwrap());
|
||||
let num_rows = VInt::deserialize_u64(&mut bytes)? as u32;
|
||||
let num_docs = VInt::deserialize_u64(&mut bytes)? as u32;
|
||||
let block_metas_num_bytes =
|
||||
num_non_empty_block_bytes as usize * SERIALIZED_BLOCK_META_NUM_BYTES;
|
||||
let (block_data, block_metas) = bytes.rsplit(block_metas_num_bytes);
|
||||
let (block_metas, num_non_null_rows) =
|
||||
deserialize_optional_index_block_metadatas(block_metas.as_slice(), num_rows);
|
||||
let (block_metas, num_non_null_docs) =
|
||||
deserialize_optional_index_block_metadatas(block_metas.as_slice(), num_docs);
|
||||
let optional_index = OptionalIndex {
|
||||
num_rows,
|
||||
num_non_null_rows,
|
||||
num_docs,
|
||||
num_non_null_docs,
|
||||
block_data,
|
||||
block_metas: block_metas.into(),
|
||||
};
|
||||
|
||||
@@ -164,7 +164,7 @@ fn test_optional_index_large() {
|
||||
fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
|
||||
let optional_index = OptionalIndex::for_test(num_rows, row_ids);
|
||||
assert_eq!(optional_index.num_docs(), num_rows);
|
||||
assert!(optional_index.iter_rows().eq(row_ids.iter().copied()));
|
||||
assert!(optional_index.iter_docs().eq(row_ids.iter().copied()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::io::{self, Write};
|
||||
use common::{BitSet, CountingWriter, ReadOnlyBitSet};
|
||||
use sstable::{SSTable, Streamer, TermOrdinal, VoidSSTable};
|
||||
|
||||
use super::term_merger::TermMerger;
|
||||
use super::term_merger::{TermMerger, TermsWithSegmentOrd};
|
||||
use crate::column::serialize_column_mappable_to_u64;
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::iterable::Iterable;
|
||||
@@ -126,14 +126,17 @@ fn serialize_merged_dict(
|
||||
let mut term_ord_mapping = TermOrdinalMapping::default();
|
||||
|
||||
let mut field_term_streams = Vec::new();
|
||||
for column_opt in bytes_columns.iter() {
|
||||
for (segment_ord, column_opt) in bytes_columns.iter().enumerate() {
|
||||
if let Some(column) = column_opt {
|
||||
term_ord_mapping.add_segment(column.dictionary.num_terms());
|
||||
let terms: Streamer<VoidSSTable> = column.dictionary.stream()?;
|
||||
field_term_streams.push(terms);
|
||||
field_term_streams.push(TermsWithSegmentOrd { terms, segment_ord });
|
||||
} else {
|
||||
term_ord_mapping.add_segment(0);
|
||||
field_term_streams.push(Streamer::empty());
|
||||
field_term_streams.push(TermsWithSegmentOrd {
|
||||
terms: Streamer::empty(),
|
||||
segment_ord,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -191,6 +194,7 @@ fn serialize_merged_dict(
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct TermOrdinalMapping {
|
||||
/// Contains the new term ordinals for each segment.
|
||||
per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>,
|
||||
}
|
||||
|
||||
@@ -205,6 +209,6 @@ impl TermOrdinalMapping {
|
||||
}
|
||||
|
||||
fn get_segment(&self, segment_ord: u32) -> &[TermOrdinal] {
|
||||
&(self.per_segment_new_term_ordinals[segment_ord as usize])[..]
|
||||
&self.per_segment_new_term_ordinals[segment_ord as usize]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ impl StackMergeOrder {
|
||||
let mut cumulated_row_ids: Vec<RowId> = Vec::with_capacity(columnars.len());
|
||||
let mut cumulated_row_id = 0;
|
||||
for columnar in columnars {
|
||||
cumulated_row_id += columnar.num_rows();
|
||||
cumulated_row_id += columnar.num_docs();
|
||||
cumulated_row_ids.push(cumulated_row_id);
|
||||
}
|
||||
StackMergeOrder { cumulated_row_ids }
|
||||
|
||||
@@ -80,13 +80,12 @@ pub fn merge_columnar(
|
||||
output: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let mut serializer = ColumnarSerializer::new(output);
|
||||
let num_rows_per_columnar = columnar_readers
|
||||
let num_docs_per_columnar = columnar_readers
|
||||
.iter()
|
||||
.map(|reader| reader.num_rows())
|
||||
.map(|reader| reader.num_docs())
|
||||
.collect::<Vec<u32>>();
|
||||
|
||||
let columns_to_merge =
|
||||
group_columns_for_merge(columnar_readers, required_columns, &merge_row_order)?;
|
||||
let columns_to_merge = group_columns_for_merge(columnar_readers, required_columns)?;
|
||||
for res in columns_to_merge {
|
||||
let ((column_name, _column_type_category), grouped_columns) = res;
|
||||
let grouped_columns = grouped_columns.open(&merge_row_order)?;
|
||||
@@ -94,15 +93,18 @@ pub fn merge_columnar(
|
||||
continue;
|
||||
}
|
||||
|
||||
let column_type = grouped_columns.column_type_after_merge();
|
||||
let column_type_after_merge = grouped_columns.column_type_after_merge();
|
||||
let mut columns = grouped_columns.columns;
|
||||
coerce_columns(column_type, &mut columns)?;
|
||||
// Make sure the number of columns is the same as the number of columnar readers.
|
||||
// Or num_docs_per_columnar would be incorrect.
|
||||
assert_eq!(columns.len(), columnar_readers.len());
|
||||
coerce_columns(column_type_after_merge, &mut columns)?;
|
||||
|
||||
let mut column_serializer =
|
||||
serializer.start_serialize_column(column_name.as_bytes(), column_type);
|
||||
serializer.start_serialize_column(column_name.as_bytes(), column_type_after_merge);
|
||||
merge_column(
|
||||
column_type,
|
||||
&num_rows_per_columnar,
|
||||
column_type_after_merge,
|
||||
&num_docs_per_columnar,
|
||||
columns,
|
||||
&merge_row_order,
|
||||
&mut column_serializer,
|
||||
@@ -128,7 +130,7 @@ fn dynamic_column_to_u64_monotonic(dynamic_column: DynamicColumn) -> Option<Colu
|
||||
fn merge_column(
|
||||
column_type: ColumnType,
|
||||
num_docs_per_column: &[u32],
|
||||
columns: Vec<Option<DynamicColumn>>,
|
||||
columns_to_merge: Vec<Option<DynamicColumn>>,
|
||||
merge_row_order: &MergeRowOrder,
|
||||
wrt: &mut impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
@@ -138,10 +140,10 @@ fn merge_column(
|
||||
| ColumnType::F64
|
||||
| ColumnType::DateTime
|
||||
| ColumnType::Bool => {
|
||||
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
|
||||
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns_to_merge.len());
|
||||
let mut column_values: Vec<Option<Arc<dyn ColumnValues>>> =
|
||||
Vec::with_capacity(columns.len());
|
||||
for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
|
||||
Vec::with_capacity(columns_to_merge.len());
|
||||
for (i, dynamic_column_opt) in columns_to_merge.into_iter().enumerate() {
|
||||
if let Some(Column { index: idx, values }) =
|
||||
dynamic_column_opt.and_then(dynamic_column_to_u64_monotonic)
|
||||
{
|
||||
@@ -164,10 +166,10 @@ fn merge_column(
|
||||
serialize_column_mappable_to_u64(merged_column_index, &merge_column_values, wrt)?;
|
||||
}
|
||||
ColumnType::IpAddr => {
|
||||
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
|
||||
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns_to_merge.len());
|
||||
let mut column_values: Vec<Option<Arc<dyn ColumnValues<Ipv6Addr>>>> =
|
||||
Vec::with_capacity(columns.len());
|
||||
for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
|
||||
Vec::with_capacity(columns_to_merge.len());
|
||||
for (i, dynamic_column_opt) in columns_to_merge.into_iter().enumerate() {
|
||||
if let Some(DynamicColumn::IpAddr(Column { index: idx, values })) =
|
||||
dynamic_column_opt
|
||||
{
|
||||
@@ -192,9 +194,10 @@ fn merge_column(
|
||||
serialize_column_mappable_to_u128(merged_column_index, &merge_column_values, wrt)?;
|
||||
}
|
||||
ColumnType::Bytes | ColumnType::Str => {
|
||||
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns.len());
|
||||
let mut bytes_columns: Vec<Option<BytesColumn>> = Vec::with_capacity(columns.len());
|
||||
for (i, dynamic_column_opt) in columns.into_iter().enumerate() {
|
||||
let mut column_indexes: Vec<ColumnIndex> = Vec::with_capacity(columns_to_merge.len());
|
||||
let mut bytes_columns: Vec<Option<BytesColumn>> =
|
||||
Vec::with_capacity(columns_to_merge.len());
|
||||
for (i, dynamic_column_opt) in columns_to_merge.into_iter().enumerate() {
|
||||
match dynamic_column_opt {
|
||||
Some(DynamicColumn::Str(str_column)) => {
|
||||
column_indexes.push(str_column.term_ord_column.index.clone());
|
||||
@@ -248,7 +251,7 @@ impl GroupedColumns {
|
||||
if column_type.len() == 1 {
|
||||
return column_type.into_iter().next().unwrap();
|
||||
}
|
||||
// At the moment, only the numerical categorical column type has more than one possible
|
||||
// At the moment, only the numerical column type category has more than one possible
|
||||
// column type.
|
||||
assert!(self
|
||||
.columns
|
||||
@@ -361,7 +364,7 @@ fn is_empty_after_merge(
|
||||
ColumnIndex::Empty { .. } => true,
|
||||
ColumnIndex::Full => alive_bitset.len() == 0,
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
for doc in optional_index.iter_rows() {
|
||||
for doc in optional_index.iter_docs() {
|
||||
if alive_bitset.contains(doc) {
|
||||
return false;
|
||||
}
|
||||
@@ -391,7 +394,6 @@ fn is_empty_after_merge(
|
||||
fn group_columns_for_merge<'a>(
|
||||
columnar_readers: &'a [&'a ColumnarReader],
|
||||
required_columns: &'a [(String, ColumnType)],
|
||||
_merge_row_order: &'a MergeRowOrder,
|
||||
) -> io::Result<BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle>> {
|
||||
let mut columns: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> = BTreeMap::new();
|
||||
|
||||
|
||||
@@ -5,28 +5,29 @@ use sstable::TermOrdinal;
|
||||
|
||||
use crate::Streamer;
|
||||
|
||||
pub struct HeapItem<'a> {
|
||||
pub streamer: Streamer<'a>,
|
||||
/// The terms of a column with the ordinal of the segment.
|
||||
pub struct TermsWithSegmentOrd<'a> {
|
||||
pub terms: Streamer<'a>,
|
||||
pub segment_ord: usize,
|
||||
}
|
||||
|
||||
impl PartialEq for HeapItem<'_> {
|
||||
impl PartialEq for TermsWithSegmentOrd<'_> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.segment_ord == other.segment_ord
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for HeapItem<'_> {}
|
||||
impl Eq for TermsWithSegmentOrd<'_> {}
|
||||
|
||||
impl<'a> PartialOrd for HeapItem<'a> {
|
||||
fn partial_cmp(&self, other: &HeapItem<'a>) -> Option<Ordering> {
|
||||
impl<'a> PartialOrd for TermsWithSegmentOrd<'a> {
|
||||
fn partial_cmp(&self, other: &TermsWithSegmentOrd<'a>) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Ord for HeapItem<'a> {
|
||||
fn cmp(&self, other: &HeapItem<'a>) -> Ordering {
|
||||
(&other.streamer.key(), &other.segment_ord).cmp(&(&self.streamer.key(), &self.segment_ord))
|
||||
impl<'a> Ord for TermsWithSegmentOrd<'a> {
|
||||
fn cmp(&self, other: &TermsWithSegmentOrd<'a>) -> Ordering {
|
||||
(&other.terms.key(), &other.segment_ord).cmp(&(&self.terms.key(), &self.segment_ord))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,39 +38,32 @@ impl<'a> Ord for HeapItem<'a> {
|
||||
/// - the term
|
||||
/// - a slice with the ordinal of the segments containing the terms.
|
||||
pub struct TermMerger<'a> {
|
||||
heap: BinaryHeap<HeapItem<'a>>,
|
||||
current_streamers: Vec<HeapItem<'a>>,
|
||||
heap: BinaryHeap<TermsWithSegmentOrd<'a>>,
|
||||
term_streams_with_segment: Vec<TermsWithSegmentOrd<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> TermMerger<'a> {
|
||||
/// Stream of merged term dictionary
|
||||
pub fn new(streams: Vec<Streamer<'a>>) -> TermMerger<'a> {
|
||||
pub fn new(term_streams_with_segment: Vec<TermsWithSegmentOrd<'a>>) -> TermMerger<'a> {
|
||||
TermMerger {
|
||||
heap: BinaryHeap::new(),
|
||||
current_streamers: streams
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(ord, streamer)| HeapItem {
|
||||
streamer,
|
||||
segment_ord: ord,
|
||||
})
|
||||
.collect(),
|
||||
term_streams_with_segment,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn matching_segments<'b: 'a>(
|
||||
&'b self,
|
||||
) -> impl 'b + Iterator<Item = (usize, TermOrdinal)> {
|
||||
self.current_streamers
|
||||
self.term_streams_with_segment
|
||||
.iter()
|
||||
.map(|heap_item| (heap_item.segment_ord, heap_item.streamer.term_ord()))
|
||||
.map(|heap_item| (heap_item.segment_ord, heap_item.terms.term_ord()))
|
||||
}
|
||||
|
||||
fn advance_segments(&mut self) {
|
||||
let streamers = &mut self.current_streamers;
|
||||
let streamers = &mut self.term_streams_with_segment;
|
||||
let heap = &mut self.heap;
|
||||
for mut heap_item in streamers.drain(..) {
|
||||
if heap_item.streamer.advance() {
|
||||
if heap_item.terms.advance() {
|
||||
heap.push(heap_item);
|
||||
}
|
||||
}
|
||||
@@ -81,13 +75,13 @@ impl<'a> TermMerger<'a> {
|
||||
pub fn advance(&mut self) -> bool {
|
||||
self.advance_segments();
|
||||
if let Some(head) = self.heap.pop() {
|
||||
self.current_streamers.push(head);
|
||||
self.term_streams_with_segment.push(head);
|
||||
while let Some(next_streamer) = self.heap.peek() {
|
||||
if self.current_streamers[0].streamer.key() != next_streamer.streamer.key() {
|
||||
if self.term_streams_with_segment[0].terms.key() != next_streamer.terms.key() {
|
||||
break;
|
||||
}
|
||||
let next_heap_it = self.heap.pop().unwrap(); // safe : we peeked beforehand
|
||||
self.current_streamers.push(next_heap_it);
|
||||
self.term_streams_with_segment.push(next_heap_it);
|
||||
}
|
||||
true
|
||||
} else {
|
||||
@@ -101,6 +95,6 @@ impl<'a> TermMerger<'a> {
|
||||
/// if and only if advance() has been called before
|
||||
/// and "true" was returned.
|
||||
pub fn key(&self) -> &[u8] {
|
||||
self.current_streamers[0].streamer.key()
|
||||
self.term_streams_with_segment[0].terms.key()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
use itertools::Itertools;
|
||||
use proptest::collection::vec;
|
||||
use proptest::prelude::*;
|
||||
|
||||
use super::*;
|
||||
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};
|
||||
use crate::columnar::{merge_columnar, ColumnarReader, MergeRowOrder, StackMergeOrder};
|
||||
use crate::{Cardinality, ColumnarWriter, DynamicColumn, HasAssociatedColumnType, RowId};
|
||||
|
||||
fn make_columnar<T: Into<NumericalValue> + HasAssociatedColumnType + Copy>(
|
||||
column_name: &str,
|
||||
@@ -26,9 +29,8 @@ fn test_column_coercion_to_u64() {
|
||||
// u64 type
|
||||
let columnar2 = make_columnar("numbers", &[u64::MAX]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(columnars, &[], &merge_order).unwrap();
|
||||
group_columns_for_merge(columnars, &[]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
@@ -38,9 +40,8 @@ fn test_column_coercion_to_i64() {
|
||||
let columnar1 = make_columnar("numbers", &[-1i64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(columnars, &[], &merge_order).unwrap();
|
||||
group_columns_for_merge(columnars, &[]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
@@ -63,14 +64,8 @@ fn test_group_columns_with_required_column() {
|
||||
let columnar1 = make_columnar("numbers", &[1i64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(
|
||||
&[&columnar1, &columnar2],
|
||||
&[("numbers".to_string(), ColumnType::U64)],
|
||||
&merge_order,
|
||||
)
|
||||
.unwrap();
|
||||
group_columns_for_merge(columnars, &[("numbers".to_string(), ColumnType::U64)]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
@@ -80,13 +75,9 @@ fn test_group_columns_required_column_with_no_existing_columns() {
|
||||
let columnar1 = make_columnar("numbers", &[2u64]);
|
||||
let columnar2 = make_columnar("numbers", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<_, _> = group_columns_for_merge(
|
||||
columnars,
|
||||
&[("required_col".to_string(), ColumnType::Str)],
|
||||
&merge_order,
|
||||
)
|
||||
.unwrap();
|
||||
let column_map: BTreeMap<_, _> =
|
||||
group_columns_for_merge(columnars, &[("required_col".to_string(), ColumnType::Str)])
|
||||
.unwrap();
|
||||
assert_eq!(column_map.len(), 2);
|
||||
let columns = &column_map
|
||||
.get(&("required_col".to_string(), ColumnTypeCategory::Str))
|
||||
@@ -102,14 +93,8 @@ fn test_group_columns_required_column_is_above_all_columns_have_the_same_type_ru
|
||||
let columnar1 = make_columnar("numbers", &[2i64]);
|
||||
let columnar2 = make_columnar("numbers", &[2i64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(
|
||||
columnars,
|
||||
&[("numbers".to_string(), ColumnType::U64)],
|
||||
&merge_order,
|
||||
)
|
||||
.unwrap();
|
||||
group_columns_for_merge(columnars, &[("numbers".to_string(), ColumnType::U64)]).unwrap();
|
||||
assert_eq!(column_map.len(), 1);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
}
|
||||
@@ -119,9 +104,8 @@ fn test_missing_column() {
|
||||
let columnar1 = make_columnar("numbers", &[-1i64]);
|
||||
let columnar2 = make_columnar("numbers2", &[2u64]);
|
||||
let columnars = &[&columnar1, &columnar2];
|
||||
let merge_order = StackMergeOrder::stack(columnars).into();
|
||||
let column_map: BTreeMap<(String, ColumnTypeCategory), GroupedColumnsHandle> =
|
||||
group_columns_for_merge(columnars, &[], &merge_order).unwrap();
|
||||
group_columns_for_merge(columnars, &[]).unwrap();
|
||||
assert_eq!(column_map.len(), 2);
|
||||
assert!(column_map.contains_key(&("numbers".to_string(), ColumnTypeCategory::Numerical)));
|
||||
{
|
||||
@@ -224,7 +208,7 @@ fn test_merge_columnar_numbers() {
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 3);
|
||||
assert_eq!(columnar_reader.num_docs(), 3);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("numbers").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
@@ -252,7 +236,7 @@ fn test_merge_columnar_texts() {
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 3);
|
||||
assert_eq!(columnar_reader.num_docs(), 3);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("texts").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
@@ -301,7 +285,7 @@ fn test_merge_columnar_byte() {
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 4);
|
||||
assert_eq!(columnar_reader.num_docs(), 4);
|
||||
assert_eq!(columnar_reader.num_columns(), 1);
|
||||
let cols = columnar_reader.read_columns("bytes").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
@@ -357,7 +341,7 @@ fn test_merge_columnar_byte_with_missing() {
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 3 + 2 + 3);
|
||||
assert_eq!(columnar_reader.num_docs(), 3 + 2 + 3);
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let cols = columnar_reader.read_columns("col").unwrap();
|
||||
let dynamic_column = cols[0].open().unwrap();
|
||||
@@ -409,7 +393,7 @@ fn test_merge_columnar_different_types() {
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 4);
|
||||
assert_eq!(columnar_reader.num_docs(), 4);
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let cols = columnar_reader.read_columns("mixed").unwrap();
|
||||
|
||||
@@ -419,11 +403,11 @@ fn test_merge_columnar_different_types() {
|
||||
panic!()
|
||||
};
|
||||
assert_eq!(vals.get_cardinality(), Cardinality::Optional);
|
||||
assert_eq!(vals.values_for_doc(0).collect_vec(), vec![]);
|
||||
assert_eq!(vals.values_for_doc(1).collect_vec(), vec![]);
|
||||
assert_eq!(vals.values_for_doc(2).collect_vec(), vec![]);
|
||||
assert_eq!(vals.values_for_doc(0).collect_vec(), Vec::<i64>::new());
|
||||
assert_eq!(vals.values_for_doc(1).collect_vec(), Vec::<i64>::new());
|
||||
assert_eq!(vals.values_for_doc(2).collect_vec(), Vec::<i64>::new());
|
||||
assert_eq!(vals.values_for_doc(3).collect_vec(), vec![1]);
|
||||
assert_eq!(vals.values_for_doc(4).collect_vec(), vec![]);
|
||||
assert_eq!(vals.values_for_doc(4).collect_vec(), Vec::<i64>::new());
|
||||
|
||||
// text column
|
||||
let dynamic_column = cols[1].open().unwrap();
|
||||
@@ -474,7 +458,7 @@ fn test_merge_columnar_different_empty_cardinality() {
|
||||
)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
assert_eq!(columnar_reader.num_rows(), 2);
|
||||
assert_eq!(columnar_reader.num_docs(), 2);
|
||||
assert_eq!(columnar_reader.num_columns(), 2);
|
||||
let cols = columnar_reader.read_columns("mixed").unwrap();
|
||||
|
||||
@@ -486,3 +470,119 @@ fn test_merge_columnar_different_empty_cardinality() {
|
||||
let dynamic_column = cols[1].open().unwrap();
|
||||
assert_eq!(dynamic_column.get_cardinality(), Cardinality::Optional);
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct ColumnSpec {
|
||||
column_name: String,
|
||||
/// (row_id, term)
|
||||
terms: Vec<(RowId, Vec<u8>)>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ColumnarSpec {
|
||||
columns: Vec<ColumnSpec>,
|
||||
}
|
||||
|
||||
/// Generate a random (row_id, term) pair:
|
||||
/// - row_id in [0..10]
|
||||
/// - term is either from POSSIBLE_TERMS or random bytes
|
||||
fn rowid_and_term_strategy() -> impl Strategy<Value = (RowId, Vec<u8>)> {
|
||||
const POSSIBLE_TERMS: &[&[u8]] = &[b"a", b"b", b"allo"];
|
||||
|
||||
let term_strat = prop_oneof![
|
||||
// pick from the fixed list
|
||||
(0..POSSIBLE_TERMS.len()).prop_map(|i| POSSIBLE_TERMS[i].to_vec()),
|
||||
// or random bytes (length 0..10)
|
||||
prop::collection::vec(any::<u8>(), 0..10),
|
||||
];
|
||||
|
||||
(0u32..11, term_strat)
|
||||
}
|
||||
|
||||
/// Generate one ColumnSpec, with a random name and a random list of (row_id, term).
|
||||
/// We sort it by row_id so that data is in ascending order.
|
||||
fn column_spec_strategy() -> impl Strategy<Value = ColumnSpec> {
|
||||
let column_name = prop_oneof![
|
||||
Just("col".to_string()),
|
||||
Just("col2".to_string()),
|
||||
"col.*".prop_map(|s| s),
|
||||
];
|
||||
|
||||
// We'll produce 0..8 (rowid,term) entries for this column
|
||||
let data_strat = vec(rowid_and_term_strategy(), 0..8).prop_map(|mut pairs| {
|
||||
// Sort by row_id
|
||||
pairs.sort_by_key(|(row_id, _)| *row_id);
|
||||
pairs
|
||||
});
|
||||
|
||||
(column_name, data_strat).prop_map(|(name, data)| ColumnSpec {
|
||||
column_name: name,
|
||||
terms: data,
|
||||
})
|
||||
}
|
||||
|
||||
/// Strategy to generate an ColumnarSpec
|
||||
fn columnar_strategy() -> impl Strategy<Value = ColumnarSpec> {
|
||||
vec(column_spec_strategy(), 0..3).prop_map(|columns| ColumnarSpec { columns })
|
||||
}
|
||||
|
||||
/// Strategy to generate multiple ColumnarSpecs, each of which we will treat
|
||||
/// as one "columnar" to be merged together.
|
||||
fn columnars_strategy() -> impl Strategy<Value = Vec<ColumnarSpec>> {
|
||||
vec(columnar_strategy(), 1..4)
|
||||
}
|
||||
|
||||
/// Build a `ColumnarReader` from a `ColumnarSpec`
|
||||
fn build_columnar(spec: &ColumnarSpec) -> ColumnarReader {
|
||||
let mut writer = ColumnarWriter::default();
|
||||
let mut max_row_id = 0;
|
||||
for col in &spec.columns {
|
||||
for &(row_id, ref term) in &col.terms {
|
||||
writer.record_bytes(row_id, &col.column_name, term);
|
||||
max_row_id = max_row_id.max(row_id);
|
||||
}
|
||||
}
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
writer.serialize(max_row_id + 1, &mut buffer).unwrap();
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
proptest! {
|
||||
// We just test that the merge_columnar function doesn't crash.
|
||||
#![proptest_config(ProptestConfig::with_cases(256))]
|
||||
#[test]
|
||||
fn test_merge_columnar_bytes_no_crash(columnars in columnars_strategy(), second_merge_columnars in columnars_strategy()) {
|
||||
let columnars: Vec<ColumnarReader> = columnars.iter()
|
||||
.map(build_columnar)
|
||||
.collect();
|
||||
|
||||
let mut out = Vec::new();
|
||||
let columnar_refs: Vec<&ColumnarReader> = columnars.iter().collect();
|
||||
let stack_merge_order = StackMergeOrder::stack(&columnar_refs);
|
||||
merge_columnar(
|
||||
&columnar_refs,
|
||||
&[],
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut out,
|
||||
).unwrap();
|
||||
|
||||
let merged_reader = ColumnarReader::open(out).unwrap();
|
||||
|
||||
// Merge the second set of columnars with the result of the first merge
|
||||
let mut columnars: Vec<ColumnarReader> = second_merge_columnars.iter()
|
||||
.map(build_columnar)
|
||||
.collect();
|
||||
columnars.push(merged_reader);
|
||||
let mut out = Vec::new();
|
||||
let columnar_refs: Vec<&ColumnarReader> = columnars.iter().collect();
|
||||
let stack_merge_order = StackMergeOrder::stack(&columnar_refs);
|
||||
merge_columnar(
|
||||
&columnar_refs,
|
||||
&[],
|
||||
MergeRowOrder::Stack(stack_merge_order),
|
||||
&mut out,
|
||||
).unwrap();
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::{fmt, io, mem};
|
||||
|
||||
use common::file_slice::FileSlice;
|
||||
use common::json_path_writer::JSON_PATH_SEGMENT_SEP;
|
||||
use common::BinarySerializable;
|
||||
use sstable::{Dictionary, RangeSSTable};
|
||||
|
||||
@@ -18,13 +19,13 @@ fn io_invalid_data(msg: String) -> io::Error {
|
||||
pub struct ColumnarReader {
|
||||
column_dictionary: Dictionary<RangeSSTable>,
|
||||
column_data: FileSlice,
|
||||
num_rows: RowId,
|
||||
num_docs: RowId,
|
||||
format_version: Version,
|
||||
}
|
||||
|
||||
impl fmt::Debug for ColumnarReader {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let num_rows = self.num_rows();
|
||||
let num_rows = self.num_docs();
|
||||
let columns = self.list_columns().unwrap();
|
||||
let num_cols = columns.len();
|
||||
let mut debug_struct = f.debug_struct("Columnar");
|
||||
@@ -76,6 +77,19 @@ fn read_all_columns_in_stream(
|
||||
Ok(results)
|
||||
}
|
||||
|
||||
fn column_dictionary_prefix_for_column_name(column_name: &str) -> String {
|
||||
// Each column is a associated to a given `column_key`,
|
||||
// that starts by `column_name\0column_header`.
|
||||
//
|
||||
// Listing the columns associated to the given column name is therefore equivalent to
|
||||
// listing `column_key` with the prefix `column_name\0`.
|
||||
format!("{}{}", column_name, '\0')
|
||||
}
|
||||
|
||||
fn column_dictionary_prefix_for_subpath(root_path: &str) -> String {
|
||||
format!("{}{}", root_path, JSON_PATH_SEGMENT_SEP as char)
|
||||
}
|
||||
|
||||
impl ColumnarReader {
|
||||
/// Opens a new Columnar file.
|
||||
pub fn open<F>(file_slice: F) -> io::Result<ColumnarReader>
|
||||
@@ -98,13 +112,13 @@ impl ColumnarReader {
|
||||
Ok(ColumnarReader {
|
||||
column_dictionary,
|
||||
column_data,
|
||||
num_rows,
|
||||
num_docs: num_rows,
|
||||
format_version,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn num_rows(&self) -> RowId {
|
||||
self.num_rows
|
||||
pub fn num_docs(&self) -> RowId {
|
||||
self.num_docs
|
||||
}
|
||||
// Iterate over the columns in a sorted way
|
||||
pub fn iter_columns(
|
||||
@@ -144,32 +158,14 @@ impl ColumnarReader {
|
||||
Ok(self.iter_columns()?.collect())
|
||||
}
|
||||
|
||||
fn stream_for_column_range(&self, column_name: &str) -> sstable::StreamerBuilder<RangeSSTable> {
|
||||
// Each column is a associated to a given `column_key`,
|
||||
// that starts by `column_name\0column_header`.
|
||||
//
|
||||
// Listing the columns associated to the given column name is therefore equivalent to
|
||||
// listing `column_key` with the prefix `column_name\0`.
|
||||
//
|
||||
// This is in turn equivalent to searching for the range
|
||||
// `[column_name,\0`..column_name\1)`.
|
||||
// TODO can we get some more generic `prefix(..)` logic in the dictionary.
|
||||
let mut start_key = column_name.to_string();
|
||||
start_key.push('\0');
|
||||
let mut end_key = column_name.to_string();
|
||||
end_key.push(1u8 as char);
|
||||
self.column_dictionary
|
||||
.range()
|
||||
.ge(start_key.as_bytes())
|
||||
.lt(end_key.as_bytes())
|
||||
}
|
||||
|
||||
pub async fn read_columns_async(
|
||||
&self,
|
||||
column_name: &str,
|
||||
) -> io::Result<Vec<DynamicColumnHandle>> {
|
||||
let prefix = column_dictionary_prefix_for_column_name(column_name);
|
||||
let stream = self
|
||||
.stream_for_column_range(column_name)
|
||||
.column_dictionary
|
||||
.prefix_range(prefix)
|
||||
.into_stream_async()
|
||||
.await?;
|
||||
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
|
||||
@@ -180,7 +176,35 @@ impl ColumnarReader {
|
||||
/// There can be more than one column associated to a given column name, provided they have
|
||||
/// different types.
|
||||
pub fn read_columns(&self, column_name: &str) -> io::Result<Vec<DynamicColumnHandle>> {
|
||||
let stream = self.stream_for_column_range(column_name).into_stream()?;
|
||||
let prefix = column_dictionary_prefix_for_column_name(column_name);
|
||||
let stream = self.column_dictionary.prefix_range(prefix).into_stream()?;
|
||||
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
|
||||
}
|
||||
|
||||
pub async fn read_subpath_columns_async(
|
||||
&self,
|
||||
root_path: &str,
|
||||
) -> io::Result<Vec<DynamicColumnHandle>> {
|
||||
let prefix = column_dictionary_prefix_for_subpath(root_path);
|
||||
let stream = self
|
||||
.column_dictionary
|
||||
.prefix_range(prefix)
|
||||
.into_stream_async()
|
||||
.await?;
|
||||
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
|
||||
}
|
||||
|
||||
/// Get all inner columns for a given JSON prefix, i.e columns for which the name starts
|
||||
/// with the prefix then contain the [`JSON_PATH_SEGMENT_SEP`].
|
||||
///
|
||||
/// There can be more than one column associated to each path within the JSON structure,
|
||||
/// provided they have different types.
|
||||
pub fn read_subpath_columns(&self, root_path: &str) -> io::Result<Vec<DynamicColumnHandle>> {
|
||||
let prefix = column_dictionary_prefix_for_subpath(root_path);
|
||||
let stream = self
|
||||
.column_dictionary
|
||||
.prefix_range(prefix.as_bytes())
|
||||
.into_stream()?;
|
||||
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
|
||||
}
|
||||
|
||||
@@ -192,6 +216,8 @@ impl ColumnarReader {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common::json_path_writer::JSON_PATH_SEGMENT_SEP;
|
||||
|
||||
use crate::{ColumnType, ColumnarReader, ColumnarWriter};
|
||||
|
||||
#[test]
|
||||
@@ -224,6 +250,64 @@ mod tests {
|
||||
assert_eq!(columns[0].1.column_type(), ColumnType::U64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_columns() {
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
columnar_writer.record_column_type("col", ColumnType::U64, false);
|
||||
columnar_writer.record_numerical(1, "col", 1u64);
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(2, &mut buffer).unwrap();
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
{
|
||||
let columns = columnar.read_columns("col").unwrap();
|
||||
assert_eq!(columns.len(), 1);
|
||||
assert_eq!(columns[0].column_type(), ColumnType::U64);
|
||||
}
|
||||
{
|
||||
let columns = columnar.read_columns("other").unwrap();
|
||||
assert_eq!(columns.len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_subpath_columns() {
|
||||
let mut columnar_writer = ColumnarWriter::default();
|
||||
columnar_writer.record_str(
|
||||
0,
|
||||
&format!("col1{}subcol1", JSON_PATH_SEGMENT_SEP as char),
|
||||
"hello",
|
||||
);
|
||||
columnar_writer.record_numerical(
|
||||
0,
|
||||
&format!("col1{}subcol2", JSON_PATH_SEGMENT_SEP as char),
|
||||
1i64,
|
||||
);
|
||||
columnar_writer.record_str(1, "col1", "hello");
|
||||
columnar_writer.record_str(0, "col2", "hello");
|
||||
let mut buffer = Vec::new();
|
||||
columnar_writer.serialize(2, &mut buffer).unwrap();
|
||||
|
||||
let columnar = ColumnarReader::open(buffer).unwrap();
|
||||
{
|
||||
let columns = columnar.read_subpath_columns("col1").unwrap();
|
||||
assert_eq!(columns.len(), 2);
|
||||
assert_eq!(columns[0].column_type(), ColumnType::Str);
|
||||
assert_eq!(columns[1].column_type(), ColumnType::I64);
|
||||
}
|
||||
{
|
||||
let columns = columnar.read_subpath_columns("col1.subcol1").unwrap();
|
||||
assert_eq!(columns.len(), 0);
|
||||
}
|
||||
{
|
||||
let columns = columnar.read_subpath_columns("col2").unwrap();
|
||||
assert_eq!(columns.len(), 0);
|
||||
}
|
||||
{
|
||||
let columns = columnar.read_subpath_columns("other").unwrap();
|
||||
assert_eq!(columns.len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "Input type forbidden")]
|
||||
fn test_list_columns_strict_typing_panics_on_wrong_types() {
|
||||
|
||||
@@ -285,7 +285,6 @@ impl ColumnarWriter {
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::DateTime, addr)),
|
||||
);
|
||||
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
|
||||
|
||||
let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
|
||||
let mut symbol_byte_buffer: Vec<u8> = Vec::new();
|
||||
for (column_name, column_type, addr) in columns {
|
||||
|
||||
@@ -380,7 +380,7 @@ fn assert_columnar_eq(
|
||||
right: &ColumnarReader,
|
||||
lenient_on_numerical_value: bool,
|
||||
) {
|
||||
assert_eq!(left.num_rows(), right.num_rows());
|
||||
assert_eq!(left.num_docs(), right.num_docs());
|
||||
let left_columns = left.list_columns().unwrap();
|
||||
let right_columns = right.list_columns().unwrap();
|
||||
assert_eq!(left_columns.len(), right_columns.len());
|
||||
@@ -588,7 +588,7 @@ proptest! {
|
||||
#[test]
|
||||
fn test_single_columnar_builder_proptest(docs in columnar_docs_strategy()) {
|
||||
let columnar = build_columnar(&docs[..]);
|
||||
assert_eq!(columnar.num_rows() as usize, docs.len());
|
||||
assert_eq!(columnar.num_docs() as usize, docs.len());
|
||||
let mut expected_columns: HashMap<(&str, ColumnTypeCategory), HashMap<u32, Vec<&ColumnValue>> > = Default::default();
|
||||
for (doc_id, doc_vals) in docs.iter().enumerate() {
|
||||
for (col_name, col_val) in doc_vals {
|
||||
@@ -715,6 +715,7 @@ fn test_columnar_merging_number_columns() {
|
||||
// TODO test required_columns
|
||||
// TODO document edge case: required_columns incompatible with values.
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn columnar_docs_and_remap(
|
||||
) -> impl Strategy<Value = (Vec<Vec<Vec<(&'static str, ColumnValue)>>>, Vec<RowAddr>)> {
|
||||
proptest::collection::vec(columnar_docs_strategy(), 2..=3).prop_flat_map(
|
||||
@@ -819,7 +820,7 @@ fn test_columnar_merge_empty() {
|
||||
)
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
assert_eq!(merged_columnar.num_rows(), 0);
|
||||
assert_eq!(merged_columnar.num_docs(), 0);
|
||||
assert_eq!(merged_columnar.num_columns(), 0);
|
||||
}
|
||||
|
||||
@@ -845,7 +846,7 @@ fn test_columnar_merge_single_str_column() {
|
||||
)
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
assert_eq!(merged_columnar.num_rows(), 1);
|
||||
assert_eq!(merged_columnar.num_docs(), 1);
|
||||
assert_eq!(merged_columnar.num_columns(), 1);
|
||||
}
|
||||
|
||||
@@ -877,7 +878,7 @@ fn test_delete_decrease_cardinality() {
|
||||
)
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
assert_eq!(merged_columnar.num_rows(), 1);
|
||||
assert_eq!(merged_columnar.num_docs(), 1);
|
||||
assert_eq!(merged_columnar.num_columns(), 1);
|
||||
let cols = merged_columnar.read_columns("c").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-common"
|
||||
version = "0.7.0"
|
||||
version = "0.9.0"
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2021"
|
||||
@@ -13,7 +13,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
ownedbytes = { version= "0.7", path="../ownedbytes" }
|
||||
ownedbytes = { version= "0.9", path="../ownedbytes" }
|
||||
async-trait = "0.1"
|
||||
time = { version = "0.3.10", features = ["serde-well-known"] }
|
||||
serde = { version = "1.0.136", features = ["derive"] }
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use std::fs::File;
|
||||
use std::ops::{Deref, Range, RangeBounds};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
@@ -177,6 +178,12 @@ fn combine_ranges<R: RangeBounds<usize>>(orig_range: Range<usize>, rel_range: R)
|
||||
}
|
||||
|
||||
impl FileSlice {
|
||||
/// Creates a FileSlice from a path.
|
||||
pub fn open(path: &Path) -> io::Result<FileSlice> {
|
||||
let wrap_file = WrapFile::new(File::open(path)?)?;
|
||||
Ok(FileSlice::new(Arc::new(wrap_file)))
|
||||
}
|
||||
|
||||
/// Wraps a FileHandle.
|
||||
pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {
|
||||
let num_bytes = file_handle.len();
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
name = "ownedbytes"
|
||||
version = "0.7.0"
|
||||
version = "0.9.0"
|
||||
edition = "2021"
|
||||
description = "Expose data as static slice"
|
||||
license = "MIT"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-query-grammar"
|
||||
version = "0.22.0"
|
||||
version = "0.24.0"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
categories = ["database-implementations", "data-structures"]
|
||||
@@ -13,3 +13,5 @@ edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
nom = "7"
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = "1.0.140"
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
use std::convert::Infallible;
|
||||
|
||||
use nom::{AsChar, IResult, InputLength, InputTakeAtPosition};
|
||||
use serde::Serialize;
|
||||
|
||||
pub(crate) type ErrorList = Vec<LenientErrorInternal>;
|
||||
pub(crate) type JResult<I, O> = IResult<I, (O, ErrorList), Infallible>;
|
||||
@@ -15,7 +16,8 @@ pub(crate) struct LenientErrorInternal {
|
||||
}
|
||||
|
||||
/// A recoverable error and the position it happened at
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug, PartialEq, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct LenientError {
|
||||
pub pos: usize,
|
||||
pub message: String,
|
||||
@@ -353,3 +355,21 @@ where
|
||||
{
|
||||
move |i: I| l.choice(i.clone()).unwrap_or_else(|| default.parse(i))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_lenient_error_serialization() {
|
||||
let error = LenientError {
|
||||
pos: 42,
|
||||
message: "test error message".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
serde_json::to_string(&error).unwrap(),
|
||||
"{\"pos\":42,\"message\":\"test error message\"}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
#![allow(clippy::derive_partial_eq_without_eq)]
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
mod infallible;
|
||||
mod occur;
|
||||
mod query_grammar;
|
||||
@@ -12,6 +14,8 @@ pub use crate::user_input_ast::{
|
||||
Delimiter, UserInputAst, UserInputBound, UserInputLeaf, UserInputLiteral,
|
||||
};
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct Error;
|
||||
|
||||
/// Parse a query
|
||||
@@ -24,3 +28,31 @@ pub fn parse_query(query: &str) -> Result<UserInputAst, Error> {
|
||||
pub fn parse_query_lenient(query: &str) -> (UserInputAst, Vec<LenientError>) {
|
||||
parse_to_ast_lenient(query)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{parse_query, parse_query_lenient};
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_serialization() {
|
||||
let ast = parse_query("title:hello OR title:x").unwrap();
|
||||
let json = serde_json::to_string(&ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"bool","clauses":[["should",{"type":"literal","field_name":"title","phrase":"hello","delimiter":"none","slop":0,"prefix":false}],["should",{"type":"literal","field_name":"title","phrase":"x","delimiter":"none","slop":0,"prefix":false}]]}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_wrong_query() {
|
||||
assert!(parse_query("title:").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_query_lenient_wrong_query() {
|
||||
let (_, errors) = parse_query_lenient("title:");
|
||||
assert!(errors.len() == 1);
|
||||
let json = serde_json::to_string(&errors).unwrap();
|
||||
assert_eq!(json, r#"[{"pos":6,"message":"expected word"}]"#);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
use std::fmt;
|
||||
use std::fmt::Write;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
/// Defines whether a term in a query must be present,
|
||||
/// should be present or must not be present.
|
||||
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
|
||||
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum Occur {
|
||||
/// For a given document to be considered for scoring,
|
||||
/// at least one of the queries with the Should or the Must
|
||||
|
||||
@@ -6,7 +6,7 @@ use nom::bytes::complete::tag;
|
||||
use nom::character::complete::{
|
||||
anychar, char, digit1, multispace0, multispace1, none_of, one_of, satisfy, u32,
|
||||
};
|
||||
use nom::combinator::{eof, map, map_res, not, opt, peek, recognize, value, verify};
|
||||
use nom::combinator::{eof, map, map_res, opt, peek, recognize, value, verify};
|
||||
use nom::error::{Error, ErrorKind};
|
||||
use nom::multi::{many0, many1, separated_list0};
|
||||
use nom::sequence::{delimited, preceded, separated_pair, terminated, tuple};
|
||||
@@ -20,7 +20,7 @@ use crate::Occur;
|
||||
// Note: the '-' char is only forbidden at the beginning of a field name; it would be clearer to add it to
// special characters.
|
||||
const SPECIAL_CHARS: &[char] = &[
|
||||
'+', '^', '`', ':', '{', '}', '"', '\'', '[', ']', '(', ')', '!', '\\', ' ',
|
||||
'+', '^', '`', ':', '{', '}', '"', '\'', '[', ']', '(', ')', '!', '\\', '*', ' ',
|
||||
];
|
||||
|
||||
/// consume a field name followed by colon. Return the field name with escape sequence
|
||||
@@ -321,7 +321,17 @@ fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
|
||||
UserInputLeaf::Exists {
|
||||
field: String::new(),
|
||||
},
|
||||
tuple((multispace0, char('*'))),
|
||||
tuple((
|
||||
multispace0,
|
||||
char('*'),
|
||||
peek(alt((
|
||||
value(
|
||||
"",
|
||||
satisfy(|c: char| c.is_whitespace() || ESCAPE_IN_WORD.contains(&c)),
|
||||
),
|
||||
eof,
|
||||
))),
|
||||
)),
|
||||
)(inp)
|
||||
}
|
||||
|
||||
@@ -331,7 +341,14 @@ fn exists_precond(inp: &str) -> IResult<&str, (), ()> {
|
||||
peek(tuple((
|
||||
field_name,
|
||||
multispace0,
|
||||
char('*'), // when we are here, we know it can't be anything but a exists
|
||||
char('*'),
|
||||
peek(alt((
|
||||
value(
|
||||
"",
|
||||
satisfy(|c: char| c.is_whitespace() || ESCAPE_IN_WORD.contains(&c)),
|
||||
),
|
||||
eof,
|
||||
))), // we need to check this isn't a wildcard query
|
||||
))),
|
||||
)(inp)
|
||||
.map_err(|e| e.map(|_| ()))
|
||||
@@ -679,10 +696,7 @@ fn negate(expr: UserInputAst) -> UserInputAst {
|
||||
fn leaf(inp: &str) -> IResult<&str, UserInputAst> {
|
||||
alt((
|
||||
delimited(char('('), ast, char(')')),
|
||||
preceded(
|
||||
peek(not(tag("*:"))),
|
||||
map(char('*'), |_| UserInputAst::from(UserInputLeaf::All)),
|
||||
),
|
||||
map(char('*'), |_| UserInputAst::from(UserInputLeaf::All)),
|
||||
map(preceded(tuple((tag("NOT"), multispace1)), leaf), negate),
|
||||
literal,
|
||||
))(inp)
|
||||
@@ -703,13 +717,7 @@ fn leaf_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>> {
|
||||
),
|
||||
),
|
||||
(
|
||||
value(
|
||||
(),
|
||||
preceded(
|
||||
peek(not(tag("*:"))), // Fail if `*:` is detected
|
||||
char('*'), // Match standalone `*`
|
||||
),
|
||||
),
|
||||
value((), char('*')),
|
||||
map(nothing, |_| {
|
||||
(Some(UserInputAst::from(UserInputLeaf::All)), Vec::new())
|
||||
}),
|
||||
@@ -1231,7 +1239,6 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_field_name() {
|
||||
assert_eq!(super::field_name("*:a"), Ok(("a", "*".to_string())));
|
||||
assert_eq!(
|
||||
super::field_name(".my.field.name:a"),
|
||||
Ok(("a", ".my.field.name".to_string()))
|
||||
@@ -1537,11 +1544,6 @@ mod test {
|
||||
test_parse_query_to_ast_helper("abc:toto", "\"abc\":toto");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn all_field_star() {
|
||||
test_parse_query_to_ast_helper("*:toto", "\"*\":toto");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_phrase_with_field() {
|
||||
test_parse_query_to_ast_helper("abc:\"happy tax payer\"", "\"abc\":\"happy tax payer\"");
|
||||
@@ -1639,13 +1641,19 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_exist_query() {
|
||||
test_parse_query_to_ast_helper("a:*", "\"a\":*");
|
||||
test_parse_query_to_ast_helper("a: *", "\"a\":*");
|
||||
// an exists query followed by a default term "b"
|
||||
test_is_parse_err("a:*b", "(*\"a\":* *b)");
|
||||
test_parse_query_to_ast_helper("a:*", "$exists(\"a\")");
|
||||
test_parse_query_to_ast_helper("a: *", "$exists(\"a\")");
|
||||
|
||||
// this is a term query (not a phrase prefix)
|
||||
test_parse_query_to_ast_helper(
|
||||
"(hello AND toto:*) OR happy",
|
||||
"(?(+hello +$exists(\"toto\")) ?happy)",
|
||||
);
|
||||
test_parse_query_to_ast_helper("(a:*)", "$exists(\"a\")");
|
||||
|
||||
// these are term/wildcard query (not a phrase prefix)
|
||||
test_parse_query_to_ast_helper("a:b*", "\"a\":b*");
|
||||
test_parse_query_to_ast_helper("a:*b", "\"a\":*b");
|
||||
test_parse_query_to_ast_helper(r#"a:*def*"#, "\"a\":*def*");
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
use std::fmt;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::Occur;
|
||||
|
||||
#[derive(PartialEq, Clone)]
|
||||
#[derive(PartialEq, Clone, Serialize)]
|
||||
#[serde(tag = "type")]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum UserInputLeaf {
|
||||
Literal(UserInputLiteral),
|
||||
All,
|
||||
@@ -101,20 +105,22 @@ impl Debug for UserInputLeaf {
|
||||
}
|
||||
UserInputLeaf::All => write!(formatter, "*"),
|
||||
UserInputLeaf::Exists { field } => {
|
||||
write!(formatter, "\"{field}\":*")
|
||||
write!(formatter, "$exists(\"{field}\")")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum Delimiter {
|
||||
SingleQuotes,
|
||||
DoubleQuotes,
|
||||
None,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Clone)]
|
||||
#[derive(PartialEq, Clone, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub struct UserInputLiteral {
|
||||
pub field_name: Option<String>,
|
||||
pub phrase: String,
|
||||
@@ -152,7 +158,9 @@ impl fmt::Debug for UserInputLiteral {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Clone)]
|
||||
#[derive(PartialEq, Debug, Clone, Serialize)]
|
||||
#[serde(tag = "type", content = "value")]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum UserInputBound {
|
||||
Inclusive(String),
|
||||
Exclusive(String),
|
||||
@@ -187,11 +195,38 @@ impl UserInputBound {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Clone)]
|
||||
#[derive(PartialEq, Clone, Serialize)]
|
||||
#[serde(into = "UserInputAstSerde")]
|
||||
pub enum UserInputAst {
|
||||
Clause(Vec<(Option<Occur>, UserInputAst)>),
|
||||
Leaf(Box<UserInputLeaf>),
|
||||
Boost(Box<UserInputAst>, f64),
|
||||
Leaf(Box<UserInputLeaf>),
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
enum UserInputAstSerde {
|
||||
Bool {
|
||||
clauses: Vec<(Option<Occur>, UserInputAst)>,
|
||||
},
|
||||
Boost {
|
||||
underlying: Box<UserInputAst>,
|
||||
boost: f64,
|
||||
},
|
||||
#[serde(untagged)]
|
||||
Leaf(Box<UserInputLeaf>),
|
||||
}
|
||||
|
||||
impl From<UserInputAst> for UserInputAstSerde {
|
||||
fn from(ast: UserInputAst) -> Self {
|
||||
match ast {
|
||||
UserInputAst::Clause(clause) => UserInputAstSerde::Bool { clauses: clause },
|
||||
UserInputAst::Boost(underlying, boost) => {
|
||||
UserInputAstSerde::Boost { underlying, boost }
|
||||
}
|
||||
UserInputAst::Leaf(leaf) => UserInputAstSerde::Leaf(leaf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UserInputAst {
|
||||
@@ -285,3 +320,126 @@ impl fmt::Debug for UserInputAst {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_all_leaf_serialization() {
|
||||
let ast = UserInputAst::Leaf(Box::new(UserInputLeaf::All));
|
||||
let json = serde_json::to_string(&ast).unwrap();
|
||||
assert_eq!(json, r#"{"type":"all"}"#);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_literal_leaf_serialization() {
|
||||
let literal = UserInputLiteral {
|
||||
field_name: Some("title".to_string()),
|
||||
phrase: "hello".to_string(),
|
||||
delimiter: Delimiter::None,
|
||||
slop: 0,
|
||||
prefix: false,
|
||||
};
|
||||
let ast = UserInputAst::Leaf(Box::new(UserInputLeaf::Literal(literal)));
|
||||
let json = serde_json::to_string(&ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"literal","field_name":"title","phrase":"hello","delimiter":"none","slop":0,"prefix":false}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_leaf_serialization() {
|
||||
let range = UserInputLeaf::Range {
|
||||
field: Some("price".to_string()),
|
||||
lower: UserInputBound::Inclusive("10".to_string()),
|
||||
upper: UserInputBound::Exclusive("100".to_string()),
|
||||
};
|
||||
let ast = UserInputAst::Leaf(Box::new(range));
|
||||
let json = serde_json::to_string(&ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"range","field":"price","lower":{"type":"inclusive","value":"10"},"upper":{"type":"exclusive","value":"100"}}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_leaf_unbounded_serialization() {
|
||||
let range = UserInputLeaf::Range {
|
||||
field: Some("price".to_string()),
|
||||
lower: UserInputBound::Inclusive("10".to_string()),
|
||||
upper: UserInputBound::Unbounded,
|
||||
};
|
||||
let ast = UserInputAst::Leaf(Box::new(range));
|
||||
let json = serde_json::to_string(&ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"range","field":"price","lower":{"type":"inclusive","value":"10"},"upper":{"type":"unbounded"}}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_boost_serialization() {
|
||||
let inner_ast = UserInputAst::Leaf(Box::new(UserInputLeaf::All));
|
||||
let boost_ast = UserInputAst::Boost(Box::new(inner_ast), 2.5);
|
||||
let json = serde_json::to_string(&boost_ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"boost","underlying":{"type":"all"},"boost":2.5}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_boost_serialization2() {
|
||||
let boost_ast = UserInputAst::Boost(
|
||||
Box::new(UserInputAst::Clause(vec![
|
||||
(
|
||||
Some(Occur::Must),
|
||||
UserInputAst::Leaf(Box::new(UserInputLeaf::All)),
|
||||
),
|
||||
(
|
||||
Some(Occur::Should),
|
||||
UserInputAst::Leaf(Box::new(UserInputLeaf::Literal(UserInputLiteral {
|
||||
field_name: Some("title".to_string()),
|
||||
phrase: "hello".to_string(),
|
||||
delimiter: Delimiter::None,
|
||||
slop: 0,
|
||||
prefix: false,
|
||||
}))),
|
||||
),
|
||||
])),
|
||||
2.5,
|
||||
);
|
||||
let json = serde_json::to_string(&boost_ast).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"boost","underlying":{"type":"bool","clauses":[["must",{"type":"all"}],["should",{"type":"literal","field_name":"title","phrase":"hello","delimiter":"none","slop":0,"prefix":false}]]},"boost":2.5}"#
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clause_serialization() {
|
||||
let clause = UserInputAst::Clause(vec![
|
||||
(
|
||||
Some(Occur::Must),
|
||||
UserInputAst::Leaf(Box::new(UserInputLeaf::All)),
|
||||
),
|
||||
(
|
||||
Some(Occur::Should),
|
||||
UserInputAst::Leaf(Box::new(UserInputLeaf::Literal(UserInputLiteral {
|
||||
field_name: Some("title".to_string()),
|
||||
phrase: "hello".to_string(),
|
||||
delimiter: Delimiter::None,
|
||||
slop: 0,
|
||||
prefix: false,
|
||||
}))),
|
||||
),
|
||||
]);
|
||||
let json = serde_json::to_string(&clause).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"type":"bool","clauses":[["must",{"type":"all"}],["should",{"type":"literal","field_name":"title","phrase":"hello","delimiter":"none","slop":0,"prefix":false}]]}"#
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -271,10 +271,6 @@ impl AggregationWithAccessor {
|
||||
field: ref field_name,
|
||||
..
|
||||
})
|
||||
| Count(CountAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
})
|
||||
| Max(MaxAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
@@ -299,6 +295,24 @@ impl AggregationWithAccessor {
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
Count(CountAggregation {
|
||||
field: ref field_name,
|
||||
..
|
||||
}) => {
|
||||
let allowed_column_types = [
|
||||
ColumnType::I64,
|
||||
ColumnType::U64,
|
||||
ColumnType::F64,
|
||||
ColumnType::Str,
|
||||
ColumnType::DateTime,
|
||||
ColumnType::Bool,
|
||||
ColumnType::IpAddr,
|
||||
// ColumnType::Bytes Unsupported
|
||||
];
|
||||
let (accessor, column_type) =
|
||||
get_ff_reader(reader, field_name, Some(&allowed_column_types))?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
Percentiles(ref percentiles) => {
|
||||
let (accessor, column_type) = get_ff_reader(
|
||||
reader,
|
||||
|
||||
@@ -34,10 +34,10 @@ use crate::aggregation::*;
|
||||
pub struct DateHistogramAggregationReq {
|
||||
#[doc(hidden)]
|
||||
/// Only for validation
|
||||
interval: Option<String>,
|
||||
pub interval: Option<String>,
|
||||
#[doc(hidden)]
|
||||
/// Only for validation
|
||||
calendar_interval: Option<String>,
|
||||
pub calendar_interval: Option<String>,
|
||||
/// The field to aggregate on.
|
||||
pub field: String,
|
||||
/// The format used to format dates. Currently unsupported.
|
||||
|
||||
@@ -220,9 +220,23 @@ impl SegmentStatsCollector {
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &agg_accessor.accessor);
|
||||
}
|
||||
for val in agg_accessor.column_block_accessor.iter_vals() {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.stats.collect(val1);
|
||||
if [
|
||||
ColumnType::I64,
|
||||
ColumnType::U64,
|
||||
ColumnType::F64,
|
||||
ColumnType::DateTime,
|
||||
]
|
||||
.contains(&self.field_type)
|
||||
{
|
||||
for val in agg_accessor.column_block_accessor.iter_vals() {
|
||||
let val1 = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.stats.collect(val1);
|
||||
}
|
||||
} else {
|
||||
for _val in agg_accessor.column_block_accessor.iter_vals() {
|
||||
// we ignore the value and simply record that we got something
|
||||
self.stats.collect(0.0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
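// Hedged sketch of what the change above enables: `value_count` can now target
// non-numeric fast fields (Str, Bool, IpAddr, ...), in which case each value is simply
// counted. The request mirrors the "count_str" aggregation in the test below; the
// "text" field name and using `Default::default()` for the aggregation limits are
// assumptions.
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;

fn count_text_values(searcher: &tantivy::Searcher) -> tantivy::Result<serde_json::Value> {
    let aggs: Aggregations = serde_json::from_value(serde_json::json!({
        "count_str": { "value_count": { "field": "text" } }
    }))
    .expect("valid aggregation request");
    let collector = AggregationCollector::from_aggs(aggs, Default::default());
    let agg_res = searcher.search(&AllQuery, &collector)?;
    Ok(serde_json::to_value(agg_res).expect("aggregation results serialize to JSON"))
}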
@@ -435,6 +449,11 @@ mod tests {
|
||||
"field": "score",
|
||||
},
|
||||
},
|
||||
"count_str": {
|
||||
"value_count": {
|
||||
"field": "text",
|
||||
},
|
||||
},
|
||||
"range": range_agg
|
||||
}))
|
||||
.unwrap();
|
||||
@@ -500,6 +519,13 @@ mod tests {
|
||||
})
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
res["count_str"],
|
||||
json!({
|
||||
"value": 7.0,
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -366,8 +366,12 @@ impl PartialEq for Key {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
match (self, other) {
|
||||
(Self::Str(l), Self::Str(r)) => l == r,
|
||||
(Self::F64(l), Self::F64(r)) => l == r,
|
||||
_ => false,
|
||||
(Self::F64(l), Self::F64(r)) => l.to_bits() == r.to_bits(),
|
||||
(Self::I64(l), Self::I64(r)) => l == r,
|
||||
(Self::U64(l), Self::U64(r)) => l == r,
|
||||
// We list all variants of the left operand to make sure this gets updated when we add
// variants to the enum.
(Self::Str(_) | Self::F64(_) | Self::I64(_) | Self::U64(_), _) => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
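// Why the f64 keys are now compared by bit pattern: with `to_bits`, a NaN key is equal to
// itself (so it can be grouped and deduplicated), while +0.0 and -0.0 are considered
// distinct. Minimal illustration; `tantivy::aggregation::Key` is assumed to be the enum
// whose `PartialEq` impl is shown above.
#[test]
fn f64_key_bitwise_eq_sketch() {
    use tantivy::aggregation::Key;
    assert!(Key::F64(f64::NAN) == Key::F64(f64::NAN));
    assert!(Key::F64(0.0) != Key::F64(-0.0));
}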
@@ -578,7 +582,7 @@ mod tests {
|
||||
.set_indexing_options(
|
||||
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
|
||||
)
|
||||
.set_fast(None)
|
||||
.set_fast(Some("raw"))
|
||||
.set_stored();
|
||||
let text_field = schema_builder.add_text_field("text", text_fieldtype);
|
||||
let date_field = schema_builder.add_date_field("date", FAST);
|
||||
|
||||
@@ -786,7 +786,7 @@ impl<Score, D, const R: bool> From<TopNComputerDeser<Score, D, R>> for TopNCompu
|
||||
}
|
||||
}
|
||||
|
||||
impl<Score, D, const R: bool> TopNComputer<Score, D, R>
|
||||
impl<Score, D, const REVERSE_ORDER: bool> TopNComputer<Score, D, REVERSE_ORDER>
|
||||
where
|
||||
Score: PartialOrd + Clone,
|
||||
D: Ord,
|
||||
@@ -807,7 +807,10 @@ where
|
||||
#[inline]
|
||||
pub fn push(&mut self, feature: Score, doc: D) {
|
||||
if let Some(last_median) = self.threshold.clone() {
|
||||
if feature < last_median {
|
||||
if !REVERSE_ORDER && feature > last_median {
|
||||
return;
|
||||
}
|
||||
if REVERSE_ORDER && feature < last_median {
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -842,7 +845,7 @@ where
|
||||
}
|
||||
|
||||
/// Returns the top n elements in sorted order.
|
||||
pub fn into_sorted_vec(mut self) -> Vec<ComparableDoc<Score, D, R>> {
|
||||
pub fn into_sorted_vec(mut self) -> Vec<ComparableDoc<Score, D, REVERSE_ORDER>> {
|
||||
if self.buffer.len() > self.top_n {
|
||||
self.truncate_top_n();
|
||||
}
|
||||
@@ -853,7 +856,7 @@ where
|
||||
/// Returns the top n elements in stored order.
|
||||
/// Useful if you do not need the elements in sorted order,
|
||||
/// for example when merging the results of multiple segments.
|
||||
pub fn into_vec(mut self) -> Vec<ComparableDoc<Score, D, R>> {
|
||||
pub fn into_vec(mut self) -> Vec<ComparableDoc<Score, D, REVERSE_ORDER>> {
|
||||
if self.buffer.len() > self.top_n {
|
||||
self.truncate_top_n();
|
||||
}
|
||||
@@ -863,9 +866,11 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use proptest::prelude::*;
|
||||
|
||||
use super::{TopDocs, TopNComputer};
|
||||
use crate::collector::top_collector::ComparableDoc;
|
||||
use crate::collector::Collector;
|
||||
use crate::collector::{Collector, DocSetCollector};
|
||||
use crate::query::{AllQuery, Query, QueryParser};
|
||||
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
@@ -960,6 +965,44 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_topn_computer_asc_prop(
|
||||
limit in 0..10_usize,
|
||||
docs in proptest::collection::vec((0..100_u64, 0..100_u64), 0..100_usize),
|
||||
) {
|
||||
let mut computer: TopNComputer<_, _, false> = TopNComputer::new(limit);
|
||||
for (feature, doc) in &docs {
|
||||
computer.push(*feature, *doc);
|
||||
}
|
||||
let mut comparable_docs = docs.into_iter().map(|(feature, doc)| ComparableDoc { feature, doc }).collect::<Vec<_>>();
|
||||
comparable_docs.sort();
|
||||
comparable_docs.truncate(limit);
|
||||
prop_assert_eq!(
|
||||
computer.into_sorted_vec(),
|
||||
comparable_docs,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_topn_computer_desc_prop(
|
||||
limit in 0..10_usize,
|
||||
docs in proptest::collection::vec((0..100_u64, 0..100_u64), 0..100_usize),
|
||||
) {
|
||||
let mut computer: TopNComputer<_, _, true> = TopNComputer::new(limit);
|
||||
for (feature, doc) in &docs {
|
||||
computer.push(*feature, *doc);
|
||||
}
|
||||
let mut comparable_docs = docs.into_iter().map(|(feature, doc)| ComparableDoc { feature, doc }).collect::<Vec<_>>();
|
||||
comparable_docs.sort();
|
||||
comparable_docs.truncate(limit);
|
||||
prop_assert_eq!(
|
||||
computer.into_sorted_vec(),
|
||||
comparable_docs,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_top_collector_not_at_capacity_without_offset() -> crate::Result<()> {
|
||||
let index = make_index()?;
|
||||
@@ -1373,4 +1416,29 @@ mod tests {
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_topn_computer_asc() {
|
||||
let mut computer: TopNComputer<u32, u32, false> = TopNComputer::new(2);
|
||||
|
||||
computer.push(1u32, 1u32);
|
||||
computer.push(2u32, 2u32);
|
||||
computer.push(3u32, 3u32);
|
||||
computer.push(2u32, 4u32);
|
||||
computer.push(4u32, 5u32);
|
||||
computer.push(1u32, 6u32);
|
||||
assert_eq!(
|
||||
computer.into_sorted_vec(),
|
||||
&[
|
||||
ComparableDoc {
|
||||
feature: 1u32,
|
||||
doc: 1u32,
|
||||
},
|
||||
ComparableDoc {
|
||||
feature: 1u32,
|
||||
doc: 6u32,
|
||||
}
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,16 +41,12 @@ impl Executor {
|
||||
///
|
||||
/// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
|
||||
/// will propagate to the caller.
|
||||
pub fn map<
|
||||
pub fn map<A, R, F>(&self, f: F, args: impl Iterator<Item = A>) -> crate::Result<Vec<R>>
|
||||
where
|
||||
A: Send,
|
||||
R: Send,
|
||||
AIterator: Iterator<Item = A>,
|
||||
F: Sized + Sync + Fn(A) -> crate::Result<R>,
|
||||
>(
|
||||
&self,
|
||||
f: F,
|
||||
args: AIterator,
|
||||
) -> crate::Result<Vec<R>> {
|
||||
{
|
||||
match self {
|
||||
Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(),
|
||||
Executor::ThreadPool(pool) => {
|
||||
|
||||
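// Usage sketch for the reworked `Executor::map` signature above: the same closure runs
// either inline (`SingleThread`) or on the pool (`ThreadPool`), and failures propagate to
// the caller. The `tantivy::Executor::single_thread()` constructor and the root re-export
// are assumptions based on current tantivy.
fn double_all() -> tantivy::Result<Vec<u64>> {
    let executor = tantivy::Executor::single_thread();
    // `args` is any iterator; results come back in input order.
    executor.map(|i: u64| Ok(i * 2), 0..4u64)
}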
@@ -1,3 +1,9 @@
|
||||
//! The footer is a small metadata structure that is appended at the end of every file.
|
||||
//!
|
||||
//! The footer is used to store a checksum of the file content.
|
||||
//! The footer also stores the version of the index format.
|
||||
//! This version is used to detect incompatibility between the index and the library version.
|
||||
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
@@ -20,20 +26,22 @@ type CrcHashU32 = u32;
|
||||
/// A Footer is appended to every file
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Footer {
|
||||
/// The version of the index format
|
||||
pub version: Version,
|
||||
/// The crc32 hash of the body
|
||||
pub crc: CrcHashU32,
|
||||
}
|
||||
|
||||
impl Footer {
|
||||
pub fn new(crc: CrcHashU32) -> Self {
|
||||
pub(crate) fn new(crc: CrcHashU32) -> Self {
|
||||
let version = crate::VERSION.clone();
|
||||
Footer { version, crc }
|
||||
}
|
||||
|
||||
pub fn crc(&self) -> CrcHashU32 {
|
||||
pub(crate) fn crc(&self) -> CrcHashU32 {
|
||||
self.crc
|
||||
}
|
||||
pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
|
||||
pub(crate) fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
|
||||
let mut counting_write = CountingWriter::wrap(&mut write);
|
||||
counting_write.write_all(serde_json::to_string(&self)?.as_ref())?;
|
||||
let footer_payload_len = counting_write.written_bytes();
|
||||
@@ -42,6 +50,7 @@ impl Footer {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Extracts the tantivy Footer from the file and returns the footer and the rest of the file
|
||||
pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> {
|
||||
if file.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
|
||||
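// Sketch: inspecting the footer of a tantivy file now that `FileSlice::open` exists and the
// footer module is public (`pub mod footer`, next hunk). The re-export paths
// (`tantivy::directory::FileSlice`, `tantivy::directory::footer::Footer`) are assumptions.
use std::path::Path;

fn print_footer_version(path: &Path) -> std::io::Result<()> {
    let file = tantivy::directory::FileSlice::open(path)?;
    let (footer, _body) = tantivy::directory::footer::Footer::extract_footer(file)?;
    println!("index format version: {:?}", footer.version);
    Ok(())
}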
@@ -6,7 +6,7 @@ mod mmap_directory;
|
||||
mod directory;
|
||||
mod directory_lock;
|
||||
mod file_watcher;
|
||||
mod footer;
|
||||
pub mod footer;
|
||||
mod managed_directory;
|
||||
mod ram_directory;
|
||||
mod watch_event_router;
|
||||
|
||||
@@ -942,7 +942,7 @@ mod tests {
|
||||
|
||||
let numbers = [100, 200, 300];
|
||||
let test_range = |range: RangeInclusive<u64>| {
|
||||
let expected_count = numbers.iter().filter(|num| range.contains(num)).count();
|
||||
let expected_count = numbers.iter().filter(|num| range.contains(*num)).count();
|
||||
let mut vec = vec![];
|
||||
field.get_row_ids_for_value_range(range, 0..u32::MAX, &mut vec);
|
||||
assert_eq!(vec.len(), expected_count);
|
||||
@@ -1020,7 +1020,7 @@ mod tests {
|
||||
|
||||
let numbers = [1000, 1001, 1003];
|
||||
let test_range = |range: RangeInclusive<u64>| {
|
||||
let expected_count = numbers.iter().filter(|num| range.contains(num)).count();
|
||||
let expected_count = numbers.iter().filter(|num| range.contains(*num)).count();
|
||||
let mut vec = vec![];
|
||||
field.get_row_ids_for_value_range(range, 0..u32::MAX, &mut vec);
|
||||
assert_eq!(vec.len(), expected_count);
|
||||
|
||||
@@ -217,7 +217,7 @@ impl FastFieldReaders {
|
||||
Ok(dynamic_column.into())
|
||||
}
|
||||
|
||||
/// Returning a `dynamic_column_handle`.
|
||||
/// Returns a `dynamic_column_handle`.
|
||||
pub fn dynamic_column_handle(
|
||||
&self,
|
||||
field_name: &str,
|
||||
@@ -234,7 +234,7 @@ impl FastFieldReaders {
|
||||
Ok(dynamic_column_handle_opt)
|
||||
}
|
||||
|
||||
/// Returning all `dynamic_column_handle`.
|
||||
/// Returns all `dynamic_column_handle` that match the given field name.
|
||||
pub fn dynamic_column_handles(
|
||||
&self,
|
||||
field_name: &str,
|
||||
@@ -250,6 +250,22 @@ impl FastFieldReaders {
|
||||
Ok(dynamic_column_handles)
|
||||
}
|
||||
|
||||
/// Returns all `dynamic_column_handle` that are inner fields of the provided JSON path.
|
||||
pub fn dynamic_subpath_column_handles(
|
||||
&self,
|
||||
root_path: &str,
|
||||
) -> crate::Result<Vec<DynamicColumnHandle>> {
|
||||
let Some(resolved_field_name) = self.resolve_field(root_path)? else {
|
||||
return Ok(Vec::new());
|
||||
};
|
||||
let dynamic_column_handles = self
|
||||
.columnar
|
||||
.read_subpath_columns(&resolved_field_name)?
|
||||
.into_iter()
|
||||
.collect();
|
||||
Ok(dynamic_column_handles)
|
||||
}
|
||||
|
||||
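// Sketch: listing the fast-field columns nested under a JSON path with the new
// `dynamic_subpath_column_handles`. This mirrors the `test_json_fast` test later in this
// diff; the "json" field name is an assumption.
fn subcolumns_under_json(segment_reader: &tantivy::SegmentReader) -> tantivy::Result<usize> {
    let handles = segment_reader
        .fast_fields()
        .dynamic_subpath_column_handles("json")?;
    // One handle per (sub-path, column type) pair found strictly below "json".
    Ok(handles.len())
}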
#[doc(hidden)]
|
||||
pub async fn list_dynamic_column_handles(
|
||||
&self,
|
||||
@@ -265,6 +281,21 @@ impl FastFieldReaders {
|
||||
Ok(columns)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub async fn list_subpath_dynamic_column_handles(
|
||||
&self,
|
||||
root_path: &str,
|
||||
) -> crate::Result<Vec<DynamicColumnHandle>> {
|
||||
let Some(resolved_field_name) = self.resolve_field(root_path)? else {
|
||||
return Ok(Vec::new());
|
||||
};
|
||||
let columns = self
|
||||
.columnar
|
||||
.read_subpath_columns_async(&resolved_field_name)
|
||||
.await?;
|
||||
Ok(columns)
|
||||
}
|
||||
|
||||
/// Returns the `u64` column used to represent any `u64`-mapped typed (String/Bytes term ids,
|
||||
/// i64, u64, f64, DateTime).
|
||||
///
|
||||
@@ -476,6 +507,15 @@ mod tests {
|
||||
.iter()
|
||||
.any(|column| column.column_type() == ColumnType::Str));
|
||||
|
||||
println!("*** {:?}", fast_fields.columnar().list_columns());
|
||||
let json_columns = fast_fields.dynamic_column_handles("json").unwrap();
|
||||
assert_eq!(json_columns.len(), 0);
|
||||
|
||||
let json_subcolumns = fast_fields.dynamic_subpath_column_handles("json").unwrap();
|
||||
assert_eq!(json_subcolumns.len(), 3);
|
||||
|
||||
let foo_subcolumns = fast_fields
|
||||
.dynamic_subpath_column_handles("json.foo")
|
||||
.unwrap();
|
||||
assert_eq!(foo_subcolumns.len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,9 @@ use crate::directory::MmapDirectory;
|
||||
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
|
||||
use crate::error::{DataCorruption, TantivyError};
|
||||
use crate::index::{IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory};
|
||||
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_BUDGET_NUM_BYTES_MIN};
|
||||
use crate::indexer::index_writer::{
|
||||
IndexWriterOptions, MAX_NUM_THREAD, MEMORY_BUDGET_NUM_BYTES_MIN,
|
||||
};
|
||||
use crate::indexer::segment_updater::save_metas;
|
||||
use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
|
||||
use crate::reader::{IndexReader, IndexReaderBuilder};
|
||||
@@ -519,6 +521,43 @@ impl Index {
|
||||
load_metas(self.directory(), &self.inventory)
|
||||
}
|
||||
|
||||
/// Open a new index writer with the given options. Attempts to acquire a lockfile.
|
||||
///
|
||||
/// The lockfile should be deleted on drop, but it is possible
|
||||
/// that due to a panic or other error, a stale lockfile will be
|
||||
/// left in the index directory. If you are sure that no other
|
||||
/// `IndexWriter` on the system is accessing the index directory,
|
||||
/// it is safe to manually delete the lockfile.
|
||||
///
|
||||
/// - `options` defines the writer configuration which includes things like buffer sizes,
|
||||
/// indexer threads, etc...
|
||||
///
|
||||
/// # Errors
|
||||
/// If the lockfile already exists, returns `TantivyError::LockFailure`.
|
||||
/// If the memory arena per thread is too small or too big, returns
|
||||
/// `TantivyError::InvalidArgument`
|
||||
pub fn writer_with_options<D: Document>(
|
||||
&self,
|
||||
options: IndexWriterOptions,
|
||||
) -> crate::Result<IndexWriter<D>> {
|
||||
let directory_lock = self
|
||||
.directory
|
||||
.acquire_lock(&INDEX_WRITER_LOCK)
|
||||
.map_err(|err| {
|
||||
TantivyError::LockFailure(
|
||||
err,
|
||||
Some(
|
||||
"Failed to acquire index lock. If you are using a regular directory, this \
|
||||
means there is already an `IndexWriter` working on this `Directory`, in \
|
||||
this process or in a different process."
|
||||
.to_string(),
|
||||
),
|
||||
)
|
||||
})?;
|
||||
|
||||
IndexWriter::new(self, options, directory_lock)
|
||||
}
|
||||
|
||||
/// Open a new index writer. Attempts to acquire a lockfile.
|
||||
///
|
||||
/// The lockfile should be deleted on drop, but it is possible
|
||||
@@ -543,27 +582,12 @@ impl Index {
|
||||
num_threads: usize,
|
||||
overall_memory_budget_in_bytes: usize,
|
||||
) -> crate::Result<IndexWriter<D>> {
|
||||
let directory_lock = self
|
||||
.directory
|
||||
.acquire_lock(&INDEX_WRITER_LOCK)
|
||||
.map_err(|err| {
|
||||
TantivyError::LockFailure(
|
||||
err,
|
||||
Some(
|
||||
"Failed to acquire index lock. If you are using a regular directory, this \
|
||||
means there is already an `IndexWriter` working on this `Directory`, in \
|
||||
this process or in a different process."
|
||||
.to_string(),
|
||||
),
|
||||
)
|
||||
})?;
|
||||
let memory_arena_in_bytes_per_thread = overall_memory_budget_in_bytes / num_threads;
|
||||
IndexWriter::new(
|
||||
self,
|
||||
num_threads,
|
||||
memory_arena_in_bytes_per_thread,
|
||||
directory_lock,
|
||||
)
|
||||
let options = IndexWriterOptions::builder()
|
||||
.num_worker_threads(num_threads)
|
||||
.memory_budget_per_thread(memory_arena_in_bytes_per_thread)
|
||||
.build();
|
||||
self.writer_with_options(options)
|
||||
}
|
||||
|
||||
/// Helper to create an index writer for tests.
|
||||
|
||||
@@ -3,6 +3,12 @@ use std::io;
|
||||
use common::json_path_writer::JSON_END_OF_PATH;
|
||||
use common::BinarySerializable;
|
||||
use fnv::FnvHashSet;
|
||||
#[cfg(feature = "quickwit")]
|
||||
use futures_util::{FutureExt, StreamExt, TryStreamExt};
|
||||
#[cfg(feature = "quickwit")]
|
||||
use itertools::Itertools;
|
||||
#[cfg(feature = "quickwit")]
|
||||
use tantivy_fst::automaton::{AlwaysMatch, Automaton};
|
||||
|
||||
use crate::directory::FileSlice;
|
||||
use crate::positions::PositionReader;
|
||||
@@ -219,13 +225,18 @@ impl InvertedIndexReader {
|
||||
self.termdict.get_async(term.serialized_value_bytes()).await
|
||||
}
|
||||
|
||||
async fn get_term_range_async(
|
||||
&self,
|
||||
async fn get_term_range_async<'a, A: Automaton + 'a>(
|
||||
&'a self,
|
||||
terms: impl std::ops::RangeBounds<Term>,
|
||||
automaton: A,
|
||||
limit: Option<u64>,
|
||||
) -> io::Result<impl Iterator<Item = TermInfo> + '_> {
|
||||
merge_holes_under_bytes: usize,
|
||||
) -> io::Result<impl Iterator<Item = TermInfo> + 'a>
|
||||
where
|
||||
A::State: Clone,
|
||||
{
|
||||
use std::ops::Bound;
|
||||
let range_builder = self.termdict.range();
|
||||
let range_builder = self.termdict.search(automaton);
|
||||
let range_builder = match terms.start_bound() {
|
||||
Bound::Included(bound) => range_builder.ge(bound.serialized_value_bytes()),
|
||||
Bound::Excluded(bound) => range_builder.gt(bound.serialized_value_bytes()),
|
||||
@@ -242,7 +253,9 @@ impl InvertedIndexReader {
|
||||
range_builder
|
||||
};
|
||||
|
||||
let mut stream = range_builder.into_stream_async().await?;
|
||||
let mut stream = range_builder
|
||||
.into_stream_async_merging_holes(merge_holes_under_bytes)
|
||||
.await?;
|
||||
|
||||
let iter = std::iter::from_fn(move || stream.next().map(|(_k, v)| v.clone()));
|
||||
|
||||
@@ -288,7 +301,9 @@ impl InvertedIndexReader {
|
||||
limit: Option<u64>,
|
||||
with_positions: bool,
|
||||
) -> io::Result<bool> {
|
||||
let mut term_info = self.get_term_range_async(terms, limit).await?;
|
||||
let mut term_info = self
|
||||
.get_term_range_async(terms, AlwaysMatch, limit, 0)
|
||||
.await?;
|
||||
|
||||
let Some(first_terminfo) = term_info.next() else {
|
||||
// no key matches, nothing more to load
|
||||
@@ -315,6 +330,84 @@ impl InvertedIndexReader {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Warm up the block postings for all terms matching the given automaton.
/// This method is for advanced usage only.
///
/// Returns a boolean indicating whether a matching term was found in the dictionary.
pub async fn warm_postings_automaton<
|
||||
A: Automaton + Clone + Send + 'static,
|
||||
E: FnOnce(Box<dyn FnOnce() -> io::Result<()> + Send>) -> F,
|
||||
F: std::future::Future<Output = io::Result<()>>,
|
||||
>(
|
||||
&self,
|
||||
automaton: A,
|
||||
// with_positions: bool, at the moment we have no use for it, and supporting it would add
|
||||
// complexity to the coalesce
|
||||
executor: E,
|
||||
) -> io::Result<bool>
|
||||
where
|
||||
A::State: Clone,
|
||||
{
|
||||
// merge holes under 4MiB, that's how many bytes we can hope to receive during a TTFB from
|
||||
// S3 (~80MiB/s, and 50ms latency)
|
||||
const MERGE_HOLES_UNDER_BYTES: usize = (80 * 1024 * 1024 * 50) / 1000;
|
||||
// We build a first iterator to download everything. Simply calling the function already
// downloads everything we need from the sstable, but doesn't start iterating over it.
let _term_info_iter = self
|
||||
.get_term_range_async(.., automaton.clone(), None, MERGE_HOLES_UNDER_BYTES)
|
||||
.await?;
|
||||
|
||||
let (sender, posting_ranges_to_load_stream) = futures_channel::mpsc::unbounded();
|
||||
let termdict = self.termdict.clone();
|
||||
let cpu_bound_task = move || {
|
||||
// then we build a 2nd iterator, this one with no holes, so we don't go through blocks
|
||||
// we can't match.
|
||||
// This makes the assumption there is a caching layer below us, which gives sync read
|
||||
// for free after the initial async access. This might not always be true, but is in
|
||||
// Quickwit.
|
||||
// We build things from this closure otherwise we get into lifetime issues that can only
|
||||
// be solved with self referential strucs. Returning an io::Result from here is a bit
|
||||
// more leaky abstraction-wise, but a lot better than the alternative
|
||||
let mut stream = termdict.search(automaton).into_stream()?;
|
||||
|
||||
// we could do without an iterator, but this allows us access to coalesce which simplify
|
||||
// things
|
||||
let posting_ranges_iter =
|
||||
std::iter::from_fn(move || stream.next().map(|(_k, v)| v.postings_range.clone()));
|
||||
|
||||
let merged_posting_ranges_iter = posting_ranges_iter.coalesce(|range1, range2| {
|
||||
if range1.end + MERGE_HOLES_UNDER_BYTES >= range2.start {
|
||||
Ok(range1.start..range2.end)
|
||||
} else {
|
||||
Err((range1, range2))
|
||||
}
|
||||
});
|
||||
|
||||
for posting_range in merged_posting_ranges_iter {
|
||||
if let Err(_) = sender.unbounded_send(posting_range) {
|
||||
// this should happen only when search is cancelled
|
||||
return Err(io::Error::other("failed to send posting range back"));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
};
|
||||
let task_handle = executor(Box::new(cpu_bound_task));
|
||||
|
||||
let posting_downloader = posting_ranges_to_load_stream
|
||||
.map(|posting_slice| {
|
||||
self.postings_file_slice
|
||||
.read_bytes_slice_async(posting_slice)
|
||||
.map(|result| result.map(|_slice| ()))
|
||||
})
|
||||
.buffer_unordered(5)
|
||||
.try_collect::<Vec<()>>();
|
||||
|
||||
let (_, slices_downloaded) =
|
||||
futures_util::future::try_join(task_handle, posting_downloader).await?;
|
||||
|
||||
Ok(!slices_downloaded.is_empty())
|
||||
}
|
||||
|
||||
/// Warmup the block postings for all terms.
|
||||
/// This method is for an advanced usage only.
|
||||
///
|
||||
|
||||
@@ -45,6 +45,23 @@ fn error_in_index_worker_thread(context: &str) -> TantivyError {
|
||||
))
|
||||
}
|
||||
|
||||
#[derive(Clone, bon::Builder)]
|
||||
/// Options used when creating a new [IndexWriter] for an index, constructed via its generated builder.
|
||||
pub struct IndexWriterOptions {
|
||||
#[builder(default = MEMORY_BUDGET_NUM_BYTES_MIN)]
|
||||
/// The memory budget per indexer thread.
|
||||
///
|
||||
/// When an indexer thread has buffered this much data in memory,
/// it will flush the segment to disk (although it is not searchable until commit is called).
memory_budget_per_thread: usize,
|
||||
#[builder(default = 1)]
|
||||
/// The number of indexer worker threads to use.
|
||||
num_worker_threads: usize,
|
||||
#[builder(default = 4)]
|
||||
/// Defines the number of merger threads to use.
|
||||
num_merge_threads: usize,
|
||||
}
|
||||
|
||||
/// `IndexWriter` is the user entry-point to add document to an index.
|
||||
///
|
||||
/// It manages a small number of indexing thread, as well as a shared
|
||||
@@ -58,8 +75,7 @@ pub struct IndexWriter<D: Document = TantivyDocument> {
|
||||
|
||||
index: Index,
|
||||
|
||||
// The memory budget per thread, after which a commit is triggered.
|
||||
memory_budget_in_bytes_per_thread: usize,
|
||||
options: IndexWriterOptions,
|
||||
|
||||
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
|
||||
|
||||
@@ -70,8 +86,6 @@ pub struct IndexWriter<D: Document = TantivyDocument> {
|
||||
|
||||
worker_id: usize,
|
||||
|
||||
num_threads: usize,
|
||||
|
||||
delete_queue: DeleteQueue,
|
||||
|
||||
stamper: Stamper,
|
||||
@@ -265,23 +279,27 @@ impl<D: Document> IndexWriter<D> {
|
||||
/// `TantivyError::InvalidArgument`
|
||||
pub(crate) fn new(
|
||||
index: &Index,
|
||||
num_threads: usize,
|
||||
memory_budget_in_bytes_per_thread: usize,
|
||||
options: IndexWriterOptions,
|
||||
directory_lock: DirectoryLock,
|
||||
) -> crate::Result<Self> {
|
||||
if memory_budget_in_bytes_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
|
||||
if options.memory_budget_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
|
||||
let err_msg = format!(
|
||||
"The memory arena in bytes per thread needs to be at least \
|
||||
{MEMORY_BUDGET_NUM_BYTES_MIN}."
|
||||
);
|
||||
return Err(TantivyError::InvalidArgument(err_msg));
|
||||
}
|
||||
if memory_budget_in_bytes_per_thread >= MEMORY_BUDGET_NUM_BYTES_MAX {
|
||||
if options.memory_budget_per_thread >= MEMORY_BUDGET_NUM_BYTES_MAX {
|
||||
let err_msg = format!(
|
||||
"The memory arena in bytes per thread cannot exceed {MEMORY_BUDGET_NUM_BYTES_MAX}"
|
||||
);
|
||||
return Err(TantivyError::InvalidArgument(err_msg));
|
||||
}
|
||||
if options.num_worker_threads == 0 {
|
||||
let err_msg = "At least one worker thread is required, got 0".to_string();
|
||||
return Err(TantivyError::InvalidArgument(err_msg));
|
||||
}
|
||||
|
||||
let (document_sender, document_receiver) =
|
||||
crossbeam_channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
|
||||
|
||||
@@ -291,13 +309,17 @@ impl<D: Document> IndexWriter<D> {
|
||||
|
||||
let stamper = Stamper::new(current_opstamp);
|
||||
|
||||
let segment_updater =
|
||||
SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;
|
||||
let segment_updater = SegmentUpdater::create(
|
||||
index.clone(),
|
||||
stamper.clone(),
|
||||
&delete_queue.cursor(),
|
||||
options.num_merge_threads,
|
||||
)?;
|
||||
|
||||
let mut index_writer = Self {
|
||||
_directory_lock: Some(directory_lock),
|
||||
|
||||
memory_budget_in_bytes_per_thread,
|
||||
options: options.clone(),
|
||||
index: index.clone(),
|
||||
index_writer_status: IndexWriterStatus::from(document_receiver),
|
||||
operation_sender: document_sender,
|
||||
@@ -305,7 +327,6 @@ impl<D: Document> IndexWriter<D> {
|
||||
segment_updater,
|
||||
|
||||
workers_join_handle: vec![],
|
||||
num_threads,
|
||||
|
||||
delete_queue,
|
||||
|
||||
@@ -398,7 +419,7 @@ impl<D: Document> IndexWriter<D> {
|
||||
|
||||
let mut delete_cursor = self.delete_queue.cursor();
|
||||
|
||||
let mem_budget = self.memory_budget_in_bytes_per_thread;
|
||||
let mem_budget = self.options.memory_budget_per_thread;
|
||||
let index = self.index.clone();
|
||||
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
|
||||
.name(format!("thrd-tantivy-index{}", self.worker_id))
|
||||
@@ -451,7 +472,7 @@ impl<D: Document> IndexWriter<D> {
|
||||
}
|
||||
|
||||
fn start_workers(&mut self) -> crate::Result<()> {
|
||||
for _ in 0..self.num_threads {
|
||||
for _ in 0..self.options.num_worker_threads {
|
||||
self.add_indexing_worker()?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -553,12 +574,7 @@ impl<D: Document> IndexWriter<D> {
|
||||
.take()
|
||||
.expect("The IndexWriter does not have any lock. This is a bug, please report.");
|
||||
|
||||
let new_index_writer = IndexWriter::new(
|
||||
&self.index,
|
||||
self.num_threads,
|
||||
self.memory_budget_in_bytes_per_thread,
|
||||
directory_lock,
|
||||
)?;
|
||||
let new_index_writer = IndexWriter::new(&self.index, self.options.clone(), directory_lock)?;
|
||||
|
||||
// the current `self` is dropped right away because of this call.
|
||||
//
|
||||
@@ -812,7 +828,7 @@ mod tests {
|
||||
use crate::directory::error::LockError;
|
||||
use crate::error::*;
|
||||
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::indexer::{IndexWriterOptions, NoMergePolicy};
|
||||
use crate::query::{QueryParser, TermQuery};
|
||||
use crate::schema::{
|
||||
self, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, JsonObjectOptions,
|
||||
@@ -2533,4 +2549,36 @@ mod tests {
|
||||
index_writer.commit().unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_writer_options_validation() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let _field = schema_builder.add_bool_field("example", STORED);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
|
||||
let opt_wo_threads = IndexWriterOptions::builder().num_worker_threads(0).build();
|
||||
let result = index.writer_with_options::<TantivyDocument>(opt_wo_threads);
|
||||
assert!(result.is_err(), "Writer should reject 0 thread count");
|
||||
assert!(matches!(result, Err(TantivyError::InvalidArgument(_))));
|
||||
|
||||
let opt_with_low_memory = IndexWriterOptions::builder()
|
||||
.memory_budget_per_thread(10 << 10)
|
||||
.build();
|
||||
let result = index.writer_with_options::<TantivyDocument>(opt_with_low_memory);
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"Writer should reject options with too low memory size"
|
||||
);
|
||||
assert!(matches!(result, Err(TantivyError::InvalidArgument(_))));
|
||||
|
||||
let opt_with_low_memory = IndexWriterOptions::builder()
|
||||
.memory_budget_per_thread(5 << 30)
|
||||
.build();
|
||||
let result = index.writer_with_options::<TantivyDocument>(opt_with_low_memory);
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"Writer should reject options with too high memory size"
|
||||
);
|
||||
assert!(matches!(result, Err(TantivyError::InvalidArgument(_))));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ mod stamper;
|
||||
use crossbeam_channel as channel;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
pub use self::index_writer::IndexWriter;
|
||||
pub use self::index_writer::{IndexWriter, IndexWriterOptions};
|
||||
pub use self::log_merge_policy::LogMergePolicy;
|
||||
pub use self::merge_operation::MergeOperation;
|
||||
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::any::Any;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::collections::HashSet;
|
||||
use std::io::Write;
|
||||
@@ -23,9 +24,9 @@ use crate::indexer::{
|
||||
DefaultMergePolicy, MergeCandidate, MergeOperation, MergePolicy, SegmentEntry,
|
||||
SegmentSerializer,
|
||||
};
|
||||
use crate::{FutureResult, Opstamp};
|
||||
use crate::{FutureResult, Opstamp, TantivyError};
|
||||
|
||||
const NUM_MERGE_THREADS: usize = 4;
|
||||
const PANIC_CAUGHT: &str = "Panic caught in merge thread";
|
||||
|
||||
/// Save the index meta file.
|
||||
/// This operation is atomic:
|
||||
@@ -273,6 +274,7 @@ impl SegmentUpdater {
|
||||
index: Index,
|
||||
stamper: Stamper,
|
||||
delete_cursor: &DeleteCursor,
|
||||
num_merge_threads: usize,
|
||||
) -> crate::Result<SegmentUpdater> {
|
||||
let segments = index.searchable_segment_metas()?;
|
||||
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
|
||||
@@ -287,7 +289,16 @@ impl SegmentUpdater {
|
||||
})?;
|
||||
let merge_thread_pool = ThreadPoolBuilder::new()
|
||||
.thread_name(|i| format!("merge_thread_{i}"))
|
||||
.num_threads(NUM_MERGE_THREADS)
|
||||
.num_threads(num_merge_threads)
|
||||
.panic_handler(move |panic| {
|
||||
// We don't print the panic content itself,
|
||||
// it is already printed during the unwinding
|
||||
if let Some(message) = panic.downcast_ref::<&str>() {
|
||||
if *message != PANIC_CAUGHT {
|
||||
error!("uncaught merge panic")
|
||||
}
|
||||
}
|
||||
})
|
||||
.build()
|
||||
.map_err(|_| {
|
||||
crate::TantivyError::SystemError(
|
||||
@@ -507,11 +518,34 @@ impl SegmentUpdater {
|
||||
// Its lifetime is used to track how many merging thread are currently running,
|
||||
// as well as which segment is currently in merge and therefore should not be
|
||||
// candidate for another merge.
|
||||
match merge(
|
||||
&segment_updater.index,
|
||||
segment_entries,
|
||||
merge_operation.target_opstamp(),
|
||||
) {
|
||||
let merge_panic_res = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
|
||||
merge(
|
||||
&segment_updater.index,
|
||||
segment_entries,
|
||||
merge_operation.target_opstamp(),
|
||||
)
|
||||
}));
|
||||
let merge_res = match merge_panic_res {
|
||||
Ok(merge_res) => merge_res,
|
||||
Err(panic_err) => {
|
||||
let panic_str = if let Some(msg) = panic_err.downcast_ref::<&str>() {
|
||||
*msg
|
||||
} else if let Some(msg) = panic_err.downcast_ref::<String>() {
|
||||
msg.as_str()
|
||||
} else {
|
||||
"UNKNOWN"
|
||||
};
|
||||
let _send_result = merging_future_send.send(Err(TantivyError::SystemError(
|
||||
format!("Merge thread panicked: {panic_str}"),
|
||||
)));
|
||||
// Resume unwinding because we forced unwind safety with
// `std::panic::AssertUnwindSafe`. Use a specific message so
// the panic_handler can double-check that we properly caught the panic.
let boxed_panic_message: Box<dyn Any + Send> = Box::new(PANIC_CAUGHT);
|
||||
std::panic::resume_unwind(boxed_panic_message);
|
||||
}
|
||||
};
|
||||
match merge_res {
|
||||
Ok(after_merge_segment_entry) => {
|
||||
let res = segment_updater.end_merge(merge_operation, after_merge_segment_entry);
|
||||
let _send_result = merging_future_send.send(res);
|
||||
|
||||
@@ -422,6 +422,7 @@ mod tests {
|
||||
use std::collections::BTreeMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use columnar::ColumnType;
|
||||
use tempfile::TempDir;
|
||||
|
||||
use crate::collector::{Count, TopDocs};
|
||||
@@ -431,15 +432,15 @@ mod tests {
|
||||
use crate::query::{PhraseQuery, QueryParser};
|
||||
use crate::schema::{
|
||||
Document, IndexRecordOption, OwnedValue, Schema, TextFieldIndexing, TextOptions, Value,
|
||||
DATE_TIME_PRECISION_INDEXED, STORED, STRING, TEXT,
|
||||
DATE_TIME_PRECISION_INDEXED, FAST, STORED, STRING, TEXT,
|
||||
};
|
||||
use crate::store::{Compressor, StoreReader, StoreWriter};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::tokenizer::{PreTokenizedString, Token};
|
||||
use crate::{
|
||||
DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, TantivyDocument, Term,
|
||||
TERMINATED,
|
||||
DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, SegmentReader,
|
||||
TantivyDocument, Term, TERMINATED,
|
||||
};
|
||||
|
||||
#[test]
|
||||
@@ -841,6 +842,75 @@ mod tests {
|
||||
assert_eq!(searcher.search(&phrase_query, &Count).unwrap(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_fast() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let json_field = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let json_val: serde_json::Value = serde_json::from_str(
|
||||
r#"{
|
||||
"toto": "titi",
|
||||
"float": -0.2,
|
||||
"bool": true,
|
||||
"unsigned": 1,
|
||||
"signed": -2,
|
||||
"complexobject": {
|
||||
"field.with.dot": 1
|
||||
},
|
||||
"date": "1985-04-12T23:20:50.52Z",
|
||||
"my_arr": [2, 3, {"my_key": "two tokens"}, 4]
|
||||
}"#,
|
||||
)
|
||||
.unwrap();
|
||||
let doc = doc!(json_field=>json_val.clone());
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut writer = index.writer_for_tests().unwrap();
|
||||
writer.add_document(doc).unwrap();
|
||||
writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
let segment_reader = searcher.segment_reader(0u32);
|
||||
|
||||
fn assert_type(reader: &SegmentReader, field: &str, typ: ColumnType) {
|
||||
let cols = reader.fast_fields().dynamic_column_handles(field).unwrap();
|
||||
assert_eq!(cols.len(), 1, "{}", field);
|
||||
assert_eq!(cols[0].column_type(), typ, "{}", field);
|
||||
}
|
||||
assert_type(segment_reader, "json.toto", ColumnType::Str);
|
||||
assert_type(segment_reader, "json.float", ColumnType::F64);
|
||||
assert_type(segment_reader, "json.bool", ColumnType::Bool);
|
||||
assert_type(segment_reader, "json.unsigned", ColumnType::I64);
|
||||
assert_type(segment_reader, "json.signed", ColumnType::I64);
|
||||
assert_type(
|
||||
segment_reader,
|
||||
"json.complexobject.field\\.with\\.dot",
|
||||
ColumnType::I64,
|
||||
);
|
||||
assert_type(segment_reader, "json.date", ColumnType::DateTime);
|
||||
assert_type(segment_reader, "json.my_arr", ColumnType::I64);
|
||||
assert_type(segment_reader, "json.my_arr.my_key", ColumnType::Str);
|
||||
|
||||
fn assert_empty(reader: &SegmentReader, field: &str) {
|
||||
let cols = reader.fast_fields().dynamic_column_handles(field).unwrap();
|
||||
assert_eq!(cols.len(), 0);
|
||||
}
|
||||
assert_empty(segment_reader, "unknown");
|
||||
assert_empty(segment_reader, "json");
|
||||
assert_empty(segment_reader, "json.toto.titi");
|
||||
|
||||
let sub_columns = segment_reader
|
||||
.fast_fields()
|
||||
.dynamic_subpath_column_handles("json")
|
||||
.unwrap();
|
||||
assert_eq!(sub_columns.len(), 9);
|
||||
|
||||
let subsub_columns = segment_reader
|
||||
.fast_fields()
|
||||
.dynamic_subpath_column_handles("json.complexobject")
|
||||
.unwrap();
|
||||
assert_eq!(subsub_columns.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_term_with_numeric_merge_panic_regression_bug_2283() {
|
||||
// https://github.com/quickwit-oss/tantivy/issues/2283
|
||||
|
||||
@@ -1,79 +1,19 @@
|
||||
use std::ops::Range;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::Opstamp;
|
||||
|
||||
#[cfg(not(target_arch = "arm"))]
|
||||
mod atomic_impl {
|
||||
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
use crate::Opstamp;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AtomicU64Wrapper(AtomicU64);
|
||||
|
||||
impl AtomicU64Wrapper {
|
||||
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
|
||||
AtomicU64Wrapper(AtomicU64::new(first_opstamp))
|
||||
}
|
||||
|
||||
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
|
||||
self.0.fetch_add(val, order)
|
||||
}
|
||||
|
||||
pub fn revert(&self, val: u64, order: Ordering) -> u64 {
|
||||
self.0.store(val, order);
|
||||
val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "arm")]
|
||||
mod atomic_impl {
|
||||
|
||||
/// On architectures without native 64-bit atomics, we rely on an `RwLock` instead.
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::RwLock;
|
||||
|
||||
use crate::Opstamp;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AtomicU64Wrapper(RwLock<u64>);
|
||||
|
||||
impl AtomicU64Wrapper {
|
||||
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
|
||||
AtomicU64Wrapper(RwLock::new(first_opstamp))
|
||||
}
|
||||
|
||||
pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
|
||||
let mut lock = self.0.write().unwrap();
|
||||
let previous_val = *lock;
|
||||
*lock = previous_val + incr;
|
||||
previous_val
|
||||
}
|
||||
|
||||
pub fn revert(&self, val: u64, _order: Ordering) -> u64 {
|
||||
let mut lock = self.0.write().unwrap();
|
||||
*lock = val;
|
||||
val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use self::atomic_impl::AtomicU64Wrapper;
|
||||
|
||||
/// Stamper provides Opstamps, which are just auto-incrementing ids used to label
/// operations.
|
||||
///
|
||||
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
|
||||
#[derive(Clone, Default)]
|
||||
pub struct Stamper(Arc<AtomicU64Wrapper>);
|
||||
pub struct Stamper(Arc<AtomicU64>);
|
||||
|
||||
impl Stamper {
|
||||
pub fn new(first_opstamp: Opstamp) -> Stamper {
|
||||
Stamper(Arc::new(AtomicU64Wrapper::new(first_opstamp)))
|
||||
Stamper(Arc::new(AtomicU64::new(first_opstamp)))
|
||||
}
|
||||
|
||||
pub fn stamp(&self) -> Opstamp {
|
||||
@@ -92,7 +32,8 @@ impl Stamper {
|
||||
|
||||
/// Reverts the stamper to a given `Opstamp` value and returns it
|
||||
pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
|
||||
self.0.revert(to_opstamp, Ordering::SeqCst)
|
||||
self.0.store(to_opstamp, Ordering::SeqCst);
|
||||
to_opstamp
|
||||
}
|
||||
}
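As a side note, a minimal standard-library sketch of the behaviour the simplified `Stamper` relies on: an `AtomicU64` shared through an `Arc` hands out strictly increasing stamps from every clone, and a plain `store` is enough to revert it.

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

fn main() {
    let stamper = Arc::new(AtomicU64::new(0));
    let clone = Arc::clone(&stamper);

    // fetch_add returns the previous value, so stamps come out as 0, 1, 2, ...
    assert_eq!(stamper.fetch_add(1, Ordering::SeqCst), 0);
    assert_eq!(clone.fetch_add(1, Ordering::SeqCst), 1);

    // reverting is a plain store followed by handing back the same value
    stamper.store(1, Ordering::SeqCst);
    assert_eq!(clone.fetch_add(1, Ordering::SeqCst), 1);
}
```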
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ fn encode_bitwidth(bitwidth: u8, delta_1: bool) -> u8 {
|
||||
}
|
||||
|
||||
fn decode_bitwidth(raw_bitwidth: u8) -> (u8, bool) {
|
||||
let delta_1 = (raw_bitwidth >> 6 & 1) != 0;
|
||||
let delta_1 = ((raw_bitwidth >> 6) & 1) != 0;
|
||||
let bitwidth = raw_bitwidth & 0x3f;
|
||||
(bitwidth, delta_1)
|
||||
}
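For illustration, a round-trip check of the decoding above. The `encode` helper below is an assumption made for this sketch (the real `encode_bitwidth` body is not shown in this hunk); it simply stores the `delta_1` flag in bit 6.

```rust
fn decode_bitwidth(raw_bitwidth: u8) -> (u8, bool) {
    let delta_1 = ((raw_bitwidth >> 6) & 1) != 0;
    let bitwidth = raw_bitwidth & 0x3f;
    (bitwidth, delta_1)
}

// hypothetical encoder mirroring decode_bitwidth, for illustration only
fn encode(bitwidth: u8, delta_1: bool) -> u8 {
    debug_assert!(bitwidth <= 0x3f);
    bitwidth | ((delta_1 as u8) << 6)
}

fn main() {
    assert_eq!(decode_bitwidth(encode(13, true)), (13, true));
    assert_eq!(decode_bitwidth(encode(13, false)), (13, false));
}
```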
|
||||
|
||||
@@ -7,14 +7,32 @@ use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::index::SegmentReader;
|
||||
use crate::query::explanation::does_not_match;
|
||||
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
|
||||
use crate::schema::Type;
|
||||
use crate::{DocId, Score, TantivyError};
|
||||
|
||||
/// Query that matches all documents with a non-null value in the specified field.
|
||||
/// Query that matches all documents with a non-null value in the specified
|
||||
/// field.
|
||||
///
|
||||
/// When querying inside a JSON field, "exists" queries can be executed strictly
|
||||
/// on the field name or check all the subpaths. In that second case, a document
|
||||
/// will be matched if a non-null value exists in any subpath. For example,
|
||||
/// assuming the following document where `myfield` is a JSON fast field:
|
||||
/// ```json
|
||||
/// {
|
||||
/// "myfield": {
|
||||
/// "mysubfield": "hello"
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
/// With `json_subpaths` enabled, queries on either `myfield` or
|
||||
/// `myfield.mysubfield` will match the document. If it is set to false, only
|
||||
/// `myfield.mysubfield` will match it.
|
||||
///
|
||||
/// All of the matched documents get the score 1.0.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ExistsQuery {
|
||||
field_name: String,
|
||||
json_subpaths: bool,
|
||||
}
|
||||
|
||||
impl ExistsQuery {
|
||||
@@ -23,8 +41,28 @@ impl ExistsQuery {
|
||||
/// This query matches all documents with at least one non-null value in the specified field.
|
||||
/// This constructor never fails, but executing the search with this query will return an
|
||||
/// error if the specified field doesn't exist or is not a fast field.
|
||||
#[deprecated]
|
||||
pub fn new_exists_query(field: String) -> ExistsQuery {
|
||||
ExistsQuery { field_name: field }
|
||||
ExistsQuery {
|
||||
field_name: field,
|
||||
json_subpaths: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new `ExistsQuery` from the given field.
|
||||
///
|
||||
/// This query matches all documents with at least one non-null value in the
|
||||
/// specified field. If `json_subpaths` is set to true, documents with
|
||||
/// non-null values in any JSON subpath will also be matched.
|
||||
///
|
||||
/// This constructor never fails, but executing the search with this query will
|
||||
/// return an error if the specified field doesn't exist or is not a fast
|
||||
/// field.
|
||||
pub fn new(field: String, json_subpaths: bool) -> Self {
|
||||
Self {
|
||||
field_name: field,
|
||||
json_subpaths,
|
||||
}
|
||||
}
|
||||
}
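As a usage sketch of the new constructor (the schema, field name and document below are made up for illustration, and `serde_json` is assumed as a dependency):

```rust
use tantivy::collector::Count;
use tantivy::query::ExistsQuery;
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // hypothetical JSON fast field
    let json_field = schema_builder.add_json_field("attrs", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(json_field => serde_json::json!({"color": "red"})))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // strict: only a value at exactly "attrs.color" matches
    assert_eq!(searcher.search(&ExistsQuery::new("attrs.color".to_string(), false), &Count)?, 1);
    assert_eq!(searcher.search(&ExistsQuery::new("attrs".to_string(), false), &Count)?, 0);
    // with json_subpaths enabled, the parent path "attrs" matches as well
    assert_eq!(searcher.search(&ExistsQuery::new("attrs".to_string(), true), &Count)?, 1);
    Ok(())
}
```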
|
||||
|
||||
@@ -43,6 +81,8 @@ impl Query for ExistsQuery {
|
||||
}
|
||||
Ok(Box::new(ExistsWeight {
|
||||
field_name: self.field_name.clone(),
|
||||
field_type: field_type.value_type(),
|
||||
json_subpaths: self.json_subpaths,
|
||||
}))
|
||||
}
|
||||
}
|
||||
@@ -50,13 +90,20 @@ impl Query for ExistsQuery {
|
||||
/// Weight associated with the `ExistsQuery` query.
|
||||
pub struct ExistsWeight {
|
||||
field_name: String,
|
||||
field_type: Type,
|
||||
json_subpaths: bool,
|
||||
}
|
||||
|
||||
impl Weight for ExistsWeight {
|
||||
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
|
||||
let fast_field_reader = reader.fast_fields();
|
||||
let dynamic_columns: crate::Result<Vec<DynamicColumn>> = fast_field_reader
|
||||
.dynamic_column_handles(&self.field_name)?
|
||||
let mut column_handles = fast_field_reader.dynamic_column_handles(&self.field_name)?;
|
||||
if self.field_type == Type::Json && self.json_subpaths {
|
||||
let mut sub_columns =
|
||||
fast_field_reader.dynamic_subpath_column_handles(&self.field_name)?;
|
||||
column_handles.append(&mut sub_columns);
|
||||
}
|
||||
let dynamic_columns: crate::Result<Vec<DynamicColumn>> = column_handles
|
||||
.into_iter()
|
||||
.map(|handle| handle.open().map_err(|io_error| io_error.into()))
|
||||
.collect();
|
||||
@@ -180,11 +227,12 @@ mod tests {
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
assert_eq!(count_existing_fields(&searcher, "all")?, 100);
|
||||
assert_eq!(count_existing_fields(&searcher, "odd")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "even")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "multi")?, 10);
|
||||
assert_eq!(count_existing_fields(&searcher, "never")?, 0);
|
||||
assert_eq!(count_existing_fields(&searcher, "all", false)?, 100);
|
||||
assert_eq!(count_existing_fields(&searcher, "odd", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "even", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "multi", false)?, 10);
|
||||
assert_eq!(count_existing_fields(&searcher, "multi", true)?, 10);
|
||||
assert_eq!(count_existing_fields(&searcher, "never", false)?, 0);
|
||||
|
||||
// exercise seek
|
||||
let query = BooleanQuery::intersection(vec![
|
||||
@@ -192,7 +240,7 @@ mod tests {
|
||||
Bound::Included(Term::from_field_u64(all_field, 50)),
|
||||
Bound::Unbounded,
|
||||
)),
|
||||
Box::new(ExistsQuery::new_exists_query("even".to_string())),
|
||||
Box::new(ExistsQuery::new("even".to_string(), false)),
|
||||
]);
|
||||
assert_eq!(searcher.search(&query, &Count)?, 25);
|
||||
|
||||
@@ -201,7 +249,7 @@ mod tests {
|
||||
Bound::Included(Term::from_field_u64(all_field, 0)),
|
||||
Bound::Included(Term::from_field_u64(all_field, 50)),
|
||||
)),
|
||||
Box::new(ExistsQuery::new_exists_query("odd".to_string())),
|
||||
Box::new(ExistsQuery::new("odd".to_string(), false)),
|
||||
]);
|
||||
assert_eq!(searcher.search(&query, &Count)?, 25);
|
||||
|
||||
@@ -230,22 +278,18 @@ mod tests {
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
assert_eq!(count_existing_fields(&searcher, "json.all")?, 100);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.even")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.odd")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.all", false)?, 100);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.even", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.even", true)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.odd", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "json", false)?, 0);
|
||||
assert_eq!(count_existing_fields(&searcher, "json", true)?, 100);
|
||||
|
||||
// Handling of non-existing fields:
|
||||
assert_eq!(count_existing_fields(&searcher, "json.absent")?, 0);
|
||||
assert_eq!(
|
||||
searcher
|
||||
.search(
|
||||
&ExistsQuery::new_exists_query("does_not_exists.absent".to_string()),
|
||||
&Count
|
||||
)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
"The field does not exist: 'does_not_exists.absent'"
|
||||
);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.absent", false)?, 0);
|
||||
assert_eq!(count_existing_fields(&searcher, "json.absent", true)?, 0);
|
||||
assert_does_not_exist(&searcher, "does_not_exists.absent", true);
|
||||
assert_does_not_exist(&searcher, "does_not_exists.absent", false);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -284,12 +328,13 @@ mod tests {
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
assert_eq!(count_existing_fields(&searcher, "bool")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "bytes")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "date")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "f64")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "ip_addr")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "facet")?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "bool", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "bool", true)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "bytes", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "date", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "f64", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "ip_addr", false)?, 50);
|
||||
assert_eq!(count_existing_fields(&searcher, "facet", false)?, 50);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -313,31 +358,33 @@ mod tests {
|
||||
|
||||
assert_eq!(
|
||||
searcher
|
||||
.search(
|
||||
&ExistsQuery::new_exists_query("not_fast".to_string()),
|
||||
&Count
|
||||
)
|
||||
.search(&ExistsQuery::new("not_fast".to_string(), false), &Count)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
"Schema error: 'Field not_fast is not a fast field.'"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
searcher
|
||||
.search(
|
||||
&ExistsQuery::new_exists_query("does_not_exists".to_string()),
|
||||
&Count
|
||||
)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
"The field does not exist: 'does_not_exists'"
|
||||
);
|
||||
assert_does_not_exist(&searcher, "does_not_exists", false);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn count_existing_fields(searcher: &Searcher, field: &str) -> crate::Result<usize> {
|
||||
let query = ExistsQuery::new_exists_query(field.to_string());
|
||||
fn count_existing_fields(
|
||||
searcher: &Searcher,
|
||||
field: &str,
|
||||
json_subpaths: bool,
|
||||
) -> crate::Result<usize> {
|
||||
let query = ExistsQuery::new(field.to_string(), json_subpaths);
|
||||
searcher.search(&query, &Count)
|
||||
}
|
||||
|
||||
fn assert_does_not_exist(searcher: &Searcher, field: &str, json_subpaths: bool) {
|
||||
assert_eq!(
|
||||
searcher
|
||||
.search(&ExistsQuery::new(field.to_string(), json_subpaths), &Count)
|
||||
.unwrap_err()
|
||||
.to_string(),
|
||||
format!("The field does not exist: '{}'", field)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ use crate::schema::field_type::ValueParsingError;
|
||||
use crate::schema::{Facet, Field, NamedFieldDocument, OwnedValue, Schema};
|
||||
use crate::tokenizer::PreTokenizedString;
|
||||
|
||||
#[repr(packed)]
|
||||
#[repr(C, packed)]
|
||||
#[derive(Debug, Clone)]
|
||||
/// A field value pair in the compact tantivy document
|
||||
struct FieldValueAddr {
|
||||
@@ -480,7 +480,7 @@ impl<'a> CompactDocValue<'a> {
|
||||
type Addr = u32;
|
||||
|
||||
#[derive(Clone, Copy, Default)]
|
||||
#[repr(packed)]
|
||||
#[repr(C, packed)]
|
||||
/// The value type and the address to its payload in the container.
|
||||
struct ValueAddr {
|
||||
type_id: ValueType,
|
||||
@@ -734,7 +734,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_json_value() {
|
||||
let json_str = r#"{
|
||||
let json_str = r#"{
|
||||
"toto": "titi",
|
||||
"float": -0.2,
|
||||
"bool": true,
|
||||
|
||||
@@ -93,6 +93,7 @@ impl TermInfoBlockMeta {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TermInfoStore {
|
||||
num_terms: usize,
|
||||
block_meta_bytes: OwnedBytes,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use std::io::{self, Write};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::{BinarySerializable, CountingWriter};
|
||||
use once_cell::sync::Lazy;
|
||||
@@ -113,8 +114,9 @@ static EMPTY_TERM_DICT_FILE: Lazy<FileSlice> = Lazy::new(|| {
|
||||
/// The `Fst` crate is used to associate terms to their
|
||||
/// respective `TermOrdinal`. The `TermInfoStore` then makes it
|
||||
/// possible to fetch the associated `TermInfo`.
|
||||
#[derive(Clone)]
|
||||
pub struct TermDictionary {
|
||||
fst_index: tantivy_fst::Map<OwnedBytes>,
|
||||
fst_index: Arc<tantivy_fst::Map<OwnedBytes>>,
|
||||
term_info_store: TermInfoStore,
|
||||
}
|
||||
|
||||
@@ -136,7 +138,7 @@ impl TermDictionary {
|
||||
let fst_index = open_fst_index(fst_file_slice)?;
|
||||
let term_info_store = TermInfoStore::open(values_file_slice)?;
|
||||
Ok(TermDictionary {
|
||||
fst_index,
|
||||
fst_index: Arc::new(fst_index),
|
||||
term_info_store,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -74,6 +74,7 @@ const CURRENT_TYPE: DictionaryType = DictionaryType::SSTable;
|
||||
|
||||
// TODO in the future this should become an enum of supported dictionaries
|
||||
/// A TermDictionary wrapping either an FST based dictionary or a SSTable based one.
|
||||
#[derive(Clone)]
|
||||
pub struct TermDictionary(InnerTermDict);
|
||||
|
||||
impl TermDictionary {
|
||||
|
||||
@@ -28,6 +28,7 @@ pub type TermDictionaryBuilder<W> = sstable::Writer<W, TermInfoValueWriter>;
|
||||
pub type TermStreamer<'a, A = AlwaysMatch> = sstable::Streamer<'a, TermSSTable, A>;
|
||||
|
||||
/// SSTable used to store TermInfo objects.
|
||||
#[derive(Clone)]
|
||||
pub struct TermSSTable;
|
||||
|
||||
pub type TermStreamerBuilder<'a, A = AlwaysMatch> = sstable::StreamerBuilder<'a, TermSSTable, A>;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-sstable"
|
||||
version = "0.3.0"
|
||||
version = "0.5.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
@@ -10,8 +10,10 @@ categories = ["database-implementations", "data-structures", "compression"]
|
||||
description = "sstables for tantivy"
|
||||
|
||||
[dependencies]
|
||||
common = {version= "0.7", path="../common", package="tantivy-common"}
|
||||
tantivy-bitpacker = { version= "0.6", path="../bitpacker" }
|
||||
common = {version= "0.9", path="../common", package="tantivy-common"}
|
||||
futures-util = "0.3.30"
|
||||
itertools = "0.14.0"
|
||||
tantivy-bitpacker = { version= "0.8", path="../bitpacker" }
|
||||
tantivy-fst = "0.5"
|
||||
# experimental gives us access to Decompressor::upper_bound
|
||||
zstd = { version = "0.13", features = ["experimental"] }
|
||||
|
||||
sstable/src/block_match_automaton.rs (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
use tantivy_fst::Automaton;
|
||||
|
||||
/// Returns whether a block can match an automaton based on its bounds.
|
||||
///
|
||||
/// The start key is exclusive, and optional to account for the first block. The end key is
/// inclusive and mandatory.
|
||||
pub(crate) fn can_block_match_automaton(
|
||||
start_key_opt: Option<&[u8]>,
|
||||
end_key: &[u8],
|
||||
automaton: &impl Automaton,
|
||||
) -> bool {
|
||||
let start_key = if let Some(start_key) = start_key_opt {
|
||||
start_key
|
||||
} else {
|
||||
// if start_key_opt is None, we would allow an automaton matching the empty string to match
|
||||
if automaton.is_match(&automaton.start()) {
|
||||
return true;
|
||||
}
|
||||
&[]
|
||||
};
|
||||
can_block_match_automaton_with_start(start_key, end_key, automaton)
|
||||
}
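A small usage sketch of the bounds semantics, written as if it sat next to the proptest tests further down in this file and reused their `EqBuffer` exact-match automaton:

```rust
// hypothetical extra check, reusing the EqBuffer helper defined in the tests below
#[test]
fn test_block_bounds_example() {
    use super::tests::EqBuffer;

    // start key is exclusive, end key is inclusive
    let start = b"abc".as_slice();
    let end = b"abe".as_slice();
    // b"abd" falls inside (b"abc", b"abe"], so a block with these bounds may contain it
    assert!(can_block_match_automaton(Some(start), end, &EqBuffer(b"abd".to_vec())));
    // b"abz" is greater than the inclusive end key, so the block cannot contain it
    assert!(!can_block_match_automaton(Some(start), end, &EqBuffer(b"abz".to_vec())));
    // the first block has no start key: an automaton accepting the empty string matches it
    assert!(can_block_match_automaton(None, end, &EqBuffer(Vec::new())));
}
```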
|
||||
|
||||
// similar to can_block_match_automaton, ignoring the edge case of the initial block
|
||||
fn can_block_match_automaton_with_start(
|
||||
start_key: &[u8],
|
||||
end_key: &[u8],
|
||||
automaton: &impl Automaton,
|
||||
) -> bool {
|
||||
// notation: in loops, we use `kb` to denote a key byte (a byte taken from the start/end key),
// and `rb`, a range byte (usually all values higher than a `kb` when comparing with
// start_key, or all values lower than a `kb` when comparing with end_key)
|
||||
|
||||
if start_key >= end_key {
|
||||
return false;
|
||||
}
|
||||
|
||||
let common_prefix_len = crate::common_prefix_len(start_key, end_key);
|
||||
|
||||
let mut base_state = automaton.start();
|
||||
for kb in &start_key[0..common_prefix_len] {
|
||||
base_state = automaton.accept(&base_state, *kb);
|
||||
}
|
||||
|
||||
// this is not required for correctness, but allows dodging more expensive checks
|
||||
if !automaton.can_match(&base_state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// we have 3 distinct cases:
|
||||
// - keys are `abc` and `abcd` => we test for abc[\0-d].*
|
||||
// - keys are `abcd` and `abce` => we test for abc[d-e].*
|
||||
// - keys are `abcd` and `abc` => contradiction with start_key < end_key.
|
||||
//
|
||||
// ideally for (abc, abcde] we could test for abc([\0-c].*|d([\0-d].*|e)?)
|
||||
// but let's start simple (and correct), and tighten our bounds later
|
||||
//
|
||||
// and for (abcde, abcfg] we could test for abc(d(e.+|[f-\xff].*)|e.*|f([\0-f].*|g)?)
|
||||
// abc (
|
||||
// d(e.+|[f-\xff].*) |
|
||||
// e.* |
|
||||
// f([\0-f].*|g)?
|
||||
// )
|
||||
//
|
||||
// these are all written as regexes, but can be converted to operations we can do:
// - [x-y] is a `for c in x..=y` loop
// - .* is a can_match()
// - .+ is a `for c in 0..=255 { accept(c).can_match() }`
// - ? is: the thing before it then can_match(), or the current state's is_match()
// - | means test both sides
|
||||
|
||||
// we have two cases, either start_key is a prefix of end_key (e.g. (abc, abcjp]),
|
||||
// or it is not (e.g. (abcdg, abcjp]). It is not possible however that end_key be a prefix of
|
||||
// start_key (or that both are equal) because we already handled start_key >= end_key.
|
||||
//
|
||||
// if we are in the first case, we want to visit the following states:
|
||||
// abc (
|
||||
// [\0-i].* |
|
||||
// j (
|
||||
// [\0-o].* |
|
||||
// p
|
||||
// )?
|
||||
// )
|
||||
// Everything after `abc` is handled by `match_range_end`
|
||||
//
|
||||
// if we are in the 2nd case, we want to visit the following states:
|
||||
// abc (
|
||||
// d(g.+|[h-\xff].*) | // this is handled by match_range_start
|
||||
//
|
||||
// [e-i].* | // this is handled here
|
||||
//
|
||||
// j ( // this is handled by match_range_end (but contrary to the other
// [\0-o].* | // case, j is already consumed, so we don't check [\0-i].* )
|
||||
// p
|
||||
// )?
|
||||
// )
|
||||
|
||||
let Some(start_range) = start_key.get(common_prefix_len) else {
|
||||
return match_range_end(&end_key[common_prefix_len..], &automaton, base_state);
|
||||
};
|
||||
|
||||
let end_range = end_key[common_prefix_len];
|
||||
|
||||
// things starting with start_range were handled in match_range_start
// things starting with end_range are handled below.
// this can run for 0 iterations in cases such as (abc, abd]
|
||||
for rb in (start_range + 1)..end_range {
|
||||
let new_state = automaton.accept(&base_state, rb);
|
||||
if automaton.can_match(&new_state) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
let state_for_start = automaton.accept(&base_state, *start_range);
|
||||
if match_range_start(
|
||||
&start_key[common_prefix_len + 1..],
|
||||
&automaton,
|
||||
state_for_start,
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
let state_for_end = automaton.accept(&base_state, end_range);
|
||||
if automaton.is_match(&state_for_end) {
|
||||
return true;
|
||||
}
|
||||
match_range_end(&end_key[common_prefix_len + 1..], &automaton, state_for_end)
|
||||
}
|
||||
|
||||
fn match_range_start<S, A: Automaton<State = S>>(
|
||||
start_key: &[u8],
|
||||
automaton: &A,
|
||||
mut state: S,
|
||||
) -> bool {
|
||||
// case (abcdgj, abcpqr], `abcd` is already consumed, we need to handle:
|
||||
// - [h-\xff].*
|
||||
// - g[k-\xff].*
|
||||
// - gj.+ == gj[\0-\xff].*
|
||||
|
||||
for kb in start_key {
|
||||
// this is an optimisation, and is not needed for correctness
|
||||
if !automaton.can_match(&state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// does the [h-\xff].* part. we skip if kb==255 as [\{0100}-\xff] is an empty range, and
|
||||
// this would overflow in our u8 world
|
||||
if *kb < u8::MAX {
|
||||
for rb in (kb + 1)..=u8::MAX {
|
||||
let temp_state = automaton.accept(&state, rb);
|
||||
if automaton.can_match(&temp_state) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
// push g
|
||||
state = automaton.accept(&state, *kb);
|
||||
}
|
||||
|
||||
// this isn't required for correctness, but can save us from looping 256 times below
|
||||
if !automaton.can_match(&state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// does the final `.+`, which is the same as `[\0-\xff].*`
|
||||
for rb in 0..=u8::MAX {
|
||||
let temp_state = automaton.accept(&state, rb);
|
||||
if automaton.can_match(&temp_state) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn match_range_end<S, A: Automaton<State = S>>(
|
||||
end_key: &[u8],
|
||||
automaton: &A,
|
||||
mut state: S,
|
||||
) -> bool {
|
||||
// for (abcdef, abcmps], the prefix `abcm` has been consumed, `[d-l].*` was handled elsewhere,
|
||||
// we just need to handle
|
||||
// - [\0-o].*
|
||||
// - p
|
||||
// - p[\0-r].*
|
||||
// - ps
|
||||
for kb in end_key {
|
||||
// this is an optimisation, and is not needed for correctness
|
||||
if !automaton.can_match(&state) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// does the `[\0-o].*`
|
||||
for rb in 0..*kb {
|
||||
let temp_state = automaton.accept(&state, rb);
|
||||
if automaton.can_match(&temp_state) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// push p
|
||||
state = automaton.accept(&state, *kb);
|
||||
// verify the `p` case
|
||||
if automaton.is_match(&state) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use proptest::prelude::*;
|
||||
use tantivy_fst::Automaton;
|
||||
|
||||
use super::*;
|
||||
|
||||
pub(crate) struct EqBuffer(pub Vec<u8>);
|
||||
|
||||
impl Automaton for EqBuffer {
|
||||
type State = Option<usize>;
|
||||
|
||||
fn start(&self) -> Self::State {
|
||||
Some(0)
|
||||
}
|
||||
|
||||
fn is_match(&self, state: &Self::State) -> bool {
|
||||
*state == Some(self.0.len())
|
||||
}
|
||||
|
||||
fn accept(&self, state: &Self::State, byte: u8) -> Self::State {
|
||||
state
|
||||
.filter(|pos| self.0.get(*pos) == Some(&byte))
|
||||
.map(|pos| pos + 1)
|
||||
}
|
||||
|
||||
fn can_match(&self, state: &Self::State) -> bool {
|
||||
state.is_some()
|
||||
}
|
||||
|
||||
fn will_always_match(&self, _state: &Self::State) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_key_strategy() -> impl Strategy<Value = Vec<u8>> {
|
||||
// we only generate bytes in [0, 1, 2, 254, 255] to reduce the search space without
|
||||
// ignoring edge cases that might occur with integer over/underflow
|
||||
proptest::collection::vec(prop_oneof![0u8..=2, 254u8..=255], 0..5)
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig {
|
||||
cases: 10000, .. ProptestConfig::default()
|
||||
})]
|
||||
|
||||
#[test]
|
||||
fn test_proptest_automaton_match_block(start in gen_key_strategy(), end in gen_key_strategy(), key in gen_key_strategy()) {
|
||||
let expected = start < key && end >= key;
|
||||
let automaton = EqBuffer(key);
|
||||
|
||||
assert_eq!(can_block_match_automaton(Some(&start), &end, &automaton), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_proptest_automaton_match_first_block(end in gen_key_strategy(), key in gen_key_strategy()) {
|
||||
let expected = end >= key;
|
||||
let automaton = EqBuffer(key);
|
||||
assert_eq!(can_block_match_automaton(None, &end, &automaton), expected);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@ use zstd::bulk::Decompressor;
|
||||
pub struct BlockReader {
|
||||
buffer: Vec<u8>,
|
||||
reader: OwnedBytes,
|
||||
next_readers: std::vec::IntoIter<OwnedBytes>,
|
||||
offset: usize,
|
||||
}
|
||||
|
||||
@@ -15,6 +16,18 @@ impl BlockReader {
|
||||
BlockReader {
|
||||
buffer: Vec::new(),
|
||||
reader,
|
||||
next_readers: Vec::new().into_iter(),
|
||||
offset: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_multiple_blocks(readers: Vec<OwnedBytes>) -> BlockReader {
|
||||
let mut next_readers = readers.into_iter();
|
||||
let reader = next_readers.next().unwrap_or_else(OwnedBytes::empty);
|
||||
BlockReader {
|
||||
buffer: Vec::new(),
|
||||
reader,
|
||||
next_readers,
|
||||
offset: 0,
|
||||
}
|
||||
}
|
||||
@@ -34,42 +47,52 @@ impl BlockReader {
|
||||
self.offset = 0;
|
||||
self.buffer.clear();
|
||||
|
||||
let block_len = match self.reader.len() {
|
||||
0 => return Ok(false),
|
||||
1..=3 => {
|
||||
loop {
|
||||
let block_len = match self.reader.len() {
|
||||
0 => {
|
||||
// we are out of data for this block. Check if we have another block after
|
||||
if let Some(new_reader) = self.next_readers.next() {
|
||||
self.reader = new_reader;
|
||||
continue;
|
||||
} else {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
1..=3 => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"failed to read block_len",
|
||||
))
|
||||
}
|
||||
_ => self.reader.read_u32() as usize,
|
||||
};
|
||||
if block_len <= 1 {
|
||||
return Ok(false);
|
||||
}
|
||||
let compress = self.reader.read_u8();
|
||||
let block_len = block_len - 1;
|
||||
|
||||
if self.reader.len() < block_len {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"failed to read block_len",
|
||||
))
|
||||
"failed to read block content",
|
||||
));
|
||||
}
|
||||
_ => self.reader.read_u32() as usize,
|
||||
};
|
||||
if block_len <= 1 {
|
||||
return Ok(false);
|
||||
}
|
||||
let compress = self.reader.read_u8();
|
||||
let block_len = block_len - 1;
|
||||
if compress == 1 {
|
||||
let required_capacity =
|
||||
Decompressor::upper_bound(&self.reader[..block_len]).unwrap_or(1024 * 1024);
|
||||
self.buffer.reserve(required_capacity);
|
||||
Decompressor::new()?
|
||||
.decompress_to_buffer(&self.reader[..block_len], &mut self.buffer)?;
|
||||
|
||||
if self.reader.len() < block_len {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"failed to read block content",
|
||||
));
|
||||
}
|
||||
if compress == 1 {
|
||||
let required_capacity =
|
||||
Decompressor::upper_bound(&self.reader[..block_len]).unwrap_or(1024 * 1024);
|
||||
self.buffer.reserve(required_capacity);
|
||||
Decompressor::new()?
|
||||
.decompress_to_buffer(&self.reader[..block_len], &mut self.buffer)?;
|
||||
self.reader.advance(block_len);
|
||||
} else {
|
||||
self.buffer.resize(block_len, 0u8);
|
||||
self.reader.read_exact(&mut self.buffer[..])?;
|
||||
}
|
||||
|
||||
self.reader.advance(block_len);
|
||||
} else {
|
||||
self.buffer.resize(block_len, 0u8);
|
||||
self.reader.read_exact(&mut self.buffer[..])?;
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
||||
@@ -89,7 +89,7 @@ where
|
||||
|
||||
fn encode_keep_add(&mut self, keep_len: usize, add_len: usize) {
|
||||
if keep_len < FOUR_BIT_LIMITS && add_len < FOUR_BIT_LIMITS {
|
||||
let b = (keep_len | add_len << 4) as u8;
|
||||
let b = (keep_len | (add_len << 4)) as u8;
|
||||
self.block.extend_from_slice(&[b])
|
||||
} else {
|
||||
let mut buf = [VINT_MODE; 20];
|
||||
@@ -143,6 +143,16 @@ where TValueReader: value::ValueReader
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_multiple_blocks(reader: Vec<OwnedBytes>) -> Self {
|
||||
DeltaReader {
|
||||
idx: 0,
|
||||
common_prefix_len: 0,
|
||||
suffix_range: 0..0,
|
||||
value_reader: TValueReader::default(),
|
||||
block_reader: BlockReader::from_multiple_blocks(reader),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn empty() -> Self {
|
||||
DeltaReader::new(OwnedBytes::empty())
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![allow(clippy::needless_borrows_for_generic_args)]
|
||||
|
||||
use std::cmp::Ordering;
|
||||
use std::io;
|
||||
use std::marker::PhantomData;
|
||||
@@ -7,6 +9,8 @@ use std::sync::Arc;
|
||||
use common::bounds::{transform_bound_inner_res, TransformBound};
|
||||
use common::file_slice::FileSlice;
|
||||
use common::{BinarySerializable, OwnedBytes};
|
||||
use futures_util::{stream, StreamExt, TryStreamExt};
|
||||
use itertools::Itertools;
|
||||
use tantivy_fst::automaton::AlwaysMatch;
|
||||
use tantivy_fst::Automaton;
|
||||
|
||||
@@ -98,20 +102,52 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
|
||||
&self,
|
||||
key_range: impl RangeBounds<[u8]>,
|
||||
limit: Option<u64>,
|
||||
automaton: &impl Automaton,
|
||||
merge_holes_under_bytes: usize,
|
||||
) -> io::Result<DeltaReader<TSSTable::ValueReader>> {
|
||||
let slice = self.file_slice_for_range(key_range, limit);
|
||||
let data = slice.read_bytes_async().await?;
|
||||
Ok(TSSTable::delta_reader(data))
|
||||
let match_all = automaton.will_always_match(&automaton.start());
|
||||
if match_all {
|
||||
let slice = self.file_slice_for_range(key_range, limit);
|
||||
let data = slice.read_bytes_async().await?;
|
||||
Ok(TSSTable::delta_reader(data))
|
||||
} else {
|
||||
let blocks = stream::iter(self.get_block_iterator_for_range_and_automaton(
|
||||
key_range,
|
||||
automaton,
|
||||
merge_holes_under_bytes,
|
||||
));
|
||||
let data = blocks
|
||||
.map(|block_addr| {
|
||||
self.sstable_slice
|
||||
.read_bytes_slice_async(block_addr.byte_range)
|
||||
})
|
||||
.buffered(5)
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
Ok(DeltaReader::from_multiple_blocks(data))
|
||||
}
|
||||
}
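The async path above fans out one read per selected block and keeps at most five of them in flight. A stripped-down sketch of that `stream -> map -> buffered -> try_collect` pattern, with a stand-in read function instead of a real file slice:

```rust
use std::io;
use std::ops::Range;

use futures_util::{stream, StreamExt, TryStreamExt};

// stand-in for an actual block read from storage
async fn read_block(byte_range: Range<usize>) -> io::Result<Vec<u8>> {
    Ok(vec![0u8; byte_range.len()])
}

// fetch all selected blocks, keeping at most five reads in flight, preserving order
async fn fetch_blocks(block_ranges: Vec<Range<usize>>) -> io::Result<Vec<Vec<u8>>> {
    stream::iter(block_ranges)
        .map(read_block)
        .buffered(5)
        .try_collect()
        .await
}
```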
|
||||
|
||||
pub(crate) fn sstable_delta_reader_for_key_range(
|
||||
&self,
|
||||
key_range: impl RangeBounds<[u8]>,
|
||||
limit: Option<u64>,
|
||||
automaton: &impl Automaton,
|
||||
) -> io::Result<DeltaReader<TSSTable::ValueReader>> {
|
||||
let slice = self.file_slice_for_range(key_range, limit);
|
||||
let data = slice.read_bytes()?;
|
||||
Ok(TSSTable::delta_reader(data))
|
||||
let match_all = automaton.will_always_match(&automaton.start());
|
||||
if match_all {
|
||||
let slice = self.file_slice_for_range(key_range, limit);
|
||||
let data = slice.read_bytes()?;
|
||||
Ok(TSSTable::delta_reader(data))
|
||||
} else {
|
||||
// if operations are sync, we assume latency is negligible, and there is no point in
// merging across holes
|
||||
let blocks = self.get_block_iterator_for_range_and_automaton(key_range, automaton, 0);
|
||||
let data = blocks
|
||||
.map(|block_addr| self.sstable_slice.read_bytes_slice(block_addr.byte_range))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
Ok(DeltaReader::from_multiple_blocks(data))
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn sstable_delta_reader_block(
|
||||
@@ -204,6 +240,42 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
|
||||
self.sstable_slice.slice((start_bound, end_bound))
|
||||
}
|
||||
|
||||
fn get_block_iterator_for_range_and_automaton<'a>(
|
||||
&'a self,
|
||||
key_range: impl RangeBounds<[u8]>,
|
||||
automaton: &'a impl Automaton,
|
||||
merge_holes_under_bytes: usize,
|
||||
) -> impl Iterator<Item = BlockAddr> + 'a {
|
||||
let lower_bound = match key_range.start_bound() {
|
||||
Bound::Included(key) | Bound::Excluded(key) => {
|
||||
self.sstable_index.locate_with_key(key).unwrap_or(u64::MAX)
|
||||
}
|
||||
Bound::Unbounded => 0,
|
||||
};
|
||||
|
||||
let upper_bound = match key_range.end_bound() {
|
||||
Bound::Included(key) | Bound::Excluded(key) => {
|
||||
self.sstable_index.locate_with_key(key).unwrap_or(u64::MAX)
|
||||
}
|
||||
Bound::Unbounded => u64::MAX,
|
||||
};
|
||||
let block_range = lower_bound..=upper_bound;
|
||||
self.sstable_index
|
||||
.get_block_for_automaton(automaton)
|
||||
.filter(move |(block_id, _)| block_range.contains(block_id))
|
||||
.map(|(_, block_addr)| block_addr)
|
||||
.coalesce(move |first, second| {
|
||||
if first.byte_range.end + merge_holes_under_bytes >= second.byte_range.start {
|
||||
Ok(BlockAddr {
|
||||
first_ordinal: first.first_ordinal,
|
||||
byte_range: first.byte_range.start..second.byte_range.end,
|
||||
})
|
||||
} else {
|
||||
Err((first, second))
|
||||
}
|
||||
})
|
||||
}
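A stripped-down sketch of the hole-merging step above, applying the same `Itertools::coalesce` idea to plain byte ranges; the ranges and threshold are made up for illustration:

```rust
use std::ops::Range;

use itertools::Itertools;

fn merge_close_ranges(ranges: Vec<Range<usize>>, merge_holes_under_bytes: usize) -> Vec<Range<usize>> {
    ranges
        .into_iter()
        .coalesce(move |first, second| {
            if first.end + merge_holes_under_bytes >= second.start {
                // the hole between the two ranges is small enough: issue one bigger read
                Ok(first.start..second.end)
            } else {
                Err((first, second))
            }
        })
        .collect()
}

fn main() {
    let blocks = vec![0..10, 12..20, 100..110];
    // the 2-byte hole is merged, the 80-byte hole keeps the reads separate
    assert_eq!(merge_close_ranges(blocks, 4), vec![0..20, 100..110]);
}
```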
|
||||
|
||||
/// Opens a `TermDictionary`.
|
||||
pub fn open(term_dictionary_file: FileSlice) -> io::Result<Self> {
|
||||
let (main_slice, footer_len_slice) = term_dictionary_file.split_from_end(20);
|
||||
@@ -521,6 +593,25 @@ impl<TSSTable: SSTable> Dictionary<TSSTable> {
|
||||
StreamerBuilder::new(self, AlwaysMatch)
|
||||
}
|
||||
|
||||
/// Returns a range builder filtered with a prefix.
|
||||
pub fn prefix_range<K: AsRef<[u8]>>(&self, prefix: K) -> StreamerBuilder<TSSTable> {
|
||||
let lower_bound = prefix.as_ref();
|
||||
let mut upper_bound = lower_bound.to_vec();
|
||||
for idx in (0..upper_bound.len()).rev() {
|
||||
if upper_bound[idx] == 255 {
|
||||
upper_bound.pop();
|
||||
} else {
|
||||
upper_bound[idx] += 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
let mut builder = self.range().ge(lower_bound);
|
||||
if !upper_bound.is_empty() {
|
||||
builder = builder.lt(upper_bound);
|
||||
}
|
||||
builder
|
||||
}
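The upper bound computed above is the smallest byte string strictly greater than every key carrying the prefix. A standalone sketch of the same logic with a couple of illustrative checks:

```rust
/// Smallest byte string strictly greater than every key starting with `prefix`,
/// or an empty vec when no such bound exists (prefix is all 0xff).
fn prefix_upper_bound(prefix: &[u8]) -> Vec<u8> {
    let mut upper_bound = prefix.to_vec();
    for idx in (0..upper_bound.len()).rev() {
        if upper_bound[idx] == 255 {
            upper_bound.pop();
        } else {
            upper_bound[idx] += 1;
            break;
        }
    }
    upper_bound
}

fn main() {
    assert_eq!(prefix_upper_bound(b"1"), b"2".to_vec());
    assert_eq!(prefix_upper_bound(&[0, 255]), vec![1]);
    // all-0xff prefix: there is no finite upper bound, the range stays open-ended
    assert_eq!(prefix_upper_bound(&[255, 255]), Vec::<u8>::new());
}
```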
|
||||
|
||||
/// A stream of all the sorted terms.
|
||||
pub fn stream(&self) -> io::Result<Streamer<TSSTable>> {
|
||||
self.range().into_stream()
|
||||
@@ -928,4 +1019,62 @@ mod tests {
|
||||
}
|
||||
assert!(!stream.advance());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prefix() {
|
||||
let (dic, _slice) = make_test_sstable();
|
||||
{
|
||||
let mut stream = dic.prefix_range("1").into_stream().unwrap();
|
||||
for i in 0x10000..0x20000 {
|
||||
assert!(stream.advance());
|
||||
assert_eq!(stream.term_ord(), i);
|
||||
assert_eq!(stream.value(), &i);
|
||||
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
|
||||
}
|
||||
assert!(!stream.advance());
|
||||
}
|
||||
{
|
||||
let mut stream = dic.prefix_range("").into_stream().unwrap();
|
||||
for i in 0..0x3ffff {
|
||||
assert!(stream.advance(), "failed at {i:05X}");
|
||||
assert_eq!(stream.term_ord(), i);
|
||||
assert_eq!(stream.value(), &i);
|
||||
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
|
||||
}
|
||||
assert!(!stream.advance());
|
||||
}
|
||||
{
|
||||
let mut stream = dic.prefix_range("0FF").into_stream().unwrap();
|
||||
for i in 0x0ff00..=0x0ffff {
|
||||
assert!(stream.advance(), "failed at {i:05X}");
|
||||
assert_eq!(stream.term_ord(), i);
|
||||
assert_eq!(stream.value(), &i);
|
||||
assert_eq!(stream.key(), format!("{i:05X}").into_bytes());
|
||||
}
|
||||
assert!(!stream.advance());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prefix_edge() {
|
||||
let dict = {
|
||||
let mut builder = Dictionary::<MonotonicU64SSTable>::builder(Vec::new()).unwrap();
|
||||
builder.insert(&[0, 254], &0).unwrap();
|
||||
builder.insert(&[0, 255], &1).unwrap();
|
||||
builder.insert(&[0, 255, 12], &2).unwrap();
|
||||
builder.insert(&[1], &2).unwrap();
|
||||
builder.insert(&[1, 0], &2).unwrap();
|
||||
let table = builder.finish().unwrap();
|
||||
let table = Arc::new(PermissionedHandle::new(table));
|
||||
let slice = common::file_slice::FileSlice::new(table.clone());
|
||||
Dictionary::<MonotonicU64SSTable>::open(slice).unwrap()
|
||||
};
|
||||
|
||||
let mut stream = dict.prefix_range(&[0, 255]).into_stream().unwrap();
|
||||
assert!(stream.advance());
|
||||
assert_eq!(stream.key(), &[0, 255]);
|
||||
assert!(stream.advance());
|
||||
assert_eq!(stream.key(), &[0, 255, 12]);
|
||||
assert!(!stream.advance());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ use std::ops::Range;
|
||||
|
||||
use merge::ValueMerger;
|
||||
|
||||
mod block_match_automaton;
|
||||
mod delta;
|
||||
mod dictionary;
|
||||
pub mod merge;
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
use common::OwnedBytes;
|
||||
use tantivy_fst::Automaton;
|
||||
|
||||
use crate::block_match_automaton::can_block_match_automaton;
|
||||
use crate::{BlockAddr, SSTable, SSTableDataCorruption, TermOrdinal};
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct SSTableIndex {
|
||||
blocks: Vec<BlockMeta>,
|
||||
pub(crate) blocks: Vec<BlockMeta>,
|
||||
}
|
||||
|
||||
impl SSTableIndex {
|
||||
@@ -74,6 +76,31 @@ impl SSTableIndex {
|
||||
// locate_with_ord always returns an index within range
|
||||
self.get_block(self.locate_with_ord(ord)).unwrap()
|
||||
}
|
||||
|
||||
pub(crate) fn get_block_for_automaton<'a>(
|
||||
&'a self,
|
||||
automaton: &'a impl Automaton,
|
||||
) -> impl Iterator<Item = (u64, BlockAddr)> + 'a {
|
||||
std::iter::once((None, &self.blocks[0]))
|
||||
.chain(self.blocks.windows(2).map(|window| {
|
||||
let [prev, curr] = window else {
|
||||
unreachable!();
|
||||
};
|
||||
(Some(&*prev.last_key_or_greater), curr)
|
||||
}))
|
||||
.enumerate()
|
||||
.filter_map(move |(pos, (prev_key, current_block))| {
|
||||
if can_block_match_automaton(
|
||||
prev_key,
|
||||
&current_block.last_key_or_greater,
|
||||
automaton,
|
||||
) {
|
||||
Some((pos as u64, current_block.block_addr.clone()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
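The `once(..).chain(windows(2))` trick above pairs every block with the previous block's last key, using `None` for the first block. A tiny sketch of that pairing pattern on plain strings:

```rust
fn main() {
    let last_keys = ["apple", "banana", "cherry"];
    let pairs: Vec<(Option<&str>, &str)> = std::iter::once((None, last_keys[0]))
        .chain(last_keys.windows(2).map(|w| (Some(w[0]), w[1])))
        .collect();
    // each block is bounded by (previous block's last key, its own last key]
    assert_eq!(
        pairs,
        vec![(None, "apple"), (Some("apple"), "banana"), (Some("banana"), "cherry")]
    );
}
```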
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -99,3 +126,106 @@ impl SSTable for IndexSSTable {
|
||||
|
||||
type ValueWriter = crate::value::index::IndexValueWriter;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::block_match_automaton::tests::EqBuffer;
|
||||
|
||||
#[test]
|
||||
fn test_get_block_for_automaton() {
|
||||
let sstable = SSTableIndex {
|
||||
blocks: vec![
|
||||
BlockMeta {
|
||||
last_key_or_greater: vec![0, 1, 2],
|
||||
block_addr: BlockAddr {
|
||||
first_ordinal: 0,
|
||||
byte_range: 0..10,
|
||||
},
|
||||
},
|
||||
BlockMeta {
|
||||
last_key_or_greater: vec![0, 2, 2],
|
||||
block_addr: BlockAddr {
|
||||
first_ordinal: 5,
|
||||
byte_range: 10..20,
|
||||
},
|
||||
},
|
||||
BlockMeta {
|
||||
last_key_or_greater: vec![0, 3, 2],
|
||||
block_addr: BlockAddr {
|
||||
first_ordinal: 10,
|
||||
byte_range: 20..30,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 1, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![(
|
||||
0,
|
||||
BlockAddr {
|
||||
first_ordinal: 0,
|
||||
byte_range: 0..10
|
||||
}
|
||||
)]
|
||||
);
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 2, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![(
|
||||
1,
|
||||
BlockAddr {
|
||||
first_ordinal: 5,
|
||||
byte_range: 10..20
|
||||
}
|
||||
)]
|
||||
);
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 3, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![(
|
||||
2,
|
||||
BlockAddr {
|
||||
first_ordinal: 10,
|
||||
byte_range: 20..30
|
||||
}
|
||||
)]
|
||||
);
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 4, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert!(res.is_empty());
|
||||
|
||||
let complex_automaton = EqBuffer(vec![0, 1, 1]).union(EqBuffer(vec![0, 3, 1]));
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&complex_automaton)
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![
|
||||
(
|
||||
0,
|
||||
BlockAddr {
|
||||
first_ordinal: 0,
|
||||
byte_range: 0..10
|
||||
}
|
||||
),
|
||||
(
|
||||
2,
|
||||
BlockAddr {
|
||||
first_ordinal: 10,
|
||||
byte_range: 20..30
|
||||
}
|
||||
)
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,8 +5,9 @@ use std::sync::Arc;
|
||||
use common::{BinarySerializable, FixedSize, OwnedBytes};
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker};
|
||||
use tantivy_fst::raw::Fst;
|
||||
use tantivy_fst::{IntoStreamer, Map, MapBuilder, Streamer};
|
||||
use tantivy_fst::{Automaton, IntoStreamer, Map, MapBuilder, Streamer};
|
||||
|
||||
use crate::block_match_automaton::can_block_match_automaton;
|
||||
use crate::{common_prefix_len, SSTableDataCorruption, TermOrdinal};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -64,6 +65,41 @@ impl SSTableIndex {
|
||||
SSTableIndex::V3Empty(v3_empty) => v3_empty.get_block_with_ord(ord),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_block_for_automaton<'a>(
|
||||
&'a self,
|
||||
automaton: &'a impl Automaton,
|
||||
) -> impl Iterator<Item = (u64, BlockAddr)> + 'a {
|
||||
match self {
|
||||
SSTableIndex::V2(v2_index) => {
|
||||
BlockIter::V2(v2_index.get_block_for_automaton(automaton))
|
||||
}
|
||||
SSTableIndex::V3(v3_index) => {
|
||||
BlockIter::V3(v3_index.get_block_for_automaton(automaton))
|
||||
}
|
||||
SSTableIndex::V3Empty(v3_empty) => {
|
||||
BlockIter::V3Empty(std::iter::once((0, v3_empty.block_addr.clone())))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum BlockIter<V2, V3, T> {
|
||||
V2(V2),
|
||||
V3(V3),
|
||||
V3Empty(std::iter::Once<T>),
|
||||
}
|
||||
|
||||
impl<V2: Iterator<Item = T>, V3: Iterator<Item = T>, T> Iterator for BlockIter<V2, V3, T> {
|
||||
type Item = T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
match self {
|
||||
BlockIter::V2(v2) => v2.next(),
|
||||
BlockIter::V3(v3) => v3.next(),
|
||||
BlockIter::V3Empty(once) => once.next(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -123,6 +159,59 @@ impl SSTableIndexV3 {
|
||||
pub(crate) fn get_block_with_ord(&self, ord: TermOrdinal) -> BlockAddr {
|
||||
self.block_addr_store.binary_search_ord(ord).1
|
||||
}
|
||||
|
||||
pub(crate) fn get_block_for_automaton<'a>(
|
||||
&'a self,
|
||||
automaton: &'a impl Automaton,
|
||||
) -> impl Iterator<Item = (u64, BlockAddr)> + 'a {
|
||||
// this is more complicated than other index formats: we don't have a ready-made list of
// blocks, and instead need to stream-decode the sstable.
|
||||
|
||||
GetBlockForAutomaton {
|
||||
streamer: self.fst_index.stream(),
|
||||
block_addr_store: &self.block_addr_store,
|
||||
prev_key: None,
|
||||
automaton,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO we iterate over the entire Map to find matching blocks,
// we could manually iterate on the underlying Fst and skip whole branches if our Automaton says
// they cannot match. This isn't as bad as it sounds given the fst is a lot smaller than the rest
// of the sstable.
// To do that, we can't use tantivy_fst's Stream with an automaton, as we need to know 2 consecutive
// fst keys to form a proper opinion on whether this is a match, which we can't translate into a
// single automaton.
|
||||
struct GetBlockForAutomaton<'a, A: Automaton> {
|
||||
streamer: tantivy_fst::map::Stream<'a>,
|
||||
block_addr_store: &'a BlockAddrStore,
|
||||
prev_key: Option<Vec<u8>>,
|
||||
automaton: &'a A,
|
||||
}
|
||||
|
||||
impl<A: Automaton> Iterator for GetBlockForAutomaton<'_, A> {
|
||||
type Item = (u64, BlockAddr);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
while let Some((new_key, block_id)) = self.streamer.next() {
|
||||
if let Some(prev_key) = self.prev_key.as_mut() {
|
||||
if can_block_match_automaton(Some(prev_key), new_key, self.automaton) {
|
||||
prev_key.clear();
|
||||
prev_key.extend_from_slice(new_key);
|
||||
return Some((block_id, self.block_addr_store.get(block_id).unwrap()));
|
||||
}
|
||||
prev_key.clear();
|
||||
prev_key.extend_from_slice(new_key);
|
||||
} else {
|
||||
self.prev_key = Some(new_key.to_owned());
|
||||
if can_block_match_automaton(None, new_key, self.automaton) {
|
||||
return Some((block_id, self.block_addr_store.get(block_id).unwrap()));
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -734,7 +823,8 @@ fn find_best_slope(elements: impl Iterator<Item = (usize, u64)> + Clone) -> (u32
|
||||
mod tests {
|
||||
use common::OwnedBytes;
|
||||
|
||||
use super::{BlockAddr, SSTableIndexBuilder, SSTableIndexV3};
|
||||
use super::*;
|
||||
use crate::block_match_automaton::tests::EqBuffer;
|
||||
use crate::SSTableDataCorruption;
|
||||
|
||||
#[test]
|
||||
@@ -823,4 +913,108 @@ mod tests {
|
||||
(12345, 1)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_block_for_automaton() {
|
||||
let sstable_index_builder = SSTableIndexBuilder {
|
||||
blocks: vec![
|
||||
BlockMeta {
|
||||
last_key_or_greater: vec![0, 1, 2],
|
||||
block_addr: BlockAddr {
|
||||
first_ordinal: 0,
|
||||
byte_range: 0..10,
|
||||
},
|
||||
},
|
||||
BlockMeta {
|
||||
last_key_or_greater: vec![0, 2, 2],
|
||||
block_addr: BlockAddr {
|
||||
first_ordinal: 5,
|
||||
byte_range: 10..20,
|
||||
},
|
||||
},
|
||||
BlockMeta {
|
||||
last_key_or_greater: vec![0, 3, 2],
|
||||
block_addr: BlockAddr {
|
||||
first_ordinal: 10,
|
||||
byte_range: 20..30,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
let mut sstable_index_bytes = Vec::new();
|
||||
let fst_len = sstable_index_builder
|
||||
.serialize(&mut sstable_index_bytes)
|
||||
.unwrap();
|
||||
|
||||
let sstable = SSTableIndexV3::load(OwnedBytes::new(sstable_index_bytes), fst_len).unwrap();
|
||||
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 1, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![(
|
||||
0,
|
||||
BlockAddr {
|
||||
first_ordinal: 0,
|
||||
byte_range: 0..10
|
||||
}
|
||||
)]
|
||||
);
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 2, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![(
|
||||
1,
|
||||
BlockAddr {
|
||||
first_ordinal: 5,
|
||||
byte_range: 10..20
|
||||
}
|
||||
)]
|
||||
);
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 3, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![(
|
||||
2,
|
||||
BlockAddr {
|
||||
first_ordinal: 10,
|
||||
byte_range: 20..30
|
||||
}
|
||||
)]
|
||||
);
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&EqBuffer(vec![0, 4, 1]))
|
||||
.collect::<Vec<_>>();
|
||||
assert!(res.is_empty());
|
||||
|
||||
let complex_automaton = EqBuffer(vec![0, 1, 1]).union(EqBuffer(vec![0, 3, 1]));
|
||||
let res = sstable
|
||||
.get_block_for_automaton(&complex_automaton)
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(
|
||||
res,
|
||||
vec![
|
||||
(
|
||||
0,
|
||||
BlockAddr {
|
||||
first_ordinal: 0,
|
||||
byte_range: 0..10
|
||||
}
|
||||
),
|
||||
(
|
||||
2,
|
||||
BlockAddr {
|
||||
first_ordinal: 10,
|
||||
byte_range: 20..30
|
||||
}
|
||||
)
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,16 +86,24 @@ where
|
||||
bound_as_byte_slice(&self.upper),
|
||||
);
|
||||
self.term_dict
|
||||
.sstable_delta_reader_for_key_range(key_range, self.limit)
|
||||
.sstable_delta_reader_for_key_range(key_range, self.limit, &self.automaton)
|
||||
}
|
||||
|
||||
async fn delta_reader_async(&self) -> io::Result<DeltaReader<TSSTable::ValueReader>> {
|
||||
async fn delta_reader_async(
|
||||
&self,
|
||||
merge_holes_under_bytes: usize,
|
||||
) -> io::Result<DeltaReader<TSSTable::ValueReader>> {
|
||||
let key_range = (
|
||||
bound_as_byte_slice(&self.lower),
|
||||
bound_as_byte_slice(&self.upper),
|
||||
);
|
||||
self.term_dict
|
||||
.sstable_delta_reader_for_key_range_async(key_range, self.limit)
|
||||
.sstable_delta_reader_for_key_range_async(
|
||||
key_range,
|
||||
self.limit,
|
||||
&self.automaton,
|
||||
merge_holes_under_bytes,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -130,7 +138,16 @@ where
|
||||
|
||||
/// See `into_stream(..)`
|
||||
pub async fn into_stream_async(self) -> io::Result<Streamer<'a, TSSTable, A>> {
|
||||
let delta_reader = self.delta_reader_async().await?;
|
||||
self.into_stream_async_merging_holes(0).await
|
||||
}
|
||||
|
||||
/// Same as `into_stream_async`, but tries to issue a single io operation when requesting
|
||||
/// blocks that are not consecutive but less than `merge_holes_under_bytes` bytes apart.
|
||||
pub async fn into_stream_async_merging_holes(
|
||||
self,
|
||||
merge_holes_under_bytes: usize,
|
||||
) -> io::Result<Streamer<'a, TSSTable, A>> {
|
||||
let delta_reader = self.delta_reader_async(merge_holes_under_bytes).await?;
|
||||
self.into_stream_given_delta_reader(delta_reader)
|
||||
}
|
||||
|
||||
@@ -327,4 +344,7 @@ mod tests {
|
||||
assert!(!term_streamer.advance());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// TODO add test for sparse search with a block of poison (starts with 0xffffffff) => such a
|
||||
// block instantly causes an unexpected EOF error
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-stacker"
|
||||
version = "0.3.0"
|
||||
version = "0.5.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
@@ -9,7 +9,7 @@ description = "term hashmap used for indexing"
|
||||
|
||||
[dependencies]
|
||||
murmurhash32 = "0.3"
|
||||
common = { version = "0.7", path = "../common/", package = "tantivy-common" }
|
||||
common = { version = "0.9", path = "../common/", package = "tantivy-common" }
|
||||
ahash = { version = "0.8.11", default-features = false, optional = true }
|
||||
rand_distr = "0.4.3"
|
||||
|
||||
@@ -26,7 +26,7 @@ path = "example/hashmap.rs"
|
||||
[dev-dependencies]
|
||||
rand = "0.8.5"
|
||||
zipf = "7.0.0"
|
||||
rustc-hash = "1.1.0"
|
||||
rustc-hash = "2.1.0"
|
||||
proptest = "1.2.0"
|
||||
binggan = { version = "0.14.0" }
|
||||
|
||||
|
||||
@@ -160,7 +160,7 @@ mod tests {
|
||||
{
|
||||
let mut buffer = Vec::new();
|
||||
stack.read_to_end(&arena, &mut buffer);
|
||||
assert_eq!(&buffer[..], &[]);
|
||||
assert_eq!(&buffer[..], &[] as &[u8]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ impl Addr {
|
||||
|
||||
#[inline]
|
||||
fn new(page_id: usize, local_addr: usize) -> Addr {
|
||||
Addr((page_id << NUM_BITS_PAGE_ADDR | local_addr) as u32)
|
||||
Addr(((page_id << NUM_BITS_PAGE_ADDR) | local_addr) as u32)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-tokenizer-api"
|
||||
version = "0.3.0"
|
||||
version = "0.5.0"
|
||||
license = "MIT"
|
||||
edition = "2021"
|
||||
description = "Tokenizer API of tantivy"
|
||||
|
||||