Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-02 07:22:53 +00:00

Compare commits: 0.18 ... ip_fastfie (127 commits)
Commits (SHA1):

c62ddb61b7, ed85ba62b3, 4b7ed27595, 66ccba2878, c56f4572f4, 399b137617, f3efb41d4e, 20a09282a1,
1107400ae0, 391f881fa1, eec908e962, 4a1b251a08, f4d7621370, d4b2b7de8b, d5ee4edf25, fcc7bd7024,
ce8d6b259a, 099e626156, 6a9d09cf7a, 704d0a8d8b, 195309a557, da0f78e06c, 9b6b60cc2b, 6444516a82,
a9b0d1a0ab, 2b333ca635, 80a1418284, 5ab5f070ed, d122f2c74e, 5b564916f0, 06fd8684b7, 931bab8010,
8dac30e6d1, 2e0a7d072f, af84e74284, fff1a03842, 90e296f2d0, 5f966d747b, d24f31f965, f26b686a1c,
775e936f7d, 7e032a9efd, 23fe73a6c0, a4be239d38, 2406d9278b, 6c2d9737f1, a5688572a5, 431b5a091e,
2c17271cd9, 5750224d4c, 02691f2445, e31e78f39f, 9db2f0e82b, 2ed5cc873d, d278417300, d89a8dd118,
1bd44a5f61, d750ced813, fbc469e5df, c1273670e4, 7eb267341e, db1836691e, 437cd350a2, 8024ecf013,
9baefbe2ab, ad76d11008, c3220bece0, 2b713f0977, 0bc6b4a117, 79e42d4a6d, 0135fbc4c8, 449594f67a,
8b6647e908, efabcbcdf5, 7bf5962554, 4c7dedef29, 93f356a7a7, 6ca5f77466, 2e2822f89d, de178a1901,
11e4225f23, f21b73d1f6, 1440f3243b, 83d0c13fb0, 88054aa333, 635c39ba48, eab2257637, 328bd96c24,
fc24842a43, 2d6f1d43ff, ca0973ec78, 38ee60d792, f68be28284, fc43ab9280, 38c2ea6a5d, 26a0fd1fbe,
811b91ecb3, 25c00ce856, e5debb97a7, bc4cd9ffaa, 9a13d8709b, e6eadf1a2f, 7cca7e6a47, ef2492dba6,
2981e6c1df, b33b4c0092, 4d9d2b6db0, ed868f93a3, 5e599d96d7, 314ae43a45, fce91b2f3a, 9bcd2b8104,
0c9c257150, 1af85a2956, bc4c3d0c6b, 6937c75f05, e54429e827, ca836b6414, 71f75071d2, b114e553cd,
17dcc99e43, c5c2e59b2b, 44ea7313ca, 11ac451250, 6a4632211a, a99e5459e3, 3f88718f38
.github/workflows/long_running.yml (15)

@@ -9,16 +9,21 @@ env:
  NUM_FUNCTIONAL_TEST_ITERATIONS: 20000

jobs:
  functional_test_unsorted:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
          components: rustfmt, clippy

      - name: Run indexing_unsorted
        run: cargo test indexing_unsorted -- --ignored
  functional_test_sorted:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Run indexing_sorted
        run: cargo test indexing_sorted -- --ignored
.github/workflows/test.yml (7)

@@ -16,8 +16,6 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Build
        run: cargo build --verbose --workspace
      - name: Install latest nightly to test also against unstable feature flag
        uses: actions-rs/toolchain@v1
        with:
@@ -25,13 +23,16 @@ jobs:
          override: true
          components: rustfmt

      - name: Install latest nightly to test also against unstable feature flag
      - name: Install stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
          components: rustfmt, clippy

      - name: Build
        run: cargo build --verbose --workspace

      - name: Run tests
        run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints --verbose --workspace
@@ -57,8 +57,8 @@ For a better idea of how indexing works, you may read the [following blog post](

Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it was a primary id, and delete the associated term if they want to delete only one specific document.

On commit, tantivy will find all of the segments with documents matching this existing term and create a [tombstone file](src/fastfield/delete.rs) that represents the bitset of the document that are deleted.
Like all segment files, this file is immutable. Because it is possible to have more than one tombstone file at a given instant, the tombstone filename has the format ``` segment_id . commit_opstamp . del```.
On commit, tantivy will find all of the segments with documents matching this existing term and remove from [alive bitset file](src/fastfield/alive_bitset.rs) that represents the bitset of the alive document ids.
Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ``` segment_id . commit_opstamp . del```.

An opstamp is simply an incremental id that identifies any operation applied to the index. For instance, performing a commit or adding a document.
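A minimal sketch of that delete-by-term flow, using an `id` text field as a user-managed primary id (the field name and writer budget are illustrative):

```rust
use tantivy::schema::{Schema, STORED, STRING};
use tantivy::{doc, Index, Term};

fn delete_example() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // A plain string field used *as if* it were a primary id.
    let id = schema_builder.add_text_field("id", STRING | STORED);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(id => "doc-1"))?;
    writer.commit()?;

    // Every document whose `id` field contains the term "doc-1" is marked
    // as deleted in the segment's alive bitset on the next commit.
    writer.delete_term(Term::from_field_text(id, "doc-1"));
    writer.commit()?;
    Ok(())
}
```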
@@ -249,7 +249,7 @@ For instance, when the phrase query "the art of war" does not match "the war of

To make it possible, it is possible to specify in the schema that a field should store positions in addition to being indexed.

The token positions of all of the terms are then stored in a separate file with the extension `.pos`.
The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate throught the docset,
The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate through the docset,
we advance the position reader by the number of term frequencies of the current document.
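On the schema side, recording positions looks roughly like this; the field and tokenizer names are illustrative:

```rust
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};

fn schema_with_positions() -> Schema {
    let mut schema_builder = Schema::builder();
    // Record term frequencies *and* token positions so that phrase queries
    // can check adjacency using the `.pos` file.
    let indexing = TextFieldIndexing::default()
        .set_tokenizer("default")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    let options = TextOptions::default().set_indexing_options(indexing);
    schema_builder.add_text_field("body", options);
    schema_builder.build()
}
```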
## [fieldnorms/](src/fieldnorms): Here is my doc, how many tokens in this field?
@@ -1,3 +1,10 @@
Tantivy 0.19
================================
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
  The `DateTime` type has been updated to hold timestamps with microseconds precision.
  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing).
- Remove Searcher pool and make `Searcher` cloneable.

Tantivy 0.18
================================
- For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
@@ -29,7 +36,7 @@ Tantivy 0.17

Tantivy 0.16.2
================================
- Bugfix in FuzzyTermQuery. (tranposition_cost_one was not doing anything)
- Bugfix in FuzzyTermQuery. (transposition_cost_one was not doing anything)

Tantivy 0.16.1
========================
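A small sketch of the new options, mirroring the `DateOptions` usage that appears in the `examples/date_time_field.rs` diff further down:

```rust
use tantivy::schema::{Cardinality, DateOptions, Schema, INDEXED};
use tantivy::DatePrecision;

fn date_field_schema() -> Schema {
    let mut schema_builder = Schema::builder();
    // Seconds precision is a hint for fast-value compression;
    // terms and indexing use seconds precision in any case.
    let opts = DateOptions::from(INDEXED)
        .set_stored()
        .set_fast(Cardinality::SingleValue)
        .set_precision(DatePrecision::Seconds);
    schema_builder.add_date_field("occurred_at", opts);
    schema_builder.build()
}
```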
@@ -49,7 +49,7 @@ thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.5.0"
murmurhash32 = "0.2.0"
time = { version = "0.3.9", features = ["serde-well-known"] }
time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.7.5"
@@ -59,6 +59,9 @@ measure_time = "0.8.2"
pretty_assertions = "1.2.1"
serde_cbor = { version = "0.11.2", optional = true }
async-trait = "0.1.53"
arc-swap = "1.5.0"
gcd = "2.1.0"
roaring = "0.9.0"

[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"
@@ -71,7 +74,7 @@ proptest = "1.0.0"
criterion = "0.3.5"
test-log = "0.2.10"
env_logger = "0.9.0"
pprof = { version = "0.9.0", features = ["flamegraph", "criterion"] }
pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
futures = "0.3.21"

[dev-dependencies.fail]
README.md (11)

@@ -152,4 +152,13 @@ You can also find other bindings on [GitHub](https://github.com/search?q=tantivy
- and [more](https://github.com/search?q=tantivy)!

### On average, how much faster is Tantivy compared to Lucene?
- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.

### Does tantivy support incremental indexing?
- Yes.

### How can I edit documents?
- Data in tantivy is immutable. To edit a document, the document needs to be deleted and reindexed.

### When will my documents be searchable during indexing?
- Documents will be searchable after a `commit` is called on an `IndexWriter`. Existing `IndexReader`s will also need to be reloaded in order to reflect the changes. Finally, changes are only visible to newly acquired `Searcher`.
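A minimal sketch of that commit, reload, and search sequence (the field name and writer budget are illustrative):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn commit_and_search() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let reader = index.reader()?;
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "hello world"))?;

    // Nothing is searchable before `commit`.
    writer.commit()?;
    // Existing readers must be reloaded, and only a freshly acquired
    // `Searcher` sees the newly committed documents.
    reader.reload()?;
    let searcher = reader.searcher();
    assert_eq!(searcher.num_docs(), 1);
    Ok(())
}
```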
@@ -1,7 +1,7 @@
[package]
name = "tantivy-bitpacker"
version = "0.2.0"
edition = "2018"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = []
@@ -3,7 +3,7 @@ name = "tantivy-common"
version = "0.3.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2018"
edition = "2021"
description = "common traits and utility functions used by multiple tantivy subcrates"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,7 +11,10 @@ mod writer;

pub use bitset::*;
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use vint::{read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt};
pub use vint::{
    deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,
    serialize_vint_u32, write_u32_vint, VInt,
};
pub use writer::{AntiCallToken, CountingWriter, TerminatingWrite};

/// Has length trait
@@ -104,8 +107,6 @@ pub fn u64_to_f64(val: u64) -> f64 {
#[cfg(test)]
pub mod test {

    use std::f64;

    use proptest::prelude::*;

    use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
@@ -135,11 +136,11 @@ pub mod test {

    #[test]
    fn test_i64_converter() {
        assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
        assert_eq!(i64_to_u64(i64::max_value()), u64::max_value());
        assert_eq!(i64_to_u64(i64::MIN), u64::MIN);
        assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
        test_i64_converter_helper(0i64);
        test_i64_converter_helper(i64::min_value());
        test_i64_converter_helper(i64::max_value());
        test_i64_converter_helper(i64::MIN);
        test_i64_converter_helper(i64::MAX);
        for i in -1000i64..1000i64 {
            test_i64_converter_helper(i);
        }

@@ -229,7 +229,7 @@ pub mod test {
        fixed_size_test::<u32>();
        assert_eq!(4, serialize_test(3u32));
        assert_eq!(4, serialize_test(5u32));
        assert_eq!(4, serialize_test(u32::max_value()));
        assert_eq!(4, serialize_test(u32::MAX));
    }

    #[test]
@@ -247,6 +247,11 @@ pub mod test {
        fixed_size_test::<u64>();
    }

    #[test]
    fn test_serialize_bool() {
        fixed_size_test::<bool>();
    }

    #[test]
    fn test_serialize_string() {
        assert_eq!(serialize_test(String::from("")), 1);
@@ -272,6 +277,6 @@ pub mod test {
        assert_eq!(serialize_test(VInt(1234u64)), 2);
        assert_eq!(serialize_test(VInt(16_383u64)), 2);
        assert_eq!(serialize_test(VInt(16_384u64)), 3);
        assert_eq!(serialize_test(VInt(u64::max_value())), 10);
        assert_eq!(serialize_test(VInt(u64::MAX)), 10);
    }
}
@@ -5,6 +5,40 @@ use byteorder::{ByteOrder, LittleEndian};

use super::BinarySerializable;

/// Variable int serializes a u128 number
pub fn serialize_vint_u128(mut val: u128, output: &mut Vec<u8>) {
    loop {
        let next_byte: u8 = (val % 128u128) as u8;
        val /= 128u128;
        if val == 0 {
            output.push(next_byte | STOP_BIT);
            return;
        } else {
            output.push(next_byte);
        }
    }
}

/// Deserializes a u128 number
///
/// Returns the number and the slice after the vint
pub fn deserialize_vint_u128(data: &[u8]) -> io::Result<(u128, &[u8])> {
    let mut result = 0u128;
    let mut shift = 0u64;
    for i in 0..19 {
        let b = data[i];
        result |= u128::from(b % 128u8) << shift;
        if b >= STOP_BIT {
            return Ok((result, &data[i + 1..]));
        }
        shift += 7;
    }
    Err(io::Error::new(
        io::ErrorKind::InvalidData,
        "Failed to deserialize u128 vint",
    ))
}

/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct VInt(pub u64);
@@ -176,6 +210,7 @@ impl BinarySerializable for VInt {
mod tests {

    use super::{serialize_vint_u32, BinarySerializable, VInt};
    use crate::vint::{deserialize_vint_u128, serialize_vint_u128};

    fn aux_test_vint(val: u64) {
        let mut v = [14u8; 10];
@@ -199,7 +234,7 @@ mod tests {
        aux_test_vint(0);
        aux_test_vint(1);
        aux_test_vint(5);
        aux_test_vint(u64::max_value());
        aux_test_vint(u64::MAX);
        for i in 1..9 {
            let power_of_128 = 1u64 << (7 * i);
            aux_test_vint(power_of_128 - 1u64);
@@ -217,6 +252,21 @@ mod tests {
        assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
    }

    fn aux_test_vint_u128(val: u128) {
        let mut data = vec![];
        serialize_vint_u128(val, &mut data);
        let (deser_val, _data) = deserialize_vint_u128(&data).unwrap();
        assert_eq!(val, deser_val);
    }

    #[test]
    fn test_vint_u128() {
        aux_test_vint_u128(0);
        aux_test_vint_u128(1);
        aux_test_vint_u128(u128::MAX / 3);
        aux_test_vint_u128(u128::MAX);
    }

    #[test]
    fn test_vint_u32() {
        aux_test_serialize_vint_u32(0);
@@ -228,6 +278,6 @@ mod tests {
            aux_test_serialize_vint_u32(power_of_128);
            aux_test_serialize_vint_u32(power_of_128 + 1u32);
        }
        aux_test_serialize_vint_u32(u32::max_value());
        aux_test_serialize_vint_u32(u32::MAX);
    }
}
@@ -62,7 +62,7 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
pub struct AntiCallToken(());

/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write {
pub trait TerminatingWrite: Write + Send {
    /// Indicate that the writer will no longer be used. Internally call terminate_ref.
    fn terminate(mut self) -> io::Result<()>
    where Self: Sized {
@@ -38,7 +38,7 @@ Note: Tantivy 0.16 does not do this optimization yet.
In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?)

## Usage
The index sorting can be configured setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of tantvy 0.16 only fast fields are allowed to be used.
The index sorting can be configured setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of Tantivy 0.16 only fast fields are allowed to be used.

```
let settings = IndexSettings {
```
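The hunk above is cut off inside that snippet; a fuller sketch of the same configuration, assuming the `IndexSortByField` and `Order` types re-exported at the tantivy crate root (the field name is illustrative):

```rust
use tantivy::schema::{Schema, FAST, INDEXED};
use tantivy::{Index, IndexSettings, IndexSortByField, Order};

fn sorted_index() -> tantivy::Result<Index> {
    let mut schema_builder = Schema::builder();
    // Only fast fields can be used as the sort field.
    schema_builder.add_u64_field("intval", INDEXED | FAST);
    let schema = schema_builder.build();

    // Sort all documents in every segment by the fast field "intval".
    let settings = IndexSettings {
        sort_by_field: Some(IndexSortByField {
            field: "intval".to_string(),
            order: Order::Desc,
        }),
        ..Default::default()
    };
    Index::builder()
        .schema(schema)
        .settings(settings)
        .create_in_ram()
}
```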
@@ -86,7 +86,7 @@ If one more json field is defined, things get even more complicated.
## Default json field

If the schema contains a text field called "text" and a json field that is set as a default field:
`text:hello` could be reasonably interpreted as targetting the text field or as targetting the json field called `json_dynamic` with the json_path "text".
`text:hello` could be reasonably interpreted as targeting the text field or as targeting the json field called `json_dynamic` with the json_path "text".

If there is such an ambiguity, we decide to only search in the "text" field: `text:hello`.
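A sketch of the setup this describes, assuming `add_json_field` accepts the usual text options; the resolution of the ambiguity itself happens inside the query parser:

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::Index;

fn default_json_field_setup() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let json_dynamic = schema_builder.add_json_field("json_dynamic", STORED | TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // Both fields are default fields of the parser. A query such as
    // `text:hello` is ambiguous and is resolved in favor of the explicit
    // "text" field, as described above.
    let query_parser = QueryParser::for_index(&index, vec![text, json_dynamic]);
    let _query = query_parser.parse_query("text:hello")?;
    Ok(())
}
```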
@@ -110,6 +110,7 @@ fn main() -> tantivy::Result<()> {
                    (9f64..14f64).into(),
                    (14f64..20f64).into(),
                ],
                ..Default::default()
            }),
            sub_aggregation: sub_agg_req_1.clone(),
        }),
@@ -117,7 +118,7 @@ fn main() -> tantivy::Result<()> {
    .into_iter()
    .collect();

    let collector = AggregationCollector::from_aggs(agg_req_1);
    let collector = AggregationCollector::from_aggs(agg_req_1, None);

    let searcher = reader.searcher();
    let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
examples/date_time_field.rs (69, new file)

@@ -0,0 +1,69 @@
// # DateTime field example
//
// This example shows how the DateTime field can be used

use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Cardinality, DateOptions, Schema, Value, INDEXED, STORED, STRING};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    let mut schema_builder = Schema::builder();
    let opts = DateOptions::from(INDEXED)
        .set_stored()
        .set_fast(Cardinality::SingleValue)
        .set_precision(tantivy::DatePrecision::Seconds);
    let occurred_at = schema_builder.add_date_field("occurred_at", opts);
    let event_type = schema_builder.add_text_field("event", STRING | STORED);
    let schema = schema_builder.build();

    // # Indexing documents
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(50_000_000)?;
    let doc = schema.parse_document(
        r#"{
            "occurred_at": "2022-06-22T12:53:50.53Z",
            "event": "pull-request"
        }"#,
    )?;
    index_writer.add_document(doc)?;
    let doc = schema.parse_document(
        r#"{
            "occurred_at": "2022-06-22T13:00:00.22Z",
            "event": "comment"
        }"#,
    )?;
    index_writer.add_document(doc)?;
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();

    // # Default fields: event_type
    let query_parser = QueryParser::for_index(&index, vec![event_type]);
    {
        let query = query_parser.parse_query("event:comment")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
        assert_eq!(count_docs.len(), 1);
    }
    {
        let query = query_parser
            .parse_query(r#"occurred_at:[2022-06-22T12:58:00Z TO 2022-06-23T00:00:00Z}"#)?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
        assert_eq!(count_docs.len(), 1);
        for (_score, doc_address) in count_docs {
            let retrieved_doc = searcher.doc(doc_address)?;
            assert!(matches!(
                retrieved_doc.get_first(occurred_at),
                Some(Value::Date(_))
            ));
            assert_eq!(
                schema.to_json(&retrieved_doc),
                r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
            );
        }
    }
    Ok(())
}
@@ -145,11 +145,7 @@ fn main() -> tantivy::Result<()> {
    let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade(
        &(price_dynamic_column.clone() as Arc<dyn Warmer>),
    )];
    let reader: IndexReader = index
        .reader_builder()
        .warmers(warmers)
        .num_searchers(1)
        .try_into()?;
    let reader: IndexReader = index.reader_builder().warmers(warmers).try_into()?;
    reader.reload()?;

    let query_parser = QueryParser::for_index(&index, vec![text]);
@@ -3,7 +3,7 @@ name = "fastfield_codecs"
version = "0.2.0"
authors = ["Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2018"
edition = "2021"
description = "Fast field codecs used by tantivy"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -12,13 +12,16 @@ description = "Fast field codecs used by tantivy"
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
prettytable-rs = {version="0.8.0", optional= true}
rand = {version="0.8.3", optional= true}
rand = { version="0.8.3", optional= true}
itertools = { version="0.10.3", optional=true}
measure_time = { version="0.8.2", optional=true}

[dev-dependencies]
more-asserts = "0.2.1"
more-asserts = "0.3.0"
proptest = "1.0.0"
rand = "0.8.3"

[features]
bin = ["prettytable-rs", "rand"]
bin = ["prettytable-rs", "rand", "itertools", "measure_time"]
default = ["bin"]
@@ -14,7 +14,7 @@ pub struct BitpackedFastFieldReader {
    pub max_value_u64: u64,
}

impl<'data> FastFieldCodecReader for BitpackedFastFieldReader {
impl FastFieldCodecReader for BitpackedFastFieldReader {
    /// Opens a fast field given a file.
    fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
        let (_data, mut footer) = bytes.split_at(bytes.len() - 16);
@@ -107,7 +107,7 @@ impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
    /// values.
    fn serialize(
        write: &mut impl Write,
        _fastfield_accessor: &impl FastFieldDataAccess,
        _fastfield_accessor: &dyn FastFieldDataAccess,
        stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        _data_iter1: impl Iterator<Item = u64>,
fastfield_codecs/src/ip_codec.rs (729, new file)

@@ -0,0 +1,729 @@
/// This codec takes a large number space (u128) and reduces it to a compact number space.
///
/// It will find spaces in the numer range. For example:
///
/// 100, 101, 102, 103, 104, 50000, 50001
/// could be mapped to
/// 100..104 -> 0..4
/// 50000..50001 -> 5..6
///
/// Compact space 0..6 requires much less bits than 100..50001
///
/// The codec is created to compress ip addresses, but may be employed in other use cases.
use std::{
|
||||
cmp::Ordering,
|
||||
collections::BinaryHeap,
|
||||
io::{self, Write},
|
||||
net::{IpAddr, Ipv6Addr},
|
||||
ops::RangeInclusive,
|
||||
};
|
||||
|
||||
use common::{deserialize_vint_u128, serialize_vint_u128};
|
||||
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::FastFieldCodecReaderU128;
|
||||
|
||||
pub fn ip_to_u128(ip_addr: IpAddr) -> u128 {
|
||||
let ip_addr_v6: Ipv6Addr = match ip_addr {
|
||||
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
|
||||
IpAddr::V6(v6) => v6,
|
||||
};
|
||||
u128::from_be_bytes(ip_addr_v6.octets())
|
||||
}
|
||||
|
||||
const INTERVAL_COST_IN_BITS: usize = 64;
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct IntervalEncoding();
|
||||
|
||||
pub struct IntervalCompressor {
|
||||
pub null_value: u128,
|
||||
min_value: u128,
|
||||
max_value: u128,
|
||||
compact_space: CompactSpace,
|
||||
pub num_bits: u8,
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
struct DeltaAndPos {
|
||||
delta: u128,
|
||||
pos: usize,
|
||||
}
|
||||
impl DeltaAndPos {
|
||||
fn new(ip: u128, pos: usize) -> Self {
|
||||
DeltaAndPos { delta: ip, pos }
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for DeltaAndPos {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
self.delta.cmp(&other.delta)
|
||||
}
|
||||
}
|
||||
impl PartialOrd for DeltaAndPos {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
self.delta.partial_cmp(&other.delta)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_delta_and_pos_sort() {
|
||||
let mut deltas: BinaryHeap<DeltaAndPos> = BinaryHeap::new();
|
||||
deltas.push(DeltaAndPos::new(10, 1));
|
||||
deltas.push(DeltaAndPos::new(100, 10));
|
||||
deltas.push(DeltaAndPos::new(1, 10));
|
||||
assert_eq!(deltas.pop().unwrap().delta, 100);
|
||||
assert_eq!(deltas.pop().unwrap().delta, 10);
|
||||
}
|
||||
|
||||
/// Put the deltas for the sorted ip addresses into a binary heap
|
||||
fn get_deltas(ip_addrs_sorted: &[u128]) -> BinaryHeap<DeltaAndPos> {
|
||||
let mut prev_opt = None;
|
||||
let mut deltas: BinaryHeap<DeltaAndPos> = BinaryHeap::new();
|
||||
for (pos, ip_addr) in ip_addrs_sorted.iter().cloned().enumerate() {
|
||||
let delta = if let Some(prev) = prev_opt {
|
||||
ip_addr - prev
|
||||
} else {
|
||||
ip_addr + 1
|
||||
};
|
||||
// skip too small deltas
|
||||
if delta > 2 {
|
||||
deltas.push(DeltaAndPos::new(delta, pos));
|
||||
}
|
||||
prev_opt = Some(ip_addr);
|
||||
}
|
||||
deltas
|
||||
}
|
||||
|
||||
/// Will collect blanks and add them to compact space if it will affect the number of bits used on
|
||||
/// the compact space.
|
||||
fn get_compact_space(ip_addrs_sorted: &[u128], cost_per_interval: usize) -> CompactSpace {
|
||||
let max_val = *ip_addrs_sorted.last().unwrap_or(&0u128) + 1;
|
||||
let mut deltas = get_deltas(ip_addrs_sorted);
|
||||
let mut amplitude_compact_space = max_val;
|
||||
let mut amplitude_bits: u8 = (amplitude_compact_space as f64).log2().ceil() as u8;
|
||||
let mut staged_blanks = vec![];
|
||||
|
||||
let mut compact_space = CompactSpaceBuilder::new();
|
||||
|
||||
// We will stage blanks until they reduce the compact space by 1 bit.
|
||||
// Binary heap to process the gaps by their size
|
||||
while let Some(ip_addr_and_pos) = deltas.pop() {
|
||||
let delta = ip_addr_and_pos.delta;
|
||||
let pos = ip_addr_and_pos.pos;
|
||||
staged_blanks.push((delta, pos));
|
||||
let staged_spaces_sum: u128 = staged_blanks.iter().map(|(delta, _)| delta - 1).sum();
|
||||
// +1 for later added null value
|
||||
let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum + 1;
|
||||
let amplitude_new_bits = (amplitude_new_compact_space as f64).log2().ceil() as u8;
|
||||
if amplitude_bits == amplitude_new_bits {
|
||||
continue;
|
||||
}
|
||||
let saved_bits = (amplitude_bits - amplitude_new_bits) as usize * ip_addrs_sorted.len();
|
||||
let cost = staged_blanks.len() * cost_per_interval;
|
||||
if cost >= saved_bits {
|
||||
// Continue here, since although we walk over the deltas by size,
|
||||
// we can potentially save a lot at the last bits, which are smaller deltas
|
||||
//
|
||||
// E.g. if the first range reduces the compact space by 1000 from 2000 to 1000, which
|
||||
// saves 11-10=1 bit and the next range reduces the compact space by 950 to
|
||||
// 50, which saves 10-6=4 bit
|
||||
continue;
|
||||
}
|
||||
|
||||
amplitude_compact_space = amplitude_new_compact_space;
|
||||
amplitude_bits = amplitude_new_bits;
|
||||
for (_, pos) in staged_blanks.drain(..) {
|
||||
let ip_addr = ip_addrs_sorted[pos];
|
||||
if pos == 0 {
|
||||
compact_space.add_hole(0..=ip_addr - 1);
|
||||
} else {
|
||||
compact_space.add_hole(ip_addrs_sorted[pos - 1] + 1..=ip_addr - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
compact_space.add_hole(max_val..=u128::MAX);
|
||||
|
||||
compact_space.finish()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compact_space_test() {
|
||||
// small ranges are ignored here
|
||||
let ips = vec![
|
||||
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
|
||||
];
|
||||
let ranges_and_compact_start = get_compact_space(&ips, 11);
|
||||
let null_value = ranges_and_compact_start.null_value;
|
||||
let amplitude = ranges_and_compact_start.amplitude_compact_space();
|
||||
assert_eq!(null_value, 5);
|
||||
assert_eq!(amplitude, 20);
|
||||
assert_eq!(2, ranges_and_compact_start.to_compact(2).unwrap());
|
||||
|
||||
assert_eq!(ranges_and_compact_start.to_compact(100).unwrap_err(), 0);
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
struct CompactSpaceBuilder {
|
||||
covered_space: Vec<std::ops::RangeInclusive<u128>>,
|
||||
}
|
||||
|
||||
impl CompactSpaceBuilder {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
covered_space: vec![0..=u128::MAX],
|
||||
}
|
||||
}
|
||||
|
||||
// Will extend the first range and add a null value to it.
|
||||
fn assign_and_return_null(&mut self) -> u128 {
|
||||
self.covered_space[0] = *self.covered_space[0].start()..=*self.covered_space[0].end() + 1;
|
||||
*self.covered_space[0].end()
|
||||
}
|
||||
|
||||
// Assumes that repeated add_hole calls don't overlap.
|
||||
fn add_hole(&mut self, hole: std::ops::RangeInclusive<u128>) {
|
||||
let position = self
|
||||
.covered_space
|
||||
.iter()
|
||||
.position(|range| range.start() <= hole.start() && range.end() >= hole.end());
|
||||
if let Some(position) = position {
|
||||
let old_range = self.covered_space.remove(position);
|
||||
if old_range == hole {
|
||||
return;
|
||||
}
|
||||
let new_range_end = hole.end().saturating_add(1)..=*old_range.end();
|
||||
if old_range.start() == hole.start() {
|
||||
self.covered_space.insert(position, new_range_end);
|
||||
return;
|
||||
}
|
||||
let new_range_start = *old_range.start()..=hole.start().saturating_sub(1);
|
||||
if old_range.end() == hole.end() {
|
||||
self.covered_space.insert(position, new_range_start);
|
||||
return;
|
||||
}
|
||||
self.covered_space.insert(position, new_range_end);
|
||||
self.covered_space.insert(position, new_range_start);
|
||||
}
|
||||
}
|
||||
fn finish(mut self) -> CompactSpace {
|
||||
let null_value = self.assign_and_return_null();
|
||||
|
||||
let mut compact_start: u64 = 0;
|
||||
let mut ranges_and_compact_start = vec![];
|
||||
for cov in self.covered_space {
|
||||
let covered_range_len = cov.end() - cov.start();
|
||||
ranges_and_compact_start.push((cov, compact_start));
|
||||
compact_start += covered_range_len as u64 + 1;
|
||||
}
|
||||
CompactSpace {
|
||||
ranges_and_compact_start,
|
||||
null_value,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
struct CompactSpace {
|
||||
ranges_and_compact_start: Vec<(std::ops::RangeInclusive<u128>, u64)>,
|
||||
pub null_value: u128,
|
||||
}
|
||||
impl CompactSpace {
|
||||
fn amplitude_compact_space(&self) -> u128 {
|
||||
let last_range = &self.ranges_and_compact_start[self.ranges_and_compact_start.len() - 1];
|
||||
last_range.1 as u128 + (last_range.0.end() - last_range.0.start()) + 1
|
||||
}
|
||||
|
||||
fn get_range_and_compact_start(&self, pos: usize) -> &(std::ops::RangeInclusive<u128>, u64) {
|
||||
&self.ranges_and_compact_start[pos]
|
||||
}
|
||||
fn serialize(&self, output: &mut Vec<u8>) {
|
||||
serialize_vint_u128(self.null_value as u128, output);
|
||||
serialize_vint_u128(self.ranges_and_compact_start.len() as u128, output);
|
||||
let mut prev_ip = 0;
|
||||
for (ip_range, _compact) in &self.ranges_and_compact_start {
|
||||
let delta_ip = ip_range.start() - prev_ip;
|
||||
serialize_vint_u128(delta_ip as u128, output);
|
||||
prev_ip = *ip_range.start();
|
||||
|
||||
let delta_ip = ip_range.end() - prev_ip;
|
||||
serialize_vint_u128(delta_ip as u128, output);
|
||||
prev_ip = *ip_range.end();
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize(data: &[u8]) -> io::Result<(&[u8], Self)> {
|
||||
let (null_value, data) = deserialize_vint_u128(data)?;
|
||||
let (num_ip_addrs, mut data) = deserialize_vint_u128(data)?;
|
||||
let mut ip_addr = 0u128;
|
||||
let mut compact = 0u64;
|
||||
let mut ranges_and_compact_start: Vec<(std::ops::RangeInclusive<u128>, u64)> = vec![];
|
||||
for _ in 0..num_ip_addrs {
|
||||
let (ip_addr_delta, new_data) = deserialize_vint_u128(data)?;
|
||||
data = new_data;
|
||||
ip_addr += ip_addr_delta;
|
||||
let ip_addr_start = ip_addr;
|
||||
|
||||
let (ip_addr_delta, new_data) = deserialize_vint_u128(data)?;
|
||||
data = new_data;
|
||||
ip_addr += ip_addr_delta;
|
||||
let ip_addr_end = ip_addr;
|
||||
|
||||
let compact_delta = ip_addr_end - ip_addr_start + 1;
|
||||
|
||||
ranges_and_compact_start.push((ip_addr_start..=ip_addr_end, compact));
|
||||
compact += compact_delta as u64;
|
||||
}
|
||||
Ok((
|
||||
data,
|
||||
Self {
|
||||
null_value,
|
||||
ranges_and_compact_start,
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
/// Returns either Ok(the value in the compact space) or if it is outside the compact space the
|
||||
/// Err(position on the next larger range above the value)
|
||||
fn to_compact(&self, ip: u128) -> Result<u64, usize> {
|
||||
self.ranges_and_compact_start
|
||||
.binary_search_by(|probe| {
|
||||
let ip_range = &probe.0;
|
||||
if *ip_range.start() <= ip && *ip_range.end() >= ip {
|
||||
return Ordering::Equal;
|
||||
} else if ip < *ip_range.start() {
|
||||
return Ordering::Greater;
|
||||
} else if ip > *ip_range.end() {
|
||||
return Ordering::Less;
|
||||
}
|
||||
panic!("not covered all ranges in check");
|
||||
})
|
||||
.map(|pos| {
|
||||
let (range, compact_start) = &self.ranges_and_compact_start[pos];
|
||||
compact_start + (ip - range.start()) as u64
|
||||
})
|
||||
.map_err(|pos| pos - 1)
|
||||
}
|
||||
|
||||
/// Unpacks a ip from compact space to u128 space
|
||||
fn unpack_ip(&self, compact: u64) -> u128 {
|
||||
let pos = self
|
||||
.ranges_and_compact_start
|
||||
.binary_search_by_key(&compact, |probe| probe.1)
|
||||
.map_or_else(|e| e - 1, |v| v);
|
||||
|
||||
let range_and_compact_start = &self.ranges_and_compact_start[pos];
|
||||
let diff = compact - self.ranges_and_compact_start[pos].1;
|
||||
range_and_compact_start.0.start() + diff as u128
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ranges_and_compact_start_test() {
|
||||
let ips = vec![
|
||||
2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
|
||||
];
|
||||
let ranges_and_compact_start = get_compact_space(&ips, 11);
|
||||
assert_eq!(ranges_and_compact_start.null_value, 5);
|
||||
|
||||
let mut output = vec![];
|
||||
ranges_and_compact_start.serialize(&mut output);
|
||||
|
||||
assert_eq!(
|
||||
ranges_and_compact_start,
|
||||
CompactSpace::deserialize(&output).unwrap().1
|
||||
);
|
||||
|
||||
for ip in &ips {
|
||||
let compact = ranges_and_compact_start.to_compact(*ip).unwrap();
|
||||
assert_eq!(ranges_and_compact_start.unpack_ip(compact), *ip);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn train(ip_addrs_sorted: &[u128]) -> IntervalCompressor {
|
||||
let ranges_and_compact_start = get_compact_space(ip_addrs_sorted, INTERVAL_COST_IN_BITS);
|
||||
let null_value = ranges_and_compact_start.null_value;
|
||||
let amplitude_compact_space = ranges_and_compact_start.amplitude_compact_space();
|
||||
|
||||
assert!(
|
||||
amplitude_compact_space <= u64::MAX as u128,
|
||||
"case unsupported."
|
||||
);
|
||||
|
||||
let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64);
|
||||
let min_value = *ip_addrs_sorted.first().unwrap_or(&0);
|
||||
let max_value = *ip_addrs_sorted.last().unwrap_or(&0);
|
||||
let compressor = IntervalCompressor {
|
||||
null_value,
|
||||
min_value,
|
||||
max_value,
|
||||
compact_space: ranges_and_compact_start,
|
||||
num_bits,
|
||||
};
|
||||
|
||||
let max_value = *ip_addrs_sorted.last().unwrap_or(&0u128).max(&null_value);
|
||||
assert_eq!(
|
||||
compressor.to_compact(max_value) + 1,
|
||||
amplitude_compact_space as u64
|
||||
);
|
||||
compressor
|
||||
}
|
||||
|
||||
impl IntervalCompressor {
|
||||
/// Taking the vals as Vec may cost a lot of memory.
|
||||
/// It is used to sort the vals.
|
||||
///
|
||||
/// Less memory alternative: We could just store the index (u32), and use that as sorting.
|
||||
pub fn from_vals(mut vals: Vec<u128>) -> Self {
|
||||
vals.sort();
|
||||
train(&vals)
|
||||
}
|
||||
|
||||
fn to_compact(&self, ip_addr: u128) -> u64 {
|
||||
self.compact_space.to_compact(ip_addr).unwrap()
|
||||
}
|
||||
|
||||
fn write_footer(&self, write: &mut impl Write, num_vals: u128) -> io::Result<()> {
|
||||
let mut footer = vec![];
|
||||
|
||||
// header flags for future optional dictionary encoding
|
||||
let header_flags = 0u64;
|
||||
footer.extend_from_slice(&header_flags.to_le_bytes());
|
||||
|
||||
let null_value = self
|
||||
.compact_space
|
||||
.to_compact(self.null_value)
|
||||
.expect("could not convert null to compact space");
|
||||
serialize_vint_u128(null_value as u128, &mut footer);
|
||||
serialize_vint_u128(self.min_value, &mut footer);
|
||||
serialize_vint_u128(self.max_value, &mut footer);
|
||||
|
||||
self.compact_space.serialize(&mut footer);
|
||||
|
||||
footer.push(self.num_bits);
|
||||
serialize_vint_u128(num_vals as u128, &mut footer);
|
||||
|
||||
write.write_all(&footer)?;
|
||||
let footer_len = footer.len() as u32;
|
||||
write.write_all(&footer_len.to_le_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn compress(&self, vals: &[u128]) -> io::Result<Vec<u8>> {
|
||||
let mut output = vec![];
|
||||
self.compress_into(vals.iter().cloned(), &mut output)?;
|
||||
Ok(output)
|
||||
}
|
||||
pub fn compress_into(
|
||||
&self,
|
||||
vals: impl Iterator<Item = u128>,
|
||||
write: &mut impl Write,
|
||||
) -> io::Result<()> {
|
||||
let mut bitpacker = BitPacker::default();
|
||||
let mut num_vals = 0;
|
||||
for ip_addr in vals {
|
||||
let compact = self.to_compact(ip_addr);
|
||||
bitpacker.write(compact, self.num_bits, write).unwrap();
|
||||
num_vals += 1;
|
||||
}
|
||||
bitpacker.close(write).unwrap();
|
||||
self.write_footer(write, num_vals as u128)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IntervallDecompressor {
|
||||
compact_space: CompactSpace,
|
||||
bit_unpacker: BitUnpacker,
|
||||
null_compact_space: u64,
|
||||
min_value: u128,
|
||||
max_value: u128,
|
||||
num_vals: usize,
|
||||
}
|
||||
|
||||
impl FastFieldCodecReaderU128 for IntervallDecompressor {
|
||||
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self> {
|
||||
Self::open(bytes)
|
||||
}
|
||||
|
||||
fn get(&self, doc: u64, data: &[u8]) -> Option<u128> {
|
||||
self.get(doc, data)
|
||||
}
|
||||
|
||||
fn get_between_vals(&self, range: RangeInclusive<u128>, data: &[u8]) -> Vec<usize> {
|
||||
self.get_range(range, data)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u128 {
|
||||
self.min_value()
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u128 {
|
||||
self.max_value()
|
||||
}
|
||||
|
||||
/// The computed and assigned number for null values
|
||||
fn null_value(&self) -> u128 {
|
||||
self.compact_space.null_value
|
||||
}
|
||||
|
||||
fn iter<'a>(&'a self, data: &'a [u8]) -> Box<dyn Iterator<Item = Option<u128>> + 'a> {
|
||||
Box::new(self.iter(data))
|
||||
}
|
||||
}
|
||||
|
||||
impl IntervallDecompressor {
|
||||
pub fn open(data: &[u8]) -> io::Result<IntervallDecompressor> {
|
||||
let (data, footer_len_bytes) = data.split_at(data.len() - 4);
|
||||
let footer_len = u32::from_le_bytes(footer_len_bytes.try_into().unwrap());
|
||||
|
||||
let data = &data[data.len() - footer_len as usize..];
|
||||
let (_header_flags, data) = data.split_at(8);
|
||||
let (null_compact_space, data) = deserialize_vint_u128(data)?;
|
||||
let (min_value, data) = deserialize_vint_u128(data)?;
|
||||
let (max_value, data) = deserialize_vint_u128(data)?;
|
||||
let (mut data, compact_space) = CompactSpace::deserialize(data).unwrap();
|
||||
|
||||
let num_bits = data[0];
|
||||
data = &data[1..];
|
||||
let (num_vals, _data) = deserialize_vint_u128(data)?;
|
||||
let decompressor = IntervallDecompressor {
|
||||
null_compact_space: null_compact_space as u64,
|
||||
min_value,
|
||||
max_value,
|
||||
compact_space,
|
||||
num_vals: num_vals as usize,
|
||||
bit_unpacker: BitUnpacker::new(num_bits),
|
||||
};
|
||||
|
||||
Ok(decompressor)
|
||||
}
|
||||
|
||||
/// Converting to compact space for the decompressor is more complex, since we may get values
|
||||
/// which are outside the compact space. e.g. if we map
|
||||
/// 1000 => 5
|
||||
/// 2000 => 6
|
||||
///
|
||||
/// and we want a mapping for 1005, there is no equivalent compact space. We instead return an
|
||||
/// error with the index of the next range.
|
||||
fn to_compact(&self, ip_addr: u128) -> Result<u64, usize> {
|
||||
self.compact_space.to_compact(ip_addr)
|
||||
}
|
||||
|
||||
fn compact_to_ip_addr(&self, compact: u64) -> u128 {
|
||||
self.compact_space.unpack_ip(compact)
|
||||
}
|
||||
|
||||
/// Comparing on compact space: 1.2 GElements/s
|
||||
///
|
||||
/// Comparing on original space: .06 GElements/s (not completely optimized)
|
||||
pub fn get_range(&self, range: RangeInclusive<u128>, data: &[u8]) -> Vec<usize> {
|
||||
let from_ip_addr = *range.start();
|
||||
let to_ip_addr = *range.end();
|
||||
assert!(to_ip_addr >= from_ip_addr);
|
||||
let compact_from = self.to_compact(from_ip_addr);
|
||||
let compact_to = self.to_compact(to_ip_addr);
|
||||
// Quick return, if both ranges fall into the same non-mapped space, the range can't cover
|
||||
// any values, so we can early exit
|
||||
match (compact_to, compact_from) {
|
||||
(Err(pos1), Err(pos2)) if pos1 == pos2 => return vec![],
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let compact_from = compact_from.unwrap_or_else(|pos| {
|
||||
let range_and_compact_start = self.compact_space.get_range_and_compact_start(pos);
|
||||
let compact_end = range_and_compact_start.1
|
||||
+ (range_and_compact_start.0.end() - range_and_compact_start.0.start()) as u64;
|
||||
compact_end + 1
|
||||
});
|
||||
// If there is no compact space, we go to the closest upperbound compact space
|
||||
let compact_to = compact_to.unwrap_or_else(|pos| {
|
||||
let range_and_compact_start = self.compact_space.get_range_and_compact_start(pos);
|
||||
let compact_end = range_and_compact_start.1
|
||||
+ (range_and_compact_start.0.end() - range_and_compact_start.0.start()) as u64;
|
||||
compact_end
|
||||
});
|
||||
|
||||
let range = compact_from..=compact_to;
|
||||
let mut positions = vec![];
|
||||
|
||||
for (pos, compact_ip) in self
|
||||
.iter_compact(data)
|
||||
.enumerate()
|
||||
.filter(|(_pos, val)| *val != self.null_compact_space)
|
||||
{
|
||||
if range.contains(&compact_ip) {
|
||||
positions.push(pos);
|
||||
}
|
||||
}
|
||||
|
||||
positions
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn iter_compact<'a>(&'a self, data: &'a [u8]) -> impl Iterator<Item = u64> + 'a {
|
||||
(0..self.num_vals).map(move |idx| self.bit_unpacker.get(idx as u64, data) as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn iter<'a>(&'a self, data: &'a [u8]) -> impl Iterator<Item = Option<u128>> + 'a {
|
||||
// TODO: Performance. It would be better to iterate on the ranges and check existence via
|
||||
// the bit_unpacker.
|
||||
self.iter_compact(data).map(|compact| {
|
||||
if compact == self.null_compact_space {
|
||||
None
|
||||
} else {
|
||||
Some(self.compact_to_ip_addr(compact))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get(&self, idx: u64, data: &[u8]) -> Option<u128> {
|
||||
let compact = self.bit_unpacker.get(idx, data);
|
||||
if compact == self.null_compact_space {
|
||||
None
|
||||
} else {
|
||||
Some(self.compact_to_ip_addr(compact))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn min_value(&self) -> u128 {
|
||||
self.min_value
|
||||
}
|
||||
|
||||
pub fn max_value(&self) -> u128 {
|
||||
self.max_value
|
||||
}
|
||||
}
|
||||
|
||||
impl IntervalEncoding {
|
||||
pub fn train(&self, mut vals: Vec<u128>) -> IntervalCompressor {
|
||||
vals.sort();
|
||||
train(&vals)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
fn decode_all(data: &[u8]) -> Vec<u128> {
|
||||
let decompressor = IntervallDecompressor::open(data).unwrap();
|
||||
let mut u128_vals = Vec::new();
|
||||
for idx in 0..decompressor.num_vals as usize {
|
||||
let val = decompressor.get(idx as u64, data);
|
||||
if let Some(val) = val {
|
||||
u128_vals.push(val);
|
||||
}
|
||||
}
|
||||
u128_vals
|
||||
}
|
||||
|
||||
fn test_aux_vals(encoder: &IntervalEncoding, u128_vals: &[u128]) -> Vec<u8> {
|
||||
let compressor = encoder.train(u128_vals.to_vec());
|
||||
let data = compressor.compress(u128_vals).unwrap();
|
||||
let decoded_val = decode_all(&data);
|
||||
assert_eq!(&decoded_val, u128_vals);
|
||||
data
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_1() {
|
||||
let vals = &[
|
||||
1u128,
|
||||
100u128,
|
||||
3u128,
|
||||
99999u128,
|
||||
100000u128,
|
||||
100001u128,
|
||||
4_000_211_221u128,
|
||||
4_000_211_222u128,
|
||||
333u128,
|
||||
];
|
||||
let interval_encoding = IntervalEncoding::default();
|
||||
let data = test_aux_vals(&interval_encoding, vals);
|
||||
let decomp = IntervallDecompressor::open(&data).unwrap();
|
||||
let positions = decomp.get_range(0..=1, &data);
|
||||
assert_eq!(positions, vec![0]);
|
||||
let positions = decomp.get_range(0..=2, &data);
|
||||
assert_eq!(positions, vec![0]);
|
||||
let positions = decomp.get_range(0..=3, &data);
|
||||
assert_eq!(positions, vec![0, 2]);
|
||||
assert_eq!(decomp.get_range(99999u128..=99999u128, &data), vec![3]);
|
||||
assert_eq!(decomp.get_range(99998u128..=100000u128, &data), vec![3, 4]);
|
||||
assert_eq!(decomp.get_range(99998u128..=99999u128, &data), vec![3]);
|
||||
assert_eq!(decomp.get_range(99998u128..=99998u128, &data), vec![]);
|
||||
assert_eq!(decomp.get_range(333u128..=333u128, &data), vec![8]);
|
||||
assert_eq!(decomp.get_range(332u128..=333u128, &data), vec![8]);
|
||||
assert_eq!(decomp.get_range(332u128..=334u128, &data), vec![8]);
|
||||
assert_eq!(decomp.get_range(333u128..=334u128, &data), vec![8]);
|
||||
|
||||
assert_eq!(
|
||||
decomp.get_range(4_000_211_221u128..=5_000_000_000u128, &data),
|
||||
vec![6, 7]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty() {
|
||||
let vals = &[];
|
||||
let interval_encoding = IntervalEncoding::default();
|
||||
let data = test_aux_vals(&interval_encoding, vals);
|
||||
let _decomp = IntervallDecompressor::open(&data).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_2() {
|
||||
let vals = &[
|
||||
100u128,
|
||||
99999u128,
|
||||
100000u128,
|
||||
100001u128,
|
||||
4_000_211_221u128,
|
||||
4_000_211_222u128,
|
||||
333u128,
|
||||
];
|
||||
let interval_encoding = IntervalEncoding::default();
|
||||
let data = test_aux_vals(&interval_encoding, vals);
|
||||
let decomp = IntervallDecompressor::open(&data).unwrap();
|
||||
let positions = decomp.get_range(0..=5, &data);
|
||||
assert_eq!(positions, vec![]);
|
||||
let positions = decomp.get_range(0..=100, &data);
|
||||
assert_eq!(positions, vec![0]);
|
||||
let positions = decomp.get_range(0..=105, &data);
|
||||
assert_eq!(positions, vec![0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_null() {
|
||||
let vals = &[2u128];
|
||||
let interval_encoding = IntervalEncoding::default().train(vals.to_vec());
|
||||
let vals = vec![interval_encoding.null_value, 2u128];
|
||||
let data = interval_encoding.compress(&vals).unwrap();
|
||||
let decomp = IntervallDecompressor::open(&data).unwrap();
|
||||
let positions = decomp.get_range(0..=1, &data);
|
||||
assert_eq!(positions, vec![]);
|
||||
let positions = decomp.get_range(2..=2, &data);
|
||||
assert_eq!(positions, vec![1]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_first_large_gaps() {
|
||||
let vals = &[1_000_000_000u128; 100];
|
||||
let interval_encoding = IntervalEncoding::default();
|
||||
let _data = test_aux_vals(&interval_encoding, vals);
|
||||
}
|
||||
use proptest::prelude::*;
|
||||
|
||||
proptest! {
|
||||
|
||||
#[test]
|
||||
fn compress_decompress_random(vals in proptest::collection::vec(any::<u128>()
|
||||
, 1..1000)) {
|
||||
let interval_encoding = IntervalEncoding::default();
|
||||
let _data = test_aux_vals(&interval_encoding, &vals);
|
||||
}
|
||||
}
|
||||
}
|
||||
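A short usage sketch built only from the public items of this new file (`ip_to_u128`, `IntervalCompressor::from_vals`, `compress`, `IntervallDecompressor::open`, `get`, `get_range`); it illustrates the intended round trip rather than a stable API:

```rust
use fastfield_codecs::ip_codec::{ip_to_u128, IntervalCompressor, IntervallDecompressor};

fn roundtrip_example() -> std::io::Result<()> {
    // Map a handful of IP addresses into the u128 space.
    let vals: Vec<u128> = ["10.0.0.1", "10.0.0.2", "192.168.0.1"]
        .iter()
        .map(|ip| ip_to_u128(ip.parse().unwrap()))
        .collect();

    // Train the compact-space mapping and bit-pack the values.
    let compressor = IntervalCompressor::from_vals(vals.clone());
    let data = compressor.compress(&vals)?;

    // Reopen from bytes and query back.
    let decompressor = IntervallDecompressor::open(&data)?;
    assert_eq!(decompressor.get(0, &data), Some(vals[0]));
    // Positions of all documents whose value falls into the inclusive range.
    let positions = decompressor.get_range(vals[0]..=vals[1], &data);
    assert_eq!(positions, vec![0, 1]);
    Ok(())
}
```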
@@ -4,8 +4,10 @@ extern crate more_asserts;

use std::io;
use std::io::Write;
use std::ops::RangeInclusive;

pub mod bitpacked;
pub mod ip_codec;
pub mod linearinterpol;
pub mod multilinearinterpol;

@@ -19,10 +21,32 @@ pub trait FastFieldCodecReader: Sized {
    fn max_value(&self) -> u64;
}

pub trait FastFieldCodecReaderU128: Sized {
    /// reads the metadata and returns the CodecReader
    fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;

    /// Get value for doc
    fn get(&self, doc: u64, data: &[u8]) -> Option<u128>;

    /// Iterator
    ///
    /// Replace with opaque type after: https://github.com/rust-lang/rust/issues/63063
    fn iter<'a>(&'a self, data: &'a [u8]) -> Box<dyn Iterator<Item = Option<u128>> + 'a>;

    /// Get positions (=docs in single value) for provided value range
    fn get_between_vals(&self, range: RangeInclusive<u128>, data: &[u8]) -> Vec<usize>;

    /// The computed and assigned number value for null values
    fn null_value(&self) -> u128;

    fn min_value(&self) -> u128;
    fn max_value(&self) -> u128;
}

/// The FastFieldSerializerEstimate trait is required on all variants
/// of fast field compressions, to decide which one to choose.
pub trait FastFieldCodecSerializer {
    /// A codex needs to provide a unique name and id, which is
    /// A codec needs to provide a unique name and id, which is
    /// used for debugging and de/serialization.
    const NAME: &'static str;
    const ID: u8;
@@ -42,7 +66,7 @@ pub trait FastFieldCodecSerializer {
    /// The iterators should be preferred over using fastfield_accessor for performance reasons.
    fn serialize(
        write: &mut impl Write,
        fastfield_accessor: &impl FastFieldDataAccess,
        fastfield_accessor: &dyn FastFieldDataAccess,
        stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        data_iter1: impl Iterator<Item = u64>,
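A sketch of how a caller could drive the new u128 trait, using `IntervallDecompressor` from `ip_codec.rs` as the concrete reader; `data` and `num_vals` are assumed to come from the matching serializer:

```rust
use fastfield_codecs::ip_codec::IntervallDecompressor;
use fastfield_codecs::FastFieldCodecReaderU128;

fn read_u128_column(data: &[u8], num_vals: u64) -> std::io::Result<()> {
    let reader = IntervallDecompressor::open_from_bytes(data)?;
    // Values come back as Option<u128>; `None` marks the null value.
    for doc in 0..num_vals {
        let value: Option<u128> = reader.get(doc, data);
        println!("doc {doc}: {value:?}");
    }
    // Positions of documents whose value falls into an inclusive range.
    let _docs: Vec<usize> =
        reader.get_between_vals(reader.min_value()..=reader.max_value(), data);
    Ok(())
}
```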
@@ -111,7 +111,7 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
    /// Creates a new fast field serializer.
    fn serialize(
        write: &mut impl Write,
        fastfield_accessor: &impl FastFieldDataAccess,
        fastfield_accessor: &dyn FastFieldDataAccess,
        stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        data_iter1: impl Iterator<Item = u64>,
@@ -1,11 +1,117 @@
|
||||
#[macro_use]
|
||||
extern crate prettytable;
|
||||
use std::collections::HashSet;
|
||||
use std::env;
|
||||
use std::io::BufRead;
|
||||
use std::net::{IpAddr, Ipv6Addr};
|
||||
use std::str::FromStr;
|
||||
|
||||
use fastfield_codecs::ip_codec::{IntervalEncoding, IntervallDecompressor};
|
||||
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
|
||||
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
|
||||
use fastfield_codecs::{FastFieldCodecSerializer, FastFieldStats};
|
||||
use itertools::Itertools;
|
||||
use measure_time::print_time;
|
||||
use prettytable::{Cell, Row, Table};
|
||||
|
||||
fn print_set_stats(ip_addrs: &[u128]) {
|
||||
println!("NumIps\t{}", ip_addrs.len());
|
||||
let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
|
||||
println!("NumUniqueIps\t{}", ip_addr_set.len());
|
||||
let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
|
||||
println!("RatioUniqueOverTotal\t{ratio_unique:.4}");
|
||||
|
||||
// histogram
|
||||
let mut ip_addrs = ip_addrs.to_vec();
|
||||
ip_addrs.sort();
|
||||
let mut cnts: Vec<usize> = ip_addrs
|
||||
.into_iter()
|
||||
.dedup_with_count()
|
||||
.map(|(cnt, _)| cnt)
|
||||
.collect();
|
||||
cnts.sort();
|
||||
|
||||
let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
|
||||
let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
|
||||
let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
|
||||
let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
|
||||
let total: usize = cnts.iter().sum();
|
||||
|
||||
println!("{}", total);
|
||||
println!("{}", top_256_cnt);
|
||||
println!("{}", top_128_cnt);
|
||||
println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
|
||||
println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
|
||||
println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
|
||||
println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);
|
||||
|
||||
let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
|
||||
cnts.sort_by(|a, b| {
|
||||
if a.1 == b.1 {
|
||||
a.0.cmp(&b.0)
|
||||
} else {
|
||||
b.1.cmp(&a.1)
|
||||
}
|
||||
});
|
||||
|
||||
println!("\n\n----\nIP Address histogram");
|
||||
println!("IPAddrCount\tFrequency");
|
||||
for (ip_addr_count, times) in cnts {
|
||||
println!("{}\t{}", ip_addr_count, times);
|
||||
}
|
||||
}
|
||||
|
||||
fn ip_dataset() -> Vec<u128> {
|
||||
let mut ip_addr_v4 = 0;
|
||||
|
||||
let stdin = std::io::stdin();
|
||||
let ip_addrs: Vec<u128> = stdin
|
||||
.lock()
|
||||
.lines()
|
||||
.flat_map(|line| {
|
||||
let line = line.unwrap();
|
||||
let line = line.trim();
|
||||
let ip_addr = IpAddr::from_str(line.trim()).ok()?;
|
||||
if ip_addr.is_ipv4() {
|
||||
ip_addr_v4 += 1;
|
||||
}
|
||||
let ip_addr_v6: Ipv6Addr = match ip_addr {
|
||||
IpAddr::V4(v4) => v4.to_ipv6_mapped(),
|
||||
IpAddr::V6(v6) => v6,
|
||||
};
|
||||
Some(ip_addr_v6)
|
||||
})
|
||||
.map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
|
||||
.collect();
|
||||
|
||||
println!("IpAddrsAny\t{}", ip_addrs.len());
|
||||
println!("IpAddrsV4\t{}", ip_addr_v4);
|
||||
|
||||
ip_addrs
|
||||
}
|
||||
|
||||
fn bench_ip() {
|
||||
let encoding = IntervalEncoding();
|
||||
let dataset = ip_dataset();
|
||||
print_set_stats(&dataset);
|
||||
|
||||
let compressor = encoding.train(dataset.to_vec());
|
||||
let data = compressor.compress(&dataset).unwrap();
|
||||
|
||||
let decompressor = IntervallDecompressor::open(&data).unwrap();
|
||||
|
||||
for i in 11100..11150 {
|
||||
print_time!("get range");
|
||||
let doc_values = decompressor.get_range(dataset[i]..=dataset[i], &data);
|
||||
println!("{:?}", doc_values.len());
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
if env::args().nth(1).unwrap() == "bench" {
|
||||
bench_ip();
|
||||
return;
|
||||
}
|
||||
let mut table = Table::new();
|
||||
|
||||
// Add a row per time
|
||||
|
||||
@@ -195,7 +195,7 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
    /// Creates a new fast field serializer.
    fn serialize(
        write: &mut impl Write,
        fastfield_accessor: &impl FastFieldDataAccess,
        fastfield_accessor: &dyn FastFieldDataAccess,
        stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        _data_iter1: impl Iterator<Item = u64>,
@@ -2,7 +2,7 @@
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
version = "0.3.0"
edition = "2018"
edition = "2021"
description = "Expose data as static slice"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -9,9 +9,9 @@ homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
edition = "2021"

[dependencies]
combine = {version="4", default-features=false, features=[] }
once_cell = "1.7.2"
regex ={ version = "1.5.4", default-features = false, features = ["std"] }
regex ={ version = "1.5.4", default-features = false, features = ["std", "unicode"] }
@@ -2,11 +2,11 @@ use std::fmt;
use std::fmt::Write;

/// Defines whether a term in a query must be present,
/// should be present or must be not present.
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur {
    /// For a given document to be considered for scoring,
    /// at least one of the document with the Should or the Must
    /// at least one of the terms with the Should or the Must
    /// Occur constraint must be within the document.
    Should,
    /// Document without the term are excluded from the search.
@@ -16,9 +16,9 @@ use crate::Occur;
// Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to
// special characters.
const SPECIAL_CHARS: &[char] = &[
    '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ',
    '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '!', '\\', '*', ' ',
];
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|\~|!|\\|\*|\s)"#;
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|!|\\|\*|\s)"#;
/// Parses a field_name
/// A field name must have at least one character and be followed by a colon.

@@ -120,22 +120,36 @@ fn date_time<'a>() -> impl Parser<&'a str, Output = String> {

fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
    let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
    phrase.or(word())
    negative_number().or(phrase.or(word()))
}

fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
    let term_val_with_field = negative_number().or(term_val());
    (field_name(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
    (field_name(), term_val(), slop_val()).map(|(field_name, phrase, slop)| UserInputLiteral {
        field_name: Some(field_name),
        phrase,
        slop,
    })
}

fn slop_val<'a>() -> impl Parser<&'a str, Output = u32> {
    let slop =
        (char('~'), many1(digit())).and_then(|(_, slop): (_, String)| match slop.parse::<u32>() {
            Ok(d) => Ok(d),
            _ => Err(StringStreamError::UnexpectedParse),
        });
    optional(slop).map(|slop| match slop {
        Some(d) => d,
        _ => 0,
    })
}

fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let term_default_field = term_val().map(|phrase| UserInputLiteral {
    let term_default_field = (term_val(), slop_val()).map(|(phrase, slop)| UserInputLiteral {
        field_name: None,
        phrase,
        slop,
    });

    attempt(term_query())
        .or(term_default_field)
        .map(UserInputLeaf::from)
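The slop_val() parser accepts an optional ~<number> suffix after a phrase and defaults to a slop of 0, rejecting suffixes that do not fit a u32. As a rough illustration of that rule only (plain std code, not the combine-based grammar above, and it ignores where the grammar actually allows the suffix):

/// Split an optional `~<digits>` slop suffix off a phrase: no suffix means
/// slop 0, a suffix that is not a valid u32 is a parse error.
fn split_slop(input: &str) -> Result<(&str, u32), String> {
    match input.rsplit_once('~') {
        None => Ok((input, 0)),
        Some((phrase, digits)) => digits
            .parse::<u32>()
            .map(|slop| (phrase, slop))
            .map_err(|_| format!("invalid slop suffix: {digits:?}")),
    }
}

fn main() {
    assert_eq!(split_slop("\"a b\"~3"), Ok(("\"a b\"", 3)));
    assert_eq!(split_slop("\"a b\""), Ok(("\"a b\"", 0)));
    assert!(split_slop("\"a b\"~x").is_err());
    assert!(split_slop("\"a b\"~100000000000000000").is_err());
}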
@@ -285,7 +299,7 @@ fn boost<'a>() -> impl Parser<&'a str, Output = f64> {

fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
    (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
        Some(boost) if (boost - 1.0).abs() > f64::EPSILON => {
            UserInputAst::Boost(Box::new(leaf), boost)
        }
        _ => leaf,

@@ -522,18 +536,10 @@ mod test {
            super::field_name().parse(".my.field.name:a"),
            Ok((".my.field.name".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse(r#"my\ field:a"#),
            Ok(("my field".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse(r#"にんじん:a"#),
            Ok(("にんじん".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse("my\\ field\\ name:a"),
            Ok(("my field name".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse(r#"my\field:a"#),
            Ok((r#"my\field"#.to_string(), "a"))

@@ -562,6 +568,17 @@ mod test {
            super::field_name().parse("_my_field:a"),
            Ok(("_my_field".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse("~my~field:a"),
            Ok(("~my~field".to_string(), "a"))
        );
        for special_char in SPECIAL_CHARS.iter() {
            let query = &format!("\\{special_char}my\\{special_char}field:a");
            assert_eq!(
                super::field_name().parse(query),
                Ok((format!("{special_char}my{special_char}field"), "a"))
            );
        }
    }

    #[test]

@@ -714,4 +731,22 @@ mod test {
        );
        test_is_parse_err("abc + ");
    }

    #[test]
    fn test_slop() {
        assert!(parse_to_ast().parse("\"a b\"~").is_err());
        assert!(parse_to_ast().parse("foo:\"a b\"~").is_err());
        assert!(parse_to_ast().parse("\"a b\"~a").is_err());
        assert!(parse_to_ast().parse("\"a b\"~100000000000000000").is_err());

        test_parse_query_to_ast_helper("\"a b\"^2~4", "(*(\"a b\")^2 *\"~4\")");
        test_parse_query_to_ast_helper("\"~Document\"", "\"~Document\"");
        test_parse_query_to_ast_helper("~Document", "\"~Document\"");
        test_parse_query_to_ast_helper("a~2", "\"a~2\"");
        test_parse_query_to_ast_helper("\"a b\"~0", "\"a b\"");
        test_parse_query_to_ast_helper("\"a b\"~1", "\"a b\"~1");
        test_parse_query_to_ast_helper("\"a b\"~3", "\"a b\"~3");
        test_parse_query_to_ast_helper("foo:\"a b\"~300", "\"foo\":\"a b\"~300");
        test_parse_query_to_ast_helper("\"a b\"~300^2", "(\"a b\"~300)^2");
    }
}
@@ -40,14 +40,19 @@ impl Debug for UserInputLeaf {
pub struct UserInputLiteral {
    pub field_name: Option<String>,
    pub phrase: String,
    pub slop: u32,
}

impl fmt::Debug for UserInputLiteral {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match self.field_name {
            Some(ref field_name) => write!(formatter, "\"{}\":\"{}\"", field_name, self.phrase),
            None => write!(formatter, "\"{}\"", self.phrase),
        if let Some(ref field) = self.field_name {
            write!(formatter, "\"{}\":", field)?;
        }
        write!(formatter, "\"{}\"", self.phrase)?;
        if self.slop > 0 {
            write!(formatter, "~{}", self.slop)?;
        }
        Ok(())
    }
}
@@ -12,7 +12,7 @@ Tantivy's aggregations have been designed to mimic the
The code is organized in submodules:

## bucket
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggegations.
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggregations.

## metric
Contains all metric aggregations, like average aggregation. Metric aggregations do not have sub aggregations.
@@ -20,6 +20,7 @@
//!     bucket_agg: BucketAggregationType::Range(RangeAggregation{
//!         field: "score".to_string(),
//!         ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
//!         keyed: false,
//!     }),
//!     sub_aggregation: Default::default(),
//! }),

@@ -100,6 +101,12 @@ pub(crate) struct BucketAggregationInternal {
}

impl BucketAggregationInternal {
    pub(crate) fn as_range(&self) -> Option<&RangeAggregation> {
        match &self.bucket_agg {
            BucketAggregationType::Range(range) => Some(range),
            _ => None,
        }
    }
    pub(crate) fn as_histogram(&self) -> Option<&HistogramAggregation> {
        match &self.bucket_agg {
            BucketAggregationType::Histogram(histogram) => Some(histogram),

@@ -264,6 +271,7 @@ mod tests {
                    (7f64..20f64).into(),
                    (20f64..f64::MAX).into(),
                ],
                keyed: true,
            }),
            sub_aggregation: Default::default(),
        }),

@@ -290,7 +298,8 @@ mod tests {
          {
            "from": 20.0
          }
        ]
        ],
        "keyed": true
      }
    }
  }"#;

@@ -312,6 +321,7 @@ mod tests {
                    (7f64..20f64).into(),
                    (20f64..f64::MAX).into(),
                ],
                ..Default::default()
            }),
            sub_aggregation: Default::default(),
        }),

@@ -337,6 +347,7 @@ mod tests {
                    (7f64..20f64).into(),
                    (20f64..f64::MAX).into(),
                ],
                ..Default::default()
            }),
            sub_aggregation: agg_req2,
        }),
@@ -1,10 +1,13 @@
//! This will enhance the request tree with access to the fastfield and metadata.

use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::Arc;

use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::fastfield::{
    type_and_cardinality, DynamicFastFieldReader, FastType, MultiValuedFastFieldReader,

@@ -60,6 +63,7 @@ pub struct BucketAggregationWithAccessor {
    pub(crate) field_type: Type,
    pub(crate) bucket_agg: BucketAggregationType,
    pub(crate) sub_aggregation: AggregationsWithAccessor,
    pub(crate) bucket_count: BucketCount,
}

impl BucketAggregationWithAccessor {

@@ -67,12 +71,13 @@ impl BucketAggregationWithAccessor {
        bucket: &BucketAggregationType,
        sub_aggregation: &Aggregations,
        reader: &SegmentReader,
        bucket_count: Rc<AtomicU32>,
        max_bucket_count: u32,
    ) -> crate::Result<BucketAggregationWithAccessor> {
        let mut inverted_index = None;
        let (accessor, field_type) = match &bucket {
            BucketAggregationType::Range(RangeAggregation {
                field: field_name,
                ranges: _,
                field: field_name, ..
            }) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
            BucketAggregationType::Histogram(HistogramAggregation {
                field: field_name, ..

@@ -92,9 +97,18 @@ impl BucketAggregationWithAccessor {
        Ok(BucketAggregationWithAccessor {
            accessor,
            field_type,
            sub_aggregation: get_aggs_with_accessor_and_validate(&sub_aggregation, reader)?,
            sub_aggregation: get_aggs_with_accessor_and_validate(
                &sub_aggregation,
                reader,
                bucket_count.clone(),
                max_bucket_count,
            )?,
            bucket_agg: bucket.clone(),
            inverted_index,
            bucket_count: BucketCount {
                bucket_count,
                max_bucket_count,
            },
        })
    }
}

@@ -134,6 +148,8 @@ impl MetricAggregationWithAccessor {
pub(crate) fn get_aggs_with_accessor_and_validate(
    aggs: &Aggregations,
    reader: &SegmentReader,
    bucket_count: Rc<AtomicU32>,
    max_bucket_count: u32,
) -> crate::Result<AggregationsWithAccessor> {
    let mut metrics = vec![];
    let mut buckets = vec![];

@@ -145,6 +161,8 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
                &bucket.bucket_agg,
                &bucket.sub_aggregation,
                reader,
                Rc::clone(&bucket_count),
                max_bucket_count,
            )?,
        )),
        Aggregation::Metric(metric) => metrics.push((
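These hunks thread a shared Rc<AtomicU32> plus a max_bucket_count cap through every accessor, so nested aggregations all draw from a single bucket budget and the whole request fails once that budget is exceeded. A self-contained sketch of that mechanism under assumed names (BucketCountSketch and the String error are illustrative; the real BucketCount type lives in segment_agg_result, per the imports above):

use std::rc::Rc;
use std::sync::atomic::{AtomicU32, Ordering};

/// Every collector clones the same counter, adds the buckets it creates,
/// and validates the shared total against the limit.
struct BucketCountSketch {
    bucket_count: Rc<AtomicU32>,
    max_bucket_count: u32,
}

impl BucketCountSketch {
    fn add_count(&self, count: u32) {
        self.bucket_count.fetch_add(count, Ordering::Relaxed);
    }
    fn validate(&self) -> Result<(), String> {
        let total = self.bucket_count.load(Ordering::Relaxed);
        if total > self.max_bucket_count {
            Err(format!("too many buckets: {total} > {}", self.max_bucket_count))
        } else {
            Ok(())
        }
    }
}

fn main() {
    let shared = Rc::new(AtomicU32::new(0));
    let range = BucketCountSketch { bucket_count: Rc::clone(&shared), max_bucket_count: 5 };
    let terms = BucketCountSketch { bucket_count: Rc::clone(&shared), max_bucket_count: 5 };
    range.add_count(3);
    terms.add_count(4); // shared total is now 7, above the cap of 5
    assert!(range.validate().is_err());
}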
@@ -4,21 +4,16 @@
|
||||
//! intermediate average results, which is the sum and the number of values. The actual average is
|
||||
//! calculated on the step from intermediate to final aggregation result tree.
|
||||
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::agg_req::{
|
||||
Aggregations, AggregationsInternal, BucketAggregationInternal, MetricAggregation,
|
||||
};
|
||||
use super::bucket::{intermediate_buckets_to_final_buckets, GetDocCount};
|
||||
use super::intermediate_agg_result::{
|
||||
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
|
||||
IntermediateMetricResult, IntermediateRangeBucketEntry,
|
||||
};
|
||||
use super::agg_req::BucketAggregationInternal;
|
||||
use super::bucket::GetDocCount;
|
||||
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
|
||||
use super::metric::{SingleMetricResult, Stats};
|
||||
use super::{Key, VecWithNames};
|
||||
use super::Key;
|
||||
use crate::TantivyError;
|
||||
|
||||
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||
@@ -41,98 +36,6 @@ impl AggregationResults {
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert and intermediate result and its aggregation request to the final result
|
||||
pub fn from_intermediate_and_req(
|
||||
results: IntermediateAggregationResults,
|
||||
agg: Aggregations,
|
||||
) -> crate::Result<Self> {
|
||||
AggregationResults::from_intermediate_and_req_internal(results, &(agg.into()))
|
||||
}
|
||||
|
||||
/// Convert and intermediate result and its aggregation request to the final result
|
||||
///
|
||||
/// Internal function, CollectorAggregations is used instead Aggregations, which is optimized
|
||||
/// for internal processing, by splitting metric and buckets into seperate groups.
|
||||
pub(crate) fn from_intermediate_and_req_internal(
|
||||
intermediate_results: IntermediateAggregationResults,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<Self> {
|
||||
// Important assumption:
|
||||
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
|
||||
// request
|
||||
let mut results: HashMap<String, AggregationResult> = HashMap::new();
|
||||
|
||||
if let Some(buckets) = intermediate_results.buckets {
|
||||
add_coverted_final_buckets_to_result(&mut results, buckets, &req.buckets)?
|
||||
} else {
|
||||
// When there are no buckets, we create empty buckets, so that the serialized json
|
||||
// format is constant
|
||||
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
|
||||
};
|
||||
|
||||
if let Some(metrics) = intermediate_results.metrics {
|
||||
add_converted_final_metrics_to_result(&mut results, metrics);
|
||||
} else {
|
||||
// When there are no metrics, we create empty metric results, so that the serialized
|
||||
// json format is constant
|
||||
add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
|
||||
}
|
||||
Ok(Self(results))
|
||||
}
|
||||
}
|
||||
|
||||
fn add_converted_final_metrics_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
metrics: VecWithNames<IntermediateMetricResult>,
|
||||
) {
|
||||
results.extend(
|
||||
metrics
|
||||
.into_iter()
|
||||
.map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
|
||||
);
|
||||
}
|
||||
|
||||
fn add_empty_final_metrics_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
req_metrics: &VecWithNames<MetricAggregation>,
|
||||
) -> crate::Result<()> {
|
||||
results.extend(req_metrics.iter().map(|(key, req)| {
|
||||
let empty_bucket = IntermediateMetricResult::empty_from_req(req);
|
||||
(
|
||||
key.to_string(),
|
||||
AggregationResult::MetricResult(empty_bucket.into()),
|
||||
)
|
||||
}));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_empty_final_buckets_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
req_buckets: &VecWithNames<BucketAggregationInternal>,
|
||||
) -> crate::Result<()> {
|
||||
let requested_buckets = req_buckets.iter();
|
||||
for (key, req) in requested_buckets {
|
||||
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
|
||||
results.insert(key.to_string(), empty_bucket);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_coverted_final_buckets_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
buckets: VecWithNames<IntermediateBucketResult>,
|
||||
req_buckets: &VecWithNames<BucketAggregationInternal>,
|
||||
) -> crate::Result<()> {
|
||||
assert_eq!(buckets.len(), req_buckets.len());
|
||||
|
||||
let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
|
||||
for ((key, bucket), req) in buckets_with_request {
|
||||
let result =
|
||||
AggregationResult::BucketResult(BucketResult::from_intermediate_and_req(bucket, req)?);
|
||||
results.insert(key, result);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
@@ -154,7 +57,8 @@ impl AggregationResult {
|
||||
match self {
|
||||
AggregationResult::BucketResult(_bucket) => Err(TantivyError::InternalError(
|
||||
"Tried to retrieve value from bucket aggregation. This is not supported and \
|
||||
should not happen during collection, but should be catched during validation"
|
||||
should not happen during collection phase, but should be catched during \
|
||||
validation"
|
||||
.to_string(),
|
||||
)),
|
||||
AggregationResult::MetricResult(metric) => metric.get_value(agg_property),
|
||||
@@ -201,7 +105,7 @@ pub enum BucketResult {
|
||||
/// sub_aggregations.
|
||||
Range {
|
||||
/// The range buckets sorted by range.
|
||||
buckets: Vec<RangeBucketEntry>,
|
||||
buckets: BucketEntries<RangeBucketEntry>,
|
||||
},
|
||||
/// This is the histogram entry for a bucket, which contains a key, count, and optionally
|
||||
/// sub_aggregations.
|
||||
@@ -211,7 +115,7 @@ pub enum BucketResult {
|
||||
/// If there are holes depends on the request, if min_doc_count is 0, then there are no
|
||||
/// holes between the first and last bucket.
|
||||
/// See [HistogramAggregation](super::bucket::HistogramAggregation)
|
||||
buckets: Vec<BucketEntry>,
|
||||
buckets: BucketEntries<BucketEntry>,
|
||||
},
|
||||
/// This is the term result
|
||||
Terms {
|
||||
@@ -230,49 +134,19 @@ pub enum BucketResult {
|
||||
impl BucketResult {
|
||||
pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
|
||||
let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
|
||||
BucketResult::from_intermediate_and_req(empty_bucket, req)
|
||||
empty_bucket.into_final_bucket_result(req)
|
||||
}
|
||||
}
|
||||
|
||||
fn from_intermediate_and_req(
|
||||
bucket_result: IntermediateBucketResult,
|
||||
req: &BucketAggregationInternal,
|
||||
) -> crate::Result<Self> {
|
||||
match bucket_result {
|
||||
IntermediateBucketResult::Range(range_res) => {
|
||||
let mut buckets: Vec<RangeBucketEntry> = range_res
|
||||
.buckets
|
||||
.into_iter()
|
||||
.map(|(_, bucket)| {
|
||||
RangeBucketEntry::from_intermediate_and_req(bucket, &req.sub_aggregation)
|
||||
})
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
buckets.sort_by(|left, right| {
|
||||
// TODO use total_cmp next stable rust release
|
||||
left.from
|
||||
.unwrap_or(f64::MIN)
|
||||
.partial_cmp(&right.from.unwrap_or(f64::MIN))
|
||||
.unwrap_or(Ordering::Equal)
|
||||
});
|
||||
Ok(BucketResult::Range { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Histogram { buckets } => {
|
||||
let buckets = intermediate_buckets_to_final_buckets(
|
||||
buckets,
|
||||
req.as_histogram()
|
||||
.expect("unexpected aggregation, expected histogram aggregation"),
|
||||
&req.sub_aggregation,
|
||||
)?;
|
||||
|
||||
Ok(BucketResult::Histogram { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
|
||||
req.as_term()
|
||||
.expect("unexpected aggregation, expected term aggregation"),
|
||||
&req.sub_aggregation,
|
||||
),
|
||||
}
|
||||
}
|
||||
/// This is the wrapper of buckets entries, which can be vector or hashmap
|
||||
/// depending on if it's keyed or not.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum BucketEntries<T> {
|
||||
/// Vector format bucket entries
|
||||
Vec(Vec<T>),
|
||||
/// HashMap format bucket entries
|
||||
HashMap(FnvHashMap<String, T>),
|
||||
}
|
||||
|
||||
/// This is the default entry for a bucket, which contains a key, count, and optionally
|
||||
@@ -311,22 +185,6 @@ pub struct BucketEntry {
|
||||
/// Sub-aggregations in this bucket.
|
||||
pub sub_aggregation: AggregationResults,
|
||||
}
|
||||
|
||||
impl BucketEntry {
|
||||
pub(crate) fn from_intermediate_and_req(
|
||||
entry: IntermediateHistogramBucketEntry,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<Self> {
|
||||
Ok(BucketEntry {
|
||||
key: Key::F64(entry.key),
|
||||
doc_count: entry.doc_count,
|
||||
sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
|
||||
entry.sub_aggregation,
|
||||
req,
|
||||
)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
impl GetDocCount for &BucketEntry {
|
||||
fn doc_count(&self) -> u64 {
|
||||
self.doc_count
|
||||
@@ -384,21 +242,3 @@ pub struct RangeBucketEntry {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
impl RangeBucketEntry {
|
||||
fn from_intermediate_and_req(
|
||||
entry: IntermediateRangeBucketEntry,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<Self> {
|
||||
Ok(RangeBucketEntry {
|
||||
key: entry.key,
|
||||
doc_count: entry.doc_count,
|
||||
sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
|
||||
entry.sub_aggregation,
|
||||
req,
|
||||
)?,
|
||||
to: entry.to,
|
||||
from: entry.from,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,8 +48,6 @@ use crate::{DocId, TantivyError};
|
||||
///
|
||||
/// # Limitations/Compatibility
|
||||
///
|
||||
/// The keyed parameter (elasticsearch) is not yet supported.
|
||||
///
|
||||
/// # JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
@@ -117,6 +115,9 @@ pub struct HistogramAggregation {
|
||||
/// Cannot be set in conjunction with min_doc_count > 0, since the empty buckets from extended
|
||||
/// bounds would not be returned.
|
||||
pub extended_bounds: Option<HistogramBounds>,
|
||||
/// Whether to return the buckets as a hash map
|
||||
#[serde(default)]
|
||||
pub keyed: bool,
|
||||
}
|
||||
|
||||
impl HistogramAggregation {
|
||||
@@ -250,6 +251,11 @@ impl SegmentHistogramCollector {
|
||||
);
|
||||
};
|
||||
|
||||
agg_with_accessor
|
||||
.bucket_count
|
||||
.add_count(buckets.len() as u32);
|
||||
agg_with_accessor.bucket_count.validate_bucket_count()?;
|
||||
|
||||
Ok(IntermediateBucketResult::Histogram { buckets })
|
||||
}
|
||||
|
||||
@@ -311,7 +317,7 @@ impl SegmentHistogramCollector {
|
||||
doc: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
let bounds = self.bounds;
|
||||
let interval = self.interval;
|
||||
let offset = self.offset;
|
||||
@@ -341,28 +347,28 @@ impl SegmentHistogramCollector {
|
||||
bucket_pos0,
|
||||
docs[0],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
);
|
||||
)?;
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val1,
|
||||
&bounds,
|
||||
bucket_pos1,
|
||||
docs[1],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
);
|
||||
)?;
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val2,
|
||||
&bounds,
|
||||
bucket_pos2,
|
||||
docs[2],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
);
|
||||
)?;
|
||||
self.increment_bucket_if_in_bounds(
|
||||
val3,
|
||||
&bounds,
|
||||
bucket_pos3,
|
||||
docs[3],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
);
|
||||
)?;
|
||||
}
|
||||
for doc in iter.remainder() {
|
||||
let val = f64_from_fastfield_u64(accessor.get(*doc), &self.field_type);
|
||||
@@ -376,16 +382,17 @@ impl SegmentHistogramCollector {
|
||||
self.buckets[bucket_pos].key,
|
||||
get_bucket_val(val, self.interval, self.offset) as f64
|
||||
);
|
||||
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
if force_flush {
|
||||
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {
|
||||
for sub_aggregation in sub_aggregations {
|
||||
sub_aggregation
|
||||
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush);
|
||||
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -396,15 +403,16 @@ impl SegmentHistogramCollector {
|
||||
bucket_pos: usize,
|
||||
doc: DocId,
|
||||
bucket_with_accessor: &AggregationsWithAccessor,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
if bounds.contains(val) {
|
||||
debug_assert_eq!(
|
||||
self.buckets[bucket_pos].key,
|
||||
get_bucket_val(val, self.interval, self.offset) as f64
|
||||
);
|
||||
|
||||
self.increment_bucket(bucket_pos, doc, bucket_with_accessor);
|
||||
self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -413,12 +421,13 @@ impl SegmentHistogramCollector {
|
||||
bucket_pos: usize,
|
||||
doc: DocId,
|
||||
bucket_with_accessor: &AggregationsWithAccessor,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
let bucket = &mut self.buckets[bucket_pos];
|
||||
bucket.doc_count += 1;
|
||||
if let Some(sub_aggregation) = self.sub_aggregations.as_mut() {
|
||||
(&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor);
|
||||
(&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn f64_from_fastfield_u64(&self, val: u64) -> f64 {
|
||||
@@ -482,14 +491,12 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
|
||||
sub_aggregation: empty_sub_aggregation.clone(),
|
||||
},
|
||||
})
|
||||
.map(|intermediate_bucket| {
|
||||
BucketEntry::from_intermediate_and_req(intermediate_bucket, sub_aggregation)
|
||||
})
|
||||
.map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
|
||||
.collect::<crate::Result<Vec<_>>>()
|
||||
}
|
||||
|
||||
// Convert to BucketEntry
|
||||
pub(crate) fn intermediate_buckets_to_final_buckets(
|
||||
pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
|
||||
buckets: Vec<IntermediateHistogramBucketEntry>,
|
||||
histogram_req: &HistogramAggregation,
|
||||
sub_aggregation: &AggregationsInternal,
|
||||
@@ -503,8 +510,8 @@ pub(crate) fn intermediate_buckets_to_final_buckets(
|
||||
} else {
|
||||
buckets
|
||||
.into_iter()
|
||||
.filter(|bucket| bucket.doc_count >= histogram_req.min_doc_count())
|
||||
.map(|bucket| BucketEntry::from_intermediate_and_req(bucket, sub_aggregation))
|
||||
.filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
|
||||
.map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
|
||||
.collect::<crate::Result<Vec<_>>>()
|
||||
}
|
||||
}
|
||||
@@ -546,7 +553,7 @@ pub(crate) fn generate_buckets_with_opt_minmax(
|
||||
let offset = req.offset.unwrap_or(0.0);
|
||||
let first_bucket_num = get_bucket_num_f64(min, req.interval, offset) as i64;
|
||||
let last_bucket_num = get_bucket_num_f64(max, req.interval, offset) as i64;
|
||||
let mut buckets = vec![];
|
||||
let mut buckets = Vec::with_capacity((first_bucket_num..=last_bucket_num).count());
|
||||
for bucket_pos in first_bucket_num..=last_bucket_num {
|
||||
let bucket_key = bucket_pos as f64 * req.interval + offset;
|
||||
buckets.push(bucket_key);
|
||||
@@ -1389,4 +1396,46 @@ mod tests {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn histogram_keyed_buckets_test() -> crate::Result<()> {
|
||||
let index = get_test_index_with_num_docs(false, 100)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"histogram".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
|
||||
field: "score_f64".to_string(),
|
||||
interval: 50.0,
|
||||
keyed: true,
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request(agg_req, &index)?;
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"histogram": {
|
||||
"buckets": {
|
||||
"0": {
|
||||
"key": 0.0,
|
||||
"doc_count": 50
|
||||
},
|
||||
"50": {
|
||||
"key": 50.0,
|
||||
"doc_count": 50
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Range;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::aggregation::agg_req_with_accessor::{
|
||||
@@ -9,15 +10,15 @@ use crate::aggregation::agg_req_with_accessor::{
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
|
||||
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key};
|
||||
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
|
||||
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::schema::Type;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
/// Provide user-defined buckets to aggregate on.
|
||||
/// Two special buckets will automatically be created to cover the whole range of values.
|
||||
/// The provided buckets have to be continous.
|
||||
/// The provided buckets have to be continuous.
|
||||
/// During the aggregation, the values extracted from the fast_field `field` will be checked
|
||||
/// against each bucket range. Note that this aggregation includes the from value and excludes the
|
||||
/// to value for each range.
|
||||
@@ -34,8 +35,6 @@ use crate::{DocId, TantivyError};
|
||||
/// # Limitations/Compatibility
|
||||
/// Overlapping ranges are not yet supported.
|
||||
///
|
||||
/// The keyed parameter (elasticsearch) is not yet supported.
|
||||
///
|
||||
/// # Request JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
@@ -50,18 +49,24 @@ use crate::{DocId, TantivyError};
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RangeAggregation {
|
||||
/// The field to aggregate on.
|
||||
pub field: String,
|
||||
/// Note that this aggregation includes the from value and excludes the to value for each
|
||||
/// range. Extra buckets will be created until the first to, and last from, if necessary.
|
||||
pub ranges: Vec<RangeAggregationRange>,
|
||||
/// Whether to return the buckets as a hash map
|
||||
#[serde(default)]
|
||||
pub keyed: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
/// The range for one range bucket.
|
||||
pub struct RangeAggregationRange {
|
||||
/// Custom key for the range bucket
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub key: Option<String>,
|
||||
/// The from range value, which is inclusive in the range.
|
||||
/// None equals to an open ended interval.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
@@ -84,7 +89,26 @@ impl From<Range<f64>> for RangeAggregationRange {
|
||||
} else {
|
||||
Some(range.end)
|
||||
};
|
||||
RangeAggregationRange { from, to }
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
from,
|
||||
to,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
/// Internally used u64 range for one range bucket.
|
||||
pub(crate) struct InternalRangeAggregationRange {
|
||||
/// Custom key for the range bucket
|
||||
key: Option<String>,
|
||||
/// u64 range value
|
||||
range: Range<u64>,
|
||||
}
|
||||
|
||||
impl From<Range<u64>> for InternalRangeAggregationRange {
|
||||
fn from(range: Range<u64>) -> Self {
|
||||
InternalRangeAggregationRange { key: None, range }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,7 +177,7 @@ impl SegmentRangeCollector {
|
||||
) -> crate::Result<IntermediateBucketResult> {
|
||||
let field_type = self.field_type;
|
||||
|
||||
let buckets = self
|
||||
let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
|
||||
.buckets
|
||||
.into_iter()
|
||||
.map(move |range_bucket| {
|
||||
@@ -174,23 +198,29 @@ impl SegmentRangeCollector {
|
||||
pub(crate) fn from_req_and_validate(
|
||||
req: &RangeAggregation,
|
||||
sub_aggregation: &AggregationsWithAccessor,
|
||||
bucket_count: &BucketCount,
|
||||
field_type: Type,
|
||||
) -> crate::Result<Self> {
|
||||
// The range input on the request is f64.
|
||||
// We need to convert to u64 ranges, because we read the values as u64.
|
||||
// The mapping from the conversion is monotonic so ordering is preserved.
|
||||
let buckets = extend_validate_ranges(&req.ranges, &field_type)?
|
||||
let buckets: Vec<_> = extend_validate_ranges(&req.ranges, &field_type)?
|
||||
.iter()
|
||||
.map(|range| {
|
||||
let to = if range.end == u64::MAX {
|
||||
let key = range
|
||||
.key
|
||||
.clone()
|
||||
.map(|key| Key::Str(key))
|
||||
.unwrap_or(range_to_key(&range.range, &field_type));
|
||||
let to = if range.range.end == u64::MAX {
|
||||
None
|
||||
} else {
|
||||
Some(f64_from_fastfield_u64(range.end, &field_type))
|
||||
Some(f64_from_fastfield_u64(range.range.end, &field_type))
|
||||
};
|
||||
let from = if range.start == u64::MIN {
|
||||
let from = if range.range.start == u64::MIN {
|
||||
None
|
||||
} else {
|
||||
Some(f64_from_fastfield_u64(range.start, &field_type))
|
||||
Some(f64_from_fastfield_u64(range.range.start, &field_type))
|
||||
};
|
||||
let sub_aggregation = if sub_aggregation.is_empty() {
|
||||
None
|
||||
@@ -200,11 +230,11 @@ impl SegmentRangeCollector {
|
||||
)?)
|
||||
};
|
||||
Ok(SegmentRangeAndBucketEntry {
|
||||
range: range.clone(),
|
||||
range: range.range.clone(),
|
||||
bucket: SegmentRangeBucketEntry {
|
||||
key: range_to_key(range, &field_type),
|
||||
doc_count: 0,
|
||||
sub_aggregation,
|
||||
key,
|
||||
from,
|
||||
to,
|
||||
},
|
||||
@@ -212,6 +242,9 @@ impl SegmentRangeCollector {
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
bucket_count.add_count(buckets.len() as u32);
|
||||
bucket_count.validate_bucket_count()?;
|
||||
|
||||
Ok(SegmentRangeCollector {
|
||||
buckets,
|
||||
field_type,
|
||||
@@ -224,7 +257,7 @@ impl SegmentRangeCollector {
|
||||
doc: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
let accessor = bucket_with_accessor
|
||||
.accessor
|
||||
@@ -240,24 +273,25 @@ impl SegmentRangeCollector {
|
||||
let bucket_pos3 = self.get_bucket_pos(val3);
|
||||
let bucket_pos4 = self.get_bucket_pos(val4);
|
||||
|
||||
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
for doc in iter.remainder() {
|
||||
let val = accessor.get(*doc);
|
||||
let bucket_pos = self.get_bucket_pos(val);
|
||||
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
if force_flush {
|
||||
for bucket in &mut self.buckets {
|
||||
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
|
||||
sub_aggregation
|
||||
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush);
|
||||
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -266,13 +300,14 @@ impl SegmentRangeCollector {
|
||||
bucket_pos: usize,
|
||||
doc: DocId,
|
||||
bucket_with_accessor: &AggregationsWithAccessor,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
let bucket = &mut self.buckets[bucket_pos];
|
||||
|
||||
bucket.bucket.doc_count += 1;
|
||||
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
|
||||
sub_aggregation.collect(doc, bucket_with_accessor);
|
||||
sub_aggregation.collect(doc, bucket_with_accessor)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -298,7 +333,10 @@ impl SegmentRangeCollector {
|
||||
/// fast field.
|
||||
/// The alternative would be that every value read would be converted to the f64 range, but that is
|
||||
/// more computational expensive when many documents are hit.
|
||||
fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Result<Range<u64>> {
|
||||
fn to_u64_range(
|
||||
range: &RangeAggregationRange,
|
||||
field_type: &Type,
|
||||
) -> crate::Result<InternalRangeAggregationRange> {
|
||||
let start = if let Some(from) = range.from {
|
||||
f64_to_fastfield_u64(from, field_type)
|
||||
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
|
||||
@@ -313,39 +351,43 @@ fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Resu
|
||||
u64::MAX
|
||||
};
|
||||
|
||||
Ok(start..end)
|
||||
Ok(InternalRangeAggregationRange {
|
||||
key: range.key.clone(),
|
||||
range: start..end,
|
||||
})
|
||||
}
|
||||
|
||||
/// Extends the provided buckets to contain the whole value range, by inserting buckets at the
|
||||
/// beginning and end.
|
||||
/// beginning and end and filling gaps.
|
||||
fn extend_validate_ranges(
|
||||
buckets: &[RangeAggregationRange],
|
||||
field_type: &Type,
|
||||
) -> crate::Result<Vec<Range<u64>>> {
|
||||
) -> crate::Result<Vec<InternalRangeAggregationRange>> {
|
||||
let mut converted_buckets = buckets
|
||||
.iter()
|
||||
.map(|range| to_u64_range(range, field_type))
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
converted_buckets.sort_by_key(|bucket| bucket.start);
|
||||
if converted_buckets[0].start != u64::MIN {
|
||||
converted_buckets.insert(0, u64::MIN..converted_buckets[0].start);
|
||||
converted_buckets.sort_by_key(|bucket| bucket.range.start);
|
||||
if converted_buckets[0].range.start != u64::MIN {
|
||||
converted_buckets.insert(0, (u64::MIN..converted_buckets[0].range.start).into());
|
||||
}
|
||||
|
||||
if converted_buckets[converted_buckets.len() - 1].end != u64::MAX {
|
||||
converted_buckets.push(converted_buckets[converted_buckets.len() - 1].end..u64::MAX);
|
||||
if converted_buckets[converted_buckets.len() - 1].range.end != u64::MAX {
|
||||
converted_buckets
|
||||
.push((converted_buckets[converted_buckets.len() - 1].range.end..u64::MAX).into());
|
||||
}
|
||||
|
||||
// fill up holes in the ranges
|
||||
let find_hole = |converted_buckets: &[Range<u64>]| {
|
||||
let find_hole = |converted_buckets: &[InternalRangeAggregationRange]| {
|
||||
for (pos, ranges) in converted_buckets.windows(2).enumerate() {
|
||||
if ranges[0].end > ranges[1].start {
|
||||
if ranges[0].range.end > ranges[1].range.start {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Overlapping ranges not supported range {:?}, range+1 {:?}",
|
||||
ranges[0], ranges[1]
|
||||
)));
|
||||
}
|
||||
if ranges[0].end != ranges[1].start {
|
||||
if ranges[0].range.end != ranges[1].range.start {
|
||||
return Ok(Some(pos));
|
||||
}
|
||||
}
|
||||
@@ -353,8 +395,9 @@ fn extend_validate_ranges(
|
||||
};
|
||||
|
||||
while let Some(hole_pos) = find_hole(&converted_buckets)? {
|
||||
let new_range = converted_buckets[hole_pos].end..converted_buckets[hole_pos + 1].start;
|
||||
converted_buckets.insert(hole_pos + 1, new_range);
|
||||
let new_range =
|
||||
converted_buckets[hole_pos].range.end..converted_buckets[hole_pos + 1].range.start;
|
||||
converted_buckets.insert(hole_pos + 1, new_range.into());
|
||||
}
|
||||
|
||||
Ok(converted_buckets)
|
||||
@@ -362,7 +405,7 @@ fn extend_validate_ranges(
|
||||
|
||||
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
|
||||
// is_start is there for malformed requests, e.g. ig the user passes the range u64::MIN..0.0,
|
||||
// it should be rendererd as "*-0" and not "*-*"
|
||||
// it should be rendered as "*-0" and not "*-*"
|
||||
let to_str = |val: u64, is_start: bool| {
|
||||
if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
|
||||
"*".to_string()
|
||||
@@ -381,16 +424,12 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use serde_json::Value;
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req::{
|
||||
Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
|
||||
};
|
||||
use crate::aggregation::tests::get_test_index_with_num_docs;
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};
|
||||
use crate::fastfield::FastValue;
|
||||
use crate::query::AllQuery;
|
||||
|
||||
pub fn get_collector_from_ranges(
|
||||
ranges: Vec<RangeAggregationRange>,
|
||||
@@ -399,10 +438,16 @@ mod tests {
|
||||
let req = RangeAggregation {
|
||||
field: "dummy".to_string(),
|
||||
ranges,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
SegmentRangeCollector::from_req_and_validate(&req, &Default::default(), field_type)
|
||||
.expect("unexpected error")
|
||||
SegmentRangeCollector::from_req_and_validate(
|
||||
&req,
|
||||
&Default::default(),
|
||||
&Default::default(),
|
||||
field_type,
|
||||
)
|
||||
.expect("unexpected error")
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -415,6 +460,7 @@ mod tests {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
@@ -422,13 +468,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req);
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let agg_res = searcher.search(&AllQuery, &collector).unwrap();
|
||||
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(res["range"]["buckets"][0]["key"], "*-0");
|
||||
assert_eq!(res["range"]["buckets"][0]["doc_count"], 0);
|
||||
@@ -442,6 +482,131 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn range_keyed_buckets_test() -> crate::Result<()> {
|
||||
let index = get_test_index_with_num_docs(false, 100)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
|
||||
keyed: true,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"range": {
|
||||
"buckets": {
|
||||
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
|
||||
"0-0.1": {"key": "0-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
|
||||
"0.1-0.2": {"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
|
||||
"0.2-*": {"key": "0.2-*", "doc_count": 80, "from": 0.2},
|
||||
}
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn range_custom_key_test() -> crate::Result<()> {
|
||||
let index = get_test_index_with_num_docs(false, 100)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![
|
||||
RangeAggregationRange {
|
||||
key: Some("custom-key-0-to-0.1".to_string()),
|
||||
from: Some(0f64),
|
||||
to: Some(0.1f64),
|
||||
},
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
from: Some(0.1f64),
|
||||
to: Some(0.2f64),
|
||||
},
|
||||
],
|
||||
keyed: false,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"range": {
|
||||
"buckets": [
|
||||
{"key": "*-0", "doc_count": 0, "to": 0.0},
|
||||
{"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
|
||||
{"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
|
||||
{"key": "0.2-*", "doc_count": 80, "from": 0.2}
|
||||
]
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
|
||||
let index = get_test_index_with_num_docs(false, 100)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![RangeAggregationRange {
|
||||
key: Some("custom-key-0-to-0.1".to_string()),
|
||||
from: Some(0f64),
|
||||
to: Some(0.1f64),
|
||||
}],
|
||||
keyed: true,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"range": {
|
||||
"buckets": {
|
||||
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
|
||||
"custom-key-0-to-0.1": {"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
|
||||
"0.1-*": {"key": "0.1-*", "doc_count": 90, "from": 0.1},
|
||||
}
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_test_extend_range_hole() {
|
||||
let buckets = vec![(10f64..20f64).into(), (30f64..40f64).into()];
|
||||
@@ -520,6 +685,7 @@ mod tests {
|
||||
|
||||
let ranges = vec![
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
to: Some(10.0),
|
||||
from: None,
|
||||
},
|
||||
@@ -529,11 +695,13 @@ mod tests {
|
||||
|
||||
let ranges = vec![
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
to: Some(10.0),
|
||||
from: None,
|
||||
},
|
||||
(10.0..100.0).into(),
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
to: None,
|
||||
from: Some(100.0),
|
||||
},
|
||||
|
||||
@@ -11,7 +11,7 @@ use crate::aggregation::agg_req_with_accessor::{
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateBucketResult, IntermediateTermBucketEntry, IntermediateTermBucketResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
|
||||
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::fastfield::MultiValuedFastFieldReader;
|
||||
use crate::schema::Type;
|
||||
@@ -244,28 +244,33 @@ impl TermBuckets {
|
||||
&mut self,
|
||||
term_ids: &[u64],
|
||||
doc: DocId,
|
||||
bucket_with_accessor: &AggregationsWithAccessor,
|
||||
sub_aggregation: &AggregationsWithAccessor,
|
||||
bucket_count: &BucketCount,
|
||||
blueprint: &Option<SegmentAggregationResultsCollector>,
|
||||
) {
|
||||
// self.ensure_vec_exists(term_ids);
|
||||
) -> crate::Result<()> {
|
||||
for &term_id in term_ids {
|
||||
let entry = self
|
||||
.entries
|
||||
.entry(term_id as u32)
|
||||
.or_insert_with(|| TermBucketEntry::from_blueprint(blueprint));
|
||||
let entry = self.entries.entry(term_id as u32).or_insert_with(|| {
|
||||
bucket_count.add_count(1);
|
||||
|
||||
TermBucketEntry::from_blueprint(blueprint)
|
||||
});
|
||||
entry.doc_count += 1;
|
||||
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
|
||||
sub_aggregations.collect(doc, bucket_with_accessor);
|
||||
sub_aggregations.collect(doc, sub_aggregation)?;
|
||||
}
|
||||
}
|
||||
bucket_count.validate_bucket_count()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) {
|
||||
fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
|
||||
for entry in &mut self.entries.values_mut() {
|
||||
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
|
||||
sub_aggregations.flush_staged_docs(agg_with_accessor, false);
|
||||
sub_aggregations.flush_staged_docs(agg_with_accessor, false)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -421,7 +426,7 @@ impl SegmentTermCollector {
|
||||
doc: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
let accessor = bucket_with_accessor
|
||||
.accessor
|
||||
.as_multi()
|
||||
@@ -441,26 +446,30 @@ impl SegmentTermCollector {
|
||||
&vals1,
|
||||
docs[0],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
);
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals2,
|
||||
docs[1],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
);
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals3,
|
||||
docs[2],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
);
|
||||
)?;
|
||||
self.term_buckets.increment_bucket(
|
||||
&vals4,
|
||||
docs[3],
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
);
|
||||
)?;
|
||||
}
|
||||
for &doc in iter.remainder() {
|
||||
accessor.get_vals(doc, &mut vals1);
|
||||
@@ -469,13 +478,15 @@ impl SegmentTermCollector {
|
||||
&vals1,
|
||||
doc,
|
||||
&bucket_with_accessor.sub_aggregation,
|
||||
&bucket_with_accessor.bucket_count,
|
||||
&self.blueprint,
|
||||
);
|
||||
)?;
|
||||
}
|
||||
if force_flush {
|
||||
self.term_buckets
|
||||
.force_flush(&bucket_with_accessor.sub_aggregation);
|
||||
.force_flush(&bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1173,6 +1184,33 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
|
||||
let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect();
|
||||
let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];
|
||||
|
||||
let index = get_test_index_from_terms(true, &terms_per_segment)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"my_texts".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
|
||||
field: "string_id".to_string(),
|
||||
min_doc_count: Some(0),
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None);
|
||||
assert!(res.is_err());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_format() -> crate::Result<()> {
|
||||
let agg_req: Aggregations = vec![(
|
||||
@@ -1291,9 +1329,15 @@ mod bench {
|
||||
let mut collector = get_collector_with_buckets(total_terms);
|
||||
let vals = get_rand_terms(total_terms, num_terms);
|
||||
let aggregations_with_accessor: AggregationsWithAccessor = Default::default();
|
||||
let bucket_count: BucketCount = BucketCount {
|
||||
bucket_count: Default::default(),
|
||||
max_bucket_count: 1_000_001u32,
|
||||
};
|
||||
b.iter(|| {
|
||||
for &val in &vals {
|
||||
collector.increment_bucket(&[val], 0, &aggregations_with_accessor, &None);
|
||||
collector
|
||||
.increment_bucket(&[val], 0, &aggregations_with_accessor, &bucket_count, &None)
|
||||
.unwrap();
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
use std::rc::Rc;

use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults;

@@ -5,19 +7,29 @@ use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::SegmentReader;
use crate::{SegmentReader, TantivyError};

/// The default max bucket count, before the aggregation fails.
pub const MAX_BUCKET_COUNT: u32 = 65000;

/// Collector for aggregations.
///
/// The collector collects all aggregations by the underlying aggregation request.
pub struct AggregationCollector {
    agg: Aggregations,
    max_bucket_count: u32,
}

impl AggregationCollector {
    /// Create collector from aggregation request.
    pub fn from_aggs(agg: Aggregations) -> Self {
        Self { agg }
    ///
    /// Aggregation fails when the total bucket count is higher than max_bucket_count.
    /// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
        Self {
            agg,
            max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
        }
    }
}

@@ -28,15 +40,21 @@ impl AggregationCollector {
/// # Purpose
/// AggregationCollector returns `IntermediateAggregationResults` and not the final
/// `AggregationResults`, so that results from differenct indices can be merged and then converted
/// into the final `AggregationResults` via the `into()` method.
/// into the final `AggregationResults` via the `into_final_result()` method.
pub struct DistributedAggregationCollector {
    agg: Aggregations,
    max_bucket_count: u32,
}

impl DistributedAggregationCollector {
    /// Create collector from aggregation request.
    pub fn from_aggs(agg: Aggregations) -> Self {
        Self { agg }
    ///
    /// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
    pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
        Self {
            agg,
            max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
        }
    }
}
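Assuming the constructor signatures shown above and the usual tantivy::aggregation re-exports (the import paths here are an assumption, not part of the diff), call sites would now look roughly like this; passing None keeps the MAX_BUCKET_COUNT default of 65000, Some overrides it:

use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::{AggregationCollector, DistributedAggregationCollector};

fn main() {
    // An empty request is enough to exercise the constructors.
    let agg_req = Aggregations::default();
    let _collector = AggregationCollector::from_aggs(agg_req.clone(), None);
    let _distributed = DistributedAggregationCollector::from_aggs(agg_req, Some(100_000));
}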
@@ -50,7 +68,11 @@ impl Collector for DistributedAggregationCollector {
        _segment_local_id: crate::SegmentOrdinal,
        reader: &crate::SegmentReader,
    ) -> crate::Result<Self::Child> {
        AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
        AggregationSegmentCollector::from_agg_req_and_reader(
            &self.agg,
            reader,
            self.max_bucket_count,
        )
    }

    fn requires_scoring(&self) -> bool {

@@ -75,7 +97,11 @@ impl Collector for AggregationCollector {
        _segment_local_id: crate::SegmentOrdinal,
        reader: &crate::SegmentReader,
    ) -> crate::Result<Self::Child> {
        AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
        AggregationSegmentCollector::from_agg_req_and_reader(
            &self.agg,
            reader,
            self.max_bucket_count,
        )
    }

    fn requires_scoring(&self) -> bool {

@@ -87,7 +113,7 @@ impl Collector for AggregationCollector {
        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
    ) -> crate::Result<Self::Fruit> {
        let res = merge_fruits(segment_fruits)?;
        AggregationResults::from_intermediate_and_req(res, self.agg.clone())
        res.into_final_bucket_result(self.agg.clone())
    }
}

@@ -109,6 +135,7 @@ fn merge_fruits(
pub struct AggregationSegmentCollector {
    aggs_with_accessor: AggregationsWithAccessor,
    result: SegmentAggregationResultsCollector,
    error: Option<TantivyError>,
}

impl AggregationSegmentCollector {

@@ -117,13 +144,16 @@ impl AggregationSegmentCollector {
    pub fn from_agg_req_and_reader(
        agg: &Aggregations,
        reader: &SegmentReader,
        max_bucket_count: u32,
    ) -> crate::Result<Self> {
        let aggs_with_accessor = get_aggs_with_accessor_and_validate(agg, reader)?;
        let aggs_with_accessor =
            get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
        let result =
            SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
        Ok(AggregationSegmentCollector {
            aggs_with_accessor,
            result,
            error: None,
        })
    }
}

@@ -133,12 +163,20 @@ impl SegmentCollector for AggregationSegmentCollector {

    #[inline]
    fn collect(&mut self, doc: crate::DocId, _score: crate::Score) {
        self.result.collect(doc, &self.aggs_with_accessor);
        if self.error.is_some() {
            return;
        }
        if let Err(err) = self.result.collect(doc, &self.aggs_with_accessor) {
            self.error = Some(err);
        }
    }

    fn harvest(mut self) -> Self::Fruit {
        if let Some(err) = self.error {
            return Err(err);
        }
        self.result
            .flush_staged_docs(&self.aggs_with_accessor, true);
            .flush_staged_docs(&self.aggs_with_accessor, true)?;
        self.result
            .into_intermediate_aggregations_result(&self.aggs_with_accessor)
    }
@@ -3,21 +3,25 @@
|
||||
//! indices.
|
||||
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use itertools::Itertools;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::agg_req::{AggregationsInternal, BucketAggregationType, MetricAggregation};
|
||||
use super::agg_result::BucketResult;
|
||||
use super::agg_req::{
|
||||
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
|
||||
MetricAggregation,
|
||||
};
|
||||
use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
|
||||
use super::bucket::{
|
||||
cut_off_buckets, get_agg_name_and_property, GetDocCount, Order, OrderTarget,
|
||||
SegmentHistogramBucketEntry, TermsAggregation,
|
||||
cut_off_buckets, get_agg_name_and_property, intermediate_histogram_buckets_to_final_buckets,
|
||||
GetDocCount, Order, OrderTarget, SegmentHistogramBucketEntry, TermsAggregation,
|
||||
};
|
||||
use super::metric::{IntermediateAverage, IntermediateStats};
|
||||
use super::segment_agg_result::SegmentMetricResultCollector;
|
||||
use super::{Key, SerializedKey, VecWithNames};
|
||||
use crate::aggregation::agg_result::{AggregationResults, BucketEntry};
|
||||
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
|
||||
use crate::aggregation::bucket::TermsAggregationInternal;
|
||||
|
||||
/// Contains the intermediate aggregation result, which is optimized to be merged with other
|
||||
@@ -31,6 +35,43 @@ pub struct IntermediateAggregationResults {
|
||||
}
|
||||
|
||||
impl IntermediateAggregationResults {
|
||||
/// Convert intermediate result and its aggregation request to the final result.
|
||||
pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
|
||||
self.into_final_bucket_result_internal(&(req.into()))
|
||||
}
|
||||
|
||||
/// Convert intermediate result and its aggregation request to the final result.
|
||||
///
|
||||
/// Internal function, AggregationsInternal is used instead Aggregations, which is optimized
|
||||
/// for internal processing, by splitting metric and buckets into seperate groups.
|
||||
pub(crate) fn into_final_bucket_result_internal(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<AggregationResults> {
|
||||
// Important assumption:
|
||||
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
|
||||
// request
|
||||
let mut results: HashMap<String, AggregationResult> = HashMap::new();
|
||||
|
||||
if let Some(buckets) = self.buckets {
|
||||
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
|
||||
} else {
|
||||
// When there are no buckets, we create empty buckets, so that the serialized json
|
||||
// format is constant
|
||||
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
|
||||
};
|
||||
|
||||
if let Some(metrics) = self.metrics {
|
||||
convert_and_add_final_metrics_to_result(&mut results, metrics);
|
||||
} else {
|
||||
// When there are no metrics, we create empty metric results, so that the serialized
|
||||
// json format is constant
|
||||
add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
|
||||
}
|
||||
|
||||
Ok(AggregationResults(results))
|
||||
}
|
||||
|
||||
pub(crate) fn empty_from_req(req: &AggregationsInternal) -> Self {
|
||||
let metrics = if req.metrics.is_empty() {
|
||||
None
|
||||
@@ -90,6 +131,58 @@ impl IntermediateAggregationResults {
|
||||
}
|
||||
}
|
||||
|
||||
fn convert_and_add_final_metrics_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
metrics: VecWithNames<IntermediateMetricResult>,
|
||||
) {
|
||||
results.extend(
|
||||
metrics
|
||||
.into_iter()
|
||||
.map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
|
||||
);
|
||||
}
|
||||
|
||||
fn add_empty_final_metrics_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
req_metrics: &VecWithNames<MetricAggregation>,
|
||||
) -> crate::Result<()> {
|
||||
results.extend(req_metrics.iter().map(|(key, req)| {
|
||||
let empty_bucket = IntermediateMetricResult::empty_from_req(req);
|
||||
(
|
||||
key.to_string(),
|
||||
AggregationResult::MetricResult(empty_bucket.into()),
|
||||
)
|
||||
}));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_empty_final_buckets_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
req_buckets: &VecWithNames<BucketAggregationInternal>,
|
||||
) -> crate::Result<()> {
|
||||
let requested_buckets = req_buckets.iter();
|
||||
for (key, req) in requested_buckets {
|
||||
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
|
||||
results.insert(key.to_string(), empty_bucket);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn convert_and_add_final_buckets_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
buckets: VecWithNames<IntermediateBucketResult>,
|
||||
req_buckets: &VecWithNames<BucketAggregationInternal>,
|
||||
) -> crate::Result<()> {
|
||||
assert_eq!(buckets.len(), req_buckets.len());
|
||||
|
||||
let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
|
||||
for ((key, bucket), req) in buckets_with_request {
|
||||
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
|
||||
results.insert(key, result);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// An aggregation is either a bucket or a metric.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub enum IntermediateAggregationResult {
|
||||
@@ -171,6 +264,68 @@ pub enum IntermediateBucketResult {
|
||||
}
|
||||
|
||||
impl IntermediateBucketResult {
|
||||
pub(crate) fn into_final_bucket_result(
|
||||
self,
|
||||
req: &BucketAggregationInternal,
|
||||
) -> crate::Result<BucketResult> {
|
||||
match self {
|
||||
IntermediateBucketResult::Range(range_res) => {
|
||||
let mut buckets: Vec<RangeBucketEntry> = range_res
|
||||
.buckets
|
||||
.into_iter()
|
||||
.map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
buckets.sort_by(|left, right| {
|
||||
left.from
|
||||
.unwrap_or(f64::MIN)
|
||||
.total_cmp(&right.from.unwrap_or(f64::MIN))
|
||||
});
|
||||
|
||||
let is_keyed = req
|
||||
.as_range()
|
||||
.expect("unexpected aggregation, expected range aggregation")
|
||||
.keyed;
|
||||
let buckets = if is_keyed {
|
||||
let mut bucket_map =
|
||||
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
|
||||
for bucket in buckets {
|
||||
bucket_map.insert(bucket.key.to_string(), bucket);
|
||||
}
|
||||
BucketEntries::HashMap(bucket_map)
|
||||
} else {
|
||||
BucketEntries::Vec(buckets)
|
||||
};
|
||||
Ok(BucketResult::Range { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Histogram { buckets } => {
|
||||
let buckets = intermediate_histogram_buckets_to_final_buckets(
|
||||
buckets,
|
||||
req.as_histogram()
|
||||
.expect("unexpected aggregation, expected histogram aggregation"),
|
||||
&req.sub_aggregation,
|
||||
)?;
|
||||
|
||||
let buckets = if req.as_histogram().unwrap().keyed {
|
||||
let mut bucket_map =
|
||||
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
|
||||
for bucket in buckets {
|
||||
bucket_map.insert(bucket.key.to_string(), bucket);
|
||||
}
|
||||
BucketEntries::HashMap(bucket_map)
|
||||
} else {
|
||||
BucketEntries::Vec(buckets)
|
||||
};
|
||||
Ok(BucketResult::Histogram { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
|
||||
req.as_term()
|
||||
.expect("unexpected aggregation, expected term aggregation"),
|
||||
&req.sub_aggregation,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn empty_from_req(req: &BucketAggregationType) -> Self {
|
||||
match req {
|
||||
BucketAggregationType::Terms(_) => IntermediateBucketResult::Terms(Default::default()),
|
||||
@@ -267,10 +422,9 @@ impl IntermediateTermBucketResult {
|
||||
Ok(BucketEntry {
|
||||
key: Key::Str(key),
|
||||
doc_count: entry.doc_count,
|
||||
sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
|
||||
entry.sub_aggregation,
|
||||
sub_aggregation_req,
|
||||
)?,
|
||||
sub_aggregation: entry
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(sub_aggregation_req)?,
|
||||
})
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
@@ -307,12 +461,9 @@ impl IntermediateTermBucketResult {
|
||||
})
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
buckets_with_val.sort_by(|(_, val1), (_, val2)| {
|
||||
// TODO use total_cmp in next rust stable release
|
||||
match &order {
|
||||
Order::Desc => val2.partial_cmp(val1).unwrap_or(std::cmp::Ordering::Equal),
|
||||
Order::Asc => val1.partial_cmp(val2).unwrap_or(std::cmp::Ordering::Equal),
|
||||
}
|
||||
buckets_with_val.sort_by(|(_, val1), (_, val2)| match &order {
|
||||
Order::Desc => val2.total_cmp(val1),
|
||||
Order::Asc => val1.total_cmp(val2),
|
||||
});
|
||||
buckets = buckets_with_val
|
||||
.into_iter()
|
||||
@@ -374,6 +525,21 @@ pub struct IntermediateHistogramBucketEntry {
|
||||
pub sub_aggregation: IntermediateAggregationResults,
|
||||
}
|
||||
|
||||
impl IntermediateHistogramBucketEntry {
|
||||
pub(crate) fn into_final_bucket_entry(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<BucketEntry> {
|
||||
Ok(BucketEntry {
|
||||
key: Key::F64(self.key),
|
||||
doc_count: self.doc_count,
|
||||
sub_aggregation: self
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(req)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry {
|
||||
fn from(entry: SegmentHistogramBucketEntry) -> Self {
|
||||
IntermediateHistogramBucketEntry {
|
||||
@@ -402,6 +568,23 @@ pub struct IntermediateRangeBucketEntry {
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
impl IntermediateRangeBucketEntry {
|
||||
pub(crate) fn into_final_bucket_entry(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<RangeBucketEntry> {
|
||||
Ok(RangeBucketEntry {
|
||||
key: self.key,
|
||||
doc_count: self.doc_count,
|
||||
sub_aggregation: self
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(req)?,
|
||||
to: self.to,
|
||||
from: self.from,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// This is the term entry for a bucket, which contains a count, and optionally
|
||||
/// sub_aggregations.
|
||||
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||
|
||||
@@ -222,7 +222,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
@@ -285,6 +285,7 @@ mod tests {
|
||||
(7f64..19f64).into(),
|
||||
(19f64..20f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: iter::once((
|
||||
"stats".to_string(),
|
||||
@@ -299,7 +300,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
//!
|
||||
//! ```verbatim
|
||||
//! let agg_req: Aggregations = serde_json::from_str(json_request_string).unwrap();
|
||||
//! let collector = AggregationCollector::from_aggs(agg_req);
|
||||
//! let collector = AggregationCollector::from_aggs(agg_req, None);
|
||||
//! let searcher = reader.searcher();
|
||||
//! let agg_res = searcher.search(&term_query, &collector).unwrap_err();
|
||||
//! let json_response_string: String = &serde_json::to_string(&agg_res)?;
|
||||
@@ -68,7 +68,7 @@
|
||||
//! .into_iter()
|
||||
//! .collect();
|
||||
//!
|
||||
//! let collector = AggregationCollector::from_aggs(agg_req);
|
||||
//! let collector = AggregationCollector::from_aggs(agg_req, None);
|
||||
//!
|
||||
//! let searcher = reader.searcher();
|
||||
//! let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
|
||||
@@ -132,6 +132,7 @@
|
||||
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{
|
||||
//! field: "score".to_string(),
|
||||
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
|
||||
//! keyed: false,
|
||||
//! }),
|
||||
//! sub_aggregation: sub_agg_req_1.clone(),
|
||||
//! }),
|
||||
@@ -166,6 +167,7 @@ use std::fmt::Display;
|
||||
|
||||
pub use collector::{
|
||||
AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
|
||||
MAX_BUCKET_COUNT,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -358,7 +360,7 @@ mod tests {
|
||||
index: &Index,
|
||||
query: Option<(&str, &str)>,
|
||||
) -> crate::Result<Value> {
|
||||
let collector = AggregationCollector::from_aggs(agg_req);
|
||||
let collector = AggregationCollector::from_aggs(agg_req, None);
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
@@ -375,7 +377,7 @@ mod tests {
|
||||
searcher.search(&AllQuery, &collector)?
|
||||
};
|
||||
|
||||
// Test serialization/deserialization rountrip
|
||||
// Test serialization/deserialization roundtrip
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
Ok(res)
|
||||
}
|
||||
@@ -417,7 +419,9 @@ mod tests {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_fieldtype = crate::schema::TextOptions::default()
|
||||
.set_indexing_options(
|
||||
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
|
||||
TextFieldIndexing::default()
|
||||
.set_index_option(IndexRecordOption::Basic)
|
||||
.set_fieldnorms(false),
|
||||
)
|
||||
.set_fast()
|
||||
.set_stored();
|
||||
@@ -435,7 +439,8 @@ mod tests {
|
||||
);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
// let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
|
||||
for values in segment_and_values {
|
||||
for (i, term) in values {
|
||||
let i = *i;
|
||||
@@ -457,9 +462,11 @@ mod tests {
|
||||
let segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
if segment_ids.len() > 1 {
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(index)
|
||||
@@ -511,7 +518,7 @@ mod tests {
|
||||
"histogram": {
|
||||
"field": "score",
|
||||
"interval": 70.0,
|
||||
"offset": 3.0,
|
||||
"offset": 3.0
|
||||
},
|
||||
"aggs": {
|
||||
"bucketsL2": {
|
||||
@@ -542,16 +549,15 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let agg_res: AggregationResults = if use_distributed_collector {
|
||||
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone());
|
||||
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
AggregationResults::from_intermediate_and_req(
|
||||
searcher.search(&AllQuery, &collector).unwrap(),
|
||||
agg_req,
|
||||
)
|
||||
.unwrap()
|
||||
let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
|
||||
intermediate_agg_result
|
||||
.into_final_bucket_result(agg_req)
|
||||
.unwrap()
|
||||
} else {
|
||||
let collector = AggregationCollector::from_aggs(agg_req);
|
||||
let collector = AggregationCollector::from_aggs(agg_req, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
@@ -760,6 +766,7 @@ mod tests {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score".to_string(),
|
||||
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
@@ -770,6 +777,7 @@ mod tests {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score_f64".to_string(),
|
||||
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
@@ -780,6 +788,7 @@ mod tests {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score_i64".to_string(),
|
||||
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
@@ -788,7 +797,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
|
||||
@@ -936,6 +945,7 @@ mod tests {
|
||||
(7f64..19f64).into(),
|
||||
(19f64..20f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: sub_agg_req.clone(),
|
||||
}),
|
||||
@@ -950,6 +960,7 @@ mod tests {
|
||||
(7f64..19f64).into(),
|
||||
(19f64..20f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: sub_agg_req.clone(),
|
||||
}),
|
||||
@@ -964,6 +975,7 @@ mod tests {
|
||||
(7f64..19f64).into(),
|
||||
(19f64..20f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: sub_agg_req,
|
||||
}),
|
||||
@@ -978,16 +990,16 @@ mod tests {
|
||||
assert_eq!(field_names, vec!["text".to_string()].into_iter().collect());
|
||||
|
||||
let agg_res: AggregationResults = if use_distributed_collector {
|
||||
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone());
|
||||
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let res = searcher.search(&term_query, &collector).unwrap();
|
||||
// Test de/serialization roundtrip on intermediate_agg_result
|
||||
let res: IntermediateAggregationResults =
|
||||
serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap();
|
||||
AggregationResults::from_intermediate_and_req(res, agg_req.clone()).unwrap()
|
||||
res.into_final_bucket_result(agg_req.clone()).unwrap()
|
||||
} else {
|
||||
let collector = AggregationCollector::from_aggs(agg_req.clone());
|
||||
let collector = AggregationCollector::from_aggs(agg_req.clone(), None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&term_query, &collector).unwrap()
|
||||
@@ -1045,7 +1057,7 @@ mod tests {
|
||||
);
|
||||
|
||||
// Test empty result set
|
||||
let collector = AggregationCollector::from_aggs(agg_req);
|
||||
let collector = AggregationCollector::from_aggs(agg_req, None);
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&query_with_no_hits, &collector).unwrap();
|
||||
|
||||
@@ -1110,7 +1122,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
|
||||
@@ -1223,7 +1235,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1254,7 +1266,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1285,7 +1297,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1324,7 +1336,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1353,7 +1365,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req);
|
||||
let collector = AggregationCollector::from_aggs(agg_req, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1382,7 +1394,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req);
|
||||
let collector = AggregationCollector::from_aggs(agg_req, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1411,6 +1423,7 @@ mod tests {
|
||||
(40000f64..50000f64).into(),
|
||||
(50000f64..60000f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
@@ -1418,7 +1431,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1453,7 +1466,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1492,7 +1505,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1522,7 +1535,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
@@ -1570,6 +1583,7 @@ mod tests {
|
||||
(7000f64..20000f64).into(),
|
||||
(20000f64..60000f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: sub_agg_req_1.clone(),
|
||||
}),
|
||||
@@ -1578,7 +1592,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults =
|
||||
|
||||
@@ -4,19 +4,22 @@
|
||||
//! merging.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::rc::Rc;
|
||||
use std::sync::atomic::AtomicU32;
|
||||
|
||||
use super::agg_req::MetricAggregation;
|
||||
use super::agg_req_with_accessor::{
|
||||
AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor,
|
||||
};
|
||||
use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
|
||||
use super::collector::MAX_BUCKET_COUNT;
|
||||
use super::intermediate_agg_result::{IntermediateAggregationResults, IntermediateBucketResult};
|
||||
use super::metric::{
|
||||
AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation,
|
||||
};
|
||||
use super::VecWithNames;
|
||||
use crate::aggregation::agg_req::BucketAggregationType;
|
||||
use crate::DocId;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
pub(crate) const DOC_BLOCK_SIZE: usize = 64;
|
||||
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
|
||||
@@ -115,21 +118,22 @@ impl SegmentAggregationResultsCollector {
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
self.staged_docs[self.num_staged_docs] = doc;
|
||||
self.num_staged_docs += 1;
|
||||
if self.num_staged_docs == self.staged_docs.len() {
|
||||
self.flush_staged_docs(agg_with_accessor, false);
|
||||
self.flush_staged_docs(agg_with_accessor, false)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn flush_staged_docs(
|
||||
&mut self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
if self.num_staged_docs == 0 {
|
||||
return;
|
||||
return Ok(());
|
||||
}
|
||||
if let Some(metrics) = &mut self.metrics {
|
||||
for (collector, agg_with_accessor) in
|
||||
@@ -148,11 +152,12 @@ impl SegmentAggregationResultsCollector {
|
||||
&self.staged_docs[..self.num_staged_docs],
|
||||
agg_with_accessor,
|
||||
force_flush,
|
||||
);
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
self.num_staged_docs = 0;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -234,6 +239,7 @@ impl SegmentBucketResultCollector {
|
||||
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
|
||||
range_req,
|
||||
&req.sub_aggregation,
|
||||
&req.bucket_count,
|
||||
req.field_type,
|
||||
)?))
|
||||
}
|
||||
@@ -256,17 +262,52 @@ impl SegmentBucketResultCollector {
|
||||
doc: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
match self {
|
||||
SegmentBucketResultCollector::Range(range) => {
|
||||
range.collect_block(doc, bucket_with_accessor, force_flush);
|
||||
range.collect_block(doc, bucket_with_accessor, force_flush)?;
|
||||
}
|
||||
SegmentBucketResultCollector::Histogram(histogram) => {
|
||||
histogram.collect_block(doc, bucket_with_accessor, force_flush)
|
||||
histogram.collect_block(doc, bucket_with_accessor, force_flush)?;
|
||||
}
|
||||
SegmentBucketResultCollector::Terms(terms) => {
|
||||
terms.collect_block(doc, bucket_with_accessor, force_flush)
|
||||
terms.collect_block(doc, bucket_with_accessor, force_flush)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct BucketCount {
|
||||
/// The counter which is shared between the aggregations for one request.
|
||||
pub(crate) bucket_count: Rc<AtomicU32>,
|
||||
pub(crate) max_bucket_count: u32,
|
||||
}
|
||||
|
||||
impl Default for BucketCount {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
bucket_count: Default::default(),
|
||||
max_bucket_count: MAX_BUCKET_COUNT,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BucketCount {
|
||||
pub(crate) fn validate_bucket_count(&self) -> crate::Result<()> {
|
||||
if self.get_count() > self.max_bucket_count {
|
||||
return Err(TantivyError::InvalidArgument(
|
||||
"Aborting aggregation because too many buckets were created".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
pub(crate) fn add_count(&self, count: u32) {
|
||||
self.bucket_count
|
||||
.fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
pub(crate) fn get_count(&self) -> u32 {
|
||||
self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -271,8 +271,8 @@ impl Collector for FacetCollector {
|
||||
let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
|
||||
if facet_streamer.advance() {
|
||||
'outer: loop {
|
||||
// at the begining of this loop, facet_streamer
|
||||
// is positionned on a term that has not been processed yet.
|
||||
// at the beginning of this loop, facet_streamer
|
||||
// is positioned on a term that has not been processed yet.
|
||||
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
|
||||
match skip_result {
|
||||
SkipResult::Found => {
|
||||
|
||||
@@ -72,8 +72,7 @@ impl HistogramComputer {
|
||||
return;
|
||||
}
|
||||
let delta = value - self.min_value;
|
||||
let delta_u64 = delta.to_u64();
|
||||
let bucket_id: usize = self.divider.divide(delta_u64) as usize;
|
||||
let bucket_id: usize = self.divider.divide(delta) as usize;
|
||||
if bucket_id < self.counts.len() {
|
||||
self.counts[bucket_id] += 1;
|
||||
}
|
||||
@@ -287,7 +286,7 @@ mod tests {
|
||||
DateTime::from_primitive(
|
||||
Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?,
|
||||
),
|
||||
3600 * 24 * 365, // it is just for a unit test... sorry leap years.
|
||||
3_600_000_000 * 24 * 365, // it is just for a unit test... sorry leap years.
|
||||
10,
|
||||
);
|
||||
let week_histogram = searcher.search(&all_query, &week_histogram_collector)?;
|
||||
|
||||
@@ -69,10 +69,8 @@ pub fn test_filter_collector() -> crate::Result<()> {
|
||||
|
||||
/// Stores all of the doc ids.
|
||||
/// This collector is only used for tests.
|
||||
/// It is unusable in pr
|
||||
///
|
||||
/// actise, as it does not store
|
||||
/// the segment ordinals
|
||||
/// It is unusable in practise, as it does
|
||||
/// not store the segment ordinals
|
||||
pub struct TestCollector {
|
||||
pub compute_score: bool,
|
||||
}
|
||||
@@ -265,7 +263,7 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
|
||||
}
|
||||
}
|
||||
|
||||
fn make_test_searcher() -> crate::Result<crate::LeasedItem<Searcher>> {
|
||||
fn make_test_searcher() -> crate::Result<Searcher> {
|
||||
let schema = Schema::builder().build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
|
||||
@@ -137,7 +137,7 @@ where T: PartialOrd + Clone
|
||||
/// sorted by type `T`.
|
||||
///
|
||||
/// The implementation is based on a `BinaryHeap`.
|
||||
/// The theorical complexity for collecting the top `K` out of `n` documents
|
||||
/// The theoretical complexity for collecting the top `K` out of `n` documents
|
||||
/// is `O(n log K)`.
|
||||
pub(crate) struct TopSegmentCollector<T> {
|
||||
limit: usize,
|
||||
|
||||
@@ -79,7 +79,7 @@ where
|
||||
/// sorted by their score.
|
||||
///
|
||||
/// The implementation is based on a `BinaryHeap`.
|
||||
/// The theorical complexity for collecting the top `K` out of `n` documents
|
||||
/// The theoretical complexity for collecting the top `K` out of `n` documents
|
||||
/// is `O(n log K)`.
|
||||
///
|
||||
/// This collector guarantees a stable sorting in case of a tie on the
|
||||
@@ -283,7 +283,7 @@ impl TopDocs {
|
||||
///
|
||||
/// # See also
|
||||
///
|
||||
/// To confortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
|
||||
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
|
||||
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
|
||||
pub fn order_by_u64_field(
|
||||
self,
|
||||
|
||||
@@ -232,7 +232,7 @@ impl Index {
|
||||
}
|
||||
|
||||
/// Replace the default single thread search executor pool
|
||||
/// by a thread pool with a given number of threads.
|
||||
/// by a thread pool with as many threads as there are CPUs on the system.
|
||||
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
|
||||
let default_num_threads = num_cpus::get();
|
||||
self.set_multithread_executor(default_num_threads)
|
||||
@@ -366,8 +366,7 @@ impl Index {
|
||||
/// Create a `IndexReader` for the given index.
|
||||
///
|
||||
/// Most project should create at most one reader for a given index.
|
||||
/// This method is typically called only once per `Index` instance,
|
||||
/// over the lifetime of most problem.
|
||||
/// This method is typically called only once per `Index` instance.
|
||||
pub fn reader_builder(&self) -> IndexReaderBuilder {
|
||||
IndexReaderBuilder::new(self.clone())
|
||||
}
|
||||
|
||||
@@ -270,7 +270,7 @@ impl Default for IndexSettings {
|
||||
|
||||
/// Settings to presort the documents in an index
|
||||
///
|
||||
/// Presorting documents can greatly performance
|
||||
/// Presorting documents can greatly improve performance
|
||||
/// in some scenarios, by applying top n
|
||||
/// optimizations.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
@@ -326,7 +326,7 @@ pub struct IndexMeta {
|
||||
pub payload: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
struct UntrackedIndexMeta {
|
||||
pub segments: Vec<InnerSegmentMeta>,
|
||||
#[serde(default)]
|
||||
@@ -395,6 +395,7 @@ mod tests {
|
||||
use super::IndexMeta;
|
||||
use crate::core::index_meta::UntrackedIndexMeta;
|
||||
use crate::schema::{Schema, TEXT};
|
||||
use crate::store::ZstdCompressor;
|
||||
use crate::{IndexSettings, IndexSortByField, Order};
|
||||
|
||||
#[test]
|
||||
@@ -428,4 +429,60 @@ mod tests {
|
||||
assert_eq!(index_metas.schema, deser_meta.schema);
|
||||
assert_eq!(index_metas.opstamp, deser_meta.opstamp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_metas_zstd_compressor() {
|
||||
let schema = {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_text_field("text", TEXT);
|
||||
schema_builder.build()
|
||||
};
|
||||
let index_metas = IndexMeta {
|
||||
index_settings: IndexSettings {
|
||||
sort_by_field: Some(IndexSortByField {
|
||||
field: "text".to_string(),
|
||||
order: Order::Asc,
|
||||
}),
|
||||
docstore_compression: crate::store::Compressor::Zstd(ZstdCompressor {
|
||||
compression_level: Some(4),
|
||||
}),
|
||||
docstore_blocksize: 1_000_000,
|
||||
},
|
||||
segments: Vec::new(),
|
||||
schema,
|
||||
opstamp: 0u64,
|
||||
payload: None,
|
||||
};
|
||||
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(compression_level=4)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
|
||||
);
|
||||
|
||||
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
|
||||
assert_eq!(index_metas.index_settings, deser_meta.index_settings);
|
||||
assert_eq!(index_metas.schema, deser_meta.schema);
|
||||
assert_eq!(index_metas.opstamp, deser_meta.opstamp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_metas_invalid_comp() {
|
||||
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zsstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
|
||||
|
||||
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
|
||||
assert_eq!(
|
||||
err.to_string(),
|
||||
"unknown variant `zsstd`, expected one of `none`, `lz4`, `brotli`, `snappy`, `zstd`, \
|
||||
`zstd(compression_level=5)` at line 1 column 96"
|
||||
.to_string()
|
||||
);
|
||||
|
||||
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(bla=10)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
|
||||
|
||||
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
|
||||
assert_eq!(
|
||||
err.to_string(),
|
||||
"unknown zstd option \"bla\" at line 1 column 103".to_string()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
use crate::collector::Collector;
|
||||
@@ -6,7 +7,7 @@ use crate::core::{Executor, SegmentReader};
|
||||
use crate::query::Query;
|
||||
use crate::schema::{Document, Schema, Term};
|
||||
use crate::space_usage::SearcherSpaceUsage;
|
||||
use crate::store::StoreReader;
|
||||
use crate::store::{CacheStats, StoreReader};
|
||||
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};
|
||||
|
||||
/// Identifies the searcher generation accessed by a [Searcher].
|
||||
@@ -62,43 +63,20 @@ impl SearcherGeneration {
|
||||
///
|
||||
/// It guarantees that the `Segment` will not be removed before
|
||||
/// the destruction of the `Searcher`.
|
||||
#[derive(Clone)]
|
||||
pub struct Searcher {
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
store_readers: Vec<StoreReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
inner: Arc<SearcherInner>,
|
||||
}
|
||||
|
||||
impl Searcher {
|
||||
/// Creates a new `Searcher`
|
||||
pub(crate) fn new(
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
) -> io::Result<Searcher> {
|
||||
let store_readers: Vec<StoreReader> = segment_readers
|
||||
.iter()
|
||||
.map(SegmentReader::get_store_reader)
|
||||
.collect::<io::Result<Vec<_>>>()?;
|
||||
Ok(Searcher {
|
||||
schema,
|
||||
index,
|
||||
segment_readers,
|
||||
store_readers,
|
||||
generation,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the `Index` associated to the `Searcher`
|
||||
pub fn index(&self) -> &Index {
|
||||
&self.index
|
||||
&self.inner.index
|
||||
}
|
||||
|
||||
/// [SearcherGeneration] which identifies the version of the snapshot held by this `Searcher`.
|
||||
pub fn generation(&self) -> &SearcherGeneration {
|
||||
self.generation.as_ref()
|
||||
self.inner.generation.as_ref()
|
||||
}
|
||||
|
||||
/// Fetches a document from tantivy's store given a `DocAddress`.
|
||||
@@ -106,25 +84,39 @@ impl Searcher {
|
||||
/// The searcher uses the segment ordinal to route the
|
||||
/// the request to the right `Segment`.
|
||||
pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
|
||||
let store_reader = &self.store_readers[doc_address.segment_ord as usize];
|
||||
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
|
||||
store_reader.get(doc_address.doc_id)
|
||||
}
|
||||
|
||||
/// The cache stats for the underlying store reader.
|
||||
///
|
||||
/// Aggregates the sum for each segment store reader.
|
||||
pub fn doc_store_cache_stats(&self) -> CacheStats {
|
||||
let cache_stats: CacheStats = self
|
||||
.inner
|
||||
.store_readers
|
||||
.iter()
|
||||
.map(|reader| reader.cache_stats())
|
||||
.sum();
|
||||
cache_stats
|
||||
}
|
||||
|
||||
/// Fetches a document in an asynchronous manner.
|
||||
#[cfg(feature = "quickwit")]
|
||||
pub async fn doc_async(&self, doc_address: DocAddress) -> crate::Result<Document> {
|
||||
let store_reader = &self.store_readers[doc_address.segment_ord as usize];
|
||||
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
|
||||
store_reader.get_async(doc_address.doc_id).await
|
||||
}
|
||||
|
||||
/// Access the schema associated to the index of this searcher.
|
||||
pub fn schema(&self) -> &Schema {
|
||||
&self.schema
|
||||
&self.inner.schema
|
||||
}
|
||||
|
||||
/// Returns the overall number of documents in the index.
|
||||
pub fn num_docs(&self) -> u64 {
|
||||
self.segment_readers
|
||||
self.inner
|
||||
.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| u64::from(segment_reader.num_docs()))
|
||||
.sum::<u64>()
|
||||
@@ -134,7 +126,7 @@ impl Searcher {
|
||||
/// the given term.
|
||||
pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
|
||||
let mut total_doc_freq = 0;
|
||||
for segment_reader in &self.segment_readers {
|
||||
for segment_reader in &self.inner.segment_readers {
|
||||
let inverted_index = segment_reader.inverted_index(term.field())?;
|
||||
let doc_freq = inverted_index.doc_freq(term)?;
|
||||
total_doc_freq += u64::from(doc_freq);
|
||||
@@ -144,12 +136,12 @@ impl Searcher {
|
||||
|
||||
/// Return the list of segment readers
|
||||
pub fn segment_readers(&self) -> &[SegmentReader] {
|
||||
&self.segment_readers
|
||||
&self.inner.segment_readers
|
||||
}
|
||||
|
||||
/// Returns the segment_reader associated with the given segment_ord
|
||||
pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader {
|
||||
&self.segment_readers[segment_ord as usize]
|
||||
&self.inner.segment_readers[segment_ord as usize]
|
||||
}
|
||||
|
||||
/// Runs a query on the segment readers wrapped by the searcher.
|
||||
@@ -171,7 +163,7 @@ impl Searcher {
|
||||
query: &dyn Query,
|
||||
collector: &C,
|
||||
) -> crate::Result<C::Fruit> {
|
||||
let executor = self.index.search_executor();
|
||||
let executor = self.inner.index.search_executor();
|
||||
self.search_with_executor(query, collector, executor)
|
||||
}
|
||||
|
||||
@@ -208,17 +200,59 @@ impl Searcher {
|
||||
/// Summarize total space usage of this searcher.
|
||||
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
|
||||
let mut space_usage = SearcherSpaceUsage::new();
|
||||
for segment_reader in &self.segment_readers {
|
||||
for segment_reader in self.segment_readers() {
|
||||
space_usage.add_segment(segment_reader.space_usage()?);
|
||||
}
|
||||
Ok(space_usage)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Arc<SearcherInner>> for Searcher {
|
||||
fn from(inner: Arc<SearcherInner>) -> Self {
|
||||
Searcher { inner }
|
||||
}
|
||||
}
|
||||
|
||||
/// Holds a list of `SegmentReader`s ready for search.
|
||||
///
|
||||
/// It guarantees that the `Segment` will not be removed before
|
||||
/// the destruction of the `Searcher`.
|
||||
pub(crate) struct SearcherInner {
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
store_readers: Vec<StoreReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
}
|
||||
|
||||
impl SearcherInner {
|
||||
/// Creates a new `Searcher`
|
||||
pub(crate) fn new(
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
doc_store_cache_size: usize,
|
||||
) -> io::Result<SearcherInner> {
|
||||
let store_readers: Vec<StoreReader> = segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
|
||||
.collect::<io::Result<Vec<_>>>()?;
|
||||
|
||||
Ok(SearcherInner {
|
||||
schema,
|
||||
index,
|
||||
segment_readers,
|
||||
store_readers,
|
||||
generation,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Searcher {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let segment_ids = self
|
||||
.segment_readers
|
||||
.segment_readers()
|
||||
.iter()
|
||||
.map(SegmentReader::segment_id)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
@@ -24,7 +24,8 @@ pub enum SegmentComponent {
|
||||
Store,
|
||||
/// Temporary storage of the documents, before streamed to `Store`.
|
||||
TempStore,
|
||||
/// Bitset describing which document of the segment is deleted.
|
||||
/// Bitset describing which document of the segment is alive.
|
||||
/// (It was representing deleted docs but changed to represent alive docs from v0.17)
|
||||
Delete,
|
||||
}
|
||||
|
||||
|
||||
@@ -128,13 +128,14 @@ impl SegmentReader {
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn fieldnorms_readers(&self) -> &FieldNormReaders {
|
||||
#[doc(hidden)]
|
||||
pub fn fieldnorms_readers(&self) -> &FieldNormReaders {
|
||||
&self.fieldnorm_readers
|
||||
}
|
||||
|
||||
/// Accessor to the segment's `StoreReader`.
|
||||
pub fn get_store_reader(&self) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone())
|
||||
pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone(), cache_size)
|
||||
}
|
||||
|
||||
/// Open a new segment for reading.
|
||||
@@ -175,9 +176,9 @@ impl SegmentReader {
|
||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
||||
|
||||
let original_bitset = if segment.meta().has_deletes() {
|
||||
let delete_file_slice = segment.open_read(SegmentComponent::Delete)?;
|
||||
let delete_data = delete_file_slice.read_bytes()?;
|
||||
Some(AliveBitSet::open(delete_data))
|
||||
let alive_doc_file_slice = segment.open_read(SegmentComponent::Delete)?;
|
||||
let alive_doc_data = alive_doc_file_slice.read_bytes()?;
|
||||
Some(AliveBitSet::open(alive_doc_data))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
@@ -215,7 +216,7 @@ impl SegmentReader {
|
||||
/// term dictionary associated to a specific field,
|
||||
/// and opening the posting list associated to any term.
|
||||
///
|
||||
/// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
|
||||
/// If the field is not marked as index, a warn is logged and an empty `InvertedIndexReader`
|
||||
/// is returned.
|
||||
/// Similarly if the field is marked as indexed but no term has been indexed for the given
|
||||
/// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
|
||||
@@ -295,8 +296,7 @@ impl SegmentReader {
|
||||
self.delete_opstamp
|
||||
}
|
||||
|
||||
/// Returns the bitset representing
|
||||
/// the documents that have been deleted.
|
||||
/// Returns the bitset representing the alive `DocId`s.
|
||||
pub fn alive_bitset(&self) -> Option<&AliveBitSet> {
|
||||
self.alive_bitset_opt.as_ref()
|
||||
}
|
||||
@@ -305,7 +305,7 @@ impl SegmentReader {
|
||||
/// as deleted.
|
||||
pub fn is_deleted(&self, doc: DocId) -> bool {
|
||||
self.alive_bitset()
|
||||
.map(|delete_set| delete_set.is_deleted(doc))
|
||||
.map(|alive_bitset| alive_bitset.is_deleted(doc))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
@@ -327,7 +327,7 @@ impl SegmentReader {
|
||||
self.positions_composite.space_usage(),
|
||||
self.fast_fields_readers.space_usage(),
|
||||
self.fieldnorm_readers.space_usage(),
|
||||
self.get_store_reader()?.space_usage(),
|
||||
self.get_store_reader(0)?.space_usage(),
|
||||
self.alive_bitset_opt
|
||||
.as_ref()
|
||||
.map(AliveBitSet::space_usage)
|
||||
|
||||
@@ -38,7 +38,7 @@ impl BinarySerializable for FileAddr {
|
||||
/// A `CompositeWrite` is used to write a `CompositeFile`.
|
||||
pub struct CompositeWrite<W = WritePtr> {
|
||||
write: CountingWriter<W>,
|
||||
offsets: HashMap<FileAddr, u64>,
|
||||
offsets: Vec<(FileAddr, u64)>,
|
||||
}
|
||||
|
||||
impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
@@ -47,7 +47,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
pub fn wrap(w: W) -> CompositeWrite<W> {
|
||||
CompositeWrite {
|
||||
write: CountingWriter::wrap(w),
|
||||
offsets: HashMap::new(),
|
||||
offsets: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,8 +60,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
|
||||
let offset = self.write.written_bytes();
|
||||
let file_addr = FileAddr::new(field, idx);
|
||||
assert!(!self.offsets.contains_key(&file_addr));
|
||||
self.offsets.insert(file_addr, offset);
|
||||
assert!(!self.offsets.iter().any(|el| el.0 == file_addr));
|
||||
self.offsets.push((file_addr, offset));
|
||||
&mut self.write
|
||||
}
|
||||
|
||||
@@ -73,16 +73,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
let footer_offset = self.write.written_bytes();
|
||||
VInt(self.offsets.len() as u64).serialize(&mut self.write)?;
|
||||
|
||||
let mut offset_fields: Vec<_> = self
|
||||
.offsets
|
||||
.iter()
|
||||
.map(|(file_addr, offset)| (*offset, *file_addr))
|
||||
.collect();
|
||||
|
||||
offset_fields.sort();
|
||||
|
||||
let mut prev_offset = 0;
|
||||
for (offset, file_addr) in offset_fields {
|
||||
for (file_addr, offset) in self.offsets {
|
||||
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
|
||||
file_addr.serialize(&mut self.write)?;
|
||||
prev_offset = offset;
|
||||
@@ -106,6 +98,14 @@ pub struct CompositeFile {
|
||||
offsets_index: HashMap<FileAddr, Range<usize>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for CompositeFile {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("CompositeFile")
|
||||
.field("offsets_index", &self.offsets_index)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl CompositeFile {
|
||||
/// Opens a composite file stored in a given
|
||||
/// `FileSlice`.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::io::Write;
|
||||
use std::marker::{Send, Sync};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::{fmt, io, thread};
|
||||
|
||||
@@ -62,7 +63,12 @@ impl Drop for DirectoryLockGuard {
|
||||
|
||||
enum TryAcquireLockError {
|
||||
FileExists,
|
||||
IoError(io::Error),
|
||||
IoError(Arc<io::Error>),
|
||||
}
|
||||
impl From<io::Error> for TryAcquireLockError {
|
||||
fn from(io_error: io::Error) -> Self {
|
||||
Self::IoError(Arc::new(io_error))
|
||||
}
|
||||
}
|
||||
|
||||
fn try_acquire_lock(
|
||||
@@ -73,7 +79,7 @@ fn try_acquire_lock(
|
||||
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
|
||||
OpenWriteError::IoError { io_error, .. } => TryAcquireLockError::IoError(io_error),
|
||||
})?;
|
||||
write.flush().map_err(TryAcquireLockError::IoError)?;
|
||||
write.flush().map_err(TryAcquireLockError::from)?;
|
||||
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
|
||||
directory: directory.box_clone(),
|
||||
path: filepath.to_owned(),
|
||||
@@ -105,7 +111,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
///
|
||||
/// Users of `Directory` should typically call `Directory::open_read(...)`,
|
||||
/// while `Directory` implementor should implement `get_file_handle()`.
|
||||
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;
|
||||
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError>;
|
||||
|
||||
/// Once a virtual file is open, its data may not
|
||||
/// change.
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
use crate::Version;
|
||||
|
||||
/// Error while trying to acquire a directory lock.
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug, Clone, Error)]
|
||||
pub enum LockError {
|
||||
/// Failed to acquired a lock as it is already held by another
|
||||
/// client.
|
||||
@@ -16,11 +17,18 @@ pub enum LockError {
|
||||
LockBusy,
|
||||
/// Trying to acquire a lock failed with an `IoError`
|
||||
#[error("Failed to acquire the lock due to an io:Error.")]
|
||||
IoError(io::Error),
|
||||
IoError(Arc<io::Error>),
|
||||
}
|
||||
|
||||
impl LockError {
|
||||
/// Wraps an io error.
|
||||
pub fn wrap_io_error(io_error: io::Error) -> Self {
|
||||
Self::IoError(Arc::new(io_error))
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that may occur when opening a directory
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug, Clone, Error)]
|
||||
pub enum OpenDirectoryError {
|
||||
/// The underlying directory does not exists.
|
||||
#[error("Directory does not exist: '{0}'.")]
|
||||
@@ -30,12 +38,12 @@ pub enum OpenDirectoryError {
|
||||
NotADirectory(PathBuf),
|
||||
/// Failed to create a temp directory.
|
||||
#[error("Failed to create a temporary directory: '{0}'.")]
|
||||
FailedToCreateTempDir(io::Error),
|
||||
FailedToCreateTempDir(Arc<io::Error>),
|
||||
/// IoError
|
||||
#[error("IoError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
|
||||
IoError {
|
||||
/// underlying io Error.
|
||||
io_error: io::Error,
|
||||
io_error: Arc<io::Error>,
|
||||
/// directory we tried to open.
|
||||
directory_path: PathBuf,
|
||||
},
|
||||
@@ -45,14 +53,14 @@ impl OpenDirectoryError {
|
||||
/// Wraps an io error.
|
||||
pub fn wrap_io_error(io_error: io::Error, directory_path: PathBuf) -> Self {
|
||||
Self::IoError {
|
||||
io_error,
|
||||
io_error: Arc::new(io_error),
|
||||
directory_path,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that may occur when starting to write in a file
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug, Clone, Error)]
|
||||
pub enum OpenWriteError {
|
||||
/// Our directory is WORM, writing an existing file is forbidden.
|
||||
/// Checkout the `Directory` documentation.
|
||||
@@ -63,7 +71,7 @@ pub enum OpenWriteError {
|
||||
#[error("IoError '{io_error:?}' while opening file for write: '{filepath}'.")]
|
||||
IoError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
io_error: Arc<io::Error>,
|
||||
/// File path of the file that tantivy failed to open for write.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
@@ -72,11 +80,15 @@ pub enum OpenWriteError {
|
||||
impl OpenWriteError {
|
||||
/// Wraps an io error.
|
||||
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||
Self::IoError { io_error, filepath }
|
||||
Self::IoError {
|
||||
io_error: Arc::new(io_error),
|
||||
filepath,
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Type of index incompatibility between the library and the index found on disk
|
||||
/// Used to catch and provide a hint to solve this incompatibility issue
|
||||
#[derive(Clone)]
|
||||
pub enum Incompatibility {
|
||||
/// This library cannot decompress the index found on disk
|
||||
CompressionMismatch {
|
||||
@@ -135,7 +147,7 @@ impl fmt::Debug for Incompatibility {
|
||||
}
|
||||
|
||||
/// Error that may occur when accessing a file read
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug, Clone, Error)]
|
||||
pub enum OpenReadError {
|
||||
/// The file does not exists.
|
||||
#[error("Files does not exists: {0:?}")]
|
||||
@@ -146,7 +158,7 @@ pub enum OpenReadError {
|
||||
)]
|
||||
IoError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
io_error: Arc<io::Error>,
|
||||
/// File path of the file that tantivy failed to open for read.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
@@ -158,11 +170,14 @@ pub enum OpenReadError {
|
||||
impl OpenReadError {
|
||||
/// Wraps an io error.
|
||||
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
|
||||
Self::IoError { io_error, filepath }
|
||||
Self::IoError {
|
||||
io_error: Arc::new(io_error),
|
||||
filepath,
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Error that may occur when trying to delete a file
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug, Clone, Error)]
|
||||
pub enum DeleteError {
|
||||
/// The file does not exists.
|
||||
#[error("File does not exists: '{0}'.")]
|
||||
@@ -172,7 +187,7 @@ pub enum DeleteError {
|
||||
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
|
||||
IoError {
|
||||
/// The underlying `io::Error`.
|
||||
io_error: io::Error,
|
||||
io_error: Arc<io::Error>,
|
||||
/// File path of the file that tantivy failed to delete.
|
||||
filepath: PathBuf,
|
||||
},
|
||||
|
||||
@@ -54,7 +54,7 @@ impl<B> From<B> for FileSlice
where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync
{
    fn from(bytes: B) -> FileSlice {
        FileSlice::new(Box::new(OwnedBytes::new(bytes)))
        FileSlice::new(Arc::new(OwnedBytes::new(bytes)))
    }
}

@@ -75,7 +75,7 @@ impl fmt::Debug for FileSlice {

impl FileSlice {
    /// Wraps a FileHandle.
    pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
    pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {
        let num_bytes = file_handle.len();
        FileSlice::new_with_num_bytes(file_handle, num_bytes)
    }
@@ -83,9 +83,9 @@ impl FileSlice {
    /// Wraps a FileHandle.
    #[doc(hidden)]
    #[must_use]
    pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
    pub fn new_with_num_bytes(file_handle: Arc<dyn FileHandle>, num_bytes: usize) -> Self {
        FileSlice {
            data: Arc::from(file_handle),
            data: file_handle,
            range: 0..num_bytes,
        }
    }
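
The switch from Box<dyn FileHandle> to Arc<dyn FileHandle> matters because an Arc trait object can be cloned and shared by several slices without copying the underlying bytes, which a Box cannot do. A minimal sketch of that property with a stand-in trait (names below are illustrative only, not tantivy's API):

use std::sync::Arc;

// Stand-in for the `FileHandle` trait object; illustrative only.
trait Handle: Send + Sync {
    fn len(&self) -> usize;
}

impl Handle for Vec<u8> {
    fn len(&self) -> usize {
        self.as_slice().len()
    }
}

fn main() {
    let handle: Arc<dyn Handle> = Arc::new(vec![1u8, 2, 3]);
    // Cloning an Arc only bumps a reference count; two slices can now point
    // at the same handle, which is the point of the API change.
    let for_slice_a = Arc::clone(&handle);
    let for_slice_b = Arc::clone(&handle);
    assert_eq!(for_slice_a.len() + for_slice_b.len(), 6);
}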
|
||||
@@ -235,6 +235,7 @@ impl FileHandle for OwnedBytes {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::HasLen;
|
||||
|
||||
@@ -242,7 +243,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_file_slice() -> io::Result<()> {
|
||||
let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
|
||||
let file_slice = FileSlice::new(Arc::new(b"abcdef".as_ref()));
|
||||
assert_eq!(file_slice.len(), 6);
|
||||
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
|
||||
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
|
||||
@@ -286,7 +287,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_slice_simple_read() -> io::Result<()> {
|
||||
let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||
let slice = FileSlice::new(Arc::new(&b"abcdef"[..]));
|
||||
assert_eq!(slice.len(), 6);
|
||||
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
|
||||
assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd");
|
||||
@@ -295,7 +296,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_slice_read_slice() -> io::Result<()> {
|
||||
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
|
||||
assert_eq!(slice_deref.read_bytes_slice(1..4)?.as_ref(), b"bcd");
|
||||
Ok(())
|
||||
}
|
||||
@@ -303,7 +304,7 @@ mod tests {
|
||||
#[test]
|
||||
#[should_panic(expected = "end of requested range exceeds the fileslice length (10 > 6)")]
|
||||
fn test_slice_read_slice_invalid_range_exceeds() {
|
||||
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
|
||||
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
|
||||
assert_eq!(
|
||||
slice_deref.read_bytes_slice(0..10).unwrap().as_ref(),
|
||||
b"bcd"
|
||||
|
||||
@@ -156,6 +156,7 @@ impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
|
||||
mod tests {
|
||||
|
||||
use std::io;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::BinarySerializable;
|
||||
|
||||
@@ -168,7 +169,7 @@ mod tests {
|
||||
let footer = Footer::new(123);
|
||||
footer.append_footer(&mut buf).unwrap();
|
||||
let owned_bytes = OwnedBytes::new(buf);
|
||||
let fileslice = FileSlice::new(Box::new(owned_bytes));
|
||||
let fileslice = FileSlice::new(Arc::new(owned_bytes));
|
||||
let (footer_deser, _body) = Footer::extract_footer(fileslice).unwrap();
|
||||
assert_eq!(footer_deser.crc(), footer.crc());
|
||||
}
|
||||
@@ -181,7 +182,7 @@ mod tests {
|
||||
|
||||
let owned_bytes = OwnedBytes::new(buf);
|
||||
|
||||
let fileslice = FileSlice::new(Box::new(owned_bytes));
|
||||
let fileslice = FileSlice::new(Arc::new(owned_bytes));
|
||||
let err = Footer::extract_footer(fileslice).unwrap_err();
|
||||
assert_eq!(
|
||||
err.to_string(),
|
||||
@@ -198,7 +199,7 @@ mod tests {
|
||||
|
||||
let owned_bytes = OwnedBytes::new(buf);
|
||||
|
||||
let fileslice = FileSlice::new(Box::new(owned_bytes));
|
||||
let fileslice = FileSlice::new(Arc::new(owned_bytes));
|
||||
let err = Footer::extract_footer(fileslice).unwrap_err();
|
||||
assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
|
||||
assert_eq!(
|
||||
@@ -217,7 +218,7 @@ mod tests {
|
||||
|
||||
let owned_bytes = OwnedBytes::new(buf);
|
||||
|
||||
let fileslice = FileSlice::new(Box::new(owned_bytes));
|
||||
let fileslice = FileSlice::new(Arc::new(owned_bytes));
|
||||
let err = Footer::extract_footer(fileslice).unwrap_err();
|
||||
assert_eq!(err.kind(), io::ErrorKind::InvalidData);
|
||||
assert_eq!(
|
||||
|
||||
@@ -242,16 +242,13 @@ impl ManagedDirectory {
|
||||
/// Verify checksum of a managed file
|
||||
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
|
||||
let reader = self.directory.open_read(path)?;
|
||||
let (footer, data) =
|
||||
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IoError {
|
||||
io_error,
|
||||
filepath: path.to_path_buf(),
|
||||
})?;
|
||||
let (footer, data) = Footer::extract_footer(reader)
|
||||
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
|
||||
let bytes = data
|
||||
.read_bytes()
|
||||
.map_err(|io_error| OpenReadError::IoError {
|
||||
io_error: Arc::new(io_error),
|
||||
filepath: path.to_path_buf(),
|
||||
io_error,
|
||||
})?;
|
||||
let mut hasher = Hasher::new();
|
||||
hasher.update(bytes.as_slice());
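
For context on the checksum path above: the footer stores a CRC over the file body, and validate_checksum recomputes it with the same Hasher. A hedged sketch of that recomputation, assuming the Hasher in this hunk is crc32fast::Hasher (an assumption based on the new/update usage pattern; add crc32fast to Cargo.toml to run it):

use crc32fast::Hasher; // assumption: tantivy's `Hasher` here is crc32fast's

fn main() {
    let body = b"some serialized segment data";
    let mut hasher = Hasher::new();
    hasher.update(body);
    let crc: u32 = hasher.finalize();
    // validate_checksum would compare this value against the crc recorded in the footer.
    println!("crc32 = {crc:#010x}");
}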
|
||||
@@ -272,9 +269,9 @@ impl ManagedDirectory {
|
||||
}
|
||||
|
||||
impl Directory for ManagedDirectory {
|
||||
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
|
||||
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> {
|
||||
let file_slice = self.open_read(path)?;
|
||||
Ok(Box::new(file_slice))
|
||||
Ok(Arc::new(file_slice))
|
||||
}
|
||||
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
|
||||
@@ -174,7 +174,8 @@ impl MmapDirectory {
|
||||
/// This is mostly useful to test the MmapDirectory itself.
|
||||
/// For your unit tests, prefer the RamDirectory.
|
||||
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
|
||||
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
|
||||
let tempdir = TempDir::new()
|
||||
.map_err(|io_err| OpenDirectoryError::FailedToCreateTempDir(Arc::new(io_err)))?;
|
||||
Ok(MmapDirectory::new(
|
||||
tempdir.path().to_path_buf(),
|
||||
Some(tempdir),
|
||||
@@ -309,7 +310,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
|
||||
}
|
||||
|
||||
impl Directory for MmapDirectory {
|
||||
fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> {
|
||||
fn get_file_handle(&self, path: &Path) -> result::Result<Arc<dyn FileHandle>, OpenReadError> {
|
||||
debug!("Open Read {:?}", path);
|
||||
let full_path = self.resolve_path(path);
|
||||
|
||||
@@ -330,7 +331,7 @@ impl Directory for MmapDirectory {
|
||||
})
|
||||
.unwrap_or_else(OwnedBytes::empty);
|
||||
|
||||
Ok(Box::new(owned_bytes))
|
||||
Ok(Arc::new(owned_bytes))
|
||||
}
|
||||
|
||||
/// Any entry associated to the path in the mmap will be
|
||||
@@ -342,7 +343,7 @@ impl Directory for MmapDirectory {
|
||||
DeleteError::FileDoesNotExist(path.to_owned())
|
||||
} else {
|
||||
DeleteError::IoError {
|
||||
io_error: e,
|
||||
io_error: Arc::new(e),
|
||||
filepath: path.to_path_buf(),
|
||||
}
|
||||
}
|
||||
@@ -422,9 +423,9 @@ impl Directory for MmapDirectory {
|
||||
.write(true)
|
||||
.create(true) //< if the file does not exist yet, create it.
|
||||
.open(&full_path)
|
||||
.map_err(LockError::IoError)?;
|
||||
.map_err(LockError::wrap_io_error)?;
|
||||
if lock.is_blocking {
|
||||
file.lock_exclusive().map_err(LockError::IoError)?;
|
||||
file.lock_exclusive().map_err(LockError::wrap_io_error)?;
|
||||
} else {
|
||||
file.try_lock_exclusive().map_err(|_| LockError::LockBusy)?
|
||||
}
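
The lock-acquisition change above only swaps the error mapping; the underlying advisory file-lock calls stay the same. A hedged sketch of the blocking/non-blocking pattern, assuming lock_exclusive/try_lock_exclusive come from the fs2 crate's FileExt (the actual dependency in this branch may differ):

use std::fs::OpenOptions;

use fs2::FileExt; // assumption: provides lock_exclusive / try_lock_exclusive / unlock

fn main() -> std::io::Result<()> {
    let file = OpenOptions::new()
        .write(true)
        .create(true) // create the lock file if it does not exist yet
        .open("/tmp/example.lock")?;
    // Blocking variant: waits until no other process holds the lock.
    file.lock_exclusive()?;
    // The non-blocking variant is `file.try_lock_exclusive()`, which fails
    // immediately when the lock is busy (mapped to LockError::LockBusy above).
    file.unlock()?;
    Ok(())
}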
|
||||
|
||||
@@ -160,9 +160,9 @@ impl RamDirectory {
|
||||
}
|
||||
|
||||
impl Directory for RamDirectory {
|
||||
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
|
||||
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> {
|
||||
let file_slice = self.open_read(path)?;
|
||||
Ok(Box::new(file_slice))
|
||||
Ok(Arc::new(file_slice))
|
||||
}
|
||||
|
||||
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
|
||||
@@ -172,7 +172,7 @@ impl Directory for RamDirectory {
|
||||
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
|
||||
fail_point!("RamDirectory::delete", |_| {
|
||||
Err(DeleteError::IoError {
|
||||
io_error: io::Error::from(io::ErrorKind::Other),
|
||||
io_error: Arc::new(io::Error::from(io::ErrorKind::Other)),
|
||||
filepath: path.to_path_buf(),
|
||||
})
|
||||
});
|
||||
@@ -184,7 +184,7 @@ impl Directory for RamDirectory {
|
||||
.fs
|
||||
.read()
|
||||
.map_err(|e| OpenReadError::IoError {
|
||||
io_error: io::Error::new(io::ErrorKind::Other, e.to_string()),
|
||||
io_error: Arc::new(io::Error::new(io::ErrorKind::Other, e.to_string())),
|
||||
filepath: path.to_path_buf(),
|
||||
})?
|
||||
.exists(path))
|
||||
@@ -208,7 +208,7 @@ impl Directory for RamDirectory {
|
||||
self.open_read(path)?
|
||||
.read_bytes()
|
||||
.map_err(|io_error| OpenReadError::IoError {
|
||||
io_error,
|
||||
io_error: Arc::new(io_error),
|
||||
filepath: path.to_path_buf(),
|
||||
})?;
|
||||
Ok(bytes.as_slice().to_owned())
|
||||
|
||||
@@ -7,7 +7,7 @@ use crate::DocId;
|
||||
///
|
||||
/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
|
||||
/// to compare [u32; 4].
|
||||
pub const TERMINATED: DocId = std::i32::MAX as u32;
|
||||
pub const TERMINATED: DocId = i32::MAX as u32;
|
||||
|
||||
/// Represents an iterable set of sorted doc ids.
|
||||
pub trait DocSet: Send {
|
||||
@@ -24,7 +24,6 @@ pub trait DocSet: Send {
|
||||
///
|
||||
/// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
|
||||
/// be returned.
|
||||
/// TODO Test existing docsets.
|
||||
fn advance(&mut self) -> DocId;
|
||||
|
||||
/// Advances the DocSet forward until reaching the target, or going to the
|
||||
|
||||
14 src/error.rs
@@ -1,7 +1,7 @@
|
||||
//! Definition of Tantivy's errors and results.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::PoisonError;
|
||||
use std::sync::{Arc, PoisonError};
|
||||
use std::{fmt, io};
|
||||
|
||||
use thiserror::Error;
|
||||
@@ -15,6 +15,7 @@ use crate::{query, schema};
|
||||
/// Represents a `DataCorruption` error.
|
||||
///
|
||||
/// When facing data corruption, tantivy actually panics or returns this error.
|
||||
#[derive(Clone)]
|
||||
pub struct DataCorruption {
|
||||
filepath: Option<PathBuf>,
|
||||
comment: String,
|
||||
@@ -50,7 +51,7 @@ impl fmt::Debug for DataCorruption {
|
||||
}
|
||||
|
||||
/// The library's error enum
|
||||
#[derive(Debug, Error)]
|
||||
#[derive(Debug, Clone, Error)]
|
||||
pub enum TantivyError {
|
||||
/// Failed to open the directory.
|
||||
#[error("Failed to open the directory: '{0:?}'")]
|
||||
@@ -69,7 +70,7 @@ pub enum TantivyError {
|
||||
LockFailure(LockError, Option<String>),
|
||||
/// IO Error.
|
||||
#[error("An IO error occurred: '{0}'")]
|
||||
IoError(#[from] io::Error),
|
||||
IoError(Arc<io::Error>),
|
||||
/// Data corruption.
|
||||
#[error("Data corrupted: '{0:?}'")]
|
||||
DataCorruption(DataCorruption),
|
||||
@@ -125,6 +126,11 @@ impl From<AsyncIoError> for TantivyError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for TantivyError {
|
||||
fn from(io_err: io::Error) -> TantivyError {
|
||||
TantivyError::IoError(Arc::new(io_err))
|
||||
}
|
||||
}
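
With #[from] removed from the IoError variant, the hand-written From<io::Error> impl above is what keeps the ? operator ergonomic for callers. A self-contained sketch of the same pattern (types renamed so this is clearly an illustration, not tantivy's exact API):

use std::io;
use std::sync::Arc;

// Illustrative types, not tantivy's.
#[derive(Debug, Clone)]
enum MyError {
    IoError(Arc<io::Error>),
}

impl From<io::Error> for MyError {
    fn from(io_err: io::Error) -> MyError {
        MyError::IoError(Arc::new(io_err))
    }
}

// `?` on an io::Result keeps working because of the From impl above.
fn read_len(path: &str) -> Result<u64, MyError> {
    let metadata = std::fs::metadata(path)?;
    Ok(metadata.len())
}

fn main() {
    match read_len("/definitely/not/a/real/path") {
        Ok(len) => println!("{len}"),
        Err(err) => println!("got a cloneable error: {:?}", err.clone()),
    }
}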
|
||||
impl From<DataCorruption> for TantivyError {
|
||||
fn from(data_corruption: DataCorruption) -> TantivyError {
|
||||
TantivyError::DataCorruption(data_corruption)
|
||||
@@ -179,7 +185,7 @@ impl From<schema::DocParsingError> for TantivyError {
|
||||
|
||||
impl From<serde_json::Error> for TantivyError {
|
||||
fn from(error: serde_json::Error) -> TantivyError {
|
||||
TantivyError::IoError(error.into())
|
||||
TantivyError::IoError(Arc::new(error.into()))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,8 +6,6 @@ pub use self::writer::BytesFastFieldWriter;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ops::Deref;
|
||||
|
||||
use crate::query::TermQuery;
|
||||
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value, FAST, INDEXED, STORED};
|
||||
use crate::{DocAddress, DocSet, Index, Searcher, Term};
|
||||
@@ -37,9 +35,7 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_index_for_test<T: Into<BytesOptions>>(
|
||||
byte_options: T,
|
||||
) -> crate::Result<impl Deref<Target = Searcher>> {
|
||||
fn create_index_for_test<T: Into<BytesOptions>>(byte_options: T) -> crate::Result<Searcher> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
|
||||
let schema = schema_builder.build();
|
||||
@@ -86,7 +82,7 @@ mod tests {
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||
let term_weight = term_query.specialized_weight(&*searcher, true)?;
|
||||
let term_weight = term_query.specialized_weight(&searcher, true)?;
|
||||
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
|
||||
assert_eq!(term_scorer.doc(), 0u32);
|
||||
Ok(())
|
||||
@@ -99,7 +95,7 @@ mod tests {
|
||||
let field = searcher.schema().get_field("string_bytes").unwrap();
|
||||
let term = Term::from_field_bytes(field, b"lucene".as_ref());
|
||||
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
|
||||
let term_weight_err = term_query.specialized_weight(&*searcher, false);
|
||||
let term_weight_err = term_query.specialized_weight(&searcher, false);
|
||||
assert!(matches!(
|
||||
term_weight_err,
|
||||
Err(crate::TantivyError::SchemaError(_))
|
||||
|
||||
@@ -52,6 +52,11 @@ impl BytesFastFieldReader {
|
||||
}
|
||||
|
||||
impl MultiValueLength for BytesFastFieldReader {
|
||||
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
|
||||
let (start, stop) = self.range(doc_id);
|
||||
start as u64..stop as u64
|
||||
}
|
||||
|
||||
fn get_len(&self, doc_id: DocId) -> u64 {
|
||||
self.num_bytes(doc_id) as u64
|
||||
}
|
||||
|
||||
241 src/fastfield/fast_value.rs (new file)
@@ -0,0 +1,241 @@
use std::net::{IpAddr, Ipv6Addr};

use crate::schema::{Cardinality, FieldType, Type};
use crate::DateTime;

pub fn ip_to_u128(ip_addr: IpAddr) -> u128 {
    let ip_addr_v6: Ipv6Addr = match ip_addr {
        IpAddr::V4(v4) => v4.to_ipv6_mapped(),
        IpAddr::V6(v6) => v6,
    };
    u128::from_be_bytes(ip_addr_v6.octets())
}
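
A quick usage sketch of this mapping: an IPv4 address is first converted to its IPv4-mapped IPv6 form, so both address families share one u128 key space, and the conversion is reversible via IpAddr::from (only std types are used below):

use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

fn main() {
    // 127.0.0.1 maps to ::ffff:127.0.0.1: the low 32 bits hold the v4 address
    // and bits 32..48 are all ones.
    let v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
    let mapped: Ipv6Addr = match v4 {
        IpAddr::V4(v4) => v4.to_ipv6_mapped(),
        IpAddr::V6(v6) => v6,
    };
    let as_u128 = u128::from_be_bytes(mapped.octets());
    assert_eq!(as_u128, 0x0000_0000_0000_0000_0000_ffff_7f00_0001);
    // The reverse direction, as used by `FastValueU128::from_u128` below:
    let back = IpAddr::from(as_u128.to_be_bytes());
    assert_eq!(back, IpAddr::V6(mapped));
}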
|
||||
|
||||
/// Trait for large types that are allowed for fast fields: u128, IpAddr
|
||||
pub trait FastValueU128: Clone + Copy + Send + Sync + PartialOrd + 'static {
|
||||
/// Converts a value from u128
|
||||
///
|
||||
/// Internally all fast field values are encoded as u128.
|
||||
fn from_u128(val: u128) -> Self;
|
||||
|
||||
/// Converts a value to u128.
|
||||
///
|
||||
/// Internally all fast field values are encoded as u128.
|
||||
fn to_u128(&self) -> u128;
|
||||
|
||||
/// Cast value to `u128`.
|
||||
/// The value is just reinterpreted in memory.
|
||||
fn as_u128(&self) -> u128;
|
||||
|
||||
/// Returns the `schema::Type` for this FastValue.
|
||||
fn to_type() -> Type;
|
||||
|
||||
/// Build a default value. This default value is never used, so the value does not
|
||||
/// really matter.
|
||||
fn make_zero() -> Self {
|
||||
Self::from_u128(0u128)
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValueU128 for u128 {
|
||||
fn from_u128(val: u128) -> Self {
|
||||
val
|
||||
}
|
||||
|
||||
fn to_u128(&self) -> u128 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn as_u128(&self) -> u128 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::U128
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValueU128 for IpAddr {
|
||||
fn from_u128(val: u128) -> Self {
|
||||
IpAddr::from(val.to_be_bytes())
|
||||
}
|
||||
|
||||
fn to_u128(&self) -> u128 {
|
||||
ip_to_u128(*self)
|
||||
}
|
||||
|
||||
fn as_u128(&self) -> u128 {
|
||||
ip_to_u128(*self)
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::Ip
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for types that are allowed for fast fields:
|
||||
/// (u64, i64 and f64, bool, DateTime).
|
||||
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static {
|
||||
/// Converts a value from u64
|
||||
///
|
||||
/// Internally all fast field values are encoded as u64.
|
||||
/// **Note: To be used for converting encoded Term, Posting values.**
|
||||
fn from_u64(val: u64) -> Self;
|
||||
|
||||
/// Converts a value to u64.
|
||||
///
|
||||
/// Internally all fast field values are encoded as u64.
|
||||
fn to_u64(&self) -> u64;
|
||||
|
||||
/// Returns the fast field cardinality that can be extracted from the given
|
||||
/// `FieldType`.
|
||||
///
|
||||
/// If the type is not a fast field, `None` is returned.
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality>;
|
||||
|
||||
/// Cast value to `u64`.
|
||||
/// The value is just reinterpreted in memory.
|
||||
fn as_u64(&self) -> u64;
|
||||
|
||||
/// Build a default value. This default value is never used, so the value does not
|
||||
/// really matter.
|
||||
fn make_zero() -> Self {
|
||||
Self::from_u64(0i64.to_u64())
|
||||
}
|
||||
|
||||
/// Returns the `schema::Type` for this FastValue.
|
||||
fn to_type() -> Type;
|
||||
}
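
A sketch of the kind of generic code this trait enables: one routine can handle u64, i64, f64, bool and DateTime columns by going through the u64 representation, relying on from_u64/to_u64 being order-preserving. It assumes the re-export tantivy::fastfield::FastValue introduced by this branch compiles against your version:

use tantivy::fastfield::FastValue; // assumption: the re-export from this diff

// Maximum over any fast-field value type, computed in u64 space.
fn max_value<T: FastValue>(values: &[T]) -> Option<T> {
    values.iter().map(|v| v.to_u64()).max().map(T::from_u64)
}

fn main() {
    assert_eq!(max_value(&[1i64, -5, 3]), Some(3i64));
    assert_eq!(max_value(&[1.5f64, -2.0, 0.25]), Some(1.5));
}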
|
||||
|
||||
impl FastValue for u64 {
|
||||
fn from_u64(val: u64) -> Self {
|
||||
val
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
FieldType::Facet(_) => Some(Cardinality::MultiValues),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::U64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for i64 {
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_i64(val)
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
common::i64_to_u64(*self)
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::I64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self as u64
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::I64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for f64 {
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_f64(val)
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
common::f64_to_u64(*self)
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::F64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.to_bits()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::F64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for bool {
|
||||
fn from_u64(val: u64) -> Self {
|
||||
val != 0u64
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
match self {
|
||||
false => 0,
|
||||
true => 1,
|
||||
}
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::Bool(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self as u64
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::Bool
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for DateTime {
|
||||
/// Converts a timestamp microseconds into DateTime.
|
||||
///
|
||||
/// **Note the timestamps is expected to be in microseconds.**
|
||||
fn from_u64(timestamp_micros_u64: u64) -> Self {
|
||||
let timestamp_micros = i64::from_u64(timestamp_micros_u64);
|
||||
Self::from_timestamp_micros(timestamp_micros)
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
common::i64_to_u64(self.into_timestamp_micros())
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::Date(ref options) => options.get_fastfield_cardinality(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.into_timestamp_micros().as_u64()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::Date
|
||||
}
|
||||
}
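
The DateTime impl above stores microsecond timestamps through common::i64_to_u64. A hedged sketch of that order-preserving i64 -> u64 mapping (the sign-bit flip below is the usual implementation and is an assumption here, not a quote of the common crate):

// Assumed formula: map i64 to u64 so that unsigned order matches signed order.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    // Timestamps before the epoch must still sort below later ones after the
    // mapping, which is what the bit-packed fast field codecs rely on.
    let micros = [-1_000_000i64, 0, 1_000_000];
    let mapped: Vec<u64> = micros.iter().map(|&m| i64_to_u64(m)).collect();
    assert!(mapped.windows(2).all(|w| w[0] < w[1]));
    assert_eq!(u64_to_i64(mapped[1]), 0);
}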
|
||||
224 src/fastfield/gcd.rs (new file)
@@ -0,0 +1,224 @@
use std::io::{self, Write};

use common::BinarySerializable;
use fastdivide::DividerU64;
use fastfield_codecs::FastFieldCodecReader;
use gcd::Gcd;

pub const GCD_DEFAULT: u64 = 1;
pub const GCD_CODEC_ID: u8 = 4;
/// Wrapper for accessing a fastfield.
///
/// Holds the data and the codec to the read the data.
#[derive(Clone)]
pub struct GCDFastFieldCodec<CodecReader> {
    gcd: u64,
    min_value: u64,
    reader: CodecReader,
}
impl<C: FastFieldCodecReader + Clone> FastFieldCodecReader for GCDFastFieldCodec<C> {
    /// Opens a fast field given the bytes.
    fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self> {
        let (header, mut footer) = bytes.split_at(bytes.len() - 16);
        let gcd = u64::deserialize(&mut footer)?;
        let min_value = u64::deserialize(&mut footer)?;
        let reader = C::open_from_bytes(header)?;

        Ok(GCDFastFieldCodec {
            gcd,
            min_value,
            reader,
        })
    }

    #[inline]
    fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
        let mut data = self.reader.get_u64(doc, data);
        data *= self.gcd;
        data += self.min_value;
        data
    }

    fn min_value(&self) -> u64 {
        self.min_value + self.reader.min_value() * self.gcd
    }

    fn max_value(&self) -> u64 {
        self.min_value + self.reader.max_value() * self.gcd
    }
}

pub fn write_gcd_header<W: Write>(field_write: &mut W, min_value: u64, gcd: u64) -> io::Result<()> {
    gcd.serialize(field_write)?;
    min_value.serialize(field_write)?;
    Ok(())
}
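
The decode path in get_u64 above is stored * gcd + min_value; the matching encode step, done at serialization time and not shown in this hunk, subtracts the minimum and divides out the GCD so the inner codec only has to bit-pack small integers. A plain-integer sketch of that round trip (no codec involved; the encode formula is an assumption about the serializer's side):

fn main() {
    let values = [1000u64, 2000, 3000, 7000];
    let min_value = *values.iter().min().unwrap(); // 1000
    let gcd = 1000u64; // the GCD of the deltas from the minimum

    // Assumed encode step: shrink every value before handing it to the inner codec.
    let encoded: Vec<u64> = values.iter().map(|v| (v - min_value) / gcd).collect();
    assert_eq!(encoded, vec![0, 1, 2, 6]);

    // Decode step: exactly the arithmetic of GCDFastFieldCodec::get_u64.
    let decoded: Vec<u64> = encoded.iter().map(|e| e * gcd + min_value).collect();
    assert_eq!(decoded, values.to_vec());
}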

// Find GCD for iterator of numbers
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<u64> {
    let mut numbers = numbers.filter(|n| *n != 0);
    let mut gcd = numbers.next()?;
    if gcd == 1 {
        return Some(1);
    }

    let mut gcd_divider = DividerU64::divide_by(gcd);
    for val in numbers {
        let remainder = val - (gcd_divider.divide(val)) * gcd;
        if remainder == 0 {
            continue;
        }
        gcd = gcd.gcd(val);
        if gcd == 1 {
            return Some(1);
        }

        gcd_divider = DividerU64::divide_by(gcd);
    }
    Some(gcd)
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use common::HasLen;
|
||||
|
||||
use crate::directory::{CompositeFile, RamDirectory, WritePtr};
|
||||
use crate::fastfield::serializer::FastFieldCodecEnableCheck;
|
||||
use crate::fastfield::tests::{FIELD, FIELDI64, SCHEMA, SCHEMAI64};
|
||||
use crate::fastfield::{
|
||||
find_gcd, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldCodecName,
|
||||
FastFieldReader, FastFieldsWriter, ALL_CODECS,
|
||||
};
|
||||
use crate::schema::Schema;
|
||||
use crate::Directory;
|
||||
|
||||
fn get_index(
|
||||
docs: &[crate::Document],
|
||||
schema: &Schema,
|
||||
codec_enable_checker: FastFieldCodecEnableCheck,
|
||||
) -> crate::Result<RamDirectory> {
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer =
|
||||
CompositeFastFieldSerializer::from_write_with_codec(write, codec_enable_checker)
|
||||
.unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(schema);
|
||||
for doc in docs {
|
||||
fast_field_writers.add_document(doc);
|
||||
}
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
Ok(directory)
|
||||
}
|
||||
|
||||
fn test_fastfield_gcd_i64_with_codec(
|
||||
codec_name: FastFieldCodecName,
|
||||
num_vals: usize,
|
||||
) -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut docs = vec![];
|
||||
for i in 1..=num_vals {
|
||||
let val = i as i64 * 1000i64;
|
||||
docs.push(doc!(*FIELDI64=>val));
|
||||
}
|
||||
let directory = get_index(&docs, &SCHEMAI64, codec_name.clone().into())?;
|
||||
let file = directory.open_read(path).unwrap();
|
||||
// assert_eq!(file.len(), 118);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<i64>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), 1000i64);
|
||||
assert_eq!(fast_field_reader.get(1), 2000i64);
|
||||
assert_eq!(fast_field_reader.get(2), 3000i64);
|
||||
assert_eq!(fast_field_reader.max_value(), num_vals as i64 * 1000);
|
||||
assert_eq!(fast_field_reader.min_value(), 1000i64);
|
||||
let file = directory.open_read(path).unwrap();
|
||||
|
||||
// Can't apply gcd
|
||||
let path = Path::new("test");
|
||||
docs.pop();
|
||||
docs.push(doc!(*FIELDI64=>2001i64));
|
||||
let directory = get_index(&docs, &SCHEMAI64, codec_name.into())?;
|
||||
let file2 = directory.open_read(path).unwrap();
|
||||
assert!(file2.len() > file.len());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fastfield_gcd_i64() -> crate::Result<()> {
|
||||
for codec_name in ALL_CODECS {
|
||||
test_fastfield_gcd_i64_with_codec(codec_name.clone(), 5005)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_fastfield_gcd_u64_with_codec(
|
||||
codec_name: FastFieldCodecName,
|
||||
num_vals: usize,
|
||||
) -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let mut docs = vec![];
|
||||
for i in 1..=num_vals {
|
||||
let val = i as u64 * 1000u64;
|
||||
docs.push(doc!(*FIELD=>val));
|
||||
}
|
||||
let directory = get_index(&docs, &SCHEMA, codec_name.clone().into())?;
|
||||
let file = directory.open_read(path).unwrap();
|
||||
// assert_eq!(file.len(), 118);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), 1000u64);
|
||||
assert_eq!(fast_field_reader.get(1), 2000u64);
|
||||
assert_eq!(fast_field_reader.get(2), 3000u64);
|
||||
assert_eq!(fast_field_reader.max_value(), num_vals as u64 * 1000);
|
||||
assert_eq!(fast_field_reader.min_value(), 1000u64);
|
||||
let file = directory.open_read(path).unwrap();
|
||||
|
||||
// Can't apply gcd
|
||||
let path = Path::new("test");
|
||||
docs.pop();
|
||||
docs.push(doc!(*FIELDI64=>2001u64));
|
||||
let directory = get_index(&docs, &SCHEMA, codec_name.into())?;
|
||||
let file2 = directory.open_read(path).unwrap();
|
||||
assert!(file2.len() > file.len());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fastfield_gcd_u64() -> crate::Result<()> {
|
||||
for codec_name in ALL_CODECS {
|
||||
test_fastfield_gcd_u64_with_codec(codec_name.clone(), 5005)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield2() {
|
||||
let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
|
||||
assert_eq!(test_fastfield.get(0), 100);
|
||||
assert_eq!(test_fastfield.get(1), 200);
|
||||
assert_eq!(test_fastfield.get(2), 300);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn find_gcd_test() {
|
||||
assert_eq!(find_gcd([0].into_iter()), None);
|
||||
assert_eq!(find_gcd([0, 10].into_iter()), Some(10));
|
||||
assert_eq!(find_gcd([10, 0].into_iter()), Some(10));
|
||||
assert_eq!(find_gcd([].into_iter()), None);
|
||||
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), Some(5));
|
||||
assert_eq!(find_gcd([15, 16, 10].into_iter()), Some(1));
|
||||
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), Some(5));
|
||||
}
|
||||
}
|
||||
@@ -20,182 +20,71 @@
|
||||
//!
|
||||
//! Read access performance is comparable to that of an array lookup.
|
||||
|
||||
use std::collections::btree_map::Range;
|
||||
|
||||
pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
|
||||
pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
|
||||
pub use self::error::{FastFieldNotAvailableError, Result};
|
||||
pub use self::facet_reader::FacetReader;
|
||||
pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
|
||||
pub use self::reader::{DynamicFastFieldReader, FastFieldReader};
|
||||
pub use self::fast_value::{FastValue, FastValueU128};
|
||||
pub(crate) use self::gcd::{find_gcd, GCDFastFieldCodec, GCD_CODEC_ID, GCD_DEFAULT};
|
||||
pub use self::multivalued::{
|
||||
MultiValuedFastFieldReader, MultiValuedFastFieldWriter, MultiValuedU128FastFieldReader,
|
||||
};
|
||||
pub use self::reader::{DynamicFastFieldReader, FastFieldReader, FastFieldReaderCodecWrapperU128};
|
||||
pub use self::readers::FastFieldReaders;
|
||||
pub(crate) use self::readers::{type_and_cardinality, FastType};
|
||||
pub use self::serializer::{CompositeFastFieldSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
|
||||
use crate::schema::{Cardinality, FieldType, Type, Value};
|
||||
use crate::{DateTime, DocId};
|
||||
use crate::schema::Value;
|
||||
use crate::DocId;
|
||||
|
||||
mod alive_bitset;
|
||||
mod bytes;
|
||||
mod error;
|
||||
mod facet_reader;
|
||||
mod fast_value;
|
||||
mod gcd;
|
||||
mod multivalued;
|
||||
mod reader;
|
||||
mod readers;
|
||||
mod serializer;
|
||||
mod writer;
|
||||
|
||||
/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
|
||||
/// for a doc_id
|
||||
pub trait MultiValueLength {
|
||||
/// returns the num of values associated to a doc_id
|
||||
fn get_len(&self, doc_id: DocId) -> u64;
|
||||
/// returns the sum of num values for all doc_ids
|
||||
fn get_total_len(&self) -> u64;
|
||||
}
|
||||
|
||||
/// Trait for types that are allowed for fast fields: (u64, i64 and f64).
|
||||
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd + 'static {
|
||||
/// Converts a value from u64
|
||||
///
|
||||
/// Internally all fast field values are encoded as u64.
|
||||
fn from_u64(val: u64) -> Self;
|
||||
|
||||
/// Converts a value to u64.
|
||||
///
|
||||
/// Internally all fast field values are encoded as u64.
|
||||
fn to_u64(&self) -> u64;
|
||||
|
||||
/// Returns the fast field cardinality that can be extracted from the given
|
||||
/// `FieldType`.
|
||||
///
|
||||
/// If the type is not a fast field, `None` is returned.
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality>;
|
||||
|
||||
/// Cast value to `u64`.
|
||||
/// The value is just reinterpreted in memory.
|
||||
fn as_u64(&self) -> u64;
|
||||
|
||||
/// Build a default value. This default value is never used, so the value does not
|
||||
/// really matter.
|
||||
fn make_zero() -> Self {
|
||||
Self::from_u64(0i64.to_u64())
|
||||
}
|
||||
|
||||
/// Returns the `schema::Type` for this FastValue.
|
||||
fn to_type() -> Type;
|
||||
}
|
||||
|
||||
impl FastValue for u64 {
|
||||
fn from_u64(val: u64) -> Self {
|
||||
val
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
FieldType::Facet(_) => Some(Cardinality::MultiValues),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::U64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for i64 {
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_i64(val)
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
common::i64_to_u64(*self)
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::I64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
*self as u64
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::I64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for f64 {
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_f64(val)
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
common::f64_to_u64(*self)
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::F64(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.to_bits()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::F64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastValue for DateTime {
|
||||
fn from_u64(timestamp_u64: u64) -> Self {
|
||||
let unix_timestamp = i64::from_u64(timestamp_u64);
|
||||
Self::from_unix_timestamp(unix_timestamp)
|
||||
}
|
||||
|
||||
fn to_u64(&self) -> u64 {
|
||||
self.into_unix_timestamp().to_u64()
|
||||
}
|
||||
|
||||
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
|
||||
match *field_type {
|
||||
FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_u64(&self) -> u64 {
|
||||
self.into_unix_timestamp().as_u64()
|
||||
}
|
||||
|
||||
fn to_type() -> Type {
|
||||
Type::Date
|
||||
}
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
|
||||
pub(crate) enum FastFieldCodecName {
|
||||
Bitpacked,
|
||||
LinearInterpol,
|
||||
BlockwiseLinearInterpol,
|
||||
}
|
||||
pub(crate) const ALL_CODECS: &[FastFieldCodecName; 3] = &[
|
||||
FastFieldCodecName::Bitpacked,
|
||||
FastFieldCodecName::LinearInterpol,
|
||||
FastFieldCodecName::BlockwiseLinearInterpol,
|
||||
];
|
||||
|
||||
fn value_to_u64(value: &Value) -> u64 {
|
||||
match value {
|
||||
Value::U64(val) => val.to_u64(),
|
||||
Value::I64(val) => val.to_u64(),
|
||||
Value::F64(val) => val.to_u64(),
|
||||
Value::Bool(val) => val.to_u64(),
|
||||
Value::Date(val) => val.to_u64(),
|
||||
_ => panic!("Expected a u64/i64/f64/date field, got {:?} ", value),
|
||||
_ => panic!("Expected a u64/i64/f64/bool/date field, got {:?} ", value),
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
|
||||
/// for a doc_id
|
||||
pub trait MultiValueLength {
|
||||
/// returns the positions of values associated to a doc_id
|
||||
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64>;
|
||||
/// returns the num of values associated to a doc_id
|
||||
fn get_len(&self, doc_id: DocId) -> u64;
|
||||
/// returns the sum of num values for all doc_ids
|
||||
fn get_total_len(&self) -> u64;
|
||||
}
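
A hedged sketch of how a reader backed by a plain offsets array would satisfy this interface; the struct below is illustrative only (it is not one of tantivy's readers) and mirrors the idx_reader layout used by the multivalued readers, where the values of doc d live at offsets[d]..offsets[d + 1]:

use std::ops::Range;

type DocId = u32;

// Illustrative reader: per-document value positions come from an offsets
// array of length num_docs + 1.
struct OffsetsReader {
    offsets: Vec<u64>,
}

impl OffsetsReader {
    fn get_range(&self, doc_id: DocId) -> Range<u64> {
        self.offsets[doc_id as usize]..self.offsets[doc_id as usize + 1]
    }
    fn get_len(&self, doc_id: DocId) -> u64 {
        let r = self.get_range(doc_id);
        r.end - r.start
    }
    fn get_total_len(&self) -> u64 {
        *self.offsets.last().unwrap()
    }
}

fn main() {
    // Doc 0 has 2 values, doc 1 has 0, doc 2 has 3.
    let reader = OffsetsReader { offsets: vec![0, 2, 2, 5] };
    assert_eq!(reader.get_range(1), 2..2);
    assert_eq!(reader.get_len(2), 3);
    assert_eq!(reader.get_total_len(), 5);
}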
|
||||
|
||||
/// The fast field type
|
||||
pub enum FastFieldType {
|
||||
/// Numeric type, e.g. f64.
|
||||
@@ -220,6 +109,7 @@ impl FastFieldType {
|
||||
mod tests {
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::ops::Range;
|
||||
use std::path::Path;
|
||||
|
||||
@@ -232,9 +122,11 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
|
||||
use crate::merge_policy::NoMergePolicy;
|
||||
use crate::schema::{Document, Field, NumericOptions, Schema, FAST, STRING, TEXT};
|
||||
use crate::schema::{
|
||||
self, Cardinality, Document, Field, IpOptions, Schema, FAST, INDEXED, STORED, STRING, TEXT,
|
||||
};
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::{Index, SegmentId, SegmentReader};
|
||||
use crate::{DateOptions, DatePrecision, DateTime, Index, SegmentId, SegmentReader};
|
||||
|
||||
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -242,7 +134,14 @@ mod tests {
|
||||
schema_builder.build()
|
||||
});
|
||||
|
||||
pub static SCHEMAI64: Lazy<Schema> = Lazy::new(|| {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_i64_field("field", FAST);
|
||||
schema_builder.build()
|
||||
});
|
||||
|
||||
pub static FIELD: Lazy<Field> = Lazy::new(|| SCHEMA.get_field("field").unwrap());
|
||||
pub static FIELDI64: Lazy<Field> = Lazy::new(|| SCHEMAI64.get_field("field").unwrap());
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield() {
|
||||
@@ -253,7 +152,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield_i64_u64() {
|
||||
pub fn test_datetime_conversion() {
|
||||
let datetime = DateTime::from_utc(OffsetDateTime::UNIX_EPOCH);
|
||||
assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
|
||||
}
|
||||
@@ -391,7 +290,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signed_intfastfield() -> crate::Result<()> {
|
||||
fn test_signed_intfastfield_normal() -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -471,10 +370,15 @@ mod tests {
|
||||
permutation
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_permutation() -> crate::Result<()> {
|
||||
// Warning: this generates the same permutation at each call
|
||||
pub fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn test_intfastfield_permutation_with_data(permutation: Vec<u64>) -> crate::Result<()> {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let directory = RamDirectory::create();
|
||||
{
|
||||
@@ -493,15 +397,27 @@ mod tests {
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
|
||||
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
for a in 0..n {
|
||||
assert_eq!(fast_field_reader.get(a as u32), permutation[a as usize]);
|
||||
a = fast_field_reader.get(a as u32);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_permutation_gcd() -> crate::Result<()> {
|
||||
let permutation = generate_permutation_gcd();
|
||||
test_intfastfield_permutation_with_data(permutation)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intfastfield_permutation() -> crate::Result<()> {
|
||||
let permutation = generate_permutation();
|
||||
test_intfastfield_permutation_with_data(permutation)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_missing_date_fast_field() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -530,8 +446,8 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_datetime() {
|
||||
assert_eq!(0, DateTime::make_zero().into_unix_timestamp());
|
||||
fn test_default_date() {
|
||||
assert_eq!(0, DateTime::make_zero().into_timestamp_secs());
|
||||
}
|
||||
|
||||
fn get_vals_for_docs(ff: &MultiValuedFastFieldReader<u64>, docs: Range<u32>) -> Vec<u64> {
|
||||
@@ -545,6 +461,85 @@ mod tests {
|
||||
all
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ip_fastfield_minimal() -> crate::Result<()> {
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let ip_field = schema_builder.add_ip_field("ip", FAST | INDEXED | STORED);
|
||||
|
||||
let ips_field = schema_builder.add_ip_field(
|
||||
"ips",
|
||||
IpOptions::default().set_fast(Cardinality::MultiValues),
|
||||
);
|
||||
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let ip1 = IpAddr::from((1_u128).to_be_bytes());
|
||||
let ip2 = IpAddr::from((2_u128).to_be_bytes());
|
||||
let ip3 = IpAddr::from((3_u128).to_be_bytes());
|
||||
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
index_writer.add_document(doc!())?;
|
||||
index_writer.add_document(doc!(
|
||||
ip_field => ip2,
|
||||
ips_field => ip2,
|
||||
ips_field => ip2,
|
||||
))?;
|
||||
index_writer.commit()?;
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
|
||||
// single value
|
||||
let ip_addr_fast_field = fast_fields.ip_addr(ip_field).unwrap();
|
||||
assert_eq!(ip_addr_fast_field.get_val(0), None);
|
||||
assert_eq!(ip_addr_fast_field.get_val(1), Some(ip2));
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip2), vec![1]);
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip2), vec![1]);
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip3), vec![1]);
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip3), vec![1]);
|
||||
assert_eq!(
|
||||
ip_addr_fast_field.get_between_vals(ip1..=ip1),
|
||||
vec![] as Vec<usize>
|
||||
);
|
||||
assert_eq!(
|
||||
ip_addr_fast_field.get_between_vals(ip3..=ip3),
|
||||
vec![] as Vec<usize>
|
||||
);
|
||||
|
||||
// multi value
|
||||
let ip_addr_fast_field = fast_fields.ip_addrs(ips_field).unwrap();
|
||||
assert_eq!(ip_addr_fast_field.get_first_val(0), None);
|
||||
assert_eq!(ip_addr_fast_field.get_first_val(1), Some(ip2));
|
||||
|
||||
let mut out = vec![];
|
||||
ip_addr_fast_field.get_vals(0, &mut out);
|
||||
assert_eq!(out, vec![] as Vec<IpAddr>);
|
||||
let mut out = vec![];
|
||||
ip_addr_fast_field.get_vals(1, &mut out);
|
||||
assert_eq!(out, vec![ip2, ip2]);
|
||||
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip2), vec![1]);
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip2), vec![1]);
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip2..=ip3), vec![1]);
|
||||
assert_eq!(ip_addr_fast_field.get_between_vals(ip1..=ip3), vec![1]);
|
||||
assert_eq!(
|
||||
ip_addr_fast_field.get_between_vals(ip1..=ip1),
|
||||
vec![] as Vec<usize>
|
||||
);
|
||||
assert_eq!(
|
||||
ip_addr_fast_field.get_between_vals(ip3..=ip3),
|
||||
vec![] as Vec<usize>
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_text_fastfield() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -737,10 +732,15 @@ mod tests {
|
||||
fn test_datefastfield() -> crate::Result<()> {
|
||||
use crate::fastfield::FastValue;
|
||||
let mut schema_builder = Schema::builder();
|
||||
let date_field = schema_builder.add_date_field("date", FAST);
|
||||
let date_field = schema_builder.add_date_field(
|
||||
"date",
|
||||
DateOptions::from(FAST).set_precision(DatePrecision::Microseconds),
|
||||
);
|
||||
let multi_date_field = schema_builder.add_date_field(
|
||||
"multi_date",
|
||||
NumericOptions::default().set_fast(Cardinality::MultiValues),
|
||||
DateOptions::default()
|
||||
.set_precision(DatePrecision::Microseconds)
|
||||
.set_fast(Cardinality::MultiValues),
|
||||
);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
@@ -768,26 +768,138 @@ mod tests {
|
||||
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
|
||||
let mut dates = vec![];
|
||||
{
|
||||
assert_eq!(date_fast_field.get(0u32).into_unix_timestamp(), 1i64);
|
||||
assert_eq!(date_fast_field.get(0u32).into_timestamp_micros(), 1i64);
|
||||
dates_fast_field.get_vals(0u32, &mut dates);
|
||||
assert_eq!(dates.len(), 2);
|
||||
assert_eq!(dates[0].into_unix_timestamp(), 2i64);
|
||||
assert_eq!(dates[1].into_unix_timestamp(), 3i64);
|
||||
assert_eq!(dates[0].into_timestamp_micros(), 2i64);
|
||||
assert_eq!(dates[1].into_timestamp_micros(), 3i64);
|
||||
}
|
||||
{
|
||||
assert_eq!(date_fast_field.get(1u32).into_unix_timestamp(), 4i64);
|
||||
assert_eq!(date_fast_field.get(1u32).into_timestamp_micros(), 4i64);
|
||||
dates_fast_field.get_vals(1u32, &mut dates);
|
||||
assert!(dates.is_empty());
|
||||
}
|
||||
{
|
||||
assert_eq!(date_fast_field.get(2u32).into_unix_timestamp(), 0i64);
|
||||
assert_eq!(date_fast_field.get(2u32).into_timestamp_micros(), 0i64);
|
||||
dates_fast_field.get_vals(2u32, &mut dates);
|
||||
assert_eq!(dates.len(), 2);
|
||||
assert_eq!(dates[0].into_unix_timestamp(), 5i64);
|
||||
assert_eq!(dates[1].into_unix_timestamp(), 6i64);
|
||||
assert_eq!(dates[0].into_timestamp_micros(), 5i64);
|
||||
assert_eq!(dates[1].into_timestamp_micros(), 6i64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield_bool() {
|
||||
let test_fastfield = DynamicFastFieldReader::<bool>::from(vec![true, false, true, false]);
|
||||
assert_eq!(test_fastfield.get(0), true);
|
||||
assert_eq!(test_fastfield.get(1), false);
|
||||
assert_eq!(test_fastfield.get(2), true);
|
||||
assert_eq!(test_fastfield.get(3), false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield_bool_small() -> crate::Result<()> {
|
||||
let path = Path::new("test_bool");
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_bool_field("field_bool", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let field = schema.get_field("field_bool").unwrap();
|
||||
|
||||
{
|
||||
let write: WritePtr = directory.open_write(path).unwrap();
|
||||
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
|
||||
fast_field_writers.add_document(&doc!(field=>true));
|
||||
fast_field_writers.add_document(&doc!(field=>false));
|
||||
fast_field_writers.add_document(&doc!(field=>true));
|
||||
fast_field_writers.add_document(&doc!(field=>false));
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 36);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(field).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), true);
|
||||
assert_eq!(fast_field_reader.get(1), false);
|
||||
assert_eq!(fast_field_reader.get(2), true);
|
||||
assert_eq!(fast_field_reader.get(3), false);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield_bool_large() -> crate::Result<()> {
|
||||
let path = Path::new("test_bool");
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_bool_field("field_bool", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let field = schema.get_field("field_bool").unwrap();
|
||||
|
||||
{
|
||||
let write: WritePtr = directory.open_write(path).unwrap();
|
||||
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
|
||||
for _ in 0..50 {
|
||||
fast_field_writers.add_document(&doc!(field=>true));
|
||||
fast_field_writers.add_document(&doc!(field=>false));
|
||||
}
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 48);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(field).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
|
||||
for i in 0..25 {
|
||||
assert_eq!(fast_field_reader.get(i * 2), true);
|
||||
assert_eq!(fast_field_reader.get(i * 2 + 1), false);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_fastfield_bool_default_value() -> crate::Result<()> {
|
||||
let path = Path::new("test_bool");
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_bool_field("field_bool", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let field = schema.get_field("field_bool").unwrap();
|
||||
|
||||
{
|
||||
let write: WritePtr = directory.open_write(path).unwrap();
|
||||
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema);
|
||||
let doc = Document::default();
|
||||
fast_field_writers.add_document(&doc);
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 35);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(field).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), false);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
@@ -800,6 +912,7 @@ mod bench {
|
||||
use super::tests::{generate_permutation, FIELD, SCHEMA};
|
||||
use super::*;
|
||||
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
|
||||
use crate::fastfield::tests::generate_permutation_gcd;
|
||||
use crate::fastfield::FastFieldReader;
|
||||
|
||||
#[bench]
|
||||
@@ -886,10 +999,42 @@ mod bench {
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let n = test::black_box(1000u32);
|
||||
let mut a = 0u32;
|
||||
for _ in 0u32..n {
|
||||
a = fast_field_reader.get(a) as u32;
|
||||
for i in 0u32..permutation.len() as u32 {
|
||||
a = fast_field_reader.get(i) as u32;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_fflookup_gcd(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
let permutation = generate_permutation_gcd();
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
for &x in &permutation {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||
}
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u32;
|
||||
for i in 0u32..permutation.len() as u32 {
|
||||
a = fast_field_reader.get(i) as u32;
|
||||
}
|
||||
a
|
||||
});
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
mod reader;
|
||||
mod writer;
|
||||
|
||||
pub use self::reader::MultiValuedFastFieldReader;
|
||||
pub use self::writer::MultiValuedFastFieldWriter;
|
||||
pub use self::reader::{MultiValuedFastFieldReader, MultiValuedU128FastFieldReader};
|
||||
pub use self::writer::{MultiValuedFastFieldWriter, U128MultiValueFastFieldWriter};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
@@ -13,7 +13,7 @@ mod tests {
|
||||
use crate::collector::TopDocs;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::QueryParser;
|
||||
use crate::schema::{Cardinality, Facet, FacetOptions, NumericOptions, Schema};
|
||||
use crate::schema::{Cardinality, DateOptions, Facet, FacetOptions, NumericOptions, Schema};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::{Duration, OffsetDateTime};
|
||||
use crate::{DateTime, Document, Index, Term};
|
||||
@@ -58,7 +58,7 @@ mod tests {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let date_field = schema_builder.add_date_field(
|
||||
"multi_date_field",
|
||||
NumericOptions::default()
|
||||
DateOptions::default()
|
||||
.set_fast(Cardinality::MultiValues)
|
||||
.set_indexed()
|
||||
.set_fieldnorm()
|
||||
@@ -226,6 +226,38 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multivalued_bool() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let bool_field = schema_builder.add_bool_field(
|
||||
"multifield",
|
||||
NumericOptions::default().set_fast(Cardinality::MultiValues),
|
||||
);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(doc!(bool_field=> true, bool_field => false))?;
|
||||
index_writer.add_document(doc!())?;
|
||||
index_writer.add_document(doc!(bool_field=> false))?;
|
||||
index_writer
|
||||
.add_document(doc!(bool_field=> true, bool_field => true, bool_field => false))?;
|
||||
index_writer.commit()?;
|
||||
|
||||
let searcher = index.reader()?.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
let mut vals = Vec::new();
|
||||
let multi_value_reader = segment_reader.fast_fields().bools(bool_field).unwrap();
|
||||
multi_value_reader.get_vals(2, &mut vals);
|
||||
assert_eq!(&vals, &[false]);
|
||||
multi_value_reader.get_vals(0, &mut vals);
|
||||
assert_eq!(&vals, &[true, false]);
|
||||
multi_value_reader.get_vals(1, &mut vals);
|
||||
assert!(vals.is_empty());
|
||||
multi_value_reader.get_vals(3, &mut vals);
|
||||
assert_eq!(&vals, &[true, true, false]);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_multivalued_no_panic(ops: &[IndexingOp]) -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_u64_field(
|
||||
@@ -314,6 +346,13 @@ mod tests {
|
||||
assert!(test_multivalued_no_panic(&ops[..]).is_ok());
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn test_multivalued_proptest_gcd() {
|
||||
use IndexingOp::*;
|
||||
let ops = [AddDoc { id: 9 }, AddDoc { id: 9 }, Merge];
|
||||
|
||||
assert!(test_multivalued_no_panic(&ops[..]).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multivalued_proptest_off_by_one_bug_1151() {
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
use std::ops::Range;
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue, MultiValueLength};
|
||||
use fastfield_codecs::ip_codec::IntervallDecompressor;
|
||||
|
||||
use crate::fastfield::{
|
||||
DynamicFastFieldReader, FastFieldReader, FastFieldReaderCodecWrapperU128, FastValue,
|
||||
FastValueU128, MultiValueLength,
|
||||
};
|
||||
use crate::DocId;
|
||||
|
||||
/// Reader for a multivalued `u64` fast field.
|
||||
@@ -84,6 +89,155 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
|
||||
}
|
||||
|
||||
impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
|
||||
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
|
||||
self.range(doc_id)
|
||||
}
|
||||
fn get_len(&self, doc_id: DocId) -> u64 {
|
||||
self.num_vals(doc_id) as u64
|
||||
}
|
||||
fn get_total_len(&self) -> u64 {
|
||||
self.total_num_vals() as u64
|
||||
}
|
||||
}
|
||||
|
||||
/// Reader for a multivalued `u128` fast field.
///
/// The reader is implemented as a `u64` fast field for the index and a `u128` fast field.
///
/// The `vals_reader` will access the concatenated list of all
/// values for all documents.
/// The `idx_reader` associates, for each document, the index of its first value.
#[derive(Clone)]
pub struct MultiValuedU128FastFieldReader<Item: FastValueU128> {
    idx_reader: DynamicFastFieldReader<u64>,
    vals_reader: FastFieldReaderCodecWrapperU128<Item, IntervallDecompressor>,
}
|
||||
|
||||
impl<Item: FastValueU128> MultiValuedU128FastFieldReader<Item> {
|
||||
pub(crate) fn open(
|
||||
idx_reader: DynamicFastFieldReader<u64>,
|
||||
vals_reader: FastFieldReaderCodecWrapperU128<Item, IntervallDecompressor>,
|
||||
) -> MultiValuedU128FastFieldReader<Item> {
|
||||
Self {
|
||||
idx_reader,
|
||||
vals_reader,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `[start, end)`, such that the values associated
|
||||
/// to the given document are `start..end`.
|
||||
#[inline]
|
||||
fn range(&self, doc: DocId) -> Range<u64> {
|
||||
let start = self.idx_reader.get(doc);
|
||||
let end = self.idx_reader.get(doc + 1);
|
||||
start..end
|
||||
}
|
||||
|
||||
/// Returns the array of values associated to the given `doc`.
|
||||
#[inline]
|
||||
pub fn get_first_val(&self, doc: DocId) -> Option<Item> {
|
||||
let range = self.range(doc);
|
||||
if range.is_empty() {
|
||||
return None;
|
||||
}
|
||||
self.vals_reader.get_val(range.start)
|
||||
}
|
||||
|
||||
/// Returns the array of values associated to the given `doc`.
|
||||
#[inline]
|
||||
fn get_vals_for_range(&self, range: Range<u64>, vals: &mut Vec<Item>) {
|
||||
let len = (range.end - range.start) as usize;
|
||||
vals.resize(len, Item::make_zero());
|
||||
self.vals_reader.get_range(range.start, &mut vals[..]);
|
||||
}
|
||||
|
||||
/// Returns the array of values associated to the given `doc`.
|
||||
#[inline]
|
||||
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
|
||||
let range = self.range(doc);
|
||||
self.get_vals_for_range(range, vals);
|
||||
}
|
||||
|
||||
/// Returns all docids which are in the provided value range
|
||||
pub fn get_between_vals(&self, range: RangeInclusive<Item>) -> Vec<DocId> {
|
||||
let positions = self.vals_reader.get_between_vals(range);
|
||||
|
||||
positions_to_docids(&positions, self)
|
||||
}
|
||||
|
||||
/// Iterates over all elements in the fast field
|
||||
pub fn iter(&self) -> impl Iterator<Item = Option<Item>> + '_ {
|
||||
self.vals_reader.iter()
|
||||
}
|
||||
|
||||
/// Returns the minimum value for this fast field.
///
/// The min value does not take possible
/// deleted documents into account and should be considered a lower bound
/// of the actual minimum value.
pub fn min_value(&self) -> Item {
    self.vals_reader.min_value()
}

/// Returns the maximum value for this fast field.
///
/// The max value does not take possible
/// deleted documents into account and should be considered an upper bound
/// of the actual maximum value.
pub fn max_value(&self) -> Item {
    self.vals_reader.max_value()
}
|
||||
|
||||
/// Returns the number of values associated with the document `DocId`.
#[inline]
pub fn num_vals(&self, doc: DocId) -> usize {
    let range = self.range(doc);
    (range.end - range.start) as usize
}

/// Returns the overall number of values in this field.
#[inline]
pub fn total_num_vals(&self) -> u64 {
    self.idx_reader.max_value()
}
}
|
||||
|
||||
/// Converts a list of positions of values in a 1:n index to the corresponding list of DocIds.
///
/// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the index.
///
/// Correctness: the positions need to be sorted.
///
/// TODO: Instead of a linear scan we can employ a binary search to match a docid to its value
/// position.
fn positions_to_docids<T: MultiValueLength>(positions: &[usize], multival_idx: &T) -> Vec<DocId> {
    let mut docs = vec![];
    let mut cur_doc = 0u32;
    let mut last_doc = None;

    for pos in positions {
        loop {
            let range = multival_idx.get_range(cur_doc);
            if range.contains(&(*pos as u64)) {
                // avoid duplicates
                if Some(cur_doc) == last_doc {
                    break;
                }
                docs.push(cur_doc);
                last_doc = Some(cur_doc);
                break;
            }
            cur_doc += 1;
        }
    }

    docs
}
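
For readers following the diff, here is a minimal, self-contained sketch of the same position-to-docid scan (not part of the commit; a plain offset table stands in for `MultiValueLength`, and all names are illustrative):

/// `offsets[d]..offsets[d + 1]` is the range of value positions owned by doc `d`;
/// the last entry is the total number of values, mirroring the `idx_reader` column.
fn positions_to_docids_sketch(offsets: &[u64], positions: &[u64]) -> Vec<u32> {
    let mut docs = Vec::new();
    let mut cur_doc = 0usize;
    for &pos in positions {
        // Positions are assumed sorted, so the scan only ever moves forward.
        while !(offsets[cur_doc]..offsets[cur_doc + 1]).contains(&pos) {
            cur_doc += 1;
        }
        if docs.last() != Some(&(cur_doc as u32)) {
            docs.push(cur_doc as u32); // avoid duplicates
        }
    }
    docs
}

fn main() {
    // doc 0 owns positions 0..2, doc 1 owns none, doc 2 owns positions 2..5.
    let offsets = [0u64, 2, 2, 5];
    assert_eq!(positions_to_docids_sketch(&offsets, &[0, 1, 4]), vec![0, 2]);
}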
||||
impl<Item: FastValueU128> MultiValueLength for MultiValuedU128FastFieldReader<Item> {
|
||||
fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
|
||||
self.range(doc_id)
|
||||
}
|
||||
fn get_len(&self, doc_id: DocId) -> u64 {
|
||||
self.num_vals(doc_id) as u64
|
||||
}
|
||||
@@ -92,6 +246,7 @@ impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
|
||||
self.total_num_vals() as u64
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
use std::io;
|
||||
|
||||
use fastfield_codecs::ip_codec::{ip_to_u128, IntervalCompressor};
|
||||
use fnv::FnvHashMap;
|
||||
use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::fastfield::serializer::BitpackedFastFieldSerializerLegacy;
|
||||
use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer, FastFieldType};
|
||||
use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer, FastFieldType, FastValue};
|
||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||
use crate::postings::UnorderedTermId;
|
||||
use crate::schema::{Document, Field};
|
||||
use crate::schema::{Document, Field, Value};
|
||||
use crate::termdict::TermOrdinal;
|
||||
use crate::DocId;
|
||||
use crate::{DatePrecision, DocId};
|
||||
|
||||
/// Writer for multi-valued (as in, more than one value per document)
|
||||
/// int fast field.
|
||||
@@ -36,6 +37,7 @@ use crate::DocId;
|
||||
/// term ids when the segment is getting serialized.
|
||||
pub struct MultiValuedFastFieldWriter {
|
||||
field: Field,
|
||||
precision_opt: Option<DatePrecision>,
|
||||
vals: Vec<UnorderedTermId>,
|
||||
doc_index: Vec<u64>,
|
||||
fast_field_type: FastFieldType,
|
||||
@@ -43,9 +45,14 @@ pub struct MultiValuedFastFieldWriter {
|
||||
|
||||
impl MultiValuedFastFieldWriter {
|
||||
/// Creates a new `MultiValuedFastFieldWriter`
|
||||
pub(crate) fn new(field: Field, fast_field_type: FastFieldType) -> Self {
|
||||
pub(crate) fn new(
|
||||
field: Field,
|
||||
fast_field_type: FastFieldType,
|
||||
precision_opt: Option<DatePrecision>,
|
||||
) -> Self {
|
||||
MultiValuedFastFieldWriter {
|
||||
field,
|
||||
precision_opt,
|
||||
vals: Vec::new(),
|
||||
doc_index: Vec::new(),
|
||||
fast_field_type,
|
||||
@@ -83,7 +90,14 @@ impl MultiValuedFastFieldWriter {
|
||||
}
|
||||
for field_value in doc.field_values() {
|
||||
if field_value.field == self.field {
|
||||
self.add_val(value_to_u64(field_value.value()));
|
||||
let value = field_value.value();
|
||||
let value_u64 = match (self.precision_opt, value) {
|
||||
(Some(precision), Value::Date(date_val)) => {
|
||||
date_val.truncate(precision).to_u64()
|
||||
}
|
||||
_ => value_to_u64(value),
|
||||
};
|
||||
self.add_val(value_u64);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -107,25 +121,9 @@ impl MultiValuedFastFieldWriter {
|
||||
&'a self,
|
||||
doc_id_map: Option<&'b DocIdMapping>,
|
||||
) -> impl Iterator<Item = &'b [u64]> {
|
||||
let doc_id_iter: Box<dyn Iterator<Item = u32>> = if let Some(doc_id_map) = doc_id_map {
|
||||
Box::new(doc_id_map.iter_old_doc_ids())
|
||||
} else {
|
||||
let max_doc = self.doc_index.len() as DocId;
|
||||
Box::new(0..max_doc)
|
||||
};
|
||||
doc_id_iter.map(move |doc_id| self.get_values_for_doc_id(doc_id))
|
||||
get_ordered_values(&self.vals, &self.doc_index, doc_id_map)
|
||||
}
|
||||
|
||||
/// returns all values for a doc_id
|
||||
fn get_values_for_doc_id(&self, doc_id: u32) -> &[u64] {
|
||||
let start_pos = self.doc_index[doc_id as usize] as usize;
|
||||
let end_pos = self
|
||||
.doc_index
|
||||
.get(doc_id as usize + 1)
|
||||
.cloned()
|
||||
.unwrap_or(self.vals.len() as u64) as usize; // special case, last doc_id has no offset information
|
||||
&self.vals[start_pos..end_pos]
|
||||
}
|
||||
/// Serializes fast field values by pushing them to the `FastFieldSerializer`.
|
||||
///
|
||||
/// If a mapping is given, the values are remapped *and sorted* before serialization.
|
||||
@@ -207,3 +205,132 @@ impl MultiValuedFastFieldWriter {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Writer for multi-valued (as in, more than one value per document)
|
||||
/// int fast field.
|
||||
///
|
||||
/// This `Writer` is only useful for advanced users.
|
||||
/// The normal way to get your multivalued int in your index
|
||||
/// is to
|
||||
/// - declare your field with fast set to `Cardinality::MultiValues`
|
||||
/// in your schema
|
||||
/// - add your document simply by calling `.add_document(...)`.
|
||||
///
|
||||
/// The `MultiValuedFastFieldWriter` can be acquired from the
|
||||
|
||||
pub struct U128MultiValueFastFieldWriter {
|
||||
field: Field,
|
||||
vals: Vec<u128>,
|
||||
doc_index: Vec<u64>,
|
||||
}
|
||||
|
||||
impl U128MultiValueFastFieldWriter {
|
||||
/// Creates a new `U128MultiValueFastFieldWriter`
|
||||
pub(crate) fn new(field: Field) -> Self {
|
||||
U128MultiValueFastFieldWriter {
|
||||
field,
|
||||
vals: Vec::new(),
|
||||
doc_index: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// The memory used (including children).
pub fn mem_usage(&self) -> usize {
    self.vals.capacity() * std::mem::size_of::<UnorderedTermId>()
        + self.doc_index.capacity() * std::mem::size_of::<u64>()
}
|
||||
|
||||
/// Finalize the current document.
|
||||
pub(crate) fn next_doc(&mut self) {
|
||||
self.doc_index.push(self.vals.len() as u64);
|
||||
}
|
||||
|
||||
/// Pushes a new value to the current document.
|
||||
pub(crate) fn add_val(&mut self, val: u128) {
|
||||
self.vals.push(val);
|
||||
}
|
||||
|
||||
/// Shift to the next document and adds
|
||||
/// all of the matching field values present in the document.
|
||||
pub fn add_document(&mut self, doc: &Document) {
|
||||
self.next_doc();
|
||||
for field_value in doc.field_values() {
|
||||
if field_value.field == self.field {
|
||||
let value = field_value.value();
|
||||
let ip_addr = value.as_ip().unwrap();
|
||||
let value = ip_to_u128(ip_addr);
|
||||
self.add_val(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an iterator over values per doc_id in ascending doc_id order.
|
||||
///
|
||||
/// Normally the order is simply iterating self.doc_id_index.
|
||||
/// With doc_id_map it accounts for the new mapping, returning values in the order of the
|
||||
/// new doc_ids.
|
||||
fn get_ordered_values<'a: 'b, 'b>(
|
||||
&'a self,
|
||||
doc_id_map: Option<&'b DocIdMapping>,
|
||||
) -> impl Iterator<Item = &'b [u128]> {
|
||||
get_ordered_values(&self.vals, &self.doc_index, doc_id_map)
|
||||
}
|
||||
|
||||
/// Serializes fast field values.
|
||||
pub fn serialize(
|
||||
&self,
|
||||
serializer: &mut CompositeFastFieldSerializer,
|
||||
doc_id_map: Option<&DocIdMapping>,
|
||||
) -> io::Result<()> {
|
||||
{
|
||||
// writing the offset index
|
||||
let mut doc_index_serializer =
|
||||
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
|
||||
|
||||
let mut offset = 0;
|
||||
for vals in self.get_ordered_values(doc_id_map) {
|
||||
doc_index_serializer.add_val(offset)?;
|
||||
offset += vals.len() as u64;
|
||||
}
|
||||
doc_index_serializer.add_val(self.vals.len() as u64)?;
|
||||
|
||||
doc_index_serializer.close_field()?;
|
||||
}
|
||||
{
|
||||
let field_write = serializer.get_field_writer(self.field, 1);
|
||||
let compressor = IntervalCompressor::from_vals(self.vals.to_vec());
|
||||
let iter = self.get_ordered_values(doc_id_map).flatten().cloned();
|
||||
compressor.compress_into(iter, field_write)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an iterator over values per doc_id in ascending doc_id order.
|
||||
///
|
||||
/// Normally the order is simply iterating self.doc_id_index.
|
||||
/// With doc_id_map it accounts for the new mapping, returning values in the order of the
|
||||
/// new doc_ids.
|
||||
fn get_ordered_values<'a: 'b, 'b, T>(
|
||||
vals: &'a [T],
|
||||
doc_index: &'a [u64],
|
||||
doc_id_map: Option<&'b DocIdMapping>,
|
||||
) -> impl Iterator<Item = &'b [T]> {
|
||||
let doc_id_iter: Box<dyn Iterator<Item = u32>> = if let Some(doc_id_map) = doc_id_map {
|
||||
Box::new(doc_id_map.iter_old_doc_ids())
|
||||
} else {
|
||||
let max_doc = doc_index.len() as DocId;
|
||||
Box::new(0..max_doc)
|
||||
};
|
||||
doc_id_iter.map(move |doc_id| get_values_for_doc_id(doc_id, vals, doc_index))
|
||||
}
|
||||
|
||||
/// returns all values for a doc_id
|
||||
fn get_values_for_doc_id<'a, T>(doc_id: u32, vals: &'a [T], doc_index: &'a [u64]) -> &'a [T] {
|
||||
let start_pos = doc_index[doc_id as usize] as usize;
|
||||
let end_pos = doc_index
|
||||
.get(doc_id as usize + 1)
|
||||
.cloned()
|
||||
.unwrap_or(vals.len() as u64) as usize; // special case, last doc_id has no offset information
|
||||
&vals[start_pos..end_pos]
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::RangeInclusive;
|
||||
use std::path::Path;
|
||||
|
||||
use common::BinarySerializable;
|
||||
use fastfield_codecs::bitpacked::{
|
||||
BitpackedFastFieldReader as BitpackedReader, BitpackedFastFieldSerializer,
|
||||
};
|
||||
@@ -12,9 +12,9 @@ use fastfield_codecs::linearinterpol::{
|
||||
use fastfield_codecs::multilinearinterpol::{
|
||||
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::{FastFieldCodecReader, FastFieldCodecSerializer};
|
||||
use fastfield_codecs::{FastFieldCodecReader, FastFieldCodecReaderU128, FastFieldCodecSerializer};
|
||||
|
||||
use super::FastValue;
|
||||
use super::{FastValue, FastValueU128, GCDFastFieldCodec, GCD_CODEC_ID};
|
||||
use crate::directory::{CompositeFile, Directory, FileSlice, OwnedBytes, RamDirectory, WritePtr};
|
||||
use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter};
|
||||
use crate::schema::{Schema, FAST};
|
||||
@@ -71,15 +71,26 @@ pub enum DynamicFastFieldReader<Item: FastValue> {
|
||||
LinearInterpol(FastFieldReaderCodecWrapper<Item, LinearInterpolFastFieldReader>),
|
||||
/// Blockwise linear interpolated values + bitpacked
|
||||
MultiLinearInterpol(FastFieldReaderCodecWrapper<Item, MultiLinearInterpolFastFieldReader>),
|
||||
|
||||
/// GCD and Bitpacked compressed fastfield data.
|
||||
BitpackedGCD(FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<BitpackedReader>>),
|
||||
/// GCD and Linear interpolated values + bitpacked
|
||||
LinearInterpolGCD(
|
||||
FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<LinearInterpolFastFieldReader>>,
|
||||
),
|
||||
/// GCD and Blockwise linear interpolated values + bitpacked
|
||||
MultiLinearInterpolGCD(
|
||||
FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<MultiLinearInterpolFastFieldReader>>,
|
||||
),
|
||||
}
|
||||
|
||||
impl<Item: FastValue> DynamicFastFieldReader<Item> {
|
||||
/// Returns correct the reader wrapped in the `DynamicFastFieldReader` enum for the data.
|
||||
pub fn open(file: FileSlice) -> crate::Result<DynamicFastFieldReader<Item>> {
|
||||
let mut bytes = file.read_bytes()?;
|
||||
let id = bytes.read_u8();
|
||||
|
||||
let reader = match id {
|
||||
pub fn open_from_id(
|
||||
mut bytes: OwnedBytes,
|
||||
codec_id: u8,
|
||||
) -> crate::Result<DynamicFastFieldReader<Item>> {
|
||||
let reader = match codec_id {
|
||||
BitpackedFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::Bitpacked(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
@@ -100,15 +111,59 @@ impl<Item: FastValue> DynamicFastFieldReader<Item> {
|
||||
bytes
|
||||
)?)
|
||||
}
|
||||
_ if codec_id == GCD_CODEC_ID => {
|
||||
let codec_id = bytes.read_u8();
|
||||
|
||||
match codec_id {
|
||||
BitpackedFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::BitpackedGCD(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
GCDFastFieldCodec<BitpackedReader>,
|
||||
>::open_from_bytes(
|
||||
bytes
|
||||
)?)
|
||||
}
|
||||
LinearInterpolFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::LinearInterpolGCD(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
GCDFastFieldCodec<LinearInterpolFastFieldReader>,
|
||||
>::open_from_bytes(
|
||||
bytes
|
||||
)?)
|
||||
}
|
||||
MultiLinearInterpolFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::MultiLinearInterpolGCD(
|
||||
FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
GCDFastFieldCodec<MultiLinearInterpolFastFieldReader>,
|
||||
>::open_from_bytes(bytes)?,
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
panic!(
|
||||
"unknown fastfield codec id {:?}. Data corrupted or using old tantivy \
|
||||
version.",
|
||||
codec_id
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
panic!(
|
||||
"unknown fastfield id {:?}. Data corrupted or using old tantivy version.",
|
||||
id
|
||||
"unknown fastfield codec id {:?}. Data corrupted or using old tantivy version.",
|
||||
codec_id
|
||||
)
|
||||
}
|
||||
};
|
||||
Ok(reader)
|
||||
}
|
||||
/// Returns correct the reader wrapped in the `DynamicFastFieldReader` enum for the data.
|
||||
pub fn open(file: FileSlice) -> crate::Result<DynamicFastFieldReader<Item>> {
|
||||
let mut bytes = file.read_bytes()?;
|
||||
let codec_id = bytes.read_u8();
|
||||
|
||||
Self::open_from_id(bytes, codec_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
|
||||
@@ -118,6 +173,9 @@ impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
|
||||
Self::Bitpacked(reader) => reader.get(doc),
|
||||
Self::LinearInterpol(reader) => reader.get(doc),
|
||||
Self::MultiLinearInterpol(reader) => reader.get(doc),
|
||||
Self::BitpackedGCD(reader) => reader.get(doc),
|
||||
Self::LinearInterpolGCD(reader) => reader.get(doc),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.get(doc),
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
@@ -126,6 +184,9 @@ impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
|
||||
Self::Bitpacked(reader) => reader.get_range(start, output),
|
||||
Self::LinearInterpol(reader) => reader.get_range(start, output),
|
||||
Self::MultiLinearInterpol(reader) => reader.get_range(start, output),
|
||||
Self::BitpackedGCD(reader) => reader.get_range(start, output),
|
||||
Self::LinearInterpolGCD(reader) => reader.get_range(start, output),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.get_range(start, output),
|
||||
}
|
||||
}
|
||||
fn min_value(&self) -> Item {
|
||||
@@ -133,6 +194,9 @@ impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
|
||||
Self::Bitpacked(reader) => reader.min_value(),
|
||||
Self::LinearInterpol(reader) => reader.min_value(),
|
||||
Self::MultiLinearInterpol(reader) => reader.min_value(),
|
||||
Self::BitpackedGCD(reader) => reader.min_value(),
|
||||
Self::LinearInterpolGCD(reader) => reader.min_value(),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.min_value(),
|
||||
}
|
||||
}
|
||||
fn max_value(&self) -> Item {
|
||||
@@ -140,10 +204,85 @@ impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
|
||||
Self::Bitpacked(reader) => reader.max_value(),
|
||||
Self::LinearInterpol(reader) => reader.max_value(),
|
||||
Self::MultiLinearInterpol(reader) => reader.max_value(),
|
||||
Self::BitpackedGCD(reader) => reader.max_value(),
|
||||
Self::LinearInterpolGCD(reader) => reader.max_value(),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.max_value(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for accessing a fastfield.
|
||||
///
|
||||
/// Holds the data and the codec to the read the data.
|
||||
#[derive(Clone)]
|
||||
pub struct FastFieldReaderCodecWrapperU128<Item: FastValueU128, CodecReader> {
|
||||
reader: CodecReader,
|
||||
bytes: OwnedBytes,
|
||||
_phantom: PhantomData<Item>,
|
||||
}
|
||||
|
||||
impl<Item: FastValueU128, C: FastFieldCodecReaderU128> FastFieldReaderCodecWrapperU128<Item, C> {
|
||||
/// Opens a fast field given the bytes.
|
||||
pub fn open_from_bytes(bytes: OwnedBytes) -> crate::Result<Self> {
|
||||
let reader = C::open_from_bytes(bytes.as_slice())?;
|
||||
Ok(Self {
|
||||
reader,
|
||||
bytes,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the item for the docid, if present
|
||||
pub fn get_val(&self, doc: u64) -> Option<Item> {
|
||||
self.reader
|
||||
.get(doc, self.bytes.as_slice())
|
||||
.map(|el| Item::from_u128(el))
|
||||
}
|
||||
|
||||
/// Internally, `multivalued` fast fields also use single-value fast fields.
/// It works as follows: a first column contains the start index
/// for each document, and a second column contains the actual values.
///
/// The values associated to a given doc are then
/// `second_column[first_column.get(doc)..first_column.get(doc+1)]`.
///
/// This means a single-value fast field reader can be indexed internally with
/// something different from a `DocId`. For this use case, we want to use `u64`
/// values.
///
/// See `get_range` for the actual documentation of this method.
|
||||
pub(crate) fn get_range(&self, start: u64, output: &mut [Item]) {
|
||||
for (i, out) in output.iter_mut().enumerate() {
|
||||
if let Some(val) = self.get_val(start + (i as u64)) {
|
||||
*out = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterates over all elements in the fast field
|
||||
pub fn iter(&self) -> impl Iterator<Item = Option<Item>> + '_ {
|
||||
self.reader
|
||||
.iter(self.bytes.as_slice())
|
||||
.map(|el| el.map(Item::from_u128))
|
||||
}
|
||||
|
||||
/// Returns all docids which are in the provided value range
|
||||
pub fn get_between_vals(&self, range: RangeInclusive<Item>) -> Vec<usize> {
|
||||
let range = range.start().to_u128()..=range.end().to_u128();
|
||||
self.reader.get_between_vals(range, self.bytes.as_slice())
|
||||
}
|
||||
|
||||
/// Return min_value.
|
||||
pub fn min_value(&self) -> Item {
|
||||
Item::from_u128(self.reader.min_value())
|
||||
}
|
||||
|
||||
/// Return max_value.
|
||||
pub fn max_value(&self) -> Item {
|
||||
Item::from_u128(self.reader.max_value())
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for accessing a fastfield.
|
||||
///
|
||||
/// Holds the data and the codec to the read the data.
|
||||
@@ -158,10 +297,10 @@ impl<Item: FastValue, C: FastFieldCodecReader> FastFieldReaderCodecWrapper<Item,
|
||||
/// Opens a fast field given a file.
|
||||
pub fn open(file: FileSlice) -> crate::Result<Self> {
|
||||
let mut bytes = file.read_bytes()?;
|
||||
let id = u8::deserialize(&mut bytes)?;
|
||||
let codec_id = bytes.read_u8();
|
||||
assert_eq!(
|
||||
BitpackedFastFieldSerializer::ID,
|
||||
id,
|
||||
codec_id,
|
||||
"Tried to open fast field as bitpacked encoded (id=1), but got serializer with \
|
||||
different id"
|
||||
);
|
||||
@@ -178,7 +317,8 @@ impl<Item: FastValue, C: FastFieldCodecReader> FastFieldReaderCodecWrapper<Item,
|
||||
}
|
||||
#[inline]
|
||||
pub(crate) fn get_u64(&self, doc: u64) -> Item {
|
||||
Item::from_u64(self.reader.get_u64(doc, self.bytes.as_slice()))
|
||||
let data = self.reader.get_u64(doc, self.bytes.as_slice());
|
||||
Item::from_u64(data)
|
||||
}
|
||||
|
||||
/// Internally `multivalued` also use SingleValue Fast fields.
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
use super::reader::DynamicFastFieldReader;
|
||||
use std::net::IpAddr;
|
||||
|
||||
use fastfield_codecs::ip_codec::IntervallDecompressor;
|
||||
|
||||
use super::multivalued::MultiValuedU128FastFieldReader;
|
||||
use super::reader::{DynamicFastFieldReader, FastFieldReaderCodecWrapperU128};
|
||||
use crate::directory::{CompositeFile, FileSlice};
|
||||
use crate::fastfield::{
|
||||
BytesFastFieldReader, FastFieldNotAvailableError, FastValue, MultiValuedFastFieldReader,
|
||||
@@ -20,7 +25,9 @@ pub struct FastFieldReaders {
|
||||
pub(crate) enum FastType {
|
||||
I64,
|
||||
U64,
|
||||
U128,
|
||||
F64,
|
||||
Bool,
|
||||
Date,
|
||||
}
|
||||
|
||||
@@ -35,6 +42,9 @@ pub(crate) fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType,
|
||||
FieldType::F64(options) => options
|
||||
.get_fastfield_cardinality()
|
||||
.map(|cardinality| (FastType::F64, cardinality)),
|
||||
FieldType::Bool(options) => options
|
||||
.get_fastfield_cardinality()
|
||||
.map(|cardinality| (FastType::Bool, cardinality)),
|
||||
FieldType::Date(options) => options
|
||||
.get_fastfield_cardinality()
|
||||
.map(|cardinality| (FastType::Date, cardinality)),
|
||||
@@ -42,6 +52,9 @@ pub(crate) fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType,
|
||||
FieldType::Str(options) if options.is_fast() => {
|
||||
Some((FastType::U64, Cardinality::MultiValues))
|
||||
}
|
||||
FieldType::Ip(options) => options
|
||||
.get_fastfield_cardinality()
|
||||
.map(|cardinality| (FastType::U128, cardinality)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@@ -133,6 +146,69 @@ impl FastFieldReaders {
|
||||
self.typed_fast_field_reader(field)
|
||||
}
|
||||
|
||||
/// Returns the `ip` fast field reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a u128 fast field, this method returns an Error.
|
||||
pub fn ip_addr(
|
||||
&self,
|
||||
field: Field,
|
||||
) -> crate::Result<FastFieldReaderCodecWrapperU128<IpAddr, IntervallDecompressor>> {
|
||||
self.check_type(field, FastType::U128, Cardinality::SingleValue)?;
|
||||
let fast_field_slice = self.fast_field_data(field, 0)?;
|
||||
let bytes = fast_field_slice.read_bytes()?;
|
||||
FastFieldReaderCodecWrapperU128::<IpAddr, IntervallDecompressor>::open_from_bytes(bytes)
|
||||
}
|
||||
|
||||
/// Returns the `ip` fast field reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a u128 fast field, this method returns an Error.
|
||||
pub fn ip_addrs(&self, field: Field) -> crate::Result<MultiValuedU128FastFieldReader<IpAddr>> {
|
||||
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
|
||||
let idx_reader: DynamicFastFieldReader<u64> = self.typed_fast_field_reader(field)?;
|
||||
|
||||
let fast_field_slice = self.fast_field_data(field, 1)?;
|
||||
let bytes = fast_field_slice.read_bytes()?;
|
||||
|
||||
let vals_reader =
|
||||
FastFieldReaderCodecWrapperU128::<IpAddr, IntervallDecompressor>::open_from_bytes(
|
||||
bytes,
|
||||
)?;
|
||||
Ok(MultiValuedU128FastFieldReader::open(
|
||||
idx_reader,
|
||||
vals_reader,
|
||||
))
|
||||
}
|
||||
|
||||
/// Returns the `u128` fast field reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a u128 fast field, this method returns an Error.
|
||||
pub fn u128(
|
||||
&self,
|
||||
field: Field,
|
||||
) -> crate::Result<FastFieldReaderCodecWrapperU128<u128, IntervallDecompressor>> {
|
||||
let fast_field_slice = self.fast_field_data(field, 0)?;
|
||||
let bytes = fast_field_slice.read_bytes()?;
|
||||
FastFieldReaderCodecWrapperU128::<u128, IntervallDecompressor>::open_from_bytes(bytes)
|
||||
}
|
||||
|
||||
/// Returns the `u128` multi-valued fast field reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a u128 multi-valued fast field, this method returns an Error.
|
||||
pub fn u128s(&self, field: Field) -> crate::Result<MultiValuedU128FastFieldReader<u128>> {
|
||||
self.check_type(field, FastType::U128, Cardinality::MultiValues)?;
|
||||
let idx_reader: DynamicFastFieldReader<u64> = self.typed_fast_field_reader(field)?;
|
||||
|
||||
let fast_field_slice = self.fast_field_data(field, 1)?;
|
||||
let bytes = fast_field_slice.read_bytes()?;
|
||||
|
||||
let vals_reader =
|
||||
FastFieldReaderCodecWrapperU128::<u128, IntervallDecompressor>::open_from_bytes(bytes)?;
|
||||
Ok(MultiValuedU128FastFieldReader::open(
|
||||
idx_reader,
|
||||
vals_reader,
|
||||
))
|
||||
}
|
||||
|
||||
/// Returns the `u64` fast field reader associated to `field`, regardless of whether the
|
||||
/// given field is effectively of type `u64` or not.
|
||||
///
|
||||
@@ -166,6 +242,14 @@ impl FastFieldReaders {
|
||||
self.typed_fast_field_reader(field)
|
||||
}
|
||||
|
||||
/// Returns the `bool` fast field reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a bool fast field, this method returns an Error.
|
||||
pub fn bool(&self, field: Field) -> crate::Result<DynamicFastFieldReader<bool>> {
|
||||
self.check_type(field, FastType::Bool, Cardinality::SingleValue)?;
|
||||
self.typed_fast_field_reader(field)
|
||||
}
|
||||
|
||||
/// Returns a `u64s` multi-valued fast field reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a u64 multi-valued fast field, this method returns an Error.
|
||||
@@ -198,6 +282,14 @@ impl FastFieldReaders {
|
||||
self.typed_fast_field_multi_reader(field)
|
||||
}
|
||||
|
||||
/// Returns a `bools` multi-valued fast field reader associated to `field`.
|
||||
///
|
||||
/// If `field` is not a bool multi-valued fast field, this method returns an Error.
|
||||
pub fn bools(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<bool>> {
|
||||
self.check_type(field, FastType::Bool, Cardinality::MultiValues)?;
|
||||
self.typed_fast_field_multi_reader(field)
|
||||
}
|
||||
|
||||
/// Returns a `time::OffsetDateTime` multi-valued fast field reader associated to
|
||||
/// `field`.
|
||||
///
|
||||
|
||||
@@ -8,7 +8,10 @@ use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
|
||||
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
|
||||
pub use fastfield_codecs::{FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
|
||||
use super::{find_gcd, FastFieldCodecName, ALL_CODECS, GCD_DEFAULT};
|
||||
use crate::directory::{CompositeWrite, WritePtr};
|
||||
use crate::fastfield::gcd::write_gcd_header;
|
||||
use crate::fastfield::GCD_CODEC_ID;
|
||||
use crate::schema::Field;
|
||||
|
||||
/// `CompositeFastFieldSerializer` is in charge of serializing
|
||||
@@ -33,6 +36,30 @@ use crate::schema::Field;
|
||||
/// * `close()`
|
||||
pub struct CompositeFastFieldSerializer {
|
||||
composite_write: CompositeWrite<WritePtr>,
|
||||
codec_enable_checker: FastFieldCodecEnableCheck,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FastFieldCodecEnableCheck {
|
||||
enabled_codecs: Vec<FastFieldCodecName>,
|
||||
}
|
||||
impl FastFieldCodecEnableCheck {
|
||||
fn allow_all() -> Self {
|
||||
FastFieldCodecEnableCheck {
|
||||
enabled_codecs: ALL_CODECS.to_vec(),
|
||||
}
|
||||
}
|
||||
fn is_enabled(&self, codec_name: FastFieldCodecName) -> bool {
|
||||
self.enabled_codecs.contains(&codec_name)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FastFieldCodecName> for FastFieldCodecEnableCheck {
|
||||
fn from(codec_name: FastFieldCodecName) -> Self {
|
||||
FastFieldCodecEnableCheck {
|
||||
enabled_codecs: vec![codec_name],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use this once explicit_generic_args_with_impl_trait is merged and stabilized
|
||||
@@ -52,60 +79,154 @@ fn codec_estimation<T: FastFieldCodecSerializer, A: FastFieldDataAccess>(
|
||||
impl CompositeFastFieldSerializer {
|
||||
/// Constructor
|
||||
pub fn from_write(write: WritePtr) -> io::Result<CompositeFastFieldSerializer> {
|
||||
Self::from_write_with_codec(write, FastFieldCodecEnableCheck::allow_all())
|
||||
}
|
||||
|
||||
/// Constructor
|
||||
pub fn from_write_with_codec(
|
||||
write: WritePtr,
|
||||
codec_enable_checker: FastFieldCodecEnableCheck,
|
||||
) -> io::Result<CompositeFastFieldSerializer> {
|
||||
// just making room for the pointer to header.
|
||||
let composite_write = CompositeWrite::wrap(write);
|
||||
Ok(CompositeFastFieldSerializer { composite_write })
|
||||
Ok(CompositeFastFieldSerializer {
|
||||
composite_write,
|
||||
codec_enable_checker,
|
||||
})
|
||||
}
|
||||
|
||||
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
|
||||
/// automatically.
|
||||
pub fn create_auto_detect_u64_fast_field(
|
||||
pub fn create_auto_detect_u64_fast_field<F, I>(
|
||||
&mut self,
|
||||
field: Field,
|
||||
stats: FastFieldStats,
|
||||
fastfield_accessor: impl FastFieldDataAccess,
|
||||
data_iter_1: impl Iterator<Item = u64>,
|
||||
data_iter_2: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
iter_gen: F,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
F: Fn() -> I,
|
||||
I: Iterator<Item = u64>,
|
||||
{
|
||||
self.create_auto_detect_u64_fast_field_with_idx(
|
||||
field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
data_iter_1,
|
||||
data_iter_2,
|
||||
iter_gen,
|
||||
0,
|
||||
)
|
||||
}
|
||||
|
||||
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
|
||||
/// automatically.
|
||||
pub fn create_auto_detect_u64_fast_field_with_idx(
|
||||
pub fn write_header<W: Write>(field_write: &mut W, codec_id: u8) -> io::Result<()> {
|
||||
codec_id.serialize(field_write)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
|
||||
/// automatically.
|
||||
pub fn create_auto_detect_u64_fast_field_with_idx<F, I>(
|
||||
&mut self,
|
||||
field: Field,
|
||||
stats: FastFieldStats,
|
||||
fastfield_accessor: impl FastFieldDataAccess,
|
||||
data_iter_1: impl Iterator<Item = u64>,
|
||||
data_iter_2: impl Iterator<Item = u64>,
|
||||
iter_gen: F,
|
||||
idx: usize,
|
||||
) -> io::Result<()> {
|
||||
) -> io::Result<()>
|
||||
where
|
||||
F: Fn() -> I,
|
||||
I: Iterator<Item = u64>,
|
||||
{
|
||||
let field_write = self.composite_write.for_field_with_idx(field, idx);
|
||||
let gcd = find_gcd(iter_gen().map(|val| val - stats.min_value)).unwrap_or(GCD_DEFAULT);
|
||||
|
||||
if gcd == 1 {
|
||||
return Self::create_auto_detect_u64_fast_field_with_idx_gcd(
|
||||
self.codec_enable_checker.clone(),
|
||||
field,
|
||||
field_write,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter_gen(),
|
||||
iter_gen(),
|
||||
);
|
||||
}
|
||||
|
||||
Self::write_header(field_write, GCD_CODEC_ID)?;
|
||||
struct GCDWrappedFFAccess<T: FastFieldDataAccess> {
|
||||
fastfield_accessor: T,
|
||||
min_value: u64,
|
||||
gcd: u64,
|
||||
}
|
||||
impl<T: FastFieldDataAccess> FastFieldDataAccess for GCDWrappedFFAccess<T> {
|
||||
fn get_val(&self, position: u64) -> u64 {
|
||||
(self.fastfield_accessor.get_val(position) - self.min_value) / self.gcd
|
||||
}
|
||||
}
|
||||
|
||||
let fastfield_accessor = GCDWrappedFFAccess {
|
||||
fastfield_accessor,
|
||||
min_value: stats.min_value,
|
||||
gcd,
|
||||
};
|
||||
|
||||
let min_value = stats.min_value;
|
||||
let stats = FastFieldStats {
|
||||
min_value: 0,
|
||||
max_value: (stats.max_value - stats.min_value) / gcd,
|
||||
num_vals: stats.num_vals,
|
||||
};
|
||||
let iter1 = iter_gen().map(|val| (val - min_value) / gcd);
|
||||
let iter2 = iter_gen().map(|val| (val - min_value) / gcd);
|
||||
Self::create_auto_detect_u64_fast_field_with_idx_gcd(
|
||||
self.codec_enable_checker.clone(),
|
||||
field,
|
||||
field_write,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
write_gcd_header(field_write, min_value, gcd)?;
|
||||
Ok(())
|
||||
}
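
As an aside, a minimal sketch (not part of the commit) of the GCD normalization performed above before the values are handed to the regular codecs; the reader later restores values from the `min_value` and `gcd` written by `write_gcd_header`, and the helper names here are illustrative only:

fn normalize(val: u64, min_value: u64, gcd: u64) -> u64 {
    // Mirrors the `(val - min_value) / gcd` mapping used for `iter1`, `iter2`
    // and the wrapped fastfield accessor above.
    (val - min_value) / gcd
}

fn restore(stored: u64, min_value: u64, gcd: u64) -> u64 {
    stored * gcd + min_value
}

fn main() {
    let (min_value, gcd) = (1_000u64, 250u64);
    for val in [1_000u64, 1_250, 2_500] {
        assert_eq!(restore(normalize(val, min_value, gcd), min_value, gcd), val);
    }
}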
|
||||
|
||||
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
|
||||
/// automatically.
|
||||
pub fn create_auto_detect_u64_fast_field_with_idx_gcd<W: Write>(
|
||||
codec_enable_checker: FastFieldCodecEnableCheck,
|
||||
field: Field,
|
||||
field_write: &mut CountingWriter<W>,
|
||||
stats: FastFieldStats,
|
||||
fastfield_accessor: impl FastFieldDataAccess,
|
||||
iter1: impl Iterator<Item = u64>,
|
||||
iter2: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
let mut estimations = vec![];
|
||||
|
||||
codec_estimation::<BitpackedFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
codec_estimation::<LinearInterpolFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
codec_estimation::<MultiLinearInterpolFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecName::Bitpacked) {
|
||||
codec_estimation::<BitpackedFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
}
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecName::LinearInterpol) {
|
||||
codec_estimation::<LinearInterpolFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
}
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecName::BlockwiseLinearInterpol) {
|
||||
codec_estimation::<MultiLinearInterpolFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
}
|
||||
if let Some(broken_estimation) = estimations.iter().find(|estimation| estimation.0.is_nan())
|
||||
{
|
||||
warn!(
|
||||
@@ -122,15 +243,16 @@ impl CompositeFastFieldSerializer {
|
||||
"choosing fast field codec {} for field_id {:?}",
|
||||
name, field
|
||||
); // todo print actual field name
|
||||
id.serialize(field_write)?;
|
||||
|
||||
Self::write_header(field_write, id)?;
|
||||
match name {
|
||||
BitpackedFastFieldSerializer::NAME => {
|
||||
BitpackedFastFieldSerializer::serialize(
|
||||
field_write,
|
||||
&fastfield_accessor,
|
||||
stats,
|
||||
data_iter_1,
|
||||
data_iter_2,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
}
|
||||
LinearInterpolFastFieldSerializer::NAME => {
|
||||
@@ -138,8 +260,8 @@ impl CompositeFastFieldSerializer {
|
||||
field_write,
|
||||
&fastfield_accessor,
|
||||
stats,
|
||||
data_iter_1,
|
||||
data_iter_2,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
}
|
||||
MultiLinearInterpolFastFieldSerializer::NAME => {
|
||||
@@ -147,19 +269,29 @@ impl CompositeFastFieldSerializer {
|
||||
field_write,
|
||||
&fastfield_accessor,
|
||||
stats,
|
||||
data_iter_1,
|
||||
data_iter_2,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
}
|
||||
_ => {
|
||||
panic!("unknown fastfield serializer {}", name)
|
||||
}
|
||||
};
|
||||
}
|
||||
field_write.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Start serializing a new u64 fast field
|
||||
pub fn serialize_into(
|
||||
&mut self,
|
||||
field: Field,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
|
||||
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
|
||||
}
|
||||
|
||||
/// Start serializing a new u64 fast field
|
||||
pub fn new_u64_fast_field(
|
||||
&mut self,
|
||||
@@ -195,6 +327,11 @@ impl CompositeFastFieldSerializer {
|
||||
FastBytesFieldSerializer { write: field_write }
|
||||
}
|
||||
|
||||
/// Gets the underlying writer
|
||||
pub fn get_field_writer(&mut self, field: Field, idx: usize) -> &mut impl Write {
|
||||
self.composite_write.for_field_with_idx(field, idx)
|
||||
}
|
||||
|
||||
/// Closes the serializer
|
||||
///
|
||||
/// After this call the data must be persistently saved on disk.
|
||||
|
||||
@@ -2,22 +2,27 @@ use std::collections::HashMap;
|
||||
use std::io;
|
||||
|
||||
use common;
|
||||
use fastfield_codecs::ip_codec::{ip_to_u128, IntervalCompressor};
|
||||
use fnv::FnvHashMap;
|
||||
use roaring::RoaringBitmap;
|
||||
use tantivy_bitpacker::BlockedBitpacker;
|
||||
|
||||
use super::multivalued::MultiValuedFastFieldWriter;
|
||||
use super::multivalued::{MultiValuedFastFieldWriter, U128MultiValueFastFieldWriter};
|
||||
use super::serializer::FastFieldStats;
|
||||
use super::{FastFieldDataAccess, FastFieldType};
|
||||
use super::{FastFieldDataAccess, FastFieldType, FastValue};
|
||||
use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
|
||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||
use crate::postings::UnorderedTermId;
|
||||
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
|
||||
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema, Value};
|
||||
use crate::termdict::TermOrdinal;
|
||||
use crate::DatePrecision;
|
||||
|
||||
/// The `FastFieldsWriter` groups all of the fast field writers.
|
||||
pub struct FastFieldsWriter {
|
||||
term_id_writers: Vec<MultiValuedFastFieldWriter>,
|
||||
single_value_writers: Vec<IntFastFieldWriter>,
|
||||
u128_value_writers: Vec<U128FastFieldWriter>,
|
||||
u128_multi_value_writers: Vec<U128MultiValueFastFieldWriter>,
|
||||
multi_values_writers: Vec<MultiValuedFastFieldWriter>,
|
||||
bytes_value_writers: Vec<BytesFastFieldWriter>,
|
||||
}
|
||||
@@ -33,6 +38,8 @@ fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
|
||||
impl FastFieldsWriter {
|
||||
/// Create all `FastFieldWriter` required by the schema.
|
||||
pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
|
||||
let mut u128_value_writers = Vec::new();
|
||||
let mut u128_multi_value_writers = Vec::new();
|
||||
let mut single_value_writers = Vec::new();
|
||||
let mut term_id_writers = Vec::new();
|
||||
let mut multi_values_writers = Vec::new();
|
||||
@@ -43,30 +50,51 @@ impl FastFieldsWriter {
|
||||
FieldType::I64(ref int_options)
|
||||
| FieldType::U64(ref int_options)
|
||||
| FieldType::F64(ref int_options)
|
||||
| FieldType::Date(ref int_options) => {
|
||||
| FieldType::Bool(ref int_options) => {
|
||||
match int_options.get_fastfield_cardinality() {
|
||||
Some(Cardinality::SingleValue) => {
|
||||
let mut fast_field_writer = IntFastFieldWriter::new(field);
|
||||
let mut fast_field_writer = IntFastFieldWriter::new(field, None);
|
||||
let default_value = fast_field_default_value(field_entry);
|
||||
fast_field_writer.set_val_if_missing(default_value);
|
||||
single_value_writers.push(fast_field_writer);
|
||||
}
|
||||
Some(Cardinality::MultiValues) => {
|
||||
let fast_field_writer =
|
||||
MultiValuedFastFieldWriter::new(field, FastFieldType::Numeric);
|
||||
let fast_field_writer = MultiValuedFastFieldWriter::new(
|
||||
field,
|
||||
FastFieldType::Numeric,
|
||||
None,
|
||||
);
|
||||
multi_values_writers.push(fast_field_writer);
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
FieldType::Date(ref options) => match options.get_fastfield_cardinality() {
|
||||
Some(Cardinality::SingleValue) => {
|
||||
let mut fast_field_writer =
|
||||
IntFastFieldWriter::new(field, Some(options.get_precision()));
|
||||
let default_value = fast_field_default_value(field_entry);
|
||||
fast_field_writer.set_val_if_missing(default_value);
|
||||
single_value_writers.push(fast_field_writer);
|
||||
}
|
||||
Some(Cardinality::MultiValues) => {
|
||||
let fast_field_writer = MultiValuedFastFieldWriter::new(
|
||||
field,
|
||||
FastFieldType::Numeric,
|
||||
Some(options.get_precision()),
|
||||
);
|
||||
multi_values_writers.push(fast_field_writer);
|
||||
}
|
||||
None => {}
|
||||
},
|
||||
FieldType::Facet(_) => {
|
||||
let fast_field_writer =
|
||||
MultiValuedFastFieldWriter::new(field, FastFieldType::Facet);
|
||||
MultiValuedFastFieldWriter::new(field, FastFieldType::Facet, None);
|
||||
term_id_writers.push(fast_field_writer);
|
||||
}
|
||||
FieldType::Str(_) if field_entry.is_fast() => {
|
||||
let fast_field_writer =
|
||||
MultiValuedFastFieldWriter::new(field, FastFieldType::String);
|
||||
MultiValuedFastFieldWriter::new(field, FastFieldType::String, None);
|
||||
term_id_writers.push(fast_field_writer);
|
||||
}
|
||||
FieldType::Bytes(bytes_option) => {
|
||||
@@ -75,10 +103,27 @@ impl FastFieldsWriter {
|
||||
bytes_value_writers.push(fast_field_writer);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
FieldType::Ip(opt) => {
|
||||
if opt.is_fast() {
|
||||
match opt.get_fastfield_cardinality() {
|
||||
Some(Cardinality::SingleValue) => {
|
||||
let fast_field_writer = U128FastFieldWriter::new(field);
|
||||
u128_value_writers.push(fast_field_writer);
|
||||
}
|
||||
Some(Cardinality::MultiValues) => {
|
||||
let fast_field_writer = U128MultiValueFastFieldWriter::new(field);
|
||||
u128_multi_value_writers.push(fast_field_writer);
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
FieldType::Str(_) | FieldType::JsonObject(_) => {}
|
||||
}
|
||||
}
|
||||
FastFieldsWriter {
|
||||
u128_value_writers,
|
||||
u128_multi_value_writers,
|
||||
term_id_writers,
|
||||
single_value_writers,
|
||||
multi_values_writers,
|
||||
@@ -107,6 +152,16 @@ impl FastFieldsWriter {
|
||||
.iter()
|
||||
.map(|w| w.mem_usage())
|
||||
.sum::<usize>()
|
||||
+ self
|
||||
.u128_value_writers
|
||||
.iter()
|
||||
.map(|w| w.mem_usage())
|
||||
.sum::<usize>()
|
||||
+ self
|
||||
.u128_multi_value_writers
|
||||
.iter()
|
||||
.map(|w| w.mem_usage())
|
||||
.sum::<usize>()
|
||||
}
|
||||
|
||||
/// Get the `FastFieldWriter` associated to a field.
|
||||
@@ -168,7 +223,6 @@ impl FastFieldsWriter {
|
||||
.iter_mut()
|
||||
.find(|field_writer| field_writer.field() == field)
|
||||
}
|
||||
|
||||
/// Indexes all of the fastfields of a new document.
|
||||
pub fn add_document(&mut self, doc: &Document) {
|
||||
for field_writer in &mut self.term_id_writers {
|
||||
@@ -183,6 +237,12 @@ impl FastFieldsWriter {
|
||||
for field_writer in &mut self.bytes_value_writers {
|
||||
field_writer.add_document(doc);
|
||||
}
|
||||
for field_writer in &mut self.u128_value_writers {
|
||||
field_writer.add_document(doc);
|
||||
}
|
||||
for field_writer in &mut self.u128_multi_value_writers {
|
||||
field_writer.add_document(doc);
|
||||
}
|
||||
}
|
||||
|
||||
/// Serializes all of the `FastFieldWriter`s by pushing them in
|
||||
@@ -208,6 +268,129 @@ impl FastFieldsWriter {
|
||||
for field_writer in &self.bytes_value_writers {
|
||||
field_writer.serialize(serializer, doc_id_map)?;
|
||||
}
|
||||
for field_writer in &self.u128_value_writers {
|
||||
field_writer.serialize(serializer, doc_id_map)?;
|
||||
}
|
||||
for field_writer in &self.u128_multi_value_writers {
|
||||
field_writer.serialize(serializer, doc_id_map)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Fast field writer for u128 values.
|
||||
/// The fast field writer just keeps the values in memory.
|
||||
///
|
||||
/// Only when the segment writer can be closed and
|
||||
/// persisted on disc, the fast field writer is
|
||||
/// sent to a `FastFieldSerializer` via the `.serialize(...)`
|
||||
/// method.
|
||||
///
|
||||
/// We cannot serialize earlier as the values are
|
||||
/// compressed to a compact number space and the number of
|
||||
/// bits required for bitpacking can only been known once
|
||||
/// we have seen all of the values.
|
||||
pub struct U128FastFieldWriter {
|
||||
field: Field,
|
||||
vals: Vec<u128>,
|
||||
val_count: u32,
|
||||
|
||||
null_values: RoaringBitmap,
|
||||
}
|
||||
|
||||
impl U128FastFieldWriter {
|
||||
/// Creates a new `U128FastFieldWriter`
|
||||
pub fn new(field: Field) -> Self {
|
||||
Self {
|
||||
field,
|
||||
vals: vec![],
|
||||
val_count: 0,
|
||||
null_values: RoaringBitmap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// The memory used (including children).
|
||||
pub fn mem_usage(&self) -> usize {
|
||||
self.vals.len() * 16
|
||||
}
|
||||
|
||||
/// Records a new value.
///
/// The n-th value being recorded is implicitly
/// associated to the document with `DocId` n.
/// (Well, `n-1` actually, because of 0-indexing.)
|
||||
pub fn add_val(&mut self, val: u128) {
|
||||
self.vals.push(val);
|
||||
}
|
||||
|
||||
/// Extracts the fast field value from the document
/// (or uses the default value) and records it.
///
/// Extracts the value associated to the fast field for
/// this document.
|
||||
pub fn add_document(&mut self, doc: &Document) {
|
||||
match doc.get_first(self.field) {
|
||||
Some(v) => {
|
||||
let ip_addr = v.as_ip().unwrap();
|
||||
let value = ip_to_u128(ip_addr);
|
||||
self.add_val(value);
|
||||
}
|
||||
None => {
|
||||
self.null_values.insert(self.val_count as u32);
|
||||
}
|
||||
};
|
||||
self.val_count += 1;
|
||||
}
|
||||
|
||||
/// Push the fast fields value to the `FastFieldWriter`.
|
||||
pub fn serialize(
|
||||
&self,
|
||||
serializer: &mut CompositeFastFieldSerializer,
|
||||
doc_id_map: Option<&DocIdMapping>,
|
||||
) -> io::Result<()> {
|
||||
let mut field_write = serializer.get_field_writer(self.field, 0);
|
||||
let compressor = IntervalCompressor::from_vals(self.vals.to_vec());
|
||||
|
||||
let mut val_idx = 0;
|
||||
let mut get_val = |idx| {
|
||||
if self.null_values.contains(idx as u32) {
|
||||
compressor.null_value
|
||||
} else {
|
||||
let val = self.vals[val_idx];
|
||||
val_idx += 1;
|
||||
val
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(doc_id_map) = doc_id_map {
|
||||
// To get the actual value, we could materialize the vec with u128 including nulls, but
// that could cost a lot of memory. Instead we just compute the index of
// the values.
|
||||
let mut idx_to_val_idx = vec![];
|
||||
idx_to_val_idx.resize(self.val_count as usize, 0);
|
||||
|
||||
let mut val_idx = 0;
|
||||
for idx in 0..self.val_count {
|
||||
if !self.null_values.contains(idx as u32) {
|
||||
idx_to_val_idx[idx as usize] = val_idx as u32;
|
||||
val_idx += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let iter = doc_id_map.iter_old_doc_ids().map(|idx| {
|
||||
if self.null_values.contains(idx as u32) {
|
||||
compressor.null_value
|
||||
} else {
|
||||
self.vals[idx_to_val_idx[idx as usize] as usize]
|
||||
}
|
||||
});
|
||||
compressor.compress_into(iter, &mut field_write)?;
|
||||
} else {
|
||||
let iter = (0..self.val_count).map(&mut get_val);
|
||||
compressor.compress_into(iter, &mut field_write)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -229,6 +412,7 @@ impl FastFieldsWriter {
|
||||
/// using `common::i64_to_u64` and `common::f64_to_u64`.
|
||||
pub struct IntFastFieldWriter {
|
||||
field: Field,
|
||||
precision_opt: Option<DatePrecision>,
|
||||
vals: BlockedBitpacker,
|
||||
val_count: usize,
|
||||
val_if_missing: u64,
|
||||
@@ -238,13 +422,14 @@ pub struct IntFastFieldWriter {
|
||||
|
||||
impl IntFastFieldWriter {
|
||||
/// Creates a new `IntFastFieldWriter`
|
||||
pub fn new(field: Field) -> IntFastFieldWriter {
|
||||
pub fn new(field: Field, precision_opt: Option<DatePrecision>) -> IntFastFieldWriter {
|
||||
IntFastFieldWriter {
|
||||
field,
|
||||
precision_opt,
|
||||
vals: BlockedBitpacker::new(),
|
||||
val_count: 0,
|
||||
val_if_missing: 0u64,
|
||||
val_min: u64::max_value(),
|
||||
val_min: u64::MAX,
|
||||
val_max: 0,
|
||||
}
|
||||
}
|
||||
@@ -254,7 +439,7 @@ impl IntFastFieldWriter {
|
||||
self.vals.mem_usage()
|
||||
}
|
||||
|
||||
/// Returns the field that this writer is targetting.
|
||||
/// Returns the field that this writer is targeting.
|
||||
pub fn field(&self) -> Field {
|
||||
self.field
|
||||
}
|
||||
@@ -304,7 +489,13 @@ impl IntFastFieldWriter {
|
||||
pub fn add_document(&mut self, doc: &Document) {
|
||||
match doc.get_first(self.field) {
|
||||
Some(v) => {
|
||||
self.add_val(super::value_to_u64(v));
|
||||
let value = match (self.precision_opt, v) {
|
||||
(Some(precision), Value::Date(date_val)) => {
|
||||
date_val.truncate(precision).to_u64()
|
||||
}
|
||||
_ => super::value_to_u64(v),
|
||||
};
|
||||
self.add_val(value);
|
||||
}
|
||||
None => {
|
||||
self.add_val(self.val_if_missing);
|
||||
@@ -340,23 +531,25 @@ impl IntFastFieldWriter {
|
||||
};
|
||||
|
||||
if let Some(doc_id_map) = doc_id_map {
|
||||
let iter = doc_id_map
|
||||
.iter_old_doc_ids()
|
||||
.map(|doc_id| self.vals.get(doc_id as usize));
|
||||
let iter_gen = || {
|
||||
doc_id_map
|
||||
.iter_old_doc_ids()
|
||||
.map(|doc_id| self.vals.get(doc_id as usize))
|
||||
};
|
||||
serializer.create_auto_detect_u64_fast_field(
|
||||
self.field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter.clone(),
|
||||
iter,
|
||||
iter_gen,
|
||||
)?;
|
||||
} else {
|
||||
let iter_gen = || self.vals.iter();
|
||||
|
||||
serializer.create_auto_detect_u64_fast_field(
|
||||
self.field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
self.vals.iter(),
|
||||
self.vals.iter(),
|
||||
iter_gen,
|
||||
)?;
|
||||
};
|
||||
Ok(())
|
||||
|
||||
@@ -294,7 +294,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_u32_max() {
|
||||
assert_eq!(fieldnorm_to_id(u32::max_value()), u8::max_value());
|
||||
assert_eq!(fieldnorm_to_id(u32::MAX), u8::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
//! a given Field of a given document.
|
||||
//!
|
||||
//! This metric is important to compute the score of a
|
||||
//! document : a document having a query word in one its short fields
|
||||
//! document: a document having a query word in one of its short fields
|
||||
//! (e.g. title) is likely to be more relevant than in one of its longer field
|
||||
//! (e.g. body).
|
||||
//!
|
||||
//! It encodes `fieldnorm` on one byte with some precision loss,
|
||||
//! using the exact same scheme as Lucene. Each value is place on a log-scale
|
||||
//! using the exact same scheme as Lucene. Each value is placed on a log-scale
|
||||
//! that takes values from `0` to `255`.
|
||||
//!
|
||||
//! A value on this scale is identified by a `fieldnorm_id`.
|
||||
@@ -112,7 +112,7 @@ mod tests {
|
||||
Term::from_field_text(text, "hello"),
|
||||
IndexRecordOption::WithFreqs,
|
||||
);
|
||||
let weight = query.weight(&*searcher, true)?;
|
||||
let weight = query.weight(&searcher, true)?;
|
||||
let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32)?;
|
||||
assert_eq!(scorer.doc(), 0);
|
||||
assert!((scorer.score() - 0.22920431).abs() < 0.001f32);
|
||||
@@ -141,7 +141,7 @@ mod tests {
|
||||
Term::from_field_text(text, "hello"),
|
||||
IndexRecordOption::WithFreqs,
|
||||
);
|
||||
let weight = query.weight(&*searcher, true)?;
|
||||
let weight = query.weight(&searcher, true)?;
|
||||
let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32)?;
|
||||
assert_eq!(scorer.doc(), 0);
|
||||
assert!((scorer.score() - 0.22920431).abs() < 0.001f32);
|
||||
|
||||
@@ -40,25 +40,17 @@ impl FieldNormReaders {
|
||||
pub fn space_usage(&self) -> PerFieldSpaceUsage {
|
||||
self.data.space_usage()
|
||||
}
|
||||
|
||||
/// Returns a handle to inner file
|
||||
pub fn get_inner_file(&self) -> Arc<CompositeFile> {
|
||||
self.data.clone()
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads the fieldnorm associated to a document.
|
||||
/// The fieldnorm represents the length associated to
|
||||
///
|
||||
/// The [fieldnorm](FieldNormReader::fieldnorm) represents the length associated to
|
||||
/// a given Field of a given document.
|
||||
///
|
||||
/// This metric is important to compute the score of a
|
||||
/// document: a document having a query word in one of its short fields
/// (e.g. title) is likely to be more relevant than in one of its longer fields
|
||||
/// (e.g. body).
|
||||
///
|
||||
/// tantivy encodes `fieldnorm` on one byte with some precision loss,
|
||||
/// using the same scheme as Lucene. Each value is placed on a log-scale
|
||||
/// that takes values from `0` to `255`.
|
||||
///
|
||||
/// A value on this scale is identified by a `fieldnorm_id`.
|
||||
/// Apart from compression, this scale also makes it possible to
|
||||
/// precompute computationally expensive functions of the fieldnorm
|
||||
/// in a very short array.
|
||||
#[derive(Clone)]
|
||||
pub struct FieldNormReader(ReaderImplEnum);
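The log-scale `fieldnorm_id` makes it cheap to precompute any expensive per-length function once into a 256-entry table. A minimal sketch of that idea (not tantivy's actual code; `id_to_fieldnorm` stands in for whatever decoding function maps an id back to an approximate length, and the BM25-style constant is only illustrative):

```rust
/// Precompute an expensive function of the fieldnorm for all 256 possible
/// `fieldnorm_id`s, so that scoring only needs an array lookup per hit.
fn precompute_norm_cache(avg_fieldnorm: f32, id_to_fieldnorm: impl Fn(u8) -> u32) -> [f32; 256] {
    let mut cache = [0.0f32; 256];
    for id in 0u8..=255u8 {
        let fieldnorm = id_to_fieldnorm(id) as f32;
        // Example: a BM25-like length normalization factor with b = 0.75.
        cache[id as usize] = 1.0 / (0.25 + 0.75 * fieldnorm / avg_fieldnorm);
    }
    cache
}
```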
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
|
||||
assert!(searcher.segment_readers().len() < 20);
|
||||
assert_eq!(searcher.num_docs() as usize, vals.len());
|
||||
for segment_reader in searcher.segment_readers() {
|
||||
let store_reader = segment_reader.get_store_reader()?;
|
||||
let store_reader = segment_reader.get_store_reader(1)?;
|
||||
for doc_id in 0..segment_reader.max_doc() {
|
||||
let _doc = store_reader.get(doc_id)?;
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ pub const MARGIN_IN_BYTES: usize = 1_000_000;
|
||||
|
||||
// We impose the memory per thread to be at least 3 MB.
|
||||
pub const MEMORY_ARENA_NUM_BYTES_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
|
||||
pub const MEMORY_ARENA_NUM_BYTES_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;
|
||||
pub const MEMORY_ARENA_NUM_BYTES_MAX: usize = u32::MAX as usize - MARGIN_IN_BYTES;
|
||||
|
||||
// We impose the number of index writer threads to be at most this.
|
||||
pub const MAX_NUM_THREAD: usize = 8;
|
||||
@@ -158,9 +158,9 @@ pub(crate) fn advance_deletes(
|
||||
if num_deleted_docs > num_deleted_docs_before {
|
||||
// There are new deletes. We need to write a new delete file.
|
||||
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
|
||||
let mut delete_file = segment.open_write(SegmentComponent::Delete)?;
|
||||
write_alive_bitset(&alive_bitset, &mut delete_file)?;
|
||||
delete_file.terminate()?;
|
||||
let mut alive_doc_file = segment.open_write(SegmentComponent::Delete)?;
|
||||
write_alive_bitset(&alive_bitset, &mut alive_doc_file)?;
|
||||
alive_doc_file.terminate()?;
|
||||
}
|
||||
|
||||
segment_entry.set_meta(segment.meta().clone());
|
||||
@@ -776,6 +776,7 @@ impl Drop for IndexWriter {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::net::IpAddr;
|
||||
|
||||
use proptest::prelude::*;
|
||||
use proptest::prop_oneof;
|
||||
@@ -789,9 +790,10 @@ mod tests {
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::{QueryParser, TermQuery};
|
||||
use crate::schema::{
|
||||
self, Cardinality, Facet, FacetOptions, IndexRecordOption, NumericOptions,
|
||||
self, Cardinality, Facet, FacetOptions, IndexRecordOption, IpOptions, NumericOptions,
|
||||
TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
|
||||
};
|
||||
use crate::store::DOCSTORE_CACHE_CAPACITY;
|
||||
use crate::{DocAddress, Index, IndexSettings, IndexSortByField, Order, ReloadPolicy, Term};
|
||||
|
||||
const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \
|
||||
@@ -1383,8 +1385,14 @@ mod tests {
|
||||
force_end_merge: bool,
|
||||
) -> crate::Result<()> {
|
||||
let mut schema_builder = schema::Schema::builder();
|
||||
let ip_field = schema_builder.add_ip_field("ip", FAST | INDEXED | STORED);
|
||||
let ips_field = schema_builder.add_ip_field(
|
||||
"ips",
|
||||
IpOptions::default().set_fast(Cardinality::MultiValues),
|
||||
);
|
||||
let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
|
||||
let bytes_field = schema_builder.add_bytes_field("bytes", FAST | INDEXED | STORED);
|
||||
let bool_field = schema_builder.add_bool_field("bool", FAST | INDEXED | STORED);
|
||||
let text_field = schema_builder.add_text_field(
|
||||
"text_field",
|
||||
TextOptions::default()
|
||||
@@ -1403,6 +1411,12 @@ mod tests {
|
||||
.set_fast(Cardinality::MultiValues)
|
||||
.set_stored(),
|
||||
);
|
||||
let multi_bools = schema_builder.add_bool_field(
|
||||
"multi_bools",
|
||||
NumericOptions::default()
|
||||
.set_fast(Cardinality::MultiValues)
|
||||
.set_stored(),
|
||||
);
|
||||
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
|
||||
let schema = schema_builder.build();
|
||||
let settings = if sort_index {
|
||||
@@ -1431,14 +1445,37 @@ mod tests {
|
||||
match op {
|
||||
IndexingOp::AddDoc { id } => {
|
||||
let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
|
||||
index_writer.add_document(doc!(id_field=>id,
|
||||
bytes_field => id.to_le_bytes().as_slice(),
|
||||
multi_numbers=> id,
|
||||
multi_numbers => id,
|
||||
text_field => id.to_string(),
|
||||
facet_field => facet,
|
||||
large_text_field=> LOREM
|
||||
))?;
|
||||
let ip_from_id = IpAddr::from((id as u128).to_be_bytes());
|
||||
|
||||
if id % 3 == 0 {
|
||||
// every 3rd doc has no ip field
|
||||
index_writer.add_document(doc!(id_field=>id,
|
||||
bytes_field => id.to_le_bytes().as_slice(),
|
||||
multi_numbers=> id,
|
||||
multi_numbers => id,
|
||||
bool_field => (id % 2u64) != 0,
|
||||
multi_bools => (id % 2u64) != 0,
|
||||
multi_bools => (id % 2u64) == 0,
|
||||
text_field => id.to_string(),
|
||||
facet_field => facet,
|
||||
large_text_field=> LOREM
|
||||
))?;
|
||||
} else {
|
||||
index_writer.add_document(doc!(id_field=>id,
|
||||
bytes_field => id.to_le_bytes().as_slice(),
|
||||
ip_field => ip_from_id,
|
||||
ips_field => ip_from_id,
|
||||
ips_field => ip_from_id,
|
||||
multi_numbers=> id,
|
||||
multi_numbers => id,
|
||||
bool_field => (id % 2u64) != 0,
|
||||
multi_bools => (id % 2u64) != 0,
|
||||
multi_bools => (id % 2u64) == 0,
|
||||
text_field => id.to_string(),
|
||||
facet_field => facet,
|
||||
large_text_field=> LOREM
|
||||
))?;
|
||||
}
|
||||
}
|
||||
IndexingOp::DeleteDoc { id } => {
|
||||
index_writer.delete_term(Term::from_field_u64(id_field, id));
|
||||
@@ -1497,47 +1534,104 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let (expected_ids_and_num_occurences, deleted_ids) = expected_ids(ops);
|
||||
let num_docs_expected = expected_ids_and_num_occurences
|
||||
let (expected_ids_and_num_occurrences, deleted_ids) = expected_ids(ops);
|
||||
let num_docs_expected = expected_ids_and_num_occurrences
|
||||
.iter()
|
||||
.map(|(_, id_occurences)| *id_occurences as usize)
|
||||
.map(|(_, id_occurrences)| *id_occurrences as usize)
|
||||
.sum::<usize>();
|
||||
assert_eq!(searcher.num_docs() as usize, num_docs_expected);
|
||||
assert_eq!(old_searcher.num_docs() as usize, num_docs_expected);
|
||||
assert_eq!(
|
||||
ids_old_searcher,
|
||||
expected_ids_and_num_occurences
|
||||
expected_ids_and_num_occurrences
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>()
|
||||
);
|
||||
assert_eq!(
|
||||
ids,
|
||||
expected_ids_and_num_occurences
|
||||
expected_ids_and_num_occurrences
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>()
|
||||
);
|
||||
|
||||
// Check ip addr
|
||||
let ips: HashSet<Option<IpAddr>> = searcher
|
||||
.segment_readers()
|
||||
.iter()
|
||||
.flat_map(|segment_reader| {
|
||||
let ff_reader = segment_reader.fast_fields().ip_addr(ip_field).unwrap();
|
||||
segment_reader
|
||||
.doc_ids_alive()
|
||||
.map(move |doc| ff_reader.get_val(doc as u64))
|
||||
})
|
||||
.collect();
|
||||
|
||||
let expected_ips = expected_ids_and_num_occurrences
|
||||
.keys()
|
||||
.map(|id| {
|
||||
if id % 3 == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(IpAddr::from((*id as u128).to_be_bytes()))
|
||||
}
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
assert_eq!(ips, expected_ips);
|
||||
|
||||
let expected_ips = expected_ids_and_num_occurrences
|
||||
.keys()
|
||||
.filter_map(|id| {
|
||||
if id % 3 == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(IpAddr::from((*id as u128).to_be_bytes()))
|
||||
}
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
let ips: HashSet<IpAddr> = searcher
|
||||
.segment_readers()
|
||||
.iter()
|
||||
.flat_map(|segment_reader| {
|
||||
let ff_reader = segment_reader.fast_fields().ip_addrs(ips_field).unwrap();
|
||||
segment_reader.doc_ids_alive().flat_map(move |doc| {
|
||||
let mut vals = vec![];
|
||||
ff_reader.get_vals(doc, &mut vals);
|
||||
vals
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(ips, expected_ips);
|
||||
|
||||
// multivalue fast field tests
|
||||
for segment_reader in searcher.segment_readers().iter() {
|
||||
let ff_reader = segment_reader.fast_fields().u64s(multi_numbers).unwrap();
|
||||
let bool_ff_reader = segment_reader.fast_fields().bools(multi_bools).unwrap();
|
||||
for doc in segment_reader.doc_ids_alive() {
|
||||
let mut vals = vec![];
|
||||
ff_reader.get_vals(doc, &mut vals);
|
||||
assert_eq!(vals.len(), 2);
|
||||
assert_eq!(vals[0], vals[1]);
|
||||
assert!(expected_ids_and_num_occurences.contains_key(&vals[0]));
|
||||
|
||||
let mut bool_vals = vec![];
|
||||
bool_ff_reader.get_vals(doc, &mut bool_vals);
|
||||
assert_eq!(bool_vals.len(), 2);
|
||||
assert_ne!(bool_vals[0], bool_vals[1]);
|
||||
|
||||
assert!(expected_ids_and_num_occurrences.contains_key(&vals[0]));
|
||||
}
|
||||
}
|
||||
|
||||
// doc store tests
|
||||
for segment_reader in searcher.segment_readers().iter() {
|
||||
let store_reader = segment_reader.get_store_reader().unwrap();
|
||||
let store_reader = segment_reader
|
||||
.get_store_reader(DOCSTORE_CACHE_CAPACITY)
|
||||
.unwrap();
|
||||
// test store iterator
|
||||
for doc in store_reader.iter(segment_reader.alive_bitset()) {
|
||||
let id = doc.unwrap().get_first(id_field).unwrap().as_u64().unwrap();
|
||||
assert!(expected_ids_and_num_occurences.contains_key(&id));
|
||||
assert!(expected_ids_and_num_occurrences.contains_key(&id));
|
||||
}
|
||||
// test store random access
|
||||
for doc_id in segment_reader.doc_ids_alive() {
|
||||
@@ -1548,7 +1642,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.as_u64()
|
||||
.unwrap();
|
||||
assert!(expected_ids_and_num_occurences.contains_key(&id));
|
||||
assert!(expected_ids_and_num_occurrences.contains_key(&id));
|
||||
let id2 = store_reader
|
||||
.get(doc_id)
|
||||
.unwrap()
|
||||
@@ -1557,6 +1651,18 @@ mod tests {
|
||||
.as_u64()
|
||||
.unwrap();
|
||||
assert_eq!(id, id2);
|
||||
let bool = store_reader
|
||||
.get(doc_id)
|
||||
.unwrap()
|
||||
.get_first(bool_field)
|
||||
.unwrap()
|
||||
.as_bool()
|
||||
.unwrap();
|
||||
let doc = store_reader.get(doc_id).unwrap();
|
||||
let mut bool2 = doc.get_all(multi_bools);
|
||||
assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
|
||||
assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
|
||||
assert_eq!(None, bool2.next())
|
||||
}
|
||||
}
|
||||
// test search
|
||||
@@ -1572,7 +1678,7 @@ mod tests {
|
||||
top_docs.iter().map(|el| el.1).collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
for (existing_id, count) in expected_ids_and_num_occurences {
|
||||
for (existing_id, count) in expected_ids_and_num_occurrences {
|
||||
assert_eq!(do_search(&existing_id.to_string()).len() as u64, count);
|
||||
}
|
||||
for existing_id in deleted_ids {
|
||||
@@ -1599,6 +1705,31 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_minimal() {
|
||||
assert!(test_operation_strategy(
|
||||
&[
|
||||
IndexingOp::AddDoc { id: 23 },
|
||||
IndexingOp::AddDoc { id: 13 },
|
||||
IndexingOp::DeleteDoc { id: 13 }
|
||||
],
|
||||
true,
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
|
||||
assert!(test_operation_strategy(
|
||||
&[
|
||||
IndexingOp::AddDoc { id: 23 },
|
||||
IndexingOp::AddDoc { id: 13 },
|
||||
IndexingOp::DeleteDoc { id: 13 }
|
||||
],
|
||||
false,
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(20))]
|
||||
#[test]
|
||||
|
||||
@@ -8,7 +8,7 @@ use crate::schema::{Field, Type};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::{OffsetDateTime, UtcOffset};
|
||||
use crate::tokenizer::TextAnalyzer;
|
||||
use crate::{DateTime, DocId, Term};
|
||||
use crate::{DatePrecision, DateTime, DocId, Term};
|
||||
|
||||
/// This object is a map storing the last position for a given path for the current document
|
||||
/// being indexed.
|
||||
@@ -123,8 +123,7 @@ fn index_json_value<'a>(
|
||||
match json_value {
|
||||
serde_json::Value::Null => {}
|
||||
serde_json::Value::Bool(val_bool) => {
|
||||
let bool_u64 = if *val_bool { 1u64 } else { 0u64 };
|
||||
json_term_writer.set_fast_value(bool_u64);
|
||||
json_term_writer.set_fast_value(*val_bool);
|
||||
postings_writer.subscribe(doc, 0u32, json_term_writer.term(), ctx);
|
||||
}
|
||||
serde_json::Value::Number(number) => {
|
||||
@@ -220,6 +219,9 @@ pub(crate) fn convert_to_fast_value_and_get_term(
|
||||
if let Ok(f64_val) = str::parse::<f64>(phrase) {
|
||||
return Some(set_fastvalue_and_get_term(json_term_writer, f64_val));
|
||||
}
|
||||
if let Ok(bool_val) = str::parse::<bool>(phrase) {
|
||||
return Some(set_fastvalue_and_get_term(json_term_writer, bool_val));
|
||||
}
|
||||
None
|
||||
}
|
||||
// helper function to generate a Term from a json fastvalue
|
||||
@@ -321,9 +323,16 @@ impl<'a> JsonTermWriter<'a> {
|
||||
|
||||
pub fn set_fast_value<T: FastValue>(&mut self, val: T) {
|
||||
self.close_path_and_set_type(T::to_type());
|
||||
let value = if T::to_type() == Type::Date {
|
||||
DateTime::from_u64(val.to_u64())
|
||||
.truncate(DatePrecision::Seconds)
|
||||
.to_u64()
|
||||
} else {
|
||||
val.to_u64()
|
||||
};
|
||||
self.term_buffer
|
||||
.as_mut()
|
||||
.extend_from_slice(val.to_u64().to_be_bytes().as_slice());
|
||||
.extend_from_slice(value.to_be_bytes().as_slice());
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -434,6 +443,20 @@ mod tests {
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bool_term() {
|
||||
let field = Field::from_field_id(1);
|
||||
let mut term = Term::new();
|
||||
term.set_field(Type::Json, field);
|
||||
let mut json_writer = JsonTermWriter::wrap(&mut term);
|
||||
json_writer.push_path_segment("color");
|
||||
json_writer.set_fast_value(true);
|
||||
assert_eq!(
|
||||
json_writer.term().as_slice(),
|
||||
b"\x00\x00\x00\x01jcolor\x00o\x00\x00\x00\x00\x00\x00\x00\x01"
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_push_after_set_path_segment() {
|
||||
let field = Field::from_field_id(1);
|
||||
|
||||
@@ -9,7 +9,7 @@ pub struct MergeCandidate(pub Vec<SegmentId>);
|
||||
|
||||
/// The `MergePolicy` defines which segments should be merged.
|
||||
///
|
||||
/// Every time a the list of segments changes, the segment updater
|
||||
/// Every time the list of segments changes, the segment updater
|
||||
/// asks the merge policy if some segments should be merged.
|
||||
pub trait MergePolicy: marker::Send + marker::Sync + Debug {
|
||||
/// Given the list of segment metas, returns the list of merge candidates.
|
||||
|
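As a hedged illustration of the contract described above, a policy that never proposes a merge could look roughly like this. The `compute_merge_candidates` name, the `SegmentMeta` parameter and the module paths are recalled from tantivy's public API and may differ between versions; this is a sketch, not the crate's own code.

```rust
use tantivy::merge_policy::{MergeCandidate, MergePolicy}; // assumed module path
use tantivy::SegmentMeta; // assumed re-export

/// A policy that never asks the segment updater to merge anything.
#[derive(Debug, Clone, Default)]
struct NeverMerge;

impl MergePolicy for NeverMerge {
    fn compute_merge_candidates(&self, _segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
        // Returning no candidates leaves every segment as it is.
        Vec::new()
    }
}
```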
||||
@@ -2,6 +2,7 @@ use std::cmp;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use fastfield_codecs::ip_codec::{IntervalCompressor, IntervallDecompressor};
|
||||
use itertools::Itertools;
|
||||
use measure_time::debug_time;
|
||||
use tantivy_bitpacker::minmax;
|
||||
@@ -11,7 +12,8 @@ use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::fastfield::{
|
||||
AliveBitSet, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldDataAccess,
|
||||
FastFieldReader, FastFieldStats, MultiValueLength, MultiValuedFastFieldReader,
|
||||
FastFieldReader, FastFieldReaderCodecWrapperU128, FastFieldStats, MultiValueLength,
|
||||
MultiValuedFastFieldReader, MultiValuedU128FastFieldReader,
|
||||
};
|
||||
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
|
||||
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
|
||||
@@ -298,7 +300,16 @@ impl IndexMerger {
|
||||
FieldType::U64(ref options)
|
||||
| FieldType::I64(ref options)
|
||||
| FieldType::F64(ref options)
|
||||
| FieldType::Date(ref options) => match options.get_fastfield_cardinality() {
|
||||
| FieldType::Bool(ref options) => match options.get_fastfield_cardinality() {
|
||||
Some(Cardinality::SingleValue) => {
|
||||
self.write_single_fast_field(field, fast_field_serializer, doc_id_mapping)?;
|
||||
}
|
||||
Some(Cardinality::MultiValues) => {
|
||||
self.write_multi_fast_field(field, fast_field_serializer, doc_id_mapping)?;
|
||||
}
|
||||
None => {}
|
||||
},
|
||||
FieldType::Date(ref options) => match options.get_fastfield_cardinality() {
|
||||
Some(Cardinality::SingleValue) => {
|
||||
self.write_single_fast_field(field, fast_field_serializer, doc_id_mapping)?;
|
||||
}
|
||||
@@ -312,7 +323,25 @@ impl IndexMerger {
|
||||
self.write_bytes_fast_field(field, fast_field_serializer, doc_id_mapping)?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
FieldType::Ip(options) => match options.get_fastfield_cardinality() {
|
||||
Some(Cardinality::SingleValue) => {
|
||||
self.write_u128_single_fast_field(
|
||||
field,
|
||||
fast_field_serializer,
|
||||
doc_id_mapping,
|
||||
)?;
|
||||
}
|
||||
Some(Cardinality::MultiValues) => {
|
||||
self.write_u128_multi_fast_field(
|
||||
field,
|
||||
fast_field_serializer,
|
||||
doc_id_mapping,
|
||||
)?;
|
||||
}
|
||||
None => {}
|
||||
},
|
||||
|
||||
FieldType::JsonObject(_) | FieldType::Facet(_) | FieldType::Str(_) => {
|
||||
// We don't handle json fast field for the moment
|
||||
// They can be implemented using what is done
|
||||
// for facets in the future
|
||||
@@ -322,6 +351,114 @@ impl IndexMerger {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// used to merge `u128` multivalued fast fields.
|
||||
fn write_u128_multi_fast_field(
|
||||
&self,
|
||||
field: Field,
|
||||
fast_field_serializer: &mut CompositeFastFieldSerializer,
|
||||
doc_id_mapping: &SegmentDocIdMapping,
|
||||
) -> crate::Result<()> {
|
||||
let reader_ordinal_and_field_accessors = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|segment_reader| {
|
||||
let val_length_reader: MultiValuedU128FastFieldReader<u128> =
|
||||
segment_reader.fast_fields().u128s(field).expect(
|
||||
"Failed to find index for multivalued field. This is a bug in tantivy, \
|
||||
please report.",
|
||||
);
|
||||
(segment_reader, val_length_reader)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Self::write_1_n_fast_field_idx_generic(
|
||||
field,
|
||||
fast_field_serializer,
|
||||
doc_id_mapping,
|
||||
&reader_ordinal_and_field_accessors,
|
||||
)?;
|
||||
|
||||
let fast_field_readers = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|reader| {
|
||||
let u128_reader: MultiValuedU128FastFieldReader<u128> =
|
||||
reader.fast_fields().u128s(field).expect(
|
||||
"Failed to find a reader for single fast field. This is a tantivy bug and \
|
||||
it should never happen.",
|
||||
);
|
||||
u128_reader
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let compressor = {
|
||||
let vals = fast_field_readers
|
||||
.iter()
|
||||
.flat_map(|reader| reader.iter())
|
||||
.flatten()
|
||||
.collect::<Vec<u128>>();
|
||||
|
||||
IntervalCompressor::from_vals(vals)
|
||||
};
|
||||
|
||||
let iter = doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
|
||||
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
|
||||
let mut out = vec![];
|
||||
fast_field_reader.get_vals(*doc_id, &mut out);
|
||||
out.into_iter()
|
||||
});
|
||||
|
||||
let field_write = fast_field_serializer.get_field_writer(field, 1);
|
||||
|
||||
compressor.compress_into(iter, field_write)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// used to merge `u128` single fast fields.
|
||||
fn write_u128_single_fast_field(
|
||||
&self,
|
||||
field: Field,
|
||||
fast_field_serializer: &mut CompositeFastFieldSerializer,
|
||||
doc_id_mapping: &SegmentDocIdMapping,
|
||||
) -> crate::Result<()> {
|
||||
let fast_field_readers = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|reader| {
|
||||
let u128_reader: FastFieldReaderCodecWrapperU128<u128, IntervallDecompressor> =
|
||||
reader.fast_fields().u128(field).expect(
|
||||
"Failed to find a reader for single fast field. This is a tantivy bug and \
|
||||
it should never happen.",
|
||||
);
|
||||
u128_reader
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let compressor = {
|
||||
let vals = fast_field_readers
|
||||
.iter()
|
||||
.flat_map(|reader| reader.iter())
|
||||
.flatten()
|
||||
.collect::<Vec<u128>>();
|
||||
|
||||
IntervalCompressor::from_vals(vals)
|
||||
};
|
||||
|
||||
let iter = doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
|
||||
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
|
||||
fast_field_reader
|
||||
.get_val(*doc_id as u64)
|
||||
.unwrap_or(compressor.null_value)
|
||||
});
|
||||
|
||||
let field_write = fast_field_serializer.get_field_writer(field, 0);
|
||||
|
||||
compressor.compress_into(iter, field_write)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// used to merge both field norms and `u64/i64` single fast fields.
|
||||
fn write_single_fast_field(
|
||||
&self,
|
||||
@@ -376,20 +513,17 @@ impl IndexMerger {
|
||||
doc_id_mapping,
|
||||
fast_field_readers: &fast_field_readers,
|
||||
};
|
||||
let iter1 = doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
|
||||
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
|
||||
fast_field_reader.get(*doc_id)
|
||||
});
|
||||
let iter2 = doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
|
||||
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
|
||||
fast_field_reader.get(*doc_id)
|
||||
});
|
||||
let iter_gen = || {
|
||||
doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
|
||||
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
|
||||
fast_field_reader.get(*doc_id)
|
||||
})
|
||||
};
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field(
|
||||
field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter1,
|
||||
iter2,
|
||||
iter_gen,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
@@ -515,16 +649,16 @@ impl IndexMerger {
|
||||
// This is required by the bitpacker, as it needs to know
|
||||
// what should be the bit length to use for bitpacking.
|
||||
let mut num_docs = 0;
|
||||
for (reader, u64s_reader) in reader_and_field_accessors.iter() {
|
||||
for (reader, value_length_reader) in reader_and_field_accessors.iter() {
|
||||
if let Some(alive_bitset) = reader.alive_bitset() {
|
||||
num_docs += alive_bitset.num_alive_docs() as u64;
|
||||
for doc in reader.doc_ids_alive() {
|
||||
let num_vals = u64s_reader.get_len(doc) as u64;
|
||||
let num_vals = value_length_reader.get_len(doc) as u64;
|
||||
total_num_vals += num_vals;
|
||||
}
|
||||
} else {
|
||||
num_docs += reader.max_doc() as u64;
|
||||
total_num_vals += u64s_reader.get_total_len();
|
||||
total_num_vals += value_length_reader.get_total_len();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -551,12 +685,12 @@ impl IndexMerger {
|
||||
}
|
||||
offsets.push(offset);
|
||||
|
||||
let iter_gen = || offsets.iter().cloned();
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field(
|
||||
field,
|
||||
stats,
|
||||
&offsets[..],
|
||||
offsets.iter().cloned(),
|
||||
offsets.iter().cloned(),
|
||||
iter_gen,
|
||||
)?;
|
||||
Ok(offsets)
|
||||
}
|
||||
@@ -679,8 +813,8 @@ impl IndexMerger {
|
||||
let offsets =
|
||||
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
|
||||
|
||||
let mut min_value = u64::max_value();
|
||||
let mut max_value = u64::min_value();
|
||||
let mut min_value = u64::MAX;
|
||||
let mut max_value = u64::MIN;
|
||||
let mut num_vals = 0;
|
||||
|
||||
let mut vals = Vec::with_capacity(100);
|
||||
@@ -759,24 +893,19 @@ impl IndexMerger {
|
||||
fast_field_readers: &ff_readers,
|
||||
offsets,
|
||||
};
|
||||
let iter1 = doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
|
||||
let ff_reader = &ff_readers[*reader_ordinal as usize];
|
||||
let mut vals = vec![];
|
||||
ff_reader.get_vals(*doc_id, &mut vals);
|
||||
vals.into_iter()
|
||||
});
|
||||
let iter2 = doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
|
||||
let ff_reader = &ff_readers[*reader_ordinal as usize];
|
||||
let mut vals = vec![];
|
||||
ff_reader.get_vals(*doc_id, &mut vals);
|
||||
vals.into_iter()
|
||||
});
|
||||
let iter_gen = || {
|
||||
doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
|
||||
let ff_reader = &ff_readers[*reader_ordinal as usize];
|
||||
let mut vals = vec![];
|
||||
ff_reader.get_vals(*doc_id, &mut vals);
|
||||
vals.into_iter()
|
||||
})
|
||||
};
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
|
||||
field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter1,
|
||||
iter2,
|
||||
iter_gen,
|
||||
1,
|
||||
)?;
|
||||
|
||||
@@ -1029,18 +1158,21 @@ impl IndexMerger {
|
||||
debug_time!("write-storable-fields");
|
||||
debug!("write-storable-field");
|
||||
|
||||
let store_readers: Vec<_> = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|reader| reader.get_store_reader())
|
||||
.collect::<Result<_, _>>()?;
|
||||
let mut document_iterators: Vec<_> = store_readers
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, store)| store.iter_raw(self.readers[i].alive_bitset()))
|
||||
.collect();
|
||||
if !doc_id_mapping.is_trivial() {
|
||||
debug!("non-trivial-doc-id-mapping");
|
||||
|
||||
let store_readers: Vec<_> = self
|
||||
.readers
|
||||
.iter()
|
||||
.map(|reader| reader.get_store_reader(50))
|
||||
.collect::<Result<_, _>>()?;
|
||||
|
||||
let mut document_iterators: Vec<_> = store_readers
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, store)| store.iter_raw(self.readers[i].alive_bitset()))
|
||||
.collect();
|
||||
|
||||
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
|
||||
let doc_bytes_it = &mut document_iterators[*reader_ordinal as usize];
|
||||
if let Some(doc_bytes_res) = doc_bytes_it.next() {
|
||||
@@ -1057,7 +1189,7 @@ impl IndexMerger {
|
||||
} else {
|
||||
debug!("trivial-doc-id-mapping");
|
||||
for reader in &self.readers {
|
||||
let store_reader = reader.get_store_reader()?;
|
||||
let store_reader = reader.get_store_reader(1)?;
|
||||
if reader.has_deletes()
|
||||
// If there is not enough data in the store, we avoid stacking in order to
|
||||
// avoid creating many small blocks in the doc store. Once we have 5 full blocks,
|
||||
@@ -1073,14 +1205,14 @@ impl IndexMerger {
|
||||
//
|
||||
// take 7 in order to not walk over all checkpoints.
|
||||
|| store_reader.block_checkpoints().take(7).count() < 6
|
||||
|| store_reader.compressor() != store_writer.compressor()
|
||||
|| store_reader.decompressor() != store_writer.compressor().into()
|
||||
{
|
||||
for doc_bytes_res in store_reader.iter_raw(reader.alive_bitset()) {
|
||||
let doc_bytes = doc_bytes_res?;
|
||||
store_writer.store_bytes(&doc_bytes)?;
|
||||
}
|
||||
} else {
|
||||
store_writer.stack(&store_reader)?;
|
||||
store_writer.stack(store_reader)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ impl SegmentSerializer {
|
||||
let blocksize = segment.index().settings().docstore_blocksize;
|
||||
Ok(SegmentSerializer {
|
||||
segment,
|
||||
store_writer: StoreWriter::new(store_write, compressor, blocksize),
|
||||
store_writer: StoreWriter::new(store_write, compressor, blocksize)?,
|
||||
fast_field_serializer,
|
||||
fieldnorms_serializer: Some(fieldnorms_serializer),
|
||||
postings_serializer,
|
||||
|
||||
@@ -14,7 +14,7 @@ use crate::store::{StoreReader, StoreWriter};
|
||||
use crate::tokenizer::{
|
||||
BoxTokenStream, FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer,
|
||||
};
|
||||
use crate::{DocId, Document, Opstamp, SegmentComponent};
|
||||
use crate::{DatePrecision, DocId, Document, Opstamp, SegmentComponent};
|
||||
|
||||
/// Computes the initial size of the hash table.
|
||||
///
|
||||
@@ -248,7 +248,7 @@ impl SegmentWriter {
|
||||
FieldType::Date(_) => {
|
||||
for value in values {
|
||||
let date_val = value.as_date().ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_u64(date_val.to_u64());
|
||||
term_buffer.set_u64(date_val.truncate(DatePrecision::Seconds).to_u64());
|
||||
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
|
||||
}
|
||||
}
|
||||
@@ -266,6 +266,13 @@ impl SegmentWriter {
|
||||
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
|
||||
}
|
||||
}
|
||||
FieldType::Bool(_) => {
|
||||
for value in values {
|
||||
let bool_val = value.as_bool().ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_bool(bool_val);
|
||||
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
|
||||
}
|
||||
}
|
||||
FieldType::Bytes(_) => {
|
||||
for value in values {
|
||||
let bytes = value.as_bytes().ok_or_else(make_schema_error)?;
|
||||
@@ -287,6 +294,13 @@ impl SegmentWriter {
|
||||
ctx,
|
||||
)?;
|
||||
}
|
||||
FieldType::Ip(_) => {
|
||||
for value in values {
|
||||
let ip_val = value.as_ip().ok_or_else(make_schema_error)?;
|
||||
term_buffer.set_text(&ip_val.to_string());
|
||||
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -375,13 +389,14 @@ fn remap_and_write(
|
||||
let block_size = serializer.segment().index().settings().docstore_blocksize;
|
||||
let old_store_writer = std::mem::replace(
|
||||
&mut serializer.store_writer,
|
||||
StoreWriter::new(store_write, compressor, block_size),
|
||||
StoreWriter::new(store_write, compressor, block_size)?,
|
||||
);
|
||||
old_store_writer.close()?;
|
||||
let store_read = StoreReader::open(
|
||||
serializer
|
||||
.segment()
|
||||
.open_read(SegmentComponent::TempStore)?,
|
||||
50,
|
||||
)?;
|
||||
for old_doc_id in doc_id_map.iter_old_doc_ids() {
|
||||
let doc_bytes = store_read.get_document_bytes(old_doc_id)?;
|
||||
@@ -477,6 +492,7 @@ mod tests {
|
||||
r#"{
|
||||
"toto": "titi",
|
||||
"float": -0.2,
|
||||
"bool": true,
|
||||
"unsigned": 1,
|
||||
"signed": -2,
|
||||
"complexobject": {
|
||||
@@ -519,6 +535,13 @@ mod tests {
|
||||
let mut term_stream = term_dict.stream().unwrap();
|
||||
|
||||
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
|
||||
|
||||
json_term_writer.push_path_segment("bool");
|
||||
json_term_writer.set_fast_value(true);
|
||||
assert!(term_stream.advance());
|
||||
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||
|
||||
json_term_writer.pop_path_segment();
|
||||
json_term_writer.push_path_segment("complexobject");
|
||||
json_term_writer.push_path_segment("field.with.dot");
|
||||
json_term_writer.set_fast_value(1u64);
|
||||
|
||||
104
src/lib.rs
@@ -133,7 +133,7 @@ pub use time;
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};
|
||||
|
||||
/// A date/time value with second precision.
|
||||
/// A date/time value with microsecond precision.
|
||||
///
|
||||
/// This timestamp does not carry any explicit time zone information.
|
||||
/// Users are responsible for applying the provided conversion
|
||||
@@ -145,13 +145,30 @@ use crate::time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};
|
||||
/// to prevent unintended usage.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct DateTime {
|
||||
unix_timestamp: i64,
|
||||
// Timestamp in microseconds.
|
||||
pub(crate) timestamp_micros: i64,
|
||||
}
|
||||
|
||||
impl DateTime {
|
||||
/// Create new from UNIX timestamp
|
||||
pub const fn from_unix_timestamp(unix_timestamp: i64) -> Self {
|
||||
Self { unix_timestamp }
|
||||
/// Create new from UNIX timestamp in seconds
|
||||
pub const fn from_timestamp_secs(seconds: i64) -> Self {
|
||||
Self {
|
||||
timestamp_micros: seconds * 1_000_000,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new from UNIX timestamp in milliseconds
|
||||
pub const fn from_timestamp_millis(milliseconds: i64) -> Self {
|
||||
Self {
|
||||
timestamp_micros: milliseconds * 1_000,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new from UNIX timestamp in microseconds.
|
||||
pub const fn from_timestamp_micros(microseconds: i64) -> Self {
|
||||
Self {
|
||||
timestamp_micros: microseconds,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new from `OffsetDateTime`
|
||||
@@ -159,7 +176,8 @@ impl DateTime {
|
||||
/// The given date/time is converted to UTC and the actual
|
||||
/// time zone is discarded.
|
||||
pub const fn from_utc(dt: OffsetDateTime) -> Self {
|
||||
Self::from_unix_timestamp(dt.unix_timestamp())
|
||||
let timestamp_micros = dt.unix_timestamp() as i64 * 1_000_000 + dt.microsecond() as i64;
|
||||
Self { timestamp_micros }
|
||||
}
|
||||
|
||||
/// Create new from `PrimitiveDateTime`
|
||||
@@ -167,21 +185,30 @@ impl DateTime {
|
||||
/// Implicitly assumes that the given date/time is in UTC!
|
||||
/// Otherwise the original value must only be reobtained with
|
||||
/// [`Self::into_primitive()`].
|
||||
pub const fn from_primitive(dt: PrimitiveDateTime) -> Self {
|
||||
pub fn from_primitive(dt: PrimitiveDateTime) -> Self {
|
||||
Self::from_utc(dt.assume_utc())
|
||||
}
|
||||
|
||||
/// Convert to UNIX timestamp
|
||||
pub const fn into_unix_timestamp(self) -> i64 {
|
||||
let Self { unix_timestamp } = self;
|
||||
unix_timestamp
|
||||
/// Convert to UNIX timestamp in seconds.
|
||||
pub const fn into_timestamp_secs(self) -> i64 {
|
||||
self.timestamp_micros / 1_000_000
|
||||
}
|
||||
|
||||
/// Convert to UNIX timestamp in milliseconds.
|
||||
pub const fn into_timestamp_millis(self) -> i64 {
|
||||
self.timestamp_micros / 1_000
|
||||
}
|
||||
|
||||
/// Convert to UNIX timestamp in microseconds.
|
||||
pub const fn into_timestamp_micros(self) -> i64 {
|
||||
self.timestamp_micros
|
||||
}
|
||||
|
||||
/// Convert to UTC `OffsetDateTime`
|
||||
pub fn into_utc(self) -> OffsetDateTime {
|
||||
let Self { unix_timestamp } = self;
|
||||
let utc_datetime =
|
||||
OffsetDateTime::from_unix_timestamp(unix_timestamp).expect("valid UNIX timestamp");
|
||||
let timestamp_nanos = self.timestamp_micros as i128 * 1000;
|
||||
let utc_datetime = OffsetDateTime::from_unix_timestamp_nanos(timestamp_nanos)
|
||||
.expect("valid UNIX timestamp");
|
||||
debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
|
||||
utc_datetime
|
||||
}
|
||||
@@ -201,6 +228,18 @@ impl DateTime {
|
||||
debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
|
||||
PrimitiveDateTime::new(utc_datetime.date(), utc_datetime.time())
|
||||
}
|
||||
|
||||
/// Truncates the microseconds value to the corresponding precision.
|
||||
pub(crate) fn truncate(self, precision: DatePrecision) -> Self {
|
||||
let truncated_timestamp_micros = match precision {
|
||||
DatePrecision::Seconds => (self.timestamp_micros / 1_000_000) * 1_000_000,
|
||||
DatePrecision::Milliseconds => (self.timestamp_micros / 1_000) * 1_000,
|
||||
DatePrecision::Microseconds => self.timestamp_micros,
|
||||
};
|
||||
Self {
|
||||
timestamp_micros: truncated_timestamp_micros,
|
||||
}
|
||||
}
|
||||
}
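A small usage sketch of the new microsecond-precision `DateTime`, using only the constructors and conversions added in this branch (the `time` crate is re-exported by tantivy as `tantivy::time`):

```rust
use tantivy::time::OffsetDateTime;
use tantivy::DateTime;

fn main() {
    // 1_000_123 ms since the epoch -> 1_000 s and 1_000_123_000 µs.
    let dt = DateTime::from_timestamp_millis(1_000_123);
    assert_eq!(dt.into_timestamp_secs(), 1_000);
    assert_eq!(dt.into_timestamp_micros(), 1_000_123_000);

    // Round-trip through `OffsetDateTime`: seconds are preserved,
    // anything below microsecond precision is dropped.
    let now = OffsetDateTime::now_utc();
    let roundtrip = DateTime::from_utc(now).into_utc();
    assert_eq!(roundtrip.unix_timestamp(), now.unix_timestamp());
}
```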
|
||||
|
||||
impl fmt::Debug for DateTime {
|
||||
@@ -268,8 +307,7 @@ pub use crate::indexer::demuxer::*;
|
||||
pub use crate::indexer::operation::UserOperation;
|
||||
pub use crate::indexer::{merge_filtered_segments, merge_indices, IndexWriter, PreparedCommit};
|
||||
pub use crate::postings::Postings;
|
||||
pub use crate::reader::LeasedItem;
|
||||
pub use crate::schema::{Document, Term};
|
||||
pub use crate::schema::{DateOptions, DatePrecision, Document, Term};
|
||||
|
||||
/// Index format version.
|
||||
const INDEX_FORMAT_VERSION: u32 = 4;
|
||||
@@ -385,6 +423,7 @@ pub mod tests {
|
||||
use rand::distributions::{Bernoulli, Uniform};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use time::OffsetDateTime;
|
||||
|
||||
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
||||
use crate::core::SegmentReader;
|
||||
@@ -393,7 +432,7 @@ pub mod tests {
|
||||
use crate::merge_policy::NoMergePolicy;
|
||||
use crate::query::BooleanQuery;
|
||||
use crate::schema::*;
|
||||
use crate::{DocAddress, Index, Postings, ReloadPolicy};
|
||||
use crate::{DateTime, DocAddress, Index, Postings, ReloadPolicy};
|
||||
|
||||
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
|
||||
let mut buffer = Vec::new();
|
||||
@@ -1102,4 +1141,35 @@ pub mod tests {
|
||||
assert!(index.validate_checksum()?.is_empty());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_datetime() {
|
||||
let now = OffsetDateTime::now_utc();
|
||||
|
||||
let dt = DateTime::from_utc(now).into_utc();
|
||||
assert_eq!(dt.to_ordinal_date(), now.to_ordinal_date());
|
||||
assert_eq!(dt.to_hms_micro(), now.to_hms_micro());
|
||||
// We don't store nanosecond level precision.
|
||||
assert_eq!(dt.nanosecond(), now.microsecond() * 1000);
|
||||
|
||||
let dt = DateTime::from_timestamp_secs(now.unix_timestamp()).into_utc();
|
||||
assert_eq!(dt.to_ordinal_date(), now.to_ordinal_date());
|
||||
assert_eq!(dt.to_hms(), now.to_hms());
|
||||
// Constructed from a second precision.
|
||||
assert_ne!(dt.to_hms_micro(), now.to_hms_micro());
|
||||
|
||||
let dt =
|
||||
DateTime::from_timestamp_micros((now.unix_timestamp_nanos() / 1_000) as i64).into_utc();
|
||||
assert_eq!(dt.to_ordinal_date(), now.to_ordinal_date());
|
||||
assert_eq!(dt.to_hms_micro(), now.to_hms_micro());
|
||||
|
||||
let dt_from_ts_nanos =
|
||||
OffsetDateTime::from_unix_timestamp_nanos(18446744073709551615i128).unwrap();
|
||||
let offset_dt = DateTime::from_utc(dt_from_ts_nanos).into_utc();
|
||||
assert_eq!(
|
||||
dt_from_ts_nanos.to_ordinal_date(),
|
||||
offset_dt.to_ordinal_date()
|
||||
);
|
||||
assert_eq!(dt_from_ts_nanos.to_hms_micro(), offset_dt.to_hms_micro());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
//! Tantivy can (if instructed to do so in the schema) store the term positions in a given field.
|
||||
//! This positions are expressed as token ordinal. For instance,
|
||||
//! This position is expressed as token ordinal. For instance,
|
||||
//! In "The beauty and the beast", the term "the" appears in position 0 and position 4.
|
||||
//! This information is useful to run phrase queries.
|
||||
//!
|
||||
//! The `SegmentComponent::POSITIONS` file contains all of the bitpacked positions delta,
|
||||
//! for all terms of a given field, one term after the other.
|
||||
//! The [position](../enum.SegmentComponent.html#variant.Positions) file contains all of the
|
||||
//! bitpacked positions delta, for all terms of a given field, one term after the other.
|
||||
//!
|
||||
//! Each terms is encoded independently.
|
||||
//! Like for positing lists, tantivy rely on simd bitpacking to encode the positions delta in blocks
|
||||
//! of 128 deltas. Because we rarely have a multiple of 128, a final block may encode the remaining
|
||||
//! values variable byte encoding.
|
||||
//! Each term is encoded independently.
|
||||
//! Like for posting lists, tantivy relies on simd bitpacking to encode the positions delta in
|
||||
//! blocks of 128 deltas. Because we rarely have a multiple of 128, a final block may encode the
|
||||
//! remaining values variable byte encoding.
|
||||
//!
|
||||
//! In order to make reading possible, the term delta positions first encodes the number of
|
||||
//! bitpacked blocks, then the bitwidth for each block, then the actual bitpacked block and finally
|
||||
|
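A tiny sketch of the delta step described above (illustrative only, not the crate's serializer): positions are turned into deltas before being bitpacked in blocks of 128, with any remainder written with variable-byte encoding.

```rust
fn delta_encode(positions: &[u32]) -> Vec<u32> {
    let mut prev = 0u32;
    positions
        .iter()
        .map(|&pos| {
            let delta = pos - prev; // positions are non-decreasing token ordinals
            prev = pos;
            delta
        })
        .collect()
}

fn main() {
    // "the" at token ordinals 0 and 4 -> deltas [0, 4].
    assert_eq!(delta_encode(&[0, 4]), vec![0, 4]);
    assert_eq!(delta_encode(&[3, 7, 12]), vec![3, 4, 5]);
}
```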
||||
@@ -6,11 +6,11 @@ use crate::directory::OwnedBytes;
|
||||
use crate::positions::COMPRESSION_BLOCK_SIZE;
|
||||
use crate::postings::compression::{BlockDecoder, VIntDecoder};
|
||||
|
||||
/// When accessing the position of a term, we get a positions_idx from the `Terminfo`.
|
||||
/// This means we need to skip to the `nth` positions efficiently.
|
||||
/// When accessing the positions of a term, we get a positions_idx from the `Terminfo`.
|
||||
/// This means we need to skip to the `nth` position efficiently.
|
||||
///
|
||||
/// Blocks are compressed using bitpacking, so `skip_read` contains the number of bits
|
||||
/// (values can go from 0bit to 32 bits) required to decompress every block.
|
||||
/// (values can go from 0 to 32 bits) required to decompress every block.
|
||||
///
|
||||
/// A given block obviously takes `(128 x num_bit_for_the_block / num_bits_in_a_byte)`,
|
||||
/// so skipping a block without decompressing it is just a matter of advancing that many
|
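The skip arithmetic referred to here is simple enough to spell out; a sketch (not tantivy's code) of how many bytes a bitpacked block occupies and how a read offset advances past whole blocks without decompressing them:

```rust
const COMPRESSION_BLOCK_SIZE: usize = 128;

/// A block of 128 deltas encoded with `num_bits` bits per value.
fn bitpacked_block_num_bytes(num_bits: u8) -> usize {
    COMPRESSION_BLOCK_SIZE * num_bits as usize / 8
}

/// Advance a byte offset past `bit_widths.len()` blocks.
fn skip_blocks(mut offset: usize, bit_widths: &[u8]) -> usize {
    for &num_bits in bit_widths {
        offset += bitpacked_block_num_bytes(num_bits);
    }
    offset
}
```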
||||
@@ -47,7 +47,7 @@ impl PositionReader {
|
||||
bit_widths: bit_widths.clone(),
|
||||
positions: positions.clone(),
|
||||
block_decoder: BlockDecoder::default(),
|
||||
block_offset: std::i64::MAX as u64,
|
||||
block_offset: i64::MAX as u64,
|
||||
anchor_offset: 0u64,
|
||||
original_bit_widths: bit_widths,
|
||||
original_positions: positions,
|
||||
@@ -57,7 +57,7 @@ impl PositionReader {
|
||||
fn reset(&mut self) {
|
||||
self.positions = self.original_positions.clone();
|
||||
self.bit_widths = self.original_bit_widths.clone();
|
||||
self.block_offset = std::i64::MAX as u64;
|
||||
self.block_offset = i64::MAX as u64;
|
||||
self.anchor_offset = 0u64;
|
||||
}
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ impl<W: io::Write> PositionSerializer<W> {
|
||||
/// at this point.
|
||||
/// When called before writing the positions of a term, this value is used as
|
||||
/// start offset.
|
||||
/// When called after writing the positions of a term, this value is used as a
|
||||
/// When called after writing the positions of a term, this value is used as
|
||||
/// end offset.
|
||||
pub fn written_bytes(&self) -> u64 {
|
||||
self.positions_wrt.written_bytes()
|
||||
@@ -74,7 +74,7 @@ impl<W: io::Write> PositionSerializer<W> {
|
||||
self.block.clear();
|
||||
}
|
||||
|
||||
/// Close the positions for the given term.
|
||||
/// Close the positions for the current term.
|
||||
pub fn close_term(&mut self) -> io::Result<()> {
|
||||
self.flush_block();
|
||||
VInt(self.bit_widths.len() as u64).serialize(&mut self.positions_wrt)?;
|
||||
|
||||
@@ -99,7 +99,7 @@ impl BlockSegmentPostings {
|
||||
|
||||
let mut block_segment_postings = BlockSegmentPostings {
|
||||
doc_decoder: BlockDecoder::with_val(TERMINATED),
|
||||
loaded_offset: std::usize::MAX,
|
||||
loaded_offset: usize::MAX,
|
||||
freq_decoder: BlockDecoder::with_val(1),
|
||||
freq_reading_option,
|
||||
block_max_score_cache: None,
|
||||
@@ -169,7 +169,7 @@ impl BlockSegmentPostings {
|
||||
split_into_skips_and_postings(doc_freq, postings_data)?;
|
||||
self.data = postings_data;
|
||||
self.block_max_score_cache = None;
|
||||
self.loaded_offset = std::usize::MAX;
|
||||
self.loaded_offset = usize::MAX;
|
||||
if let Some(skip_data) = skip_data_opt {
|
||||
self.skip_reader.reset(skip_data, doc_freq);
|
||||
} else {
|
||||
|
||||
@@ -47,8 +47,10 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
|
||||
FieldType::U64(_)
|
||||
| FieldType::I64(_)
|
||||
| FieldType::F64(_)
|
||||
| FieldType::Bool(_)
|
||||
| FieldType::Date(_)
|
||||
| FieldType::Bytes(_)
|
||||
| FieldType::Ip(_)
|
||||
| FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<NothingRecorder>::default()),
|
||||
FieldType::JsonObject(ref json_object_options) => {
|
||||
if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
|
||||
|
||||
@@ -82,9 +82,14 @@ pub(crate) fn serialize_postings(
|
||||
.collect();
|
||||
unordered_term_mappings.insert(field, mapping);
|
||||
}
|
||||
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {}
|
||||
FieldType::U64(_)
|
||||
| FieldType::I64(_)
|
||||
| FieldType::F64(_)
|
||||
| FieldType::Date(_)
|
||||
| FieldType::Bool(_) => {}
|
||||
FieldType::Bytes(_) => {}
|
||||
FieldType::JsonObject(_) => {}
|
||||
FieldType::Ip(_) => {} // TODO check
|
||||
}
|
||||
|
||||
let postings_writer = per_field_postings_writers.get_for_field(field);
|
||||
|
||||
@@ -92,7 +92,7 @@ impl Default for NothingRecorder {
|
||||
fn default() -> Self {
|
||||
NothingRecorder {
|
||||
stack: ExpUnrolledLinkedList::new(),
|
||||
current_doc: u32::max_value(),
|
||||
current_doc: u32::MAX,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -230,7 +230,7 @@ impl Default for TfAndPositionRecorder {
|
||||
fn default() -> Self {
|
||||
TfAndPositionRecorder {
|
||||
stack: ExpUnrolledLinkedList::new(),
|
||||
current_doc: u32::max_value(),
|
||||
current_doc: u32::MAX,
|
||||
term_doc_freq: 0u32,
|
||||
}
|
||||
}
|
||||
@@ -339,7 +339,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_vint_u32() {
|
||||
let mut buffer = vec![];
|
||||
let vals = [0, 1, 324_234_234, u32::max_value()];
|
||||
let vals = [0, 1, 324_234_234, u32::MAX];
|
||||
for &i in &vals {
|
||||
assert!(write_u32_vint(i, &mut buffer).is_ok());
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ use crate::{DocId, Score};
|
||||
/// * `close()`
|
||||
///
|
||||
/// Terms have to be pushed in a lexicographically-sorted order.
|
||||
/// Within a term, document have to be pushed in increasing order.
|
||||
/// Within a term, documents have to be pushed in increasing order.
|
||||
///
|
||||
/// A description of the serialization format is
|
||||
/// [available here](https://fulmicoton.gitbooks.io/tantivy-doc/content/inverted-index.html).
|
||||
@@ -55,7 +55,7 @@ pub struct InvertedIndexSerializer {
|
||||
}
|
||||
|
||||
impl InvertedIndexSerializer {
|
||||
/// Open a new `PostingsSerializer` for the given segment
|
||||
/// Open a new `InvertedIndexSerializer` for the given segment
|
||||
pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> {
|
||||
use crate::SegmentComponent::{Positions, Postings, Terms};
|
||||
let inv_index_serializer = InvertedIndexSerializer {
|
||||
@@ -187,7 +187,7 @@ impl<'a> FieldSerializer<'a> {
|
||||
Ok(term_ordinal)
|
||||
}
|
||||
|
||||
/// Serialize the information that a document contains the current term,
|
||||
/// Serialize the information that a document contains for the current term:
|
||||
/// its term frequency, and the position deltas.
|
||||
///
|
||||
/// At this point, the positions are already `delta-encoded`.
|
||||
@@ -207,7 +207,7 @@ impl<'a> FieldSerializer<'a> {
|
||||
|
||||
/// Finish the serialization for this term postings.
|
||||
///
|
||||
/// If the current block is incomplete, it need to be encoded
|
||||
/// If the current block is incomplete, it needs to be encoded
|
||||
/// using `VInt` encoding.
|
||||
pub fn close_term(&mut self) -> io::Result<()> {
|
||||
fail_point!("FieldSerializer::close_term", |msg: Option<String>| {
|
||||
@@ -231,7 +231,7 @@ impl<'a> FieldSerializer<'a> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Closes the current current field.
|
||||
/// Closes the current field.
|
||||
pub fn close(mut self) -> io::Result<()> {
|
||||
self.close_term()?;
|
||||
if let Some(positions_serializer) = self.positions_serializer_opt {
|
||||
|
||||
@@ -250,7 +250,7 @@ impl SkipReader {
|
||||
BlockInfo::VInt { num_docs } => {
|
||||
debug_assert_eq!(num_docs, self.remaining_docs);
|
||||
self.remaining_docs = 0;
|
||||
self.byte_offset = std::usize::MAX;
|
||||
self.byte_offset = usize::MAX;
|
||||
}
|
||||
}
|
||||
self.last_doc_in_previous_block = self.last_doc_in_block;
|
||||
|
||||
@@ -42,7 +42,7 @@ pub struct Addr(u32);
|
||||
impl Addr {
|
||||
/// Creates a null pointer.
|
||||
pub fn null_pointer() -> Addr {
|
||||
Addr(u32::max_value())
|
||||
Addr(u32::MAX)
|
||||
}
|
||||
|
||||
/// Returns the `Addr` object for `addr + offset`
|
||||
@@ -64,7 +64,7 @@ impl Addr {
|
||||
|
||||
/// Returns true if and only if the `Addr` is null.
|
||||
pub fn is_null(self) -> bool {
|
||||
self.0 == u32::max_value()
|
||||
self.0 == u32::MAX
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,13 +19,13 @@ pub struct TermInfo {
|
||||
impl TermInfo {
|
||||
pub(crate) fn posting_num_bytes(&self) -> u32 {
|
||||
let num_bytes = self.postings_range.len();
|
||||
assert!(num_bytes <= std::u32::MAX as usize);
|
||||
assert!(num_bytes <= u32::MAX as usize);
|
||||
num_bytes as u32
|
||||
}
|
||||
|
||||
pub(crate) fn positions_num_bytes(&self) -> u32 {
|
||||
let num_bytes = self.positions_range.len();
|
||||
assert!(num_bytes <= std::u32::MAX as usize);
|
||||
assert!(num_bytes <= u32::MAX as usize);
|
||||
num_bytes as u32
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,11 +11,11 @@ use crate::Searcher;
|
||||
/// The documents matched by the boolean query are
|
||||
/// those which
|
||||
/// * match all of the sub queries associated with the
|
||||
/// `Must` occurence
|
||||
/// `Must` occurrence
|
||||
/// * match none of the sub queries associated with the
|
||||
/// `MustNot` occurence.
|
||||
/// `MustNot` occurrence.
|
||||
/// * match at least one of the subqueries that is not
|
||||
/// a `MustNot` occurence.
|
||||
/// a `MustNot` occurrence.
|
||||
///
|
||||
///
|
||||
/// You can combine other query types and their `Occur`ances into one `BooleanQuery`
|
||||
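A hedged example of the `Must`/`MustNot` semantics described above; `BooleanQuery::new` taking a `Vec<(Occur, Box<dyn Query>)>` is recalled from tantivy's query API and may differ slightly between versions.

```rust
use tantivy::query::{BooleanQuery, Occur, Query, TermQuery};
use tantivy::schema::{Field, IndexRecordOption, Term};

/// Documents must contain "diary" and must not contain "girl" in `title`.
fn must_and_must_not(title: Field) -> BooleanQuery {
    let must: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(title, "diary"),
        IndexRecordOption::Basic,
    ));
    let must_not: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(title, "girl"),
        IndexRecordOption::Basic,
    ));
    BooleanQuery::new(vec![(Occur::Must, must), (Occur::MustNot, must_not)])
}
```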
|
||||
@@ -243,13 +243,12 @@ impl MoreLikeThis {
|
||||
}
|
||||
FieldType::Date(_) => {
|
||||
for value in values {
|
||||
// TODO: Ask if this is the semantic (timestamp) we want
|
||||
let unix_timestamp = value
|
||||
let timestamp_micros = value
|
||||
.as_date()
|
||||
.ok_or_else(|| TantivyError::InvalidArgument("invalid value".to_string()))?
|
||||
.into_unix_timestamp();
|
||||
if !self.is_noise_word(unix_timestamp.to_string()) {
|
||||
let term = Term::from_field_i64(field, unix_timestamp);
|
||||
.into_timestamp_micros();
|
||||
if !self.is_noise_word(timestamp_micros.to_string()) {
|
||||
let term = Term::from_field_i64(field, timestamp_micros);
|
||||
*term_frequencies.entry(term).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -359,7 +359,7 @@ pub mod tests {
|
||||
let matching_docs = |query: &str| {
|
||||
let query_parser = QueryParser::for_index(&index, vec![json_field]);
|
||||
let phrase_query = query_parser.parse_query(query).unwrap();
|
||||
let phrase_weight = phrase_query.weight(&*searcher, false).unwrap();
|
||||
let phrase_weight = phrase_query.weight(&searcher, false).unwrap();
|
||||
let mut phrase_scorer = phrase_weight
|
||||
.scorer(searcher.segment_reader(0), 1.0f32)
|
||||
.unwrap();
|
||||
|
||||
@@ -17,6 +17,9 @@ use crate::schema::{Field, IndexRecordOption, Term};
|
||||
///
|
||||
/// **This is my favorite part of the job.**
|
||||
///
|
||||
/// [Slop](PhraseQuery::set_slop) allows leniency in term proximity
|
||||
/// for some performance tradeoff.
|
||||
///
|
||||
/// Using a `PhraseQuery` on a field requires positions
|
||||
/// to be indexed for this field.
|
||||
#[derive(Clone, Debug)]
|
||||
@@ -40,7 +43,12 @@ impl PhraseQuery {
|
||||
/// Creates a new `PhraseQuery` given a list of terms and their offsets.
|
||||
///
|
||||
/// Can be used to provide custom offset for each term.
|
||||
pub fn new_with_offset(mut terms: Vec<(usize, Term)>) -> PhraseQuery {
|
||||
pub fn new_with_offset(terms: Vec<(usize, Term)>) -> PhraseQuery {
|
||||
PhraseQuery::new_with_offset_and_slop(terms, 0)
|
||||
}
|
||||
|
||||
/// Creates a new `PhraseQuery` given a list of terms, their offsets and a slop
|
||||
pub fn new_with_offset_and_slop(mut terms: Vec<(usize, Term)>, slop: u32) -> PhraseQuery {
|
||||
assert!(
|
||||
terms.len() > 1,
|
||||
"A phrase query is required to have strictly more than one term."
|
||||
@@ -54,11 +62,14 @@ impl PhraseQuery {
|
||||
PhraseQuery {
|
||||
field,
|
||||
phrase_terms: terms,
|
||||
slop: 0,
|
||||
slop,
|
||||
}
|
||||
}
|
||||
|
||||
/// Slop allowed for the phrase.
|
||||
///
|
||||
/// The query will match if its terms are separated by `slop` terms at most.
|
||||
/// By default the slop is 0 meaning query terms need to be adjacent.
|
||||
pub fn set_slop(&mut self, value: u32) {
|
||||
self.slop = value;
|
||||
}
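A short sketch using the constructor added in this diff; `title` is assumed to be a text field indexed with positions.

```rust
use tantivy::query::PhraseQuery;
use tantivy::schema::{Field, Term};

/// Matches "nearby ... room" even if one extra token sits between the two words.
fn nearby_room(title: Field) -> PhraseQuery {
    let terms = vec![
        (0, Term::from_field_text(title, "nearby")),
        (1, Term::from_field_text(title, "room")),
    ];
    PhraseQuery::new_with_offset_and_slop(terms, 1)
}
```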
|
||||
|
||||
@@ -16,7 +16,7 @@ use crate::{DocAddress, Term};
|
||||
/// - a set of documents
|
||||
/// - a way to score these documents
|
||||
///
|
||||
/// When performing a [search](#method.search), these documents will then
|
||||
/// When performing a [search](Searcher::search), these documents will then
|
||||
/// be pushed to a [Collector](../collector/trait.Collector.html),
|
||||
/// which will in turn be in charge of deciding what to do with them.
|
||||
///
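A sketch of that searcher/collector interaction, assuming an existing index with a text field `title` (standard tantivy API, not part of this diff):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::{Index, TantivyError};

fn top_ten(index: &Index, title: Field) -> Result<(), TantivyError> {
    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(index, vec![title])
        .parse_query("diary")
        .expect("valid query");
    // The collector decides what to keep: here, the ten best-scored documents.
    for (score, doc_address) in searcher.search(&query, &TopDocs::with_limit(10))? {
        println!("score={score} address={doc_address:?}");
    }
    Ok(())
}
```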
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.