Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-03 07:42:54 +00:00

Compare commits: validate_l...update_exa (38 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | f5a716e827 |  |
|  | 4143d31865 |  |
|  | 0c634adbe1 |  |
|  | 2e3641c2ae |  |
|  | b806122c81 |  |
|  | e1679f3fb9 |  |
|  | 5a80420b10 |  |
|  | aa26ff5029 |  |
|  | e197b59258 |  |
|  | 5b7cca13e5 |  |
|  | a79590477e |  |
|  | 6181c1eb5e |  |
|  | 1ee5f90761 |  |
|  | 71f3b4e4e3 |  |
|  | 8cd7ddc535 |  |
|  | 2b76335a95 |  |
|  | c6b213d8f0 |  |
|  | eea70030bf |  |
|  | 92b5526310 |  |
|  | 99a59ad37e |  |
|  | 6a66a71cbb |  |
|  | ff40764204 |  |
|  | 047da20b5b |  |
|  | 1417eaf3a7 |  |
|  | 4f8493d2de |  |
|  | 8861366137 |  |
|  | 0e9fced336 |  |
|  | b257b960b3 |  |
|  | 4708171a32 |  |
|  | b493743f8d |  |
|  | d2955a3fd2 |  |
|  | 17d5869ad6 |  |
|  | dfa3aed32d |  |
|  | 398817ce7b |  |
|  | 74940e9345 |  |
|  | 1e9fc51535 |  |
|  | 92c32979d2 |  |
|  | b644d78a32 |  |
.github/workflows/coverage.yml (vendored, 4 changes)

```diff
@@ -15,11 +15,11 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Install Rust
-        run: rustup toolchain install nightly-2023-09-10 --profile minimal --component llvm-tools-preview
+        run: rustup toolchain install nightly-2024-04-10 --profile minimal --component llvm-tools-preview
       - uses: Swatinem/rust-cache@v2
       - uses: taiki-e/install-action@cargo-llvm-cov
       - name: Generate code coverage
-        run: cargo +nightly-2023-09-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
+        run: cargo +nightly-2024-04-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v3
         continue-on-error: true
```
CHANGELOG.md (62 changes)

```diff
@@ -1,3 +1,65 @@
+Tantivy 0.22
+================================
+
+Tantivy 0.22 will be able to read indices created with Tantivy 0.21.
+
+#### Bugfixes
+- Fix null byte handling in JSON paths (null bytes in JSON keys caused a panic during indexing) [#2345](https://github.com/quickwit-oss/tantivy/pull/2345)(@PSeitz)
+- Fix bug that can cause `get_docids_for_value_range` to panic. [#2295](https://github.com/quickwit-oss/tantivy/pull/2295)(@fulmicoton)
+- Avoid 1-document indices by increasing the minimum indexing memory to 15MB [#2176](https://github.com/quickwit-oss/tantivy/pull/2176)(@PSeitz)
+- Fix merge panic for JSON fields [#2284](https://github.com/quickwit-oss/tantivy/pull/2284)(@PSeitz)
+- Fix bug occurring when merging JSON objects indexed with positions. [#2253](https://github.com/quickwit-oss/tantivy/pull/2253)(@fulmicoton)
+- Fix empty DateHistogram gap bug [#2183](https://github.com/quickwit-oss/tantivy/pull/2183)(@PSeitz)
+- Fix range query end check (fields with fewer than 1 value per doc are affected) [#2226](https://github.com/quickwit-oss/tantivy/pull/2226)(@PSeitz)
+- Handle exclusive out-of-bounds ranges on fastfield range queries [#2174](https://github.com/quickwit-oss/tantivy/pull/2174)(@PSeitz)
+
+#### Breaking API Changes
+- Rename ReloadPolicy onCommit to onCommitWithDelay [#2235](https://github.com/quickwit-oss/tantivy/pull/2235)(@giovannicuccu)
+- Move exports from the root into modules [#2220](https://github.com/quickwit-oss/tantivy/pull/2220)(@PSeitz)
+- Accept field name instead of `Field` in FilterCollector [#2196](https://github.com/quickwit-oss/tantivy/pull/2196)(@PSeitz)
+- Remove deprecated IntOptions and DateTime [#2353](https://github.com/quickwit-oss/tantivy/pull/2353)(@PSeitz)
+
+#### Features/Improvements
+- Tantivy documents as a trait: index data directly without converting to tantivy types first [#2071](https://github.com/quickwit-oss/tantivy/pull/2071)(@ChillFish8)
+- Encode some parts of the posting list as -1 instead of direct values (smaller inverted indices) [#2185](https://github.com/quickwit-oss/tantivy/pull/2185)(@trinity-1686a)
+- **Aggregation**
+  - Support deserializing f64 from string [#2311](https://github.com/quickwit-oss/tantivy/pull/2311)(@PSeitz)
+  - Add a top_hits aggregator [#2198](https://github.com/quickwit-oss/tantivy/pull/2198)(@ditsuke)
+  - Support bool type in term aggregations [#2318](https://github.com/quickwit-oss/tantivy/pull/2318)(@PSeitz)
+  - Support IP addresses in term aggregations [#2319](https://github.com/quickwit-oss/tantivy/pull/2319)(@PSeitz)
+  - Support date type in term aggregations [#2172](https://github.com/quickwit-oss/tantivy/pull/2172)(@PSeitz)
+  - Support escaped dots when addressing a field [#2250](https://github.com/quickwit-oss/tantivy/pull/2250)(@PSeitz)
+
+- Add ExistsQuery to match documents that have a value [#2160](https://github.com/quickwit-oss/tantivy/pull/2160)(@imotov)
+- Expose TopDocs::order_by_u64_field again [#2282](https://github.com/quickwit-oss/tantivy/pull/2282)(@ditsuke)
+
+- **Memory/Performance**
+  - Faster top-N: replace BinaryHeap with TopNComputer [#2186](https://github.com/quickwit-oss/tantivy/pull/2186)(@PSeitz)
+  - Reduce the number of allocations during indexing [#2257](https://github.com/quickwit-oss/tantivy/pull/2257)(@PSeitz)
+  - Less memory while indexing: docid deltas while indexing [#2249](https://github.com/quickwit-oss/tantivy/pull/2249)(@PSeitz)
+  - Faster indexing: use the term hashmap in fastfields [#2243](https://github.com/quickwit-oss/tantivy/pull/2243)(@PSeitz)
+  - Term hashmap: remove a copy in is_empty, drop the unused unordered_id [#2229](https://github.com/quickwit-oss/tantivy/pull/2229)(@PSeitz)
+  - Add a method to fetch a block of first values in columnar [#2330](https://github.com/quickwit-oss/tantivy/pull/2330)(@PSeitz)
+  - Faster aggregations: add a fast path for full columns in fetch_block [#2328](https://github.com/quickwit-oss/tantivy/pull/2328)(@PSeitz)
+  - Faster sstable loading: use an fst for the sstable index [#2268](https://github.com/quickwit-oss/tantivy/pull/2268)(@trinity-1686a)
+
+- **QueryParser**
+  - Allow newlines wherever spaces are allowed in the query parser [#2302](https://github.com/quickwit-oss/tantivy/pull/2302)(@trinity-1686a)
+  - Allow some mixing of occur and bool in the strict query parser [#2323](https://github.com/quickwit-oss/tantivy/pull/2323)(@trinity-1686a)
+  - Handle `*` inside a term in the lenient query parser [#2228](https://github.com/quickwit-oss/tantivy/pull/2228)(@trinity-1686a)
+  - Add support for the exists query syntax in the query parser [#2170](https://github.com/quickwit-oss/tantivy/pull/2170)(@trinity-1686a)
+- Add a shared search executor [#2312](https://github.com/quickwit-oss/tantivy/pull/2312)(@MochiXu)
+- Truncate keys to u16::MAX in the term hashmap [#2299](https://github.com/quickwit-oss/tantivy/pull/2299)(@PSeitz)
+- Report whether a term matched when warming up a posting list [#2309](https://github.com/quickwit-oss/tantivy/pull/2309)(@trinity-1686a)
+- Support JSON fields in FuzzyTermQuery [#2173](https://github.com/quickwit-oss/tantivy/pull/2173)(@PingXia-at)
+- Read the list of fields encoded in the term dictionary for JSON fields [#2184](https://github.com/quickwit-oss/tantivy/pull/2184)(@PSeitz)
+- Add collect_block to BoxableSegmentCollector [#2331](https://github.com/quickwit-oss/tantivy/pull/2331)(@PSeitz)
+- Expose the collect_block buffer size [#2326](https://github.com/quickwit-oss/tantivy/pull/2326)(@PSeitz)
+- Forward regex parser errors [#2288](https://github.com/quickwit-oss/tantivy/pull/2288)(@adamreichold)
+- Make FacetCounts defaultable and cloneable. [#2322](https://github.com/quickwit-oss/tantivy/pull/2322)(@adamreichold)
+- Derive Debug for SchemaBuilder [#2254](https://github.com/quickwit-oss/tantivy/pull/2254)(@GodTamIt)
+- Add missing inlines to tantivy options [#2245](https://github.com/quickwit-oss/tantivy/pull/2245)(@PSeitz)
+
 Tantivy 0.21.1
 ================================
 #### Bugfixes
```
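Of the breaking changes above, the `ReloadPolicy` rename is the one most existing callers hit first. Below is a minimal, hedged sketch of the new spelling, assuming `ReloadPolicy` is still importable from the crate root after the export move in #2220 (it may otherwise live under `tantivy::index`):

```rust
use tantivy::{Index, IndexReader, ReloadPolicy};

fn reader_for(index: &Index) -> tantivy::Result<IndexReader> {
    index
        .reader_builder()
        // 0.21 spelled this variant `OnCommit`; 0.22 renames it (#2235).
        .reload_policy(ReloadPolicy::OnCommitWithDelay)
        .try_into()
}
```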
Cargo.toml (56 changes)

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.22.0-dev"
+version = "0.23.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -11,16 +11,20 @@ repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2021"
-rust-version = "1.62"
+rust-version = "1.63"
 exclude = ["benches/*.json", "benches/*.txt"]
 
 [dependencies]
-oneshot = "0.1.5"
+# Switch back to the non-forked oneshot crate once https://github.com/faern/oneshot/pull/35 is merged
+oneshot = { git = "https://github.com/fulmicoton/oneshot.git", rev = "b208f49" }
 base64 = "0.22.0"
 byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
-regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
+regex = { version = "1.5.5", default-features = false, features = [
+    "std",
+    "unicode",
+] }
 aho-corasick = "1.0"
 tantivy-fst = "0.5"
 memmap2 = { version = "0.9.0", optional = true }
@@ -30,14 +34,15 @@ tempfile = { version = "3.3.0", optional = true }
 log = "0.4.16"
 serde = { version = "1.0.136", features = ["derive"] }
 serde_json = "1.0.79"
-num_cpus = "1.13.1"
 fs4 = { version = "0.8.0", optional = true }
 levenshtein_automata = "0.2.1"
 uuid = { version = "1.0.0", features = ["v4", "serde"] }
 crossbeam-channel = "0.5.4"
 rust-stemmers = "1.2.0"
 downcast-rs = "1.2.0"
-bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker4x"] }
+bitpacking = { version = "0.9.2", default-features = false, features = [
+    "bitpacker4x",
+] }
 census = "0.4.2"
 rustc-hash = "1.1.0"
 thiserror = "1.0.30"
@@ -48,17 +53,17 @@ smallvec = "1.8.0"
 rayon = "1.5.2"
 lru = "0.12.0"
 fastdivide = "0.4.0"
-itertools = "0.12.0"
+itertools = "0.13.0"
 measure_time = "0.8.2"
 arc-swap = "1.5.0"
 
-columnar = { version= "0.2", path="./columnar", package ="tantivy-columnar" }
-sstable = { version= "0.2", path="./sstable", package ="tantivy-sstable", optional = true }
-stacker = { version= "0.2", path="./stacker", package ="tantivy-stacker" }
-query-grammar = { version= "0.21.0", path="./query-grammar", package = "tantivy-query-grammar" }
-tantivy-bitpacker = { version= "0.5", path="./bitpacker" }
-common = { version= "0.6", path = "./common/", package = "tantivy-common" }
-tokenizer-api = { version= "0.2", path="./tokenizer-api", package="tantivy-tokenizer-api" }
+columnar = { version = "0.3", path = "./columnar", package = "tantivy-columnar" }
+sstable = { version = "0.3", path = "./sstable", package = "tantivy-sstable", optional = true }
+stacker = { version = "0.3", path = "./stacker", package = "tantivy-stacker" }
+query-grammar = { version = "0.22.0", path = "./query-grammar", package = "tantivy-query-grammar" }
+tantivy-bitpacker = { version = "0.6", path = "./bitpacker" }
+common = { version = "0.7", path = "./common/", package = "tantivy-common" }
+tokenizer-api = { version = "0.3", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
 sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
 futures-util = { version = "0.3.28", optional = true }
 fnv = "1.0.7"
@@ -67,6 +72,7 @@ fnv = "1.0.7"
 winapi = "0.3.9"
 
 [dev-dependencies]
+binggan = "0.8.0"
 rand = "0.8.5"
 maplit = "1.0.2"
 matches = "0.1.9"
@@ -78,6 +84,9 @@ paste = "1.0.11"
 more-asserts = "0.3.1"
 rand_distr = "0.4.3"
 time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
+postcard = { version = "1.0.4", features = [
+    "use-std",
+], default-features = false }
 
 [target.'cfg(not(windows))'.dev-dependencies]
 criterion = { version = "0.5", default-features = false }
@@ -109,17 +118,26 @@ lz4-compression = ["lz4_flex"]
 zstd-compression = ["zstd"]
 
 failpoints = ["fail", "fail/failpoints"]
 unstable = [] # useful for benches.
 
 quickwit = ["sstable", "futures-util"]
 
 # Compares only the hash of a string when indexing data.
 # Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.
 # Uses 64bit ahash.
 compare_hash_only = ["stacker/compare_hash_only"]
 
 [workspace]
-members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
+members = [
+    "query-grammar",
+    "bitpacker",
+    "common",
+    "ownedbytes",
+    "stacker",
+    "sstable",
+    "tokenizer-api",
+    "columnar",
+]
 
 # Following the "fail" crate best practices, we isolate
 # tests that define specific behavior in fail check points
@@ -140,3 +158,7 @@ harness = false
 [[bench]]
 name = "index-bench"
 harness = false
+
+[[bench]]
+name = "agg_bench"
+harness = false
```
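The new `[[bench]]` target above registers the aggregation benchmark shown next. Because it sets `harness = false`, `cargo bench --bench agg_bench` skips the default libtest harness and hands control to the file's own `main`, where binggan takes over.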
benches/agg_bench.rs (new file, 413 lines)

```rust
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand_distr::Distribution;
use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use tantivy::{doc, Index, Term};

#[global_allocator]
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;

/// Mini macro to register a function via its name
/// runner.register("average_u64", move |index| average_u64(index));
macro_rules! register {
    ($runner:expr, $func:ident) => {
        $runner.register(stringify!($func), move |index| $func(index))
    };
}

fn main() {
    let inputs = vec![
        ("full", get_test_index_bench(Cardinality::Full).unwrap()),
        (
            "dense",
            get_test_index_bench(Cardinality::OptionalDense).unwrap(),
        ),
        (
            "sparse",
            get_test_index_bench(Cardinality::OptionalSparse).unwrap(),
        ),
        (
            "multivalue",
            get_test_index_bench(Cardinality::Multivalued).unwrap(),
        ),
    ];

    bench_agg(InputGroup::new_with_inputs(inputs));
}

fn bench_agg(mut group: InputGroup<Index>) {
    group.set_alloc(GLOBAL); // Set the peak mem allocator. This will enable peak memory reporting.
    register!(group, average_u64);
    register!(group, average_f64);
    register!(group, average_f64_u64);
    register!(group, stats_f64);
    register!(group, percentiles_f64);
    register!(group, terms_few);
    register!(group, terms_many);
    register!(group, terms_many_order_by_term);
    register!(group, terms_many_with_top_hits);
    register!(group, terms_many_with_avg_sub_agg);
    register!(group, terms_many_json_mixed_type_with_sub_agg_card);
    register!(group, range_agg);
    register!(group, range_agg_with_avg_sub_agg);
    register!(group, range_agg_with_term_agg_few);
    register!(group, range_agg_with_term_agg_many);
    register!(group, histogram);
    register!(group, histogram_hard_bounds);
    register!(group, histogram_with_avg_sub_agg);
    register!(group, avg_and_range_with_avg_sub_agg);

    group.run();
}

fn exec_term_with_agg(index: &Index, agg_req: serde_json::Value) {
    let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();

    let reader = index.reader().unwrap();
    let text_field = reader.searcher().schema().get_field("text").unwrap();
    let term_query = TermQuery::new(
        Term::from_field_text(text_field, "cool"),
        IndexRecordOption::Basic,
    );
    let collector = get_collector(agg_req);
    let searcher = reader.searcher();
    black_box(searcher.search(&term_query, &collector).unwrap());
}

fn average_u64(index: &Index) {
    let agg_req = json!({
        "average": { "avg": { "field": "score", } }
    });
    exec_term_with_agg(index, agg_req)
}
fn average_f64(index: &Index) {
    let agg_req = json!({
        "average": { "avg": { "field": "score_f64", } }
    });
    exec_term_with_agg(index, agg_req)
}
fn average_f64_u64(index: &Index) {
    let agg_req = json!({
        "average_f64": { "avg": { "field": "score_f64" } },
        "average": { "avg": { "field": "score" } },
    });
    exec_term_with_agg(index, agg_req)
}
fn stats_f64(index: &Index) {
    let agg_req = json!({
        "average_f64": { "stats": { "field": "score_f64", } }
    });
    exec_term_with_agg(index, agg_req)
}

fn percentiles_f64(index: &Index) {
    let agg_req = json!({
        "mypercentiles": {
            "percentiles": {
                "field": "score_f64",
                "percents": [ 95, 99, 99.9 ]
            }
        }
    });
    execute_agg(index, agg_req);
}
fn terms_few(index: &Index) {
    let agg_req = json!({
        "my_texts": { "terms": { "field": "text_few_terms" } },
    });
    execute_agg(index, agg_req);
}
fn terms_many(index: &Index) {
    let agg_req = json!({
        "my_texts": { "terms": { "field": "text_many_terms" } },
    });
    execute_agg(index, agg_req);
}
fn terms_many_order_by_term(index: &Index) {
    let agg_req = json!({
        "my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
    });
    execute_agg(index, agg_req);
}
fn terms_many_with_top_hits(index: &Index) {
    let agg_req = json!({
        "my_texts": {
            "terms": { "field": "text_many_terms" },
            "aggs": {
                "top_hits": { "top_hits":
                    {
                        "sort": [
                            { "score": "desc" }
                        ],
                        "size": 2,
                        "doc_value_fields": ["score_f64"]
                    }
                }
            }
        },
    });
    execute_agg(index, agg_req);
}
fn terms_many_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "my_texts": {
            "terms": { "field": "text_many_terms" },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        },
    });
    execute_agg(index, agg_req);
}
fn terms_many_json_mixed_type_with_sub_agg_card(index: &Index) {
    let agg_req = json!({
        "my_texts": {
            "terms": { "field": "json.mixed_type" },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        },
    });
    execute_agg(index, agg_req);
}

fn execute_agg(index: &Index, agg_req: serde_json::Value) {
    let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
    let collector = get_collector(agg_req);

    let reader = index.reader().unwrap();
    let searcher = reader.searcher();
    black_box(searcher.search(&AllQuery, &collector).unwrap());
}
fn range_agg(index: &Index) {
    let agg_req = json!({
        "range_f64": { "range": { "field": "score_f64", "ranges": [
            { "from": 3, "to": 7000 },
            { "from": 7000, "to": 20000 },
            { "from": 20000, "to": 30000 },
            { "from": 30000, "to": 40000 },
            { "from": 40000, "to": 50000 },
            { "from": 50000, "to": 60000 }
        ] } },
    });
    execute_agg(index, agg_req);
}
fn range_agg_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 30000 },
                    { "from": 30000, "to": 40000 },
                    { "from": 40000, "to": 50000 },
                    { "from": 50000, "to": 60000 }
                ]
            },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        },
    });
    execute_agg(index, agg_req);
}

fn range_agg_with_term_agg_few(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 30000 },
                    { "from": 30000, "to": 40000 },
                    { "from": 40000, "to": 50000 },
                    { "from": 50000, "to": 60000 }
                ]
            },
            "aggs": {
                "my_texts": { "terms": { "field": "text_few_terms" } },
            }
        },
    });
    execute_agg(index, agg_req);
}
fn range_agg_with_term_agg_many(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 30000 },
                    { "from": 30000, "to": 40000 },
                    { "from": 40000, "to": 50000 },
                    { "from": 50000, "to": 60000 }
                ]
            },
            "aggs": {
                "my_texts": { "terms": { "field": "text_many_terms" } },
            }
        },
    });
    execute_agg(index, agg_req);
}
fn histogram(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "histogram": {
                "field": "score_f64",
                "interval": 100 // 1000 buckets
            },
        }
    });
    execute_agg(index, agg_req);
}
fn histogram_hard_bounds(index: &Index) {
    let agg_req = json!({
        "rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
    });
    execute_agg(index, agg_req);
}
fn histogram_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "histogram": { "field": "score_f64", "interval": 100 },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        }
    });
    execute_agg(index, agg_req);
}
fn avg_and_range_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 60000 }
                ]
            },
            "aggs": {
                "average_in_range": { "avg": { "field": "score" } }
            }
        },
        "average": { "avg": { "field": "score" } }
    });
    execute_agg(index, agg_req);
}

#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Cardinality {
    /// All documents contain exactly one value.
    /// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
    #[default]
    Full = 0,
    /// All documents contain at most one value.
    OptionalDense = 1,
    /// All documents may contain any number of values.
    Multivalued = 2,
    /// 1 / 20 documents has a value
    OptionalSparse = 3,
}

fn get_collector(agg_req: Aggregations) -> AggregationCollector {
    AggregationCollector::from_aggs(agg_req, Default::default())
}

fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
    let mut schema_builder = Schema::builder();
    let text_fieldtype = tantivy::schema::TextOptions::default()
        .set_indexing_options(
            TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
        )
        .set_stored();
    let text_field = schema_builder.add_text_field("text", text_fieldtype);
    let json_field = schema_builder.add_json_field("json", FAST);
    let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
    let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
    let score_fieldtype = tantivy::schema::NumericOptions::default().set_fast();
    let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
    let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
    let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
    let index = Index::create_from_tempdir(schema_builder.build())?;
    let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];

    let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();

    let many_terms_data = (0..150_000)
        .map(|num| format!("author{num}"))
        .collect::<Vec<_>>();
    {
        let mut rng = StdRng::from_seed([1u8; 32]);
        let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
        // To make the different test cases comparable we just change one doc to force the
        // cardinality
        if cardinality == Cardinality::OptionalDense {
            index_writer.add_document(doc!())?;
        }
        if cardinality == Cardinality::Multivalued {
            index_writer.add_document(doc!(
                json_field => json!({"mixed_type": 10.0}),
                json_field => json!({"mixed_type": 10.0}),
                text_field => "cool",
                text_field => "cool",
                text_field_many_terms => "cool",
                text_field_many_terms => "cool",
                text_field_few_terms => "cool",
                text_field_few_terms => "cool",
                score_field => 1u64,
                score_field => 1u64,
                score_field_f64 => lg_norm.sample(&mut rng),
                score_field_f64 => lg_norm.sample(&mut rng),
                score_field_i64 => 1i64,
                score_field_i64 => 1i64,
            ))?;
        }
        let mut doc_with_value = 1_000_000;
        if cardinality == Cardinality::OptionalSparse {
            doc_with_value /= 20;
        }
        let _val_max = 1_000_000.0;
        for _ in 0..doc_with_value {
            let val: f64 = rng.gen_range(0.0..1_000_000.0);
            let json = if rng.gen_bool(0.1) {
                // 10% are numeric values
                json!({ "mixed_type": val })
            } else {
                json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
            };
            index_writer.add_document(doc!(
                text_field => "cool",
                json_field => json,
                text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
                text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
                score_field => val as u64,
                score_field_f64 => lg_norm.sample(&mut rng),
                score_field_i64 => val as i64,
            ))?;
            if cardinality == Cardinality::OptionalSparse {
                for _ in 0..20 {
                    index_writer.add_document(doc!(text_field => "cool"))?;
                }
            }
        }
        // writing the segment
        index_writer.commit()?;
    }

    Ok(index)
}
```
benches/index-bench.rs

```diff
@@ -18,7 +18,7 @@ fn benchmark(
         benchmark_dynamic_json(b, input, schema, commit, parse_json)
     } else {
         _benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
-            TantivyDocument::parse_json(&schema, doc_json).unwrap()
+            TantivyDocument::parse_json(schema, doc_json).unwrap()
        })
     }
 }
@@ -90,8 +90,7 @@ fn benchmark_dynamic_json(
 ) {
     let json_field = schema.get_field("json").unwrap();
     _benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
-        let json_val: serde_json::Map<String, serde_json::Value> =
-            serde_json::from_str(doc_json).unwrap();
+        let json_val: serde_json::Value = serde_json::from_str(doc_json).unwrap();
         tantivy::doc!(json_field=>json_val)
     })
 }
@@ -138,15 +137,16 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
     for (prefix, schema, is_dynamic) in benches {
         for commit in [false, true] {
             let suffix = if commit { "with-commit" } else { "no-commit" };
-            for parse_json in [false] {
+            {
+                let parse_json = false;
                 // for parse_json in [false, true] {
                 let suffix = if parse_json {
-                    format!("{}-with-json-parsing", suffix)
+                    format!("{suffix}-with-json-parsing")
                 } else {
-                    format!("{}", suffix)
+                    suffix.to_string()
                 };
-                let bench_name = format!("{}{}", prefix, suffix);
+
+                let bench_name = format!("{prefix}{suffix}");
                 group.bench_function(bench_name, |b| {
                     benchmark(b, HDFS_LOGS, schema.clone(), commit, parse_json, is_dynamic)
                 });
```
bitpacker/Cargo.toml

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-bitpacker"
-version = "0.5.0"
+version = "0.6.0"
 edition = "2021"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
```
cliff.toml (83 changes)

```diff
@@ -1,6 +1,10 @@
 # configuration file for git-cliff
 # see https://github.com/orhun/git-cliff#configuration-file
 
+[remote.github]
+owner = "quickwit-oss"
+repo = "tantivy"
+
 [changelog]
 # changelog header
 header = """
@@ -8,15 +12,43 @@ header = """
 # template for the changelog body
 # https://tera.netlify.app/docs/#introduction
 body = """
-{% if version %}\
-{{ version | trim_start_matches(pat="v") }} ({{ timestamp | date(format="%Y-%m-%d") }})
-==================
-{% else %}\
-## [unreleased]
-{% endif %}\
+## What's Changed
+
+{%- if version %} in {{ version }}{%- endif -%}
+
 {% for commit in commits %}
-- {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | split(pat="\n") | first | trim | upper_first }}(@{{ commit.author.name }})\
-{% endfor %}
+{% if commit.github.pr_title -%}
+{%- set commit_message = commit.github.pr_title -%}
+{%- else -%}
+{%- set commit_message = commit.message -%}
+{%- endif -%}
+- {{ commit_message | split(pat="\n") | first | trim }}\
+{% if commit.github.pr_number %} \
+[#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}){% if commit.github.username %}(@{{ commit.github.username }}){%- endif -%} \
+{%- endif %}
+{%- endfor -%}
+
+{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
+{% raw %}\n{% endraw -%}
+## New Contributors
+{%- endif %}\
+{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
+* @{{ contributor.username }} made their first contribution
+{%- if contributor.pr_number %} in \
+[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
+{%- endif %}
+{%- endfor -%}
+
+{% if version %}
+{% if previous.version %}
+**Full Changelog**: {{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}
+{% endif %}
+{% else -%}
+{% raw %}\n{% endraw %}
+{% endif %}
+
+{%- macro remote_url() -%}
+https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
+{%- endmacro -%}
 """
 # remove the leading and trailing whitespace from the template
 trim = true
@@ -25,53 +57,24 @@ footer = """
 """
 
 postprocessors = [
-    { pattern = 'Paul Masurel', replace = "fulmicoton"}, # replace with github user
-    { pattern = 'PSeitz', replace = "PSeitz"}, # replace with github user
-    { pattern = 'Adam Reichold', replace = "adamreichold"}, # replace with github user
-    { pattern = 'trinity-1686a', replace = "trinity-1686a"}, # replace with github user
-    { pattern = 'Michael Kleen', replace = "mkleen"}, # replace with github user
-    { pattern = 'Adrien Guillo', replace = "guilload"}, # replace with github user
-    { pattern = 'François Massot', replace = "fmassot"}, # replace with github user
-    { pattern = 'Naveen Aiathurai', replace = "naveenann"}, # replace with github user
-    { pattern = '', replace = ""}, # replace with github user
 ]
 
 [git]
 # parse the commits based on https://www.conventionalcommits.org
 # This is required or commit.message contains the whole commit message and not just the title
-conventional_commits = true
+conventional_commits = false
 # filter out the commits that are not conventional
-filter_unconventional = false
+filter_unconventional = true
 # process each line of a commit as an individual commit
 split_commits = false
 # regex for preprocessing the commit messages
 commit_preprocessors = [
-    { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "[#${2}](https://github.com/quickwit-oss/tantivy/issues/${2})"}, # replace issue numbers
+    { pattern = '\((\w+\s)?#([0-9]+)\)', replace = ""},
 ]
 #link_parsers = [
 #{ pattern = "#(\\d+)", href = "https://github.com/quickwit-oss/tantivy/pulls/$1"},
 #]
 # regex for parsing and grouping commits
-commit_parsers = [
-    { message = "^feat", group = "Features"},
-    { message = "^fix", group = "Bug Fixes"},
-    { message = "^doc", group = "Documentation"},
-    { message = "^perf", group = "Performance"},
-    { message = "^refactor", group = "Refactor"},
-    { message = "^style", group = "Styling"},
-    { message = "^test", group = "Testing"},
-    { message = "^chore\\(release\\): prepare for", skip = true},
-    { message = "(?i)clippy", skip = true},
-    { message = "(?i)dependabot", skip = true},
-    { message = "(?i)fmt", skip = true},
-    { message = "(?i)bump", skip = true},
-    { message = "(?i)readme", skip = true},
-    { message = "(?i)comment", skip = true},
-    { message = "(?i)spelling", skip = true},
-    { message = "^chore", group = "Miscellaneous Tasks"},
-    { body = ".*security", group = "Security"},
-    { message = ".*", group = "Other", default_scope = "other"},
-]
 # protect breaking changes from being skipped due to matching a skipping commit_parser
 protect_breaking_commits = false
 # filter out the commits that are not matched by commit parsers
```
columnar/Cargo.toml

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-columnar"
-version = "0.2.0"
+version = "0.3.0"
 edition = "2021"
 license = "MIT"
 homepage = "https://github.com/quickwit-oss/tantivy"
@@ -9,13 +9,13 @@ description = "column oriented storage for tantivy"
 categories = ["database-implementations", "data-structures", "compression"]
 
 [dependencies]
-itertools = "0.12.0"
+itertools = "0.13.0"
 fastdivide = "0.4.0"
 
-stacker = { version= "0.2", path = "../stacker", package="tantivy-stacker"}
-sstable = { version= "0.2", path = "../sstable", package = "tantivy-sstable" }
-common = { version= "0.6", path = "../common", package = "tantivy-common" }
-tantivy-bitpacker = { version= "0.5", path = "../bitpacker/" }
+stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}
+sstable = { version= "0.3", path = "../sstable", package = "tantivy-sstable" }
+common = { version= "0.7", path = "../common", package = "tantivy-common" }
+tantivy-bitpacker = { version= "0.6", path = "../bitpacker/" }
 serde = "1.0.152"
 downcast-rs = "1.2.0"
```
```diff
@@ -140,7 +140,7 @@ mod tests {
     #[test]
     fn test_merge_column_index_optional_shuffle() {
         let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
-        let column_indexes = vec![optional_index, ColumnIndex::Full];
+        let column_indexes = [optional_index, ColumnIndex::Full];
         let row_addrs = vec![
             RowAddr {
                 segment_ord: 0u32,
```
```diff
@@ -75,7 +75,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
         let out_and_idx_chunks = output
             .chunks_exact_mut(4)
             .into_remainder()
-            .into_iter()
+            .iter_mut()
             .zip(indexes.chunks_exact(4).remainder());
         for (out, idx) in out_and_idx_chunks {
             *out = self.get_val(*idx);
@@ -102,7 +102,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
         let out_and_idx_chunks = output
             .chunks_exact_mut(4)
             .into_remainder()
-            .into_iter()
+            .iter_mut()
             .zip(indexes.chunks_exact(4).remainder());
         for (out, idx) in out_and_idx_chunks {
             *out = Some(self.get_val(*idx));
```
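The `.into_iter()` to `.iter_mut()` change above is behavior-preserving: the remainder of `chunks_exact_mut` is a `&mut [T]`, and `IntoIterator` on a mutable slice already yields `&mut T`, so `.iter_mut()` just spells the same iteration explicitly. A standalone illustration of the pattern (hypothetical values, not tantivy code):

```rust
fn main() {
    let mut output = [0u64; 6];
    let indexes: [u32; 6] = [10, 11, 12, 13, 14, 15];
    // chunks_exact_mut(4) covers output[0..4]; into_remainder() is the
    // mutable tail output[4..6], paired here with the tail of `indexes`.
    let tail = output.chunks_exact_mut(4).into_remainder();
    for (out, idx) in tail.iter_mut().zip(indexes.chunks_exact(4).remainder()) {
        *out = u64::from(*idx) * 2;
    }
    assert_eq!(output[4..], [28u64, 30]);
}
```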
```diff
@@ -148,7 +148,7 @@ impl CompactSpace {
             .binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
             // Correctness: Overflow. The first range starts at compact space 0, the error from
             // binary search can never be 0
-            .map_or_else(|e| e - 1, |v| v);
+            .unwrap_or_else(|e| e - 1);
 
         let range_mapping = &self.ranges_mapping[pos];
         let diff = compact - range_mapping.compact_start;
```
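On a `Result`, `map_or_else(|e| e - 1, |v| v)` and `unwrap_or_else(|e| e - 1)` are the same operation; the rewrite only picks the idiomatic spelling. A self-contained sketch of the predecessor-lookup idiom, with made-up range starts:

```rust
fn main() {
    // Sorted start positions of ranges. The first range starts at 0, so a
    // binary-search miss can never return Err(0), matching the comment above.
    let compact_starts = [0u32, 10, 25];
    let compact = 17u32;
    let pos = compact_starts
        .binary_search(&compact)
        // On a miss, Err holds the insertion point; the containing range
        // is the one just before it.
        .unwrap_or_else(|insertion_point| insertion_point - 1);
    assert_eq!(pos, 1); // 17 falls in the range starting at 10
}
```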
```diff
@@ -59,22 +59,6 @@ pub struct ColumnarWriter {
     buffers: SpareBuffers,
 }
 
-#[inline]
-fn mutate_or_create_column<V, TMutator>(
-    arena_hash_map: &mut ArenaHashMap,
-    column_name: &str,
-    updater: TMutator,
-) where
-    V: Copy + 'static,
-    TMutator: FnMut(Option<V>) -> V,
-{
-    assert!(
-        !column_name.as_bytes().contains(&0u8),
-        "key may not contain the 0 byte"
-    );
-    arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
-}
-
 impl ColumnarWriter {
     pub fn mem_usage(&self) -> usize {
         self.arena.mem_usage()
@@ -175,9 +159,8 @@ impl ColumnarWriter {
                     },
                     &mut self.dictionaries,
                 );
-                mutate_or_create_column(
-                    hash_map,
-                    column_name,
+                hash_map.mutate_or_create(
+                    column_name.as_bytes(),
                     |column_opt: Option<StrOrBytesColumnWriter>| {
                         let mut column_writer = if let Some(column_writer) = column_opt {
                             column_writer
@@ -192,24 +175,21 @@ impl ColumnarWriter {
                 );
             }
             ColumnType::Bool => {
-                mutate_or_create_column(
-                    &mut self.bool_field_hash_map,
-                    column_name,
+                self.bool_field_hash_map.mutate_or_create(
+                    column_name.as_bytes(),
                     |column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
                 );
             }
             ColumnType::DateTime => {
-                mutate_or_create_column(
-                    &mut self.datetime_field_hash_map,
-                    column_name,
+                self.datetime_field_hash_map.mutate_or_create(
+                    column_name.as_bytes(),
                     |column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
                 );
             }
             ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
                 let numerical_type = column_type.numerical_type().unwrap();
-                mutate_or_create_column(
-                    &mut self.numerical_field_hash_map,
-                    column_name,
+                self.numerical_field_hash_map.mutate_or_create(
+                    column_name.as_bytes(),
                     |column_opt: Option<NumericalColumnWriter>| {
                         let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
                         column.force_numerical_type(numerical_type);
@@ -217,9 +197,8 @@ impl ColumnarWriter {
                     },
                 );
             }
-            ColumnType::IpAddr => mutate_or_create_column(
-                &mut self.ip_addr_field_hash_map,
-                column_name,
+            ColumnType::IpAddr => self.ip_addr_field_hash_map.mutate_or_create(
+                column_name.as_bytes(),
                 |column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
             ),
         }
@@ -232,9 +211,8 @@ impl ColumnarWriter {
         numerical_value: T,
     ) {
         let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
-        mutate_or_create_column(
-            hash_map,
-            column_name,
+        hash_map.mutate_or_create(
+            column_name.as_bytes(),
             |column_opt: Option<NumericalColumnWriter>| {
                 let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
                 column.record_numerical_value(doc, numerical_value.into(), arena);
@@ -244,10 +222,6 @@ impl ColumnarWriter {
     }
 
     pub fn record_ip_addr(&mut self, doc: RowId, column_name: &str, ip_addr: Ipv6Addr) {
-        assert!(
-            !column_name.as_bytes().contains(&0u8),
-            "key may not contain the 0 byte"
-        );
         let (hash_map, arena) = (&mut self.ip_addr_field_hash_map, &mut self.arena);
         hash_map.mutate_or_create(
             column_name.as_bytes(),
@@ -261,24 +235,30 @@ impl ColumnarWriter {
 
     pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
         let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
-        mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
-            let mut column: ColumnWriter = column_opt.unwrap_or_default();
-            column.record(doc, val, arena);
-            column
-        });
+        hash_map.mutate_or_create(
+            column_name.as_bytes(),
+            |column_opt: Option<ColumnWriter>| {
+                let mut column: ColumnWriter = column_opt.unwrap_or_default();
+                column.record(doc, val, arena);
+                column
+            },
+        );
     }
 
     pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: common::DateTime) {
         let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
-        mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
-            let mut column: ColumnWriter = column_opt.unwrap_or_default();
-            column.record(
-                doc,
-                NumericalValue::I64(datetime.into_timestamp_nanos()),
-                arena,
-            );
-            column
-        });
+        hash_map.mutate_or_create(
+            column_name.as_bytes(),
+            |column_opt: Option<ColumnWriter>| {
+                let mut column: ColumnWriter = column_opt.unwrap_or_default();
+                column.record(
+                    doc,
+                    NumericalValue::I64(datetime.into_timestamp_nanos()),
+                    arena,
+                );
+                column
+            },
+        );
     }
 
     pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
@@ -303,10 +283,6 @@ impl ColumnarWriter {
     }
 
     pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
-        assert!(
-            !column_name.as_bytes().contains(&0u8),
-            "key may not contain the 0 byte"
-        );
         let (hash_map, arena, dictionaries) = (
             &mut self.bytes_field_hash_map,
             &mut self.arena,
```
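Note that the 0-byte assertions removed above are not simply dropped: the serializer change that follows sanitizes NUL bytes in `prepare_key` instead of panicking, which appears to be the fix for null bytes in JSON keys panicking the indexer (#2345 in the changelog above).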
```diff
@@ -18,7 +18,12 @@ pub struct ColumnarSerializer<W: io::Write> {
 /// code.
 fn prepare_key(key: &[u8], column_type: ColumnType, buffer: &mut Vec<u8>) {
     buffer.clear();
-    buffer.extend_from_slice(key);
+    // Convert 0 bytes to '0' string, as 0 bytes are reserved for the end of the path.
+    if key.contains(&0u8) {
+        buffer.extend(key.iter().map(|&b| if b == 0 { b'0' } else { b }));
+    } else {
+        buffer.extend_from_slice(key);
+    }
     buffer.push(0u8);
     buffer.push(column_type.to_code());
 }
@@ -102,7 +107,7 @@ mod tests {
         let mut buffer: Vec<u8> = b"somegarbage".to_vec();
         prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
         assert_eq!(buffer.len(), 12);
-        assert_eq!(&buffer[..10], b"root\0child");
+        assert_eq!(&buffer[..10], b"root0child");
         assert_eq!(buffer[10], 0u8);
         assert_eq!(buffer[11], ColumnType::Str.to_code());
     }
```
common/Cargo.toml

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-common"
-version = "0.6.0"
+version = "0.7.0"
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 license = "MIT"
 edition = "2021"
@@ -14,7 +14,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
 
 [dependencies]
 byteorder = "1.4.3"
-ownedbytes = { version= "0.6", path="../ownedbytes" }
+ownedbytes = { version= "0.7", path="../ownedbytes" }
 async-trait = "0.1"
 time = { version = "0.3.10", features = ["serde-well-known"] }
 serde = { version = "1.0.136", features = ["derive"] }
```
```diff
@@ -1,5 +1,5 @@
 use std::io::Write;
-use std::{fmt, io, u64};
+use std::{fmt, io};
 
 use ownedbytes::OwnedBytes;
```
@@ -1,5 +1,3 @@
-#![allow(deprecated)]
-
 use std::fmt;
 use std::io::{Read, Write};
 
@@ -27,9 +25,6 @@ pub enum DateTimePrecision {
     Nanoseconds,
 }
 
-#[deprecated(since = "0.20.0", note = "Use `DateTimePrecision` instead")]
-pub type DatePrecision = DateTimePrecision;
-
 /// A date/time value with nanoseconds precision.
 ///
 /// This timestamp does not carry any explicit time zone information.
@@ -40,7 +35,7 @@ pub type DatePrecision = DateTimePrecision;
 /// All constructors and conversions are provided as explicit
 /// functions and not by implementing any `From`/`Into` traits
 /// to prevent unintended usage.
-#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
 pub struct DateTime {
     // Timestamp in nanoseconds.
     pub(crate) timestamp_nanos: i64,
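Deriving `Serialize`/`Deserialize` lets `DateTime` travel through serde-based formats directly. A hedged sketch of what this enables, assuming the tantivy 0.22 API; the exact on-wire shape follows the private `timestamp_nanos` field and should be treated as an implementation detail:

    use tantivy::DateTime;

    fn main() -> serde_json::Result<()> {
        let dt = DateTime::from_timestamp_secs(1_655_902_800);
        // Roundtrip through any serde format; postcard or bincode work the same way.
        let encoded = serde_json::to_string(&dt)?;
        let decoded: DateTime = serde_json::from_str(&encoded)?;
        assert!(dt == decoded);
        Ok(())
    }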
@@ -5,6 +5,12 @@ pub const JSON_PATH_SEGMENT_SEP: u8 = 1u8;
 pub const JSON_PATH_SEGMENT_SEP_STR: &str =
     unsafe { std::str::from_utf8_unchecked(&[JSON_PATH_SEGMENT_SEP]) };
 
+/// Separates the json path and the value in
+/// a JSON term binary representation.
+pub const JSON_END_OF_PATH: u8 = 0u8;
+pub const JSON_END_OF_PATH_STR: &str =
+    unsafe { std::str::from_utf8_unchecked(&[JSON_END_OF_PATH]) };
+
 /// Create a new JsonPathWriter, that creates flattened json paths for tantivy.
 #[derive(Clone, Debug, Default)]
 pub struct JsonPathWriter {
@@ -14,6 +20,14 @@ pub struct JsonPathWriter {
 }
 
 impl JsonPathWriter {
+    pub fn with_expand_dots(expand_dots: bool) -> Self {
+        JsonPathWriter {
+            path: String::new(),
+            indices: Vec::new(),
+            expand_dots,
+        }
+    }
+
     pub fn new() -> Self {
         JsonPathWriter {
             path: String::new(),
@@ -39,8 +53,8 @@ impl JsonPathWriter {
     pub fn push(&mut self, segment: &str) {
         let len_path = self.path.len();
         self.indices.push(len_path);
-        if !self.path.is_empty() {
-            self.path.push_str(JSON_PATH_SEGMENT_SEP_STR);
+        if self.indices.len() > 1 {
+            self.path.push(JSON_PATH_SEGMENT_SEP as char);
         }
         self.path.push_str(segment);
         if self.expand_dots {
@@ -55,6 +69,12 @@ impl JsonPathWriter {
         }
     }
 
+    /// Set the end of JSON path marker.
+    #[inline]
+    pub fn set_end(&mut self) {
+        self.path.push_str(JSON_END_OF_PATH_STR);
+    }
+
     /// Remove the last segment. Does nothing if the path is empty.
     #[inline]
     pub fn pop(&mut self) {
@@ -91,6 +111,7 @@ mod tests {
     #[test]
     fn json_path_writer_test() {
         let mut writer = JsonPathWriter::new();
+        writer.set_expand_dots(false);
 
         writer.push("root");
         assert_eq!(writer.as_str(), "root");
@@ -109,4 +130,15 @@ mod tests {
         writer.push("k8s.node.id");
         assert_eq!(writer.as_str(), "root\u{1}k8s\u{1}node\u{1}id");
     }
+
+    #[test]
+    fn test_json_path_expand_dots_enabled_pop_segment() {
+        let mut json_writer = JsonPathWriter::with_expand_dots(true);
+        json_writer.push("hello");
+        assert_eq!(json_writer.as_str(), "hello");
+        json_writer.push("color.hue");
+        assert_eq!(json_writer.as_str(), "hello\x01color\x01hue");
+        json_writer.pop();
+        assert_eq!(json_writer.as_str(), "hello");
+    }
 }
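A short usage sketch of the extended `JsonPathWriter` API, assuming the `tantivy_common` crate: `with_expand_dots` fixes dot expansion at construction time, and the new `set_end` appends the 0 byte that now terminates a JSON path:

    use tantivy_common::JsonPathWriter;

    fn main() {
        let mut path = JsonPathWriter::with_expand_dots(true);
        path.push("k8s.node");
        path.push("id");
        // expand_dots rewrites "." into the \x01 segment separator.
        assert_eq!(path.as_str(), "k8s\u{1}node\u{1}id");
        path.set_end(); // appends the \0 end-of-path marker
        assert!(path.as_str().ends_with('\0'));
    }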
@@ -9,14 +9,12 @@ mod byte_count;
 mod datetime;
 pub mod file_slice;
 mod group_by;
-mod json_path_writer;
+pub mod json_path_writer;
 mod serialize;
 mod vint;
 mod writer;
 pub use bitset::*;
 pub use byte_count::ByteCount;
-#[allow(deprecated)]
-pub use datetime::DatePrecision;
 pub use datetime::{DateTime, DateTimePrecision};
 pub use group_by::GroupByIteratorExtended;
 pub use json_path_writer::JsonPathWriter;
@@ -290,8 +290,7 @@ impl<'a> BinarySerializable for Cow<'a, [u8]> {
 #[cfg(test)]
 pub mod test {
 
-    use super::{VInt, *};
-    use crate::serialize::BinarySerializable;
+    use super::*;
     pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
         let mut buffer = Vec::new();
         O::default().serialize(&mut buffer).unwrap();
@@ -151,7 +151,7 @@ pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
     (result, vlen)
 }
 /// Write a `u32` as a vint payload.
-pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
+pub fn write_u32_vint<W: io::Write + ?Sized>(val: u32, writer: &mut W) -> io::Result<()> {
     let mut buf = [0u8; 8];
     let data = serialize_vint_u32(val, &mut buf);
     writer.write_all(data)
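Relaxing the bound to `W: io::Write + ?Sized` lets callers pass trait objects as well as concrete writers. A sketch of the call sites this enables; the body below is a placeholder, not the real variable-length encoding:

    use std::io;

    // Mirrors the relaxed signature above (the real function lives in tantivy-common).
    fn write_u32_vint<W: io::Write + ?Sized>(val: u32, writer: &mut W) -> io::Result<()> {
        writer.write_all(&val.to_le_bytes()) // placeholder body for the sketch
    }

    fn main() -> io::Result<()> {
        let mut buf: Vec<u8> = Vec::new();
        write_u32_vint(5, &mut buf)?; // concrete writer, as before
        let dyn_writer: &mut dyn io::Write = &mut buf;
        write_u32_vint(7, dyn_writer)?; // now also accepts &mut dyn Write
        Ok(())
    }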
@@ -19,13 +19,14 @@ use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
 use tempfile::TempDir;
 
 fn main() -> tantivy::Result<()> {
-    // Let's create a temporary directory for the
-    // sake of this example
+    // Normally you would use `MMapDirectory` instead to persist data on disk.
+    // https://docs.rs/tantivy/latest/tantivy/directory/struct.MmapDirectory.html
+    // But for this example, we will use a temporary directory `TempDir`.
     let index_path = TempDir::new()?;
 
     // # Defining the schema
     //
-    // The Tantivy index requires a very strict schema.
+    // The Tantivy index requires a schema.
     // The schema declares which fields are in the index,
     // and for each field, its type and "the way it should
     // be indexed".
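As the updated comment says, persistent indexes normally go through `MmapDirectory`. A hedged sketch of that variant (the path and schema are placeholders, and the target directory must already exist):

    use tantivy::directory::MmapDirectory;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::Index;

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        schema_builder.add_text_field("title", TEXT);
        let schema = schema_builder.build();
        // "/tmp/tantivy-index" is a placeholder path.
        let dir = MmapDirectory::open("/tmp/tantivy-index")?;
        let _index = Index::open_or_create(dir, schema)?;
        Ok(())
    }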
@@ -11,9 +11,10 @@ use columnar::Column;
 // ---
 // Importing tantivy...
 use tantivy::collector::{Collector, SegmentCollector};
+use tantivy::index::SegmentReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
-use tantivy::{doc, Index, IndexWriter, Score, SegmentReader};
+use tantivy::{doc, Index, IndexWriter, Score};
 
 #[derive(Default)]
 struct Stats {
@@ -4,7 +4,7 @@
 
 use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
-use tantivy::schema::{DateOptions, Document, OwnedValue, Schema, INDEXED, STORED, STRING};
+use tantivy::schema::{DateOptions, Document, Schema, Value, INDEXED, STORED, STRING};
 use tantivy::{Index, IndexWriter, TantivyDocument};
 
 fn main() -> tantivy::Result<()> {
@@ -13,7 +13,7 @@ fn main() -> tantivy::Result<()> {
     let opts = DateOptions::from(INDEXED)
         .set_stored()
         .set_fast()
-        .set_precision(tantivy::DateTimePrecision::Seconds);
+        .set_precision(tantivy::schema::DateTimePrecision::Seconds);
     // Add `occurred_at` date field type
     let occurred_at = schema_builder.add_date_field("occurred_at", opts);
     let event_type = schema_builder.add_text_field("event", STRING | STORED);
@@ -61,10 +61,12 @@ fn main() -> tantivy::Result<()> {
     assert_eq!(count_docs.len(), 1);
     for (_score, doc_address) in count_docs {
         let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
-        assert!(matches!(
-            retrieved_doc.get_first(occurred_at),
-            Some(OwnedValue::Date(_))
-        ));
+        assert!(retrieved_doc
+            .get_first(occurred_at)
+            .unwrap()
+            .as_value()
+            .as_datetime()
+            .is_some(),);
         assert_eq!(
             retrieved_doc.to_json(&schema),
             r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
     let reader = index.reader()?;
     let searcher = reader.searcher();
     {
-        let facets = vec![
+        let facets = [
            Facet::from("/ingredient/egg"),
            Facet::from("/ingredient/oil"),
            Facet::from("/ingredient/garlic"),
@@ -94,9 +94,8 @@ fn main() -> tantivy::Result<()> {
                    .doc::<TantivyDocument>(*doc_id)
                    .unwrap()
                    .get_first(title)
-                    .and_then(|v| v.as_str())
+                    .and_then(|v| v.as_str().map(|el| el.to_string()))
                    .unwrap()
-                    .to_owned()
            })
            .collect();
        assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
@@ -61,7 +61,7 @@ fn main() -> tantivy::Result<()> {
                debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                limbs and branches that arch over the pool"
            ))?;
-            println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
+            println!("add doc {i} from thread 1 - opstamp {opstamp}");
            thread::sleep(Duration::from_millis(20));
        }
        Result::<(), TantivyError>::Ok(())
@@ -82,7 +82,7 @@ fn main() -> tantivy::Result<()> {
                    body => "Some great book description..."
                ))?
            };
-            println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
+            println!("add doc {i} from thread 2 - opstamp {opstamp}");
            thread::sleep(Duration::from_millis(10));
        }
        Result::<(), TantivyError>::Ok(())
@@ -7,10 +7,11 @@
 // the list of documents containing a term, getting
 // its term frequency, and accessing its positions.
 
+use tantivy::postings::Postings;
 // ---
 // Importing tantivy...
 use tantivy::schema::*;
-use tantivy::{doc, DocSet, Index, IndexWriter, Postings, TERMINATED};
+use tantivy::{doc, DocSet, Index, IndexWriter, TERMINATED};
 
 fn main() -> tantivy::Result<()> {
     // We first create a schema for the sake of the
@@ -3,10 +3,11 @@ use std::collections::{HashMap, HashSet};
 use std::sync::{Arc, RwLock, Weak};
 
 use tantivy::collector::TopDocs;
+use tantivy::index::SegmentId;
 use tantivy::query::QueryParser;
 use tantivy::schema::{Schema, FAST, TEXT};
 use tantivy::{
-    doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration, SegmentId,
+    doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration,
     SegmentReader, Warmer,
 };
 
@@ -1,7 +1,7 @@
 [package]
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 name = "ownedbytes"
-version = "0.6.0"
+version = "0.7.0"
 edition = "2021"
 description = "Expose data as static slice"
 license = "MIT"
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.21.0"
+version = "0.22.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -218,27 +218,14 @@ fn term_or_phrase_infallible(inp: &str) -> JResult<&str, Option<UserInputLeaf>>
 }
 
 fn term_group(inp: &str) -> IResult<&str, UserInputAst> {
-    let occur_symbol = alt((
-        value(Occur::MustNot, char('-')),
-        value(Occur::Must, char('+')),
-    ));
-
     map(
         tuple((
             terminated(field_name, multispace0),
-            delimited(
-                tuple((char('('), multispace0)),
-                separated_list0(multispace1, tuple((opt(occur_symbol), term_or_phrase))),
-                char(')'),
-            ),
+            delimited(tuple((char('('), multispace0)), ast, char(')')),
         )),
-        |(field_name, terms)| {
-            UserInputAst::Clause(
-                terms
-                    .into_iter()
-                    .map(|(occur, leaf)| (occur, leaf.set_field(Some(field_name.clone())).into()))
-                    .collect(),
-            )
+        |(field_name, mut ast)| {
+            ast.set_default_field(field_name);
+            ast
         },
     )(inp)
 }
@@ -258,46 +245,18 @@ fn term_group_precond(inp: &str) -> IResult<&str, (), ()> {
 }
 
 fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
-    let (mut inp, (field_name, _, _, _)) =
+    let (inp, (field_name, _, _, _)) =
         tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");
 
-    let mut terms = Vec::new();
-    let mut errs = Vec::new();
-
-    let mut first_round = true;
-    loop {
-        let mut space_error = if first_round {
-            first_round = false;
-            Vec::new()
-        } else {
-            let (rest, (_, err)) = space1_infallible(inp)?;
-            inp = rest;
-            err
-        };
-        if inp.is_empty() {
-            errs.push(LenientErrorInternal {
-                pos: inp.len(),
-                message: "missing )".to_string(),
-            });
-            break Ok((inp, (UserInputAst::Clause(terms), errs)));
-        }
-        if let Some(inp) = inp.strip_prefix(')') {
-            break Ok((inp, (UserInputAst::Clause(terms), errs)));
-        }
-        // only append missing space error if we did not reach the end of group
-        errs.append(&mut space_error);
-
-        // here we do the assumption term_or_phrase_infallible always consume something if the
-        // first byte is not `)` or ' '. If it did not, we would end up looping.
-
-        let (rest, ((occur, leaf), mut err)) =
-            tuple_infallible((occur_symbol, term_or_phrase_infallible))(inp)?;
-        errs.append(&mut err);
-        if let Some(leaf) = leaf {
-            terms.push((occur, leaf.set_field(Some(field_name.clone())).into()));
-        }
-        inp = rest;
-    }
+    let res = delimited_infallible(
+        nothing,
+        map(ast_infallible, |(mut ast, errors)| {
+            ast.set_default_field(field_name.to_string());
+            (ast, errors)
+        }),
+        opt_i_err(char(')'), "expected ')'"),
+    )(inp);
+    res
 }
 
 fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
@@ -1468,8 +1427,18 @@ mod test {
 
     #[test]
     fn test_parse_query_term_group() {
-        test_parse_query_to_ast_helper(r#"field:(abc)"#, r#"(*"field":abc)"#);
+        test_parse_query_to_ast_helper(r#"field:(abc)"#, r#""field":abc"#);
         test_parse_query_to_ast_helper(r#"field:(+a -"b c")"#, r#"(+"field":a -"field":"b c")"#);
+        test_parse_query_to_ast_helper(r#"field:(a AND "b c")"#, r#"(+"field":a +"field":"b c")"#);
+        test_parse_query_to_ast_helper(r#"field:(a OR "b c")"#, r#"(?"field":a ?"field":"b c")"#);
+        test_parse_query_to_ast_helper(
+            r#"field:(a OR (b AND c))"#,
+            r#"(?"field":a ?(+"field":b +"field":c))"#,
+        );
+        test_parse_query_to_ast_helper(
+            r#"field:(a [b TO c])"#,
+            r#"(*"field":a *"field":["b" TO "c"])"#,
+        );
 
         test_is_parse_err(r#"field:(+a -"b c""#, r#"(+"field":a -"field":"b c")"#);
     }
@@ -44,6 +44,26 @@ impl UserInputLeaf {
             },
         }
     }
+
+    pub(crate) fn set_default_field(&mut self, default_field: String) {
+        match self {
+            UserInputLeaf::Literal(ref mut literal) if literal.field_name.is_none() => {
+                literal.field_name = Some(default_field)
+            }
+            UserInputLeaf::All => {
+                *self = UserInputLeaf::Exists {
+                    field: default_field,
+                }
+            }
+            UserInputLeaf::Range { ref mut field, .. } if field.is_none() => {
+                *field = Some(default_field)
+            }
+            UserInputLeaf::Set { ref mut field, .. } if field.is_none() => {
+                *field = Some(default_field)
+            }
+            _ => (), // field was already set, do nothing
+        }
+    }
 }
 
 impl Debug for UserInputLeaf {
@@ -205,6 +225,16 @@ impl UserInputAst {
     pub fn or(asts: Vec<UserInputAst>) -> UserInputAst {
         UserInputAst::compose(Occur::Should, asts)
     }
+
+    pub(crate) fn set_default_field(&mut self, field: String) {
+        match self {
+            UserInputAst::Clause(clauses) => clauses
+                .iter_mut()
+                .for_each(|(_, ast)| ast.set_default_field(field.clone())),
+            UserInputAst::Leaf(leaf) => leaf.set_default_field(field),
+            UserInputAst::Boost(ref mut ast, _) => ast.set_default_field(field),
+        }
+    }
 }
 
 impl From<UserInputLiteral> for UserInputLeaf {
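With `set_default_field`, a term group now distributes its field over a full query AST rather than a flat term list, which is what makes `AND`/`OR` and ranges inside parentheses parse, as the new tests above show. A usage sketch through the public parser (in-memory index, field names are placeholders):

    use tantivy::query::QueryParser;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::Index;

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let field = schema_builder.add_text_field("field", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let parser = QueryParser::for_index(&index, vec![field]);
        // Boolean operators inside a term group; every leaf inherits "field".
        let _query = parser.parse_query(r#"field:(a OR (b AND c))"#)?;
        Ok(())
    }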
@@ -1,585 +0,0 @@
-#[cfg(all(test, feature = "unstable"))]
-mod bench {
-
-    use rand::prelude::SliceRandom;
-    use rand::rngs::StdRng;
-    use rand::{Rng, SeedableRng};
-    use rand_distr::Distribution;
-    use serde_json::json;
-    use test::{self, Bencher};
-
-    use crate::aggregation::agg_req::Aggregations;
-    use crate::aggregation::AggregationCollector;
-    use crate::query::{AllQuery, TermQuery};
-    use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
-    use crate::{Index, Term};
-
-    #[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
-    enum Cardinality {
-        /// All documents contain exactly one value.
-        /// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
-        #[default]
-        Full = 0,
-        /// All documents contain at most one value.
-        Optional = 1,
-        /// All documents may contain any number of values.
-        Multivalued = 2,
-        /// 1 / 20 documents has a value
-        Sparse = 3,
-    }
-
-    fn get_collector(agg_req: Aggregations) -> AggregationCollector {
-        AggregationCollector::from_aggs(agg_req, Default::default())
-    }
-
-    fn get_test_index_bench(cardinality: Cardinality) -> crate::Result<Index> {
-        let mut schema_builder = Schema::builder();
-        let text_fieldtype = crate::schema::TextOptions::default()
-            .set_indexing_options(
-                TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
-            )
-            .set_stored();
-        let text_field = schema_builder.add_text_field("text", text_fieldtype);
-        let json_field = schema_builder.add_json_field("json", FAST);
-        let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
-        let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
-        let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
-        let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
-        let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
-        let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
-        let index = Index::create_from_tempdir(schema_builder.build())?;
-        let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
-
-        let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
-
-        let many_terms_data = (0..150_000)
-            .map(|num| format!("author{}", num))
-            .collect::<Vec<_>>();
-        {
-            let mut rng = StdRng::from_seed([1u8; 32]);
-            let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
-            // To make the different test cases comparable we just change one doc to force the
-            // cardinality
-            if cardinality == Cardinality::Optional {
-                index_writer.add_document(doc!())?;
-            }
-            if cardinality == Cardinality::Multivalued {
-                index_writer.add_document(doc!(
-                    json_field => json!({"mixed_type": 10.0}),
-                    json_field => json!({"mixed_type": 10.0}),
-                    text_field => "cool",
-                    text_field => "cool",
-                    text_field_many_terms => "cool",
-                    text_field_many_terms => "cool",
-                    text_field_few_terms => "cool",
-                    text_field_few_terms => "cool",
-                    score_field => 1u64,
-                    score_field => 1u64,
-                    score_field_f64 => lg_norm.sample(&mut rng),
-                    score_field_f64 => lg_norm.sample(&mut rng),
-                    score_field_i64 => 1i64,
-                    score_field_i64 => 1i64,
-                ))?;
-            }
-            let mut doc_with_value = 1_000_000;
-            if cardinality == Cardinality::Sparse {
-                doc_with_value /= 20;
-            }
-            let _val_max = 1_000_000.0;
-            for _ in 0..doc_with_value {
-                let val: f64 = rng.gen_range(0.0..1_000_000.0);
-                let json = if rng.gen_bool(0.1) {
-                    // 10% are numeric values
-                    json!({ "mixed_type": val })
-                } else {
-                    json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
-                };
-                index_writer.add_document(doc!(
-                    text_field => "cool",
-                    json_field => json,
-                    text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
-                    text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
-                    score_field => val as u64,
-                    score_field_f64 => lg_norm.sample(&mut rng),
-                    score_field_i64 => val as i64,
-                ))?;
-                if cardinality == Cardinality::Sparse {
-                    for _ in 0..20 {
-                        index_writer.add_document(doc!(text_field => "cool"))?;
-                    }
-                }
-            }
-            // writing the segment
-            index_writer.commit()?;
-        }
-
-        Ok(index)
-    }
-
-    use paste::paste;
-    #[macro_export]
-    macro_rules! bench_all_cardinalities {
-        ( $x:ident ) => {
-            paste! {
-                #[bench]
-                fn $x(b: &mut Bencher) {
-                    [<$x _card>](b, Cardinality::Full)
-                }
-
-                #[bench]
-                fn [<$x _opt>](b: &mut Bencher) {
-                    [<$x _card>](b, Cardinality::Optional)
-                }
-
-                #[bench]
-                fn [<$x _multi>](b: &mut Bencher) {
-                    [<$x _card>](b, Cardinality::Multivalued)
-                }
-
-                #[bench]
-                fn [<$x _sparse>](b: &mut Bencher) {
-                    [<$x _card>](b, Cardinality::Sparse)
-                }
-
-            }
-        };
-    }
-
-    bench_all_cardinalities!(bench_aggregation_average_u64);
-
-    fn bench_aggregation_average_u64_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-        let text_field = reader.searcher().schema().get_field("text").unwrap();
-
-        b.iter(|| {
-            let term_query = TermQuery::new(
-                Term::from_field_text(text_field, "cool"),
-                IndexRecordOption::Basic,
-            );
-
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "average": { "avg": { "field": "score", } }
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&term_query, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_stats_f64);
-
-    fn bench_aggregation_stats_f64_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-        let text_field = reader.searcher().schema().get_field("text").unwrap();
-
-        b.iter(|| {
-            let term_query = TermQuery::new(
-                Term::from_field_text(text_field, "cool"),
-                IndexRecordOption::Basic,
-            );
-
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "average_f64": { "stats": { "field": "score_f64", } }
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&term_query, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_average_f64);
-
-    fn bench_aggregation_average_f64_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-        let text_field = reader.searcher().schema().get_field("text").unwrap();
-
-        b.iter(|| {
-            let term_query = TermQuery::new(
-                Term::from_field_text(text_field, "cool"),
-                IndexRecordOption::Basic,
-            );
-
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "average_f64": { "avg": { "field": "score_f64", } }
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&term_query, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_percentiles_f64);
-
-    fn bench_aggregation_percentiles_f64_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req_str = r#"
-            {
-                "mypercentiles": {
-                    "percentiles": {
-                        "field": "score_f64",
-                        "percents": [ 95, 99, 99.9 ]
-                    }
-                }
-            } "#;
-            let agg_req_1: Aggregations = serde_json::from_str(agg_req_str).unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_average_u64_and_f64);
-
-    fn bench_aggregation_average_u64_and_f64_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-        let text_field = reader.searcher().schema().get_field("text").unwrap();
-
-        b.iter(|| {
-            let term_query = TermQuery::new(
-                Term::from_field_text(text_field, "cool"),
-                IndexRecordOption::Basic,
-            );
-
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "average_f64": { "avg": { "field": "score_f64" } },
-                "average": { "avg": { "field": "score" } },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&term_query, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_terms_few);
-
-    fn bench_aggregation_terms_few_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req: Aggregations = serde_json::from_value(json!({
-                "my_texts": { "terms": { "field": "text_few_terms" } },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_terms_many_with_top_hits_agg);
-
-    fn bench_aggregation_terms_many_with_top_hits_agg_card(
-        b: &mut Bencher,
-        cardinality: Cardinality,
-    ) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req: Aggregations = serde_json::from_value(json!({
-                "my_texts": {
-                    "terms": { "field": "text_many_terms" },
-                    "aggs": {
-                        "top_hits": { "top_hits":
-                            {
-                                "sort": [
-                                    { "score": "desc" }
-                                ],
-                                "size": 2,
-                                "doc_value_fields": ["score_f64"]
-                            }
-                        }
-                    }
-                },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);
-
-    fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req: Aggregations = serde_json::from_value(json!({
-                "my_texts": {
-                    "terms": { "field": "text_many_terms" },
-                    "aggs": {
-                        "average_f64": { "avg": { "field": "score_f64" } }
-                    }
-                },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_terms_many_json_mixed_type_with_sub_agg);
-
-    fn bench_aggregation_terms_many_json_mixed_type_with_sub_agg_card(
-        b: &mut Bencher,
-        cardinality: Cardinality,
-    ) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req: Aggregations = serde_json::from_value(json!({
-                "my_texts": {
-                    "terms": { "field": "json.mixed_type" },
-                    "aggs": {
-                        "average_f64": { "avg": { "field": "score_f64" } }
-                    }
-                },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_terms_many2);
-
-    fn bench_aggregation_terms_many2_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req: Aggregations = serde_json::from_value(json!({
-                "my_texts": { "terms": { "field": "text_many_terms" } },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_terms_many_order_by_term);
-
-    fn bench_aggregation_terms_many_order_by_term_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req: Aggregations = serde_json::from_value(json!({
-                "my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_range_only);
-
-    fn bench_aggregation_range_only_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "range_f64": { "range": { "field": "score_f64", "ranges": [
-                    { "from": 3, "to": 7000 },
-                    { "from": 7000, "to": 20000 },
-                    { "from": 20000, "to": 30000 },
-                    { "from": 30000, "to": 40000 },
-                    { "from": 40000, "to": 50000 },
-                    { "from": 50000, "to": 60000 }
-                ] } },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_range_with_avg);
-
-    fn bench_aggregation_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "rangef64": {
-                    "range": {
-                        "field": "score_f64",
-                        "ranges": [
-                            { "from": 3, "to": 7000 },
-                            { "from": 7000, "to": 20000 },
-                            { "from": 20000, "to": 30000 },
-                            { "from": 30000, "to": 40000 },
-                            { "from": 40000, "to": 50000 },
-                            { "from": 50000, "to": 60000 }
-                        ]
-                    },
-                    "aggs": {
-                        "average_f64": { "avg": { "field": "score_f64" } }
-                    }
-                },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    // hard bounds has a different algorithm, because it actually limits collection range
-    //
-    bench_all_cardinalities!(bench_aggregation_histogram_only_hard_bounds);
-
-    fn bench_aggregation_histogram_only_hard_bounds_card(
-        b: &mut Bencher,
-        cardinality: Cardinality,
-    ) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_histogram_with_avg);
-
-    fn bench_aggregation_histogram_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "rangef64": {
-                    "histogram": { "field": "score_f64", "interval": 100 },
-                    "aggs": {
-                        "average_f64": { "avg": { "field": "score_f64" } }
-                    }
-                }
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_histogram_only);
-
-    fn bench_aggregation_histogram_only_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-
-        b.iter(|| {
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "rangef64": {
-                    "histogram": {
-                        "field": "score_f64",
-                        "interval": 100 // 1000 buckets
-                    },
-                }
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&AllQuery, &collector).unwrap()
-        });
-    }
-
-    bench_all_cardinalities!(bench_aggregation_avg_and_range_with_avg);
-
-    fn bench_aggregation_avg_and_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
-        let index = get_test_index_bench(cardinality).unwrap();
-        let reader = index.reader().unwrap();
-        let text_field = reader.searcher().schema().get_field("text").unwrap();
-
-        b.iter(|| {
-            let term_query = TermQuery::new(
-                Term::from_field_text(text_field, "cool"),
-                IndexRecordOption::Basic,
-            );
-
-            let agg_req_1: Aggregations = serde_json::from_value(json!({
-                "rangef64": {
-                    "range": {
-                        "field": "score_f64",
-                        "ranges": [
-                            { "from": 3, "to": 7000 },
-                            { "from": 7000, "to": 20000 },
-                            { "from": 20000, "to": 60000 }
-                        ]
-                    },
-                    "aggs": {
-                        "average_in_range": { "avg": { "field": "score" } }
-                    }
-                },
-                "average": { "avg": { "field": "score" } }
-            }))
-            .unwrap();
-
-            let collector = get_collector(agg_req_1);
-
-            let searcher = reader.searcher();
-            searcher.search(&term_query, &collector).unwrap()
-        });
-    }
-}
@@ -81,10 +81,11 @@ impl AggregationLimits {
         }
     }
 
-    pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
-        self.memory_consumption
-            .fetch_add(num_bytes, Ordering::Relaxed);
-        validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
+    pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
+        let prev_value = self
+            .memory_consumption
+            .fetch_add(add_num_bytes, Ordering::Relaxed);
+        validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
         Ok(())
     }
 
@@ -94,11 +95,11 @@ impl AggregationLimits {
 }
 
 fn validate_memory_consumption(
-    memory_consumption: &AtomicU64,
+    memory_consumption: u64,
     memory_limit: ByteCount,
 ) -> Result<(), AggregationError> {
     // Load the estimated memory consumed by the aggregations
-    let memory_consumed: ByteCount = memory_consumption.load(Ordering::Relaxed).into();
+    let memory_consumed: ByteCount = memory_consumption.into();
     if memory_consumed > memory_limit {
         return Err(AggregationError::MemoryExceeded {
             limit: memory_limit,
@@ -118,10 +119,11 @@ pub struct ResourceLimitGuard {
 }
 
 impl ResourceLimitGuard {
-    pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
-        self.memory_consumption
-            .fetch_add(num_bytes, Ordering::Relaxed);
-        validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
+    pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
+        let prev_value = self
+            .memory_consumption
+            .fetch_add(add_num_bytes, Ordering::Relaxed);
+        validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
         Ok(())
     }
 }
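The rewrite validates against the value returned by `fetch_add` (the counter before the addition) plus the increment, instead of re-loading the counter afterwards, so concurrent adders each check a consistent total. A minimal standalone sketch of the pattern:

    use std::sync::atomic::{AtomicU64, Ordering};

    fn add_and_check(counter: &AtomicU64, add: u64, limit: u64) -> Result<(), String> {
        // fetch_add returns the value *before* the addition, so prev + add is
        // exactly this thread's total, independent of later increments.
        let prev = counter.fetch_add(add, Ordering::Relaxed);
        if prev + add > limit {
            return Err(format!("memory limit exceeded: {} > {}", prev + add, limit));
        }
        Ok(())
    }

    fn main() {
        let counter = AtomicU64::new(0);
        assert!(add_and_check(&counter, 10, 15).is_ok());
        assert!(add_and_check(&counter, 10, 15).is_err());
    }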
@@ -17,7 +17,8 @@ use super::metric::{
 use super::segment_agg_result::AggregationLimits;
 use super::VecWithNames;
 use crate::aggregation::{f64_to_fastfield_u64, Key};
-use crate::{SegmentOrdinal, SegmentReader};
+use crate::index::SegmentReader;
+use crate::SegmentOrdinal;
 
 #[derive(Default)]
 pub(crate) struct AggregationsWithAccessor {
@@ -292,7 +293,7 @@ impl AggregationWithAccessor {
                 add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
             }
             TopHits(ref mut top_hits) => {
-                top_hits.validate_and_resolve(reader.fast_fields().columnar())?;
+                top_hits.validate_and_resolve_field_names(reader.fast_fields().columnar())?;
                 let accessors: Vec<(Column<u64>, ColumnType)> = top_hits
                     .field_names()
                     .iter()
@@ -334,8 +335,8 @@ fn get_missing_val(
         }
         _ => {
             return Err(crate::TantivyError::InvalidArgument(format!(
-                "Missing value {:?} for field {} is not supported for column type {:?}",
-                missing, field_name, column_type
+                "Missing value {missing:?} for field {field_name} is not supported for column \
+                 type {column_type:?}"
             )));
         }
     };
@@ -402,7 +403,7 @@ fn get_dynamic_columns(
         .iter()
         .map(|h| h.open())
         .collect::<io::Result<_>>()?;
-    assert!(!ff_fields.is_empty(), "field {} not found", field_name);
+    assert!(!ff_fields.is_empty(), "field {field_name} not found");
     Ok(cols)
 }
 
@@ -4,6 +4,7 @@ use crate::aggregation::agg_req::{Aggregation, Aggregations};
 use crate::aggregation::agg_result::AggregationResults;
 use crate::aggregation::buf_collector::DOC_BLOCK_SIZE;
 use crate::aggregation::collector::AggregationCollector;
+use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
 use crate::aggregation::segment_agg_result::AggregationLimits;
 use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values_and_terms};
 use crate::aggregation::DistributedAggregationCollector;
@@ -66,6 +67,22 @@ fn test_aggregation_flushing(
                 }
             }
         },
+        "top_hits_test":{
+            "terms": {
+                "field": "string_id"
+            },
+            "aggs": {
+                "bucketsL2": {
+                    "top_hits": {
+                        "size": 2,
+                        "sort": [
+                            { "score": "asc" }
+                        ],
+                        "docvalue_fields": ["score"]
+                    }
+                }
+            }
+        },
         "histogram_test":{
             "histogram": {
                 "field": "score",
@@ -108,6 +125,16 @@ fn test_aggregation_flushing(
 
     let searcher = reader.searcher();
     let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
+
+    // Test postcard roundtrip serialization
+    let intermediate_agg_result_bytes = postcard::to_allocvec(&intermediate_agg_result).expect(
+        "Postcard Serialization failed, flatten etc. is not supported in the intermediate \
+         result",
+    );
+    let intermediate_agg_result: IntermediateAggregationResults =
+        postcard::from_bytes(&intermediate_agg_result_bytes)
+            .expect("Post deserialization failed");
+
     intermediate_agg_result
         .into_final_result(agg_req, &Default::default())
         .unwrap()
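The new assertion exercises a postcard roundtrip because postcard, unlike JSON, rejects serde constructs such as `#[serde(flatten)]`, so it catches unsupported attributes in the intermediate result types. A standalone sketch of the same roundtrip pattern on a toy struct (not tantivy's actual intermediate type):

    use serde::{Deserialize, Serialize};

    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Intermediate {
        buckets: Vec<(String, u64)>,
    }

    fn main() {
        let original = Intermediate {
            buckets: vec![("a".to_string(), 1), ("b".to_string(), 2)],
        };
        let bytes = postcard::to_allocvec(&original).expect("serialization failed");
        let back: Intermediate = postcard::from_bytes(&bytes).expect("deserialization failed");
        assert_eq!(original, back);
    }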
@@ -1,7 +1,5 @@
 use std::cmp::Ordering;
 
-use columnar::ColumnType;
-use itertools::Itertools;
 use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
 use tantivy_bitpacker::minmax;
@@ -17,7 +15,7 @@ use crate::aggregation::intermediate_agg_result::{
     IntermediateHistogramBucketEntry,
 };
 use crate::aggregation::segment_agg_result::{
-    build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
+    build_segment_agg_collector, SegmentAggregationCollector,
 };
 use crate::aggregation::*;
 use crate::TantivyError;
@@ -333,9 +331,11 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
         }
 
         let mem_delta = self.get_memory_consumption() - mem_pre;
-        bucket_agg_accessor
-            .limits
-            .add_memory_consumed(mem_delta as u64)?;
+        if mem_delta > 0 {
+            bucket_agg_accessor
+                .limits
+                .add_memory_consumed(mem_delta as u64)?;
+        }
 
         Ok(())
     }
@@ -28,6 +28,7 @@ mod term_agg;
 mod term_missing_agg;
 
 use std::collections::HashMap;
+use std::fmt;
 
 pub use histogram::*;
 pub use range::*;
@@ -72,12 +73,12 @@ impl From<&str> for OrderTarget {
     }
 }
 
-impl ToString for OrderTarget {
-    fn to_string(&self) -> String {
+impl fmt::Display for OrderTarget {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
-            OrderTarget::Key => "_key".to_string(),
-            OrderTarget::Count => "_count".to_string(),
-            OrderTarget::SubAggregation(agg) => agg.to_string(),
+            OrderTarget::Key => f.write_str("_key"),
+            OrderTarget::Count => f.write_str("_count"),
+            OrderTarget::SubAggregation(agg) => agg.fmt(f),
         }
     }
 }
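Implementing `fmt::Display` is the idiomatic replacement for a direct `ToString` impl: `to_string` keeps working through the blanket `impl<T: Display> ToString for T`, and formatting writes into the formatter instead of allocating a `String` per variant. A generic sketch of the same migration:

    use std::fmt;

    enum OrderTargetLike {
        Key,
        Count,
        Sub(String),
    }

    impl fmt::Display for OrderTargetLike {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            match self {
                OrderTargetLike::Key => f.write_str("_key"),
                OrderTargetLike::Count => f.write_str("_count"),
                OrderTargetLike::Sub(name) => f.write_str(name),
            }
        }
    }

    fn main() {
        // `to_string` still works, now via the blanket impl for Display.
        assert_eq!(OrderTargetLike::Key.to_string(), "_key");
        assert_eq!(OrderTargetLike::Sub("avg_price".to_string()).to_string(), "avg_price");
    }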
@@ -1,7 +1,6 @@
 use std::fmt::Debug;
 use std::ops::Range;
 
-use columnar::{ColumnType, MonotonicallyMappableToU64};
 use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
 
@@ -450,7 +449,6 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &ColumnType) -> crate
 #[cfg(test)]
 mod tests {
 
-    use columnar::MonotonicallyMappableToU64;
     use serde_json::Value;
 
     use super::*;
@@ -459,7 +457,6 @@ mod tests {
         exec_request, exec_request_with_query, get_test_index_2_segments,
         get_test_index_with_num_docs,
     };
-    use crate::aggregation::AggregationLimits;
 
     pub fn get_collector_from_ranges(
         ranges: Vec<RangeAggregationRange>,
@@ -324,9 +324,11 @@ impl SegmentAggregationCollector for SegmentTermCollector {
         }
 
         let mem_delta = self.get_memory_consumption() - mem_pre;
-        bucket_agg_accessor
-            .limits
-            .add_memory_consumed(mem_delta as u64)?;
+        if mem_delta > 0 {
+            bucket_agg_accessor
+                .limits
+                .add_memory_consumed(mem_delta as u64)?;
+        }
 
         Ok(())
     }
@@ -355,8 +357,7 @@ impl SegmentTermCollector {
     ) -> crate::Result<Self> {
         if field_type == ColumnType::Bytes {
             return Err(TantivyError::InvalidArgument(format!(
-                "terms aggregation is not supported for column type {:?}",
-                field_type
+                "terms aggregation is not supported for column type {field_type:?}"
             )));
         }
         let term_buckets = TermBuckets::default();
@@ -8,7 +8,8 @@ use super::segment_agg_result::{
 };
 use crate::aggregation::agg_req_with_accessor::get_aggs_with_segment_accessor_and_validate;
 use crate::collector::{Collector, SegmentCollector};
-use crate::{DocId, SegmentOrdinal, SegmentReader, TantivyError};
+use crate::index::SegmentReader;
+use crate::{DocId, SegmentOrdinal, TantivyError};
 
 /// The default max bucket count, before the aggregation fails.
 pub const DEFAULT_BUCKET_LIMIT: u32 = 65000;
@@ -20,7 +20,7 @@ use super::bucket::{
 };
 use super::metric::{
     IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
-    IntermediateSum, PercentilesCollector, TopHitsCollector,
+    IntermediateSum, PercentilesCollector, TopHitsTopNComputer,
 };
 use super::segment_agg_result::AggregationLimits;
 use super::{format_date, AggregationError, Key, SerializedKey};
@@ -221,9 +221,9 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
         Percentiles(_) => IntermediateAggregationResult::Metric(
             IntermediateMetricResult::Percentiles(PercentilesCollector::default()),
         ),
-        TopHits(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::TopHits(
-            TopHitsCollector::default(),
-        )),
+        TopHits(ref req) => IntermediateAggregationResult::Metric(
+            IntermediateMetricResult::TopHits(TopHitsTopNComputer::new(req.clone())),
+        ),
     }
 }
 
@@ -285,7 +285,7 @@ pub enum IntermediateMetricResult {
     /// Intermediate sum result.
     Sum(IntermediateSum),
     /// Intermediate top_hits result
-    TopHits(TopHitsCollector),
+    TopHits(TopHitsTopNComputer),
 }
 
 impl IntermediateMetricResult {
@@ -314,7 +314,7 @@ impl IntermediateMetricResult {
                     .into_final_result(req.agg.as_percentile().expect("unexpected metric type")),
             ),
             IntermediateMetricResult::TopHits(top_hits) => {
-                MetricResult::TopHits(top_hits.finalize())
+                MetricResult::TopHits(top_hits.into_final_result())
             }
         }
     }
@@ -25,6 +25,8 @@ mod stats;
 mod sum;
 mod top_hits;
 
+use std::collections::HashMap;
+
 pub use average::*;
 pub use count::*;
 pub use max::*;
@@ -36,6 +38,8 @@ pub use stats::*;
 pub use sum::*;
 pub use top_hits::*;
 
+use crate::schema::OwnedValue;
+
 /// Single-metric aggregations use this common result structure.
 ///
 /// Main reason to wrap it in value is to match elasticsearch output structure.
@@ -92,8 +96,9 @@ pub struct TopHitsVecEntry {
 
     /// Search results, for queries that include field retrieval requests
     /// (`docvalue_fields`).
-    #[serde(flatten)]
-    pub search_results: FieldRetrivalResult,
+    #[serde(rename = "docvalue_fields")]
+    #[serde(skip_serializing_if = "HashMap::is_empty")]
+    pub doc_value_fields: HashMap<String, OwnedValue>,
 }
 
 /// The top_hits metric aggregation results a list of top hits by sort criteria.
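For reference, a sketch of the serialized entry shape implied by this change: `docvalue_fields` is now emitted under an explicit key (and skipped entirely when empty) rather than flattened into the entry. The field names and values below are made up for illustration:

```rust
use serde_json::json; // requires the serde_json crate

fn main() {
    let entry = json!({
        "sort": [1u64],
        "docvalue_fields": {
            "date": ["2017-06-15T00:00:00Z"]
        }
    });
    println!("{entry}");
}
```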
|||||||
@@ -1,6 +1,5 @@
|
|||||||
use std::fmt::Debug;
|
use std::fmt::Debug;
|
||||||
|
|
||||||
use columnar::ColumnType;
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
use columnar::ColumnType;
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|||||||
@@ -1,7 +1,9 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fmt::Formatter;
|
use std::net::Ipv6Addr;
|
||||||
|
|
||||||
use columnar::{ColumnarReader, DynamicColumn};
|
use columnar::{ColumnarReader, DynamicColumn};
|
||||||
|
use common::json_path_writer::JSON_PATH_SEGMENT_SEP_STR;
|
||||||
|
use common::DateTime;
|
||||||
use regex::Regex;
|
use regex::Regex;
|
||||||
use serde::ser::SerializeMap;
|
use serde::ser::SerializeMap;
|
||||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
@@ -12,8 +14,8 @@ use crate::aggregation::intermediate_agg_result::{
     IntermediateAggregationResult, IntermediateMetricResult,
 };
 use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
+use crate::aggregation::AggregationError;
 use crate::collector::TopNComputer;
-use crate::schema::term::JSON_PATH_SEGMENT_SEP_STR;
 use crate::schema::OwnedValue;
 use crate::{DocAddress, DocId, SegmentOrdinal};
 
@@ -92,53 +94,101 @@ pub struct TopHitsAggregation {
     size: usize,
     from: Option<usize>,
 
-    #[serde(flatten)]
-    retrieval: RetrievalFields,
-}
-
-const fn default_doc_value_fields() -> Vec<String> {
-    Vec::new()
-}
-
-/// Search query spec for each matched document
-/// TODO: move this to a common module
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
-pub struct RetrievalFields {
-    /// The fast fields to return for each hit.
-    /// This is the only variant supported for now.
-    /// TODO: support the {field, format} variant for custom formatting.
     #[serde(rename = "docvalue_fields")]
-    #[serde(default = "default_doc_value_fields")]
-    pub doc_value_fields: Vec<String>,
+    #[serde(default)]
+    doc_value_fields: Vec<String>,
 
+    // Not supported
+    _source: Option<serde_json::Value>,
+    fields: Option<serde_json::Value>,
+    script_fields: Option<serde_json::Value>,
+    highlight: Option<serde_json::Value>,
+    explain: Option<serde_json::Value>,
+    version: Option<serde_json::Value>,
 }
 
-/// Search query result for each matched document
-/// TODO: move this to a common module
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
-pub struct FieldRetrivalResult {
-    /// The fast fields returned for each hit.
-    #[serde(rename = "docvalue_fields")]
-    #[serde(skip_serializing_if = "HashMap::is_empty")]
-    pub doc_value_fields: HashMap<String, OwnedValue>,
+#[derive(Debug, Clone, PartialEq, Default)]
+struct KeyOrder {
+    field: String,
+    order: Order,
 }
 
-impl RetrievalFields {
-    fn get_field_names(&self) -> Vec<&str> {
-        self.doc_value_fields.iter().map(|s| s.as_str()).collect()
+impl Serialize for KeyOrder {
+    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        let KeyOrder { field, order } = self;
+        let mut map = serializer.serialize_map(Some(1))?;
+        map.serialize_entry(field, order)?;
+        map.end()
     }
+}
+
+impl<'de> Deserialize<'de> for KeyOrder {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where D: Deserializer<'de> {
+        let mut key_order = <HashMap<String, Order>>::deserialize(deserializer)?.into_iter();
+        let (field, order) = key_order.next().ok_or(serde::de::Error::custom(
+            "Expected exactly one key-value pair in sort parameter of top_hits, found none",
+        ))?;
+        if key_order.next().is_some() {
+            return Err(serde::de::Error::custom(format!(
+                "Expected exactly one key-value pair in sort parameter of top_hits, found \
+                 {key_order:?}"
+            )));
+        }
+        Ok(Self { field, order })
+    }
+}
+
+// Tranform a glob (`pattern*`, for example) into a regex::Regex (`^pattern.*$`)
+fn globbed_string_to_regex(glob: &str) -> Result<Regex, crate::TantivyError> {
+    // Replace `*` glob with `.*` regex
+    let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
+    Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
+        crate::TantivyError::SchemaError(format!("Invalid regex '{glob}' in docvalue_fields: {e}"))
+    })
+}
+
+fn use_doc_value_fields_err(parameter: &str) -> crate::Result<()> {
+    Err(crate::TantivyError::AggregationError(
+        AggregationError::InvalidRequest(format!(
+            "The `{parameter}` parameter is not supported, only `docvalue_fields` is supported in \
+             `top_hits` aggregation"
+        )),
+    ))
+}
+fn unsupported_err(parameter: &str) -> crate::Result<()> {
+    Err(crate::TantivyError::AggregationError(
+        AggregationError::InvalidRequest(format!(
+            "The `{parameter}` parameter is not supported in the `top_hits` aggregation"
+        )),
+    ))
+}
+
+impl TopHitsAggregation {
+    /// Validate and resolve field retrieval parameters
+    pub fn validate_and_resolve_field_names(
+        &mut self,
+        reader: &ColumnarReader,
+    ) -> crate::Result<()> {
+        if self._source.is_some() {
+            use_doc_value_fields_err("_source")?;
+        }
+        if self.fields.is_some() {
+            use_doc_value_fields_err("fields")?;
+        }
+        if self.script_fields.is_some() {
+            use_doc_value_fields_err("script_fields")?;
+        }
+        if self.explain.is_some() {
+            unsupported_err("explain")?;
+        }
+        if self.highlight.is_some() {
+            unsupported_err("highlight")?;
+        }
+        if self.version.is_some() {
+            unsupported_err("version")?;
+        }
 
-    fn resolve_field_names(&mut self, reader: &ColumnarReader) -> crate::Result<()> {
-        // Tranform a glob (`pattern*`, for example) into a regex::Regex (`^pattern.*$`)
-        let globbed_string_to_regex = |glob: &str| {
-            // Replace `*` glob with `.*` regex
-            let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
-            Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
-                crate::TantivyError::SchemaError(format!(
-                    "Invalid regex '{}' in docvalue_fields: {}",
-                    glob, e
-                ))
-            })
-        };
         self.doc_value_fields = self
             .doc_value_fields
             .iter()
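The glob support added above can be exercised in isolation. A minimal sketch of the same rewrite (`regex` crate only; `glob_to_regex` is a stand-in name, the real helper is `globbed_string_to_regex`):

```rust
use regex::Regex;

fn glob_to_regex(glob: &str) -> Regex {
    // `regex::escape` turns `*` into `\*`; re-open it as the `.*` wildcard.
    let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
    Regex::new(&sanitized).unwrap()
}

fn main() {
    let re = glob_to_regex("attributes.*");
    assert_eq!(re.as_str(), r"^attributes\..*$");
    assert!(re.is_match("attributes.color"));
    assert!(!re.is_match("other.color"));
}
```

The new `KeyOrder` (de)serialization, for its part, accepts exactly the Elasticsearch-style single-entry map form of the `sort` parameter, e.g. `{"timestamp": "desc"}`, and rejects empty or multi-entry maps.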
@@ -162,8 +212,7 @@ impl RetrievalFields {
                     .collect::<Vec<_>>();
                 assert!(
                     !fields.is_empty(),
-                    "No fields matched the glob '{}' in docvalue_fields",
-                    field
+                    "No fields matched the glob '{field}' in docvalue_fields"
                 );
                 Ok(fields)
             })
@@ -175,121 +224,6 @@ impl RetrievalFields {
         Ok(())
     }
 
-    fn get_document_field_data(
-        &self,
-        accessors: &HashMap<String, Vec<DynamicColumn>>,
-        doc_id: DocId,
-    ) -> FieldRetrivalResult {
-        let dvf = self
-            .doc_value_fields
-            .iter()
-            .map(|field| {
-                let accessors = accessors
-                    .get(field)
-                    .unwrap_or_else(|| panic!("field '{}' not found in accessors", field));
-
-                let values: Vec<OwnedValue> = accessors
-                    .iter()
-                    .flat_map(|accessor| match accessor {
-                        DynamicColumn::U64(accessor) => accessor
-                            .values_for_doc(doc_id)
-                            .map(OwnedValue::U64)
-                            .collect::<Vec<_>>(),
-                        DynamicColumn::I64(accessor) => accessor
-                            .values_for_doc(doc_id)
-                            .map(OwnedValue::I64)
-                            .collect::<Vec<_>>(),
-                        DynamicColumn::F64(accessor) => accessor
-                            .values_for_doc(doc_id)
-                            .map(OwnedValue::F64)
-                            .collect::<Vec<_>>(),
-                        DynamicColumn::Bytes(accessor) => accessor
-                            .term_ords(doc_id)
-                            .map(|term_ord| {
-                                let mut buffer = vec![];
-                                assert!(
-                                    accessor
-                                        .ord_to_bytes(term_ord, &mut buffer)
-                                        .expect("could not read term dictionary"),
-                                    "term corresponding to term_ord does not exist"
-                                );
-                                OwnedValue::Bytes(buffer)
-                            })
-                            .collect::<Vec<_>>(),
-                        DynamicColumn::Str(accessor) => accessor
-                            .term_ords(doc_id)
-                            .map(|term_ord| {
-                                let mut buffer = vec![];
-                                assert!(
-                                    accessor
-                                        .ord_to_bytes(term_ord, &mut buffer)
-                                        .expect("could not read term dictionary"),
-                                    "term corresponding to term_ord does not exist"
-                                );
-                                OwnedValue::Str(String::from_utf8(buffer).unwrap())
-                            })
-                            .collect::<Vec<_>>(),
-                        DynamicColumn::Bool(accessor) => accessor
-                            .values_for_doc(doc_id)
-                            .map(OwnedValue::Bool)
-                            .collect::<Vec<_>>(),
-                        DynamicColumn::IpAddr(accessor) => accessor
-                            .values_for_doc(doc_id)
-                            .map(OwnedValue::IpAddr)
-                            .collect::<Vec<_>>(),
-                        DynamicColumn::DateTime(accessor) => accessor
-                            .values_for_doc(doc_id)
-                            .map(OwnedValue::Date)
-                            .collect::<Vec<_>>(),
-                    })
-                    .collect();
-
-                (field.to_owned(), OwnedValue::Array(values))
-            })
-            .collect();
-        FieldRetrivalResult {
-            doc_value_fields: dvf,
-        }
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Default)]
-struct KeyOrder {
-    field: String,
-    order: Order,
-}
-
-impl Serialize for KeyOrder {
-    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
-        let KeyOrder { field, order } = self;
-        let mut map = serializer.serialize_map(Some(1))?;
-        map.serialize_entry(field, order)?;
-        map.end()
-    }
-}
-
-impl<'de> Deserialize<'de> for KeyOrder {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where D: Deserializer<'de> {
-        let mut k_o = <HashMap<String, Order>>::deserialize(deserializer)?.into_iter();
-        let (k, v) = k_o.next().ok_or(serde::de::Error::custom(
-            "Expected exactly one key-value pair in KeyOrder, found none",
-        ))?;
-        if k_o.next().is_some() {
-            return Err(serde::de::Error::custom(
-                "Expected exactly one key-value pair in KeyOrder, found more",
-            ));
-        }
-        Ok(Self { field: k, order: v })
-    }
-}
-
-impl TopHitsAggregation {
-    /// Validate and resolve field retrieval parameters
-    pub fn validate_and_resolve(&mut self, reader: &ColumnarReader) -> crate::Result<()> {
-        self.retrieval.resolve_field_names(reader)
-    }
-
     /// Return fields accessed by the aggregator, in order.
     pub fn field_names(&self) -> Vec<&str> {
         self.sort
@@ -300,20 +234,136 @@ impl TopHitsAggregation {
 
     /// Return fields accessed by the aggregator's value retrieval.
     pub fn value_field_names(&self) -> Vec<&str> {
-        self.retrieval.get_field_names()
+        self.doc_value_fields.iter().map(|s| s.as_str()).collect()
+    }
+
+    fn get_document_field_data(
+        &self,
+        accessors: &HashMap<String, Vec<DynamicColumn>>,
+        doc_id: DocId,
+    ) -> HashMap<String, FastFieldValue> {
+        let doc_value_fields = self
+            .doc_value_fields
+            .iter()
+            .map(|field| {
+                let accessors = accessors
+                    .get(field)
+                    .unwrap_or_else(|| panic!("field '{field}' not found in accessors"));
+
+                let values: Vec<FastFieldValue> = accessors
+                    .iter()
+                    .flat_map(|accessor| match accessor {
+                        DynamicColumn::U64(accessor) => accessor
+                            .values_for_doc(doc_id)
+                            .map(FastFieldValue::U64)
+                            .collect::<Vec<_>>(),
+                        DynamicColumn::I64(accessor) => accessor
+                            .values_for_doc(doc_id)
+                            .map(FastFieldValue::I64)
+                            .collect::<Vec<_>>(),
+                        DynamicColumn::F64(accessor) => accessor
+                            .values_for_doc(doc_id)
+                            .map(FastFieldValue::F64)
+                            .collect::<Vec<_>>(),
+                        DynamicColumn::Bytes(accessor) => accessor
+                            .term_ords(doc_id)
+                            .map(|term_ord| {
+                                let mut buffer = vec![];
+                                assert!(
+                                    accessor
+                                        .ord_to_bytes(term_ord, &mut buffer)
+                                        .expect("could not read term dictionary"),
+                                    "term corresponding to term_ord does not exist"
+                                );
+                                FastFieldValue::Bytes(buffer)
+                            })
+                            .collect::<Vec<_>>(),
+                        DynamicColumn::Str(accessor) => accessor
+                            .term_ords(doc_id)
+                            .map(|term_ord| {
+                                let mut buffer = vec![];
+                                assert!(
+                                    accessor
+                                        .ord_to_bytes(term_ord, &mut buffer)
+                                        .expect("could not read term dictionary"),
+                                    "term corresponding to term_ord does not exist"
+                                );
+                                FastFieldValue::Str(String::from_utf8(buffer).unwrap())
+                            })
+                            .collect::<Vec<_>>(),
+                        DynamicColumn::Bool(accessor) => accessor
+                            .values_for_doc(doc_id)
+                            .map(FastFieldValue::Bool)
+                            .collect::<Vec<_>>(),
+                        DynamicColumn::IpAddr(accessor) => accessor
+                            .values_for_doc(doc_id)
+                            .map(FastFieldValue::IpAddr)
+                            .collect::<Vec<_>>(),
+                        DynamicColumn::DateTime(accessor) => accessor
+                            .values_for_doc(doc_id)
+                            .map(FastFieldValue::Date)
+                            .collect::<Vec<_>>(),
+                    })
+                    .collect();
+
+                (field.to_owned(), FastFieldValue::Array(values))
+            })
+            .collect();
+        doc_value_fields
     }
 }
 
-/// Holds a single comparable doc feature, and the order in which it should be sorted.
+/// A retrieved value from a fast field.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum FastFieldValue {
+    /// The str type is used for any text information.
+    Str(String),
+    /// Unsigned 64-bits Integer `u64`
+    U64(u64),
+    /// Signed 64-bits Integer `i64`
+    I64(i64),
+    /// 64-bits Float `f64`
+    F64(f64),
+    /// Bool value
+    Bool(bool),
+    /// Date/time with nanoseconds precision
+    Date(DateTime),
+    /// Arbitrarily sized byte array
+    Bytes(Vec<u8>),
+    /// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
+    IpAddr(Ipv6Addr),
+    /// A list of values.
+    Array(Vec<Self>),
+}
+
+impl From<FastFieldValue> for OwnedValue {
+    fn from(value: FastFieldValue) -> Self {
+        match value {
+            FastFieldValue::Str(s) => OwnedValue::Str(s),
+            FastFieldValue::U64(u) => OwnedValue::U64(u),
+            FastFieldValue::I64(i) => OwnedValue::I64(i),
+            FastFieldValue::F64(f) => OwnedValue::F64(f),
+            FastFieldValue::Bool(b) => OwnedValue::Bool(b),
+            FastFieldValue::Date(d) => OwnedValue::Date(d),
+            FastFieldValue::Bytes(b) => OwnedValue::Bytes(b),
+            FastFieldValue::IpAddr(ip) => OwnedValue::IpAddr(ip),
+            FastFieldValue::Array(a) => {
+                OwnedValue::Array(a.into_iter().map(OwnedValue::from).collect())
+            }
+        }
+    }
+}
+
+/// Holds a fast field value in its u64 representation, and the order in which it should be sorted.
 #[derive(Clone, Serialize, Deserialize, Debug)]
-struct ComparableDocFeature {
-    /// Stores any u64-mappable feature.
+struct DocValueAndOrder {
+    /// A fast field value in its u64 representation.
     value: Option<u64>,
-    /// Sort order for the doc feature
+    /// Sort order for the value
     order: Order,
 }
 
-impl Ord for ComparableDocFeature {
+impl Ord for DocValueAndOrder {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
         let invert = |cmp: std::cmp::Ordering| match self.order {
             Order::Asc => cmp,
@@ -329,26 +379,32 @@ impl Ord for ComparableDocFeature {
         }
     }
 }
 
-impl PartialOrd for ComparableDocFeature {
+impl PartialOrd for DocValueAndOrder {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         Some(self.cmp(other))
     }
 }
 
-impl PartialEq for ComparableDocFeature {
+impl PartialEq for DocValueAndOrder {
     fn eq(&self, other: &Self) -> bool {
         self.value.cmp(&other.value) == std::cmp::Ordering::Equal
     }
 }
 
-impl Eq for ComparableDocFeature {}
+impl Eq for DocValueAndOrder {}
 
 #[derive(Clone, Serialize, Deserialize, Debug)]
-struct ComparableDocFeatures(Vec<ComparableDocFeature>, FieldRetrivalResult);
+struct DocSortValuesAndFields {
+    sorts: Vec<DocValueAndOrder>,
 
-impl Ord for ComparableDocFeatures {
+    #[serde(rename = "docvalue_fields")]
+    #[serde(skip_serializing_if = "HashMap::is_empty")]
+    doc_value_fields: HashMap<String, FastFieldValue>,
+}
+
+impl Ord for DocSortValuesAndFields {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        for (self_feature, other_feature) in self.0.iter().zip(other.0.iter()) {
+        for (self_feature, other_feature) in self.sorts.iter().zip(other.sorts.iter()) {
             let cmp = self_feature.cmp(other_feature);
             if cmp != std::cmp::Ordering::Equal {
                 return cmp;
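To make the comparison semantics concrete, here is a self-contained sketch of the same idea. It is simplified: the real `DocValueAndOrder` carries serde attributes and ties into `TopNComputer`, and its handling of missing (`None`) values is not fully visible in the hunk above, so the last assertion reflects plain `Option` ordering rather than a confirmed detail of the implementation:

```rust
use std::cmp::Ordering;

#[derive(Clone, Copy)]
enum Order { Asc, Desc }

// Compare two optional u64 sort values, flipping the result for descending order.
fn cmp_values(a: Option<u64>, b: Option<u64>, order: Order) -> Ordering {
    let cmp = a.cmp(&b);
    match order {
        Order::Asc => cmp,
        Order::Desc => cmp.reverse(),
    }
}

fn main() {
    assert_eq!(cmp_values(Some(1), Some(2), Order::Asc), Ordering::Less);
    assert_eq!(cmp_values(Some(1), Some(2), Order::Desc), Ordering::Greater);
    // `None` (missing value) compares below any present value in this sketch.
    assert_eq!(cmp_values(None, Some(0), Order::Asc), Ordering::Less);
}
```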
@@ -358,53 +414,43 @@ impl Ord for ComparableDocFeatures {
         }
     }
 }
 
-impl PartialOrd for ComparableDocFeatures {
+impl PartialOrd for DocSortValuesAndFields {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         Some(self.cmp(other))
     }
 }
 
-impl PartialEq for ComparableDocFeatures {
+impl PartialEq for DocSortValuesAndFields {
     fn eq(&self, other: &Self) -> bool {
         self.cmp(other) == std::cmp::Ordering::Equal
     }
 }
 
-impl Eq for ComparableDocFeatures {}
+impl Eq for DocSortValuesAndFields {}
 
 /// The TopHitsCollector used for collecting over segments and merging results.
-#[derive(Clone, Serialize, Deserialize)]
-pub struct TopHitsCollector {
+#[derive(Clone, Serialize, Deserialize, Debug)]
+pub struct TopHitsTopNComputer {
     req: TopHitsAggregation,
-    top_n: TopNComputer<ComparableDocFeatures, DocAddress, false>,
+    top_n: TopNComputer<DocSortValuesAndFields, DocAddress, false>,
 }
 
-impl Default for TopHitsCollector {
-    fn default() -> Self {
-        Self {
-            req: TopHitsAggregation::default(),
-            top_n: TopNComputer::new(1),
-        }
-    }
-}
-
-impl std::fmt::Debug for TopHitsCollector {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("TopHitsCollector")
-            .field("req", &self.req)
-            .field("top_n_threshold", &self.top_n.threshold)
-            .finish()
-    }
-}
-
-impl std::cmp::PartialEq for TopHitsCollector {
+impl std::cmp::PartialEq for TopHitsTopNComputer {
     fn eq(&self, _other: &Self) -> bool {
         false
     }
 }
 
-impl TopHitsCollector {
-    fn collect(&mut self, features: ComparableDocFeatures, doc: DocAddress) {
+impl TopHitsTopNComputer {
+    /// Create a new TopHitsCollector
+    pub fn new(req: TopHitsAggregation) -> Self {
+        Self {
+            top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
+            req,
+        }
+    }
+
+    fn collect(&mut self, features: DocSortValuesAndFields, doc: DocAddress) {
         self.top_n.push(features, doc);
     }
 
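Note the capacity choice in `new`: the computer keeps `size + from` hits, presumably so the first `from` sorted hits can still be skipped when the final page of results is assembled. A tiny illustration of the arithmetic:

```rust
fn capacity(size: usize, from: Option<usize>) -> usize {
    size + from.unwrap_or(0)
}

fn main() {
    assert_eq!(capacity(10, Some(5)), 15); // collect 15 to be able to return hits 5..15
    assert_eq!(capacity(10, None), 10);
}
```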
@@ -416,14 +462,19 @@ impl TopHitsCollector {
     }
 
     /// Finalize by converting self into the final result form
-    pub fn finalize(self) -> TopHitsMetricResult {
+    pub fn into_final_result(self) -> TopHitsMetricResult {
         let mut hits: Vec<TopHitsVecEntry> = self
             .top_n
             .into_sorted_vec()
             .into_iter()
             .map(|doc| TopHitsVecEntry {
-                sort: doc.feature.0.iter().map(|f| f.value).collect(),
-                search_results: doc.feature.1,
+                sort: doc.feature.sorts.iter().map(|f| f.value).collect(),
+                doc_value_fields: doc
+                    .feature
+                    .doc_value_fields
+                    .into_iter()
+                    .map(|(k, v)| (k, v.into()))
+                    .collect(),
             })
             .collect();
 
@@ -436,48 +487,63 @@ impl TopHitsCollector {
     }
 }
 
-#[derive(Clone)]
-pub(crate) struct SegmentTopHitsCollector {
+#[derive(Clone, Debug)]
+pub(crate) struct TopHitsSegmentCollector {
     segment_ordinal: SegmentOrdinal,
     accessor_idx: usize,
-    inner_collector: TopHitsCollector,
+    req: TopHitsAggregation,
+    top_n: TopNComputer<Vec<DocValueAndOrder>, DocAddress, false>,
 }
 
-impl SegmentTopHitsCollector {
+impl TopHitsSegmentCollector {
     pub fn from_req(
         req: &TopHitsAggregation,
         accessor_idx: usize,
         segment_ordinal: SegmentOrdinal,
     ) -> Self {
         Self {
-            inner_collector: TopHitsCollector {
-                req: req.clone(),
-                top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
-            },
+            req: req.clone(),
+            top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
            segment_ordinal,
             accessor_idx,
         }
     }
-}
 
-impl std::fmt::Debug for SegmentTopHitsCollector {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("SegmentTopHitsCollector")
-            .field("segment_id", &self.segment_ordinal)
-            .field("accessor_idx", &self.accessor_idx)
-            .field("inner_collector", &self.inner_collector)
-            .finish()
+    fn into_top_hits_collector(
+        self,
+        value_accessors: &HashMap<String, Vec<DynamicColumn>>,
+    ) -> TopHitsTopNComputer {
+        let mut top_hits_computer = TopHitsTopNComputer::new(self.req.clone());
+        let top_results = self.top_n.into_vec();
+
+        for res in top_results {
+            let doc_value_fields = self
+                .req
+                .get_document_field_data(value_accessors, res.doc.doc_id);
+            top_hits_computer.collect(
+                DocSortValuesAndFields {
+                    sorts: res.feature,
+                    doc_value_fields,
+                },
+                res.doc,
+            );
+        }
+
+        top_hits_computer
     }
 }
 
-impl SegmentAggregationCollector for SegmentTopHitsCollector {
+impl SegmentAggregationCollector for TopHitsSegmentCollector {
     fn add_intermediate_aggregation_result(
         self: Box<Self>,
         agg_with_accessor: &crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
         results: &mut crate::aggregation::intermediate_agg_result::IntermediateAggregationResults,
     ) -> crate::Result<()> {
         let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
-        let intermediate_result = IntermediateMetricResult::TopHits(self.inner_collector);
+        let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
+
+        let intermediate_result =
+            IntermediateMetricResult::TopHits(self.into_top_hits_collector(value_accessors));
         results.push(
             name,
             IntermediateAggregationResult::Metric(intermediate_result),
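Worth noting about the flow above: the per-segment collector now keeps only the `Vec<DocValueAndOrder>` sort keys per document while scanning, and `into_top_hits_collector` fetches the (potentially expensive) `docvalue_fields` data once, only for the documents that survived the per-segment top-N. That appears to be the main design payoff of splitting `TopHitsSegmentCollector` from `TopHitsTopNComputer`.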
@@ -490,9 +556,7 @@ impl SegmentAggregationCollector for SegmentTopHitsCollector {
         agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
     ) -> crate::Result<()> {
         let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
-        let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
-        let features: Vec<ComparableDocFeature> = self
-            .inner_collector
+        let sorts: Vec<DocValueAndOrder> = self
             .req
             .sort
             .iter()
@@ -505,18 +569,12 @@ impl SegmentAggregationCollector for SegmentTopHitsCollector {
                     .0
                     .values_for_doc(doc_id)
                     .next();
-                ComparableDocFeature { value, order }
+                DocValueAndOrder { value, order }
             })
             .collect();
 
-        let retrieval_result = self
-            .inner_collector
-            .req
-            .retrieval
-            .get_document_field_data(value_accessors, doc_id);
-
-        self.inner_collector.collect(
-            ComparableDocFeatures(features, retrieval_result),
+        self.top_n.push(
+            sorts,
             DocAddress {
                 segment_ord: self.segment_ordinal,
                 doc_id,
@@ -530,11 +588,7 @@ impl SegmentAggregationCollector for SegmentTopHitsCollector {
         docs: &[crate::DocId],
         agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
     ) -> crate::Result<()> {
-        // TODO: Consider getting fields with the column block accessor and refactor this.
-        // ---
-        // Would the additional complexity of getting fields with the column_block_accessor
-        // make sense here? Probably yes, but I want to get a first-pass review first
-        // before proceeding.
+        // TODO: Consider getting fields with the column block accessor.
         for doc in docs {
             self.collect(*doc, agg_with_accessor)?;
         }
@@ -549,7 +603,7 @@ mod tests {
     use serde_json::Value;
     use time::macros::datetime;
 
-    use super::{ComparableDocFeature, ComparableDocFeatures, Order};
+    use super::{DocSortValuesAndFields, DocValueAndOrder, Order};
     use crate::aggregation::agg_req::Aggregations;
     use crate::aggregation::agg_result::AggregationResults;
     use crate::aggregation::bucket::tests::get_test_index_from_docs;
@@ -557,44 +611,44 @@ mod tests {
     use crate::aggregation::AggregationCollector;
     use crate::collector::ComparableDoc;
     use crate::query::AllQuery;
-    use crate::schema::OwnedValue as SchemaValue;
+    use crate::schema::OwnedValue;
 
-    fn invert_order(cmp_feature: ComparableDocFeature) -> ComparableDocFeature {
-        let ComparableDocFeature { value, order } = cmp_feature;
+    fn invert_order(cmp_feature: DocValueAndOrder) -> DocValueAndOrder {
+        let DocValueAndOrder { value, order } = cmp_feature;
         let order = match order {
             Order::Asc => Order::Desc,
             Order::Desc => Order::Asc,
         };
-        ComparableDocFeature { value, order }
+        DocValueAndOrder { value, order }
     }
 
-    fn collector_with_capacity(capacity: usize) -> super::TopHitsCollector {
-        super::TopHitsCollector {
+    fn collector_with_capacity(capacity: usize) -> super::TopHitsTopNComputer {
+        super::TopHitsTopNComputer {
             top_n: super::TopNComputer::new(capacity),
-            ..Default::default()
+            req: Default::default(),
         }
     }
 
-    fn invert_order_features(cmp_features: ComparableDocFeatures) -> ComparableDocFeatures {
-        let ComparableDocFeatures(cmp_features, search_results) = cmp_features;
-        let cmp_features = cmp_features
+    fn invert_order_features(mut cmp_features: DocSortValuesAndFields) -> DocSortValuesAndFields {
+        cmp_features.sorts = cmp_features
+            .sorts
             .into_iter()
             .map(invert_order)
             .collect::<Vec<_>>();
-        ComparableDocFeatures(cmp_features, search_results)
+        cmp_features
     }
 
     #[test]
     fn test_comparable_doc_feature() -> crate::Result<()> {
-        let small = ComparableDocFeature {
+        let small = DocValueAndOrder {
             value: Some(1),
             order: Order::Asc,
         };
-        let big = ComparableDocFeature {
+        let big = DocValueAndOrder {
             value: Some(2),
             order: Order::Asc,
         };
-        let none = ComparableDocFeature {
+        let none = DocValueAndOrder {
             value: None,
             order: Order::Asc,
         };
@@ -616,21 +670,21 @@ mod tests {
 
     #[test]
     fn test_comparable_doc_features() -> crate::Result<()> {
-        let features_1 = ComparableDocFeatures(
-            vec![ComparableDocFeature {
+        let features_1 = DocSortValuesAndFields {
+            sorts: vec![DocValueAndOrder {
                 value: Some(1),
                 order: Order::Asc,
             }],
-            Default::default(),
-        );
+            doc_value_fields: Default::default(),
+        };
 
-        let features_2 = ComparableDocFeatures(
-            vec![ComparableDocFeature {
+        let features_2 = DocSortValuesAndFields {
+            sorts: vec![DocValueAndOrder {
                 value: Some(2),
                 order: Order::Asc,
             }],
-            Default::default(),
-        );
+            doc_value_fields: Default::default(),
+        };
 
         assert!(features_1 < features_2);
 
@@ -689,39 +743,39 @@ mod tests {
                     segment_ord: 0,
                     doc_id: 0,
                 },
-                feature: ComparableDocFeatures(
-                    vec![ComparableDocFeature {
+                feature: DocSortValuesAndFields {
+                    sorts: vec![DocValueAndOrder {
                         value: Some(1),
                         order: Order::Asc,
                     }],
-                    Default::default(),
-                ),
+                    doc_value_fields: Default::default(),
+                },
             },
             ComparableDoc {
                 doc: crate::DocAddress {
                     segment_ord: 0,
                     doc_id: 2,
                 },
-                feature: ComparableDocFeatures(
-                    vec![ComparableDocFeature {
+                feature: DocSortValuesAndFields {
+                    sorts: vec![DocValueAndOrder {
                         value: Some(3),
                         order: Order::Asc,
                     }],
-                    Default::default(),
-                ),
+                    doc_value_fields: Default::default(),
+                },
             },
             ComparableDoc {
                 doc: crate::DocAddress {
                     segment_ord: 0,
                     doc_id: 1,
                 },
-                feature: ComparableDocFeatures(
-                    vec![ComparableDocFeature {
+                feature: DocSortValuesAndFields {
+                    sorts: vec![DocValueAndOrder {
                         value: Some(5),
                         order: Order::Asc,
                     }],
-                    Default::default(),
-                ),
+                    doc_value_fields: Default::default(),
+                },
             },
         ];
 
@@ -730,23 +784,23 @@ mod tests {
             collector.collect(doc.feature, doc.doc);
         }
 
-        let res = collector.finalize();
+        let res = collector.into_final_result();
 
         assert_eq!(
             res,
             super::TopHitsMetricResult {
                 hits: vec![
                     super::TopHitsVecEntry {
-                        sort: vec![docs[0].feature.0[0].value],
-                        search_results: Default::default(),
+                        sort: vec![docs[0].feature.sorts[0].value],
+                        doc_value_fields: Default::default(),
                     },
                     super::TopHitsVecEntry {
-                        sort: vec![docs[1].feature.0[0].value],
-                        search_results: Default::default(),
+                        sort: vec![docs[1].feature.sorts[0].value],
+                        doc_value_fields: Default::default(),
                     },
                     super::TopHitsVecEntry {
-                        sort: vec![docs[2].feature.0[0].value],
-                        search_results: Default::default(),
+                        sort: vec![docs[2].feature.sorts[0].value],
+                        doc_value_fields: Default::default(),
                     },
                 ]
             }
@@ -803,7 +857,7 @@ mod tests {
             {
                 "sort": [common::i64_to_u64(date_2017.unix_timestamp_nanos() as i64)],
                 "docvalue_fields": {
-                    "date": [ SchemaValue::Date(DateTime::from_utc(date_2017)) ],
+                    "date": [ OwnedValue::Date(DateTime::from_utc(date_2017)) ],
                     "text": [ "ccc" ],
                     "text2": [ "ddd" ],
                     "mixed.dyn_arr": [ 3, "4" ],
@@ -812,7 +866,7 @@ mod tests {
             {
                 "sort": [common::i64_to_u64(date_2016.unix_timestamp_nanos() as i64)],
                 "docvalue_fields": {
-                    "date": [ SchemaValue::Date(DateTime::from_utc(date_2016)) ],
+                    "date": [ OwnedValue::Date(DateTime::from_utc(date_2016)) ],
                     "text": [ "aaa" ],
                     "text2": [ "bbb" ],
                     "mixed.dyn_arr": [ 6, "7" ],
@@ -143,8 +143,6 @@ use std::fmt::Display;
 #[cfg(test)]
 mod agg_tests;
 
-mod agg_bench;
-
 use core::fmt;
 
 pub use agg_limits::AggregationLimits;
@@ -160,15 +158,14 @@ use serde::de::{self, Visitor};
 use serde::{Deserialize, Deserializer, Serialize};
 
 fn parse_str_into_f64<E: de::Error>(value: &str) -> Result<f64, E> {
-    let parsed = value.parse::<f64>().map_err(|_err| {
-        de::Error::custom(format!("Failed to parse f64 from string: {:?}", value))
-    })?;
+    let parsed = value
+        .parse::<f64>()
+        .map_err(|_err| de::Error::custom(format!("Failed to parse f64 from string: {value:?}")))?;
 
     // Check if the parsed value is NaN or infinity
     if parsed.is_nan() || parsed.is_infinite() {
         Err(de::Error::custom(format!(
-            "Value is not a valid f64 (NaN or Infinity): {:?}",
-            value
+            "Value is not a valid f64 (NaN or Infinity): {value:?}"
         )))
     } else {
         Ok(parsed)
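Behavior sketch of the guard above: `str::parse::<f64>` happily accepts `"NaN"` and `"inf"`, which is why the explicit finiteness check is needed. A standalone equivalent:

```rust
fn parse_strict(value: &str) -> Result<f64, String> {
    let parsed: f64 = value
        .parse()
        .map_err(|_| format!("Failed to parse f64 from string: {value:?}"))?;
    if parsed.is_nan() || parsed.is_infinite() {
        return Err(format!("Value is not a valid f64 (NaN or Infinity): {value:?}"));
    }
    Ok(parsed)
}

fn main() {
    assert_eq!(parse_strict("1.5"), Ok(1.5));
    assert!(parse_strict("NaN").is_err()); // parses as NaN, then rejected
    assert!(parse_strict("inf").is_err()); // parses as infinity, then rejected
}
```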
@@ -417,7 +414,6 @@ mod tests {
     use time::OffsetDateTime;
 
     use super::agg_req::Aggregations;
-    use super::segment_agg_result::AggregationLimits;
     use super::*;
     use crate::indexer::NoMergePolicy;
     use crate::query::{AllQuery, TermQuery};
@@ -16,7 +16,7 @@ use super::metric::{
     SumAggregation,
 };
 use crate::aggregation::bucket::TermMissingAgg;
-use crate::aggregation::metric::SegmentTopHitsCollector;
+use crate::aggregation::metric::TopHitsSegmentCollector;
 
 pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
     fn add_intermediate_aggregation_result(
@@ -161,7 +161,7 @@ pub(crate) fn build_single_agg_segment_collector(
             accessor_idx,
         )?,
     )),
-    TopHits(top_hits_req) => Ok(Box::new(SegmentTopHitsCollector::from_req(
+    TopHits(top_hits_req) => Ok(Box::new(TopHitsSegmentCollector::from_req(
         top_hits_req,
         accessor_idx,
         req.segment_ordinal,
@@ -1,7 +1,7 @@
 use std::cmp::Ordering;
 use std::collections::{btree_map, BTreeMap, BTreeSet, BinaryHeap};
+use std::io;
 use std::ops::Bound;
-use std::{io, u64, usize};
 
 use crate::collector::{Collector, SegmentCollector};
 use crate::fastfield::FacetReader;
@@ -598,7 +598,7 @@ mod tests {
                 let mid = n % 4;
                 n /= 4;
                 let leaf = n % 5;
-                Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf))
+                Facet::from(&format!("/top{top}/mid{mid}/leaf{leaf}"))
             })
             .collect();
         for i in 0..num_facets * 10 {
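These `format!` rewrites (here and in the next two hunks) are purely mechanical: Rust 2021 inline format arguments capture identifiers from the surrounding scope and produce identical output:

```rust
fn main() {
    let (top, mid, leaf) = (1, 2, 3);
    assert_eq!(
        format!("/top{}/mid{}/leaf{}", top, mid, leaf),
        format!("/top{top}/mid{mid}/leaf{leaf}")
    );
}
```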
@@ -737,7 +737,7 @@ mod tests {
         vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
             .into_iter()
             .flat_map(|(c, count)| {
-                let facet = Facet::from(&format!("/facet/{}", c));
+                let facet = Facet::from(&format!("/facet/{c}"));
                 let doc = doc!(facet_field => facet);
                 iter::repeat(doc).take(count)
             })
@@ -785,7 +785,7 @@ mod tests {
         let docs: Vec<TantivyDocument> = vec![("b", 2), ("a", 2), ("c", 4)]
             .into_iter()
             .flat_map(|(c, count)| {
-                let facet = Facet::from(&format!("/facet/{}", c));
+                let facet = Facet::from(&format!("/facet/{c}"));
                 let doc = doc!(facet_field => facet);
                 iter::repeat(doc).take(count)
             })
@@ -160,7 +160,7 @@ mod tests {
     use super::{add_vecs, HistogramCollector, HistogramComputer};
     use crate::schema::{Schema, FAST};
     use crate::time::{Date, Month};
-    use crate::{doc, query, DateTime, Index};
+    use crate::{query, DateTime, Index};
 
     #[test]
     fn test_add_histograms_simple() {
@@ -1,15 +1,11 @@
 use columnar::{BytesColumn, Column};
 
 use super::*;
-use crate::collector::{Count, FilterCollector, TopDocs};
-use crate::index::SegmentReader;
 use crate::query::{AllQuery, QueryParser};
 use crate::schema::{Schema, FAST, TEXT};
 use crate::time::format_description::well_known::Rfc3339;
 use crate::time::OffsetDateTime;
-use crate::{
-    doc, DateTime, DocAddress, DocId, Index, Score, Searcher, SegmentOrdinal, TantivyDocument,
-};
+use crate::{DateTime, DocAddress, Index, Searcher, TantivyDocument};
 
 pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
     compute_score: true,
@@ -4,7 +4,8 @@ use std::marker::PhantomData;
 use serde::{Deserialize, Serialize};
 
 use super::top_score_collector::TopNComputer;
-use crate::{DocAddress, DocId, SegmentOrdinal, SegmentReader};
+use crate::index::SegmentReader;
+use crate::{DocAddress, DocId, SegmentOrdinal};
 
 /// Contains a feature (field, score, etc.) of a document along with the document address.
 ///
@@ -732,6 +732,19 @@ pub struct TopNComputer<Score, D, const REVERSE_ORDER: bool = true> {
     top_n: usize,
     pub(crate) threshold: Option<Score>,
 }
 
+impl<Score: std::fmt::Debug, D, const REVERSE_ORDER: bool> std::fmt::Debug
+    for TopNComputer<Score, D, REVERSE_ORDER>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("TopNComputer")
+            .field("buffer_len", &self.buffer.len())
+            .field("top_n", &self.top_n)
+            .field("current_threshold", &self.threshold)
+            .finish()
+    }
+}
+
 // Intermediate struct for TopNComputer for deserialization, to keep vec capacity
 #[derive(Deserialize)]
 struct TopNComputerDeser<Score, D, const REVERSE_ORDER: bool> {
@@ -1,19 +1,25 @@
-use rayon::{ThreadPool, ThreadPoolBuilder};
+use std::sync::Arc;
 
+#[cfg(feature = "quickwit")]
+use futures_util::{future::Either, FutureExt};
+
 use crate::TantivyError;
 
-/// Search executor whether search request are single thread or multithread.
-///
-/// We don't expose Rayon thread pool directly here for several reasons.
-///
-/// First dependency hell. It is not a good idea to expose the
-/// API of a dependency, knowing it might conflict with a different version
-/// used by the client. Second, we may stop using rayon in the future.
+/// Executor makes it possible to run tasks in single thread or
+/// in a thread pool.
+#[derive(Clone)]
 pub enum Executor {
     /// Single thread variant of an Executor
     SingleThread,
     /// Thread pool variant of an Executor
-    ThreadPool(ThreadPool),
+    ThreadPool(Arc<rayon::ThreadPool>),
+}
+
+#[cfg(feature = "quickwit")]
+impl From<Arc<rayon::ThreadPool>> for Executor {
+    fn from(thread_pool: Arc<rayon::ThreadPool>) -> Self {
+        Executor::ThreadPool(thread_pool)
+    }
 }
 
 impl Executor {
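Wrapping the pool in `Arc` is what makes the new `#[derive(Clone)]` possible: cloning an `Executor` now just bumps a reference count instead of requiring a second pool. A hedged usage sketch (it assumes `Executor` is re-exported at the crate root; adjust the path to the actual crate layout):

```rust
use tantivy::Executor;

fn main() -> tantivy::Result<()> {
    let executor = Executor::multi_thread(4, "search-")?;
    // Cheap clone: both handles share the same rayon pool behind the Arc.
    let executor_clone = executor.clone();
    drop(executor);
    let _still_usable = executor_clone;
    Ok(())
}
```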
@@ -24,11 +30,11 @@ impl Executor {
 
     /// Creates an Executor that dispatches the tasks in a thread pool.
     pub fn multi_thread(num_threads: usize, prefix: &'static str) -> crate::Result<Executor> {
-        let pool = ThreadPoolBuilder::new()
+        let pool = rayon::ThreadPoolBuilder::new()
             .num_threads(num_threads)
             .thread_name(move |num| format!("{prefix}{num}"))
             .build()?;
-        Ok(Executor::ThreadPool(pool))
+        Ok(Executor::ThreadPool(Arc::new(pool)))
     }
 
     /// Perform a map in the thread pool.
@@ -91,11 +97,36 @@ impl Executor {
             }
         }
     }
 
+    /// Spawn a task on the pool, returning a future completing on task success.
+    ///
+    /// If the task panic, returns `Err(())`.
+    #[cfg(feature = "quickwit")]
+    pub fn spawn_blocking<T: Send + 'static>(
+        &self,
+        cpu_intensive_task: impl FnOnce() -> T + Send + 'static,
+    ) -> impl std::future::Future<Output = Result<T, ()>> {
+        match self {
+            Executor::SingleThread => Either::Left(std::future::ready(Ok(cpu_intensive_task()))),
+            Executor::ThreadPool(pool) => {
+                let (sender, receiver) = oneshot::channel();
+                pool.spawn(|| {
+                    if sender.is_closed() {
+                        return;
+                    }
+                    let task_result = cpu_intensive_task();
+                    let _ = sender.send(task_result);
+                });
+
+                let res = receiver.map(|res| res.map_err(|_| ()));
+                Either::Right(res)
+            }
+        }
+    }
 }
 
 #[cfg(test)]
 mod tests {
 
     use super::Executor;
 
     #[test]
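A sketch of how a caller might drive `spawn_blocking` (gated on the `quickwit` feature): the future resolves to `Err(())` if the task panics or is dropped, and dropping it before the task starts cancels the task via the `sender.is_closed()` check, as the new test below exercises. The crate-root `Executor` path and the `futures` runtime are assumptions of this sketch:

```rust
use tantivy::Executor;

fn main() {
    let executor = Executor::multi_thread(2, "blk-").unwrap();
    // Run a CPU-bound closure on the pool and await its result.
    let fut = executor.spawn_blocking(|| (0..1_000u64).sum::<u64>());
    let result: Result<u64, ()> = futures::executor::block_on(fut);
    assert_eq!(result, Ok(499_500));
}
```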
@@ -147,4 +178,62 @@ mod tests {
|
|||||||
assert_eq!(result[i], i * 2);
|
assert_eq!(result[i], i * 2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "quickwit")]
|
||||||
|
#[test]
|
||||||
|
fn test_cancel_cpu_intensive_tasks() {
|
||||||
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
let counter: Arc<AtomicU64> = Default::default();
|
||||||
|
|
||||||
|
let other_counter: Arc<AtomicU64> = Default::default();
|
||||||
|
|
||||||
|
let mut futures = Vec::new();
|
||||||
|
let mut other_futures = Vec::new();
|
||||||
|
|
||||||
|
let (tx, rx) = crossbeam_channel::bounded::<()>(0);
|
||||||
|
let rx = Arc::new(rx);
|
||||||
|
let executor = Executor::multi_thread(3, "search-test").unwrap();
|
||||||
|
for i in 0..1000 {
|
||||||
|
let counter_clone: Arc<AtomicU64> = counter.clone();
|
||||||
|
let other_counter_clone: Arc<AtomicU64> = other_counter.clone();
|
||||||
|
|
||||||
|
let rx_clone = rx.clone();
|
||||||
|
let rx_clone2 = rx.clone();
|
||||||
|
let fut = executor.spawn_blocking(move || {
|
||||||
|
counter_clone.fetch_add(1, Ordering::SeqCst);
|
||||||
|
let () = rx_clone.recv().unwrap();
|
||||||
|
});
|
||||||
|
futures.push(fut);
|
||||||
|
let other_fut = executor.spawn_blocking(move || {
|
||||||
|
other_counter_clone.fetch_add(1, Ordering::SeqCst);
|
||||||
|
let () = rx_clone2.recv().unwrap();
|
||||||
|
});
|
||||||
|
other_futures.push(other_fut);
|
||||||
|
}
|
||||||
|
|
||||||
|
// We execute 100 futures.
|
||||||
|
for i in 0..100 {
|
||||||
|
tx.send(()).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let counter_val = counter.load(Ordering::SeqCst);
|
||||||
|
let other_counter_val = other_counter.load(Ordering::SeqCst);
|
||||||
|
assert!(counter_val >= 30);
|
||||||
|
assert!(other_counter_val >= 30);
|
||||||
|
|
||||||
|
drop(other_futures);
|
||||||
|
|
||||||
|
// We execute 100 futures.
|
||||||
|
for i in 0..100 {
|
||||||
|
tx.send(()).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let counter_val2 = counter.load(Ordering::SeqCst);
|
||||||
|
assert!(counter_val2 >= counter_val + 100 - 6);
|
||||||
|
|
||||||
|
let other_counter_val2 = other_counter.load(Ordering::SeqCst);
|
||||||
|
assert!(other_counter_val2 <= other_counter_val + 6);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
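The new `spawn_blocking` is gated behind the `quickwit` feature and returns a plain future, so any executor can poll it; dropping the future cancels a task that has not started yet, which is what the new test exercises. A minimal usage sketch, assuming the `quickwit` feature is enabled (`block_on` from the `futures` crate is used here only to poll the result):

    use tantivy::Executor;

    fn main() -> tantivy::Result<()> {
        let executor = Executor::multi_thread(4, "compute-")?;
        // The closure runs on the rayon pool; the future resolves to Err(()) if it panics.
        let fut = executor.spawn_blocking(|| (0..1_000u64).sum::<u64>());
        let sum = futures::executor::block_on(fut).expect("task panicked");
        assert_eq!(sum, 499_500);
        Ok(())
    }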
@@ -1,12 +1,10 @@
-use columnar::MonotonicallyMappableToU64;
+use common::json_path_writer::JSON_PATH_SEGMENT_SEP;
 use common::{replace_in_place, JsonPathWriter};
 use rustc_hash::FxHashMap;

-use crate::fastfield::FastValue;
 use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
 use crate::schema::document::{ReferenceValue, ReferenceValueLeaf, Value};
-use crate::schema::term::JSON_PATH_SEGMENT_SEP;
-use crate::schema::{Field, Type, DATE_TIME_PRECISION_INDEXED};
+use crate::schema::Type;
 use crate::time::format_description::well_known::Rfc3339;
 use crate::time::{OffsetDateTime, UtcOffset};
 use crate::tokenizer::TextAnalyzer;
@@ -33,7 +31,7 @@ use crate::{DateTime, DocId, Term};
 /// position 1.
 /// As a result, with lemmatization, "The Smiths" will match our object.
 ///
-/// Worse, if a same term is appears in the second object, a non increasing value would be pushed
+/// Worse, if a same term appears in the second object, a non increasing value would be pushed
 /// to the position recorder probably provoking a panic.
 ///
 /// This problem is solved for regular multivalued object by offsetting the position
@@ -52,7 +50,7 @@ use crate::{DateTime, DocId, Term};
 /// We can therefore afford working with a map that is not imperfect. It is fine if several
 /// path map to the same index position as long as the probability is relatively low.
 #[derive(Default)]
-struct IndexingPositionsPerPath {
+pub(crate) struct IndexingPositionsPerPath {
     positions_per_path: FxHashMap<u32, IndexingPosition>,
 }

@@ -60,6 +58,9 @@ impl IndexingPositionsPerPath {
     fn get_position_from_id(&mut self, id: u32) -> &mut IndexingPosition {
         self.positions_per_path.entry(id).or_default()
     }
+    pub fn clear(&mut self) {
+        self.positions_per_path.clear();
+    }
 }

 /// Convert JSON_PATH_SEGMENT_SEP to a dot.
@@ -70,36 +71,6 @@ pub fn json_path_sep_to_dot(path: &mut str) {
     }
 }

-#[allow(clippy::too_many_arguments)]
-pub(crate) fn index_json_values<'a, V: Value<'a>>(
-    doc: DocId,
-    json_visitors: impl Iterator<Item = crate::Result<V::ObjectIter>>,
-    text_analyzer: &mut TextAnalyzer,
-    expand_dots_enabled: bool,
-    term_buffer: &mut Term,
-    postings_writer: &mut dyn PostingsWriter,
-    json_path_writer: &mut JsonPathWriter,
-    ctx: &mut IndexingContext,
-) -> crate::Result<()> {
-    json_path_writer.clear();
-    json_path_writer.set_expand_dots(expand_dots_enabled);
-    let mut positions_per_path: IndexingPositionsPerPath = Default::default();
-    for json_visitor_res in json_visitors {
-        let json_visitor = json_visitor_res?;
-        index_json_object::<V>(
-            doc,
-            json_visitor,
-            text_analyzer,
-            term_buffer,
-            json_path_writer,
-            postings_writer,
-            ctx,
-            &mut positions_per_path,
-        );
-    }
-    Ok(())
-}
-
 #[allow(clippy::too_many_arguments)]
 fn index_json_object<'a, V: Value<'a>>(
     doc: DocId,
@@ -128,7 +99,7 @@ fn index_json_object<'a, V: Value<'a>>(
 }

 #[allow(clippy::too_many_arguments)]
-fn index_json_value<'a, V: Value<'a>>(
+pub(crate) fn index_json_value<'a, V: Value<'a>>(
     doc: DocId,
     json_value: V,
     text_analyzer: &mut TextAnalyzer,
@@ -168,12 +139,18 @@ fn index_json_value<'a, V: Value<'a>>(
             );
         }
         ReferenceValueLeaf::U64(val) => {
+            // try to parse to i64, since when querying we will apply the same logic and prefer
+            // i64 values
             set_path_id(
                 term_buffer,
                 ctx.path_to_unordered_id
                     .get_or_allocate_unordered_id(json_path_writer.as_str()),
             );
-            term_buffer.append_type_and_fast_value(val);
+            if let Ok(i64_val) = val.try_into() {
+                term_buffer.append_type_and_fast_value::<i64>(i64_val);
+            } else {
+                term_buffer.append_type_and_fast_value(val);
+            }
             postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
         }
         ReferenceValueLeaf::I64(val) => {
@@ -256,71 +233,42 @@ fn index_json_value<'a, V: Value<'a>>(
     }
 }

-// Tries to infer a JSON type from a string.
-pub fn convert_to_fast_value_and_get_term(
-    json_term_writer: &mut JsonTermWriter,
-    phrase: &str,
-) -> Option<Term> {
+/// Tries to infer a JSON type from a string and append it to the term.
+///
+/// The term must be json + JSON path.
+pub fn convert_to_fast_value_and_append_to_json_term(mut term: Term, phrase: &str) -> Option<Term> {
+    assert_eq!(
+        term.value()
+            .as_json_value_bytes()
+            .expect("expecting a Term with a json type and json path")
+            .as_serialized()
+            .len(),
+        0,
+        "JSON value bytes should be empty"
+    );
     if let Ok(dt) = OffsetDateTime::parse(phrase, &Rfc3339) {
         let dt_utc = dt.to_offset(UtcOffset::UTC);
-        return Some(set_fastvalue_and_get_term(
-            json_term_writer,
-            DateTime::from_utc(dt_utc),
-        ));
+        term.append_type_and_fast_value(DateTime::from_utc(dt_utc));
+        return Some(term);
     }
     if let Ok(i64_val) = str::parse::<i64>(phrase) {
-        return Some(set_fastvalue_and_get_term(json_term_writer, i64_val));
+        term.append_type_and_fast_value(i64_val);
+        return Some(term);
     }
     if let Ok(u64_val) = str::parse::<u64>(phrase) {
-        return Some(set_fastvalue_and_get_term(json_term_writer, u64_val));
+        term.append_type_and_fast_value(u64_val);
+        return Some(term);
     }
     if let Ok(f64_val) = str::parse::<f64>(phrase) {
-        return Some(set_fastvalue_and_get_term(json_term_writer, f64_val));
+        term.append_type_and_fast_value(f64_val);
+        return Some(term);
     }
     if let Ok(bool_val) = str::parse::<bool>(phrase) {
-        return Some(set_fastvalue_and_get_term(json_term_writer, bool_val));
+        term.append_type_and_fast_value(bool_val);
+        return Some(term);
     }
     None
 }
-// helper function to generate a Term from a json fastvalue
-pub(crate) fn set_fastvalue_and_get_term<T: FastValue>(
-    json_term_writer: &mut JsonTermWriter,
-    value: T,
-) -> Term {
-    json_term_writer.set_fast_value(value);
-    json_term_writer.term().clone()
-}
-
-// helper function to generate a list of terms with their positions from a textual json value
-pub(crate) fn set_string_and_get_terms(
-    json_term_writer: &mut JsonTermWriter,
-    value: &str,
-    text_analyzer: &mut TextAnalyzer,
-) -> Vec<(usize, Term)> {
-    let mut positions_and_terms = Vec::<(usize, Term)>::new();
-    json_term_writer.close_path_and_set_type(Type::Str);
-    let term_num_bytes = json_term_writer.term_buffer.len_bytes();
-    let mut token_stream = text_analyzer.token_stream(value);
-    token_stream.process(&mut |token| {
-        json_term_writer
-            .term_buffer
-            .truncate_value_bytes(term_num_bytes);
-        json_term_writer
-            .term_buffer
-            .append_bytes(token.text.as_bytes());
-        positions_and_terms.push((token.position, json_term_writer.term().clone()));
-    });
-    positions_and_terms
-}
-
-/// Writes a value of a JSON field to a `Term`.
-/// The Term format is as follows:
-/// `[JSON_TYPE][JSON_PATH][JSON_END_OF_PATH][VALUE_BYTES]`
-pub struct JsonTermWriter<'a> {
-    term_buffer: &'a mut Term,
-    path_stack: Vec<usize>,
-    expand_dots_enabled: bool,
-}
-
 /// Splits a json path supplied to the query parser in such a way that
 /// `.` can be escaped.
@@ -377,158 +325,48 @@ pub(crate) fn encode_column_name(
     path.into()
 }

-impl<'a> JsonTermWriter<'a> {
-    pub fn from_field_and_json_path(
-        field: Field,
-        json_path: &str,
-        expand_dots_enabled: bool,
-        term_buffer: &'a mut Term,
-    ) -> Self {
-        term_buffer.set_field_and_type(field, Type::Json);
-        let mut json_term_writer = Self::wrap(term_buffer, expand_dots_enabled);
-        for segment in split_json_path(json_path) {
-            json_term_writer.push_path_segment(&segment);
-        }
-        json_term_writer
-    }
-
-    pub fn wrap(term_buffer: &'a mut Term, expand_dots_enabled: bool) -> Self {
-        term_buffer.clear_with_type(Type::Json);
-        let mut path_stack = Vec::with_capacity(10);
-        path_stack.push(0);
-        Self {
-            term_buffer,
-            path_stack,
-            expand_dots_enabled,
-        }
-    }
-
-    fn trim_to_end_of_path(&mut self) {
-        let end_of_path = *self.path_stack.last().unwrap();
-        self.term_buffer.truncate_value_bytes(end_of_path);
-    }
-
-    pub fn close_path_and_set_type(&mut self, typ: Type) {
-        self.trim_to_end_of_path();
-        self.term_buffer.set_json_path_end();
-        self.term_buffer.append_bytes(&[typ.to_code()]);
-    }
-
-    // TODO: Remove this function and use JsonPathWriter instead.
-    pub fn push_path_segment(&mut self, segment: &str) {
-        // the path stack should never be empty.
-        self.trim_to_end_of_path();
-
-        if self.path_stack.len() > 1 {
-            self.term_buffer.set_json_path_separator();
-        }
-        let appended_segment = self.term_buffer.append_bytes(segment.as_bytes());
-        if self.expand_dots_enabled {
-            // We need to replace `.` by JSON_PATH_SEGMENT_SEP.
-            replace_in_place(b'.', JSON_PATH_SEGMENT_SEP, appended_segment);
-        }
-        self.term_buffer.add_json_path_separator();
-        self.path_stack.push(self.term_buffer.len_bytes());
-    }
-
-    pub fn pop_path_segment(&mut self) {
-        self.path_stack.pop();
-        assert!(!self.path_stack.is_empty());
-        self.trim_to_end_of_path();
-    }
-
-    /// Returns the json path of the term being currently built.
-    #[cfg(test)]
-    pub(crate) fn path(&self) -> &[u8] {
-        let end_of_path = self.path_stack.last().cloned().unwrap_or(1);
-        &self.term().serialized_value_bytes()[..end_of_path - 1]
-    }
-
-    pub(crate) fn set_fast_value<T: FastValue>(&mut self, val: T) {
-        self.close_path_and_set_type(T::to_type());
-        let value = if T::to_type() == Type::Date {
-            DateTime::from_u64(val.to_u64())
-                .truncate(DATE_TIME_PRECISION_INDEXED)
-                .to_u64()
-        } else {
-            val.to_u64()
-        };
-        self.term_buffer
-            .append_bytes(value.to_be_bytes().as_slice());
-    }
-
-    pub fn set_str(&mut self, text: &str) {
-        self.close_path_and_set_type(Type::Str);
-        self.term_buffer.append_bytes(text.as_bytes());
-    }
-
-    pub fn term(&self) -> &Term {
-        self.term_buffer
-    }
-}
-
 #[cfg(test)]
 mod tests {
-    use super::{split_json_path, JsonTermWriter};
-    use crate::schema::{Field, Type};
+    use super::split_json_path;
+    use crate::schema::Field;
     use crate::Term;

     #[test]
     fn test_json_writer() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("attributes");
-        json_writer.push_path_segment("color");
-        json_writer.set_str("red");
+        let mut term = Term::from_field_json_path(field, "attributes.color", false);
+        term.append_type_and_str("red");
         assert_eq!(
-            format!("{:?}", json_writer.term()),
+            format!("{term:?}"),
             "Term(field=1, type=Json, path=attributes.color, type=Str, \"red\")"
         );
-        json_writer.set_str("blue");
-        assert_eq!(
-            format!("{:?}", json_writer.term()),
-            "Term(field=1, type=Json, path=attributes.color, type=Str, \"blue\")"
-        );
-        json_writer.pop_path_segment();
-        json_writer.push_path_segment("dimensions");
-        json_writer.push_path_segment("width");
-        json_writer.set_fast_value(400i64);
+
+        let mut term = Term::from_field_json_path(field, "attributes.dimensions.width", false);
+        term.append_type_and_fast_value(400i64);
         assert_eq!(
-            format!("{:?}", json_writer.term()),
+            format!("{term:?}"),
             "Term(field=1, type=Json, path=attributes.dimensions.width, type=I64, 400)"
         );
-        json_writer.pop_path_segment();
-        json_writer.push_path_segment("height");
-        json_writer.set_fast_value(300i64);
-        assert_eq!(
-            format!("{:?}", json_writer.term()),
-            "Term(field=1, type=Json, path=attributes.dimensions.height, type=I64, 300)"
-        );
     }

     #[test]
     fn test_string_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color");
-        json_writer.set_str("red");
-        assert_eq!(
-            json_writer.term().serialized_term(),
-            b"\x00\x00\x00\x01jcolor\x00sred"
-        )
+        let mut term = Term::from_field_json_path(field, "color", false);
+        term.append_type_and_str("red");
+
+        assert_eq!(term.serialized_term(), b"\x00\x00\x00\x01jcolor\x00sred")
     }

     #[test]
     fn test_i64_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color");
-        json_writer.set_fast_value(-4i64);
+        let mut term = Term::from_field_json_path(field, "color", false);
+        term.append_type_and_fast_value(-4i64);
         assert_eq!(
-            json_writer.term().serialized_term(),
+            term.serialized_term(),
             b"\x00\x00\x00\x01jcolor\x00i\x7f\xff\xff\xff\xff\xff\xff\xfc"
         )
     }
@@ -536,12 +374,11 @@ mod tests {
     #[test]
     fn test_u64_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color");
-        json_writer.set_fast_value(4u64);
+        let mut term = Term::from_field_json_path(field, "color", false);
+        term.append_type_and_fast_value(4u64);
         assert_eq!(
-            json_writer.term().serialized_term(),
+            term.serialized_term(),
             b"\x00\x00\x00\x01jcolor\x00u\x00\x00\x00\x00\x00\x00\x00\x04"
         )
     }
@@ -549,12 +386,10 @@ mod tests {
     #[test]
     fn test_f64_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color");
-        json_writer.set_fast_value(4.0f64);
+        let mut term = Term::from_field_json_path(field, "color", false);
+        term.append_type_and_fast_value(4.0f64);
         assert_eq!(
-            json_writer.term().serialized_term(),
+            term.serialized_term(),
             b"\x00\x00\x00\x01jcolor\x00f\xc0\x10\x00\x00\x00\x00\x00\x00"
         )
     }
@@ -562,90 +397,14 @@ mod tests {
     #[test]
     fn test_bool_term() {
         let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color");
-        json_writer.set_fast_value(true);
+        let mut term = Term::from_field_json_path(field, "color", false);
+        term.append_type_and_fast_value(true);
         assert_eq!(
-            json_writer.term().serialized_term(),
+            term.serialized_term(),
             b"\x00\x00\x00\x01jcolor\x00o\x00\x00\x00\x00\x00\x00\x00\x01"
         )
     }

-    #[test]
-    fn test_push_after_set_path_segment() {
-        let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("attribute");
-        json_writer.set_str("something");
-        json_writer.push_path_segment("color");
-        json_writer.set_str("red");
-        assert_eq!(
-            json_writer.term().serialized_term(),
-            b"\x00\x00\x00\x01jattribute\x01color\x00sred"
-        )
-    }
-
-    #[test]
-    fn test_pop_segment() {
-        let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color");
-        json_writer.push_path_segment("hue");
-        json_writer.pop_path_segment();
-        json_writer.set_str("red");
-        assert_eq!(
-            json_writer.term().serialized_term(),
-            b"\x00\x00\x00\x01jcolor\x00sred"
-        )
-    }
-
-    #[test]
-    fn test_json_writer_path() {
-        let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color");
-        assert_eq!(json_writer.path(), b"color");
-        json_writer.push_path_segment("hue");
-        assert_eq!(json_writer.path(), b"color\x01hue");
-        json_writer.set_str("pink");
-        assert_eq!(json_writer.path(), b"color\x01hue");
-    }
-
-    #[test]
-    fn test_json_path_expand_dots_disabled() {
-        let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, false);
-        json_writer.push_path_segment("color.hue");
-        assert_eq!(json_writer.path(), b"color.hue");
-    }
-
-    #[test]
-    fn test_json_path_expand_dots_enabled() {
-        let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, true);
-        json_writer.push_path_segment("color.hue");
-        assert_eq!(json_writer.path(), b"color\x01hue");
-    }
-
-    #[test]
-    fn test_json_path_expand_dots_enabled_pop_segment() {
-        let field = Field::from_field_id(1);
-        let mut term = Term::with_type_and_field(Type::Json, field);
-        let mut json_writer = JsonTermWriter::wrap(&mut term, true);
-        json_writer.push_path_segment("hello");
-        assert_eq!(json_writer.path(), b"hello");
-        json_writer.push_path_segment("color.hue");
-        assert_eq!(json_writer.path(), b"hello\x01color\x01hue");
-        json_writer.pop_path_segment();
-        assert_eq!(json_writer.path(), b"hello");
-    }
-
     #[test]
     fn test_split_json_path_simple() {
         let json_path = split_json_path("titi.toto");
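With `JsonTermWriter` removed, JSON terms are now built directly on `Term`: `from_field_json_path` encodes the path, and `append_type_and_str` / `append_type_and_fast_value` encode the typed value. `convert_to_fast_value_and_append_to_json_term` likewise expects a term whose value bytes are still empty. A minimal sketch of the replacement API (the field name and path are illustrative, not from the diff):

    use tantivy::schema::{Schema, TEXT};
    use tantivy::Term;

    fn width_term() -> Term {
        let mut schema_builder = Schema::builder();
        let attributes = schema_builder.add_json_field("attributes", TEXT);
        let _schema = schema_builder.build();

        // Path first, then the typed value; `false` leaves expand_dots disabled.
        let mut term = Term::from_field_json_path(attributes, "dimensions.width", false);
        term.append_type_and_fast_value(400i64);
        term
    }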
@@ -4,13 +4,13 @@ use std::{fmt, io};

 use crate::collector::Collector;
 use crate::core::Executor;
-use crate::index::SegmentReader;
+use crate::index::{SegmentId, SegmentReader};
 use crate::query::{Bm25StatisticsProvider, EnableScoring, Query};
 use crate::schema::document::DocumentDeserialize;
 use crate::schema::{Schema, Term};
 use crate::space_usage::SearcherSpaceUsage;
 use crate::store::{CacheStats, StoreReader};
-use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};
+use crate::{DocAddress, Index, Opstamp, TrackedObject};

 /// Identifies the searcher generation accessed by a [`Searcher`].
 ///
@@ -109,8 +109,9 @@ impl Searcher {
         &self,
         doc_address: DocAddress,
     ) -> crate::Result<D> {
+        let executor = self.inner.index.search_executor();
         let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
-        store_reader.get_async(doc_address.doc_id).await
+        store_reader.get_async(doc_address.doc_id, executor).await
     }

     /// Access the schema associated with the index of this searcher.
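The async doc fetch keeps its caller-facing signature; it now just threads the index's search executor into the store reader. A sketch of a caller, assuming the `quickwit` feature (which gates the async doc-store path):

    use tantivy::{DocAddress, Searcher, TantivyDocument};

    async fn fetch(searcher: &Searcher, addr: DocAddress) -> tantivy::Result<TantivyDocument> {
        searcher.doc_async(addr).await
    }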
@@ -1,13 +1,14 @@
 use crate::collector::Count;
 use crate::directory::{RamDirectory, WatchCallback};
+use crate::index::SegmentId;
 use crate::indexer::{LogMergePolicy, NoMergePolicy};
-use crate::json_utils::JsonTermWriter;
+use crate::postings::Postings;
 use crate::query::TermQuery;
-use crate::schema::{Field, IndexRecordOption, Schema, Type, INDEXED, STRING, TEXT};
+use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, STRING, TEXT};
 use crate::tokenizer::TokenizerManager;
 use crate::{
-    Directory, DocSet, Index, IndexBuilder, IndexReader, IndexSettings, IndexWriter, Postings,
-    ReloadPolicy, SegmentId, TantivyDocument, Term,
+    Directory, DocSet, Index, IndexBuilder, IndexReader, IndexSettings, IndexWriter, ReloadPolicy,
+    TantivyDocument, Term,
 };

 #[test]
@@ -416,16 +417,12 @@ fn test_non_text_json_term_freq() {
     let searcher = reader.searcher();
     let segment_reader = searcher.segment_reader(0u32);
     let inv_idx = segment_reader.inverted_index(field).unwrap();
-    let mut term = Term::with_type_and_field(Type::Json, field);
-    let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
-    json_term_writer.push_path_segment("tenant_id");
-    json_term_writer.close_path_and_set_type(Type::U64);
-    json_term_writer.set_fast_value(75u64);
+    let mut term = Term::from_field_json_path(field, "tenant_id", false);
+    term.append_type_and_fast_value(75i64);
     let postings = inv_idx
-        .read_postings(
-            json_term_writer.term(),
-            IndexRecordOption::WithFreqsAndPositions,
-        )
+        .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
         .unwrap()
         .unwrap();
     assert_eq!(postings.doc(), 0);
@@ -454,16 +451,12 @@ fn test_non_text_json_term_freq_bitpacked() {
     let searcher = reader.searcher();
     let segment_reader = searcher.segment_reader(0u32);
     let inv_idx = segment_reader.inverted_index(field).unwrap();
-    let mut term = Term::with_type_and_field(Type::Json, field);
-    let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
-    json_term_writer.push_path_segment("tenant_id");
-    json_term_writer.close_path_and_set_type(Type::U64);
-    json_term_writer.set_fast_value(75u64);
+    let mut term = Term::from_field_json_path(field, "tenant_id", false);
+    term.append_type_and_fast_value(75i64);
     let mut postings = inv_idx
-        .read_postings(
-            json_term_writer.term(),
-            IndexRecordOption::WithFreqsAndPositions,
-        )
+        .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
         .unwrap()
         .unwrap();
     assert_eq!(postings.doc(), 0);
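Note that both tests also switch the probe value from `75u64` to `75i64`, matching the new indexing rule above: u64 values that fit in i64 are indexed as i64. A sketch of the same lookup as a helper, under those assumptions (`tenant_id` is illustrative):

    use tantivy::schema::{Field, IndexRecordOption};
    use tantivy::{SegmentReader, Term};

    // Probe whether any document in the segment has tenant_id == 75 in the JSON field.
    fn has_tenant_75(segment_reader: &SegmentReader, json_field: Field) -> tantivy::Result<bool> {
        let inv_idx = segment_reader.inverted_index(json_field)?;
        let mut term = Term::from_field_json_path(json_field, "tenant_id", false);
        // Query with i64, since fitting u64 values are indexed as i64.
        term.append_type_and_fast_value(75i64);
        Ok(inv_idx
            .read_postings(&term, IndexRecordOption::Basic)?
            .is_some())
    }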
@@ -1,6 +1,5 @@
 use std::collections::HashMap;
 use std::io::{self, Read, Write};
-use std::iter::ExactSizeIterator;
 use std::ops::Range;

 use common::{BinarySerializable, CountingWriter, HasLen, VInt};
@@ -1,5 +1,4 @@
 use std::io::Write;
-use std::marker::{Send, Sync};
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::Duration;
@@ -40,6 +39,7 @@ impl RetryPolicy {
 /// The `DirectoryLock` is an object that represents a file lock.
 ///
 /// It is associated with a lock file, that gets deleted on `Drop.`
+#[allow(dead_code)]
 pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);

 struct DirectoryLockGuard {
@@ -566,7 +566,7 @@ mod tests {
     let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
     let num_paths = 10;
     let paths: Vec<PathBuf> = (0..num_paths)
-        .map(|i| PathBuf::from(&*format!("file_{}", i)))
+        .map(|i| PathBuf::from(&*format!("file_{i}")))
         .collect();
     {
         for path in &paths {
@@ -1,6 +1,6 @@
 use std::io::Write;
 use std::mem;
-use std::path::{Path, PathBuf};
+use std::path::Path;
 use std::sync::atomic::Ordering::SeqCst;
 use std::sync::atomic::{AtomicBool, AtomicUsize};
 use std::sync::Arc;
@@ -32,6 +32,7 @@ pub struct WatchCallbackList {
 /// file change is detected.
 #[must_use = "This `WatchHandle` controls the lifetime of the watch and should therefore be used."]
 #[derive(Clone)]
+#[allow(dead_code)]
 pub struct WatchHandle(Arc<WatchCallback>);

 impl WatchHandle {
@@ -62,8 +62,7 @@ impl FacetReader {

 #[cfg(test)]
 mod tests {
-    use crate::schema::document::Value;
-    use crate::schema::{Facet, FacetOptions, SchemaBuilder, STORED};
+    use crate::schema::{Facet, FacetOptions, SchemaBuilder, Value, STORED};
     use crate::{DocAddress, Index, IndexWriter, TantivyDocument};

     #[test]
@@ -89,7 +88,9 @@ mod tests {
         let doc = searcher
             .doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))
             .unwrap();
-        let value = doc.get_first(facet_field).and_then(|v| v.as_facet());
+        let value = doc
+            .get_first(facet_field)
+            .and_then(|v| v.as_value().as_facet());
         assert_eq!(value, None);
     }

@@ -146,8 +147,11 @@ mod tests {
         facet_ords.extend(facet_reader.facet_ords(0u32));
         assert_eq!(&facet_ords, &[0u64]);
         let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))?;
-        let value: Option<&Facet> = doc.get_first(facet_field).and_then(|v| v.as_facet());
-        assert_eq!(value, Facet::from_text("/a/b").ok().as_ref());
+        let value: Option<Facet> = doc
+            .get_first(facet_field)
+            .and_then(|v| v.as_facet())
+            .map(|facet| Facet::from_encoded_string(facet.to_string()));
+        assert_eq!(value, Facet::from_text("/a/b").ok());
         Ok(())
     }

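Under the 0.22 document model, a stored facet comes back as its encoded text rather than a `&Facet`, so the test rebuilds the `Facet` explicitly. A sketch of the retrieval pattern, assuming a stored facet field and the `Value` trait in scope:

    use tantivy::schema::{Facet, Field, Value};
    use tantivy::TantivyDocument;

    fn first_facet(doc: &TantivyDocument, facet_field: Field) -> Option<Facet> {
        doc.get_first(facet_field)
            .and_then(|v| v.as_facet())
            .map(|encoded| Facet::from_encoded_string(encoded.to_string()))
    }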
@@ -79,8 +79,8 @@ mod tests {
     use std::ops::{Range, RangeInclusive};
     use std::path::Path;

-    use columnar::{Column, MonotonicallyMappableToU64, StrColumn};
-    use common::{ByteCount, HasLen, TerminatingWrite};
+    use columnar::StrColumn;
+    use common::{ByteCount, DateTimePrecision, HasLen, TerminatingWrite};
     use once_cell::sync::Lazy;
     use rand::prelude::SliceRandom;
     use rand::rngs::StdRng;
@@ -88,14 +88,15 @@ mod tests {

     use super::*;
     use crate::directory::{Directory, RamDirectory, WritePtr};
+    use crate::index::SegmentId;
     use crate::merge_policy::NoMergePolicy;
     use crate::schema::{
-        Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder, TantivyDocument,
-        TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
+        DateOptions, Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder,
+        TantivyDocument, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
     };
     use crate::time::OffsetDateTime;
     use crate::tokenizer::{LowerCaser, RawTokenizer, TextAnalyzer, TokenizerManager};
-    use crate::{DateOptions, DateTimePrecision, Index, IndexWriter, SegmentId, SegmentReader};
+    use crate::{Index, IndexWriter, SegmentReader};

     pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
         let mut schema_builder = Schema::builder();
@@ -1,14 +1,14 @@
 use std::io;

 use columnar::{ColumnarWriter, NumericalValue};
-use common::JsonPathWriter;
+use common::{DateTimePrecision, JsonPathWriter};
 use tokenizer_api::Token;

 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::schema::document::{Document, ReferenceValue, ReferenceValueLeaf, Value};
 use crate::schema::{value_type_to_column_type, Field, FieldType, Schema, Type};
 use crate::tokenizer::{TextAnalyzer, TokenizerManager};
-use crate::{DateTimePrecision, DocId, TantivyError};
+use crate::{DocId, TantivyError};

 /// Only index JSON down to a depth of 20.
 /// This is mostly to guard us from a stack overflow triggered by malicious input.
@@ -183,8 +183,7 @@ impl FastFieldsWriter {
                     .record_datetime(doc_id, field_name, truncated_datetime);
             }
             ReferenceValueLeaf::Facet(val) => {
-                self.columnar_writer
-                    .record_str(doc_id, field_name, val.encoded_str());
+                self.columnar_writer.record_str(doc_id, field_name, val);
             }
             ReferenceValueLeaf::Bytes(val) => {
                 self.columnar_writer.record_bytes(doc_id, field_name, val);
@@ -1,9 +1,12 @@
+#![allow(deprecated)] // Remove with index sorting
+
 use std::collections::HashSet;

 use rand::{thread_rng, Rng};

 use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
 use crate::schema::*;
+#[allow(deprecated)]
 use crate::{doc, schema, Index, IndexSettings, IndexSortByField, IndexWriter, Order, Searcher};

 fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
@@ -3,7 +3,7 @@ use std::fmt;
 #[cfg(feature = "mmap")]
 use std::path::Path;
 use std::path::PathBuf;
-use std::sync::Arc;
+use std::thread::available_parallelism;

 use super::segment::Segment;
 use super::segment_reader::merge_field_meta_data;
@@ -20,7 +20,7 @@ use crate::indexer::segment_updater::save_metas;
 use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
 use crate::reader::{IndexReader, IndexReaderBuilder};
 use crate::schema::document::Document;
-use crate::schema::{Field, FieldType, Schema};
+use crate::schema::{Field, FieldType, Schema, Type};
 use crate::tokenizer::{TextAnalyzer, TokenizerManager};
 use crate::SegmentReader;

@@ -83,7 +83,7 @@ fn save_new_metas(
 ///
 /// ```
 /// use tantivy::schema::*;
-/// use tantivy::{Index, IndexSettings, IndexSortByField, Order};
+/// use tantivy::{Index, IndexSettings};
 ///
 /// let mut schema_builder = Schema::builder();
 /// let id_field = schema_builder.add_text_field("id", STRING);
@@ -96,10 +96,7 @@ fn save_new_metas(
 ///
 /// let schema = schema_builder.build();
 /// let settings = IndexSettings{
-///     sort_by_field: Some(IndexSortByField{
-///         field: "number".to_string(),
-///         order: Order::Asc
-///     }),
+///     docstore_blocksize: 100_000,
 ///     ..Default::default()
 /// };
 /// let index = Index::builder().schema(schema).settings(settings).create_in_ram();
@@ -251,6 +248,14 @@ impl IndexBuilder {
                     sort_by_field.field
                 )));
             }
+            let supported_field_types = [Type::I64, Type::U64, Type::F64, Type::Date];
+            let field_type = entry.field_type().value_type();
+            if !supported_field_types.contains(&field_type) {
+                return Err(TantivyError::InvalidArgument(format!(
+                    "Unsupported field type in sort_by_field: {field_type:?}. Supported field \
+                     types: {supported_field_types:?} ",
+                )));
+            }
         }
         Ok(())
     } else {
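The added check means `sort_by_field` now only accepts i64, u64, f64, and date fields; the new `test_text_sort` in the index-sorting tests (shown further down) asserts the error path. A sketch of the accepted path, assuming a fast u64 field (keep in mind `IndexSortByField` is deprecated in this same release):

    use tantivy::schema::{Schema, FAST, INDEXED};
    use tantivy::{Index, IndexSettings, IndexSortByField, Order};

    fn sorted_index() -> tantivy::Result<Index> {
        let mut schema_builder = Schema::builder();
        schema_builder.add_u64_field("number", FAST | INDEXED);
        Index::builder()
            .schema(schema_builder.build())
            .settings(IndexSettings {
                sort_by_field: Some(IndexSortByField {
                    field: "number".to_string(),
                    order: Order::Asc,
                }),
                ..Default::default()
            })
            .create_in_ram()
    }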
@@ -287,7 +292,7 @@ pub struct Index {
     directory: ManagedDirectory,
     schema: Schema,
     settings: IndexSettings,
-    executor: Arc<Executor>,
+    executor: Executor,
     tokenizers: TokenizerManager,
     fast_field_tokenizers: TokenizerManager,
     inventory: SegmentMetaInventory,
@@ -312,29 +317,25 @@ impl Index {
     ///
     /// By default the executor is single thread, and simply runs in the calling thread.
     pub fn search_executor(&self) -> &Executor {
-        self.executor.as_ref()
+        &self.executor
     }

     /// Replace the default single thread search executor pool
     /// by a thread pool with a given number of threads.
     pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
-        self.executor = Arc::new(Executor::multi_thread(num_threads, "tantivy-search-")?);
+        self.executor = Executor::multi_thread(num_threads, "tantivy-search-")?;
         Ok(())
     }

     /// Custom thread pool by a outer thread pool.
-    pub fn set_shared_multithread_executor(
-        &mut self,
-        shared_thread_pool: Arc<Executor>,
-    ) -> crate::Result<()> {
-        self.executor = shared_thread_pool.clone();
-        Ok(())
+    pub fn set_executor(&mut self, executor: Executor) {
+        self.executor = executor;
     }

     /// Replace the default single thread search executor pool
     /// by a thread pool with as many threads as there are CPUs on the system.
     pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
-        let default_num_threads = num_cpus::get();
+        let default_num_threads = available_parallelism()?.get();
         self.set_multithread_executor(default_num_threads)
     }

@@ -412,7 +413,7 @@ impl Index {
             schema,
             tokenizers: TokenizerManager::default(),
             fast_field_tokenizers: TokenizerManager::default(),
-            executor: Arc::new(Executor::single_thread()),
+            executor: Executor::single_thread(),
             inventory,
         }
     }
@@ -615,7 +616,7 @@ impl Index {
         &self,
         memory_budget_in_bytes: usize,
     ) -> crate::Result<IndexWriter<D>> {
-        let mut num_threads = std::cmp::min(num_cpus::get(), MAX_NUM_THREAD);
+        let mut num_threads = std::cmp::min(available_parallelism()?.get(), MAX_NUM_THREAD);
         let memory_budget_num_bytes_per_thread = memory_budget_in_bytes / num_threads;
         if memory_budget_num_bytes_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
             num_threads = (memory_budget_in_bytes / MEMORY_BUDGET_NUM_BYTES_MIN).max(1);
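Two API shifts land here: `set_shared_multithread_executor(Arc<Executor>)` is replaced by the simpler `set_executor(Executor)`, and `num_cpus` gives way to `std::thread::available_parallelism` from the standard library. A sketch of sharing one pool across indices, assuming `Executor` is cheaply cloneable now that the rayon pool sits behind an `Arc` (see the `Executor::ThreadPool(Arc::new(pool))` change above):

    use tantivy::{Executor, Index};

    fn share_pool(index_a: &mut Index, index_b: &mut Index) -> tantivy::Result<()> {
        let executor = Executor::multi_thread(8, "search-")?;
        index_a.set_executor(executor.clone());
        index_b.set_executor(executor);
        Ok(())
    }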
@@ -288,6 +288,10 @@ impl Default for IndexSettings {
 /// Presorting documents can greatly improve performance
 /// in some scenarios, by applying top n
 /// optimizations.
+#[deprecated(
+    since = "0.22.0",
+    note = "We plan to remove index sorting in `0.23`. If you need index sorting, please comment on the related issue https://github.com/quickwit-oss/tantivy/issues/2352 and explain your use case."
+)]
 #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
 pub struct IndexSortByField {
     /// The field to sort the documents by
@@ -1,12 +1,13 @@
 use std::io;

+use common::json_path_writer::JSON_END_OF_PATH;
 use common::BinarySerializable;
 use fnv::FnvHashSet;

 use crate::directory::FileSlice;
 use crate::positions::PositionReader;
 use crate::postings::{BlockSegmentPostings, SegmentPostings, TermInfo};
-use crate::schema::{IndexRecordOption, Term, Type, JSON_END_OF_PATH};
+use crate::schema::{IndexRecordOption, Term, Type};
 use crate::termdict::TermDictionary;

 /// The inverted index reader is in charge of accessing
@@ -1,5 +1,3 @@
-//! # Index Module
-//!
 //! The `index` module in Tantivy contains core components to read and write indexes.
 //!
 //! It contains `Index` and `Segment`, where a `Index` consists of one or more `Segment`s.
@@ -1,4 +1,4 @@
-use std::cmp::{Ord, Ordering};
+use std::cmp::Ordering;
 use std::error::Error;
 use std::fmt;
 use std::str::FromStr;
@@ -318,14 +318,14 @@ impl SegmentReader {
             if create_canonical {
                 // Without expand dots enabled dots need to be escaped.
                 let escaped_json_path = json_path.replace('.', "\\.");
-                let full_path = format!("{}.{}", field_name, escaped_json_path);
+                let full_path = format!("{field_name}.{escaped_json_path}");
                 let full_path_unescaped = format!("{}.{}", field_name, &json_path);
                 map_to_canonical.insert(full_path_unescaped, full_path.to_string());
                 full_path
             } else {
                 // With expand dots enabled, we can use '.' instead of '\u{1}'.
                 json_path_sep_to_dot(&mut json_path);
-                format!("{}.{}", field_name, json_path)
+                format!("{field_name}.{json_path}")
             }
         };
         indexed_fields.extend(
@@ -406,7 +406,7 @@ impl SegmentReader {
     }

     /// Returns an iterator that will iterate over the alive document ids
-    pub fn doc_ids_alive(&self) -> Box<dyn Iterator<Item = DocId> + '_> {
+    pub fn doc_ids_alive(&self) -> Box<dyn Iterator<Item = DocId> + Send + '_> {
         if let Some(alive_bitset) = &self.alive_bitset_opt {
             Box::new(alive_bitset.iter_alive())
         } else {
@@ -516,8 +516,8 @@ impl fmt::Debug for SegmentReader {
 mod test {
     use super::*;
     use crate::index::Index;
-    use crate::schema::{Schema, SchemaBuilder, Term, STORED, TEXT};
-    use crate::{DocId, IndexWriter};
+    use crate::schema::{SchemaBuilder, Term, STORED, TEXT};
+    use crate::IndexWriter;

     #[test]
     fn test_merge_field_meta_data_same() {
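The added `Send` bound on the boxed iterator lets callers move it off the current thread while it still borrows the segment reader. A minimal sketch using a scoped thread:

    use tantivy::{DocId, SegmentReader};

    fn collect_alive_off_thread(segment_reader: &SegmentReader) -> Vec<DocId> {
        std::thread::scope(|scope| {
            // The iterator borrows `segment_reader`, but with `+ Send` it can
            // be moved into a scoped worker thread.
            let alive = segment_reader.doc_ids_alive();
            scope.spawn(move || alive.collect()).join().unwrap()
        })
    }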
@@ -246,8 +246,9 @@ impl DeleteCursor {
 mod tests {

     use super::{DeleteOperation, DeleteQueue};
+    use crate::index::SegmentReader;
     use crate::query::{Explanation, Scorer, Weight};
-    use crate::{DocId, Score, SegmentReader};
+    use crate::{DocId, Score};

     struct DummyWeight;
     impl Weight for DummyWeight {
@@ -159,7 +159,7 @@ mod tests_indexsorting {
     use crate::indexer::NoMergePolicy;
     use crate::query::QueryParser;
     use crate::schema::*;
-    use crate::{DocAddress, Index, IndexSettings, IndexSortByField, Order};
+    use crate::{DocAddress, Index, IndexBuilder, IndexSettings, IndexSortByField, Order};

     fn create_test_index(
         index_settings: Option<IndexSettings>,
@@ -306,12 +306,10 @@ mod tests_indexsorting {
         let my_string_field = index.schema().get_field("string_field").unwrap();
         let searcher = index.reader()?.searcher();
         {
-            assert_eq!(
-                searcher
-                    .doc::<TantivyDocument>(DocAddress::new(0, 0))?
-                    .get_first(my_string_field),
-                None
-            );
+            assert!(searcher
+                .doc::<TantivyDocument>(DocAddress::new(0, 0))?
+                .get_first(my_string_field)
+                .is_none());
             assert_eq!(
                 searcher
                     .doc::<TantivyDocument>(DocAddress::new(0, 3))?
@@ -344,7 +342,7 @@ mod tests_indexsorting {
                 Some("blublub")
             );
             let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 4))?;
-            assert_eq!(doc.get_first(my_string_field), None);
+            assert!(doc.get_first(my_string_field).is_none());
         }
         // sort by field desc
         let index = create_test_index(
@@ -557,4 +555,28 @@ mod tests_indexsorting {
             &[2000, 8000, 3000]
         );
     }
+
+    #[test]
+    fn test_text_sort() -> crate::Result<()> {
+        let mut schema_builder = SchemaBuilder::new();
+        schema_builder.add_text_field("id", STRING | FAST | STORED);
+        schema_builder.add_text_field("name", TEXT | STORED);
+
+        let resp = IndexBuilder::new()
+            .schema(schema_builder.build())
+            .settings(IndexSettings {
+                sort_by_field: Some(IndexSortByField {
+                    field: "id".to_string(),
+                    order: Order::Asc,
+                }),
+                ..Default::default()
+            })
+            .create_in_ram();
+        assert!(resp
+            .unwrap_err()
+            .to_string()
+            .contains("Unsupported field type"));
+
+        Ok(())
+    }
 }
@@ -22,6 +22,7 @@ where
     }
 }

+#[allow(dead_code)]
 pub trait FlatMapWithBufferIter: Iterator {
     /// Function similar to `flat_map`, but allows reusing a shared `Vec`.
     fn flat_map_with_buffer<F, T>(self, fill_buffer: F) -> FlatMapWithBuffer<T, F, Self>
@@ -814,10 +814,9 @@ mod tests {
     use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
     use crate::indexer::NoMergePolicy;
     use crate::query::{BooleanQuery, Occur, Query, QueryParser, TermQuery};
-    use crate::schema::document::Value;
     use crate::schema::{
         self, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, NumericOptions, Schema,
-        TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
+        TextFieldIndexing, TextOptions, Value, FAST, INDEXED, STORED, STRING, TEXT,
     };
     use crate::store::DOCSTORE_CACHE_CAPACITY;
     use crate::{
@@ -1980,7 +1979,13 @@ mod tests {
             .unwrap();
         // test store iterator
         for doc in store_reader.iter::<TantivyDocument>(segment_reader.alive_bitset()) {
-            let id = doc.unwrap().get_first(id_field).unwrap().as_u64().unwrap();
+            let id = doc
+                .unwrap()
+                .get_first(id_field)
+                .unwrap()
+                .as_value()
+                .as_u64()
+                .unwrap();
             assert!(expected_ids_and_num_occurrences.contains_key(&id));
         }
         // test store random access
@@ -2013,7 +2018,7 @@ mod tests {
             let mut bool2 = doc.get_all(multi_bools);
             assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
             assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
-            assert_eq!(None, bool2.next())
+            assert!(bool2.next().is_none())
         }
     }
 }

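Note: the hunks above all track one API shift in tantivy 0.22 — stored values are reached through the `Value` trait accessors (`as_value()`, `as_str()`, `as_u64()`) instead of directly comparable `OwnedValue`s, so equality checks against `None` become `is_none()` calls. A minimal end-to-end sketch of the new access pattern; the field name and memory budget are illustrative, not taken from this diff:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, Value, STORED, TEXT};
use tantivy::{doc, Index, TantivyDocument};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let index = Index::create_in_ram(schema_builder.build());

    // 15MB is the new minimum indexing budget mentioned in the changelog.
    let mut writer = index.writer(15_000_000)?;
    writer.add_document(doc!(title => "hello world"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("hello")?;
    let top_docs = searcher.search(&*query, &TopDocs::with_limit(1))?;
    let doc: TantivyDocument = searcher.doc(top_docs[0].1)?;

    // 0.22 style: reach the concrete value through the `Value` accessors,
    // and prefer `is_none()` over comparing an accessor result to `None`.
    assert_eq!(
        doc.get_first(title).unwrap().as_value().as_str(),
        Some("hello world")
    );
    Ok(())
}
```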
@@ -144,10 +144,9 @@ mod tests {
     use once_cell::sync::Lazy;

     use super::*;
-    use crate::index::SegmentMetaInventory;
-    use crate::indexer::merge_policy::MergePolicy;
+    use crate::index::{SegmentId, SegmentMetaInventory};
+    use crate::schema;
     use crate::schema::INDEXED;
-    use crate::{schema, SegmentId};

     static INVENTORY: Lazy<SegmentMetaInventory> = Lazy::new(SegmentMetaInventory::default);

@@ -1,7 +1,8 @@
 use std::collections::HashSet;
 use std::ops::Deref;

-use crate::{Inventory, Opstamp, SegmentId, TrackedObject};
+use crate::index::SegmentId;
+use crate::{Inventory, Opstamp, TrackedObject};

 #[derive(Default)]
 pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);

@@ -39,7 +39,6 @@ impl MergePolicy for NoMergePolicy {
 pub mod tests {

     use super::*;
-    use crate::index::{SegmentId, SegmentMeta};

     /// `MergePolicy` useful for test purposes.
     ///

@@ -13,7 +13,7 @@ use crate::docset::{DocSet, TERMINATED};
 use crate::error::DataCorruption;
 use crate::fastfield::{AliveBitSet, FastFieldNotAvailableError};
 use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
-use crate::index::{Segment, SegmentReader};
+use crate::index::{Segment, SegmentComponent, SegmentReader};
 use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping};
 use crate::indexer::SegmentSerializer;
 use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
@@ -21,8 +21,7 @@ use crate::schema::{value_type_to_column_type, Field, FieldType, Schema};
 use crate::store::StoreWriter;
 use crate::termdict::{TermMerger, TermOrdinal};
 use crate::{
-    DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
-    SegmentComponent, SegmentOrdinal,
+    DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order, SegmentOrdinal,
 };

 /// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -576,7 +575,7 @@ impl IndexMerger {
             //
             // Overall the reliable way to know if we have actual frequencies loaded or not
             // is to check whether the actual decoded array is empty or not.
-            if has_term_freq != !postings.block_cursor.freqs().is_empty() {
+            if has_term_freq == postings.block_cursor.freqs().is_empty() {
                 return Err(DataCorruption::comment_only(
                     "Term freqs are inconsistent across segments",
                 )
@@ -794,17 +793,16 @@ mod tests {
         BytesFastFieldTestCollector, FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE,
     };
     use crate::collector::{Count, FacetCollector};
-    use crate::index::Index;
+    use crate::index::{Index, SegmentId};
     use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery};
-    use crate::schema::document::Value;
     use crate::schema::{
         Facet, FacetOptions, IndexRecordOption, NumericOptions, TantivyDocument, Term,
-        TextFieldIndexing, INDEXED, TEXT,
+        TextFieldIndexing, Value, INDEXED, TEXT,
     };
     use crate::time::OffsetDateTime;
     use crate::{
         assert_nearly_equals, schema, DateTime, DocAddress, DocId, DocSet, IndexSettings,
-        IndexSortByField, IndexWriter, Order, Searcher, SegmentId,
+        IndexSortByField, IndexWriter, Order, Searcher,
     };

     #[test]
@@ -911,15 +909,24 @@ mod tests {
         }
         {
             let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 0))?;
-            assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("af b"));
+            assert_eq!(
+                doc.get_first(text_field).unwrap().as_value().as_str(),
+                Some("af b")
+            );
         }
         {
             let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 1))?;
-            assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("a b c"));
+            assert_eq!(
+                doc.get_first(text_field).unwrap().as_value().as_str(),
+                Some("a b c")
+            );
         }
         {
             let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 2))?;
-            assert_eq!(doc.get_first(text_field).unwrap().as_str(), Some("a b c d"));
+            assert_eq!(
+                doc.get_first(text_field).unwrap().as_value().as_str(),
+                Some("a b c d")
+            );
         }
         {
             let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 3))?;

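A quick check on the boolean rewrite in the `@@ -576` hunk above: `has_term_freq != !freqs.is_empty()` and `has_term_freq == freqs.is_empty()` are the same predicate, minus the double negation. A two-line sketch over plain booleans (nothing tantivy-specific):

```rust
fn main() {
    for (has_term_freq, freqs_is_empty) in
        [(true, true), (true, false), (false, true), (false, false)]
    {
        // The old and the rewritten condition agree on every input.
        assert_eq!(
            has_term_freq != !freqs_is_empty,
            has_term_freq == freqs_is_empty
        );
    }
}
```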
@@ -3,15 +3,15 @@ mod tests {
     use crate::collector::TopDocs;
     use crate::fastfield::AliveBitSet;
     use crate::index::Index;
+    use crate::postings::Postings;
     use crate::query::QueryParser;
-    use crate::schema::document::Value;
     use crate::schema::{
         self, BytesOptions, Facet, FacetOptions, IndexRecordOption, NumericOptions,
-        TextFieldIndexing, TextOptions,
+        TextFieldIndexing, TextOptions, Value,
     };
     use crate::{
-        DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, Postings,
-        TantivyDocument, Term,
+        DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, TantivyDocument,
+        Term,
     };

     fn create_test_index_posting_list_issue(index_settings: Option<IndexSettings>) -> Index {
@@ -280,13 +280,16 @@ mod tests {
             .doc::<TantivyDocument>(DocAddress::new(0, blubber_pos))
             .unwrap();
         assert_eq!(
-            doc.get_first(my_text_field).unwrap().as_str(),
+            doc.get_first(my_text_field).unwrap().as_value().as_str(),
             Some("blubber")
         );
         let doc = searcher
             .doc::<TantivyDocument>(DocAddress::new(0, 0))
             .unwrap();
-        assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(1000));
+        assert_eq!(
+            doc.get_first(int_field).unwrap().as_value().as_u64(),
+            Some(1000)
+        );
     }
 }

@@ -144,6 +144,181 @@ mod tests_mmap {
         assert_eq!(num_docs, 256);
     }
 }
+    #[test]
+    fn test_json_field_null_byte() {
+        // Test when field name contains a zero byte, which has special meaning in tantivy.
+        // As a workaround, we convert the zero byte to the ASCII character '0'.
+        // https://github.com/quickwit-oss/tantivy/issues/2340
+        // https://github.com/quickwit-oss/tantivy/issues/2193
+        let field_name_in = "\u{0000}";
+        let field_name_out = "0";
+        test_json_field_name(field_name_in, field_name_out);
+    }
+    #[test]
+    fn test_json_field_1byte() {
+        // Test when field name contains a '1' byte, which has special meaning in tantivy.
+        // The 1 byte can be addressed as '1' byte or '.'.
+        let field_name_in = "\u{0001}";
+        let field_name_out = "\u{0001}";
+        test_json_field_name(field_name_in, field_name_out);
+
+        // Test when field name contains a '1' byte, which has special meaning in tantivy.
+        let field_name_in = "\u{0001}";
+        let field_name_out = ".";
+        test_json_field_name(field_name_in, field_name_out);
+    }
+    #[test]
+    fn test_json_field_dot() {
+        // Test when field name contains a '.'
+        let field_name_in = ".";
+        let field_name_out = ".";
+        test_json_field_name(field_name_in, field_name_out);
+    }
+    fn test_json_field_name(field_name_in: &str, field_name_out: &str) {
+        let mut schema_builder = Schema::builder();
+
+        let options = JsonObjectOptions::from(TEXT | FAST).set_expand_dots_enabled();
+        let field = schema_builder.add_json_field("json", options);
+        let index = Index::create_in_ram(schema_builder.build());
+        let mut index_writer = index.writer_for_tests().unwrap();
+        index_writer
+            .add_document(doc!(field=>json!({format!("{field_name_in}"): "test1", format!("num{field_name_in}"): 10})))
+            .unwrap();
+        index_writer
+            .add_document(doc!(field=>json!({format!("a{field_name_in}"): "test2"})))
+            .unwrap();
+        index_writer
+            .add_document(doc!(field=>json!({format!("a{field_name_in}a"): "test3"})))
+            .unwrap();
+        index_writer
+            .add_document(
+                doc!(field=>json!({format!("a{field_name_in}a{field_name_in}"): "test4"})),
+            )
+            .unwrap();
+        index_writer
+            .add_document(
+                doc!(field=>json!({format!("a{field_name_in}.ab{field_name_in}"): "test5"})),
+            )
+            .unwrap();
+        index_writer
+            .add_document(
+                doc!(field=>json!({format!("a{field_name_in}"): json!({format!("a{field_name_in}"): "test6"}) })),
+            )
+            .unwrap();
+        index_writer
+            .add_document(doc!(field=>json!({format!("{field_name_in}a"): "test7"})))
+            .unwrap();

+        index_writer.commit().unwrap();
+        let reader = index.reader().unwrap();
+        let searcher = reader.searcher();
+        let parse_query = QueryParser::for_index(&index, Vec::new());
+        let test_query = |query_str: &str| {
+            let query = parse_query.parse_query(query_str).unwrap();
+            let num_docs = searcher.search(&query, &Count).unwrap();
+            assert_eq!(num_docs, 1, "{query_str}");
+        };
+        test_query(format!("json.{field_name_out}:test1").as_str());
+        test_query(format!("json.a{field_name_out}:test2").as_str());
+        test_query(format!("json.a{field_name_out}a:test3").as_str());
+        test_query(format!("json.a{field_name_out}a{field_name_out}:test4").as_str());
+        test_query(format!("json.a{field_name_out}.ab{field_name_out}:test5").as_str());
+        test_query(format!("json.a{field_name_out}.a{field_name_out}:test6").as_str());
+        test_query(format!("json.{field_name_out}a:test7").as_str());

+        let test_agg = |field_name: &str, expected: &str| {
+            let agg_req_str = json!(
+            {
+                "termagg": {
+                    "terms": {
+                        "field": field_name,
+                    }
+                }
+            });

+            let agg_req: Aggregations = serde_json::from_value(agg_req_str).unwrap();
+            let collector = AggregationCollector::from_aggs(agg_req, Default::default());
+            let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
+            let res = serde_json::to_value(agg_res).unwrap();
+            assert_eq!(res["termagg"]["buckets"][0]["doc_count"], 1);
+            assert_eq!(res["termagg"]["buckets"][0]["key"], expected);
+        };

+        test_agg(format!("json.{field_name_out}").as_str(), "test1");
+        test_agg(format!("json.a{field_name_out}").as_str(), "test2");
+        test_agg(format!("json.a{field_name_out}a").as_str(), "test3");
+        test_agg(
+            format!("json.a{field_name_out}a{field_name_out}").as_str(),
+            "test4",
+        );
+        test_agg(
+            format!("json.a{field_name_out}.ab{field_name_out}").as_str(),
+            "test5",
+        );
+        test_agg(
+            format!("json.a{field_name_out}.a{field_name_out}").as_str(),
+            "test6",
+        );
+        test_agg(format!("json.{field_name_out}a").as_str(), "test7");

+        // `.` is stored as `\u{0001}` internally in tantivy
+        let field_name_out_internal = if field_name_out == "." {
+            "\u{0001}"
+        } else {
+            field_name_out
+        };

+        let mut fields = reader.searcher().segment_readers()[0]
+            .inverted_index(field)
+            .unwrap()
+            .list_encoded_fields()
+            .unwrap();
+        assert_eq!(fields.len(), 8);
+        fields.sort();
+        let mut expected_fields = vec![
+            (format!("a{field_name_out_internal}"), Type::Str),
+            (format!("a{field_name_out_internal}a"), Type::Str),
+            (
+                format!("a{field_name_out_internal}a{field_name_out_internal}"),
+                Type::Str,
+            ),
+            (
+                format!("a{field_name_out_internal}\u{1}ab{field_name_out_internal}"),
+                Type::Str,
+            ),
+            (
+                format!("a{field_name_out_internal}\u{1}a{field_name_out_internal}"),
+                Type::Str,
+            ),
+            (format!("{field_name_out_internal}a"), Type::Str),
+            (format!("{field_name_out_internal}"), Type::Str),
+            (format!("num{field_name_out_internal}"), Type::I64),
+        ];
+        expected_fields.sort();
+        assert_eq!(fields, expected_fields);
+        // Check columnar reader
+        let mut columns = reader.searcher().segment_readers()[0]
+            .fast_fields()
+            .columnar()
+            .list_columns()
+            .unwrap()
+            .into_iter()
+            .map(|(name, _)| name)
+            .collect::<Vec<_>>();
+        let mut expected_columns = vec![
+            format!("json\u{1}{field_name_out_internal}"),
+            format!("json\u{1}{field_name_out_internal}a"),
+            format!("json\u{1}a{field_name_out_internal}"),
+            format!("json\u{1}a{field_name_out_internal}a"),
+            format!("json\u{1}a{field_name_out_internal}a{field_name_out_internal}"),
+            format!("json\u{1}a{field_name_out_internal}\u{1}ab{field_name_out_internal}"),
+            format!("json\u{1}a{field_name_out_internal}\u{1}a{field_name_out_internal}"),
+            format!("json\u{1}num{field_name_out_internal}"),
+        ];
+        columns.sort();
+        expected_columns.sort();
+        assert_eq!(columns, expected_columns);
+    }

     #[test]
     fn test_json_field_expand_dots_enabled_dot_escape_not_required() {
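The new tests above pin down how tantivy 0.22 handles JSON keys containing its two reserved bytes: a NUL byte in a key is rewritten to the ASCII character '0' at indexing time, while `\u{0001}` (the internal path separator, also reachable as '.') is kept. A compact sketch of the user-visible effect, reusing the `json!`/`doc!` pattern from the tests; the field name is illustrative:

```rust
use serde_json::json;
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{JsonObjectOptions, Schema, FAST, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let options = JsonObjectOptions::from(TEXT | FAST).set_expand_dots_enabled();
    let json = schema_builder.add_json_field("json", options);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(15_000_000)?;
    // The key is "\u{0000}"; after the workaround it is addressable as "0".
    writer.add_document(doc!(json => json!({"\u{0000}": "test1"})))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, Vec::new()).parse_query("json.0:test1")?;
    assert_eq!(searcher.search(&*query, &Count)?, 1);
    Ok(())
}
```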
@@ -415,10 +590,10 @@ mod tests_mmap {
         let query_parser = QueryParser::for_index(&index, vec![]);
         // Test if field name can be queried
         for (indexed_field, val) in fields_and_vals.iter() {
-            let query_str = &format!("{}:{}", indexed_field, val);
+            let query_str = &format!("{indexed_field}:{val}");
             let query = query_parser.parse_query(query_str).unwrap();
             let count_docs = searcher.search(&*query, &TopDocs::with_limit(2)).unwrap();
-            assert!(!count_docs.is_empty(), "{}:{}", indexed_field, val);
+            assert!(!count_docs.is_empty(), "{indexed_field}:{val}");
         }
         // Test if field name can be used for aggregation
         for (field_name, val) in fields_and_vals.iter() {

@@ -5,20 +5,20 @@ use tokenizer_api::BoxTokenStream;

 use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
 use super::operation::AddOperation;
-use crate::core::json_utils::index_json_values;
 use crate::fastfield::FastFieldsWriter;
 use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
-use crate::index::Segment;
+use crate::index::{Segment, SegmentComponent};
 use crate::indexer::segment_serializer::SegmentSerializer;
+use crate::json_utils::{index_json_value, IndexingPositionsPerPath};
 use crate::postings::{
     compute_table_memory_size, serialize_postings, IndexingContext, IndexingPosition,
     PerFieldPostingsWriter, PostingsWriter,
 };
-use crate::schema::document::{Document, ReferenceValue, Value};
+use crate::schema::document::{Document, Value};
 use crate::schema::{FieldEntry, FieldType, Schema, Term, DATE_TIME_PRECISION_INDEXED};
 use crate::store::{StoreReader, StoreWriter};
 use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer};
-use crate::{DocId, Opstamp, SegmentComponent, TantivyError};
+use crate::{DocId, Opstamp, TantivyError};

 /// Computes the initial size of the hash table.
 ///
@@ -68,6 +68,7 @@ pub struct SegmentWriter {
     pub(crate) fast_field_writers: FastFieldsWriter,
     pub(crate) fieldnorms_writer: FieldNormsWriter,
     pub(crate) json_path_writer: JsonPathWriter,
+    pub(crate) json_positions_per_path: IndexingPositionsPerPath,
     pub(crate) doc_opstamps: Vec<Opstamp>,
     per_field_text_analyzers: Vec<TextAnalyzer>,
     term_buffer: Term,
@@ -119,6 +120,7 @@ impl SegmentWriter {
             per_field_postings_writers,
             fieldnorms_writer: FieldNormsWriter::for_schema(&schema),
             json_path_writer: JsonPathWriter::default(),
+            json_positions_per_path: IndexingPositionsPerPath::default(),
             segment_serializer,
             fast_field_writers: FastFieldsWriter::from_schema_and_tokenizer_manager(
                 &schema,
@@ -204,8 +206,7 @@ impl SegmentWriter {
                     // Used to help with linting and type checking.
                     let value = value_access as D::Value<'_>;

-                    let facet = value.as_facet().ok_or_else(make_schema_error)?;
-                    let facet_str = facet.encoded_str();
+                    let facet_str = value.as_facet().ok_or_else(make_schema_error)?;
                     let mut facet_tokenizer = facet_tokenizer.token_stream(facet_str);
                     let mut indexing_position = IndexingPosition::default();
                     postings_writer.index_text(
@@ -228,7 +229,7 @@ impl SegmentWriter {
                         &mut self.per_field_text_analyzers[field.field_id() as usize];
                         text_analyzer.token_stream(text)
                     } else if let Some(tok_str) = value.as_pre_tokenized_text() {
-                        BoxTokenStream::new(PreTokenizedStream::from(tok_str.clone()))
+                        BoxTokenStream::new(PreTokenizedStream::from(*tok_str.clone()))
                     } else {
                         continue;
                     };
@@ -342,26 +343,24 @@ impl SegmentWriter {
                 FieldType::JsonObject(json_options) => {
                     let text_analyzer =
                         &mut self.per_field_text_analyzers[field.field_id() as usize];
-                    let json_values_it = values.map(|value_access| {
-                        // Used to help with linting and type checking.
-                        let value_access = value_access as D::Value<'_>;
-                        let value = value_access.as_value();
-
-                        match value {
-                            ReferenceValue::Object(object_iter) => Ok(object_iter),
-                            _ => Err(make_schema_error()),
-                        }
-                    });
-                    index_json_values::<D::Value<'_>>(
-                        doc_id,
-                        json_values_it,
-                        text_analyzer,
-                        json_options.is_expand_dots_enabled(),
-                        term_buffer,
-                        postings_writer,
-                        &mut self.json_path_writer,
-                        ctx,
-                    )?;
+                    self.json_positions_per_path.clear();
+                    self.json_path_writer
+                        .set_expand_dots(json_options.is_expand_dots_enabled());
+                    for json_value in values {
+                        self.json_path_writer.clear();
+                        index_json_value(
+                            doc_id,
+                            json_value,
+                            text_analyzer,
+                            term_buffer,
+                            &mut self.json_path_writer,
+                            postings_writer,
+                            ctx,
+                            &mut self.json_positions_per_path,
+                        );
+                    }
                 }
                 FieldType::IpAddr(_) => {
                     let mut num_vals = 0;

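The `@@ -342` hunk above replaces the closure-based `index_json_values` call with a per-value loop: the path writer's expand-dots setting is configured once, and both the path buffer and the per-path indexing positions are explicit `SegmentWriter` state cleared per document and per value. From the public API side, the relevant behavior is that with expand-dots enabled, dots inside a JSON key address nested paths. A small sketch under that assumption (field and key names are illustrative):

```rust
use serde_json::json;
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{JsonObjectOptions, Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let opts = JsonObjectOptions::from(TEXT).set_expand_dots_enabled();
    let attrs = schema_builder.add_json_field("attrs", opts);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(15_000_000)?;
    // With expand_dots, "k8s.node.color" behaves like {"k8s": {"node": {"color": ...}}}.
    writer.add_document(doc!(attrs => json!({"k8s.node.color": "blue"})))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, Vec::new())
        .parse_query("attrs.k8s.node.color:blue")?;
    assert_eq!(searcher.search(&*query, &Count)?, 1);
    Ok(())
}
```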
@@ -496,22 +495,21 @@ mod tests {
     use tempfile::TempDir;

     use crate::collector::{Count, TopDocs};
-    use crate::core::json_utils::JsonTermWriter;
     use crate::directory::RamDirectory;
-    use crate::postings::TermInfo;
+    use crate::fastfield::FastValue;
+    use crate::postings::{Postings, TermInfo};
     use crate::query::{PhraseQuery, QueryParser};
-    use crate::schema::document::Value;
     use crate::schema::{
-        Document, IndexRecordOption, Schema, TextFieldIndexing, TextOptions, Type, STORED, STRING,
-        TEXT,
+        Document, IndexRecordOption, OwnedValue, Schema, TextFieldIndexing, TextOptions, Value,
+        STORED, STRING, TEXT,
     };
     use crate::store::{Compressor, StoreReader, StoreWriter};
     use crate::time::format_description::well_known::Rfc3339;
     use crate::time::OffsetDateTime;
     use crate::tokenizer::{PreTokenizedString, Token};
     use crate::{
-        DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, Postings, TantivyDocument,
-        Term, TERMINATED,
+        DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, TantivyDocument, Term,
+        TERMINATED,
     };

     #[test]
@@ -556,9 +554,15 @@ mod tests {
         let reader = StoreReader::open(directory.open_read(path).unwrap(), 0).unwrap();
         let doc = reader.get::<TantivyDocument>(0).unwrap();

-        assert_eq!(doc.field_values().len(), 2);
-        assert_eq!(doc.field_values()[0].value().as_str(), Some("A"));
-        assert_eq!(doc.field_values()[1].value().as_str(), Some("title"));
+        assert_eq!(doc.field_values().count(), 2);
+        assert_eq!(
+            doc.get_all(text_field).next().unwrap().as_value().as_str(),
+            Some("A")
+        );
+        assert_eq!(
+            doc.get_all(text_field).nth(1).unwrap().as_value().as_str(),
+            Some("title")
+        );
     }
     #[test]
     fn test_simple_json_indexing() {
@@ -598,12 +602,51 @@ mod tests {
         assert_eq!(score_docs.len(), 2);
     }

+    #[test]
+    fn test_flat_json_indexing() {
+        // A JSON Object that contains mixed values on the first level
+        let mut schema_builder = Schema::builder();
+        let json_field = schema_builder.add_json_field("json", STORED | STRING);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema.clone());
+        let mut writer = index.writer_for_tests().unwrap();
+        // Text, i64, u64
+        writer.add_document(doc!(json_field=>"b")).unwrap();
+        writer
+            .add_document(doc!(json_field=>OwnedValue::I64(10i64)))
+            .unwrap();
+        writer
+            .add_document(doc!(json_field=>OwnedValue::U64(55u64)))
+            .unwrap();
+        writer
+            .add_document(doc!(json_field=>json!({"my_field": "a"})))
+            .unwrap();
+        writer.commit().unwrap();

+        let search_and_expect = |query| {
+            let query_parser = QueryParser::for_index(&index, vec![json_field]);
+            let text_query = query_parser.parse_query(query).unwrap();
+            let score_docs: Vec<(_, DocAddress)> = index
+                .reader()
+                .unwrap()
+                .searcher()
+                .search(&text_query, &TopDocs::with_limit(4))
+                .unwrap();
+            assert_eq!(score_docs.len(), 1);
+        };

+        search_and_expect("my_field:a");
+        search_and_expect("b");
+        search_and_expect("10");
+        search_and_expect("55");
+    }

     #[test]
     fn test_json_indexing() {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", STORED | TEXT);
         let schema = schema_builder.build();
-        let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
+        let json_val: serde_json::Value = serde_json::from_str(
             r#"{
                 "toto": "titi",
                 "float": -0.2,
@@ -631,129 +674,125 @@ mod tests {
             doc_id: 0u32,
         })
         .unwrap();
-        let serdeser_json_val = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(
-            &doc.to_json(&schema),
-        )
-        .unwrap()
-        .get("json")
-        .unwrap()[0]
-        .as_object()
+        let serdeser_json_val = serde_json::from_str::<serde_json::Value>(&doc.to_json(&schema))
             .unwrap()
+            .get("json")
+            .unwrap()[0]
             .clone();
         assert_eq!(json_val, serdeser_json_val);
         let segment_reader = searcher.segment_reader(0u32);
         let inv_idx = segment_reader.inverted_index(json_field).unwrap();
         let term_dict = inv_idx.terms();

-        let mut term = Term::with_type_and_field(Type::Json, json_field);
         let mut term_stream = term_dict.stream().unwrap();

-        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
+        let term_from_path =
+            |path: &str| -> Term { Term::from_field_json_path(json_field, path, false) };

-        json_term_writer.push_path_segment("bool");
-        json_term_writer.set_fast_value(true);
+        fn set_fast_val<T: FastValue>(val: T, mut term: Term) -> Term {
+            term.append_type_and_fast_value(val);
+            term
+        }
+        fn set_str(val: &str, mut term: Term) -> Term {
+            term.append_type_and_str(val);
+            term
+        }

+        let term = term_from_path("bool");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(true, term).serialized_value_bytes()
         );

-        json_term_writer.pop_path_segment();
-        json_term_writer.push_path_segment("complexobject");
-        json_term_writer.push_path_segment("field.with.dot");
-        json_term_writer.set_fast_value(1i64);
+        let term = term_from_path("complexobject.field\\.with\\.dot");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(1i64, term).serialized_value_bytes()
         );

-        json_term_writer.pop_path_segment();
-        json_term_writer.pop_path_segment();
-        json_term_writer.push_path_segment("date");
-        json_term_writer.set_fast_value(DateTime::from_utc(
-            OffsetDateTime::parse("1985-04-12T23:20:50.52Z", &Rfc3339).unwrap(),
-        ));
+        // Date
+        let term = term_from_path("date");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(
+                DateTime::from_utc(
+                    OffsetDateTime::parse("1985-04-12T23:20:50.52Z", &Rfc3339).unwrap(),
+                ),
+                term
+            )
+            .serialized_value_bytes()
         );

-        json_term_writer.pop_path_segment();
-        json_term_writer.push_path_segment("float");
-        json_term_writer.set_fast_value(-0.2f64);
+        // Float
+        let term = term_from_path("float");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(-0.2f64, term).serialized_value_bytes()
         );

-        json_term_writer.pop_path_segment();
-        json_term_writer.push_path_segment("my_arr");
-        json_term_writer.set_fast_value(2i64);
+        // Number In Array
+        let term = term_from_path("my_arr");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(2i64, term).serialized_value_bytes()
         );

-        json_term_writer.set_fast_value(3i64);
+        let term = term_from_path("my_arr");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(3i64, term).serialized_value_bytes()
        );

-        json_term_writer.set_fast_value(4i64);
+        let term = term_from_path("my_arr");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(4i64, term).serialized_value_bytes()
         );

-        json_term_writer.push_path_segment("my_key");
-        json_term_writer.set_str("tokens");
+        // El in Array
+        let term = term_from_path("my_arr.my_key");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_str("tokens", term).serialized_value_bytes()
         );
-        json_term_writer.set_str("two");
+        let term = term_from_path("my_arr.my_key");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_str("two", term).serialized_value_bytes()
         );

-        json_term_writer.pop_path_segment();
-        json_term_writer.pop_path_segment();
-        json_term_writer.push_path_segment("signed");
-        json_term_writer.set_fast_value(-2i64);
+        // Signed
+        let term = term_from_path("signed");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(-2i64, term).serialized_value_bytes()
         );

-        json_term_writer.pop_path_segment();
-        json_term_writer.push_path_segment("toto");
-        json_term_writer.set_str("titi");
+        let term = term_from_path("toto");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_str("titi", term).serialized_value_bytes()
         );
+        // Unsigned
-        json_term_writer.pop_path_segment();
-        json_term_writer.push_path_segment("unsigned");
-        json_term_writer.set_fast_value(1i64);
+        let term = term_from_path("unsigned");
         assert!(term_stream.advance());
         assert_eq!(
             term_stream.key(),
-            json_term_writer.term().serialized_value_bytes()
+            set_fast_val(1i64, term).serialized_value_bytes()
         );

         assert!(!term_stream.advance());
     }

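The rewrites above (and the hunks that follow) retire the internal `JsonTermWriter` in favor of building JSON terms directly: `Term::from_field_json_path(field, path, expand_dots)` encodes the path, and `append_type_and_str` / `append_type_and_fast_value` append the typed value. A hedged usage sketch with an illustrative field and path, not taken from this diff:

```rust
use tantivy::query::TermQuery;
use tantivy::schema::{IndexRecordOption, Schema, Term, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    let attrs = schema_builder.add_json_field("attrs", TEXT);
    let _schema = schema_builder.build();

    // Encode the JSON path "color" below the `attrs` field, then append the
    // typed value; this mirrors the pattern the rewritten tests use.
    let mut term = Term::from_field_json_path(attrs, "color", false);
    term.append_type_and_str("red");

    // The resulting term can be used like any other term.
    let _query = TermQuery::new(term, IndexRecordOption::Basic);
}
```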
@@ -774,14 +813,9 @@ mod tests {
         let searcher = reader.searcher();
         let segment_reader = searcher.segment_reader(0u32);
         let inv_index = segment_reader.inverted_index(json_field).unwrap();
-        let mut term = Term::with_type_and_field(Type::Json, json_field);
-        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
-        json_term_writer.push_path_segment("mykey");
-        json_term_writer.set_str("token");
-        let term_info = inv_index
-            .get_term_info(json_term_writer.term())
-            .unwrap()
-            .unwrap();
+        let mut term = Term::from_field_json_path(json_field, "mykey", false);
+        term.append_type_and_str("token");
+        let term_info = inv_index.get_term_info(&term).unwrap().unwrap();
         assert_eq!(
             term_info,
             TermInfo {
@@ -807,7 +841,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", STRING);
         let schema = schema_builder.build();
-        let json_val: serde_json::Map<String, serde_json::Value> =
+        let json_val: serde_json::Value =
             serde_json::from_str(r#"{"mykey": "two tokens"}"#).unwrap();
         let doc = doc!(json_field=>json_val);
         let index = Index::create_in_ram(schema);
@@ -818,14 +852,9 @@ mod tests {
         let searcher = reader.searcher();
         let segment_reader = searcher.segment_reader(0u32);
         let inv_index = segment_reader.inverted_index(json_field).unwrap();
-        let mut term = Term::with_type_and_field(Type::Json, json_field);
-        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
-        json_term_writer.push_path_segment("mykey");
-        json_term_writer.set_str("two tokens");
-        let term_info = inv_index
-            .get_term_info(json_term_writer.term())
-            .unwrap()
-            .unwrap();
+        let mut term = Term::from_field_json_path(json_field, "mykey", false);
+        term.append_type_and_str("two tokens");
+        let term_info = inv_index.get_term_info(&term).unwrap().unwrap();
         assert_eq!(
             term_info,
             TermInfo {
@@ -852,7 +881,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", TEXT);
         let schema = schema_builder.build();
-        let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
+        let json_val: serde_json::Value = serde_json::from_str(
             r#"{"mykey": [{"field": "hello happy tax payer"}, {"field": "nothello"}]}"#,
         )
         .unwrap();
@@ -863,16 +892,18 @@ mod tests {
         writer.commit().unwrap();
         let reader = index.reader().unwrap();
         let searcher = reader.searcher();
-        let mut term = Term::with_type_and_field(Type::Json, json_field);
-        let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
-        json_term_writer.push_path_segment("mykey");
-        json_term_writer.push_path_segment("field");
-        json_term_writer.set_str("hello");
-        let hello_term = json_term_writer.term().clone();
-        json_term_writer.set_str("nothello");
-        let nothello_term = json_term_writer.term().clone();
-        json_term_writer.set_str("happy");
-        let happy_term = json_term_writer.term().clone();
+
+        let term = Term::from_field_json_path(json_field, "mykey.field", false);
+
+        let mut hello_term = term.clone();
+        hello_term.append_type_and_str("hello");
+        let mut nothello_term = term.clone();
+        nothello_term.append_type_and_str("nothello");
+        let mut happy_term = term.clone();
+        happy_term.append_type_and_str("happy");
+
         let phrase_query = PhraseQuery::new(vec![hello_term, happy_term.clone()]);
         assert_eq!(searcher.search(&phrase_query, &Count).unwrap(), 1);
         let phrase_query = PhraseQuery::new(vec![nothello_term, happy_term]);

50  src/lib.rs
@@ -178,6 +178,7 @@ pub use crate::future_result::FutureResult;
 pub type Result<T> = std::result::Result<T, TantivyError>;

 mod core;
+#[allow(deprecated)] // Remove with index sorting
 pub mod indexer;

 #[allow(unused_doc_comments)]
@@ -189,6 +190,7 @@ pub mod collector;
 pub mod directory;
 pub mod fastfield;
 pub mod fieldnorm;
+#[allow(deprecated)] // Remove with index sorting
 pub mod index;
 pub mod positions;
 pub mod postings;
@@ -214,29 +216,17 @@ use once_cell::sync::Lazy;
 use serde::{Deserialize, Serialize};

 pub use self::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN, TERMINATED};
-#[deprecated(
-    since = "0.22.0",
-    note = "Will be removed in tantivy 0.23. Use export from snippet module instead"
-)]
-pub use self::snippet::{Snippet, SnippetGenerator};
 #[doc(hidden)]
 pub use crate::core::json_utils;
 pub use crate::core::{Executor, Searcher, SearcherGeneration};
 pub use crate::directory::Directory;
+#[allow(deprecated)] // Remove with index sorting
 pub use crate::index::{
     Index, IndexBuilder, IndexMeta, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
-    Segment, SegmentComponent, SegmentId, SegmentMeta, SegmentReader,
+    Segment, SegmentMeta, SegmentReader,
 };
-#[deprecated(
-    since = "0.22.0",
-    note = "Will be removed in tantivy 0.23. Use export from indexer module instead"
-)]
-pub use crate::indexer::PreparedCommit;
 pub use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
-pub use crate::postings::Postings;
-#[allow(deprecated)]
-pub use crate::schema::DatePrecision;
-pub use crate::schema::{DateOptions, DateTimePrecision, Document, TantivyDocument, Term};
+pub use crate::schema::{Document, TantivyDocument, Term};

 /// Index format version.
 const INDEX_FORMAT_VERSION: u32 = 6;
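With the crate-root re-exports pruned as above, several names should be imported from their home modules instead: `SegmentId`/`SegmentComponent` from `tantivy::index`, `Postings` from `tantivy::postings`, `PreparedCommit` from `tantivy::indexer`, and `Snippet`/`SnippetGenerator` from `tantivy::snippet` (the root paths removed or deprecated here are slated for deletion in 0.23 per the notes in the diff). A before/after import sketch:

```rust
#![allow(unused_imports)]

// Before (tantivy 0.21 crate-root re-exports):
// use tantivy::{Postings, SegmentId, Snippet, SnippetGenerator};

// After (tantivy 0.22 module paths):
use tantivy::index::SegmentId;
use tantivy::postings::Postings;
use tantivy::snippet::{Snippet, SnippetGenerator};

fn main() {}
```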
@@ -254,7 +244,7 @@ pub struct Version {

 impl fmt::Debug for Version {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self.to_string())
+        fmt::Display::fmt(self, f)
     }
 }

@@ -265,9 +255,10 @@ static VERSION: Lazy<Version> = Lazy::new(|| Version {
     index_format_version: INDEX_FORMAT_VERSION,
 });

-impl ToString for Version {
-    fn to_string(&self) -> String {
-        format!(
+impl fmt::Display for Version {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
             "tantivy v{}.{}.{}, index_format v{}",
             self.major, self.minor, self.patch, self.index_format_version
         )
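Switching `Version` from a hand-written `ToString` to `fmt::Display` is the idiomatic fix: implementing `Display` yields `to_string()` for free through the blanket `impl<T: Display> ToString for T`, and it lets `Debug` delegate without allocating an intermediate `String` the way the old `write!(f, "{}", self.to_string())` did. A minimal sketch of the same pattern on a stand-in struct:

```rust
use std::fmt;

struct Version {
    major: u32,
    minor: u32,
    patch: u32,
}

impl fmt::Display for Version {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "v{}.{}.{}", self.major, self.minor, self.patch)
    }
}

// `Display` also makes a cheap `Debug` delegation possible.
impl fmt::Debug for Version {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

fn main() {
    let v = Version { major: 0, minor: 22, patch: 0 };
    assert_eq!(v.to_string(), "v0.22.0"); // via the blanket ToString impl
    println!("{v}"); // and Display works directly
}
```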
@@ -390,9 +381,10 @@ pub mod tests {
     use crate::docset::{DocSet, TERMINATED};
     use crate::index::SegmentReader;
     use crate::merge_policy::NoMergePolicy;
+    use crate::postings::Postings;
     use crate::query::BooleanQuery;
     use crate::schema::*;
-    use crate::{DateTime, DocAddress, Index, IndexWriter, Postings, ReloadPolicy};
+    use crate::{DateTime, DocAddress, Index, IndexWriter, ReloadPolicy};

     pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
         let mut buffer = Vec::new();
@@ -444,7 +436,6 @@ pub mod tests {
     }

     #[test]
-    #[cfg(not(feature = "lz4"))]
     fn test_version_string() {
         use regex::Regex;
         let regex_ptn = Regex::new(
@@ -944,7 +935,7 @@ pub mod tests {
         let mut schema_builder = Schema::builder();
         let json_field = schema_builder.add_json_field("json", STORED | TEXT);
         let schema = schema_builder.build();
-        let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
+        let json_val: serde_json::Value = serde_json::from_str(
             r#"{
                 "signed": 2,
                 "float": 2.0,
@@ -1034,13 +1025,16 @@ pub mod tests {
             text_field => "some other value",
             other_text_field => "short");
         assert_eq!(document.len(), 3);
-        let values: Vec<&OwnedValue> = document.get_all(text_field).collect();
+        let values: Vec<OwnedValue> = document.get_all(text_field).map(OwnedValue::from).collect();
         assert_eq!(values.len(), 2);
-        assert_eq!(values[0].as_str(), Some("tantivy"));
-        assert_eq!(values[1].as_str(), Some("some other value"));
+        assert_eq!(values[0].as_ref().as_str(), Some("tantivy"));
+        assert_eq!(values[1].as_ref().as_str(), Some("some other value"));
-        let values: Vec<&OwnedValue> = document.get_all(other_text_field).collect();
+        let values: Vec<OwnedValue> = document
+            .get_all(other_text_field)
+            .map(OwnedValue::from)
+            .collect();
         assert_eq!(values.len(), 1);
-        assert_eq!(values[0].as_str(), Some("short"));
+        assert_eq!(values[0].as_ref().as_str(), Some("short"));
     }

     #[test]
@@ -1107,9 +1101,9 @@ pub mod tests {
     #[test]
     fn test_update_via_delete_insert() -> crate::Result<()> {
         use crate::collector::Count;
+        use crate::index::SegmentId;
         use crate::indexer::NoMergePolicy;
         use crate::query::AllQuery;
-        use crate::SegmentId;

         const DOC_COUNT: u64 = 2u64;

@@ -41,6 +41,7 @@
 /// );
 /// # }
 /// ```
+
 #[macro_export]
 macro_rules! doc(
     () => {
@@ -52,7 +53,7 @@ macro_rules! doc(
         {
             let mut document = $crate::TantivyDocument::default();
             $(
-                document.add_field_value($field, $value);
+                document.add_field_value($field, &$value);
             )*
             document
         }

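The one-character change above (`$value` → `&$value`) makes the `doc!` macro hand field values to `add_field_value` by reference, so callers no longer move their values into the macro. Usage is unchanged for literals; a hedged sketch of the intended effect, assuming the 0.22 document API accepts references here:

```rust
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, TantivyDocument};

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let _schema = schema_builder.build();

    // The macro now passes `&$value` internally, so a value owned by the
    // caller can still be used after building the document.
    let owned = String::from("of mice and men");
    let _doc: TantivyDocument = doc!(
        title => "a dairy cow",
        title => owned.as_str()
    );
    println!("{owned}"); // `owned` was not moved into the macro call
}
```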
@@ -14,7 +14,6 @@ pub fn compressed_block_size(num_bits: u8) -> usize {
 pub struct BlockEncoder {
     bitpacker: BitPacker4x,
     pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
-    pub output_len: usize,
 }

 impl Default for BlockEncoder {
@@ -28,7 +27,6 @@ impl BlockEncoder {
         BlockEncoder {
             bitpacker: BitPacker4x::new(),
             output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
-            output_len: 0,
         }
     }

@@ -1,5 +1,6 @@
 use std::io;

+use common::json_path_writer::JSON_END_OF_PATH;
 use stacker::Addr;

 use crate::indexer::doc_id_mapping::DocIdMapping;
@@ -7,7 +8,7 @@ use crate::indexer::path_to_unordered_id::OrderedPathId;
 use crate::postings::postings_writer::SpecializedPostingsWriter;
 use crate::postings::recorder::{BufferLender, DocIdRecorder, Recorder};
 use crate::postings::{FieldSerializer, IndexingContext, IndexingPosition, PostingsWriter};
-use crate::schema::{Field, Type, JSON_END_OF_PATH};
+use crate::schema::{Field, Type};
 use crate::tokenizer::TokenStream;
 use crate::{DocId, Term};

@@ -67,10 +68,18 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
     ) -> io::Result<()> {
         let mut term_buffer = Term::with_capacity(48);
         let mut buffer_lender = BufferLender::default();
+        term_buffer.clear_with_field_and_type(Type::Json, Field::from_field_id(0));
+        let mut prev_term_id = u32::MAX;
+        let mut term_path_len = 0; // this will be set in the first iteration
         for (_field, path_id, term, addr) in term_addrs {
-            term_buffer.clear_with_field_and_type(Type::Json, Field::from_field_id(0));
-            term_buffer.append_bytes(ordered_id_to_path[path_id.path_id() as usize].as_bytes());
-            term_buffer.append_bytes(&[JSON_END_OF_PATH]);
+            if prev_term_id != path_id.path_id() {
+                term_buffer.truncate_value_bytes(0);
+                term_buffer.append_path(ordered_id_to_path[path_id.path_id() as usize].as_bytes());
+                term_buffer.append_bytes(&[JSON_END_OF_PATH]);
+                term_path_len = term_buffer.len_bytes();
+                prev_term_id = path_id.path_id();
+            }
+            term_buffer.truncate_value_bytes(term_path_len);
             term_buffer.append_bytes(term);
             if let Some(json_value) = term_buffer.value().as_json_value_bytes() {
                 let typ = json_value.typ();

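The `@@ -67` hunk above is a small copy-avoidance optimization: terms arrive grouped by path id, so instead of rebuilding `field + path + JSON_END_OF_PATH` for every term, the writer re-encodes the path prefix only when the path id changes and truncates the buffer back to the cached prefix length per term. A generic sketch of the same buffer-reuse pattern with illustrative types (not the tantivy internals):

```rust
/// Append `prefix + value` pairs into one reusable buffer, re-encoding the
/// prefix only when it changes. `u32::MAX` plays the "no previous id" role,
/// exactly like `prev_term_id` in the hunk above.
fn encode_terms(paths: &[&str], terms: &[(u32, &[u8])]) -> Vec<Vec<u8>> {
    const END_OF_PATH: u8 = 0u8; // stand-in for JSON_END_OF_PATH
    let mut buffer: Vec<u8> = Vec::new();
    let mut prev_path_id = u32::MAX;
    let mut prefix_len = 0;
    let mut out = Vec::new();
    for &(path_id, value) in terms {
        if prev_path_id != path_id {
            buffer.clear();
            buffer.extend_from_slice(paths[path_id as usize].as_bytes());
            buffer.push(END_OF_PATH);
            prefix_len = buffer.len();
            prev_path_id = path_id;
        }
        buffer.truncate(prefix_len); // keep the cached prefix, drop the old value
        buffer.extend_from_slice(value);
        out.push(buffer.clone());
    }
    out
}

fn main() {
    let paths = ["color", "size"];
    let terms: [(u32, &[u8]); 3] = [(0, b"red"), (0, b"blue"), (1, b"xl")];
    let encoded = encode_terms(&paths, &terms);
    assert_eq!(encoded[0], b"color\0red");
    assert_eq!(encoded[1], b"color\0blue"); // prefix reused, not re-encoded
    assert_eq!(encoded[2], b"size\0xl");
}
```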
@@ -204,11 +204,7 @@ impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
 impl<Rec: Recorder> PostingsWriter for SpecializedPostingsWriter<Rec> {
     #[inline]
     fn subscribe(&mut self, doc: DocId, position: u32, term: &Term, ctx: &mut IndexingContext) {
-        assert!(
-            term.serialized_term().len() >= 4,
-            "Term too short expect >=4 but got {:?}",
-            term.serialized_term()
-        );
+        debug_assert!(term.serialized_term().len() >= 4);
         self.total_num_tokens += 1;
         let (term_index, arena) = (&mut ctx.term_index, &mut ctx.arena);
         term_index.mutate_or_create(term.serialized_term(), |opt_recorder: Option<Rec>| {

@@ -56,7 +56,7 @@ pub struct InvertedIndexSerializer {
 impl InvertedIndexSerializer {
     /// Open a new `InvertedIndexSerializer` for the given segment
     pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> {
-        use crate::SegmentComponent::{Positions, Postings, Terms};
+        use crate::index::SegmentComponent::{Positions, Postings, Terms};
         let inv_index_serializer = InvertedIndexSerializer {
             terms_write: CompositeWrite::wrap(segment.open_write(Terms)?),
             postings_write: CompositeWrite::wrap(segment.open_write(Postings)?),

@@ -1,5 +1,3 @@
-use std::convert::TryInto;
-
 use crate::directory::OwnedBytes;
 use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
 use crate::query::Bm25Weight;
@@ -1,5 +1,4 @@
 use std::io;
-use std::iter::ExactSizeIterator;
 use std::ops::Range;
 
 use common::{BinarySerializable, FixedSize};
@@ -1,8 +1,9 @@
 use super::Scorer;
 use crate::docset::TERMINATED;
+use crate::index::SegmentReader;
 use crate::query::explanation::does_not_match;
 use crate::query::{EnableScoring, Explanation, Query, Weight};
-use crate::{DocId, DocSet, Score, Searcher, SegmentReader};
+use crate::{DocId, DocSet, Score, Searcher};
 
 /// `EmptyQuery` is a dummy `Query` in which no document matches.
 ///
@@ -149,7 +149,7 @@ mod tests {
     use crate::query::exist_query::ExistsQuery;
     use crate::query::{BooleanQuery, RangeQuery};
     use crate::schema::{Facet, FacetOptions, Schema, FAST, INDEXED, STRING, TEXT};
-    use crate::{doc, Index, Searcher};
+    use crate::{Index, Searcher};
 
     #[test]
     fn test_exists_query_simple() -> crate::Result<()> {
@@ -84,7 +84,7 @@ pub struct FuzzyTermQuery {
     distance: u8,
     /// Should a transposition cost 1 or 2?
     transposition_cost_one: bool,
-    ///
+    /// is a starts with query
     prefix: bool,
 }
 
@@ -138,8 +138,7 @@ impl FuzzyTermQuery {
         if json_path_type != Type::Str {
             return Err(InvalidArgument(format!(
                 "The fuzzy term query requires a string path type for a json term. Found \
-                 {:?}",
-                json_path_type
+                 {json_path_type:?}"
             )));
         }
     }
@@ -180,7 +180,7 @@ impl MoreLikeThis {
         let facets: Vec<&str> = values
             .iter()
             .map(|value| {
-                value.as_facet().map(|f| f.encoded_str()).ok_or_else(|| {
+                value.as_facet().ok_or_else(|| {
                     TantivyError::InvalidArgument("invalid field value".to_string())
                 })
             })
@@ -220,7 +220,7 @@ impl MoreLikeThis {
                 let mut token_stream = tokenizer.token_stream(text);
                 token_stream.process(sink);
             } else if let Some(tok_str) = value.as_pre_tokenized_text() {
-                let mut token_stream = PreTokenizedStream::from(tok_str.clone());
+                let mut token_stream = PreTokenizedStream::from(*tok_str.clone());
                 token_stream.process(sink);
             }
         }
@@ -10,10 +10,8 @@ use query_grammar::{UserInputAst, UserInputBound, UserInputLeaf, UserInputLitera
 use rustc_hash::FxHashMap;
 
 use super::logical_ast::*;
-use crate::core::json_utils::{
-    convert_to_fast_value_and_get_term, set_string_and_get_terms, JsonTermWriter,
-};
 use crate::index::Index;
+use crate::json_utils::convert_to_fast_value_and_append_to_json_term;
 use crate::query::range_query::{is_type_valid_for_fastfield_range_query, RangeQuery};
 use crate::query::{
     AllQuery, BooleanQuery, BoostQuery, EmptyQuery, FuzzyTermQuery, Occur, PhrasePrefixQuery,
@@ -965,20 +963,27 @@ fn generate_literals_for_json_object(
     })?;
     let index_record_option = text_options.index_option();
     let mut logical_literals = Vec::new();
-    let mut term = Term::with_capacity(100);
-    let mut json_term_writer = JsonTermWriter::from_field_and_json_path(
-        field,
-        json_path,
-        json_options.is_expand_dots_enabled(),
-        &mut term,
-    );
-    if let Some(term) = convert_to_fast_value_and_get_term(&mut json_term_writer, phrase) {
+    let get_term_with_path =
+        || Term::from_field_json_path(field, json_path, json_options.is_expand_dots_enabled());
+
+    // Try to convert the phrase to a fast value
+    if let Some(term) =
+        convert_to_fast_value_and_append_to_json_term(get_term_with_path(), phrase)
+    {
         logical_literals.push(LogicalLiteral::Term(term));
     }
-    let terms = set_string_and_get_terms(&mut json_term_writer, phrase, &mut text_analyzer);
-    drop(json_term_writer);
-    if terms.len() <= 1 {
-        for (_, term) in terms {
+
+    // Try to tokenize the phrase and create Terms.
+    let mut positions_and_terms = Vec::<(usize, Term)>::new();
+    let mut token_stream = text_analyzer.token_stream(phrase);
+    token_stream.process(&mut |token| {
+        let mut term = get_term_with_path();
+        term.append_type_and_str(&token.text);
+        positions_and_terms.push((token.position, term.clone()));
+    });
+
+    if positions_and_terms.len() <= 1 {
+        for (_, term) in positions_and_terms {
             logical_literals.push(LogicalLiteral::Term(term));
         }
         return Ok(logical_literals);
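
The rewrite above drops the stateful `JsonTermWriter` in favor of a small closure that mints a fresh `Term` already carrying the field, JSON path, and expand-dots flag, then collects `(position, Term)` pairs directly from the analyzer callback. A toy sketch of that collect-via-callback shape, with a whitespace tokenizer standing in for tantivy's `TextAnalyzer` (all names below are illustrative):

```rust
struct Token {
    position: usize,
    text: String,
}

/// Stand-in for `token_stream(..).process(..)`: drives a sink once per token.
fn process(phrase: &str, sink: &mut dyn FnMut(&Token)) {
    for (position, word) in phrase.split_whitespace().enumerate() {
        sink(&Token { position, text: word.to_string() });
    }
}

fn main() {
    let mut positions_and_terms: Vec<(usize, String)> = Vec::new();
    process("quick brown fox", &mut |token| {
        positions_and_terms.push((token.position, token.text.clone()));
    });
    // One collected term becomes a plain term literal; several become a
    // positional phrase, mirroring the `len() <= 1` branch in the diff.
    assert_eq!(positions_and_terms.len(), 3);
}
```
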
@@ -989,7 +994,7 @@ fn generate_literals_for_json_object(
         ));
     }
     logical_literals.push(LogicalLiteral::Phrase {
-        terms,
+        terms: positions_and_terms,
         slop: 0,
         prefix: false,
     });
@@ -174,7 +174,7 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> DocSet for RangeDocSe
     }
 
     fn size_hint(&self) -> u32 {
-        0 // heuristic possible by checking number of hits when fetching a block
+        self.column.num_docs()
     }
 }
 
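
Changing `size_hint` from a hard-coded `0` to `self.column.num_docs()` replaces a placeholder (the old comment admits a real heuristic was still missing) with an honest upper bound. Size hints are what intersection-style execution keys on when choosing which `DocSet` drives the loop, so a scorer that claims size 0 always looks cheapest even when it walks the whole column. A hypothetical illustration of that ordering effect (the names and selection function are made up for this note):

```rust
// Hypothetical: intersections typically iterate the DocSet with the
// smallest size_hint first. With a hard-coded 0, the expensive range
// scan would always be picked as the driving set.
fn pick_driver(hints: &[(&'static str, u32)]) -> &'static str {
    hints
        .iter()
        .min_by_key(|&&(_, hint)| hint)
        .map(|&(name, _)| name)
        .expect("at least one DocSet")
}

fn main() {
    assert_eq!(pick_driver(&[("range_docset", 0), ("term_docset", 42)]), "range_docset");
    assert_eq!(pick_driver(&[("range_docset", 1_000_000), ("term_docset", 42)]), "term_docset");
}
```
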
@@ -477,7 +477,7 @@ mod tests {
     use crate::schema::{
         Field, IntoIpv6Addr, Schema, TantivyDocument, FAST, INDEXED, STORED, TEXT,
    };
-    use crate::{doc, Index, IndexWriter};
+    use crate::{Index, IndexWriter};
 
     #[test]
     fn test_range_query_simple() -> crate::Result<()> {
@@ -185,7 +185,7 @@ mod test {
             Err(crate::TantivyError::InvalidArgument(msg)) => {
                 assert!(msg.contains("error: unclosed group"))
             }
-            res => panic!("unexpected result: {:?}", res),
+            res => panic!("unexpected result: {res:?}"),
         }
     }
 }
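
Several hunks in this compare (this `panic!` and the fuzzy-query error message above) are the same mechanical cleanup: moving positional format arguments to inline captured identifiers, stable since Rust 1.58 and flagged by clippy's `uninlined_format_args` lint. Both spellings produce identical output:

```rust
fn main() {
    let res: Result<(), &str> = Err("unclosed group");
    // Positional argument vs. captured identifier: same formatting result.
    assert_eq!(
        format!("unexpected result: {:?}", res),
        format!("unexpected result: {res:?}"),
    );
}
```
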
@@ -139,7 +139,7 @@ mod tests {
     use crate::collector::{Count, TopDocs};
     use crate::query::{Query, QueryParser, TermQuery};
     use crate::schema::{IndexRecordOption, IntoIpv6Addr, Schema, INDEXED, STORED};
-    use crate::{doc, Index, IndexWriter, Term};
+    use crate::{Index, IndexWriter, Term};
 
     #[test]
     fn search_ip_test() {
@@ -127,6 +127,7 @@ impl Scorer for TermScorer {
 mod tests {
     use proptest::prelude::*;
 
+    use crate::index::SegmentId;
     use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
     use crate::merge_policy::NoMergePolicy;
     use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
@@ -134,8 +135,7 @@ mod tests {
     use crate::query::{Bm25Weight, EnableScoring, Scorer, TermQuery};
     use crate::schema::{IndexRecordOption, Schema, TEXT};
     use crate::{
-        assert_nearly_equals, DocId, DocSet, Index, IndexWriter, Score, Searcher, SegmentId, Term,
-        TERMINATED,
+        assert_nearly_equals, DocId, DocSet, Index, IndexWriter, Score, Searcher, Term, TERMINATED,
     };
 
     #[test]
Some files were not shown because too many files have changed in this diff.