Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-31 06:22:54 +00:00.

Compare commits: index_writ...test_colum (134 commits)
Commit SHA1s in this range:

2ce485b8cc, c3b92a5412, 2f55511064, 08b9fc0b31, 714f363d43, 93ff7365b0, 8151925068, b960e40bc8, 1095c9b073, c0686515a9, 455156f51c, 4143d31865, 0c634adbe1, 2e3641c2ae, b806122c81, e1679f3fb9, 5a80420b10, aa26ff5029, e197b59258, 5b7cca13e5, a79590477e, 6181c1eb5e, 1ee5f90761, 71f3b4e4e3, 8cd7ddc535, 2b76335a95, c6b213d8f0, eea70030bf, 92b5526310, 99a59ad37e, 6a66a71cbb, ff40764204, 047da20b5b, 1417eaf3a7, 4f8493d2de, 8861366137, 0e9fced336, b257b960b3, 4708171a32, b493743f8d, d2955a3fd2, 17d5869ad6, dfa3aed32d, 398817ce7b, 74940e9345, 1e9fc51535, 92c32979d2, b644d78a32, 4e79e11007, 67ebba3c3c, 7ce950f141, 0cffe5fb09, b0e65560a1, ec37295b2f, f6b0cc1aab, 7e41d31c6e, 40aa4abfe5, 2650317622, 6739357314, d57622d54b, f745dbc054, 79b041f81f, 0e16ed9ef7, 88a3275dbb, 1223a87eb2, 48630ceec9, 72002e8a89, 3c9297dd64, 0e04ec3136, 9b7f3a55cf, 1dacdb6c85, 30483310ca, e1d18b5114, 108f30ba23, 5943ee46bd, f95a76293f, 014328e378, 53f2fe1fbe, 9c75942aaf, bff7c58497, 9ebc5ed053, 0b56c88e69, 24841f0b2a, 1a9fc10be9, 07573a7f19, daad2dc151, 054f49dc31, 47009ed2d3, 0aae31d7d7, 9caab45136, 6d9a7b7eb0, 7a2c5804b1, 5319977171, 828632e8c4, 6b59ec6fd5, b60d862150, 4837c7811a, 5a2397d57e, 927b4432c9, 7a0064db1f, 2e7327205d, 7bc5bf78e2, ef603c8c7e, 28dd6b6546, 1dda2bb537, bf6544cf28, ccecf946f7, 19a859d6fd, 83af14caa4, 4feeb2323d, 07bf66a197, 0d4589219b, c2b0469180, 7e1980b218, ecb9a89a9f, 5e06e504e6, 182f58cea6, 337ffadefd, 22aa4daf19, 493f9b2f2a, e246e5765d, 6097235eff, b700c42246, 5b1bf1a993, 041d4fced7, 166fc15239, 514a6e7fef, 82d9127191, 03a1f40767, 1c7c6fd591, b525f653c0, 90586bc1e2, 832f1633de, 38db53c465
.github/workflows/coverage.yml (vendored, 6 changed lines)

```diff
@@ -3,8 +3,6 @@ name: Coverage
 on:
   push:
     branches: [main]
   pull_request:
     branches: [main]

 # Ensures that we cancel running jobs for the same PR / same workflow.
 concurrency:
@@ -17,11 +15,11 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Install Rust
-        run: rustup toolchain install nightly-2023-09-10 --profile minimal --component llvm-tools-preview
+        run: rustup toolchain install nightly-2024-04-10 --profile minimal --component llvm-tools-preview
       - uses: Swatinem/rust-cache@v2
       - uses: taiki-e/install-action@cargo-llvm-cov
       - name: Generate code coverage
-        run: cargo +nightly-2023-09-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
+        run: cargo +nightly-2024-04-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v3
         continue-on-error: true
```
.github/workflows/test.yml (vendored, 7 changed lines)

```diff
@@ -39,6 +39,13 @@ jobs:
       - name: Check Formatting
         run: cargo +nightly fmt --all -- --check

+      - name: Check Stable Compilation
+        run: cargo build --all-features
+
+      - name: Check Bench Compilation
+        run: cargo +nightly bench --no-run --profile=dev --all-features
+
       - uses: actions-rs/clippy-check@v1
         with:
```
CHANGELOG.md (68 changed lines)

```diff
@@ -1,3 +1,71 @@
+Tantivy 0.22
+================================
+
+Tantivy 0.22 can read indices created with Tantivy 0.21.
+
+#### Bugfixes
+- Fix null byte handling in JSON paths (null bytes in JSON keys caused a panic during indexing) [#2345](https://github.com/quickwit-oss/tantivy/pull/2345)(@PSeitz)
+- Fix bug that can cause `get_docids_for_value_range` to panic. [#2295](https://github.com/quickwit-oss/tantivy/pull/2295)(@fulmicoton)
+- Avoid single-document segments by increasing the minimum indexing memory to 15MB [#2176](https://github.com/quickwit-oss/tantivy/pull/2176)(@PSeitz)
+- Fix merge panic for JSON fields [#2284](https://github.com/quickwit-oss/tantivy/pull/2284)(@PSeitz)
+- Fix bug occurring when merging JSON objects indexed with positions. [#2253](https://github.com/quickwit-oss/tantivy/pull/2253)(@fulmicoton)
+- Fix empty DateHistogram gap bug [#2183](https://github.com/quickwit-oss/tantivy/pull/2183)(@PSeitz)
+- Fix range query end check (fields with fewer than one value per doc are affected) [#2226](https://github.com/quickwit-oss/tantivy/pull/2226)(@PSeitz)
+- Handle exclusive out-of-bounds ranges on fast field range queries [#2174](https://github.com/quickwit-oss/tantivy/pull/2174)(@PSeitz)
+
+#### Breaking API Changes
+- Rename ReloadPolicy OnCommit to OnCommitWithDelay [#2235](https://github.com/quickwit-oss/tantivy/pull/2235)(@giovannicuccu)
+- Move exports from the root into modules [#2220](https://github.com/quickwit-oss/tantivy/pull/2220)(@PSeitz)
+- Accept field name instead of `Field` in FilterCollector [#2196](https://github.com/quickwit-oss/tantivy/pull/2196)(@PSeitz)
+- Remove deprecated IntOptions and DateTime [#2353](https://github.com/quickwit-oss/tantivy/pull/2353)(@PSeitz)
+
+#### Features/Improvements
+- Tantivy documents as a trait: index data directly without converting to tantivy types first [#2071](https://github.com/quickwit-oss/tantivy/pull/2071)(@ChillFish8)
+- Encode some parts of the posting list as -1 instead of direct values (smaller inverted indices) [#2185](https://github.com/quickwit-oss/tantivy/pull/2185)(@trinity-1686a)
+- **Aggregation**
+    - Support deserializing f64 from string [#2311](https://github.com/quickwit-oss/tantivy/pull/2311)(@PSeitz)
+    - Add a top_hits aggregator [#2198](https://github.com/quickwit-oss/tantivy/pull/2198)(@ditsuke)
+    - Support bool type in term aggregation [#2318](https://github.com/quickwit-oss/tantivy/pull/2318)(@PSeitz)
+    - Support IP addresses in term aggregation [#2319](https://github.com/quickwit-oss/tantivy/pull/2319)(@PSeitz)
+    - Support date type in term aggregation [#2172](https://github.com/quickwit-oss/tantivy/pull/2172)(@PSeitz)
+    - Support escaped dot when addressing a field [#2250](https://github.com/quickwit-oss/tantivy/pull/2250)(@PSeitz)
+
+- Add ExistsQuery to match documents that have a value [#2160](https://github.com/quickwit-oss/tantivy/pull/2160)(@imotov)
+- Expose TopDocs::order_by_u64_field again [#2282](https://github.com/quickwit-oss/tantivy/pull/2282)(@ditsuke)
+
+- **Memory/Performance**
+    - Faster TopN: replace BinaryHeap with TopNComputer [#2186](https://github.com/quickwit-oss/tantivy/pull/2186)(@PSeitz)
+    - Reduce the number of allocations during indexing [#2257](https://github.com/quickwit-oss/tantivy/pull/2257)(@PSeitz)
+    - Less memory while indexing: docid deltas while indexing [#2249](https://github.com/quickwit-oss/tantivy/pull/2249)(@PSeitz)
+    - Faster indexing: use term hashmap in fastfield [#2243](https://github.com/quickwit-oss/tantivy/pull/2243)(@PSeitz)
+    - Term hashmap: remove copy in is_empty, unused unordered_id [#2229](https://github.com/quickwit-oss/tantivy/pull/2229)(@PSeitz)
+    - Add method to fetch a block of first values in columnar [#2330](https://github.com/quickwit-oss/tantivy/pull/2330)(@PSeitz)
+    - Faster aggregations: add fast path for full columns in fetch_block [#2328](https://github.com/quickwit-oss/tantivy/pull/2328)(@PSeitz)
+    - Faster sstable loading: use fst for the sstable index [#2268](https://github.com/quickwit-oss/tantivy/pull/2268)(@trinity-1686a)
+
+- **QueryParser**
+    - Allow newline where we allow space in the query parser [#2302](https://github.com/quickwit-oss/tantivy/pull/2302)(@trinity-1686a)
+    - Allow some mixing of occur and bool in the strict query parser [#2323](https://github.com/quickwit-oss/tantivy/pull/2323)(@trinity-1686a)
+    - Handle * inside a term in the lenient query parser [#2228](https://github.com/quickwit-oss/tantivy/pull/2228)(@trinity-1686a)
+    - Add support for exists query syntax in the query parser [#2170](https://github.com/quickwit-oss/tantivy/pull/2170)(@trinity-1686a)
+- Add shared search executor [#2312](https://github.com/quickwit-oss/tantivy/pull/2312)(@MochiXu)
+- Truncate keys to u16::MAX in term hashmap [#2299](https://github.com/quickwit-oss/tantivy/pull/2299)(@PSeitz)
+- Report if a term matched when warming up a posting list [#2309](https://github.com/quickwit-oss/tantivy/pull/2309)(@trinity-1686a)
+- Support JSON fields in FuzzyTermQuery [#2173](https://github.com/quickwit-oss/tantivy/pull/2173)(@PingXia-at)
+- Read the list of fields encoded in the term dictionary for JSON fields [#2184](https://github.com/quickwit-oss/tantivy/pull/2184)(@PSeitz)
+- Add collect_block to BoxableSegmentCollector [#2331](https://github.com/quickwit-oss/tantivy/pull/2331)(@PSeitz)
+- Expose collect_block buffer size [#2326](https://github.com/quickwit-oss/tantivy/pull/2326)(@PSeitz)
+- Forward regex parser errors [#2288](https://github.com/quickwit-oss/tantivy/pull/2288)(@adamreichold)
+- Make FacetCounts defaultable and cloneable. [#2322](https://github.com/quickwit-oss/tantivy/pull/2322)(@adamreichold)
+- Derive Debug for SchemaBuilder [#2254](https://github.com/quickwit-oss/tantivy/pull/2254)(@GodTamIt)
+- Add missing inlines to tantivy options [#2245](https://github.com/quickwit-oss/tantivy/pull/2245)(@PSeitz)
+
+Tantivy 0.21.1
+================================
+#### Bugfixes
+- Range queries on fast fields with fewer values on that field than documents had an invalid end condition, leading to missing results. [#2226](https://github.com/quickwit-oss/tantivy/issues/2226)(@appaquet @PSeitz)
+- Increase the minimum memory budget from 3MB to 15MB to avoid single-doc segments (API fix). [#2176](https://github.com/quickwit-oss/tantivy/issues/2176)(@PSeitz)
+
 Tantivy 0.21
 ================================
 #### Bugfixes
```
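As a quick illustration of two of the entries above, the new `ExistsQuery` (#2160) and the 15MB minimum indexing budget (#2176), here is a minimal sketch. Field names are illustrative; the `new_exists_query` constructor is the form introduced in 0.22.

```rust
use tantivy::collector::Count;
use tantivy::query::ExistsQuery;
use tantivy::schema::{Schema, FAST, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    // ExistsQuery checks for the presence of a value, so `rating` is declared FAST.
    let rating = schema_builder.add_u64_field("rating", FAST);
    let index = Index::create_in_ram(schema_builder.build());

    // 50MB budget; anything below the new 15MB minimum (#2176) is rejected.
    let mut writer: IndexWriter = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "with rating", rating => 5u64))?;
    writer.add_document(doc!(title => "without rating"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // Count only the documents that actually carry a value for `rating`.
    let query = ExistsQuery::new_exists_query("rating".to_string());
    let count = searcher.search(&query, &Count)?;
    assert_eq!(count, 1);
    Ok(())
}
```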
Cargo.toml (82 changed lines)

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.21.0"
+version = "0.23.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -11,78 +11,84 @@ repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2021"
-rust-version = "1.62"
+rust-version = "1.63"
 exclude = ["benches/*.json", "benches/*.txt"]

 [dependencies]
-oneshot = "0.1.5"
-base64 = "0.21.0"
+oneshot = "0.1.7"
+base64 = "0.22.0"
 byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
-regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
+regex = { version = "1.5.5", default-features = false, features = [
+    "std",
+    "unicode",
+] }
 aho-corasick = "1.0"
-tantivy-fst = "0.4.0"
-memmap2 = { version = "0.7.1", optional = true }
+tantivy-fst = "0.5"
+memmap2 = { version = "0.9.0", optional = true }
 lz4_flex = { version = "0.11", default-features = false, optional = true }
-zstd = { version = "0.12", optional = true, default-features = false }
+zstd = { version = "0.13", optional = true, default-features = false }
 tempfile = { version = "3.3.0", optional = true }
 log = "0.4.16"
 serde = { version = "1.0.136", features = ["derive"] }
 serde_json = "1.0.79"
 num_cpus = "1.13.1"
-fs4 = { version = "0.6.3", optional = true }
+fs4 = { version = "0.8.0", optional = true }
 levenshtein_automata = "0.2.1"
 uuid = { version = "1.0.0", features = ["v4", "serde"] }
 crossbeam-channel = "0.5.4"
 rust-stemmers = "1.2.0"
 downcast-rs = "1.2.0"
-bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
-census = "0.4.0"
+bitpacking = { version = "0.9.2", default-features = false, features = [
+    "bitpacker4x",
+] }
+census = "0.4.2"
 rustc-hash = "1.1.0"
 thiserror = "1.0.30"
 htmlescape = "0.3.1"
 fail = { version = "0.5.0", optional = true }
 murmurhash32 = "0.3.0"
 time = { version = "0.3.10", features = ["serde-well-known"] }
 smallvec = "1.8.0"
 rayon = "1.5.2"
-lru = "0.11.0"
+lru = "0.12.0"
 fastdivide = "0.4.0"
-itertools = "0.11.0"
+itertools = "0.13.0"
 measure_time = "0.8.2"
 async-trait = "0.1.53"
 arc-swap = "1.5.0"

-columnar = { version= "0.2", path="./columnar", package ="tantivy-columnar" }
-sstable = { version= "0.2", path="./sstable", package ="tantivy-sstable", optional = true }
-stacker = { version= "0.2", path="./stacker", package ="tantivy-stacker" }
-query-grammar = { version= "0.21.0", path="./query-grammar", package = "tantivy-query-grammar" }
-tantivy-bitpacker = { version= "0.5", path="./bitpacker" }
-common = { version= "0.6", path = "./common/", package = "tantivy-common" }
-tokenizer-api = { version= "0.2", path="./tokenizer-api", package="tantivy-tokenizer-api" }
-sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
+columnar = { version = "0.3", path = "./columnar", package = "tantivy-columnar" }
+sstable = { version = "0.3", path = "./sstable", package = "tantivy-sstable", optional = true }
+stacker = { version = "0.3", path = "./stacker", package = "tantivy-stacker" }
+query-grammar = { version = "0.22.0", path = "./query-grammar", package = "tantivy-query-grammar" }
+tantivy-bitpacker = { version = "0.6", path = "./bitpacker" }
+common = { version = "0.7", path = "./common/", package = "tantivy-common" }
+tokenizer-api = { version = "0.3", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
+sketches-ddsketch = { version = "0.3.0", features = ["use_serde"] }
 futures-util = { version = "0.3.28", optional = true }
 fnv = "1.0.7"

 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"

 [dev-dependencies]
+binggan = "0.8.0"
 rand = "0.8.5"
 maplit = "1.0.2"
 matches = "0.1.9"
 pretty_assertions = "1.2.1"
 proptest = "1.0.0"
 test-log = "0.2.10"
 env_logger = "0.10.0"
 futures = "0.3.21"
 paste = "1.0.11"
 more-asserts = "0.3.1"
 rand_distr = "0.4.3"
 time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
 postcard = { version = "1.0.4", features = [
     "use-std",
 ], default-features = false }

 [target.'cfg(not(windows))'.dev-dependencies]
-criterion = "0.5"
-pprof = { git = "https://github.com/PSeitz/pprof-rs/", rev = "53af24b", features = ["flamegraph", "criterion"] } # temp fork that works with criterion 0.5
+criterion = { version = "0.5", default-features = false }

 [dev-dependencies.fail]
 version = "0.5.0"
@@ -111,12 +117,26 @@ lz4-compression = ["lz4_flex"]
 zstd-compression = ["zstd"]

 failpoints = ["fail", "fail/failpoints"]
-unstable = [] # useful for benches.
+unstable = []  # useful for benches.

 quickwit = ["sstable", "futures-util"]

+# Compares only the hash of a string when indexing data.
+# Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.
+# Uses 64bit ahash.
+compare_hash_only = ["stacker/compare_hash_only"]

 [workspace]
-members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
+members = [
+    "query-grammar",
+    "bitpacker",
+    "common",
+    "ownedbytes",
+    "stacker",
+    "sstable",
+    "tokenizer-api",
+    "columnar",
+]

 # Following the "fail" crate best practises, we isolate
 # tests that define specific behavior in fail check points
@@ -137,3 +157,7 @@ harness = false
 [[bench]]
 name = "index-bench"
 harness = false

+[[bench]]
+name = "agg_bench"
+harness = false
```
README.md (33 changed lines)

````diff
@@ -5,19 +5,18 @@
 [](https://opensource.org/licenses/MIT)
 [](https://crates.io/crates/tantivy)

+<img src="https://tantivy-search.github.io/logo/tantivy-logo.png" alt="Tantivy, the fastest full-text search engine library written in Rust" height="250">

-**Tantivy** is a **full-text search engine library** written in Rust.
+## Fast full-text search engine library written in Rust

-It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
-an off-the-shelf search engine server, but rather a crate that can be used
-to build such a search engine.
-**If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our distributed search engine built on top of Tantivy.**
+Tantivy is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
+an off-the-shelf search engine server, but rather a crate that can be used to build such a search engine.
+
+Tantivy is, in fact, strongly inspired by Lucene's design.
+
+If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.

-# Benchmark
+## Benchmark

 The following [benchmark](https://tantivy-search.github.io/bench/) breakdowns
 performance for different types of queries/collections.
@@ -28,7 +27,7 @@ Your mileage WILL vary depending on the nature of queries and their load.

 Details about the benchmark can be found at this [repository](https://github.com/quickwit-oss/search-benchmark-game).

-# Features
+## Features

 - Full-text search
 - Configurable tokenizer (stemming available for 17 Latin languages) with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
@@ -54,11 +53,11 @@ Details about the benchmark can be found at this [repository](https://github.com
 - Searcher Warmer API
 - Cheesy logo with a horse

-## Non-features
+### Non-features

 Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).

-# Getting started
+## Getting started

 Tantivy works on stable Rust and supports Linux, macOS, and Windows.

@@ -68,7 +67,7 @@ index documents, and search via the CLI or a small server with a REST API.
 It walks you through getting a Wikipedia search engine up and running in a few minutes.
 - [Reference doc for the last released version](https://docs.rs/tantivy/)

-# How can I support this project?
+## How can I support this project?

 There are many ways to support this project.

@@ -79,16 +78,16 @@ There are many ways to support this project.
 - Contribute code (you can join [our Discord server](https://discord.gg/MT27AG5EVE))
 - Talk about Tantivy around you

-# Contributing code
+## Contributing code

 We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
 Feel free to update CHANGELOG.md with your contribution.

-## Tokenizer
+### Tokenizer

 When implementing a tokenizer for tantivy depend on the `tantivy-tokenizer-api` crate.

-## Clone and build locally
+### Clone and build locally

 Tantivy compiles on stable Rust.
 To check out and run tests, you can simply run:
@@ -99,7 +98,7 @@ cd tantivy
 cargo test
 ```

-# Companies Using Tantivy
+## Companies Using Tantivy

 <p align="left">
 <img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
@@ -111,7 +110,7 @@ cargo test
 <img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
 </p>

-# FAQ
+## FAQ

 ### Can I use Tantivy in other languages?
````
benches/agg_bench.rs (new file, 419 lines)

```rust
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand_distr::Distribution;
use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use tantivy::{doc, Index, Term};

#[global_allocator]
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;

/// Mini macro to register a function via its name
/// runner.register("average_u64", move |index| average_u64(index));
macro_rules! register {
    ($runner:expr, $func:ident) => {
        $runner.register(stringify!($func), move |index| $func(index))
    };
}

fn main() {
    let inputs = vec![
        ("full", get_test_index_bench(Cardinality::Full).unwrap()),
        (
            "dense",
            get_test_index_bench(Cardinality::OptionalDense).unwrap(),
        ),
        (
            "sparse",
            get_test_index_bench(Cardinality::OptionalSparse).unwrap(),
        ),
        (
            "multivalue",
            get_test_index_bench(Cardinality::Multivalued).unwrap(),
        ),
    ];

    bench_agg(InputGroup::new_with_inputs(inputs));
}

fn bench_agg(mut group: InputGroup<Index>) {
    group.set_alloc(GLOBAL); // Set the peak mem allocator. This will enable peak memory reporting.
    register!(group, average_u64);
    register!(group, average_f64);
    register!(group, average_f64_u64);
    register!(group, stats_f64);
    register!(group, extendedstats_f64);
    register!(group, percentiles_f64);
    register!(group, terms_few);
    register!(group, terms_many);
    register!(group, terms_many_order_by_term);
    register!(group, terms_many_with_top_hits);
    register!(group, terms_many_with_avg_sub_agg);
    register!(group, terms_many_json_mixed_type_with_sub_agg_card);
    register!(group, range_agg);
    register!(group, range_agg_with_avg_sub_agg);
    register!(group, range_agg_with_term_agg_few);
    register!(group, range_agg_with_term_agg_many);
    register!(group, histogram);
    register!(group, histogram_hard_bounds);
    register!(group, histogram_with_avg_sub_agg);
    register!(group, avg_and_range_with_avg_sub_agg);

    group.run();
}

fn exec_term_with_agg(index: &Index, agg_req: serde_json::Value) {
    let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();

    let reader = index.reader().unwrap();
    let text_field = reader.searcher().schema().get_field("text").unwrap();
    let term_query = TermQuery::new(
        Term::from_field_text(text_field, "cool"),
        IndexRecordOption::Basic,
    );
    let collector = get_collector(agg_req);
    let searcher = reader.searcher();
    black_box(searcher.search(&term_query, &collector).unwrap());
}

fn average_u64(index: &Index) {
    let agg_req = json!({
        "average": { "avg": { "field": "score", } }
    });
    exec_term_with_agg(index, agg_req)
}
fn average_f64(index: &Index) {
    let agg_req = json!({
        "average": { "avg": { "field": "score_f64", } }
    });
    exec_term_with_agg(index, agg_req)
}
fn average_f64_u64(index: &Index) {
    let agg_req = json!({
        "average_f64": { "avg": { "field": "score_f64" } },
        "average": { "avg": { "field": "score" } },
    });
    exec_term_with_agg(index, agg_req)
}
fn stats_f64(index: &Index) {
    let agg_req = json!({
        "average_f64": { "stats": { "field": "score_f64", } }
    });
    exec_term_with_agg(index, agg_req)
}
fn extendedstats_f64(index: &Index) {
    let agg_req = json!({
        "extendedstats_f64": { "extended_stats": { "field": "score_f64", } }
    });
    exec_term_with_agg(index, agg_req)
}
fn percentiles_f64(index: &Index) {
    let agg_req = json!({
        "mypercentiles": {
            "percentiles": {
                "field": "score_f64",
                "percents": [ 95, 99, 99.9 ]
            }
        }
    });
    execute_agg(index, agg_req);
}
fn terms_few(index: &Index) {
    let agg_req = json!({
        "my_texts": { "terms": { "field": "text_few_terms" } },
    });
    execute_agg(index, agg_req);
}
fn terms_many(index: &Index) {
    let agg_req = json!({
        "my_texts": { "terms": { "field": "text_many_terms" } },
    });
    execute_agg(index, agg_req);
}
fn terms_many_order_by_term(index: &Index) {
    let agg_req = json!({
        "my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
    });
    execute_agg(index, agg_req);
}
fn terms_many_with_top_hits(index: &Index) {
    let agg_req = json!({
        "my_texts": {
            "terms": { "field": "text_many_terms" },
            "aggs": {
                "top_hits": { "top_hits":
                    {
                        "sort": [
                            { "score": "desc" }
                        ],
                        "size": 2,
                        "doc_value_fields": ["score_f64"]
                    }
                }
            }
        },
    });
    execute_agg(index, agg_req);
}
fn terms_many_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "my_texts": {
            "terms": { "field": "text_many_terms" },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        },
    });
    execute_agg(index, agg_req);
}
fn terms_many_json_mixed_type_with_sub_agg_card(index: &Index) {
    let agg_req = json!({
        "my_texts": {
            "terms": { "field": "json.mixed_type" },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        },
    });
    execute_agg(index, agg_req);
}

fn execute_agg(index: &Index, agg_req: serde_json::Value) {
    let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
    let collector = get_collector(agg_req);

    let reader = index.reader().unwrap();
    let searcher = reader.searcher();
    black_box(searcher.search(&AllQuery, &collector).unwrap());
}
fn range_agg(index: &Index) {
    let agg_req = json!({
        "range_f64": { "range": { "field": "score_f64", "ranges": [
            { "from": 3, "to": 7000 },
            { "from": 7000, "to": 20000 },
            { "from": 20000, "to": 30000 },
            { "from": 30000, "to": 40000 },
            { "from": 40000, "to": 50000 },
            { "from": 50000, "to": 60000 }
        ] } },
    });
    execute_agg(index, agg_req);
}
fn range_agg_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 30000 },
                    { "from": 30000, "to": 40000 },
                    { "from": 40000, "to": 50000 },
                    { "from": 50000, "to": 60000 }
                ]
            },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        },
    });
    execute_agg(index, agg_req);
}

fn range_agg_with_term_agg_few(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 30000 },
                    { "from": 30000, "to": 40000 },
                    { "from": 40000, "to": 50000 },
                    { "from": 50000, "to": 60000 }
                ]
            },
            "aggs": {
                "my_texts": { "terms": { "field": "text_few_terms" } },
            }
        },
    });
    execute_agg(index, agg_req);
}
fn range_agg_with_term_agg_many(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 30000 },
                    { "from": 30000, "to": 40000 },
                    { "from": 40000, "to": 50000 },
                    { "from": 50000, "to": 60000 }
                ]
            },
            "aggs": {
                "my_texts": { "terms": { "field": "text_many_terms" } },
            }
        },
    });
    execute_agg(index, agg_req);
}
fn histogram(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "histogram": {
                "field": "score_f64",
                "interval": 100 // 1000 buckets
            },
        }
    });
    execute_agg(index, agg_req);
}
fn histogram_hard_bounds(index: &Index) {
    let agg_req = json!({
        "rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
    });
    execute_agg(index, agg_req);
}
fn histogram_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "histogram": { "field": "score_f64", "interval": 100 },
            "aggs": {
                "average_f64": { "avg": { "field": "score_f64" } }
            }
        }
    });
    execute_agg(index, agg_req);
}
fn avg_and_range_with_avg_sub_agg(index: &Index) {
    let agg_req = json!({
        "rangef64": {
            "range": {
                "field": "score_f64",
                "ranges": [
                    { "from": 3, "to": 7000 },
                    { "from": 7000, "to": 20000 },
                    { "from": 20000, "to": 60000 }
                ]
            },
            "aggs": {
                "average_in_range": { "avg": { "field": "score" } }
            }
        },
        "average": { "avg": { "field": "score" } }
    });
    execute_agg(index, agg_req);
}

#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Cardinality {
    /// All documents contain exactly one value.
    /// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
    #[default]
    Full = 0,
    /// All documents contain at most one value.
    OptionalDense = 1,
    /// All documents may contain any number of values.
    Multivalued = 2,
    /// 1 / 20 documents has a value
    OptionalSparse = 3,
}

fn get_collector(agg_req: Aggregations) -> AggregationCollector {
    AggregationCollector::from_aggs(agg_req, Default::default())
}

fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
    let mut schema_builder = Schema::builder();
    let text_fieldtype = tantivy::schema::TextOptions::default()
        .set_indexing_options(
            TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
        )
        .set_stored();
    let text_field = schema_builder.add_text_field("text", text_fieldtype);
    let json_field = schema_builder.add_json_field("json", FAST);
    let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
    let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
    let score_fieldtype = tantivy::schema::NumericOptions::default().set_fast();
    let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
    let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
    let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
    let index = Index::create_from_tempdir(schema_builder.build())?;
    let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];

    let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();

    let many_terms_data = (0..150_000)
        .map(|num| format!("author{num}"))
        .collect::<Vec<_>>();
    {
        let mut rng = StdRng::from_seed([1u8; 32]);
        let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
        // To make the different test cases comparable we just change one doc to force the
        // cardinality
        if cardinality == Cardinality::OptionalDense {
            index_writer.add_document(doc!())?;
        }
        if cardinality == Cardinality::Multivalued {
            index_writer.add_document(doc!(
                json_field => json!({"mixed_type": 10.0}),
                json_field => json!({"mixed_type": 10.0}),
                text_field => "cool",
                text_field => "cool",
                text_field_many_terms => "cool",
                text_field_many_terms => "cool",
                text_field_few_terms => "cool",
                text_field_few_terms => "cool",
                score_field => 1u64,
                score_field => 1u64,
                score_field_f64 => lg_norm.sample(&mut rng),
                score_field_f64 => lg_norm.sample(&mut rng),
                score_field_i64 => 1i64,
                score_field_i64 => 1i64,
            ))?;
        }
        let mut doc_with_value = 1_000_000;
        if cardinality == Cardinality::OptionalSparse {
            doc_with_value /= 20;
        }
        let _val_max = 1_000_000.0;
        for _ in 0..doc_with_value {
            let val: f64 = rng.gen_range(0.0..1_000_000.0);
            let json = if rng.gen_bool(0.1) {
                // 10% are numeric values
                json!({ "mixed_type": val })
            } else {
                json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
            };
            index_writer.add_document(doc!(
                text_field => "cool",
                json_field => json,
                text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
                text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
                score_field => val as u64,
                score_field_f64 => lg_norm.sample(&mut rng),
                score_field_i64 => val as i64,
            ))?;
            if cardinality == Cardinality::OptionalSparse {
                for _ in 0..20 {
                    index_writer.add_document(doc!(text_field => "cool"))?;
                }
            }
        }
        // writing the segment
        index_writer.commit()?;
    }

    Ok(index)
}
```
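With the matching `[[bench]]` entry added to Cargo.toml above (`name = "agg_bench"`, `harness = false`), this file runs via `cargo bench --bench agg_bench`. Binggan supplies its own runner in `main`, which is why the default libtest harness is disabled, and the `INSTRUMENTED_SYSTEM` global allocator is what enables the peak-memory reporting that `group.set_alloc(GLOBAL)` turns on.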
```diff
@@ -1,14 +1,98 @@
-use criterion::{criterion_group, criterion_main, Criterion, Throughput};
-use pprof::criterion::{Output, PProfProfiler};
-use tantivy::schema::{FAST, INDEXED, STORED, STRING, TEXT};
-use tantivy::Index;
+use criterion::{criterion_group, criterion_main, BatchSize, Bencher, Criterion, Throughput};
+use tantivy::schema::{TantivyDocument, FAST, INDEXED, STORED, STRING, TEXT};
+use tantivy::{tokenizer, Index, IndexWriter};

 const HDFS_LOGS: &str = include_str!("hdfs.json");
 const GH_LOGS: &str = include_str!("gh.json");
 const WIKI: &str = include_str!("wiki.json");

-fn get_lines(input: &str) -> Vec<&str> {
-    input.trim().split('\n').collect()
+fn benchmark(
+    b: &mut Bencher,
+    input: &str,
+    schema: tantivy::schema::Schema,
+    commit: bool,
+    parse_json: bool,
+    is_dynamic: bool,
+) {
+    if is_dynamic {
+        benchmark_dynamic_json(b, input, schema, commit, parse_json)
+    } else {
+        _benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
+            TantivyDocument::parse_json(schema, doc_json).unwrap()
+        })
+    }
+}
+
+fn get_index(schema: tantivy::schema::Schema) -> Index {
+    let mut index = Index::create_in_ram(schema.clone());
+    let ff_tokenizer_manager = tokenizer::TokenizerManager::default();
+    ff_tokenizer_manager.register(
+        "raw",
+        tokenizer::TextAnalyzer::builder(tokenizer::RawTokenizer::default())
+            .filter(tokenizer::RemoveLongFilter::limit(255))
+            .build(),
+    );
+    index.set_fast_field_tokenizers(ff_tokenizer_manager.clone());
+    index
+}
+
+fn _benchmark(
+    b: &mut Bencher,
+    input: &str,
+    schema: tantivy::schema::Schema,
+    commit: bool,
+    include_json_parsing: bool,
+    create_doc: impl Fn(&tantivy::schema::Schema, &str) -> TantivyDocument,
+) {
+    if include_json_parsing {
+        let lines: Vec<&str> = input.trim().split('\n').collect();
+        b.iter(|| {
+            let index = get_index(schema.clone());
+            let mut index_writer: IndexWriter =
+                index.writer_with_num_threads(1, 100_000_000).unwrap();
+            for doc_json in &lines {
+                let doc = create_doc(&schema, doc_json);
+                index_writer.add_document(doc).unwrap();
+            }
+            if commit {
+                index_writer.commit().unwrap();
+            }
+        })
+    } else {
+        let docs: Vec<_> = input
+            .trim()
+            .split('\n')
+            .map(|doc_json| create_doc(&schema, doc_json))
+            .collect();
+        b.iter_batched(
+            || docs.clone(),
+            |docs| {
+                let index = get_index(schema.clone());
+                let mut index_writer: IndexWriter =
+                    index.writer_with_num_threads(1, 100_000_000).unwrap();
+                for doc in docs {
+                    index_writer.add_document(doc).unwrap();
+                }
+                if commit {
+                    index_writer.commit().unwrap();
+                }
+            },
+            BatchSize::SmallInput,
+        )
+    }
 }
+
+fn benchmark_dynamic_json(
+    b: &mut Bencher,
+    input: &str,
+    schema: tantivy::schema::Schema,
+    commit: bool,
+    parse_json: bool,
+) {
+    let json_field = schema.get_field("json").unwrap();
+    _benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
+        let json_val: serde_json::Value = serde_json::from_str(doc_json).unwrap();
+        tantivy::doc!(json_field=>json_val)
+    })
+}

 pub fn hdfs_index_benchmark(c: &mut Criterion) {
@@ -19,7 +103,14 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
         schema_builder.add_text_field("severity", STRING);
         schema_builder.build()
     };
-    let schema_with_store = {
+    let schema_only_fast = {
+        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
+        schema_builder.add_u64_field("timestamp", FAST);
+        schema_builder.add_text_field("body", FAST);
+        schema_builder.add_text_field("severity", FAST);
+        schema_builder.build()
+    };
+    let _schema_with_store = {
         let mut schema_builder = tantivy::schema::SchemaBuilder::new();
         schema_builder.add_u64_field("timestamp", INDEXED | STORED);
         schema_builder.add_text_field("body", TEXT | STORED);
@@ -28,74 +119,40 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
     };
     let dynamic_schema = {
         let mut schema_builder = tantivy::schema::SchemaBuilder::new();
-        schema_builder.add_json_field("json", TEXT);
+        schema_builder.add_json_field("json", TEXT | FAST);
         schema_builder.build()
     };

     let mut group = c.benchmark_group("index-hdfs");
     group.throughput(Throughput::Bytes(HDFS_LOGS.len() as u64));
     group.sample_size(20);
-    group.bench_function("index-hdfs-no-commit", |b| {
-        let lines = get_lines(HDFS_LOGS);
-        b.iter(|| {
-            let index = Index::create_in_ram(schema.clone());
-            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let doc = schema.parse_document(doc_json).unwrap();
-                index_writer.add_document(doc).unwrap();
-            }
-        })
-    });
-    group.bench_function("index-hdfs-with-commit", |b| {
-        let lines = get_lines(HDFS_LOGS);
-        b.iter(|| {
-            let index = Index::create_in_ram(schema.clone());
-            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let doc = schema.parse_document(doc_json).unwrap();
-                index_writer.add_document(doc).unwrap();
-            }
-            index_writer.commit().unwrap();
-        })
-    });
-    group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
-        let lines = get_lines(HDFS_LOGS);
-        b.iter(|| {
-            let index = Index::create_in_ram(schema_with_store.clone());
-            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let doc = schema.parse_document(doc_json).unwrap();
-                index_writer.add_document(doc).unwrap();
-            }
-        })
-    });
-    group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
-        let lines = get_lines(HDFS_LOGS);
-        b.iter(|| {
-            let index = Index::create_in_ram(schema_with_store.clone());
-            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let doc = schema.parse_document(doc_json).unwrap();
-                index_writer.add_document(doc).unwrap();
-            }
-            index_writer.commit().unwrap();
-        })
-    });
-    group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
-        let lines = get_lines(HDFS_LOGS);
-        b.iter(|| {
-            let index = Index::create_in_ram(dynamic_schema.clone());
-            let json_field = dynamic_schema.get_field("json").unwrap();
-            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let json_val: serde_json::Map<String, serde_json::Value> =
-                    serde_json::from_str(doc_json).unwrap();
-                let doc = tantivy::doc!(json_field=>json_val);
-                index_writer.add_document(doc).unwrap();
-            }
-            index_writer.commit().unwrap();
-        })
-    });
+
+    let benches = [
+        ("only-indexed-".to_string(), schema, false),
+        //("stored-".to_string(), _schema_with_store, false),
+        ("only-fast-".to_string(), schema_only_fast, false),
+        ("dynamic-".to_string(), dynamic_schema, true),
+    ];
+
+    for (prefix, schema, is_dynamic) in benches {
+        for commit in [false, true] {
+            let suffix = if commit { "with-commit" } else { "no-commit" };
+            {
+                let parse_json = false;
+                // for parse_json in [false, true] {
+                let suffix = if parse_json {
+                    format!("{suffix}-with-json-parsing")
+                } else {
+                    suffix.to_string()
+                };
+
+                let bench_name = format!("{prefix}{suffix}");
+                group.bench_function(bench_name, |b| {
+                    benchmark(b, HDFS_LOGS, schema.clone(), commit, parse_json, is_dynamic)
+                });
+            }
+        }
+    }
 }

 pub fn gh_index_benchmark(c: &mut Criterion) {
@@ -104,38 +161,24 @@ pub fn gh_index_benchmark(c: &mut Criterion) {
         schema_builder.add_json_field("json", TEXT | FAST);
         schema_builder.build()
     };
+    let dynamic_schema_fast = {
+        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
+        schema_builder.add_json_field("json", FAST);
+        schema_builder.build()
+    };

     let mut group = c.benchmark_group("index-gh");
     group.throughput(Throughput::Bytes(GH_LOGS.len() as u64));

     group.bench_function("index-gh-no-commit", |b| {
-        let lines = get_lines(GH_LOGS);
-        b.iter(|| {
-            let json_field = dynamic_schema.get_field("json").unwrap();
-            let index = Index::create_in_ram(dynamic_schema.clone());
-            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let json_val: serde_json::Map<String, serde_json::Value> =
-                    serde_json::from_str(doc_json).unwrap();
-                let doc = tantivy::doc!(json_field=>json_val);
-                index_writer.add_document(doc).unwrap();
-            }
-        })
+        benchmark_dynamic_json(b, GH_LOGS, dynamic_schema.clone(), false, false)
     });
-    group.bench_function("index-gh-with-commit", |b| {
-        let lines = get_lines(GH_LOGS);
-        b.iter(|| {
-            let json_field = dynamic_schema.get_field("json").unwrap();
-            let index = Index::create_in_ram(dynamic_schema.clone());
-            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let json_val: serde_json::Map<String, serde_json::Value> =
-                    serde_json::from_str(doc_json).unwrap();
-                let doc = tantivy::doc!(json_field=>json_val);
-                index_writer.add_document(doc).unwrap();
-            }
-            index_writer.commit().unwrap();
-        })
+    group.bench_function("index-gh-fast", |b| {
+        benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), false, false)
+    });
+
+    group.bench_function("index-gh-fast-with-commit", |b| {
+        benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), true, false)
     });
 }

@@ -150,33 +193,10 @@ pub fn wiki_index_benchmark(c: &mut Criterion) {
     group.throughput(Throughput::Bytes(WIKI.len() as u64));

     group.bench_function("index-wiki-no-commit", |b| {
-        let lines = get_lines(WIKI);
-        b.iter(|| {
-            let json_field = dynamic_schema.get_field("json").unwrap();
-            let index = Index::create_in_ram(dynamic_schema.clone());
-            let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let json_val: serde_json::Map<String, serde_json::Value> =
-                    serde_json::from_str(doc_json).unwrap();
-                let doc = tantivy::doc!(json_field=>json_val);
-                index_writer.add_document(doc).unwrap();
-            }
-        })
+        benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), false, false)
     });
     group.bench_function("index-wiki-with-commit", |b| {
-        let lines = get_lines(WIKI);
-        b.iter(|| {
-            let json_field = dynamic_schema.get_field("json").unwrap();
-            let index = Index::create_in_ram(dynamic_schema.clone());
-            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in &lines {
-                let json_val: serde_json::Map<String, serde_json::Value> =
-                    serde_json::from_str(doc_json).unwrap();
-                let doc = tantivy::doc!(json_field=>json_val);
-                index_writer.add_document(doc).unwrap();
-            }
-            index_writer.commit().unwrap();
-        })
+        benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), true, false)
     });
 }

@@ -187,12 +207,12 @@ criterion_group! {
 }
 criterion_group! {
     name = gh_benches;
-    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
+    config = Criterion::default();
     targets = gh_index_benchmark
 }
 criterion_group! {
     name = wiki_benches;
-    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
+    config = Criterion::default();
     targets = wiki_index_benchmark
 }
 criterion_main!(benches, gh_benches, wiki_benches);
```
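A note on the rewritten harness above: in the `else` branch, `_benchmark` pre-parses all documents once and hands each iteration a fresh clone through `b.iter_batched(|| docs.clone(), ..., BatchSize::SmallInput)`. Criterion runs that setup closure outside the timed section, so JSON parsing and cloning are excluded from the measurement; only when `include_json_parsing` is true does the plain `b.iter` path time the full parse-and-index loop.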
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-bitpacker"
-version = "0.5.0"
+version = "0.6.0"
 edition = "2021"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
@@ -15,7 +15,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-bitpacking = {version="0.8", default-features=false, features = ["bitpacker1x"]}
+bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker1x"] }

 [dev-dependencies]
 rand = "0.8"
```
```diff
@@ -1,4 +1,3 @@
-use std::convert::TryInto;
 use std::io;
 use std::ops::{Range, RangeInclusive};

@@ -367,7 +366,7 @@ mod test {
     let mut output: Vec<u32> = Vec::new();
     for len in [0, 1, 2, 32, 33, 34, 64] {
         for start_idx in 0u32..32u32 {
-            output.resize(len as usize, 0);
+            output.resize(len, 0);
             bitunpacker.get_batch_u32s(start_idx, &buffer, &mut output);
             for i in 0..len {
                 let expected = (start_idx + i as u32) & mask;
```
cliff.toml (83 changed lines)

```diff
@@ -1,6 +1,10 @@
 # configuration file for git-cliff
 # see https://github.com/orhun/git-cliff#configuration-file

+[remote.github]
+owner = "quickwit-oss"
+repo = "tantivy"
+
 [changelog]
 # changelog header
 header = """
@@ -8,15 +12,43 @@ header = """
 # template for the changelog body
 # https://tera.netlify.app/docs/#introduction
 body = """
 {% if version %}\
     {{ version | trim_start_matches(pat="v") }} ({{ timestamp | date(format="%Y-%m-%d") }})
 ==================
 {% else %}\
     ## [unreleased]
 {% endif %}\
-{% for commit in commits %}
-    - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | split(pat="\n") | first | trim | upper_first }}(@{{ commit.author.name }})\
-{% endfor %}
+## What's Changed
+{%- if version %} in {{ version }}{%- endif -%}
+{% for commit in commits %}
+    {% if commit.github.pr_title -%}
+        {%- set commit_message = commit.github.pr_title -%}
+    {%- else -%}
+        {%- set commit_message = commit.message -%}
+    {%- endif -%}
+    - {{ commit_message | split(pat="\n") | first | trim }}\
+      {% if commit.github.pr_number %} \
+          [#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}){% if commit.github.username %}(@{{ commit.github.username }}){%- endif -%} \
+      {%- endif %}
+{%- endfor -%}
+
+{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
+    {% raw %}\n{% endraw -%}
+    ## New Contributors
+{%- endif %}\
+{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
+    * @{{ contributor.username }} made their first contribution
+      {%- if contributor.pr_number %} in \
+          [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
+      {%- endif %}
+{%- endfor -%}
+
+{% if version %}
+    {% if previous.version %}
+        **Full Changelog**: {{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}
+    {% endif %}
+{% else -%}
+    {% raw %}\n{% endraw %}
+{% endif %}
+
+{%- macro remote_url() -%}
+    https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
+{%- endmacro -%}
 """
 # remove the leading and trailing whitespace from the template
 trim = true
@@ -25,53 +57,24 @@ footer = """
 """

-postprocessors = [
-    { pattern = 'Paul Masurel', replace = "fulmicoton"}, # replace with github user
-    { pattern = 'PSeitz', replace = "PSeitz"}, # replace with github user
-    { pattern = 'Adam Reichold', replace = "adamreichold"}, # replace with github user
-    { pattern = 'trinity-1686a', replace = "trinity-1686a"}, # replace with github user
-    { pattern = 'Michael Kleen', replace = "mkleen"}, # replace with github user
-    { pattern = 'Adrien Guillo', replace = "guilload"}, # replace with github user
-    { pattern = 'François Massot', replace = "fmassot"}, # replace with github user
-    { pattern = 'Naveen Aiathurai', replace = "naveenann"}, # replace with github user
-    { pattern = '', replace = ""}, # replace with github user
-]

 [git]
 # parse the commits based on https://www.conventionalcommits.org
+# This is required or commit.message contains the whole commit message and not just the title
-conventional_commits = true
+conventional_commits = false
 # filter out the commits that are not conventional
-filter_unconventional = false
+filter_unconventional = true
 # process each line of a commit as an individual commit
 split_commits = false
 # regex for preprocessing the commit messages
 commit_preprocessors = [
-    { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "[#${2}](https://github.com/quickwit-oss/tantivy/issues/${2})"}, # replace issue numbers
+    { pattern = '\((\w+\s)?#([0-9]+)\)', replace = ""},
 ]
 #link_parsers = [
 #{ pattern = "#(\\d+)", href = "https://github.com/quickwit-oss/tantivy/pulls/$1"},
 #]
-# regex for parsing and grouping commits
-commit_parsers = [
-    { message = "^feat", group = "Features"},
-    { message = "^fix", group = "Bug Fixes"},
-    { message = "^doc", group = "Documentation"},
-    { message = "^perf", group = "Performance"},
-    { message = "^refactor", group = "Refactor"},
-    { message = "^style", group = "Styling"},
-    { message = "^test", group = "Testing"},
-    { message = "^chore\\(release\\): prepare for", skip = true},
-    { message = "(?i)clippy", skip = true},
-    { message = "(?i)dependabot", skip = true},
-    { message = "(?i)fmt", skip = true},
-    { message = "(?i)bump", skip = true},
-    { message = "(?i)readme", skip = true},
-    { message = "(?i)comment", skip = true},
-    { message = "(?i)spelling", skip = true},
-    { message = "^chore", group = "Miscellaneous Tasks"},
-    { body = ".*security", group = "Security"},
-    { message = ".*", group = "Other", default_scope = "other"},
-]
 # protect breaking changes from being skipped due to matching a skipping commit_parser
 protect_breaking_commits = false
 # filter out the commits that are not matched by commit parsers
```
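The new `[remote.github]` section is what powers the rewritten body template: with a remote configured, git-cliff can query the GitHub API and expose `commit.github.pr_title`, `commit.github.pr_number`, `commit.github.username`, and the `github.contributors` list that the template consumes. That is also why the hand-maintained author-to-handle `postprocessors` list could be dropped.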
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-columnar"
-version = "0.2.0"
+version = "0.3.0"
 edition = "2021"
 license = "MIT"
 homepage = "https://github.com/quickwit-oss/tantivy"
@@ -9,20 +9,26 @@ description = "column oriented storage for tantivy"
 categories = ["database-implementations", "data-structures", "compression"]

 [dependencies]
-itertools = "0.11.0"
-fnv = "1.0.7"
+itertools = "0.13.0"
 fastdivide = "0.4.0"

-stacker = { version= "0.2", path = "../stacker", package="tantivy-stacker"}
-sstable = { version= "0.2", path = "../sstable", package = "tantivy-sstable" }
-common = { version= "0.6", path = "../common", package = "tantivy-common" }
-tantivy-bitpacker = { version= "0.5", path = "../bitpacker/" }
+stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}
+sstable = { version= "0.3", path = "../sstable", package = "tantivy-sstable" }
+common = { version= "0.7", path = "../common", package = "tantivy-common" }
+tantivy-bitpacker = { version= "0.6", path = "../bitpacker/" }
 serde = "1.0.152"
 downcast-rs = "1.2.0"

 [dev-dependencies]
 proptest = "1"
 more-asserts = "0.3.1"
 rand = "0.8"
+binggan = "0.8.1"
+
+[[bench]]
+name = "bench_merge"
+harness = false

 [features]
 unstable = []
```
columnar/benches/bench_first_vals.rs (Normal file, 155 lines)
@@ -0,0 +1,155 @@
#![feature(test)]
extern crate test;

use std::sync::Arc;

use rand::prelude::*;
use tantivy_columnar::column_values::{serialize_and_load_u64_based_column_values, CodecType};
use tantivy_columnar::*;
use test::{black_box, Bencher};

struct Columns {
    pub optional: Column,
    pub full: Column,
    pub multi: Column,
}

fn get_test_columns() -> Columns {
    let data = generate_permutation();
    let mut dataframe_writer = ColumnarWriter::default();
    for (idx, val) in data.iter().enumerate() {
        dataframe_writer.record_numerical(idx as u32, "full_values", NumericalValue::U64(*val));
        if idx % 2 == 0 {
            dataframe_writer.record_numerical(
                idx as u32,
                "optional_values",
                NumericalValue::U64(*val),
            );
        }
        dataframe_writer.record_numerical(idx as u32, "multi_values", NumericalValue::U64(*val));
        dataframe_writer.record_numerical(idx as u32, "multi_values", NumericalValue::U64(*val));
    }
    let mut buffer: Vec<u8> = Vec::new();
    dataframe_writer
        .serialize(data.len() as u32, None, &mut buffer)
        .unwrap();
    let columnar = ColumnarReader::open(buffer).unwrap();

    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("optional_values").unwrap();
    assert_eq!(cols.len(), 1);
    let optional = cols[0].open_u64_lenient().unwrap().unwrap();
    assert_eq!(optional.index.get_cardinality(), Cardinality::Optional);

    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("full_values").unwrap();
    assert_eq!(cols.len(), 1);
    let column_full = cols[0].open_u64_lenient().unwrap().unwrap();
    assert_eq!(column_full.index.get_cardinality(), Cardinality::Full);

    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("multi_values").unwrap();
    assert_eq!(cols.len(), 1);
    let multi = cols[0].open_u64_lenient().unwrap().unwrap();
    assert_eq!(multi.index.get_cardinality(), Cardinality::Multivalued);

    Columns {
        optional,
        full: column_full,
        multi,
    }
}

const NUM_VALUES: u64 = 100_000;
fn generate_permutation() -> Vec<u64> {
    let mut permutation: Vec<u64> = (0u64..NUM_VALUES).collect();
    permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
    permutation
}

pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn ColumnValues<u64>> {
    serialize_and_load_u64_based_column_values(&column, &[codec_type])
}

fn run_bench_on_column_full_scan(b: &mut Bencher, column: Column) {
    let num_iter = black_box(NUM_VALUES);
    b.iter(|| {
        let mut sum = 0u64;
        for i in 0..num_iter as u32 {
            let val = column.first(i);
            sum += val.unwrap_or(0);
        }
        sum
    });
}
fn run_bench_on_column_block_fetch(b: &mut Bencher, column: Column) {
    let mut block: Vec<Option<u64>> = vec![None; 64];
    let fetch_docids = (0..64).collect::<Vec<_>>();
    b.iter(move || {
        column.first_vals(&fetch_docids, &mut block);
        block[0]
    });
}
fn run_bench_on_column_block_single_calls(b: &mut Bencher, column: Column) {
    let mut block: Vec<Option<u64>> = vec![None; 64];
    let fetch_docids = (0..64).collect::<Vec<_>>();
    b.iter(move || {
        for i in 0..fetch_docids.len() {
            block[i] = column.first(fetch_docids[i]);
        }
        block[0]
    });
}

/// Column first method
#[bench]
fn bench_get_first_on_full_column_full_scan(b: &mut Bencher) {
    let column = get_test_columns().full;
    run_bench_on_column_full_scan(b, column);
}

#[bench]
fn bench_get_first_on_optional_column_full_scan(b: &mut Bencher) {
    let column = get_test_columns().optional;
    run_bench_on_column_full_scan(b, column);
}

#[bench]
fn bench_get_first_on_multi_column_full_scan(b: &mut Bencher) {
    let column = get_test_columns().multi;
    run_bench_on_column_full_scan(b, column);
}

/// Block fetch column accessor
#[bench]
fn bench_get_block_first_on_optional_column(b: &mut Bencher) {
    let column = get_test_columns().optional;
    run_bench_on_column_block_fetch(b, column);
}

#[bench]
fn bench_get_block_first_on_multi_column(b: &mut Bencher) {
    let column = get_test_columns().multi;
    run_bench_on_column_block_fetch(b, column);
}

#[bench]
fn bench_get_block_first_on_full_column(b: &mut Bencher) {
    let column = get_test_columns().full;
    run_bench_on_column_block_fetch(b, column);
}

#[bench]
fn bench_get_block_first_on_optional_column_single_calls(b: &mut Bencher) {
    let column = get_test_columns().optional;
    run_bench_on_column_block_single_calls(b, column);
}

#[bench]
fn bench_get_block_first_on_multi_column_single_calls(b: &mut Bencher) {
    let column = get_test_columns().multi;
    run_bench_on_column_block_single_calls(b, column);
}

#[bench]
fn bench_get_block_first_on_full_column_single_calls(b: &mut Bencher) {
    let column = get_test_columns().full;
    run_bench_on_column_block_single_calls(b, column);
}
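A note on running these: the file above relies on `#![feature(test)]`, so it only compiles on a nightly toolchain; presumably it is run with something like `cargo +nightly bench -p tantivy-columnar` (the exact invocation is an assumption, not taken from the repo). The binggan-based bench that follows uses `harness = false` and should not need nightly.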
columnar/benches/bench_merge.rs (Normal file, 97 lines)
@@ -0,0 +1,97 @@
#![feature(test)]
extern crate test;

use core::fmt;
use std::fmt::{Display, Formatter};

use binggan::{black_box, BenchRunner};
use tantivy_columnar::*;

enum Card {
    Multi,
    Sparse,
    Dense,
}
impl Display for Card {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            Card::Multi => write!(f, "multi"),
            Card::Sparse => write!(f, "sparse"),
            Card::Dense => write!(f, "dense"),
        }
    }
}

const NUM_DOCS: u32 = 1_000_000;

fn generate_columnar(card: Card, num_docs: u32) -> ColumnarReader {
    use tantivy_columnar::ColumnarWriter;

    let mut columnar_writer = ColumnarWriter::default();

    match card {
        Card::Multi => {
            columnar_writer.record_numerical(0, "price", 10u64);
            columnar_writer.record_numerical(0, "price", 10u64);
        }
        _ => {}
    }

    for i in 0..num_docs {
        match card {
            Card::Multi | Card::Sparse => {
                if i % 8 == 0 {
                    columnar_writer.record_numerical(i, "price", i as u64);
                }
            }
            Card::Dense => {
                if i % 6 == 0 {
                    columnar_writer.record_numerical(i, "price", i as u64);
                }
            }
        }
    }

    let mut wrt: Vec<u8> = Vec::new();
    columnar_writer.serialize(num_docs, None, &mut wrt).unwrap();

    ColumnarReader::open(wrt).unwrap()
}
fn main() {
    let mut inputs = Vec::new();

    let mut add_combo = |card1: Card, card2: Card| {
        inputs.push((
            format!("merge_{card1}_and_{card2}"),
            vec![
                generate_columnar(card1, NUM_DOCS),
                generate_columnar(card2, NUM_DOCS),
            ],
        ));
    };

    add_combo(Card::Multi, Card::Multi);
    add_combo(Card::Dense, Card::Dense);
    add_combo(Card::Sparse, Card::Sparse);
    add_combo(Card::Sparse, Card::Dense);
    add_combo(Card::Multi, Card::Dense);
    add_combo(Card::Multi, Card::Sparse);

    let runner: BenchRunner = BenchRunner::new();
    let mut group = runner.new_group();
    for (input_name, columnar_readers) in inputs.iter() {
        group.register_with_input(
            input_name,
            columnar_readers,
            move |columnar_readers: &Vec<ColumnarReader>| {
                let mut out = vec![];
                let columnar_readers = columnar_readers.iter().collect::<Vec<_>>();
                let merge_row_order = StackMergeOrder::stack(&columnar_readers[..]);

                merge_columnar(&columnar_readers, &[], merge_row_order.into(), &mut out).unwrap();
                black_box(out);
            },
        );
    }
    group.run();
}
@@ -16,14 +16,6 @@ fn generate_permutation() -> Vec<u64> {
    permutation
}

fn generate_random() -> Vec<u64> {
    let mut permutation: Vec<u64> = (0u64..100_000u64)
        .map(|el| el + random::<u16>() as u64)
        .collect();
    permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
    permutation
}

// Warning: this generates the same permutation at each call
fn generate_permutation_gcd() -> Vec<u64> {
    let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
@@ -8,7 +8,6 @@ license = "MIT"
columnar = {path="../", package="tantivy-columnar"}
serde_json = "1"
serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
serde = "1"

[workspace]
members = []
@@ -14,20 +14,32 @@ impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
    ColumnBlockAccessor<T>
{
    #[inline]
    pub fn fetch_block(&mut self, docs: &[u32], accessor: &Column<T>) {
        self.docid_cache.clear();
        self.row_id_cache.clear();
        accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
        self.val_cache.resize(self.row_id_cache.len(), T::default());
        accessor
            .values
            .get_vals(&self.row_id_cache, &mut self.val_cache);
    pub fn fetch_block<'a>(&'a mut self, docs: &'a [u32], accessor: &Column<T>) {
        if accessor.index.get_cardinality().is_full() {
            self.val_cache.resize(docs.len(), T::default());
            accessor.values.get_vals(docs, &mut self.val_cache);
        } else {
            self.docid_cache.clear();
            self.row_id_cache.clear();
            accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
            self.val_cache.resize(self.row_id_cache.len(), T::default());
            accessor
                .values
                .get_vals(&self.row_id_cache, &mut self.val_cache);
        }
    }
    #[inline]
    pub fn fetch_block_with_missing(&mut self, docs: &[u32], accessor: &Column<T>, missing: T) {
        self.fetch_block(docs, accessor);
        // We can compare docid_cache with docs to find missing docs
        if docs.len() != self.docid_cache.len() || accessor.index.is_multivalue() {
        // no missing values
        if accessor.index.get_cardinality().is_full() {
            return;
        }

        // We can compare docid_cache length with docs to find missing docs
        // For multi value columns we can't rely on the length and always need to scan
        if accessor.index.get_cardinality().is_multivalue() || docs.len() != self.docid_cache.len()
        {
            self.missing_docids_cache.clear();
            find_missing_docs(docs, &self.docid_cache, |doc| {
                self.missing_docids_cache.push(doc);
@@ -44,11 +56,25 @@ impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
    }

    #[inline]
    pub fn iter_docid_vals(&self) -> impl Iterator<Item = (DocId, T)> + '_ {
        self.docid_cache
            .iter()
            .cloned()
            .zip(self.val_cache.iter().cloned())
    /// Returns an iterator over the docids and values.
    /// The passed in `docs` slice needs to be the same slice that was passed to `fetch_block` or
    /// `fetch_block_with_missing`.
    ///
    /// `docs` is used if the column is full (each doc has exactly one value), otherwise the
    /// internal docid vec is used for the iterator, which e.g. may contain duplicate docs.
    pub fn iter_docid_vals<'a>(
        &'a self,
        docs: &'a [u32],
        accessor: &Column<T>,
    ) -> impl Iterator<Item = (DocId, T)> + '_ {
        if accessor.index.get_cardinality().is_full() {
            docs.iter().cloned().zip(self.val_cache.iter().cloned())
        } else {
            self.docid_cache
                .iter()
                .cloned()
                .zip(self.val_cache.iter().cloned())
        }
    }
}
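A minimal sketch of how a consumer would drive this two-phase API, assuming `column` is a `Column<u64>` obtained elsewhere (e.g. via `open_u64_lenient`) and that `ColumnBlockAccessor` can be default-constructed, as its caches suggest; everything else below is illustrative:

    let mut block_accessor = ColumnBlockAccessor::<u64>::default();
    let docs: Vec<u32> = (0..64).collect();
    // Phase 1: batch-translate docids to values; full columns now skip
    // the docid -> rowid translation entirely.
    block_accessor.fetch_block(&docs, &column);
    // Phase 2: iterate (docid, value) pairs; `docs` must be the same
    // slice that was passed to fetch_block.
    for (doc, val) in block_accessor.iter_docid_vals(&docs, &column) {
        let _ = (doc, val);
    }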
@@ -3,17 +3,17 @@ mod serialize;

use std::fmt::{self, Debug};
use std::io::Write;
use std::ops::{Deref, Range, RangeInclusive};
use std::ops::{Range, RangeInclusive};
use std::sync::Arc;

use common::BinarySerializable;
pub use dictionary_encoded::{BytesColumn, StrColumn};
pub use serialize::{
    open_column_bytes, open_column_str, open_column_u128, open_column_u64,
    serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
    open_column_bytes, open_column_str, open_column_u128, open_column_u128_as_compact_u64,
    open_column_u64, serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
};

use crate::column_index::ColumnIndex;
use crate::column_index::{ColumnIndex, Set};
use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
use crate::column_values::{monotonic_map_column, ColumnValues};
use crate::{Cardinality, DocId, EmptyColumnValues, MonotonicallyMappableToU64, RowId};
@@ -83,10 +83,36 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
        self.values.max_value()
    }

    #[inline]
    pub fn first(&self, row_id: RowId) -> Option<T> {
        self.values_for_doc(row_id).next()
    }

    /// Load the first value for each docid in the provided slice.
    #[inline]
    pub fn first_vals(&self, docids: &[DocId], output: &mut [Option<T>]) {
        match &self.index {
            ColumnIndex::Empty { .. } => {}
            ColumnIndex::Full => self.values.get_vals_opt(docids, output),
            ColumnIndex::Optional(optional_index) => {
                for (i, docid) in docids.iter().enumerate() {
                    output[i] = optional_index
                        .rank_if_exists(*docid)
                        .map(|rowid| self.values.get_val(rowid));
                }
            }
            ColumnIndex::Multivalued(multivalued_index) => {
                for (i, docid) in docids.iter().enumerate() {
                    let range = multivalued_index.range(*docid);
                    let is_empty = range.start == range.end;
                    if !is_empty {
                        output[i] = Some(self.values.get_val(range.start));
                    }
                }
            }
        }
    }

    /// Translates a block of docids to row_ids.
    ///
    /// returns the row_ids and the matching docids on the same index
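As a usage sketch (assuming `col` is a `Column<u64>`): `first_vals` is the batched counterpart of `first`, leaving `None` in place for docs without a value:

    let docids: Vec<DocId> = vec![0, 1, 2, 3];
    let mut firsts: Vec<Option<u64>> = vec![None; docids.len()];
    col.first_vals(&docids, &mut firsts);
    // Each slot agrees with the per-doc accessor:
    assert_eq!(firsts[0], col.first(docids[0]));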
@@ -105,7 +131,8 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
    }

    pub fn values_for_doc(&self, doc_id: DocId) -> impl Iterator<Item = T> + '_ {
        self.value_row_ids(doc_id)
        self.index
            .value_row_ids(doc_id)
            .map(|value_row_id: RowId| self.values.get_val(value_row_id))
    }

@@ -147,14 +174,6 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
    }
}

impl<T> Deref for Column<T> {
    type Target = ColumnIndex;

    fn deref(&self) -> &Self::Target {
        &self.index
    }
}

impl BinarySerializable for Cardinality {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
        self.to_code().serialize(writer)
@@ -176,6 +195,7 @@ struct FirstValueWithDefault<T: Copy> {
impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
    for FirstValueWithDefault<T>
{
    #[inline(always)]
    fn get_val(&self, idx: u32) -> T {
        self.column.first(idx).unwrap_or(self.default_value)
    }
@@ -76,6 +76,26 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
    })
}

/// Open the column as u64.
///
/// See [`open_u128_as_compact_u64`] for more details.
pub fn open_column_u128_as_compact_u64(bytes: OwnedBytes) -> io::Result<Column<u64>> {
    let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
    let column_index_num_bytes = u32::from_le_bytes(
        column_index_num_bytes_payload
            .as_slice()
            .try_into()
            .unwrap(),
    );
    let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
    let column_index = crate::column_index::open_column_index(column_index_data)?;
    let column_values = crate::column_values::open_u128_as_compact_u64(column_values_data)?;
    Ok(Column {
        index: column_index,
        values: column_values,
    })
}

pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
    let (body, dictionary_len_bytes) = data.rsplit(4);
    let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
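The `open_column_*` functions above all share the same footer framing: the last 4 bytes carry the length of the column-index section, which splits the remaining body into index bytes and value bytes. A standalone sketch of that framing over a plain byte slice (hypothetical helper, not in the crate):

    fn split_column_frame(bytes: &[u8]) -> (&[u8], &[u8]) {
        // Trailing u32 (little endian) = number of column-index bytes.
        let (body, len_bytes) = bytes.split_at(bytes.len() - 4);
        let index_len = u32::from_le_bytes(len_bytes.try_into().unwrap()) as usize;
        // Body = [column index bytes][column values bytes].
        body.split_at(index_len)
    }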
@@ -73,14 +73,18 @@ fn detect_cardinality(
pub fn merge_column_index<'a>(
    columns: &'a [ColumnIndex],
    merge_row_order: &'a MergeRowOrder,
    num_values: u32,
) -> SerializableColumnIndex<'a> {
    // For simplification, we do not try to detect whether the cardinality could be
    // downgraded thanks to deletes.
    let cardinality_after_merge = detect_cardinality(columns, merge_row_order);
    match merge_row_order {
        MergeRowOrder::Stack(stack_merge_order) => {
            merge_column_index_stacked(columns, cardinality_after_merge, stack_merge_order)
        }
        MergeRowOrder::Stack(stack_merge_order) => merge_column_index_stacked(
            columns,
            cardinality_after_merge,
            stack_merge_order,
            num_values,
        ),
        MergeRowOrder::Shuffled(complex_merge_order) => {
            merge_column_index_shuffled(columns, cardinality_after_merge, complex_merge_order)
        }
@@ -167,8 +171,12 @@ mod tests {
        ],
    )
    .into();
    let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
    let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
    let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order, 3);
    let SerializableColumnIndex::Multivalued {
        indices: start_index_iterable,
        ..
    } = merged_column_index
    else {
        panic!("Expected a multivalued index")
    };
    let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
@@ -200,8 +208,12 @@ mod tests {
        ],
    )
    .into();
    let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
    let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
    let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order, 6);
    let SerializableColumnIndex::Multivalued {
        indices: start_index_iterable,
        ..
    } = merged_column_index
    else {
        panic!("Expected a multivalued index")
    };
    let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
@@ -22,7 +22,10 @@ pub fn merge_column_index_shuffled<'a>(
        Cardinality::Multivalued => {
            let multivalue_start_index =
                merge_column_index_shuffled_multivalued(column_indexes, shuffle_merge_order);
            SerializableColumnIndex::Multivalued(multivalue_start_index)
            SerializableColumnIndex::Multivalued {
                indices: multivalue_start_index,
                stats: None,
            }
        }
    }
}
@@ -140,7 +143,7 @@ mod tests {
    #[test]
    fn test_merge_column_index_optional_shuffle() {
        let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
        let column_indexes = vec![optional_index, ColumnIndex::Full];
        let column_indexes = [optional_index, ColumnIndex::Full];
        let row_addrs = vec![
            RowAddr {
                segment_ord: 0u32,
@@ -1,6 +1,8 @@
use std::iter;
use std::num::NonZeroU64;

use crate::column_index::{SerializableColumnIndex, Set};
use crate::column_values::ColumnStats;
use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder};

@@ -12,6 +14,7 @@ pub fn merge_column_index_stacked<'a>(
    columns: &'a [ColumnIndex],
    cardinality_after_merge: Cardinality,
    stack_merge_order: &'a StackMergeOrder,
    num_values: u32,
) -> SerializableColumnIndex<'a> {
    match cardinality_after_merge {
        Cardinality::Full => SerializableColumnIndex::Full,
@@ -27,7 +30,17 @@ pub fn merge_column_index_stacked<'a>(
                columns,
                stack_merge_order,
            };
            SerializableColumnIndex::Multivalued(Box::new(stacked_multivalued_index))
            SerializableColumnIndex::Multivalued {
                indices: Box::new(stacked_multivalued_index),
                stats: Some(ColumnStats {
                    gcd: NonZeroU64::new(1).unwrap(),
                    // The values in the multivalue index are the positions of the values
                    min_value: 0,
                    max_value: num_values as u64,
                    // This is num docs, but it starts at 0 so we need +1
                    num_rows: stack_merge_order.num_rows() + 1,
                }),
            }
        }
    }
}
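The `num_rows: ... + 1` above is the usual fence-post arithmetic for a start-offset column; a small worked illustration (values made up):

    // Start offsets for 3 docs holding 2, 0 and 1 values respectively:
    //   [0, 2, 2, 3]
    // That is num_docs + 1 rows, with values ranging over 0..=num_values
    // (here num_values == 3), matching the ColumnStats filled in above.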
@@ -111,10 +124,7 @@ fn stack_multivalued_indexes<'a>(
    let mut last_row_id = 0;
    let mut current_it = multivalued_indexes.next();
    Box::new(std::iter::from_fn(move || loop {
        let Some(multivalued_index) = current_it.as_mut() else {
            return None;
        };
        if let Some(row_id) = multivalued_index.next() {
        if let Some(row_id) = current_it.as_mut()?.next() {
            last_row_id = offset + row_id;
            return Some(last_row_id);
        }
@@ -1,3 +1,8 @@
//! # `column_index`
//!
//! `column_index` provides rank and select operations to associate positions when not all
//! documents have exactly one element.

mod merge;
mod multivalued_index;
mod optional_index;
@@ -37,14 +42,10 @@ impl From<MultiValueIndex> for ColumnIndex {
}

impl ColumnIndex {
    #[inline]
    pub fn is_multivalue(&self) -> bool {
        matches!(self, ColumnIndex::Multivalued(_))
    }
    // Returns the cardinality of the column index.
    //
    // By convention, if the column contains no docs, we consider that it is
    // full.
    /// Returns the cardinality of the column index.
    ///
    /// By convention, if the column contains no docs, we consider that it is
    /// full.
    #[inline]
    pub fn get_cardinality(&self) -> Cardinality {
        match self {
@@ -121,18 +122,18 @@ impl ColumnIndex {
        }
    }

    pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
    pub fn docid_range_to_rowids(&self, doc_id_range: Range<DocId>) -> Range<RowId> {
        match self {
            ColumnIndex::Empty { .. } => 0..0,
            ColumnIndex::Full => doc_id,
            ColumnIndex::Full => doc_id_range,
            ColumnIndex::Optional(optional_index) => {
                let row_start = optional_index.rank(doc_id.start);
                let row_end = optional_index.rank(doc_id.end);
                let row_start = optional_index.rank(doc_id_range.start);
                let row_end = optional_index.rank(doc_id_range.end);
                row_start..row_end
            }
            ColumnIndex::Multivalued(multivalued_index) => {
                let end_docid = doc_id.end.min(multivalued_index.num_docs() - 1) + 1;
                let start_docid = doc_id.start.min(end_docid);
                let end_docid = doc_id_range.end.min(multivalued_index.num_docs() - 1) + 1;
                let start_docid = doc_id_range.start.min(end_docid);

                let row_start = multivalued_index.start_index_column.get_val(start_docid);
                let row_end = multivalued_index.start_index_column.get_val(end_docid);
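A worked illustration of `docid_range_to_rowids` on an optional column (made-up data): if only docs 1 and 4 carry a value, then

    // rank(0) == 0 and rank(5) == 2, so:
    // docid_range_to_rowids(0..5) == 0..2   (both value rows fall in range)
    // docid_range_to_rowids(2..4) == 1..1   (empty: doc 4 is excluded)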
@@ -6,20 +6,29 @@ use std::sync::Arc;
use common::OwnedBytes;

use crate::column_values::{
    load_u64_based_column_values, serialize_u64_based_column_values, CodecType, ColumnValues,
    load_u64_based_column_values, serialize_u64_based_column_values,
    serialize_u64_with_codec_and_stats, CodecType, ColumnStats, ColumnValues,
};
use crate::iterable::Iterable;
use crate::{DocId, RowId};

pub fn serialize_multivalued_index(
    multivalued_index: &dyn Iterable<RowId>,
    stats: Option<ColumnStats>,
    output: &mut impl Write,
) -> io::Result<()> {
    serialize_u64_based_column_values(
        multivalued_index,
        &[CodecType::Bitpacked, CodecType::Linear],
        output,
    )?;
    if let Some(stats) = stats {
        // TODO: Add something with higher compression that doesn't require a full scan upfront
        let estimator = CodecType::Bitpacked.estimator();
        assert!(!estimator.requires_full_scan());
        serialize_u64_with_codec_and_stats(multivalued_index, estimator, stats, output)?;
    } else {
        serialize_u64_based_column_values(
            multivalued_index,
            &[CodecType::Bitpacked, CodecType::Linear],
            output,
        )?;
    }
    Ok(())
}

@@ -52,7 +61,7 @@ impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
impl MultiValueIndex {
    pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex {
        let mut buffer = Vec::new();
        serialize_multivalued_index(&start_offsets, &mut buffer).unwrap();
        serialize_multivalued_index(&start_offsets, None, &mut buffer).unwrap();
        let bytes = OwnedBytes::new(buffer);
        open_multivalued_index(bytes).unwrap()
    }
@@ -21,8 +21,6 @@ const DENSE_BLOCK_THRESHOLD: u32 =

const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;

const BLOCK_SIZE: RowId = 1 << 16;

#[derive(Copy, Clone, Debug)]
struct BlockMeta {
    non_null_rows_before_block: u32,
@@ -109,8 +107,8 @@ struct RowAddr {
#[inline(always)]
fn row_addr_from_row_id(row_id: RowId) -> RowAddr {
    RowAddr {
        block_id: (row_id / BLOCK_SIZE) as u16,
        in_block_row_id: (row_id % BLOCK_SIZE) as u16,
        block_id: (row_id / ELEMENTS_PER_BLOCK) as u16,
        in_block_row_id: (row_id % ELEMENTS_PER_BLOCK) as u16,
    }
}
@@ -185,14 +183,20 @@ impl Set<RowId> for OptionalIndex {
        }
    }

    /// Any value doc_id is allowed.
    /// In particular, doc_id = num_rows.
    #[inline]
    fn rank(&self, doc_id: DocId) -> RowId {
        if doc_id >= self.num_docs() {
            return self.num_non_nulls();
        }
        let RowAddr {
            block_id,
            in_block_row_id,
        } = row_addr_from_row_id(doc_id);
        let block_meta = self.block_metas[block_id as usize];
        let block = self.block(block_meta);

        let block_offset_row_id = match block {
            Block::Dense(dense_block) => dense_block.rank(in_block_row_id),
            Block::Sparse(sparse_block) => sparse_block.rank(in_block_row_id),
@@ -200,13 +204,15 @@ impl Set<RowId> for OptionalIndex {
        block_meta.non_null_rows_before_block + block_offset_row_id
    }

    /// Any value doc_id is allowed.
    /// In particular, doc_id = num_rows.
    #[inline]
    fn rank_if_exists(&self, doc_id: DocId) -> Option<RowId> {
        let RowAddr {
            block_id,
            in_block_row_id,
        } = row_addr_from_row_id(doc_id);
        let block_meta = self.block_metas[block_id as usize];
        let block_meta = *self.block_metas.get(block_id as usize)?;
        let block = self.block(block_meta);
        let block_offset_row_id = match block {
            Block::Dense(dense_block) => dense_block.rank_if_exists(in_block_row_id),
@@ -491,7 +497,7 @@ fn deserialize_optional_index_block_metadatas(
        non_null_rows_before_block += num_non_null_rows;
    }
    block_metas.resize(
        ((num_rows + BLOCK_SIZE - 1) / BLOCK_SIZE) as usize,
        ((num_rows + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK) as usize,
        BlockMeta {
            non_null_rows_before_block,
            start_byte_offset,

@@ -39,7 +39,8 @@ pub trait Set<T> {
    ///
    /// # Panics
    ///
    /// May panic if rank is greater than the number of elements in the Set.
    /// May panic if rank is greater or equal to the number of
    /// elements in the Set.
    fn select(&self, rank: T) -> T;

    /// Creates a brand new select cursor.
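The rank/select contract spelled out above, on a tiny made-up OptionalIndex over docs {2, 5}:

    // rank(d) counts the non-null docs strictly before d:
    //   rank(0) == 0, rank(3) == 1, rank(6) == 2
    // rank_if_exists(2) == Some(0), rank_if_exists(3) == None
    // select(1) == 5, the doc id holding the rank-1 value;
    // select(2) would violate the documented precondition and may panic.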
@@ -1,4 +1,3 @@
use std::convert::TryInto;
use std::io::{self, Write};

use common::BinarySerializable;

@@ -1,8 +1,31 @@
use proptest::prelude::{any, prop, *};
use proptest::strategy::Strategy;
use proptest::prelude::*;
use proptest::{prop_oneof, proptest};

use super::*;
use crate::{ColumnarReader, ColumnarWriter, DynamicColumnHandle};

#[test]
fn test_optional_index_bug_2293() {
    // tests for panic in docid_range_to_rowids for docid == num_docs
    test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK - 1);
    test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK);
    test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK + 1);
}
fn test_optional_index_with_num_docs(num_docs: u32) {
    let mut dataframe_writer = ColumnarWriter::default();
    dataframe_writer.record_numerical(100, "score", 80i64);
    let mut buffer: Vec<u8> = Vec::new();
    dataframe_writer
        .serialize(num_docs, None, &mut buffer)
        .unwrap();
    let columnar = ColumnarReader::open(buffer).unwrap();
    assert_eq!(columnar.num_columns(), 1);
    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("score").unwrap();
    assert_eq!(cols.len(), 1);

    let col = cols[0].open().unwrap();
    col.column_index().docid_range_to_rowids(0..num_docs);
}

#[test]
fn test_dense_block_threshold() {
@@ -35,7 +58,7 @@ proptest! {

    #[test]
    fn test_with_random_sets_simple() {
        let vals = 10..BLOCK_SIZE * 2;
        let vals = 10..ELEMENTS_PER_BLOCK * 2;
        let mut out: Vec<u8> = Vec::new();
        serialize_optional_index(&vals, 100, &mut out).unwrap();
        let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
@@ -171,7 +194,7 @@ fn test_optional_index_rank() {
    test_optional_index_rank_aux(&[0u32, 1u32]);
    let mut block = Vec::new();
    block.push(3u32);
    block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
    block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
    test_optional_index_rank_aux(&block);
}

@@ -185,8 +208,8 @@ fn test_optional_index_iter_empty_one() {
fn test_optional_index_iter_dense_block() {
    let mut block = Vec::new();
    block.push(3u32);
    block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
    test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
    block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
    test_optional_index_iter_aux(&block, 3 * ELEMENTS_PER_BLOCK);
}

#[test]
@@ -215,12 +238,12 @@ mod bench {
    let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
        .map(|_| rng.gen_bool(fill_ratio))
        .enumerate()
        .filter(|(pos, val)| *val)
        .filter(|(_pos, val)| *val)
        .map(|(pos, _)| pos as RowId)
        .collect();
    serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
    let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
    codec

    open_optional_index(OwnedBytes::new(out)).unwrap()
}

fn random_range_iterator(
@@ -242,7 +265,7 @@ mod bench {
}

fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
    let ratio = percent as f32 / 100.0;
    let ratio = percent / 100.0;
    let step_size = (1f32 / ratio) as u32;
    let deviation = step_size - 1;
    random_range_iterator(0, num_values, step_size, deviation)
@@ -6,6 +6,7 @@ use common::{CountingWriter, OwnedBytes};
use crate::column_index::multivalued_index::serialize_multivalued_index;
use crate::column_index::optional_index::serialize_optional_index;
use crate::column_index::ColumnIndex;
use crate::column_values::ColumnStats;
use crate::iterable::Iterable;
use crate::{Cardinality, RowId};

@@ -15,9 +16,12 @@ pub enum SerializableColumnIndex<'a> {
        non_null_row_ids: Box<dyn Iterable<RowId> + 'a>,
        num_rows: RowId,
    },
    // TODO remove the Arc<dyn> apart from serialization this is not
    // dynamic at all.
    Multivalued(Box<dyn Iterable<RowId> + 'a>),
    Multivalued {
        /// Iterator emitting the indices for the index
        indices: Box<dyn Iterable<RowId> + 'a>,
        /// In the merge case we can precompute the column stats
        stats: Option<ColumnStats>,
    },
}

impl<'a> SerializableColumnIndex<'a> {
@@ -25,11 +29,12 @@ impl<'a> SerializableColumnIndex<'a> {
        match self {
            SerializableColumnIndex::Full => Cardinality::Full,
            SerializableColumnIndex::Optional { .. } => Cardinality::Optional,
            SerializableColumnIndex::Multivalued(_) => Cardinality::Multivalued,
            SerializableColumnIndex::Multivalued { .. } => Cardinality::Multivalued,
        }
    }
}

/// Serialize a column index.
pub fn serialize_column_index(
    column_index: SerializableColumnIndex,
    output: &mut impl Write,
@@ -43,14 +48,16 @@ pub fn serialize_column_index(
            non_null_row_ids,
            num_rows,
        } => serialize_optional_index(non_null_row_ids.as_ref(), num_rows, &mut output)?,
        SerializableColumnIndex::Multivalued(multivalued_index) => {
            serialize_multivalued_index(&*multivalued_index, &mut output)?
        }
        SerializableColumnIndex::Multivalued {
            indices: multivalued_index,
            stats,
        } => serialize_multivalued_index(&*multivalued_index, stats, &mut output)?,
    }
    let column_index_num_bytes = output.written_bytes() as u32;
    Ok(column_index_num_bytes)
}

/// Open a serialized column index.
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
    if bytes.is_empty() {
        return Err(io::Error::new(
@@ -10,7 +10,7 @@ pub(crate) struct MergedColumnValues<'a, T> {
    pub(crate) merge_row_order: &'a MergeRowOrder,
}

impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
impl<'a, T: Copy + PartialOrd + Debug + 'static> Iterable<T> for MergedColumnValues<'a, T> {
    fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
        match self.merge_row_order {
            MergeRowOrder::Stack(_) => Box::new(
@@ -10,6 +10,7 @@ use std::fmt::Debug;
use std::ops::{Range, RangeInclusive};
use std::sync::Arc;

use downcast_rs::DowncastSync;
pub use monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
pub use monotonic_mapping_u128::MonotonicallyMappableToU128;

@@ -25,10 +26,14 @@ mod monotonic_column;

pub(crate) use merge::MergedColumnValues;
pub use stats::ColumnStats;
pub use u128_based::{open_u128_mapped, serialize_column_values_u128};
pub use u128_based::{
    open_u128_as_compact_u64, open_u128_mapped, serialize_column_values_u128,
    CompactSpaceU64Accessor,
};
pub use u64_based::{
    load_u64_based_column_values, serialize_and_load_u64_based_column_values,
    serialize_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
    serialize_u64_based_column_values, serialize_u64_with_codec_and_stats, CodecType,
    ALL_U64_CODEC_TYPES,
};
pub use vec_column::VecColumn;
@@ -41,7 +46,7 @@ use crate::RowId;
///
/// Any methods with a default and specialized implementation need to be called in the
/// wrappers that implement the trait: Arc and MonotonicMappingColumn
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
    /// Return the value associated with the given idx.
    ///
    /// This accessor should return as fast as possible.
@@ -68,11 +73,40 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
            out_x4[3] = self.get_val(idx_x4[3]);
        }

        let step_size = 4;
        let cutoff = indexes.len() - indexes.len() % step_size;
        let out_and_idx_chunks = output
            .chunks_exact_mut(4)
            .into_remainder()
            .iter_mut()
            .zip(indexes.chunks_exact(4).remainder());
        for (out, idx) in out_and_idx_chunks {
            *out = self.get_val(*idx);
        }
    }

        for idx in cutoff..indexes.len() {
            output[idx] = self.get_val(indexes[idx]);
    /// Allows pushing down multiple fetch calls, to avoid dynamic dispatch overhead.
    /// The slightly weird `Option<T>` in output allows pushdown to full columns.
    ///
    /// idx and output should have the same length
    ///
    /// # Panics
    ///
    /// May panic if `idx` is greater than the column length.
    fn get_vals_opt(&self, indexes: &[u32], output: &mut [Option<T>]) {
        assert!(indexes.len() == output.len());
        let out_and_idx_chunks = output.chunks_exact_mut(4).zip(indexes.chunks_exact(4));
        for (out_x4, idx_x4) in out_and_idx_chunks {
            out_x4[0] = Some(self.get_val(idx_x4[0]));
            out_x4[1] = Some(self.get_val(idx_x4[1]));
            out_x4[2] = Some(self.get_val(idx_x4[2]));
            out_x4[3] = Some(self.get_val(idx_x4[3]));
        }
        let out_and_idx_chunks = output
            .chunks_exact_mut(4)
            .into_remainder()
            .iter_mut()
            .zip(indexes.chunks_exact(4).remainder());
        for (out, idx) in out_and_idx_chunks {
            *out = Some(self.get_val(*idx));
        }
    }
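A quick sketch of `get_vals_opt` in isolation, using the now-owned `VecColumn` (see the vec_column change further down):

    let col = VecColumn::from(vec![10u64, 20, 30, 40, 50]);
    let indexes = [0u32, 2, 4];
    let mut out: Vec<Option<u64>> = vec![None; indexes.len()];
    col.get_vals_opt(&indexes, &mut out);
    assert_eq!(out, vec![Some(10), Some(30), Some(50)]);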
@@ -101,7 +135,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
        row_id_hits: &mut Vec<RowId>,
    ) {
        let row_id_range = row_id_range.start..row_id_range.end.min(self.num_vals());
        for idx in row_id_range.start..row_id_range.end {
        for idx in row_id_range {
            let val = self.get_val(idx);
            if value_range.contains(&val) {
                row_id_hits.push(idx);
@@ -139,6 +173,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
    }
}
downcast_rs::impl_downcast!(sync ColumnValues<T> where T: PartialOrd);

/// Empty column of values.
pub struct EmptyColumnValues;
@@ -161,12 +196,17 @@ impl<T: PartialOrd + Default> ColumnValues<T> for EmptyColumnValues {
    }
}

impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
    #[inline(always)]
    fn get_val(&self, idx: u32) -> T {
        self.as_ref().get_val(idx)
    }

    #[inline(always)]
    fn get_vals_opt(&self, indexes: &[u32], output: &mut [Option<T>]) {
        self.as_ref().get_vals_opt(indexes, output)
    }

    #[inline(always)]
    fn min_value(&self) -> T {
        self.as_ref().min_value()
@@ -31,10 +31,10 @@ pub fn monotonic_map_column<C, T, Input, Output>(
    monotonic_mapping: T,
) -> impl ColumnValues<Output>
where
    C: ColumnValues<Input>,
    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
    Input: PartialOrd + Debug + Send + Sync + Clone,
    Output: PartialOrd + Debug + Send + Sync + Clone,
    C: ColumnValues<Input> + 'static,
    T: StrictlyMonotonicFn<Input, Output> + Send + Sync + 'static,
    Input: PartialOrd + Debug + Send + Sync + Clone + 'static,
    Output: PartialOrd + Debug + Send + Sync + Clone + 'static,
{
    MonotonicMappingColumn {
        from_column,
@@ -45,10 +45,10 @@ where

impl<C, T, Input, Output> ColumnValues<Output> for MonotonicMappingColumn<C, T, Input>
where
    C: ColumnValues<Input>,
    T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
    Input: PartialOrd + Send + Debug + Sync + Clone,
    Output: PartialOrd + Send + Debug + Sync + Clone,
    C: ColumnValues<Input> + 'static,
    T: StrictlyMonotonicFn<Input, Output> + Send + Sync + 'static,
    Input: PartialOrd + Send + Debug + Sync + Clone + 'static,
    Output: PartialOrd + Send + Debug + Sync + Clone + 'static,
{
    #[inline(always)]
    fn get_val(&self, idx: u32) -> Output {
@@ -107,7 +107,7 @@ mod tests {
    #[test]
    fn test_monotonic_mapping_iter() {
        let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
        let col = VecColumn::from(&vals);
        let col = VecColumn::from(vals);
        let mapped = monotonic_map_column(
            col,
            StrictlyMonotonicMappingInverter::from(StrictlyMonotonicMappingToInternal::<i64>::new()),
@@ -22,7 +22,7 @@ mod build_compact_space;

use build_compact_space::get_compact_space;
use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
use tantivy_bitpacker::{BitPacker, BitUnpacker};

use crate::column_values::ColumnValues;
use crate::RowId;
@@ -148,7 +148,7 @@ impl CompactSpace {
        .binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
        // Correctness: Overflow. The first range starts at compact space 0, the error from
        // binary search can never be 0
        .map_or_else(|e| e - 1, |v| v);
        .unwrap_or_else(|e| e - 1);

    let range_mapping = &self.ranges_mapping[pos];
    let diff = compact - range_mapping.compact_start;
@@ -292,6 +292,63 @@ impl BinarySerializable for IPCodecParams {
    }
}

/// Exposes the compact space compressed values as u64.
///
/// This allows faster access to the values, as u64 is faster to work with than u128.
/// It also allows handling u128 values like u64, via the `open_u64_lenient` as a uniform
/// access interface.
///
/// When converting from the internal u64 to u128 `compact_to_u128` can be used.
pub struct CompactSpaceU64Accessor(CompactSpaceDecompressor);
impl CompactSpaceU64Accessor {
    pub(crate) fn open(data: OwnedBytes) -> io::Result<CompactSpaceU64Accessor> {
        let decompressor = CompactSpaceU64Accessor(CompactSpaceDecompressor::open(data)?);
        Ok(decompressor)
    }
    /// Convert a compact space value to u128
    pub fn compact_to_u128(&self, compact: u32) -> u128 {
        self.0.compact_to_u128(compact)
    }
}

impl ColumnValues<u64> for CompactSpaceU64Accessor {
    #[inline]
    fn get_val(&self, doc: u32) -> u64 {
        let compact = self.0.get_compact(doc);
        compact as u64
    }

    fn min_value(&self) -> u64 {
        self.0.u128_to_compact(self.0.min_value()).unwrap() as u64
    }

    fn max_value(&self) -> u64 {
        self.0.u128_to_compact(self.0.max_value()).unwrap() as u64
    }

    fn num_vals(&self) -> u32 {
        self.0.params.num_vals
    }

    #[inline]
    fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
        Box::new(self.0.iter_compact().map(|el| el as u64))
    }

    #[inline]
    fn get_row_ids_for_value_range(
        &self,
        value_range: RangeInclusive<u64>,
        position_range: Range<u32>,
        positions: &mut Vec<u32>,
    ) {
        let value_range = self.0.compact_to_u128(*value_range.start() as u32)
            ..=self.0.compact_to_u128(*value_range.end() as u32);
        self.0
            .get_row_ids_for_value_range(value_range, position_range, positions)
    }
}

impl ColumnValues<u128> for CompactSpaceDecompressor {
    #[inline]
    fn get_val(&self, doc: u32) -> u128 {
@@ -402,9 +459,14 @@ impl CompactSpaceDecompressor {
        .map(|compact| self.compact_to_u128(compact))
    }

    #[inline]
    pub fn get_compact(&self, idx: u32) -> u32 {
        self.params.bit_unpacker.get(idx, &self.data) as u32
    }

    #[inline]
    pub fn get(&self, idx: u32) -> u128 {
        let compact = self.params.bit_unpacker.get(idx, &self.data) as u32;
        let compact = self.get_compact(idx);
        self.compact_to_u128(compact)
    }
@@ -6,7 +6,9 @@ use std::sync::Arc;
mod compact_space;

use common::{BinarySerializable, OwnedBytes, VInt};
use compact_space::{CompactSpaceCompressor, CompactSpaceDecompressor};
pub use compact_space::{
    CompactSpaceCompressor, CompactSpaceDecompressor, CompactSpaceU64Accessor,
};

use crate::column_values::monotonic_map_column;
use crate::column_values::monotonic_mapping::{
@@ -108,6 +110,23 @@ pub fn open_u128_mapped<T: MonotonicallyMappableToU128 + Debug>(
        StrictlyMonotonicMappingToInternal::<T>::new().into();
    Ok(Arc::new(monotonic_map_column(reader, inverted)))
}

/// Returns the u64 representation of the u128 data.
/// The internal representation of the data as u64 is useful for faster processing.
///
/// To convert back to u128, cast to `CompactSpaceU64Accessor` and call
/// `compact_to_u128`.
///
/// # Notice
/// In case there are new codecs added, check for usages of `CompactSpaceDecompressorU64` and
/// also handle the new codecs.
pub fn open_u128_as_compact_u64(mut bytes: OwnedBytes) -> io::Result<Arc<dyn ColumnValues<u64>>> {
    let header = U128Header::deserialize(&mut bytes)?;
    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
    let reader = CompactSpaceU64Accessor::open(bytes)?;
    Ok(Arc::new(reader))
}

#[cfg(test)]
pub mod tests {
    use super::*;
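A sketch of the round trip described above, assuming `downcast-rs` generates `downcast_ref` for `dyn ColumnValues<u64>` via the `impl_downcast!` invocation shown earlier, and that `bytes` holds a serialized compact-space u128 column:

    let col: Arc<dyn ColumnValues<u64>> = open_u128_as_compact_u64(bytes)?;
    // Fast path: work on the compact u64 representation.
    let compact = col.get_val(0) as u32;
    // Slow path: recover the original u128 only when it is actually needed.
    let accessor = col
        .downcast_ref::<CompactSpaceU64Accessor>()
        .expect("compact space codec");
    let full_value: u128 = accessor.compact_to_u128(compact);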
@@ -63,7 +63,6 @@ impl ColumnValues for BitpackedReader {
    fn get_val(&self, doc: u32) -> u64 {
        self.stats.min_value + self.stats.gcd.get() * self.bit_unpacker.get(doc, &self.data)
    }

    #[inline]
    fn min_value(&self) -> u64 {
        self.stats.min_value
@@ -129,6 +128,9 @@ impl ColumnCodecEstimator for BitpackedCodecEstimator {
        bit_packer.close(wrt)?;
        Ok(())
    }
    fn codec_type(&self) -> super::CodecType {
        super::CodecType::Bitpacked
    }
}

pub struct BitpackedCodec;
@@ -63,7 +63,10 @@ impl BlockwiseLinearEstimator {
    if self.block.is_empty() {
        return;
    }
    let line = Line::train(&VecColumn::from(&self.block));
    let column = VecColumn::from(std::mem::take(&mut self.block));
    let line = Line::train(&column);
    self.block = column.into();

    let mut max_value = 0u64;
    for (i, buffer_val) in self.block.iter().enumerate() {
        let interpolated_val = line.eval(i as u32);
@@ -125,7 +128,7 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
        *buffer_val = gcd_divider.divide(*buffer_val - stats.min_value);
    }

    let line = Line::train(&VecColumn::from(&buffer));
    let line = Line::train(&VecColumn::from(buffer.to_vec()));

    assert!(!buffer.is_empty());

@@ -160,6 +163,10 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {

        Ok(())
    }

    fn codec_type(&self) -> super::CodecType {
        super::CodecType::BlockwiseLinear
    }
}

pub struct BlockwiseLinearCodec;
@@ -184,7 +184,7 @@ mod tests {
    }

    fn test_eval_max_err(ys: &[u64]) -> Option<u64> {
        let line = Line::train(&VecColumn::from(&ys));
        let line = Line::train(&VecColumn::from(ys.to_vec()));
        ys.iter()
            .enumerate()
            .map(|(x, y)| y.wrapping_sub(line.eval(x as u32)))

@@ -153,6 +153,12 @@ impl ColumnCodecEstimator for LinearCodecEstimator {
            self.collect_before_line_estimation(value);
        }
    }
    fn requires_full_scan(&self) -> bool {
        true
    }
    fn codec_type(&self) -> super::CodecType {
        super::CodecType::Linear
    }
}

impl LinearCodecEstimator {
@@ -173,7 +179,9 @@ impl LinearCodecEstimator {
    fn collect_before_line_estimation(&mut self, value: u64) {
        self.block.push(value);
        if self.block.len() == LINE_ESTIMATION_BLOCK_LEN {
            let line = Line::train(&VecColumn::from(&self.block));
            let column = VecColumn::from(std::mem::take(&mut self.block));
            let line = Line::train(&column);
            self.block = column.into();
            let block = std::mem::take(&mut self.block);
            for val in block {
                self.collect_after_line_estimation(&line, val);
@@ -37,7 +37,11 @@ pub trait ColumnCodecEstimator<T = u64>: 'static {
    /// This method will be called for each element of the column during
    /// `estimation`.
    fn collect(&mut self, value: u64);
    /// Finalizes the first pass phase.
    /// Returns true if the estimator needs a full pass over the column before serialization
    fn requires_full_scan(&self) -> bool {
        false
    }
    fn codec_type(&self) -> CodecType;
    fn finalize(&mut self) {}
    /// Returns an accurate estimation of the number of bytes that will
    /// be used to represent this column.
@@ -150,34 +154,45 @@ pub fn serialize_u64_based_column_values<T: MonotonicallyMappableToU64>(
    wrt: &mut dyn Write,
) -> io::Result<()> {
    let mut stats_collector = StatsCollector::default();
    let mut estimators: Vec<(CodecType, Box<dyn ColumnCodecEstimator>)> =
        Vec::with_capacity(codec_types.len());
    let mut estimators: Vec<Box<dyn ColumnCodecEstimator>> = Vec::with_capacity(codec_types.len());
    for &codec_type in codec_types {
        estimators.push((codec_type, codec_type.estimator()));
        estimators.push(codec_type.estimator());
    }
    for val in vals.boxed_iter() {
        let val_u64 = val.to_u64();
        stats_collector.collect(val_u64);
        for (_, estimator) in &mut estimators {
        for estimator in &mut estimators {
            estimator.collect(val_u64);
        }
    }
    for (_, estimator) in &mut estimators {
    for estimator in &mut estimators {
        estimator.finalize();
    }
    let stats = stats_collector.stats();
    let (_, best_codec, best_codec_estimator) = estimators
    let (_, best_codec) = estimators
        .into_iter()
        .flat_map(|(codec_type, estimator)| {
        .flat_map(|estimator| {
            let num_bytes = estimator.estimate(&stats)?;
            Some((num_bytes, codec_type, estimator))
            Some((num_bytes, estimator))
        })
        .min_by_key(|(num_bytes, _, _)| *num_bytes)
        .min_by_key(|(num_bytes, _)| *num_bytes)
        .ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, "No available applicable codec.")
        })?;
    best_codec.to_code().serialize(wrt)?;
    best_codec_estimator.serialize(
    serialize_u64_with_codec_and_stats(vals, best_codec, stats, wrt)?;
    Ok(())
}

/// Serializes a given column of u64-mapped values.
/// The codec estimator needs to be fully collected for the Line codec before calling this.
pub fn serialize_u64_with_codec_and_stats<T: MonotonicallyMappableToU64>(
    vals: &dyn Iterable<T>,
    codec: Box<dyn ColumnCodecEstimator>,
    stats: ColumnStats,
    wrt: &mut dyn Write,
) -> io::Result<()> {
    codec.codec_type().to_code().serialize(wrt)?;
    codec.serialize(
        &stats,
        &mut vals.boxed_iter().map(MonotonicallyMappableToU64::to_u64),
        wrt,
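End to end, the codec-selection loop above is what the existing test helper exercises; a sketch, mirroring the call shape used in bench_first_vals.rs:

    let vals: Vec<u64> = (0..1_000u64).map(|i| i * 7).collect();
    // Every candidate codec collects stats; the smallest estimate wins.
    let col = serialize_and_load_u64_based_column_values(&&vals[..], &ALL_U64_CODEC_TYPES);
    assert_eq!(col.get_val(3), 21);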
@@ -1,5 +1,4 @@
use proptest::prelude::*;
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};

#[test]
@@ -4,14 +4,14 @@ use tantivy_bitpacker::minmax;

use crate::ColumnValues;

/// VecColumn provides `Column` over a slice.
pub struct VecColumn<'a, T = u64> {
    pub(crate) values: &'a [T],
/// VecColumn provides `Column` over a `Vec<T>`.
pub struct VecColumn<T = u64> {
    pub(crate) values: Vec<T>,
    pub(crate) min_value: T,
    pub(crate) max_value: T,
}

impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColumn<'a, T> {
impl<T: Copy + PartialOrd + Send + Sync + Debug + 'static> ColumnValues<T> for VecColumn<T> {
    fn get_val(&self, position: u32) -> T {
        self.values[position as usize]
    }
@@ -37,11 +37,8 @@ impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColu
    }
}

impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
where V: AsRef<[T]> + ?Sized
{
    fn from(values: &'a V) -> Self {
        let values = values.as_ref();
impl<T: Copy + PartialOrd + Default> From<Vec<T>> for VecColumn<T> {
    fn from(values: Vec<T>) -> Self {
        let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
        Self {
            values,
@@ -50,3 +47,8 @@ where V: AsRef<[T]> + ?Sized
        }
    }
}
impl From<VecColumn> for Vec<u64> {
    fn from(column: VecColumn) -> Self {
        column.values
    }
}
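The practical effect of the `VecColumn` change, as a before/after sketch:

    // Before: borrowed a slice, e.g. VecColumn::from(&values).
    // After: the column owns its data, and ownership can be recovered:
    let col = VecColumn::from(vec![1u64, 2, 3]);
    let values: Vec<u64> = col.into();
    assert_eq!(values, vec![1, 2, 3]);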
@@ -3,7 +3,7 @@ mod merge_mapping;
mod term_merger;

use std::collections::{BTreeMap, HashSet};
use std::io;
use std::io::{self};
use std::net::Ipv6Addr;
use std::sync::Arc;
@@ -156,8 +156,15 @@ fn merge_column(
|
||||
column_values.push(None);
|
||||
}
|
||||
}
|
||||
let merged_column_index =
|
||||
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
|
||||
let num_values: u32 = column_values
|
||||
.iter()
|
||||
.map(|vals| vals.as_ref().map(|idx| idx.num_vals()).unwrap_or(0))
|
||||
.sum();
|
||||
let merged_column_index = crate::column_index::merge_column_index(
|
||||
&column_indexes[..],
|
||||
merge_row_order,
|
||||
num_values,
|
||||
);
|
||||
let merge_column_values = MergedColumnValues {
|
||||
column_indexes: &column_indexes[..],
|
||||
column_values: &column_values[..],
|
||||
@@ -183,8 +190,15 @@ fn merge_column(
|
||||
}
|
||||
}
|
||||
|
||||
let merged_column_index =
|
||||
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
|
||||
let num_values: u32 = column_values
|
||||
.iter()
|
||||
.map(|vals| vals.as_ref().map(|idx| idx.num_vals()).unwrap_or(0))
|
||||
.sum();
|
||||
let merged_column_index = crate::column_index::merge_column_index(
|
||||
&column_indexes[..],
|
||||
merge_row_order,
|
||||
num_values,
|
||||
);
|
||||
let merge_column_values = MergedColumnValues {
|
||||
column_indexes: &column_indexes[..],
|
||||
column_values: &column_values,
|
||||
@@ -214,8 +228,19 @@ fn merge_column(
|
||||
}
|
||||
}
|
||||
}
|
||||
let merged_column_index =
|
||||
crate::column_index::merge_column_index(&column_indexes[..], merge_row_order);
|
||||
let num_values: u32 = bytes_columns
|
||||
.iter()
|
||||
.map(|vals| {
|
||||
vals.as_ref()
|
||||
.map(|idx| idx.term_ord_column.values.num_vals())
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.sum();
|
||||
let merged_column_index = crate::column_index::merge_column_index(
|
||||
&column_indexes[..],
|
||||
merge_row_order,
|
||||
num_values,
|
||||
);
|
||||
merge_bytes_or_str_column(merged_column_index, &bytes_columns, merge_row_order, wrt)?;
|
||||
}
|
||||
}
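
The recurring pattern in the three branches above: `merge_column_index` now receives the total value count up front, summed over per-segment columns that may be absent. A generic sketch of that counting step (the `num_vals` accessor is abstracted here for illustration; it is not the crate's API):

fn total_num_values<C>(columns: &[Option<C>], num_vals: impl Fn(&C) -> u32) -> u32 {
    // Segments that do not carry the column contribute zero values.
    columns
        .iter()
        .map(|col| col.as_ref().map(&num_vals).unwrap_or(0))
        .sum()
}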

@@ -1,7 +1,3 @@
use std::collections::BTreeMap;

use itertools::Itertools;

use super::*;
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};

@@ -269,7 +269,8 @@ impl StrOrBytesColumnWriter {
        dictionaries: &mut [DictionaryBuilder],
        arena: &mut MemoryArena,
    ) {
        let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
        let unordered_id =
            dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes, arena);
        self.column_writer.record(doc, unordered_id, arena);
    }

@@ -13,9 +13,7 @@ pub(crate) use serializer::ColumnarSerializer;
use stacker::{Addr, ArenaHashMap, MemoryArena};

use crate::column_index::SerializableColumnIndex;
use crate::column_values::{
    ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
};
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
use crate::columnar::column_type::ColumnType;
use crate::columnar::writer::column_writers::{
    ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
@@ -61,22 +59,6 @@ pub struct ColumnarWriter {
    buffers: SpareBuffers,
}

#[inline]
fn mutate_or_create_column<V, TMutator>(
    arena_hash_map: &mut ArenaHashMap,
    column_name: &str,
    updater: TMutator,
) where
    V: Copy + 'static,
    TMutator: FnMut(Option<V>) -> V,
{
    assert!(
        !column_name.as_bytes().contains(&0u8),
        "key may not contain the 0 byte"
    );
    arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
}

impl ColumnarWriter {
    pub fn mem_usage(&self) -> usize {
        self.arena.mem_usage()
@@ -177,9 +159,8 @@ impl ColumnarWriter {
                    },
                    &mut self.dictionaries,
                );
                mutate_or_create_column(
                    hash_map,
                    column_name,
                hash_map.mutate_or_create(
                    column_name.as_bytes(),
                    |column_opt: Option<StrOrBytesColumnWriter>| {
                        let mut column_writer = if let Some(column_writer) = column_opt {
                            column_writer
@@ -194,24 +175,21 @@
                );
            }
            ColumnType::Bool => {
                mutate_or_create_column(
                    &mut self.bool_field_hash_map,
                    column_name,
                self.bool_field_hash_map.mutate_or_create(
                    column_name.as_bytes(),
                    |column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
                );
            }
            ColumnType::DateTime => {
                mutate_or_create_column(
                    &mut self.datetime_field_hash_map,
                    column_name,
                self.datetime_field_hash_map.mutate_or_create(
                    column_name.as_bytes(),
                    |column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
                );
            }
            ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
                let numerical_type = column_type.numerical_type().unwrap();
                mutate_or_create_column(
                    &mut self.numerical_field_hash_map,
                    column_name,
                self.numerical_field_hash_map.mutate_or_create(
                    column_name.as_bytes(),
                    |column_opt: Option<NumericalColumnWriter>| {
                        let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
                        column.force_numerical_type(numerical_type);
@@ -219,9 +197,8 @@ impl ColumnarWriter {
                    },
                );
            }
            ColumnType::IpAddr => mutate_or_create_column(
                &mut self.ip_addr_field_hash_map,
                column_name,
            ColumnType::IpAddr => self.ip_addr_field_hash_map.mutate_or_create(
                column_name.as_bytes(),
                |column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
            ),
        }
@@ -234,9 +211,8 @@ impl ColumnarWriter {
        numerical_value: T,
    ) {
        let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
        mutate_or_create_column(
            hash_map,
            column_name,
        hash_map.mutate_or_create(
            column_name.as_bytes(),
            |column_opt: Option<NumericalColumnWriter>| {
                let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
                column.record_numerical_value(doc, numerical_value.into(), arena);
@@ -246,10 +222,6 @@
    }

    pub fn record_ip_addr(&mut self, doc: RowId, column_name: &str, ip_addr: Ipv6Addr) {
        assert!(
            !column_name.as_bytes().contains(&0u8),
            "key may not contain the 0 byte"
        );
        let (hash_map, arena) = (&mut self.ip_addr_field_hash_map, &mut self.arena);
        hash_map.mutate_or_create(
            column_name.as_bytes(),
@@ -263,24 +235,30 @@ impl ColumnarWriter {

    pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
        let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
        mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
            let mut column: ColumnWriter = column_opt.unwrap_or_default();
            column.record(doc, val, arena);
            column
        });
        hash_map.mutate_or_create(
            column_name.as_bytes(),
            |column_opt: Option<ColumnWriter>| {
                let mut column: ColumnWriter = column_opt.unwrap_or_default();
                column.record(doc, val, arena);
                column
            },
        );
    }

    pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: common::DateTime) {
        let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
        mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
            let mut column: ColumnWriter = column_opt.unwrap_or_default();
            column.record(
                doc,
                NumericalValue::I64(datetime.into_timestamp_nanos()),
                arena,
            );
            column
        });
        hash_map.mutate_or_create(
            column_name.as_bytes(),
            |column_opt: Option<ColumnWriter>| {
                let mut column: ColumnWriter = column_opt.unwrap_or_default();
                column.record(
                    doc,
                    NumericalValue::I64(datetime.into_timestamp_nanos()),
                    arena,
                );
                column
            },
        );
    }
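
The refactor above inlines the former `mutate_or_create_column` helper: each `record_*` method now calls `ArenaHashMap::mutate_or_create` directly. A hedged sketch of the upsert contract, mirroring the calls above (`hash_map`, `doc`, `val`, and `arena` stand for the fields used in those methods): the closure sees `None` the first time a key appears, the previous value afterwards, and returns the value to store. The dropped 0-byte asserts are apparently no longer needed because the serializer's `prepare_key` (further down in this diff) now escapes 0 bytes in column names.

hash_map.mutate_or_create(
    b"my_column",
    |column_opt: Option<ColumnWriter>| {
        let mut column = column_opt.unwrap_or_default();
        column.record(doc, val, arena);
        column
    },
);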

    pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
@@ -305,10 +283,6 @@ impl ColumnarWriter {
    }

    pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
        assert!(
            !column_name.as_bytes().contains(&0u8),
            "key may not contain the 0 byte"
        );
        let (hash_map, arena, dictionaries) = (
            &mut self.bytes_field_hash_map,
            &mut self.arena,
@@ -338,7 +312,7 @@ impl ColumnarWriter {
        let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
            .numerical_field_hash_map
            .iter()
            .map(|(column_name, addr, _)| {
            .map(|(column_name, addr)| {
                let numerical_column_writer: NumericalColumnWriter =
                    self.numerical_field_hash_map.read(addr);
                let column_type = numerical_column_writer.numerical_type().into();
@@ -348,27 +322,27 @@
        columns.extend(
            self.bytes_field_hash_map
                .iter()
                .map(|(term, addr, _)| (term, ColumnType::Bytes, addr)),
                .map(|(term, addr)| (term, ColumnType::Bytes, addr)),
        );
        columns.extend(
            self.str_field_hash_map
                .iter()
                .map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)),
                .map(|(column_name, addr)| (column_name, ColumnType::Str, addr)),
        );
        columns.extend(
            self.bool_field_hash_map
                .iter()
                .map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)),
                .map(|(column_name, addr)| (column_name, ColumnType::Bool, addr)),
        );
        columns.extend(
            self.ip_addr_field_hash_map
                .iter()
                .map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)),
                .map(|(column_name, addr)| (column_name, ColumnType::IpAddr, addr)),
        );
        columns.extend(
            self.datetime_field_hash_map
                .iter()
                .map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)),
                .map(|(column_name, addr)| (column_name, ColumnType::DateTime, addr)),
        );
        columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));

@@ -437,6 +411,7 @@ impl ColumnarWriter {
                        &mut symbol_byte_buffer,
                    ),
                    buffers,
                    &self.arena,
                    &mut column_serializer,
                )?;
                column_serializer.finalize()?;
@@ -490,6 +465,7 @@ impl ColumnarWriter {

// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
// Column: [Column Index, Column Values, column index num bytes U32::LE]
#[allow(clippy::too_many_arguments)]
fn serialize_bytes_or_str_column(
    cardinality: Cardinality,
    num_docs: RowId,
@@ -497,6 +473,7 @@ fn serialize_bytes_or_str_column(
    dictionary_builder: &DictionaryBuilder,
    operation_it: impl Iterator<Item = ColumnOperation<UnorderedId>>,
    buffers: &mut SpareBuffers,
    arena: &MemoryArena,
    wrt: impl io::Write,
) -> io::Result<()> {
    let SpareBuffers {
@@ -505,7 +482,8 @@ fn serialize_bytes_or_str_column(
        ..
    } = buffers;
    let mut counting_writer = CountingWriter::wrap(wrt);
    let term_id_mapping: TermIdMapping = dictionary_builder.serialize(&mut counting_writer)?;
    let term_id_mapping: TermIdMapping =
        dictionary_builder.serialize(arena, &mut counting_writer)?;
    let dictionary_num_bytes: u32 = counting_writer.written_bytes() as u32;
    let mut wrt = counting_writer.finish();
    let operation_iterator = operation_it.map(|symbol: ColumnOperation<UnorderedId>| {
@@ -641,10 +619,7 @@ fn send_to_serialize_column_mappable_to_u128<
    value_index_builders: &mut PreallocatedIndexBuilders,
    values: &mut Vec<T>,
    mut wrt: impl io::Write,
) -> io::Result<()>
where
    for<'a> VecColumn<'a, T>: ColumnValues<T>,
{
) -> io::Result<()> {
    values.clear();
    // TODO: split index and values
    let serializable_column_index = match cardinality {
@@ -669,7 +644,10 @@ where
            let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
            consume_operation_iterator(op_iterator, multivalued_index_builder, values);
            let multivalued_index = multivalued_index_builder.finish(num_rows);
            SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
            SerializableColumnIndex::Multivalued {
                indices: Box::new(multivalued_index),
                stats: Default::default(), // TODO: implement stats for u128
            }
        }
    };
    crate::column::serialize_column_mappable_to_u128(
@@ -697,10 +675,7 @@ fn send_to_serialize_column_mappable_to_u64(
    value_index_builders: &mut PreallocatedIndexBuilders,
    values: &mut Vec<u64>,
    mut wrt: impl io::Write,
) -> io::Result<()>
where
    for<'a> VecColumn<'a, u64>: ColumnValues<u64>,
{
) -> io::Result<()> {
    values.clear();
    let serializable_column_index = match cardinality {
        Cardinality::Full => {
@@ -727,7 +702,10 @@ where
            if sort_values_within_row {
                sort_values_within_row_in_place(multivalued_index, values);
            }
            SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
            SerializableColumnIndex::Multivalued {
                indices: Box::new(multivalued_index),
                stats: None,
            }
        }
    };
    crate::column::serialize_column_mappable_to_u64(

@@ -18,7 +18,12 @@ pub struct ColumnarSerializer<W: io::Write> {
/// code.
fn prepare_key(key: &[u8], column_type: ColumnType, buffer: &mut Vec<u8>) {
    buffer.clear();
    buffer.extend_from_slice(key);
    // Convert 0 bytes to '0' string, as 0 bytes are reserved for the end of the path.
    if key.contains(&0u8) {
        buffer.extend(key.iter().map(|&b| if b == 0 { b'0' } else { b }));
    } else {
        buffer.extend_from_slice(key);
    }
    buffer.push(0u8);
    buffer.push(column_type.to_code());
}
@@ -96,14 +101,13 @@ impl<'a, W: io::Write> io::Write for ColumnSerializer<'a, W> {
#[cfg(test)]
mod tests {
    use super::*;
    use crate::columnar::column_type::ColumnType;

    #[test]
    fn test_prepare_key_bytes() {
        let mut buffer: Vec<u8> = b"somegarbage".to_vec();
        prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
        assert_eq!(buffer.len(), 12);
        assert_eq!(&buffer[..10], b"root\0child");
        assert_eq!(&buffer[..10], b"root0child");
        assert_eq!(buffer[10], 0u8);
        assert_eq!(buffer[11], ColumnType::Str.to_code());
    }
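
The resulting key layout, per the updated `prepare_key` and the test above: the column name with any 0 bytes rewritten to b'0', a single 0-byte separator, then the column type code. A small sketch using the function above (`ColumnType::U64` is one of the variants shown elsewhere in this diff):

let mut buffer = Vec::new();
prepare_key(b"price", ColumnType::U64, &mut buffer);
assert_eq!(&buffer[..5], b"price");
assert_eq!(buffer[5], 0u8); // end-of-name separator
assert_eq!(buffer[6], ColumnType::U64.to_code());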

@@ -1,7 +1,7 @@
use std::io;

use fnv::FnvHashMap;
use sstable::SSTable;
use stacker::{MemoryArena, SharedArenaHashMap};

pub(crate) struct TermIdMapping {
    unordered_to_ord: Vec<OrderedId>,
@@ -31,29 +31,38 @@ pub struct OrderedId(pub u32);
/// mapping.
#[derive(Default)]
pub(crate) struct DictionaryBuilder {
    dict: FnvHashMap<Vec<u8>, UnorderedId>,
    memory_consumption: usize,
    dict: SharedArenaHashMap,
}

impl DictionaryBuilder {
    /// Get or allocate an unordered id.
    /// (This ID is simply an auto-incremented id.)
    pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
        if let Some(term_id) = self.dict.get(term) {
            return *term_id;
        }
        let new_id = UnorderedId(self.dict.len() as u32);
        self.dict.insert(term.to_vec(), new_id);
        self.memory_consumption += term.len();
        self.memory_consumption += 40; // Term Metadata + HashMap overhead
        new_id
    pub fn get_or_allocate_id(&mut self, term: &[u8], arena: &mut MemoryArena) -> UnorderedId {
        let next_id = self.dict.len() as u32;
        let unordered_id = self
            .dict
            .mutate_or_create(term, arena, |unordered_id: Option<u32>| {
                if let Some(unordered_id) = unordered_id {
                    unordered_id
                } else {
                    next_id
                }
            });
        UnorderedId(unordered_id)
    }

    /// Serialize the dictionary into an fst, and return the
    /// `UnorderedId -> TermOrdinal` map.
    pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<TermIdMapping> {
        let mut terms: Vec<(&[u8], UnorderedId)> =
            self.dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
    pub fn serialize<'a, W: io::Write + 'a>(
        &self,
        arena: &MemoryArena,
        wrt: &mut W,
    ) -> io::Result<TermIdMapping> {
        let mut terms: Vec<(&[u8], UnorderedId)> = self
            .dict
            .iter(arena)
            .map(|(k, v)| (k, arena.read(v)))
            .collect();
        terms.sort_unstable_by_key(|(key, _)| *key);
        // TODO Remove the allocation.
        let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
@@ -68,7 +77,7 @@ impl DictionaryBuilder {
    }

    pub(crate) fn mem_usage(&self) -> usize {
        self.memory_consumption
        self.dict.mem_usage()
    }
}

@@ -78,12 +87,13 @@ mod tests {

    #[test]
    fn test_dictionary_builder() {
        let mut arena = MemoryArena::default();
        let mut dictionary_builder = DictionaryBuilder::default();
        let hello_uid = dictionary_builder.get_or_allocate_id(b"hello");
        let happy_uid = dictionary_builder.get_or_allocate_id(b"happy");
        let tax_uid = dictionary_builder.get_or_allocate_id(b"tax");
        let hello_uid = dictionary_builder.get_or_allocate_id(b"hello", &mut arena);
        let happy_uid = dictionary_builder.get_or_allocate_id(b"happy", &mut arena);
        let tax_uid = dictionary_builder.get_or_allocate_id(b"tax", &mut arena);
        let mut buffer = Vec::new();
        let id_mapping = dictionary_builder.serialize(&mut buffer).unwrap();
        let id_mapping = dictionary_builder.serialize(&arena, &mut buffer).unwrap();
        assert_eq!(id_mapping.to_ord(hello_uid), OrderedId(1));
        assert_eq!(id_mapping.to_ord(happy_uid), OrderedId(0));
        assert_eq!(id_mapping.to_ord(tax_uid), OrderedId(2));

@@ -8,7 +8,7 @@ use common::{ByteCount, DateTime, HasLen, OwnedBytes};
use crate::column::{BytesColumn, Column, StrColumn};
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
use crate::columnar::ColumnType;
use crate::{Cardinality, ColumnIndex, NumericalType};
use crate::{Cardinality, ColumnIndex, ColumnValues, NumericalType};

#[derive(Clone)]
pub enum DynamicColumn {
@@ -247,7 +247,12 @@ impl DynamicColumnHandle {
    }

    /// Returns the `u64` fast field reader associated with `fields` of types
    /// Str, u64, i64, f64, bool, or datetime.
    /// Str, u64, i64, f64, bool, ip, or datetime.
    ///
    /// Notice that for IpAddr, the fastfield reader will return the u64 representation of the
    /// IpAddr.
    /// In order to convert back to u128, cast to `CompactSpaceU64Accessor` and call
    /// `compact_to_u128`.
    ///
    /// If not, the fastfield reader will return the u64-value associated with the original
    /// FastValue.
@@ -258,7 +263,10 @@ impl DynamicColumnHandle {
            let column: BytesColumn = crate::column::open_column_bytes(column_bytes)?;
            Ok(Some(column.term_ord_column))
        }
        ColumnType::IpAddr => Ok(None),
        ColumnType::IpAddr => {
            let column = crate::column::open_column_u128_as_compact_u64(column_bytes)?;
            Ok(Some(column))
        }
        ColumnType::Bool
        | ColumnType::I64
        | ColumnType::U64

@@ -1,3 +1,22 @@
//! # Tantivy-Columnar
//!
//! `tantivy-columnar` provides columnar storage for tantivy.
//! The crate allows for efficient read operations on specific columns rather than entire records.
//!
//! ## Overview
//!
//! - **columnar**: Reading, writing, and merging multiple columns:
//!   - **[ColumnarWriter]**: Makes it possible to create a new columnar.
//!   - **[ColumnarReader]**: The ColumnarReader makes it possible to access a set of columns
//!     associated to field names.
//!   - **[merge_columnar]**: Contains the functionalities to merge multiple ColumnarReader or
//!     segments into a single one.
//!
//! - **column**: A single column, which contains
//!   - [column_index]: Resolves the rows for a document id. Manages the cardinality of the
//!     column.
//!   - [column_values]: Stores the values of a column in a dense format.

#![cfg_attr(all(feature = "unstable", test), feature(test))]

#[cfg(test)]
@@ -12,7 +31,7 @@ use std::io;

mod block_accessor;
mod column;
mod column_index;
pub mod column_index;
pub mod column_values;
mod columnar;
mod dictionary;
@@ -94,6 +113,9 @@ impl Cardinality {
    pub fn is_multivalue(&self) -> bool {
        matches!(self, Cardinality::Multivalued)
    }
    pub fn is_full(&self) -> bool {
        matches!(self, Cardinality::Full)
    }
    pub(crate) fn to_code(self) -> u8 {
        self as u8
    }

@@ -26,7 +26,7 @@ fn test_dataframe_writer_str() {
    assert_eq!(columnar.num_columns(), 1);
    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
    assert_eq!(cols.len(), 1);
    assert_eq!(cols[0].num_bytes(), 87);
    assert_eq!(cols[0].num_bytes(), 73);
}

#[test]
@@ -40,7 +40,7 @@ fn test_dataframe_writer_bytes() {
    assert_eq!(columnar.num_columns(), 1);
    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
    assert_eq!(cols.len(), 1);
    assert_eq!(cols[0].num_bytes(), 87);
    assert_eq!(cols[0].num_bytes(), 73);
}

#[test]
@@ -330,9 +330,9 @@ fn bytes_strategy() -> impl Strategy<Value = &'static [u8]> {
// A random column value
fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
    prop_oneof![
        10 => string_strategy().prop_map(|s| ColumnValue::Str(s)),
        1 => bytes_strategy().prop_map(|b| ColumnValue::Bytes(b)),
        40 => num_strategy().prop_map(|n| ColumnValue::Numerical(n)),
        10 => string_strategy().prop_map(ColumnValue::Str),
        1 => bytes_strategy().prop_map(ColumnValue::Bytes),
        40 => num_strategy().prop_map(ColumnValue::Numerical),
        1 => (1u16..3u16).prop_map(|ip_addr_byte| ColumnValue::IpAddr(Ipv6Addr::new(
            127,
            0,
@@ -343,7 +343,7 @@ fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
            0,
            ip_addr_byte
        ))),
        1 => any::<bool>().prop_map(|b| ColumnValue::Bool(b)),
        1 => any::<bool>().prop_map(ColumnValue::Bool),
        1 => (0_679_723_993i64..1_679_723_995i64)
            .prop_map(|val| { ColumnValue::DateTime(DateTime::from_timestamp_secs(val)) })
    ]
@@ -419,8 +419,8 @@ fn build_columnar_with_mapping(
    columnar_writer
        .serialize(num_docs, old_to_new_row_ids_opt, &mut buffer)
        .unwrap();
    let columnar_reader = ColumnarReader::open(buffer).unwrap();
    columnar_reader

    ColumnarReader::open(buffer).unwrap()
}

fn build_columnar(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
@@ -738,45 +738,50 @@ proptest! {
    #![proptest_config(ProptestConfig::with_cases(1000))]
    #[test]
    fn test_columnar_merge_proptest(columnar_docs in proptest::collection::vec(columnar_docs_strategy(), 2..=3)) {
        let columnar_readers: Vec<ColumnarReader> = columnar_docs.iter()
            .map(|docs| build_columnar(&docs[..]))
            .collect::<Vec<_>>();
        let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
        let mut output: Vec<u8> = Vec::new();
        let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]).into();
        crate::merge_columnar(&columnar_readers_arr[..], &[], stack_merge_order, &mut output).unwrap();
        let merged_columnar = ColumnarReader::open(output).unwrap();
        let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().cloned().flatten().collect();
        let expected_merged_columnar = build_columnar(&concat_rows[..]);
        assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
        test_columnar_docs(columnar_docs);
    }
}

#[test]
fn test_columnar_merging_empty_columnar() {
    let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> =
        vec![vec![], vec![vec![("c1", ColumnValue::Str("a"))]]];
fn test_columnar_docs(columnar_docs: Vec<Vec<Vec<(&'static str, ColumnValue)>>>) {
    let columnar_readers: Vec<ColumnarReader> = columnar_docs
        .iter()
        .map(|docs| build_columnar(&docs[..]))
        .collect::<Vec<_>>();
    let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
    let mut output: Vec<u8> = Vec::new();
    let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]);
    let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]).into();
    crate::merge_columnar(
        &columnar_readers_arr[..],
        &[],
        crate::MergeRowOrder::Stack(stack_merge_order),
        stack_merge_order,
        &mut output,
    )
    .unwrap();
    let merged_columnar = ColumnarReader::open(output).unwrap();
    let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
        columnar_docs.iter().cloned().flatten().collect();
        columnar_docs.iter().flatten().cloned().collect();
    let expected_merged_columnar = build_columnar(&concat_rows[..]);
    assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
}

#[test]
fn test_columnar_merging_empty_columnar() {
    let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> =
        vec![vec![], vec![vec![("c1", ColumnValue::Str("a"))]]];
    test_columnar_docs(columnar_docs);
}
#[test]
fn test_columnar_merging_simple() {
    let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> = vec![
        vec![],
        vec![vec![
            ("c1", ColumnValue::Numerical(0u64.into())),
            ("c1", ColumnValue::Numerical(0u64.into())),
        ]],
    ];
    test_columnar_docs(columnar_docs);
}

#[test]
fn test_columnar_merging_number_columns() {
    let columnar_docs: Vec<Vec<Vec<(&str, ColumnValue)>>> = vec![
@@ -793,25 +798,7 @@ fn test_columnar_merging_number_columns() {
            vec![("c2", ColumnValue::Numerical(u64::MAX.into()))],
        ],
    ];
    let columnar_readers: Vec<ColumnarReader> = columnar_docs
        .iter()
        .map(|docs| build_columnar(&docs[..]))
        .collect::<Vec<_>>();
    let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
    let mut output: Vec<u8> = Vec::new();
    let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]);
    crate::merge_columnar(
        &columnar_readers_arr[..],
        &[],
        crate::MergeRowOrder::Stack(stack_merge_order),
        &mut output,
    )
    .unwrap();
    let merged_columnar = ColumnarReader::open(output).unwrap();
    let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
        columnar_docs.iter().cloned().flatten().collect();
    let expected_merged_columnar = build_columnar(&concat_rows[..]);
    assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
    test_columnar_docs(columnar_docs);
}

// TODO add non trivial remap and merge

@@ -1,6 +1,6 @@
[package]
name = "tantivy-common"
version = "0.6.0"
version = "0.7.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
@@ -14,7 +14,7 @@ repository = "https://github.com/quickwit-oss/tantivy"

[dependencies]
byteorder = "1.4.3"
ownedbytes = { version= "0.6", path="../ownedbytes" }
ownedbytes = { version= "0.7", path="../ownedbytes" }
async-trait = "0.1"
time = { version = "0.3.10", features = ["serde-well-known"] }
serde = { version = "1.0.136", features = ["derive"] }
@@ -22,3 +22,6 @@ serde = { version = "1.0.136", features = ["derive"] }
[dev-dependencies]
proptest = "1.0.0"
rand = "0.8.4"

[features]
unstable = [] # useful for benches.

@@ -1,6 +1,5 @@
use std::convert::TryInto;
use std::io::Write;
use std::{fmt, io, u64};
use std::{fmt, io};

use ownedbytes::OwnedBytes;

@@ -1,11 +1,12 @@
#![allow(deprecated)]

use std::fmt;
use std::io::{Read, Write};

use serde::{Deserialize, Serialize};
use time::format_description::well_known::Rfc3339;
use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};

use crate::BinarySerializable;

/// Precision with which datetimes are truncated when stored in fast fields. This setting is only
/// relevant for fast fields. In the docstore, datetimes are always saved with nanosecond precision.
#[derive(
@@ -24,9 +25,6 @@ pub enum DateTimePrecision {
    Nanoseconds,
}

#[deprecated(since = "0.20.0", note = "Use `DateTimePrecision` instead")]
pub type DatePrecision = DateTimePrecision;

/// A date/time value with nanoseconds precision.
///
/// This timestamp does not carry any explicit time zone information.
@@ -37,7 +35,7 @@ pub type DatePrecision = DateTimePrecision;
/// All constructors and conversions are provided as explicit
/// functions and not by implementing any `From`/`Into` traits
/// to prevent unintended usage.
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct DateTime {
    // Timestamp in nanoseconds.
    pub(crate) timestamp_nanos: i64,
@@ -164,3 +162,15 @@ impl fmt::Debug for DateTime {
        f.write_str(&utc_rfc3339)
    }
}

impl BinarySerializable for DateTime {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
        let timestamp_micros = self.into_timestamp_micros();
        <i64 as BinarySerializable>::serialize(&timestamp_micros, writer)
    }

    fn deserialize<R: Read>(reader: &mut R) -> std::io::Result<Self> {
        let timestamp_micros = <i64 as BinarySerializable>::deserialize(reader)?;
        Ok(Self::from_timestamp_micros(timestamp_micros))
    }
}
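
A consequence worth noting from the impl above: `BinarySerializable` round-trips `DateTime` through microseconds, so nanosecond precision is truncated on serialization. A hedged sketch, assuming the usual `from_timestamp_nanos`/`from_timestamp_micros` constructors on this type:

let dt = DateTime::from_timestamp_nanos(1_655_899_230_000_000_999);
let mut buf = Vec::new();
dt.serialize(&mut buf).unwrap();
let roundtripped = DateTime::deserialize(&mut &buf[..]).unwrap();
// The trailing 999 nanoseconds are lost in the micros round-trip.
assert_eq!(roundtripped, DateTime::from_timestamp_micros(1_655_899_230_000_000));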

common/src/json_path_writer.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
use crate::replace_in_place;

/// Separates the different segments of a json path.
pub const JSON_PATH_SEGMENT_SEP: u8 = 1u8;
pub const JSON_PATH_SEGMENT_SEP_STR: &str =
    unsafe { std::str::from_utf8_unchecked(&[JSON_PATH_SEGMENT_SEP]) };

/// Separates the json path and the value in
/// a JSON term binary representation.
pub const JSON_END_OF_PATH: u8 = 0u8;
pub const JSON_END_OF_PATH_STR: &str =
    unsafe { std::str::from_utf8_unchecked(&[JSON_END_OF_PATH]) };

/// `JsonPathWriter` creates flattened json paths for tantivy.
#[derive(Clone, Debug, Default)]
pub struct JsonPathWriter {
    path: String,
    indices: Vec<usize>,
    expand_dots: bool,
}

impl JsonPathWriter {
    pub fn with_expand_dots(expand_dots: bool) -> Self {
        JsonPathWriter {
            path: String::new(),
            indices: Vec::new(),
            expand_dots,
        }
    }

    pub fn new() -> Self {
        JsonPathWriter {
            path: String::new(),
            indices: Vec::new(),
            expand_dots: false,
        }
    }

    /// When expand_dots is enabled, a json object like
    /// `{"k8s.node.id": 5}` is processed as if it was
    /// `{"k8s": {"node": {"id": 5}}}`.
    /// This option has the merit of allowing users to
    /// write queries like `k8s.node.id:5`.
    /// On the other hand, enabling that feature can lead to
    /// ambiguity.
    #[inline]
    pub fn set_expand_dots(&mut self, expand_dots: bool) {
        self.expand_dots = expand_dots;
    }

    /// Push a new segment to the path.
    #[inline]
    pub fn push(&mut self, segment: &str) {
        let len_path = self.path.len();
        self.indices.push(len_path);
        if self.indices.len() > 1 {
            self.path.push(JSON_PATH_SEGMENT_SEP as char);
        }
        self.path.push_str(segment);
        if self.expand_dots {
            // This might include the separation byte, which is ok because it is not a dot.
            let appended_segment = &mut self.path[len_path..];
            // The unsafe below is safe as long as b'.' and JSON_PATH_SEGMENT_SEP are
            // valid single-byte utf8 strings.
            // By utf-8 design, they cannot be part of another codepoint.
            unsafe {
                replace_in_place(b'.', JSON_PATH_SEGMENT_SEP, appended_segment.as_bytes_mut())
            };
        }
    }

    /// Set the end of JSON path marker.
    #[inline]
    pub fn set_end(&mut self) {
        self.path.push_str(JSON_END_OF_PATH_STR);
    }

    /// Remove the last segment. Does nothing if the path is empty.
    #[inline]
    pub fn pop(&mut self) {
        if let Some(last_idx) = self.indices.pop() {
            self.path.truncate(last_idx);
        }
    }

    /// Clear the path.
    #[inline]
    pub fn clear(&mut self) {
        self.path.clear();
        self.indices.clear();
    }

    /// Get the current path.
    #[inline]
    pub fn as_str(&self) -> &str {
        &self.path
    }
}

impl From<JsonPathWriter> for String {
    #[inline]
    fn from(value: JsonPathWriter) -> Self {
        value.path
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn json_path_writer_test() {
        let mut writer = JsonPathWriter::new();
        writer.set_expand_dots(false);

        writer.push("root");
        assert_eq!(writer.as_str(), "root");

        writer.push("child");
        assert_eq!(writer.as_str(), "root\u{1}child");

        writer.pop();
        assert_eq!(writer.as_str(), "root");

        writer.push("k8s.node.id");
        assert_eq!(writer.as_str(), "root\u{1}k8s.node.id");

        writer.set_expand_dots(true);
        writer.pop();
        writer.push("k8s.node.id");
        assert_eq!(writer.as_str(), "root\u{1}k8s\u{1}node\u{1}id");
    }

    #[test]
    fn test_json_path_expand_dots_enabled_pop_segment() {
        let mut json_writer = JsonPathWriter::with_expand_dots(true);
        json_writer.push("hello");
        assert_eq!(json_writer.as_str(), "hello");
        json_writer.push("color.hue");
        assert_eq!(json_writer.as_str(), "hello\x01color\x01hue");
        json_writer.pop();
        assert_eq!(json_writer.as_str(), "hello");
    }
}
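
A short usage sketch of the new writer, mirroring the tests above: one reusable writer walks a nested JSON object via push/pop, and `set_end` appends the 0-byte end-of-path marker used in the term encoding.

let mut path = JsonPathWriter::with_expand_dots(true);
path.push("attributes");
path.push("color.hue"); // expand_dots splits this into two segments
assert_eq!(path.as_str(), "attributes\u{1}color\u{1}hue");
path.set_end(); // appends JSON_END_OF_PATH
let encoded: String = path.into();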

@@ -9,15 +9,15 @@ mod byte_count;
mod datetime;
pub mod file_slice;
mod group_by;
pub mod json_path_writer;
mod serialize;
mod vint;
mod writer;
pub use bitset::*;
pub use byte_count::ByteCount;
#[allow(deprecated)]
pub use datetime::DatePrecision;
pub use datetime::{DateTime, DateTimePrecision};
pub use group_by::GroupByIteratorExtended;
pub use json_path_writer::JsonPathWriter;
pub use ownedbytes::{OwnedBytes, StableDeref};
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use vint::{
@@ -116,6 +116,7 @@ pub fn u64_to_f64(val: u64) -> f64 {
///
/// This function assumes that the needle is rarely contained in the bytes string
/// and offers a fast path if the needle is not present.
#[inline]
pub fn replace_in_place(needle: u8, replacement: u8, bytes: &mut [u8]) {
    if !bytes.contains(&needle) {
        return;
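
Usage sketch of the helper above; it is exactly what `JsonPathWriter::push` relies on for dot expansion:

let mut bytes = *b"k8s.node.id";
replace_in_place(b'.', 1u8, &mut bytes);
assert_eq!(&bytes, b"k8s\x01node\x01id");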

@@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::io::{Read, Write};
use std::{fmt, io};

@@ -249,11 +250,47 @@ impl BinarySerializable for String {
    }
}

impl<'a> BinarySerializable for Cow<'a, str> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        let data: &[u8] = self.as_bytes();
        VInt(data.len() as u64).serialize(writer)?;
        writer.write_all(data)
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, str>> {
        let string_length = VInt::deserialize(reader)?.val() as usize;
        let mut result = String::with_capacity(string_length);
        reader
            .take(string_length as u64)
            .read_to_string(&mut result)?;
        Ok(Cow::Owned(result))
    }
}

impl<'a> BinarySerializable for Cow<'a, [u8]> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.len() as u64).serialize(writer)?;
        for it in self.iter() {
            it.serialize(writer)?;
        }
        Ok(())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, [u8]>> {
        let num_items = VInt::deserialize(reader)?.val();
        let mut items: Vec<u8> = Vec::with_capacity(num_items as usize);
        for _ in 0..num_items {
            let item = u8::deserialize(reader)?;
            items.push(item);
        }
        Ok(Cow::Owned(items))
    }
}
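
A sketch of the round-trip behavior of the two impls above: both write a VInt length followed by the payload, and both deserialize into the owned variant regardless of what was serialized.

use std::borrow::Cow;
let original: Cow<str> = Cow::Borrowed("hello");
let mut buf = Vec::new();
original.serialize(&mut buf).unwrap();
let decoded = <Cow<str>>::deserialize(&mut &buf[..]).unwrap();
assert!(matches!(decoded, Cow::Owned(_))); // always owned after decode
assert_eq!(decoded, "hello");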

#[cfg(test)]
pub mod test {

    use super::{VInt, *};
    use crate::serialize::BinarySerializable;
    use super::*;
    pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
        let mut buffer = Vec::new();
        O::default().serialize(&mut buffer).unwrap();

@@ -151,7 +151,7 @@ pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
    (result, vlen)
}
/// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
pub fn write_u32_vint<W: io::Write + ?Sized>(val: u32, writer: &mut W) -> io::Result<()> {
    let mut buf = [0u8; 8];
    let data = serialize_vint_u32(val, &mut buf);
    writer.write_all(data)
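
The `?Sized` bound added above lets `W` be a trait object, not just a concrete writer; a minimal sketch:

let mut sink: Vec<u8> = Vec::new();
let dyn_writer: &mut dyn io::Write = &mut sink;
write_u32_vint(300, dyn_writer).unwrap(); // W = dyn io::Write now compiles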
|
||||
|
||||
@@ -12,7 +12,7 @@ use tantivy::aggregation::agg_result::AggregationResults;
|
||||
use tantivy::aggregation::AggregationCollector;
|
||||
use tantivy::query::AllQuery;
|
||||
use tantivy::schema::{self, IndexRecordOption, Schema, TextFieldIndexing, FAST};
|
||||
use tantivy::Index;
|
||||
use tantivy::{Index, IndexWriter, TantivyDocument};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Create Schema
|
||||
@@ -132,10 +132,10 @@ fn main() -> tantivy::Result<()> {
|
||||
|
||||
let stream = Deserializer::from_str(data).into_iter::<Value>();
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
let mut num_indexed = 0;
|
||||
for value in stream {
|
||||
let doc = schema.parse_document(&serde_json::to_string(&value.unwrap())?)?;
|
||||
let doc = TantivyDocument::parse_json(&schema, &serde_json::to_string(&value.unwrap())?)?;
|
||||
index_writer.add_document(doc)?;
|
||||
num_indexed += 1;
|
||||
if num_indexed > 4 {
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index, ReloadPolicy};
|
||||
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
@@ -75,7 +75,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Here we give tantivy a budget of `50MB`.
|
||||
// Using a bigger memory_arena for the indexer may increase
|
||||
// throughput, but 50 MB is already plenty.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// Let's index our documents!
|
||||
// We first need a handle on the title and the body field.
|
||||
@@ -87,7 +87,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let title = schema.get_field("title").unwrap();
|
||||
let body = schema.get_field("body").unwrap();
|
||||
|
||||
let mut old_man_doc = Document::default();
|
||||
let mut old_man_doc = TantivyDocument::default();
|
||||
old_man_doc.add_text(title, "The Old Man and the Sea");
|
||||
old_man_doc.add_text(
|
||||
body,
|
||||
@@ -164,7 +164,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// will reload the index automatically after each commit.
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.try_into()?;
|
||||
|
||||
// We now need to acquire a searcher.
|
||||
@@ -217,8 +217,8 @@ fn main() -> tantivy::Result<()> {
|
||||
// the document returned will only contain
|
||||
// a title.
|
||||
for (_score, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("{}", retrieved_doc.to_json(&schema));
|
||||
}
|
||||
|
||||
// We can also get an explanation to understand
|
||||
|
||||
@@ -11,9 +11,10 @@ use columnar::Column;
|
||||
// ---
|
||||
// Importing tantivy...
|
||||
use tantivy::collector::{Collector, SegmentCollector};
|
||||
use tantivy::index::SegmentReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index, Score, SegmentReader};
|
||||
use tantivy::{doc, Index, IndexWriter, Score};
|
||||
|
||||
#[derive(Default)]
|
||||
struct Stats {
|
||||
@@ -142,7 +143,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// this example.
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
index_writer.add_document(doc!(
|
||||
product_name => "Super Broom 2000",
|
||||
product_description => "While it is ok for short distance travel, this broom \
|
||||
|
||||
@@ -6,7 +6,7 @@ use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::tokenizer::NgramTokenizer;
|
||||
use tantivy::{doc, Index};
|
||||
use tantivy::{doc, Index, IndexWriter};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
@@ -62,7 +62,7 @@ fn main() -> tantivy::Result<()> {
|
||||
//
|
||||
// Here we use a buffer of 50MB per thread. Using a bigger
|
||||
// memory arena for the indexer can increase its throughput.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
index_writer.add_document(doc!(
|
||||
title => "The Old Man and the Sea",
|
||||
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
|
||||
@@ -103,8 +103,8 @@ fn main() -> tantivy::Result<()> {
|
||||
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
|
||||
|
||||
for (_, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("{}", retrieved_doc.to_json(&schema));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{DateOptions, Schema, Value, INDEXED, STORED, STRING};
|
||||
use tantivy::Index;
|
||||
use tantivy::schema::{DateOptions, Document, Schema, Value, INDEXED, STORED, STRING};
|
||||
use tantivy::{Index, IndexWriter, TantivyDocument};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
@@ -13,7 +13,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let opts = DateOptions::from(INDEXED)
|
||||
.set_stored()
|
||||
.set_fast()
|
||||
.set_precision(tantivy::DateTimePrecision::Seconds);
|
||||
.set_precision(tantivy::schema::DateTimePrecision::Seconds);
|
||||
// Add `occurred_at` date field type
|
||||
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
|
||||
let event_type = schema_builder.add_text_field("event", STRING | STORED);
|
||||
@@ -22,16 +22,18 @@ fn main() -> tantivy::Result<()> {
|
||||
// # Indexing documents
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
// The dates are passed as string in the RFC3339 format
|
||||
let doc = schema.parse_document(
|
||||
let doc = TantivyDocument::parse_json(
|
||||
&schema,
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T12:53:50.53Z",
|
||||
"event": "pull-request"
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
let doc = schema.parse_document(
|
||||
let doc = TantivyDocument::parse_json(
|
||||
&schema,
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T13:00:00.22Z",
|
||||
"event": "comment"
|
||||
@@ -58,13 +60,15 @@ fn main() -> tantivy::Result<()> {
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
|
||||
assert_eq!(count_docs.len(), 1);
|
||||
for (_score, doc_address) in count_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
assert!(matches!(
|
||||
retrieved_doc.get_first(occurred_at),
|
||||
Some(Value::Date(_))
|
||||
));
|
||||
let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
|
||||
assert!(retrieved_doc
|
||||
.get_first(occurred_at)
|
||||
.unwrap()
|
||||
.as_value()
|
||||
.as_datetime()
|
||||
.is_some(),);
|
||||
assert_eq!(
|
||||
schema.to_json(&retrieved_doc),
|
||||
retrieved_doc.to_json(&schema),
|
||||
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
|
||||
);
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::TermQuery;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index, IndexReader};
|
||||
use tantivy::{doc, Index, IndexReader, IndexWriter};
|
||||
|
||||
// A simple helper function to fetch a single document
|
||||
// given its id from our index.
|
||||
@@ -19,7 +19,7 @@ use tantivy::{doc, Index, IndexReader};
|
||||
fn extract_doc_given_isbn(
|
||||
reader: &IndexReader,
|
||||
isbn_term: &Term,
|
||||
) -> tantivy::Result<Option<Document>> {
|
||||
) -> tantivy::Result<Option<TantivyDocument>> {
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// This is the simplest query you can think of.
|
||||
@@ -69,10 +69,10 @@ fn main() -> tantivy::Result<()> {
|
||||
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// Let's add a couple of documents, for the sake of the example.
|
||||
let mut old_man_doc = Document::default();
|
||||
let mut old_man_doc = TantivyDocument::default();
|
||||
old_man_doc.add_text(title, "The Old Man and the Sea");
|
||||
index_writer.add_document(doc!(
|
||||
isbn => "978-0099908401",
|
||||
@@ -94,7 +94,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Oops our frankenstein doc seems misspelled
|
||||
let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
|
||||
assert_eq!(
|
||||
schema.to_json(&frankenstein_doc_misspelled),
|
||||
frankenstein_doc_misspelled.to_json(&schema),
|
||||
r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
|
||||
);
|
||||
|
||||
@@ -136,7 +136,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// No more typo!
|
||||
let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
|
||||
assert_eq!(
|
||||
schema.to_json(&frankenstein_new_doc),
|
||||
frankenstein_new_doc.to_json(&schema),
|
||||
r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,
|
||||
);
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
use tantivy::collector::FacetCollector;
|
||||
use tantivy::query::{AllQuery, TermQuery};
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index};
|
||||
use tantivy::{doc, Index, IndexWriter};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// Let's create a temporary directory for the sake of this example
|
||||
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer(30_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(30_000_000)?;
|
||||
|
||||
// For convenience, tantivy also comes with a macro to
|
||||
// reduce the boilerplate above.
|
||||
|
||||
@@ -12,7 +12,7 @@ use std::collections::HashSet;
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::BooleanQuery;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, DocId, Index, Score, SegmentReader};
|
||||
use tantivy::{doc, DocId, Index, IndexWriter, Score, SegmentReader};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -23,7 +23,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer(30_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(30_000_000)?;
|
||||
|
||||
index_writer.add_document(doc!(
|
||||
title => "Fried egg",
|
||||
@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
{
|
||||
let facets = vec![
|
||||
let facets = [
|
||||
Facet::from("/ingredient/egg"),
|
||||
Facet::from("/ingredient/oil"),
|
||||
Facet::from("/ingredient/garlic"),
|
||||
@@ -91,13 +91,11 @@ fn main() -> tantivy::Result<()> {
|
||||
.iter()
|
||||
.map(|(_, doc_id)| {
|
||||
searcher
|
||||
.doc(*doc_id)
|
||||
.doc::<TantivyDocument>(*doc_id)
|
||||
.unwrap()
|
||||
.get_first(title)
|
||||
.and_then(|v| v.as_str().map(|el| el.to_string()))
|
||||
.unwrap()
|
||||
.as_text()
|
||||
.unwrap()
|
||||
.to_owned()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
use tantivy::collector::{Count, TopDocs};
|
||||
use tantivy::query::FuzzyTermQuery;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index, ReloadPolicy};
|
||||
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
@@ -66,7 +66,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Here we give tantivy a budget of `50MB`.
|
||||
// Using a bigger memory_arena for the indexer may increase
|
||||
// throughput, but 50 MB is already plenty.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// Let's index our documents!
|
||||
// We first need a handle on the title and the body field.
|
||||
@@ -123,7 +123,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// will reload the index automatically after each commit.
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.try_into()?;
|
||||
|
||||
// We now need to acquire a searcher.
|
||||
@@ -151,10 +151,10 @@ fn main() -> tantivy::Result<()> {
|
||||
assert_eq!(count, 3);
|
||||
assert_eq!(top_docs.len(), 3);
|
||||
for (score, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
// Note that the score is not lower for the fuzzy hit.
|
||||
// There's an issue open for that: https://github.com/quickwit-oss/tantivy/issues/563
|
||||
println!("score {score:?} doc {}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("score {score:?} doc {}", retrieved_doc.to_json(&schema));
|
||||
// score 1.0 doc {"title":["The Diary of Muadib"]}
|
||||
//
|
||||
// score 1.0 doc {"title":["The Diary of a Young Girl"]}
|
||||
|
||||
@@ -61,7 +61,7 @@ fn main() -> tantivy::Result<()> {
|
||||
debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
|
||||
limbs and branches that arch over the pool"
|
||||
))?;
|
||||
println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
|
||||
println!("add doc {i} from thread 1 - opstamp {opstamp}");
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
}
|
||||
Result::<(), TantivyError>::Ok(())
|
||||
@@ -82,7 +82,7 @@ fn main() -> tantivy::Result<()> {
|
||||
body => "Some great book description..."
|
||||
))?
|
||||
};
|
||||
println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
|
||||
println!("add doc {i} from thread 2 - opstamp {opstamp}");
|
||||
thread::sleep(Duration::from_millis(10));
|
||||
}
|
||||
Result::<(), TantivyError>::Ok(())
|
||||
|
||||
@@ -21,7 +21,7 @@ fn main() -> tantivy::Result<()> {
|
||||
}"#;
|
||||
|
||||
// We can parse our document
|
||||
let _mice_and_men_doc = schema.parse_document(mice_and_men_doc_json)?;
|
||||
let _mice_and_men_doc = TantivyDocument::parse_json(&schema, mice_and_men_doc_json)?;
|
||||
|
||||
// Multi-valued field are allowed, they are
|
||||
// expressed in JSON by an array.
|
||||
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
|
||||
"title": ["Frankenstein", "The Modern Prometheus"],
|
||||
"year": 1818
|
||||
}"#;
|
||||
let _frankenstein_doc = schema.parse_document(frankenstein_json)?;
|
||||
let _frankenstein_doc = TantivyDocument::parse_json(&schema, frankenstein_json)?;
|
||||
|
||||
// Note that the schema is saved in your index directory.
|
||||
//
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
use tantivy::collector::Count;
|
||||
use tantivy::query::RangeQuery;
|
||||
use tantivy::schema::{Schema, INDEXED};
|
||||
use tantivy::{doc, Index, Result};
|
||||
use tantivy::{doc, Index, IndexWriter, Result};
|
||||
|
||||
fn main() -> Result<()> {
|
||||
// For the sake of simplicity, this schema will only have 1 field
|
||||
@@ -17,7 +17,7 @@ fn main() -> Result<()> {
|
||||
let index = Index::create_in_ram(schema);
|
||||
let reader = index.reader()?;
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer_with_num_threads(1, 6_000_000)?;
|
||||
for year in 1950u64..2019u64 {
|
||||
index_writer.add_document(doc!(year_field => year))?;
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
use tantivy::collector::{Count, TopDocs};
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Schema, FAST, INDEXED, STORED, STRING};
|
||||
use tantivy::Index;
|
||||
use tantivy::{Index, IndexWriter, TantivyDocument};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
@@ -22,20 +22,22 @@ fn main() -> tantivy::Result<()> {
|
||||
// # Indexing documents
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// ### IPv4
|
||||
// Adding documents that contain an IPv4 address. Notice that the IP addresses are passed as
|
||||
// `String`. Since the field is of type ip, we parse the IP address from the string and store it
|
||||
// internally as IPv6.
|
||||
let doc = schema.parse_document(
|
||||
let doc = TantivyDocument::parse_json(
|
||||
&schema,
|
||||
r#"{
|
||||
"ip": "192.168.0.33",
|
||||
"event_type": "login"
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
let doc = schema.parse_document(
|
||||
let doc = TantivyDocument::parse_json(
|
||||
&schema,
|
||||
r#"{
|
||||
"ip": "192.168.0.80",
|
||||
"event_type": "checkout"
|
||||
@@ -44,7 +46,8 @@ fn main() -> tantivy::Result<()> {
|
||||
index_writer.add_document(doc)?;
|
||||
// ### IPv6
|
||||
// Adding a document that contains an IPv6 address.
|
||||
let doc = schema.parse_document(
|
||||
let doc = TantivyDocument::parse_json(
|
||||
&schema,
|
||||
r#"{
|
||||
"ip": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
|
||||
"event_type": "checkout"
|
||||
|
||||
@@ -7,10 +7,11 @@
 // the list of documents containing a term, getting
 // its term frequency, and accessing its positions.

+use tantivy::postings::Postings;
 // ---
 // Importing tantivy...
 use tantivy::schema::*;
-use tantivy::{doc, DocSet, Index, Postings, TERMINATED};
+use tantivy::{doc, DocSet, Index, IndexWriter, TERMINATED};

 fn main() -> tantivy::Result<()> {
     // We first create a schema for the sake of the
@@ -24,7 +25,7 @@ fn main() -> tantivy::Result<()> {

     let index = Index::create_in_ram(schema);

-    let mut index_writer = index.writer_with_num_threads(1, 50_000_000)?;
+    let mut index_writer: IndexWriter = index.writer_with_num_threads(1, 50_000_000)?;
     index_writer.add_document(doc!(title => "The Old Man and the Sea"))?;
     index_writer.add_document(doc!(title => "Of Mice and Men"))?;
     index_writer.add_document(doc!(title => "The modern Promotheus"))?;
@@ -7,7 +7,7 @@
 use tantivy::collector::{Count, TopDocs};
 use tantivy::query::QueryParser;
 use tantivy::schema::{Schema, FAST, STORED, STRING, TEXT};
-use tantivy::Index;
+use tantivy::{Index, IndexWriter, TantivyDocument};

 fn main() -> tantivy::Result<()> {
     // # Defining the schema
@@ -20,8 +20,9 @@ fn main() -> tantivy::Result<()> {
     // # Indexing documents
     let index = Index::create_in_ram(schema.clone());

-    let mut index_writer = index.writer(50_000_000)?;
-    let doc = schema.parse_document(
+    let mut index_writer: IndexWriter = index.writer(50_000_000)?;
+    let doc = TantivyDocument::parse_json(
+        &schema,
         r#"{
             "timestamp": "2022-02-22T23:20:50.53Z",
             "event_type": "click",
@@ -33,7 +34,8 @@ fn main() -> tantivy::Result<()> {
         }"#,
     )?;
     index_writer.add_document(doc)?;
-    let doc = schema.parse_document(
+    let doc = TantivyDocument::parse_json(
+        &schema,
         r#"{
             "timestamp": "2022-02-22T23:20:51.53Z",
             "event_type": "click",
@@ -1,7 +1,7 @@
 use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
 use tantivy::schema::*;
-use tantivy::{doc, Index, ReloadPolicy, Result};
+use tantivy::{doc, Index, IndexWriter, ReloadPolicy, Result};
 use tempfile::TempDir;

 fn main() -> Result<()> {
@@ -17,7 +17,7 @@ fn main() -> Result<()> {

     let index = Index::create_in_dir(&index_path, schema)?;

-    let mut index_writer = index.writer(50_000_000)?;
+    let mut index_writer: IndexWriter = index.writer(50_000_000)?;

     index_writer.add_document(doc!(
         title => "The Old Man and the Sea",
@@ -51,7 +51,7 @@ fn main() -> Result<()> {

     let reader = index
         .reader_builder()
-        .reload_policy(ReloadPolicy::OnCommit)
+        .reload_policy(ReloadPolicy::OnCommitWithDelay)
         .try_into()?;

     let searcher = reader.searcher();
@@ -67,8 +67,12 @@ fn main() -> Result<()> {
     let mut titles = top_docs
         .into_iter()
         .map(|(_score, doc_address)| {
-            let doc = searcher.doc(doc_address)?;
-            let title = doc.get_first(title).unwrap().as_text().unwrap().to_owned();
+            let doc = searcher.doc::<TantivyDocument>(doc_address)?;
+            let title = doc
+                .get_first(title)
+                .and_then(|v| v.as_str())
+                .unwrap()
+                .to_owned();
             Ok(title)
         })
        .collect::<Result<Vec<_>>>()?;
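Document retrieval is generic now as well. A sketch of fetching a stored field with the typed `Searcher::doc`, assuming the 0.22 API from the hunk above (`title` is a `Field` created elsewhere; the helper name is illustrative):

    use tantivy::schema::Field;
    use tantivy::{DocAddress, Searcher, TantivyDocument};

    fn first_title(searcher: &Searcher, title: Field, addr: DocAddress) -> tantivy::Result<String> {
        // The turbofish (or a `let doc: TantivyDocument` annotation) selects
        // the concrete type to deserialize the stored document into.
        let doc = searcher.doc::<TantivyDocument>(addr)?;
        Ok(doc
            .get_first(title)
            .and_then(|v| v.as_str())
            .unwrap_or_default()
            .to_owned())
    }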
@@ -13,7 +13,7 @@ use tantivy::collector::{Count, TopDocs};
 use tantivy::query::TermQuery;
 use tantivy::schema::*;
 use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
-use tantivy::{doc, Index, ReloadPolicy};
+use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
 use tempfile::TempDir;

 fn pre_tokenize_text(text: &str) -> Vec<Token> {
@@ -38,7 +38,7 @@ fn main() -> tantivy::Result<()> {

     let index = Index::create_in_dir(&index_path, schema.clone())?;

-    let mut index_writer = index.writer(50_000_000)?;
+    let mut index_writer: IndexWriter = index.writer(50_000_000)?;

     // We can create a document manually, by setting the fields
     // one by one in a Document object.
@@ -83,7 +83,7 @@ fn main() -> tantivy::Result<()> {
         }]
     }"#;

-    let short_man_doc = schema.parse_document(short_man_json)?;
+    let short_man_doc = TantivyDocument::parse_json(&schema, short_man_json)?;

     index_writer.add_document(short_man_doc)?;

@@ -94,7 +94,7 @@ fn main() -> tantivy::Result<()> {

     let reader = index
         .reader_builder()
-        .reload_policy(ReloadPolicy::OnCommit)
+        .reload_policy(ReloadPolicy::OnCommitWithDelay)
         .try_into()?;

     let searcher = reader.searcher();
@@ -115,8 +115,8 @@ fn main() -> tantivy::Result<()> {
     // Note that the tokens are not stored along with the original text
     // in the document store
     for (_score, doc_address) in top_docs {
-        let retrieved_doc = searcher.doc(doc_address)?;
-        println!("Document: {}", schema.to_json(&retrieved_doc));
+        let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
+        println!("{}", retrieved_doc.to_json(&schema));
     }

     // In contrary to the previous query, when we search for the "man" term we
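Note that in the last hunk above the direction of JSON serialization flips as well; a minimal before/after sketch using the names from the example itself:

    // 0.21: the schema serializes the document
    println!("Document: {}", schema.to_json(&retrieved_doc));
    // 0.22: the document serializes itself, given the schema
    println!("{}", retrieved_doc.to_json(&schema));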
@@ -10,7 +10,8 @@
 use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
 use tantivy::schema::*;
-use tantivy::{doc, Index, Snippet, SnippetGenerator};
+use tantivy::snippet::{Snippet, SnippetGenerator};
+use tantivy::{doc, Index, IndexWriter};
 use tempfile::TempDir;

 fn main() -> tantivy::Result<()> {
@@ -27,7 +28,7 @@ fn main() -> tantivy::Result<()> {
     // # Indexing documents
     let index = Index::create_in_dir(&index_path, schema)?;

-    let mut index_writer = index.writer(50_000_000)?;
+    let mut index_writer: IndexWriter = index.writer(50_000_000)?;

     // we'll only need one doc for this example.
     index_writer.add_document(doc!(
@@ -54,13 +55,10 @@ fn main() -> tantivy::Result<()> {
     let snippet_generator = SnippetGenerator::create(&searcher, &*query, body)?;

     for (score, doc_address) in top_docs {
-        let doc = searcher.doc(doc_address)?;
+        let doc = searcher.doc::<TantivyDocument>(doc_address)?;
         let snippet = snippet_generator.snippet_from_doc(&doc);
         println!("Document score {score}:");
-        println!(
-            "title: {}",
-            doc.get_first(title).unwrap().as_text().unwrap()
-        );
+        println!("title: {}", doc.get_first(title).unwrap().as_str().unwrap());
         println!("snippet: {}", snippet.to_html());
         println!("custom highlighting: {}", highlight(snippet));
     }
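The snippet types moved from the crate root into a dedicated module; only the import path changes:

    // before: use tantivy::{Snippet, SnippetGenerator};
    use tantivy::snippet::{Snippet, SnippetGenerator};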
@@ -15,7 +15,7 @@ use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
 use tantivy::schema::*;
 use tantivy::tokenizer::*;
-use tantivy::{doc, Index};
+use tantivy::{doc, Index, IndexWriter};

 fn main() -> tantivy::Result<()> {
     // this example assumes you understand the content in `basic_search`
@@ -60,7 +60,7 @@ fn main() -> tantivy::Result<()> {

     index.tokenizers().register("stoppy", tokenizer);

-    let mut index_writer = index.writer(50_000_000)?;
+    let mut index_writer: IndexWriter = index.writer(50_000_000)?;

     let title = schema.get_field("title").unwrap();
     let body = schema.get_field("body").unwrap();
@@ -105,9 +105,9 @@ fn main() -> tantivy::Result<()> {
     let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

     for (score, doc_address) in top_docs {
-        let retrieved_doc = searcher.doc(doc_address)?;
+        let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
         println!("\n==\nDocument score {score}:");
-        println!("{}", schema.to_json(&retrieved_doc));
+        println!("{}", retrieved_doc.to_json(&schema));
     }

     Ok(())
@@ -3,11 +3,12 @@ use std::collections::{HashMap, HashSet};
 use std::sync::{Arc, RwLock, Weak};

 use tantivy::collector::TopDocs;
+use tantivy::index::SegmentId;
 use tantivy::query::QueryParser;
 use tantivy::schema::{Schema, FAST, TEXT};
 use tantivy::{
-    doc, DocAddress, DocId, Index, Opstamp, Searcher, SearcherGeneration, SegmentId, SegmentReader,
-    Warmer,
+    doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration,
+    SegmentReader, Warmer,
 };

 // This example shows how warmers can be used to
@@ -143,7 +144,7 @@ fn main() -> tantivy::Result<()> {
     const SNEAKERS: ProductId = 23222;

     let index = Index::create_in_ram(schema);
-    let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
+    let mut writer: IndexWriter = index.writer_with_num_threads(1, 15_000_000)?;
     writer.add_document(doc!(product_id=>OLIVE_OIL, text=>"cooking olive oil from greece"))?;
     writer.add_document(doc!(product_id=>GLOVES, text=>"kitchen gloves, perfect for cooking"))?;
     writer.add_document(doc!(product_id=>SNEAKERS, text=>"uber sweet sneakers"))?;
@@ -1,7 +1,7 @@
 [package]
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 name = "ownedbytes"
-version = "0.6.0"
+version = "0.7.0"
 edition = "2021"
 description = "Expose data as static slice"
 license = "MIT"

@@ -1,4 +1,3 @@
-use std::convert::TryInto;
 use std::ops::{Deref, Range};
 use std::sync::Arc;
 use std::{fmt, io};
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.21.0"
+version = "0.22.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -81,8 +81,8 @@ where
     T: InputTakeAtPosition + Clone,
     <T as InputTakeAtPosition>::Item: AsChar + Clone,
 {
-    opt_i(nom::character::complete::space0)(input)
-        .map(|(left, (spaces, errors))| (left, (spaces.expect("space0 can't fail"), errors)))
+    opt_i(nom::character::complete::multispace0)(input)
+        .map(|(left, (spaces, errors))| (left, (spaces.expect("multispace0 can't fail"), errors)))
 }

 pub(crate) fn space1_infallible<T>(input: T) -> JResult<T, Option<T>>
@@ -90,7 +90,7 @@ where
     T: InputTakeAtPosition + Clone + InputLength,
     <T as InputTakeAtPosition>::Item: AsChar + Clone,
 {
-    opt_i(nom::character::complete::space1)(input).map(|(left, (spaces, mut errors))| {
+    opt_i(nom::character::complete::multispace1)(input).map(|(left, (spaces, mut errors))| {
         if spaces.is_none() {
             errors.push(LenientErrorInternal {
                 pos: left.input_len(),
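The practical effect of these two hunks: nom's `space0`/`space1` match only spaces and tabs, while `multispace0`/`multispace1` also accept `\r` and `\n`, so a query may now break across lines. A small illustration (assuming nom 7, which this crate depends on):

    use nom::character::complete::{multispace1, space1};
    use nom::IResult;

    fn spaces(i: &str) -> IResult<&str, &str> {
        space1(i) // spaces and tabs only
    }

    fn any_whitespace(i: &str) -> IResult<&str, &str> {
        multispace1(i) // also accepts \r and \n
    }

    fn main() {
        assert!(spaces("\nAND b").is_err());
        assert_eq!(any_whitespace("\nAND b"), Ok(("AND b", "\n")));
    }

This is what makes the new `"a\nAND b"` test case further down parse the same as `"a AND b"`.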
@@ -1,13 +1,14 @@
+use std::borrow::Cow;
 use std::iter::once;

 use nom::branch::alt;
 use nom::bytes::complete::tag;
 use nom::character::complete::{
-    anychar, char, digit1, none_of, one_of, satisfy, space0, space1, u32,
+    anychar, char, digit1, multispace0, multispace1, none_of, one_of, satisfy, u32,
 };
 use nom::combinator::{eof, map, map_res, opt, peek, recognize, value, verify};
 use nom::error::{Error, ErrorKind};
-use nom::multi::{many0, many1, separated_list0, separated_list1};
+use nom::multi::{many0, many1, separated_list0};
 use nom::sequence::{delimited, preceded, separated_pair, terminated, tuple};
 use nom::IResult;

@@ -19,7 +20,7 @@ use crate::Occur;
 // Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to
 // special characters.
 const SPECIAL_CHARS: &[char] = &[
-    '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '!', '\\', '*', ' ',
+    '+', '^', '`', ':', '{', '}', '"', '\'', '[', ']', '(', ')', '!', '\\', '*', ' ',
 ];

 /// consume a field name followed by colon. Return the field name with escape sequence
@@ -41,36 +42,92 @@ fn field_name(inp: &str) -> IResult<&str, String> {
     )(inp)
 }

+const ESCAPE_IN_WORD: &[char] = &['^', '`', ':', '{', '}', '"', '\'', '[', ']', '(', ')', '\\'];
+
+fn interpret_escape(source: &str) -> String {
+    let mut res = String::with_capacity(source.len());
+    let mut in_escape = false;
+    let require_escape = |c: char| c.is_whitespace() || ESCAPE_IN_WORD.contains(&c) || c == '-';
+
+    for c in source.chars() {
+        if in_escape {
+            if !require_escape(c) {
+                // we re-add the escape sequence
+                res.push('\\');
+            }
+            res.push(c);
+            in_escape = false;
+        } else if c == '\\' {
+            in_escape = true;
+        } else {
+            res.push(c);
+        }
+    }
+    res
+}
+
 /// Consume a word outside of any context.
-// TODO should support escape sequences
-fn word(inp: &str) -> IResult<&str, &str> {
+fn word(inp: &str) -> IResult<&str, Cow<str>> {
     map_res(
         recognize(tuple((
-            satisfy(|c| {
-                !c.is_whitespace()
-                    && !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
-            }),
-            many0(satisfy(|c: char| {
-                !c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
-            })),
+            alt((
+                preceded(char('\\'), anychar),
+                satisfy(|c| !c.is_whitespace() && !ESCAPE_IN_WORD.contains(&c) && c != '-'),
+            )),
+            many0(alt((
+                preceded(char('\\'), anychar),
+                satisfy(|c: char| !c.is_whitespace() && !ESCAPE_IN_WORD.contains(&c)),
+            ))),
         ))),
         |s| match s {
             "OR" | "AND" | "NOT" | "IN" => Err(Error::new(inp, ErrorKind::Tag)),
-            _ => Ok(s),
+            s if s.contains('\\') => Ok(Cow::Owned(interpret_escape(s))),
+            s => Ok(Cow::Borrowed(s)),
         },
     )(inp)
 }

-fn word_infallible(delimiter: &str) -> impl Fn(&str) -> JResult<&str, Option<&str>> + '_ {
-    |inp| {
-        opt_i_err(
-            preceded(
-                space0,
-                recognize(many1(satisfy(|c| {
-                    !c.is_whitespace() && !delimiter.contains(c)
-                }))),
-            ),
-            "expected word",
-        )(inp)
-    }
-}
+fn word_infallible(
+    delimiter: &str,
+    emit_error: bool,
+) -> impl Fn(&str) -> JResult<&str, Option<Cow<str>>> + '_ {
+    // emit error is set when receiving an unescaped `:` should emit an error
+
+    move |inp| {
+        map(
+            opt_i_err(
+                preceded(
+                    multispace0,
+                    recognize(many1(alt((
+                        preceded(char::<&str, _>('\\'), anychar),
+                        satisfy(|c| !c.is_whitespace() && !delimiter.contains(c)),
+                    )))),
+                ),
+                "expected word",
+            ),
+            |(opt_s, mut errors)| match opt_s {
+                Some(s) => {
+                    if emit_error
+                        && (s
+                            .as_bytes()
+                            .windows(2)
+                            .any(|window| window[0] != b'\\' && window[1] == b':')
+                            || s.starts_with(':'))
+                    {
+                        errors.push(LenientErrorInternal {
+                            pos: inp.len(),
+                            message: "parsed possible invalid field as term".to_string(),
+                        });
+                    }
+                    if s.contains('\\') {
+                        (Some(Cow::Owned(interpret_escape(s))), errors)
+                    } else {
+                        (Some(Cow::Borrowed(s)), errors)
+                    }
+                }
+                None => (None, errors),
+            },
+        )(inp)
+    }
+}
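A worked example of what the new escape handling produces, inferred from the body of `interpret_escape` above: a backslash before a character that requires escaping (whitespace, anything in `ESCAPE_IN_WORD`, or `-`) is consumed, while a backslash before any other character is kept verbatim.

    // ':' is in ESCAPE_IN_WORD, so the escape is interpreted and dropped:
    assert_eq!(interpret_escape(r"abc\:def"), "abc:def");
    // '*' does not require escaping, so the backslash is re-added:
    assert_eq!(interpret_escape(r"abc\*"), r"abc\*");

Both behaviors are pinned by the escape and colon tests added later in this diff.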
@@ -159,7 +216,7 @@ fn simple_term_infallible(
         (value((), char('\'')), simple_quotes),
     ),
     // numbers are parsed with words in this case, as we allow string starting with a -
-    map(word_infallible(delimiter), |(text, errors)| {
+    map(word_infallible(delimiter, true), |(text, errors)| {
         (text.map(|text| (Delimiter::None, text.to_string())), errors)
     }),
 )(inp)
@@ -185,7 +242,7 @@ fn term_or_phrase(inp: &str) -> IResult<&str, UserInputLeaf> {
 fn term_or_phrase_infallible(inp: &str) -> JResult<&str, Option<UserInputLeaf>> {
     map(
         // ~* for slop/prefix, ) inside group or ast tree, ^ if boost
-        tuple_infallible((simple_term_infallible("*)^"), slop_or_prefix_val)),
+        tuple_infallible((simple_term_infallible(")^"), slop_or_prefix_val)),
         |((delimiter_phrase, (slop, prefix)), errors)| {
             let leaf = if let Some((delimiter, phrase)) = delimiter_phrase {
                 Some(
@@ -218,27 +275,14 @@ fn term_or_phrase_infallible(inp: &str) -> JResult<&str, Option<UserInputLeaf>>
 }

 fn term_group(inp: &str) -> IResult<&str, UserInputAst> {
-    let occur_symbol = alt((
-        value(Occur::MustNot, char('-')),
-        value(Occur::Must, char('+')),
-    ));
-
     map(
         tuple((
-            terminated(field_name, space0),
-            delimited(
-                tuple((char('('), space0)),
-                separated_list0(space1, tuple((opt(occur_symbol), term_or_phrase))),
-                char(')'),
-            ),
+            terminated(field_name, multispace0),
+            delimited(tuple((char('('), multispace0)), ast, char(')')),
         )),
-        |(field_name, terms)| {
-            UserInputAst::Clause(
-                terms
-                    .into_iter()
-                    .map(|(occur, leaf)| (occur, leaf.set_field(Some(field_name.clone())).into()))
-                    .collect(),
-            )
+        |(field_name, mut ast)| {
+            ast.set_default_field(field_name);
+            ast
         },
     )(inp)
 }
@@ -250,7 +294,7 @@ fn term_group_precond(inp: &str) -> IResult<&str, (), ()> {
         (),
         peek(tuple((
             field_name,
-            space0,
+            multispace0,
             char('('), // when we are here, we know it can't be anything but a term group
         ))),
     )(inp)
@@ -258,46 +302,18 @@ fn term_group_precond(inp: &str) -> IResult<&str, (), ()> {
 }

 fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
-    let (mut inp, (field_name, _, _, _)) =
-        tuple((field_name, space0, char('('), space0))(inp).expect("precondition failed");
+    let (inp, (field_name, _, _, _)) =
+        tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");

-    let mut terms = Vec::new();
-    let mut errs = Vec::new();
-
-    let mut first_round = true;
-    loop {
-        let mut space_error = if first_round {
-            first_round = false;
-            Vec::new()
-        } else {
-            let (rest, (_, err)) = space1_infallible(inp)?;
-            inp = rest;
-            err
-        };
-        if inp.is_empty() {
-            errs.push(LenientErrorInternal {
-                pos: inp.len(),
-                message: "missing )".to_string(),
-            });
-            break Ok((inp, (UserInputAst::Clause(terms), errs)));
-        }
-        if let Some(inp) = inp.strip_prefix(')') {
-            break Ok((inp, (UserInputAst::Clause(terms), errs)));
-        }
-        // only append missing space error if we did not reach the end of group
-        errs.append(&mut space_error);
-
-        // here we do the assumption term_or_phrase_infallible always consume something if the
-        // first byte is not `)` or ' '. If it did not, we would end up looping.
-
-        let (rest, ((occur, leaf), mut err)) =
-            tuple_infallible((occur_symbol, term_or_phrase_infallible))(inp)?;
-        errs.append(&mut err);
-        if let Some(leaf) = leaf {
-            terms.push((occur, leaf.set_field(Some(field_name.clone())).into()));
-        }
-        inp = rest;
-    }
+    let res = delimited_infallible(
+        nothing,
+        map(ast_infallible, |(mut ast, errors)| {
+            ast.set_default_field(field_name.to_string());
+            (ast, errors)
+        }),
+        opt_i_err(char(')'), "expected ')'"),
+    )(inp);
+    res
 }

 fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
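The switch from a flat term list to a full `ast` inside `field:(...)`, combined with `set_default_field`, is what lets boolean operators and nesting work inside a field group. Per the test expectations added later in this diff:

    field:(a OR (b AND c))   =>   (?"field":a ?(+"field":b +"field":c))
    field:(a [b TO c])       =>   (*"field":a *"field":["b" TO "c"])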
@@ -305,7 +321,7 @@ fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
         UserInputLeaf::Exists {
             field: String::new(),
         },
-        tuple((space0, char('*'))),
+        tuple((multispace0, char('*'))),
     )(inp)
 }

@@ -314,7 +330,7 @@ fn exists_precond(inp: &str) -> IResult<&str, (), ()> {
         (),
         peek(tuple((
             field_name,
-            space0,
+            multispace0,
             char('*'), // when we are here, we know it can't be anything but a exists
         ))),
     )(inp)
@@ -323,7 +339,7 @@ fn exists_precond(inp: &str) -> IResult<&str, (), ()> {

 fn exists_infallible(inp: &str) -> JResult<&str, UserInputAst> {
     let (inp, (field_name, _, _)) =
-        tuple((field_name, space0, char('*')))(inp).expect("precondition failed");
+        tuple((field_name, multispace0, char('*')))(inp).expect("precondition failed");

     let exists = UserInputLeaf::Exists { field: field_name }.into();
     Ok((inp, (exists, Vec::new())))
@@ -349,7 +365,7 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
     alt_infallible(
         (
             (
-                value((), tuple((tag("IN"), space0, char('[')))),
+                value((), tuple((tag("IN"), multispace0, char('[')))),
                 map(set_infallible, |(set, errs)| (Some(set), errs)),
             ),
             (
@@ -363,15 +379,6 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
         |((field_name, _, leaf), mut errors)| {
             (
                 leaf.map(|leaf| {
-                    if matches!(&leaf, UserInputLeaf::Literal(literal)
-                        if literal.phrase.contains(':') && literal.delimiter == Delimiter::None)
-                        && field_name.is_none()
-                    {
-                        errors.push(LenientErrorInternal {
-                            pos: inp.len(),
-                            message: "parsed possible invalid field as term".to_string(),
-                        });
-                    }
                     if matches!(&leaf, UserInputLeaf::Literal(literal)
                         if literal.phrase == "NOT" && literal.delimiter == Delimiter::None)
                         && field_name.is_none()
@@ -430,8 +437,8 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
     // check for unbounded range in the form of <5, <=10, >5, >=5
     let elastic_unbounded_range = map(
         tuple((
-            preceded(space0, alt((tag(">="), tag("<="), tag("<"), tag(">")))),
-            preceded(space0, range_term_val()),
+            preceded(multispace0, alt((tag(">="), tag("<="), tag("<"), tag(">")))),
+            preceded(multispace0, range_term_val()),
         )),
         |(comparison_sign, bound)| match comparison_sign {
             ">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
@@ -444,7 +451,7 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
     );

     let lower_bound = map(
-        separated_pair(one_of("{["), space0, range_term_val()),
+        separated_pair(one_of("{["), multispace0, range_term_val()),
         |(boundary_char, lower_bound)| {
             if lower_bound == "*" {
                 UserInputBound::Unbounded
@@ -457,7 +464,7 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
     );

     let upper_bound = map(
-        separated_pair(range_term_val(), space0, one_of("}]")),
+        separated_pair(range_term_val(), multispace0, one_of("}]")),
         |(upper_bound, boundary_char)| {
             if upper_bound == "*" {
                 UserInputBound::Unbounded
@@ -469,8 +476,11 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
         },
     );

-    let lower_to_upper =
-        separated_pair(lower_bound, tuple((space1, tag("TO"), space1)), upper_bound);
+    let lower_to_upper = separated_pair(
+        lower_bound,
+        tuple((multispace1, tag("TO"), multispace1)),
+        upper_bound,
+    );

     map(
         alt((elastic_unbounded_range, lower_to_upper)),
@@ -487,17 +497,20 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
     tuple_infallible((
         opt_i(anychar),
         space0_infallible,
-        word_infallible("]}"),
+        word_infallible("]}", false),
         space1_infallible,
         opt_i_err(
-            terminated(tag("TO"), alt((value((), space1), value((), eof)))),
+            terminated(tag("TO"), alt((value((), multispace1), value((), eof)))),
            "missing keyword TO",
         ),
-        word_infallible("]}"),
+        word_infallible("]}", false),
         opt_i_err(one_of("]}"), "missing range delimiter"),
     )),
-    |((lower_bound_kind, _space0, lower, _space1, to, upper, upper_bound_kind), errs)| {
-        let lower_bound = match (lower_bound_kind, lower) {
+    |(
+        (lower_bound_kind, _multispace0, lower, _multispace1, to, upper, upper_bound_kind),
+        errs,
+    )| {
+        let lower_bound = match (lower_bound_kind, lower.as_deref()) {
            (_, Some("*")) => UserInputBound::Unbounded,
            (_, None) => UserInputBound::Unbounded,
            // if it is some, TO was actually the bound (i.e. [TO TO something])
@@ -506,7 +519,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
            (Some('{'), Some(bound)) => UserInputBound::Exclusive(bound.to_string()),
            _ => unreachable!("precondition failed, range did not start with [ or {{"),
         };
-        let upper_bound = match (upper_bound_kind, upper) {
+        let upper_bound = match (upper_bound_kind, upper.as_deref()) {
            (_, Some("*")) => UserInputBound::Unbounded,
            (_, None) => UserInputBound::Unbounded,
            (Some(']'), Some(bound)) => UserInputBound::Inclusive(bound.to_string()),
@@ -523,7 +536,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
         (
             (
                 value((), tag(">=")),
-                map(word_infallible(""), |(bound, err)| {
+                map(word_infallible("", false), |(bound, err)| {
                     (
                         (
                             bound
@@ -537,7 +550,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
             ),
             (
                 value((), tag("<=")),
-                map(word_infallible(""), |(bound, err)| {
+                map(word_infallible("", false), |(bound, err)| {
                     (
                         (
                             UserInputBound::Unbounded,
@@ -551,7 +564,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
             ),
             (
                 value((), tag(">")),
-                map(word_infallible(""), |(bound, err)| {
+                map(word_infallible("", false), |(bound, err)| {
                     (
                         (
                             bound
@@ -565,7 +578,7 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
             ),
             (
                 value((), tag("<")),
-                map(word_infallible(""), |(bound, err)| {
+                map(word_infallible("", false), |(bound, err)| {
                     (
                         (
                             UserInputBound::Unbounded,
@@ -596,10 +609,10 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
 fn set(inp: &str) -> IResult<&str, UserInputLeaf> {
     map(
         preceded(
-            tuple((space0, tag("IN"), space1)),
+            tuple((multispace0, tag("IN"), multispace1)),
             delimited(
-                tuple((char('['), space0)),
-                separated_list0(space1, map(simple_term, |(_, term)| term)),
+                tuple((char('['), multispace0)),
+                separated_list0(multispace1, map(simple_term, |(_, term)| term)),
                 char(']'),
             ),
         ),
@@ -667,7 +680,7 @@ fn leaf(inp: &str) -> IResult<&str, UserInputAst> {
     alt((
         delimited(char('('), ast, char(')')),
         map(char('*'), |_| UserInputAst::from(UserInputLeaf::All)),
-        map(preceded(tuple((tag("NOT"), space1)), leaf), negate),
+        map(preceded(tuple((tag("NOT"), multispace1)), leaf), negate),
         literal,
     ))(inp)
 }
@@ -780,27 +793,23 @@ fn binary_operand(inp: &str) -> IResult<&str, BinaryOperand> {
 }

 fn aggregate_binary_expressions(
-    left: UserInputAst,
-    others: Vec<(BinaryOperand, UserInputAst)>,
-) -> UserInputAst {
-    let mut dnf: Vec<Vec<UserInputAst>> = vec![vec![left]];
-    for (operator, operand_ast) in others {
-        match operator {
-            BinaryOperand::And => {
-                if let Some(last) = dnf.last_mut() {
-                    last.push(operand_ast);
-                }
-            }
-            BinaryOperand::Or => {
-                dnf.push(vec![operand_ast]);
-            }
-        }
-    }
-    if dnf.len() == 1 {
-        UserInputAst::and(dnf.into_iter().next().unwrap()) //< safe
+    left: (Option<Occur>, UserInputAst),
+    others: Vec<(Option<BinaryOperand>, Option<Occur>, UserInputAst)>,
+) -> Result<UserInputAst, LenientErrorInternal> {
+    let mut leafs = Vec::with_capacity(others.len() + 1);
+    leafs.push((None, left.0, Some(left.1)));
+    leafs.extend(
+        others
+            .into_iter()
+            .map(|(operand, occur, ast)| (operand, occur, Some(ast))),
+    );
+    // the parameters we pass should statically guarantee we can't get errors
+    // (no prefix BinaryOperand is provided)
+    let (res, mut errors) = aggregate_infallible_expressions(leafs);
+    if errors.is_empty() {
+        Ok(res)
     } else {
-        let conjunctions = dnf.into_iter().map(UserInputAst::and).collect();
-        UserInputAst::or(conjunctions)
+        Err(errors.swap_remove(0))
     }
 }
@@ -816,30 +825,10 @@ fn aggregate_infallible_expressions(
         return (UserInputAst::empty_query(), err);
     }

-    let use_operand = leafs.iter().any(|(operand, _, _)| operand.is_some());
-    let all_operand = leafs
-        .iter()
-        .skip(1)
-        .all(|(operand, _, _)| operand.is_some());
     let early_operand = leafs
         .iter()
         .take(1)
         .all(|(operand, _, _)| operand.is_some());
-    let use_occur = leafs.iter().any(|(_, occur, _)| occur.is_some());
-
-    if use_operand && use_occur {
-        err.push(LenientErrorInternal {
-            pos: 0,
-            message: "Use of mixed occur and boolean operator".to_string(),
-        });
-    }
-
-    if use_operand && !all_operand {
-        err.push(LenientErrorInternal {
-            pos: 0,
-            message: "Missing boolean operator".to_string(),
-        });
-    }

     if early_operand {
         err.push(LenientErrorInternal {
@@ -866,7 +855,15 @@ fn aggregate_infallible_expressions(
                     Some(BinaryOperand::And) => Some(Occur::Must),
                     _ => Some(Occur::Should),
                 };
-                clauses.push(vec![(occur.or(default_op), ast.clone())]);
+                if occur == &Some(Occur::MustNot) && default_op == Some(Occur::Should) {
+                    // if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
+                    clauses.push(vec![(
+                        Some(Occur::Should),
+                        ast.clone().unary(Occur::MustNot),
+                    )])
+                } else {
+                    clauses.push(vec![(occur.or(default_op), ast.clone())]);
+                }
             }
             None => {
                 let default_op = match next_operator {
@@ -874,7 +871,15 @@ fn aggregate_infallible_expressions(
                     Some(BinaryOperand::Or) => Some(Occur::Should),
                     None => None,
                 };
-                clauses.push(vec![(occur.or(default_op), ast.clone())])
+                if occur == &Some(Occur::MustNot) && default_op == Some(Occur::Should) {
+                    // if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
+                    clauses.push(vec![(
+                        Some(Occur::Should),
+                        ast.clone().unary(Occur::MustNot),
+                    )])
+                } else {
+                    clauses.push(vec![(occur.or(default_op), ast.clone())])
+                }
             }
         }
     }
@@ -891,7 +896,12 @@ fn aggregate_infallible_expressions(
             }
         }
         Some(BinaryOperand::Or) => {
-            clauses.push(vec![(last_occur.or(Some(Occur::Should)), last_ast)]);
+            if last_occur == Some(Occur::MustNot) {
+                // if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
+                clauses.push(vec![(Some(Occur::Should), last_ast.unary(Occur::MustNot))]);
+            } else {
+                clauses.push(vec![(last_occur.or(Some(Occur::Should)), last_ast)]);
+            }
         }
         None => clauses.push(vec![(last_occur, last_ast)]),
     }
@@ -917,35 +927,29 @@ fn aggregate_infallible_expressions(
     }
 }

-fn operand_leaf(inp: &str) -> IResult<&str, (BinaryOperand, UserInputAst)> {
-    tuple((
-        terminated(binary_operand, space0),
-        terminated(boosted_leaf, space0),
-    ))(inp)
+fn operand_leaf(inp: &str) -> IResult<&str, (Option<BinaryOperand>, Option<Occur>, UserInputAst)> {
+    map(
+        tuple((
+            terminated(opt(binary_operand), multispace0),
+            terminated(occur_leaf, multispace0),
+        )),
+        |(operand, (occur, ast))| (operand, occur, ast),
+    )(inp)
 }

 fn ast(inp: &str) -> IResult<&str, UserInputAst> {
-    let boolean_expr = map(
-        separated_pair(boosted_leaf, space1, many1(operand_leaf)),
+    let boolean_expr = map_res(
+        separated_pair(occur_leaf, multispace1, many1(operand_leaf)),
         |(left, right)| aggregate_binary_expressions(left, right),
     );
-    let whitespace_separated_leaves = map(separated_list1(space1, occur_leaf), |subqueries| {
-        if subqueries.len() == 1 {
-            let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
-            match occur_opt.unwrap_or(Occur::Should) {
-                Occur::Must | Occur::Should => ast,
-                Occur::MustNot => UserInputAst::Clause(vec![(Some(Occur::MustNot), ast)]),
-            }
+    let single_leaf = map(occur_leaf, |(occur, ast)| {
+        if occur == Some(Occur::MustNot) {
+            ast.unary(Occur::MustNot)
         } else {
-            UserInputAst::Clause(subqueries.into_iter().collect())
+            ast
         }
     });

-    delimited(
-        space0,
-        alt((boolean_expr, whitespace_separated_leaves)),
-        space0,
-    )(inp)
+    delimited(multispace0, alt((boolean_expr, single_leaf)), multispace0)(inp)
 }

 fn ast_infallible(inp: &str) -> JResult<&str, UserInputAst> {
@@ -969,7 +973,7 @@ fn ast_infallible(inp: &str) -> JResult<&str, UserInputAst> {
 }

 pub fn parse_to_ast(inp: &str) -> IResult<&str, UserInputAst> {
-    map(delimited(space0, opt(ast), eof), |opt_ast| {
+    map(delimited(multispace0, opt(ast), eof), |opt_ast| {
         rewrite_ast(opt_ast.unwrap_or_else(UserInputAst::empty_query))
     })(inp)
 }
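One behavioral subtlety worth calling out from the aggregation changes above: a `MustNot` leaf under an `OR` is rewritten as a `Should` clause wrapping a nested `MustNot` (the synthesized "ShouldNot"), and mixing occur prefixes with boolean operators is no longer reported as a parse error. In the test notation used below:

    a OR -b               =>   (?a ?(-b))
    -aaa +ccc -a OR b     =>   (-aaa +ccc ?(-a) ?b)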
@@ -1113,6 +1117,9 @@ mod test {
         test_parse_query_to_ast_helper("'www-form-encoded'", "'www-form-encoded'");
         test_parse_query_to_ast_helper("www-form-encoded", "www-form-encoded");
         test_parse_query_to_ast_helper("www-form-encoded", "www-form-encoded");
+        test_parse_query_to_ast_helper("mr james bo?d", "(*mr *james *bo?d)");
+        test_parse_query_to_ast_helper("mr james bo*", "(*mr *james *bo*)");
+        test_parse_query_to_ast_helper("mr james b*d", "(*mr *james *b*d)");
     }

     #[test]
@@ -1142,24 +1149,43 @@ mod test {
     #[test]
     fn test_parse_query_to_ast_binary_op() {
         test_parse_query_to_ast_helper("a AND b", "(+a +b)");
+        test_parse_query_to_ast_helper("a\nAND b", "(+a +b)");
         test_parse_query_to_ast_helper("a OR b", "(?a ?b)");
         test_parse_query_to_ast_helper("a OR b AND c", "(?a ?(+b +c))");
         test_parse_query_to_ast_helper("a AND b AND c", "(+a +b +c)");
-        test_is_parse_err("a OR b aaa", "(?a ?b *aaa)");
-        test_is_parse_err("a AND b aaa", "(?(+a +b) *aaa)");
-        test_is_parse_err("aaa a OR b ", "(*aaa ?a ?b)");
-        test_is_parse_err("aaa ccc a OR b ", "(*aaa *ccc ?a ?b)");
-        test_is_parse_err("aaa a AND b ", "(*aaa ?(+a +b))");
-        test_is_parse_err("aaa ccc a AND b ", "(*aaa *ccc ?(+a +b))");
+        test_parse_query_to_ast_helper("a OR b aaa", "(?a ?b *aaa)");
+        test_parse_query_to_ast_helper("a AND b aaa", "(?(+a +b) *aaa)");
+        test_parse_query_to_ast_helper("aaa a OR b ", "(*aaa ?a ?b)");
+        test_parse_query_to_ast_helper("aaa ccc a OR b ", "(*aaa *ccc ?a ?b)");
+        test_parse_query_to_ast_helper("aaa a AND b ", "(*aaa ?(+a +b))");
+        test_parse_query_to_ast_helper("aaa ccc a AND b ", "(*aaa *ccc ?(+a +b))");
     }

     #[test]
     fn test_parse_mixed_bool_occur() {
-        test_is_parse_err("a OR b +aaa", "(?a ?b +aaa)");
-        test_is_parse_err("a AND b -aaa", "(?(+a +b) -aaa)");
-        test_is_parse_err("+a OR +b aaa", "(+a +b *aaa)");
-        test_is_parse_err("-a AND -b aaa", "(?(-a -b) *aaa)");
-        test_is_parse_err("-aaa +ccc -a OR b ", "(-aaa +ccc -a ?b)");
+        test_parse_query_to_ast_helper("+a OR +b", "(+a +b)");
+
+        test_parse_query_to_ast_helper("a AND -b", "(+a -b)");
+        test_parse_query_to_ast_helper("-a AND b", "(-a +b)");
+        test_parse_query_to_ast_helper("a AND NOT b", "(+a +(-b))");
+        test_parse_query_to_ast_helper("NOT a AND b", "(+(-a) +b)");
+
+        test_parse_query_to_ast_helper("a AND NOT b AND c", "(+a +(-b) +c)");
+        test_parse_query_to_ast_helper("a AND -b AND c", "(+a -b +c)");
+
+        test_parse_query_to_ast_helper("a OR -b", "(?a ?(-b))");
+        test_parse_query_to_ast_helper("-a OR b", "(?(-a) ?b)");
+        test_parse_query_to_ast_helper("a OR NOT b", "(?a ?(-b))");
+        test_parse_query_to_ast_helper("NOT a OR b", "(?(-a) ?b)");
+
+        test_parse_query_to_ast_helper("a OR NOT b OR c", "(?a ?(-b) ?c)");
+        test_parse_query_to_ast_helper("a OR -b OR c", "(?a ?(-b) ?c)");
+
+        test_parse_query_to_ast_helper("a OR b +aaa", "(?a ?b +aaa)");
+        test_parse_query_to_ast_helper("a AND b -aaa", "(?(+a +b) -aaa)");
+        test_parse_query_to_ast_helper("+a OR +b aaa", "(+a +b *aaa)");
+        test_parse_query_to_ast_helper("-a AND -b aaa", "(?(-a -b) *aaa)");
+        test_parse_query_to_ast_helper("-aaa +ccc -a OR b ", "(-aaa +ccc ?(-a) ?b)");
     }

     #[test]
@@ -1179,6 +1205,12 @@ mod test {
         test_parse_query_to_ast_helper("weight: <= 70", "\"weight\":{\"*\" TO \"70\"]");

         test_parse_query_to_ast_helper("weight: <= 70.5", "\"weight\":{\"*\" TO \"70.5\"]");

+        test_parse_query_to_ast_helper(">a", "{\"a\" TO \"*\"}");
+        test_parse_query_to_ast_helper(">=a", "[\"a\" TO \"*\"}");
+        test_parse_query_to_ast_helper("<a", "{\"*\" TO \"a\"}");
+        test_parse_query_to_ast_helper("<=a", "{\"*\" TO \"a\"]");
+        test_parse_query_to_ast_helper("<=bsd", "{\"*\" TO \"bsd\"]");
     }

     #[test]
@@ -1449,8 +1481,18 @@ mod test {

     #[test]
     fn test_parse_query_term_group() {
-        test_parse_query_to_ast_helper(r#"field:(abc)"#, r#"(*"field":abc)"#);
+        test_parse_query_to_ast_helper(r#"field:(abc)"#, r#""field":abc"#);
         test_parse_query_to_ast_helper(r#"field:(+a -"b c")"#, r#"(+"field":a -"field":"b c")"#);
+        test_parse_query_to_ast_helper(r#"field:(a AND "b c")"#, r#"(+"field":a +"field":"b c")"#);
+        test_parse_query_to_ast_helper(r#"field:(a OR "b c")"#, r#"(?"field":a ?"field":"b c")"#);
+        test_parse_query_to_ast_helper(
+            r#"field:(a OR (b AND c))"#,
+            r#"(?"field":a ?(+"field":b +"field":c))"#,
+        );
+        test_parse_query_to_ast_helper(
+            r#"field:(a [b TO c])"#,
+            r#"(*"field":a *"field":["b" TO "c"])"#,
+        );

+        test_is_parse_err(r#"field:(+a -"b c""#, r#"(+"field":a -"field":"b c")"#);
     }
@@ -1602,5 +1644,21 @@ mod test {
             r#"myfield:'hello\"happy\'tax'"#,
             r#""myfield":'hello"happy'tax'"#,
         );
+        // we don't process escape sequence for chars which don't require it
+        test_parse_query_to_ast_helper(r#"abc\*"#, r#"abc\*"#);
     }

+    #[test]
+    fn test_queries_with_colons() {
+        test_parse_query_to_ast_helper(r#""abc:def""#, r#""abc:def""#);
+        test_parse_query_to_ast_helper(r#"'abc:def'"#, r#"'abc:def'"#);
+        test_parse_query_to_ast_helper(r#"abc\:def"#, r#"abc:def"#);
+        test_parse_query_to_ast_helper(r#""abc\:def""#, r#""abc:def""#);
+        test_parse_query_to_ast_helper(r#"'abc\:def'"#, r#"'abc:def'"#);
+    }
+
+    #[test]
+    fn test_invalid_field() {
+        test_is_parse_err(r#"!bc:def"#, "!bc:def");
+    }
 }
@@ -44,6 +44,26 @@ impl UserInputLeaf {
             },
         }
     }

+    pub(crate) fn set_default_field(&mut self, default_field: String) {
+        match self {
+            UserInputLeaf::Literal(ref mut literal) if literal.field_name.is_none() => {
+                literal.field_name = Some(default_field)
+            }
+            UserInputLeaf::All => {
+                *self = UserInputLeaf::Exists {
+                    field: default_field,
+                }
+            }
+            UserInputLeaf::Range { ref mut field, .. } if field.is_none() => {
+                *field = Some(default_field)
+            }
+            UserInputLeaf::Set { ref mut field, .. } if field.is_none() => {
+                *field = Some(default_field)
+            }
+            _ => (), // field was already set, do nothing
+        }
+    }
 }

 impl Debug for UserInputLeaf {
@@ -205,6 +225,16 @@ impl UserInputAst {
     pub fn or(asts: Vec<UserInputAst>) -> UserInputAst {
         UserInputAst::compose(Occur::Should, asts)
     }

+    pub(crate) fn set_default_field(&mut self, field: String) {
+        match self {
+            UserInputAst::Clause(clauses) => clauses
+                .iter_mut()
+                .for_each(|(_, ast)| ast.set_default_field(field.clone())),
+            UserInputAst::Leaf(leaf) => leaf.set_default_field(field),
+            UserInputAst::Boost(ref mut ast, _) => ast.set_default_field(field),
+        }
+    }
 }

 impl From<UserInputLiteral> for UserInputLeaf {
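A short note on `set_default_field`, since its `All` arm is easy to miss: when a bare `*` leaf receives a default field (as happens for the leaves of a `field:(...)` group), it is converted into an `Exists` query on that field rather than a match-all, mirroring the standalone `field:*` syntax handled by `exists()` earlier in this diff. By that logic:

    field:(*)   =>   exists query on "field"

The `Literal`, `Range`, and `Set` arms only fill in the field when none was given, so an explicitly qualified sub-term keeps its own field.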
@@ -1,550 +0,0 @@
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use rand::prelude::SliceRandom;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use rand_distr::Distribution;
|
||||
use serde_json::json;
|
||||
use test::{self, Bencher};
|
||||
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::query::{AllQuery, TermQuery};
|
||||
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
|
||||
use crate::{Index, Term};
|
||||
|
||||
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
enum Cardinality {
|
||||
/// All documents contain exactly one value.
|
||||
/// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
|
||||
#[default]
|
||||
Full = 0,
|
||||
/// All documents contain at most one value.
|
||||
Optional = 1,
|
||||
/// All documents may contain any number of values.
|
||||
Multivalued = 2,
|
||||
/// 1 / 20 documents has a value
|
||||
Sparse = 3,
|
||||
}
|
||||
|
||||
fn get_collector(agg_req: Aggregations) -> AggregationCollector {
|
||||
AggregationCollector::from_aggs(agg_req, Default::default())
|
||||
}
|
||||
|
||||
fn get_test_index_bench(cardinality: Cardinality) -> crate::Result<Index> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_fieldtype = crate::schema::TextOptions::default()
|
||||
.set_indexing_options(
|
||||
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
|
||||
)
|
||||
.set_stored();
|
||||
let text_field = schema_builder.add_text_field("text", text_fieldtype);
|
||||
let json_field = schema_builder.add_json_field("json", FAST);
|
||||
let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
|
||||
let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
|
||||
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
|
||||
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
|
||||
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
|
||||
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
|
||||
let index = Index::create_from_tempdir(schema_builder.build())?;
|
||||
let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
|
||||
|
||||
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
|
||||
|
||||
let many_terms_data = (0..150_000)
|
||||
.map(|num| format!("author{}", num))
|
||||
.collect::<Vec<_>>();
|
||||
{
|
||||
let mut rng = StdRng::from_seed([1u8; 32]);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
|
||||
// To make the different test cases comparable we just change one doc to force the
|
||||
// cardinality
|
||||
if cardinality == Cardinality::Optional {
|
||||
index_writer.add_document(doc!())?;
|
||||
}
|
||||
if cardinality == Cardinality::Multivalued {
|
||||
index_writer.add_document(doc!(
|
||||
json_field => json!({"mixed_type": 10.0}),
|
||||
json_field => json!({"mixed_type": 10.0}),
|
||||
text_field => "cool",
|
||||
text_field => "cool",
|
||||
text_field_many_terms => "cool",
|
||||
text_field_many_terms => "cool",
|
||||
text_field_few_terms => "cool",
|
||||
text_field_few_terms => "cool",
|
||||
score_field => 1u64,
|
||||
score_field => 1u64,
|
||||
score_field_f64 => lg_norm.sample(&mut rng),
|
||||
score_field_f64 => lg_norm.sample(&mut rng),
|
||||
score_field_i64 => 1i64,
|
||||
score_field_i64 => 1i64,
|
||||
))?;
|
||||
}
|
||||
let mut doc_with_value = 1_000_000;
|
||||
if cardinality == Cardinality::Sparse {
|
||||
doc_with_value /= 20;
|
||||
}
|
||||
let val_max = 1_000_000.0;
|
||||
for _ in 0..doc_with_value {
|
||||
let val: f64 = rng.gen_range(0.0..1_000_000.0);
|
||||
let json = if rng.gen_bool(0.1) {
|
||||
// 10% are numeric values
|
||||
json!({ "mixed_type": val })
|
||||
} else {
|
||||
json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
|
||||
};
|
||||
index_writer.add_document(doc!(
|
||||
text_field => "cool",
|
||||
json_field => json,
|
||||
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
|
||||
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
|
||||
score_field => val as u64,
|
||||
score_field_f64 => lg_norm.sample(&mut rng),
|
||||
score_field_i64 => val as i64,
|
||||
))?;
|
||||
if cardinality == Cardinality::Sparse {
|
||||
for _ in 0..20 {
|
||||
index_writer.add_document(doc!(text_field => "cool"))?;
|
||||
}
|
||||
}
|
||||
}
|
||||
// writing the segment
|
||||
index_writer.commit()?;
|
||||
}
|
||||
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
use paste::paste;
|
||||
#[macro_export]
|
||||
macro_rules! bench_all_cardinalities {
|
||||
( $x:ident ) => {
|
||||
paste! {
|
||||
#[bench]
|
||||
fn $x(b: &mut Bencher) {
|
||||
[<$x _card>](b, Cardinality::Full)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn [<$x _opt>](b: &mut Bencher) {
|
||||
[<$x _card>](b, Cardinality::Optional)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn [<$x _multi>](b: &mut Bencher) {
|
||||
[<$x _card>](b, Cardinality::Multivalued)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn [<$x _sparse>](b: &mut Bencher) {
|
||||
[<$x _card>](b, Cardinality::Sparse)
|
||||
}
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_average_u64);
|
||||
|
||||
fn bench_aggregation_average_u64_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let text_field = reader.searcher().schema().get_field("text").unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let term_query = TermQuery::new(
|
||||
Term::from_field_text(text_field, "cool"),
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
|
||||
let agg_req_1: Aggregations = serde_json::from_value(json!({
|
||||
"average": { "avg": { "field": "score", } }
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req_1);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&term_query, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_stats_f64);
|
||||
|
||||
fn bench_aggregation_stats_f64_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let text_field = reader.searcher().schema().get_field("text").unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let term_query = TermQuery::new(
|
||||
Term::from_field_text(text_field, "cool"),
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
|
||||
let agg_req_1: Aggregations = serde_json::from_value(json!({
|
||||
"average_f64": { "stats": { "field": "score_f64", } }
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req_1);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&term_query, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_average_f64);
|
||||
|
||||
fn bench_aggregation_average_f64_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let text_field = reader.searcher().schema().get_field("text").unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let term_query = TermQuery::new(
|
||||
Term::from_field_text(text_field, "cool"),
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
|
||||
let agg_req_1: Aggregations = serde_json::from_value(json!({
|
||||
"average_f64": { "avg": { "field": "score_f64", } }
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req_1);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&term_query, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_percentiles_f64);
|
||||
|
||||
fn bench_aggregation_percentiles_f64_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let agg_req_str = r#"
|
||||
{
|
||||
"mypercentiles": {
|
||||
"percentiles": {
|
||||
"field": "score_f64",
|
||||
"percents": [ 95, 99, 99.9 ]
|
||||
}
|
||||
}
|
||||
} "#;
|
||||
let agg_req_1: Aggregations = serde_json::from_str(agg_req_str).unwrap();
|
||||
|
||||
let collector = get_collector(agg_req_1);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_average_u64_and_f64);
|
||||
|
||||
fn bench_aggregation_average_u64_and_f64_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let text_field = reader.searcher().schema().get_field("text").unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let term_query = TermQuery::new(
|
||||
Term::from_field_text(text_field, "cool"),
|
||||
IndexRecordOption::Basic,
|
||||
);
|
||||
|
||||
let agg_req_1: Aggregations = serde_json::from_value(json!({
|
||||
"average_f64": { "avg": { "field": "score_f64" } },
|
||||
"average": { "avg": { "field": "score" } },
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req_1);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&term_query, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_few);
|
||||
|
||||
fn bench_aggregation_terms_few_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_texts": { "terms": { "field": "text_few_terms" } },
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);
|
||||
|
||||
fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "text_many_terms" },
|
||||
"aggs": {
|
||||
"average_f64": { "avg": { "field": "score_f64" } }
|
||||
}
|
||||
},
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_many_json_mixed_type_with_sub_agg);
|
||||
|
||||
fn bench_aggregation_terms_many_json_mixed_type_with_sub_agg_card(
|
||||
b: &mut Bencher,
|
||||
cardinality: Cardinality,
|
||||
) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "json.mixed_type" },
|
||||
"aggs": {
|
||||
"average_f64": { "avg": { "field": "score_f64" } }
|
||||
}
|
||||
},
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_many2);

fn bench_aggregation_terms_many2_card(b: &mut Bencher, cardinality: Cardinality) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();

    b.iter(|| {
        let agg_req: Aggregations = serde_json::from_value(json!({
            "my_texts": { "terms": { "field": "text_many_terms" } },
        }))
        .unwrap();

        let collector = get_collector(agg_req);

        let searcher = reader.searcher();
        searcher.search(&AllQuery, &collector).unwrap()
    });
}

bench_all_cardinalities!(bench_aggregation_terms_many_order_by_term);

fn bench_aggregation_terms_many_order_by_term_card(b: &mut Bencher, cardinality: Cardinality) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();

    b.iter(|| {
        let agg_req: Aggregations = serde_json::from_value(json!({
            "my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
        }))
        .unwrap();

        let collector = get_collector(agg_req);

        let searcher = reader.searcher();
        searcher.search(&AllQuery, &collector).unwrap()
    });
}

bench_all_cardinalities!(bench_aggregation_range_only);

fn bench_aggregation_range_only_card(b: &mut Bencher, cardinality: Cardinality) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();

    b.iter(|| {
        let agg_req_1: Aggregations = serde_json::from_value(json!({
            "range_f64": { "range": { "field": "score_f64", "ranges": [
                { "from": 3, "to": 7000 },
                { "from": 7000, "to": 20000 },
                { "from": 20000, "to": 30000 },
                { "from": 30000, "to": 40000 },
                { "from": 40000, "to": 50000 },
                { "from": 50000, "to": 60000 }
            ] } },
        }))
        .unwrap();

        let collector = get_collector(agg_req_1);

        let searcher = reader.searcher();
        searcher.search(&AllQuery, &collector).unwrap()
    });
}

bench_all_cardinalities!(bench_aggregation_range_with_avg);

fn bench_aggregation_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();

    b.iter(|| {
        let agg_req_1: Aggregations = serde_json::from_value(json!({
            "rangef64": {
                "range": {
                    "field": "score_f64",
                    "ranges": [
                        { "from": 3, "to": 7000 },
                        { "from": 7000, "to": 20000 },
                        { "from": 20000, "to": 30000 },
                        { "from": 30000, "to": 40000 },
                        { "from": 40000, "to": 50000 },
                        { "from": 50000, "to": 60000 }
                    ]
                },
                "aggs": {
                    "average_f64": { "avg": { "field": "score_f64" } }
                }
            },
        }))
        .unwrap();

        let collector = get_collector(agg_req_1);

        let searcher = reader.searcher();
        searcher.search(&AllQuery, &collector).unwrap()
    });
}

// hard_bounds uses a different algorithm, because it actually limits the collection range
bench_all_cardinalities!(bench_aggregation_histogram_only_hard_bounds);

fn bench_aggregation_histogram_only_hard_bounds_card(
    b: &mut Bencher,
    cardinality: Cardinality,
) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();

    b.iter(|| {
        let agg_req_1: Aggregations = serde_json::from_value(json!({
            "rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
        }))
        .unwrap();

        let collector = get_collector(agg_req_1);
        let searcher = reader.searcher();
        searcher.search(&AllQuery, &collector).unwrap()
    });
}

bench_all_cardinalities!(bench_aggregation_histogram_with_avg);

fn bench_aggregation_histogram_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();

    b.iter(|| {
        let agg_req_1: Aggregations = serde_json::from_value(json!({
            "rangef64": {
                "histogram": { "field": "score_f64", "interval": 100 },
                "aggs": {
                    "average_f64": { "avg": { "field": "score_f64" } }
                }
            }
        }))
        .unwrap();

        let collector = get_collector(agg_req_1);

        let searcher = reader.searcher();
        searcher.search(&AllQuery, &collector).unwrap()
    });
}

bench_all_cardinalities!(bench_aggregation_histogram_only);

fn bench_aggregation_histogram_only_card(b: &mut Bencher, cardinality: Cardinality) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();

    b.iter(|| {
        let agg_req_1: Aggregations = serde_json::from_value(json!({
            "rangef64": {
                "histogram": {
                    "field": "score_f64",
                    "interval": 100 // 1000 buckets
                },
            }
        }))
        .unwrap();

        let collector = get_collector(agg_req_1);

        let searcher = reader.searcher();
        searcher.search(&AllQuery, &collector).unwrap()
    });
}

bench_all_cardinalities!(bench_aggregation_avg_and_range_with_avg);

fn bench_aggregation_avg_and_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
    let index = get_test_index_bench(cardinality).unwrap();
    let reader = index.reader().unwrap();
    let text_field = reader.searcher().schema().get_field("text").unwrap();

    b.iter(|| {
        let term_query = TermQuery::new(
            Term::from_field_text(text_field, "cool"),
            IndexRecordOption::Basic,
        );

        let agg_req_1: Aggregations = serde_json::from_value(json!({
            "rangef64": {
                "range": {
                    "field": "score_f64",
                    "ranges": [
                        { "from": 3, "to": 7000 },
                        { "from": 7000, "to": 20000 },
                        { "from": 20000, "to": 60000 }
                    ]
                },
                "aggs": {
                    "average_in_range": { "avg": { "field": "score" } }
                }
            },
            "average": { "avg": { "field": "score" } }
        }))
        .unwrap();

        let collector = get_collector(agg_req_1);

        let searcher = reader.searcher();
        searcher.search(&term_query, &collector).unwrap()
    });
}
}

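// Editor's note: a minimal sketch of what a helper like `bench_all_cardinalities!`
// could expand to -- one `#[bench]` wrapper per column cardinality, each delegating
// to the matching `*_card` function above. The variant names, the use of the `paste`
// crate, and the exact expansion are assumptions for illustration; the real macro
// lives elsewhere in this bench file.
macro_rules! bench_all_cardinalities_sketch {
    ($name:ident) => {
        paste::paste! {
            #[bench]
            fn [<$name _full>](b: &mut Bencher) {
                [<$name _card>](b, Cardinality::Full);
            }
            #[bench]
            fn [<$name _optional>](b: &mut Bencher) {
                [<$name _card>](b, Cardinality::Optional);
            }
            #[bench]
            fn [<$name _multivalued>](b: &mut Bencher) {
                [<$name _card>](b, Cardinality::Multivalued);
            }
        }
    };
}
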
@@ -73,18 +73,19 @@ impl AggregationLimits {
    /// Create a new ResourceLimitGuard, that will release the memory when dropped.
    pub fn new_guard(&self) -> ResourceLimitGuard {
        ResourceLimitGuard {
            /// The counter which is shared between the aggregations for one request.
            // The counter which is shared between the aggregations for one request.
            memory_consumption: Arc::clone(&self.memory_consumption),
            /// The memory_limit in bytes
            // The memory_limit in bytes
            memory_limit: self.memory_limit,
            allocated_with_the_guard: 0,
        }
    }

    pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
        self.memory_consumption
            .fetch_add(num_bytes, Ordering::Relaxed);
        validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
    pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
        let prev_value = self
            .memory_consumption
            .fetch_add(add_num_bytes, Ordering::Relaxed);
        validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
        Ok(())
    }

@@ -94,11 +95,11 @@ impl AggregationLimits {
}

fn validate_memory_consumption(
    memory_consumption: &AtomicU64,
    memory_consumption: u64,
    memory_limit: ByteCount,
) -> Result<(), AggregationError> {
    // Load the estimated memory consumed by the aggregations
    let memory_consumed: ByteCount = memory_consumption.load(Ordering::Relaxed).into();
    let memory_consumed: ByteCount = memory_consumption.into();
    if memory_consumed > memory_limit {
        return Err(AggregationError::MemoryExceeded {
            limit: memory_limit,
@@ -118,10 +119,11 @@ pub struct ResourceLimitGuard {
}

impl ResourceLimitGuard {
    pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
        self.memory_consumption
            .fetch_add(num_bytes, Ordering::Relaxed);
        validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
    pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
        let prev_value = self
            .memory_consumption
            .fetch_add(add_num_bytes, Ordering::Relaxed);
        validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
        Ok(())
    }
}

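// Editor's note: the change above matters under concurrency. The old code did
// `fetch_add` and then re-loaded the shared counter inside
// `validate_memory_consumption`; between those two steps another thread may add more
// bytes, so the check raced with concurrent updates. Using the previous value returned
// by `fetch_add` makes the check exact for this call. A self-contained sketch of the
// pattern (standard library only):
use std::sync::atomic::{AtomicU64, Ordering};

fn add_and_check(counter: &AtomicU64, add_num_bytes: u64, limit: u64) -> Result<(), String> {
    // `fetch_add` returns the value *before* the addition, so
    // `prev + add_num_bytes` is exactly the total this thread produced.
    let prev = counter.fetch_add(add_num_bytes, Ordering::Relaxed);
    let new_total = prev + add_num_bytes;
    if new_total > limit {
        return Err(format!("memory limit exceeded: {new_total} > {limit}"));
    }
    Ok(())
}
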
@@ -34,8 +34,8 @@ use super::bucket::{
    DateHistogramAggregationReq, HistogramAggregation, RangeAggregation, TermsAggregation,
};
use super::metric::{
    AverageAggregation, CountAggregation, MaxAggregation, MinAggregation,
    PercentilesAggregationReq, StatsAggregation, SumAggregation,
    AverageAggregation, CountAggregation, ExtendedStatsAggregation, MaxAggregation, MinAggregation,
    PercentilesAggregationReq, StatsAggregation, SumAggregation, TopHitsAggregation,
};

/// The top-level aggregation request structure, which contains [`Aggregation`] and their user
@@ -93,7 +93,12 @@ impl Aggregation {
    }

    fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
        fast_field_names.insert(self.agg.get_fast_field_name().to_string());
        fast_field_names.extend(
            self.agg
                .get_fast_field_names()
                .iter()
                .map(|s| s.to_string()),
        );
        fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
    }
}

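// Editor's note: a sketch of the collection pattern used above. Each aggregation may
// now reference several fast fields (e.g. `top_hits` sorts on many), so names are
// gathered into a `HashSet` recursively over the sub-aggregation tree. Types are
// simplified; `Agg`/`fields` are stand-ins for `Aggregation`/`get_fast_field_names()`.
use std::collections::HashSet;

struct Agg {
    fields: Vec<String>,
    sub_aggs: Vec<Agg>,
}

fn collect_field_names(agg: &Agg, out: &mut HashSet<String>) {
    // One aggregation can contribute multiple field names.
    out.extend(agg.fields.iter().cloned());
    for sub in &agg.sub_aggs {
        collect_field_names(sub, out); // recurse into sub-aggregations
    }
}
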
@@ -141,29 +146,39 @@ pub enum AggregationVariants {
    /// extracted values.
    #[serde(rename = "stats")]
    Stats(StatsAggregation),
    /// Computes a collection of extended statistics (`min`, `max`, `sum`, `count`, `avg`,
    /// `sum_of_squares`, `variance`, `variance_sampling`, `std_deviation`,
    /// `std_deviation_sampling`) over the extracted values.
    #[serde(rename = "extended_stats")]
    ExtendedStats(ExtendedStatsAggregation),
    /// Computes the sum of the extracted values.
    #[serde(rename = "sum")]
    Sum(SumAggregation),
    /// Computes the sum of the extracted values.
    #[serde(rename = "percentiles")]
    Percentiles(PercentilesAggregationReq),
    /// Finds the top k values matching some order
    #[serde(rename = "top_hits")]
    TopHits(TopHitsAggregation),
}

impl AggregationVariants {
    /// Returns the name of the field used by the aggregation.
    pub fn get_fast_field_name(&self) -> &str {
    /// Returns the names of the fields used by the aggregation.
    pub fn get_fast_field_names(&self) -> Vec<&str> {
        match self {
            AggregationVariants::Terms(terms) => terms.field.as_str(),
            AggregationVariants::Range(range) => range.field.as_str(),
            AggregationVariants::Histogram(histogram) => histogram.field.as_str(),
            AggregationVariants::DateHistogram(histogram) => histogram.field.as_str(),
            AggregationVariants::Average(avg) => avg.field_name(),
            AggregationVariants::Count(count) => count.field_name(),
            AggregationVariants::Max(max) => max.field_name(),
            AggregationVariants::Min(min) => min.field_name(),
            AggregationVariants::Stats(stats) => stats.field_name(),
            AggregationVariants::Sum(sum) => sum.field_name(),
            AggregationVariants::Percentiles(per) => per.field_name(),
            AggregationVariants::Terms(terms) => vec![terms.field.as_str()],
            AggregationVariants::Range(range) => vec![range.field.as_str()],
            AggregationVariants::Histogram(histogram) => vec![histogram.field.as_str()],
            AggregationVariants::DateHistogram(histogram) => vec![histogram.field.as_str()],
            AggregationVariants::Average(avg) => vec![avg.field_name()],
            AggregationVariants::Count(count) => vec![count.field_name()],
            AggregationVariants::Max(max) => vec![max.field_name()],
            AggregationVariants::Min(min) => vec![min.field_name()],
            AggregationVariants::Stats(stats) => vec![stats.field_name()],
            AggregationVariants::ExtendedStats(extended_stats) => vec![extended_stats.field_name()],
            AggregationVariants::Sum(sum) => vec![sum.field_name()],
            AggregationVariants::Percentiles(per) => vec![per.field_name()],
            AggregationVariants::TopHits(top_hits) => top_hits.field_names(),
        }
    }

@@ -188,6 +203,12 @@ impl AggregationVariants {
            _ => None,
        }
    }
    pub(crate) fn as_top_hits(&self) -> Option<&TopHitsAggregation> {
        match &self {
            AggregationVariants::TopHits(top_hits) => Some(top_hits),
            _ => None,
        }
    }

    pub(crate) fn as_percentile(&self) -> Option<&PercentilesAggregationReq> {
        match &self {

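// Editor's note: the new `extended_stats` variant above reports several variance and
// standard-deviation flavors. A minimal sketch of the usual formulas from
// (count, sum, sum_of_squares); this mirrors the documented metric names, not
// tantivy's exact implementation.
fn extended_stats(count: u64, sum: f64, sum_of_squares: f64) -> (f64, f64, f64, f64) {
    let n = count as f64;
    let avg = sum / n;
    // population variance: E[x^2] - E[x]^2
    let variance = sum_of_squares / n - avg * avg;
    // sampling (Bessel-corrected) variance uses n - 1 in the denominator
    let variance_sampling = (sum_of_squares - sum * sum / n) / (n - 1.0);
    (
        variance,                  // `variance`
        variance.sqrt(),           // `std_deviation`
        variance_sampling,         // `variance_sampling`
        variance_sampling.sqrt(),  // `std_deviation_sampling`
    )
}
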
@@ -1,6 +1,9 @@
//! This will enhance the request tree with access to the fastfield and metadata.

use columnar::{Column, ColumnBlockAccessor, ColumnType, StrColumn};
use std::collections::HashMap;
use std::io;

use columnar::{Column, ColumnBlockAccessor, ColumnType, DynamicColumn, StrColumn};

use super::agg_limits::ResourceLimitGuard;
use super::agg_req::{Aggregation, AggregationVariants, Aggregations};
@@ -8,13 +11,14 @@ use super::bucket::{
    DateHistogramAggregationReq, HistogramAggregation, RangeAggregation, TermsAggregation,
};
use super::metric::{
    AverageAggregation, CountAggregation, MaxAggregation, MinAggregation, StatsAggregation,
    SumAggregation,
    AverageAggregation, CountAggregation, ExtendedStatsAggregation, MaxAggregation, MinAggregation,
    StatsAggregation, SumAggregation,
};
use super::segment_agg_result::AggregationLimits;
use super::VecWithNames;
use crate::aggregation::{f64_to_fastfield_u64, Key};
use crate::SegmentReader;
use crate::index::SegmentReader;
use crate::SegmentOrdinal;

#[derive(Default)]
pub(crate) struct AggregationsWithAccessor {
@@ -32,6 +36,7 @@ impl AggregationsWithAccessor {
}

pub struct AggregationWithAccessor {
    pub(crate) segment_ordinal: SegmentOrdinal,
    /// In general there can be buckets without fast field access, e.g. buckets that are created
    /// based on search terms. That is not the case currently, but eventually this needs to be
    /// Option or moved.
@@ -44,10 +49,16 @@ pub struct AggregationWithAccessor {
    pub(crate) limits: ResourceLimitGuard,
    pub(crate) column_block_accessor: ColumnBlockAccessor<u64>,
    /// Used for missing term aggregation, which checks all columns for existence.
    /// And also for `top_hits` aggregation, which may sort on multiple fields.
    /// By convention the missing aggregation is chosen, when this property is set
    /// (instead of being set in `agg`).
    /// If this needs to be used by other aggregations, we need to refactor this.
    pub(crate) accessors: Vec<Column<u64>>,
    // NOTE: we can make all other aggregations use this instead of the `accessor` and `field_type`
    // (making them obsolete) But will it have a performance impact?
    pub(crate) accessors: Vec<(Column<u64>, ColumnType)>,
    /// Map field names to all associated column accessors.
    /// This field is used for `docvalue_fields`, which is currently only supported for `top_hits`.
    pub(crate) value_accessors: HashMap<String, Vec<DynamicColumn>>,
    pub(crate) agg: Aggregation,
}

@@ -57,19 +68,55 @@ impl AggregationWithAccessor {
        agg: &Aggregation,
        sub_aggregation: &Aggregations,
        reader: &SegmentReader,
        segment_ordinal: SegmentOrdinal,
        limits: AggregationLimits,
    ) -> crate::Result<Vec<AggregationWithAccessor>> {
        let add_agg_with_accessor = |accessor: Column<u64>,
        let mut agg = agg.clone();

        let add_agg_with_accessor = |agg: &Aggregation,
                                     accessor: Column<u64>,
                                     column_type: ColumnType,
                                     aggs: &mut Vec<AggregationWithAccessor>|
         -> crate::Result<()> {
            let res = AggregationWithAccessor {
                segment_ordinal,
                accessor,
                accessors: Vec::new(),
                accessors: Default::default(),
                value_accessors: Default::default(),
                field_type: column_type,
                sub_aggregation: get_aggs_with_segment_accessor_and_validate(
                    sub_aggregation,
                    reader,
                    segment_ordinal,
                    &limits,
                )?,
                agg: agg.clone(),
                limits: limits.new_guard(),
                missing_value_for_accessor: None,
                str_dict_column: None,
                column_block_accessor: Default::default(),
            };
            aggs.push(res);
            Ok(())
        };

        let add_agg_with_accessors = |agg: &Aggregation,
                                      accessors: Vec<(Column<u64>, ColumnType)>,
                                      aggs: &mut Vec<AggregationWithAccessor>,
                                      value_accessors: HashMap<String, Vec<DynamicColumn>>|
         -> crate::Result<()> {
            let (accessor, field_type) = accessors.first().expect("at least one accessor");
            let res = AggregationWithAccessor {
                segment_ordinal,
                // TODO: We should do away with the `accessor` field altogether
                accessor: accessor.clone(),
                value_accessors,
                field_type: *field_type,
                accessors,
                sub_aggregation: get_aggs_with_segment_accessor_and_validate(
                    sub_aggregation,
                    reader,
                    segment_ordinal,
                    &limits,
                )?,
                agg: agg.clone(),

@@ -84,32 +131,36 @@ impl AggregationWithAccessor {

        let mut res: Vec<AggregationWithAccessor> = Vec::new();
        use AggregationVariants::*;
        match &agg.agg {

        match agg.agg {
            Range(RangeAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
                add_agg_with_accessor(accessor, column_type, &mut res)?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            Histogram(HistogramAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
                add_agg_with_accessor(accessor, column_type, &mut res)?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            DateHistogram(DateHistogramAggregationReq {
                field: field_name, ..
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    // Only DateTime is supported for DateHistogram
                    get_ff_reader(reader, field_name, Some(&[ColumnType::DateTime]))?;
                add_agg_with_accessor(accessor, column_type, &mut res)?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            Terms(TermsAggregation {
                field: field_name,
                missing,
                field: ref field_name,
                ref missing,
                ..
            }) => {
                let str_dict_column = reader.fast_fields().str(field_name)?;
@@ -119,9 +170,9 @@ impl AggregationWithAccessor {
                    ColumnType::F64,
                    ColumnType::Str,
                    ColumnType::DateTime,
                    ColumnType::Bool,
                    ColumnType::IpAddr,
                    // ColumnType::Bytes Unsupported
                    // ColumnType::Bool Unsupported
                    // ColumnType::IpAddr Unsupported
                ];

                // In case the column is empty we want the shim column to match the missing type
@@ -162,24 +213,11 @@ impl AggregationWithAccessor {
                let column_and_types =
                    get_all_ff_reader_or_empty(reader, field_name, None, fallback_type)?;

                let accessors: Vec<Column> =
                    column_and_types.iter().map(|(a, _)| a.clone()).collect();
                let agg_wit_acc = AggregationWithAccessor {
                    missing_value_for_accessor: None,
                    accessor: accessors[0].clone(),
                    accessors,
                    field_type: ColumnType::U64,
                    sub_aggregation: get_aggs_with_segment_accessor_and_validate(
                        sub_aggregation,
                        reader,
                        &limits,
                    )?,
                    agg: agg.clone(),
                    str_dict_column: str_dict_column.clone(),
                    limits: limits.new_guard(),
                    column_block_accessor: Default::default(),
                };
                res.push(agg_wit_acc);
                let accessors = column_and_types
                    .iter()
                    .map(|c_t| (c_t.0.clone(), c_t.1))
                    .collect();
                add_agg_with_accessors(&agg, accessors, &mut res, Default::default())?;
            }

            for (accessor, column_type) in column_and_types {
@@ -189,21 +227,25 @@ impl AggregationWithAccessor {
                        missing.clone()
                    };

                    let missing_value_for_accessor =
                        if let Some(missing) = missing_value_term_agg.as_ref() {
                            get_missing_val(column_type, missing, agg.agg.get_fast_field_name())?
                        } else {
                            None
                        };
                    let missing_value_for_accessor = if let Some(missing) =
                        missing_value_term_agg.as_ref()
                    {
                        get_missing_val(column_type, missing, agg.agg.get_fast_field_names()[0])?
                    } else {
                        None
                    };

                    let agg = AggregationWithAccessor {
                        segment_ordinal,
                        missing_value_for_accessor,
                        accessor,
                        accessors: Vec::new(),
                        accessors: Default::default(),
                        value_accessors: Default::default(),
                        field_type: column_type,
                        sub_aggregation: get_aggs_with_segment_accessor_and_validate(
                            sub_aggregation,
                            reader,
                            segment_ordinal,
                            &limits,
                        )?,
                        agg: agg.clone(),
@@ -215,34 +257,67 @@ impl AggregationWithAccessor {
                }
            }
            Average(AverageAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            })
            | Count(CountAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            })
            | Max(MaxAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            })
            | Min(MinAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            })
            | Stats(StatsAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            })
            | ExtendedStats(ExtendedStatsAggregation {
                field: ref field_name,
                ..
            })
            | Sum(SumAggregation {
                field: field_name, ..
                field: ref field_name,
                ..
            }) => {
                let (accessor, column_type) =
                    get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
                add_agg_with_accessor(accessor, column_type, &mut res)?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            Percentiles(percentiles) => {
            Percentiles(ref percentiles) => {
                let (accessor, column_type) = get_ff_reader(
                    reader,
                    percentiles.field_name(),
                    Some(get_numeric_or_date_column_types()),
                )?;
                add_agg_with_accessor(accessor, column_type, &mut res)?;
                add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
            }
            TopHits(ref mut top_hits) => {
                top_hits.validate_and_resolve_field_names(reader.fast_fields().columnar())?;
                let accessors: Vec<(Column<u64>, ColumnType)> = top_hits
                    .field_names()
                    .iter()
                    .map(|field| {
                        get_ff_reader(reader, field, Some(get_numeric_or_date_column_types()))
                    })
                    .collect::<crate::Result<_>>()?;

                let value_accessors = top_hits
                    .value_field_names()
                    .iter()
                    .map(|field_name| {
                        Ok((
                            field_name.to_string(),
                            get_dynamic_columns(reader, field_name)?,
                        ))
                    })
                    .collect::<crate::Result<_>>()?;

                add_agg_with_accessors(&agg, accessors, &mut res, value_accessors)?;
            }
        };

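// Editor's note: the `TopHits` arm above resolves two accessor sets -- one for the
// sort fields (`field_names()`) and one for `docvalue_fields`
// (`value_field_names()`), which is why it goes through `add_agg_with_accessors`.
// A request exercising both, using the same shape as the `top_hits` test further down
// in this changeset (assuming `Aggregations` from agg_req is in scope):
let agg_req: Aggregations = serde_json::from_value(serde_json::json!({
    "top_hits_test": {
        "top_hits": {
            "size": 2,
            "sort": [{ "score": "asc" }],    // drives the (Column<u64>, ColumnType) accessors
            "docvalue_fields": ["score"]     // drives the DynamicColumn value_accessors
        }
    }
}))
.unwrap();
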
@@ -264,8 +339,8 @@ fn get_missing_val(
        }
        _ => {
            return Err(crate::TantivyError::InvalidArgument(format!(
                "Missing value {:?} for field {} is not supported for column type {:?}",
                missing, field_name, column_type
                "Missing value {missing:?} for field {field_name} is not supported for column \
                 type {column_type:?}"
            )));
        }
    };
@@ -284,6 +359,7 @@ fn get_numeric_or_date_column_types() -> &'static [ColumnType] {
pub(crate) fn get_aggs_with_segment_accessor_and_validate(
    aggs: &Aggregations,
    reader: &SegmentReader,
    segment_ordinal: SegmentOrdinal,
    limits: &AggregationLimits,
) -> crate::Result<AggregationsWithAccessor> {
    let mut aggss = Vec::new();
@@ -292,6 +368,7 @@ pub(crate) fn get_aggs_with_segment_accessor_and_validate(
            agg,
            agg.sub_aggregation(),
            reader,
            segment_ordinal,
            limits.clone(),
        )?;
        for agg in aggs {
@@ -321,6 +398,19 @@ fn get_ff_reader(
    Ok(ff_field_with_type)
}

fn get_dynamic_columns(
    reader: &SegmentReader,
    field_name: &str,
) -> crate::Result<Vec<columnar::DynamicColumn>> {
    let ff_fields = reader.fast_fields().dynamic_column_handles(field_name)?;
    let cols = ff_fields
        .iter()
        .map(|h| h.open())
        .collect::<io::Result<_>>()?;
    assert!(!ff_fields.is_empty(), "field {field_name} not found");
    Ok(cols)
}

/// Get all fast field readers or empty as default.
///
/// Is guaranteed to return at least one column.

@@ -8,7 +8,9 @@ use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

use super::bucket::GetDocCount;
use super::metric::{PercentilesMetricResult, SingleMetricResult, Stats};
use super::metric::{
    ExtendedStats, PercentilesMetricResult, SingleMetricResult, Stats, TopHitsMetricResult,
};
use super::{AggregationError, Key};
use crate::TantivyError;

@@ -88,10 +90,14 @@ pub enum MetricResult {
    Min(SingleMetricResult),
    /// Stats metric result.
    Stats(Stats),
    /// ExtendedStats metric result.
    ExtendedStats(Box<ExtendedStats>),
    /// Sum metric result.
    Sum(SingleMetricResult),
    /// Sum metric result.
    /// Percentiles metric result.
    Percentiles(PercentilesMetricResult),
    /// Top hits metric result
    TopHits(TopHitsMetricResult),
}

impl MetricResult {
@@ -102,10 +108,14 @@ impl MetricResult {
        MetricResult::Max(max) => Ok(max.value),
        MetricResult::Min(min) => Ok(min.value),
        MetricResult::Stats(stats) => stats.get_value(agg_property),
        MetricResult::ExtendedStats(extended_stats) => extended_stats.get_value(agg_property),
        MetricResult::Sum(sum) => Ok(sum.value),
        MetricResult::Percentiles(_) => Err(TantivyError::AggregationError(
            AggregationError::InvalidRequest("percentiles can't be used to order".to_string()),
        )),
        MetricResult::TopHits(_) => Err(TantivyError::AggregationError(
            AggregationError::InvalidRequest("top_hits can't be used to order".to_string()),
        )),
    }
}
}

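// Editor's note: `get_value` above is what bucket ordering calls into, so
// single-valued metrics (avg, min, max, sum, count, and named sub-values of stats /
// extended_stats) can serve as order targets, while percentiles and top_hits now
// return an explicit error. A terms request ordering by a sub-aggregation metric,
// same shape as the `"order": { "min_price": "desc" }` test later in this changeset:
let agg_req: serde_json::Value = serde_json::json!({
    "termagg": {
        "terms": { "field": "json.mixed_type", "order": { "min_price": "desc" } },
        "aggs": { "min_price": { "min": { "field": "json.mixed_price" } } }
    }
});
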
@@ -4,12 +4,13 @@ use crate::aggregation::agg_req::{Aggregation, Aggregations};
use crate::aggregation::agg_result::AggregationResults;
use crate::aggregation::buf_collector::DOC_BLOCK_SIZE;
use crate::aggregation::collector::AggregationCollector;
use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
use crate::aggregation::segment_agg_result::AggregationLimits;
use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values_and_terms};
use crate::aggregation::DistributedAggregationCollector;
use crate::query::{AllQuery, TermQuery};
use crate::schema::{IndexRecordOption, Schema, FAST};
use crate::{Index, Term};
use crate::{Index, IndexWriter, Term};

fn get_avg_req(field_name: &str) -> Aggregation {
    serde_json::from_value(json!({
@@ -66,6 +67,22 @@ fn test_aggregation_flushing(
            }
        }
    },
    "top_hits_test":{
        "terms": {
            "field": "string_id"
        },
        "aggs": {
            "bucketsL2": {
                "top_hits": {
                    "size": 2,
                    "sort": [
                        { "score": "asc" }
                    ],
                    "docvalue_fields": ["score"]
                }
            }
        }
    },
    "histogram_test":{
        "histogram": {
            "field": "score",
@@ -108,6 +125,16 @@ fn test_aggregation_flushing(

    let searcher = reader.searcher();
    let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();

    // Test postcard roundtrip serialization
    let intermediate_agg_result_bytes = postcard::to_allocvec(&intermediate_agg_result).expect(
        "Postcard Serialization failed, flatten etc. is not supported in the intermediate \
         result",
    );
    let intermediate_agg_result: IntermediateAggregationResults =
        postcard::from_bytes(&intermediate_agg_result_bytes)
            .expect("Postcard deserialization failed");

    intermediate_agg_result
        .into_final_result(agg_req, &Default::default())
        .unwrap()

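// Editor's note: the roundtrip added above guards the distributed path: intermediate
// results are shipped between nodes as postcard bytes, and postcard (a
// non-self-describing format) rejects serde shapes such as `flatten`. The pattern in
// isolation, for any Serialize + Deserialize type:
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Intermediate {
    doc_count: u64,
    sum: f64,
}

fn roundtrip(value: &Intermediate) -> Intermediate {
    // to_allocvec serializes into a Vec<u8>; from_bytes parses it back
    let bytes = postcard::to_allocvec(value).expect("serialization failed");
    postcard::from_bytes(&bytes).expect("deserialization failed")
}
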
@@ -586,7 +613,10 @@ fn test_aggregation_on_json_object() {
    let json = schema_builder.add_json_field("json", FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut index_writer = index.writer_for_tests().unwrap();
    let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
    index_writer
        .add_document(doc!(json => json!({"color": "red"})))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"color": "red"})))
        .unwrap();
@@ -614,12 +644,74 @@ fn test_aggregation_on_json_object() {
        &serde_json::json!({
            "jsonagg": {
                "buckets": [
                    {"doc_count": 2, "key": "red"},
                    {"doc_count": 1, "key": "blue"},
                ],
                "doc_count_error_upper_bound": 0,
                "sum_other_doc_count": 0
            }
        })
    );
}

#[test]
fn test_aggregation_on_nested_json_object() {
    let mut schema_builder = Schema::builder();
    let json = schema_builder.add_json_field("json.blub", FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
    index_writer
        .add_document(doc!(json => json!({"color.dot": "red", "color": {"nested":"red"} })))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"color.dot": "blue", "color": {"nested":"blue"} })))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"color.dot": "blue", "color": {"nested":"blue"} })))
        .unwrap();
    index_writer.commit().unwrap();
    let reader = index.reader().unwrap();
    let searcher = reader.searcher();

    let agg: Aggregations = serde_json::from_value(json!({
        "jsonagg1": {
            "terms": {
                "field": "json\\.blub.color\\.dot",
            }
        },
        "jsonagg2": {
            "terms": {
                "field": "json\\.blub.color.nested",
            }
        }

    }))
    .unwrap();

    let aggregation_collector = get_collector(agg);
    let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
    let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
    assert_eq!(
        &aggregation_res_json,
        &serde_json::json!({
            "jsonagg1": {
                "buckets": [
                    {"doc_count": 2, "key": "blue"},
                    {"doc_count": 1, "key": "red"}
                ],
                "doc_count_error_upper_bound": 0,
                "sum_other_doc_count": 0
            },
            "jsonagg2": {
                "buckets": [
                    {"doc_count": 2, "key": "blue"},
                    {"doc_count": 1, "key": "red"}
                ],
                "doc_count_error_upper_bound": 0,
                "sum_other_doc_count": 0
            }

        })
    );
}

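// Editor's note: the escaping in the test above is load-bearing. The schema field is
// literally named "json.blub" and JSON keys may themselves contain dots, so `\.`
// (written `\\.` inside a Rust string) marks a dot that belongs to a name rather than
// a path separator:
//
//   "json\\.blub.color\\.dot"   -> field "json.blub", key "color.dot"
//   "json\\.blub.color.nested"  -> field "json.blub", path "color" -> "nested"
//
// A minimal splitter for such paths (illustrative only, not tantivy's parser):
fn split_unescaped_dots(path: &str) -> Vec<String> {
    let mut parts = vec![String::new()];
    let mut chars = path.chars();
    while let Some(c) = chars.next() {
        match c {
            '\\' => {
                // keep the escaped character literally (e.g. "\." -> ".")
                if let Some(escaped) = chars.next() {
                    parts.last_mut().unwrap().push(escaped);
                }
            }
            '.' => parts.push(String::new()), // unescaped dot: path separator
            _ => parts.last_mut().unwrap().push(c),
        }
    }
    parts // "json\.blub.color\.dot" -> ["json.blub", "color.dot"]
}
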
@@ -630,7 +722,7 @@ fn test_aggregation_on_json_object_empty_columns() {
    let json = schema_builder.add_json_field("json", FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut index_writer = index.writer_for_tests().unwrap();
    let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
    // => Empty column when accessing color
    index_writer
        .add_document(doc!(json => json!({"price": 10.0})))
@@ -748,32 +840,41 @@ fn test_aggregation_on_json_object_mixed_types() {
    let json = schema_builder.add_json_field("json", FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut index_writer = index.writer_for_tests().unwrap();
    let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
    // => Segment with all values numeric
    index_writer
        .add_document(doc!(json => json!({"mixed_type": 10.0})))
        .add_document(doc!(json => json!({"mixed_type": 10.0, "mixed_price": 10.0})))
        .unwrap();
    index_writer.commit().unwrap();
    // => Segment with all values text
    index_writer
        .add_document(doc!(json => json!({"mixed_type": "blue"})))
        .add_document(doc!(json => json!({"mixed_type": "blue", "mixed_price": 5.0})))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"mixed_type": "blue", "mixed_price": 5.0})))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"mixed_type": "blue", "mixed_price": 5.0})))
        .unwrap();
    index_writer.commit().unwrap();
    // => Segment with all boolean
    index_writer
        .add_document(doc!(json => json!({"mixed_type": true})))
        .add_document(doc!(json => json!({"mixed_type": true, "mixed_price": "no_price"})))
        .unwrap();
    index_writer.commit().unwrap();

    // => Segment with mixed values
    index_writer
        .add_document(doc!(json => json!({"mixed_type": "red"})))
        .add_document(doc!(json => json!({"mixed_type": "red", "mixed_price": 1.0})))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"mixed_type": -20.5})))
        .add_document(doc!(json => json!({"mixed_type": "red", "mixed_price": 1.0})))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"mixed_type": true})))
        .add_document(doc!(json => json!({"mixed_type": -20.5, "mixed_price": -20.5})))
        .unwrap();
    index_writer
        .add_document(doc!(json => json!({"mixed_type": true, "mixed_price": "no_price"})))
        .unwrap();

    index_writer.commit().unwrap();
@@ -787,7 +888,7 @@ fn test_aggregation_on_json_object_mixed_types() {
        "order": { "min_price": "desc" }
    },
    "aggs": {
        "min_price": { "min": { "field": "json.mixed_type" } }
        "min_price": { "min": { "field": "json.mixed_price" } }
    }
},
"rangeagg": {
@@ -811,6 +912,7 @@ fn test_aggregation_on_json_object_mixed_types() {

    let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
    let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
    use pretty_assertions::assert_eq;
    assert_eq!(
        &aggregation_res_json,
        &serde_json::json!({
@@ -825,10 +927,10 @@ fn test_aggregation_on_json_object_mixed_types() {
            "termagg": {
                "buckets": [
                    { "doc_count": 1, "key": 10.0, "min_price": { "value": 10.0 } },
                    { "doc_count": 3, "key": "blue", "min_price": { "value": 5.0 } },
                    { "doc_count": 2, "key": "red", "min_price": { "value": 1.0 } },
                    { "doc_count": 1, "key": -20.5, "min_price": { "value": -20.5 } },
                    // TODO bool is also not yet handled in aggregation
                    { "doc_count": 1, "key": "blue", "min_price": { "value": null } },
                    { "doc_count": 1, "key": "red", "min_price": { "value": null } },
                    { "doc_count": 2, "key": 1.0, "key_as_string": "true", "min_price": { "value": null } },
                ],
                "sum_other_doc_count": 0
            }

@@ -1,7 +1,7 @@
use serde::{Deserialize, Serialize};

use super::{HistogramAggregation, HistogramBounds};
use crate::aggregation::AggregationError;
use crate::aggregation::*;

/// DateHistogramAggregation is similar to `HistogramAggregation`, but it can only be used with date
/// type.
@@ -252,7 +252,7 @@ pub mod tests {
    use crate::aggregation::tests::exec_request;
    use crate::indexer::NoMergePolicy;
    use crate::schema::{Schema, FAST, STRING};
    use crate::Index;
    use crate::{Index, IndexWriter, TantivyDocument};

    #[test]
    fn test_parse_into_millisecs() {
@@ -307,6 +307,7 @@ pub mod tests {
    ) -> crate::Result<Index> {
        let mut schema_builder = Schema::builder();
        schema_builder.add_date_field("date", FAST);
        schema_builder.add_json_field("mixed", FAST);
        schema_builder.add_text_field("text", FAST | STRING);
        schema_builder.add_text_field("text2", FAST | STRING);
        let schema = schema_builder.build();
@@ -316,7 +317,7 @@ pub mod tests {
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        for values in segment_and_docs {
            for doc_str in values {
                let doc = schema.parse_document(doc_str)?;
                let doc = TantivyDocument::parse_json(&schema, doc_str)?;
                index_writer.add_document(doc)?;
            }
            // writing the segment
@@ -328,7 +329,7 @@ pub mod tests {
            .searchable_segment_ids()
            .expect("Searchable segments failed.");
        if segment_ids.len() > 1 {
            let mut index_writer = index.writer_for_tests()?;
            let mut index_writer: IndexWriter = index.writer_for_tests()?;
            index_writer.merge(&segment_ids).wait()?;
            index_writer.wait_merging_threads()?;
        }
@@ -351,8 +352,10 @@ pub mod tests {
        let docs = vec![
            vec![r#"{ "date": "2015-01-01T12:10:30Z", "text": "aaa" }"#],
            vec![r#"{ "date": "2015-01-01T11:11:30Z", "text": "bbb" }"#],
            vec![r#"{ "date": "2015-01-01T11:11:30Z", "text": "bbb" }"#],
            vec![r#"{ "date": "2015-01-02T00:00:00Z", "text": "bbb" }"#],
            vec![r#"{ "date": "2015-01-06T00:00:00Z", "text": "ccc" }"#],
            vec![r#"{ "date": "2015-01-06T00:00:00Z", "text": "ccc" }"#],
        ];
        let index = get_test_index_from_docs(merge_segments, &docs).unwrap();

@@ -381,7 +384,7 @@ pub mod tests {
                {
                    "key_as_string" : "2015-01-01T00:00:00Z",
                    "key" : 1420070400000.0,
                    "doc_count" : 4
                    "doc_count" : 6
                }
            ]
        }
@@ -419,15 +422,15 @@ pub mod tests {
                {
                    "key_as_string" : "2015-01-01T00:00:00Z",
                    "key" : 1420070400000.0,
                    "doc_count" : 4,
                    "doc_count" : 6,
                    "texts": {
                        "buckets": [
                            {
                                "doc_count": 2,
                                "doc_count": 3,
                                "key": "bbb"
                            },
                            {
                                "doc_count": 1,
                                "doc_count": 2,
                                "key": "ccc"
                            },
                            {
@@ -466,7 +469,7 @@ pub mod tests {
        "sales_over_time": {
            "buckets": [
                {
                    "doc_count": 2,
                    "doc_count": 3,
                    "key": 1420070400000.0,
                    "key_as_string": "2015-01-01T00:00:00Z"
                },
@@ -491,7 +494,7 @@ pub mod tests {
                    "key_as_string": "2015-01-05T00:00:00Z"
                },
                {
                    "doc_count": 1,
                    "doc_count": 2,
                    "key": 1420502400000.0,
                    "key_as_string": "2015-01-06T00:00:00Z"
                }
@@ -532,7 +535,7 @@ pub mod tests {
                    "key_as_string": "2014-12-31T00:00:00Z"
                },
                {
                    "doc_count": 2,
                    "doc_count": 3,
                    "key": 1420070400000.0,
                    "key_as_string": "2015-01-01T00:00:00Z"
                },
@@ -557,7 +560,7 @@ pub mod tests {
                    "key_as_string": "2015-01-05T00:00:00Z"
                },
                {
                    "doc_count": 1,
                    "doc_count": 2,
                    "key": 1420502400000.0,
                    "key_as_string": "2015-01-06T00:00:00Z"
                },

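// Editor's note: the bucket keys above are epoch milliseconds rendered as f64. A
// quick sanity check that 1420070400000.0 really is 2015-01-01T00:00:00Z:
// 1420070400000 ms / 1000 = 1420070400 s, and 1420070400 / 86400 = 16436 whole days
// since 1970-01-01, which lands exactly on 2015-01-01. In code, using the `time`
// crate these tests already import:
use time::OffsetDateTime;

fn key_to_datetime(key_millis: f64) -> OffsetDateTime {
    // yields the 2015-01-01 00:00 UTC instant for 1420070400000.0
    OffsetDateTime::from_unix_timestamp((key_millis as i64) / 1000).unwrap()
}
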
@@ -1,8 +1,5 @@
use std::cmp::Ordering;
use std::fmt::Display;

use columnar::ColumnType;
use itertools::Itertools;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use tantivy_bitpacker::minmax;
@@ -18,9 +15,9 @@ use crate::aggregation::intermediate_agg_result::{
    IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::{
    build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
    build_segment_agg_collector, SegmentAggregationCollector,
};
use crate::aggregation::{f64_from_fastfield_u64, format_date};
use crate::aggregation::*;
use crate::TantivyError;

/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
@@ -73,6 +70,7 @@ pub struct HistogramAggregation {
    pub field: String,
    /// The interval to chunk your data range. Each bucket spans a value range of [0..interval).
    /// Must be a positive value.
    #[serde(deserialize_with = "deserialize_f64")]
    pub interval: f64,
    /// Intervals implicitly define an absolute grid of buckets `[interval * k, interval * (k +
    /// 1))`.
@@ -85,6 +83,7 @@ pub struct HistogramAggregation {
    /// fall into the buckets with the key 0 and 10.
    /// With offset 5 and interval 10, they would both fall into the bucket with the key 5 and the
    /// range [5..15)
    #[serde(default, deserialize_with = "deserialize_option_f64")]
    pub offset: Option<f64>,
    /// The minimum number of documents in a bucket to be returned. Defaults to 0.
    pub min_doc_count: Option<u64>,
@@ -308,7 +307,10 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
            .column_block_accessor
            .fetch_block(docs, &bucket_agg_accessor.accessor);

        for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
        for (doc, val) in bucket_agg_accessor
            .column_block_accessor
            .iter_docid_vals(docs, &bucket_agg_accessor.accessor)
        {
            let val = self.f64_from_fastfield_u64(val);

            let bucket_pos = get_bucket_pos(val);
@@ -329,9 +331,11 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
        }

        let mem_delta = self.get_memory_consumption() - mem_pre;
        bucket_agg_accessor
            .limits
            .add_memory_consumed(mem_delta as u64)?;
        if mem_delta > 0 {
            bucket_agg_accessor
                .limits
                .add_memory_consumed(mem_delta as u64)?;
        }

        Ok(())
    }

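// Editor's note: the `if mem_delta > 0` guard above skips limit bookkeeping when a
// block did not grow the collector. The memory estimate is signed, so the delta is
// zero (or transiently negative) for blocks that only update existing buckets, and
// calling `add_memory_consumed(0)` would still pay for an atomic RMW on the shared
// counter. A sketch of the hot-path shape, using the surrounding file's types:
fn account_block(limits: &ResourceLimitGuard, mem_pre: i64, mem_post: i64) -> crate::Result<()> {
    let mem_delta = mem_post - mem_pre;
    if mem_delta > 0 {
        // only touch the shared atomic counter when the estimate actually grew
        limits.add_memory_consumed(mem_delta as u64)?;
    }
    Ok(())
}
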
@@ -595,11 +599,12 @@ mod tests {
    use serde_json::Value;

    use super::*;
    use crate::aggregation::agg_req::Aggregations;
    use crate::aggregation::agg_result::AggregationResults;
    use crate::aggregation::tests::{
        exec_request, exec_request_with_query, exec_request_with_query_and_memory_limit,
        get_test_index_2_segments, get_test_index_from_values, get_test_index_with_num_docs,
    };
    use crate::query::AllQuery;

    #[test]
    fn histogram_test_crooked_values() -> crate::Result<()> {
@@ -1351,6 +1356,35 @@ mod tests {
            })
        );

        Ok(())
    }
    #[test]
    fn test_aggregation_histogram_empty_index() -> crate::Result<()> {
        // test index without segments
        let values = vec![];

        let index = get_test_index_from_values(false, &values)?;

        let agg_req_1: Aggregations = serde_json::from_value(json!({
            "myhisto": {
                "histogram": {
                    "field": "score",
                    "interval": 10.0
                },
            }
        }))
        .unwrap();

        let collector = AggregationCollector::from_aggs(agg_req_1, Default::default());

        let reader = index.reader()?;
        let searcher = reader.searcher();
        let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();

        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
        // Make sure the result structure is correct
        assert_eq!(res["myhisto"]["buckets"].as_array().unwrap().len(), 0);

        Ok(())
    }
}

@@ -28,6 +28,7 @@ mod term_agg;
mod term_missing_agg;

use std::collections::HashMap;
use std::fmt;

pub use histogram::*;
pub use range::*;
@@ -72,12 +73,12 @@ impl From<&str> for OrderTarget {
    }
}

impl ToString for OrderTarget {
    fn to_string(&self) -> String {
impl fmt::Display for OrderTarget {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            OrderTarget::Key => "_key".to_string(),
            OrderTarget::Count => "_count".to_string(),
            OrderTarget::SubAggregation(agg) => agg.to_string(),
            OrderTarget::Key => f.write_str("_key"),
            OrderTarget::Count => f.write_str("_count"),
            OrderTarget::SubAggregation(agg) => agg.fmt(f),
        }
    }
}

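// Editor's note: swapping the manual `ToString` impl for `fmt::Display` is the
// idiomatic direction. The blanket `impl<T: fmt::Display> ToString for T` in std keeps
// `order_target.to_string()` working at every call site, and `Display` additionally
// allows direct use in `format!`/`write!` without an intermediate String allocation.
// A standalone demonstration of the same pattern:
use std::fmt;

enum Target {
    Key,
    Count,
}

impl fmt::Display for Target {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Target::Key => f.write_str("_key"),
            Target::Count => f.write_str("_count"),
        }
    }
}

fn demo() {
    let t = Target::Key;
    assert_eq!(t.to_string(), "_key"); // still available via the blanket impl
    assert_eq!(format!("order by {t}"), "order by _key");
}
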
@@ -1,7 +1,6 @@
use std::fmt::Debug;
use std::ops::Range;

use columnar::{ColumnType, MonotonicallyMappableToU64};
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

@@ -14,9 +13,7 @@ use crate::aggregation::intermediate_agg_result::{
use crate::aggregation::segment_agg_result::{
    build_segment_agg_collector, SegmentAggregationCollector,
};
use crate::aggregation::{
    f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
};
use crate::aggregation::*;
use crate::TantivyError;

/// Provide user-defined buckets to aggregate on.
@@ -72,11 +69,19 @@ pub struct RangeAggregationRange {
    pub key: Option<String>,
    /// The from range value, which is inclusive in the range.
    /// `None` equals to an open ended interval.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    #[serde(
        skip_serializing_if = "Option::is_none",
        default,
        deserialize_with = "deserialize_option_f64"
    )]
    pub from: Option<f64>,
    /// The to range value, which is not inclusive in the range.
    /// `None` equals to an open ended interval.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    #[serde(
        skip_serializing_if = "Option::is_none",
        default,
        deserialize_with = "deserialize_option_f64"
    )]
    pub to: Option<f64>,
}

@@ -230,7 +235,10 @@ impl SegmentAggregationCollector for SegmentRangeCollector {
            .column_block_accessor
            .fetch_block(docs, &bucket_agg_accessor.accessor);

        for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
        for (doc, val) in bucket_agg_accessor
            .column_block_accessor
            .iter_docid_vals(docs, &bucket_agg_accessor.accessor)
        {
            let bucket_pos = self.get_bucket_pos(val);

            let bucket = &mut self.buckets[bucket_pos];
@@ -441,7 +449,6 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &ColumnType) -> crate
#[cfg(test)]
mod tests {

    use columnar::MonotonicallyMappableToU64;
    use serde_json::Value;

    use super::*;
@@ -450,7 +457,6 @@ mod tests {
        exec_request, exec_request_with_query, get_test_index_2_segments,
        get_test_index_with_num_docs,
    };
    use crate::aggregation::AggregationLimits;

    pub fn get_collector_from_ranges(
        ranges: Vec<RangeAggregationRange>,

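// Editor's note: wiring `deserialize_option_f64` into `from`/`to` above makes the
// range bounds more tolerant of the representations Elasticsearch-style clients send.
// Assumption about the helper's behavior (it is defined elsewhere in this crate): it
// accepts a JSON number or a numeric string for an optional f64, so both requests
// below would parse to the same bounds:
let a: RangeAggregationRange =
    serde_json::from_value(serde_json::json!({ "from": 3.0, "to": 7000.0 })).unwrap();
let b: RangeAggregationRange =
    serde_json::from_value(serde_json::json!({ "from": "3", "to": "7000" })).unwrap();
assert_eq!(a.from, b.from);
assert_eq!(a.to, b.to);
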
@@ -1,6 +1,10 @@
use std::fmt::Debug;
use std::net::Ipv6Addr;

use columnar::{BytesColumn, ColumnType, MonotonicallyMappableToU64, StrColumn};
use columnar::column_values::CompactSpaceU64Accessor;
use columnar::{
    BytesColumn, ColumnType, MonotonicallyMappableToU128, MonotonicallyMappableToU64, StrColumn,
};
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};

@@ -99,23 +103,14 @@ pub struct TermsAggregation {
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub size: Option<u32>,

    /// Unused by tantivy.
    ///
    /// Since tantivy doesn't know shards, this parameter is merely there to be used by consumers
    /// of tantivy. shard_size is the number of terms returned by each shard.
    /// The default value in elasticsearch is size * 1.5 + 10.
    ///
    /// Should never be smaller than size.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    #[serde(alias = "shard_size")]
    pub split_size: Option<u32>,

    /// The get more accurate results, we fetch more than `size` from each segment.
    /// To get more accurate results, we fetch more than `size` from each segment.
    ///
    /// Increasing this value will increase the cost for more accuracy.
    ///
    /// Defaults to 10 * size.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    #[serde(alias = "shard_size")]
    #[serde(alias = "split_size")]
    pub segment_size: Option<u32>,

    /// If you set the `show_term_doc_count_error` parameter to true, the terms aggregation will

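// Editor's note: with the serde aliases above, all three spellings below deserialize
// into `segment_size`; `shard_size` (the Elasticsearch name) and the removed
// `split_size` are kept for request compatibility:
let reqs = [
    serde_json::json!({ "terms": { "field": "text", "segment_size": 100 } }),
    serde_json::json!({ "terms": { "field": "text", "shard_size": 100 } }),
    serde_json::json!({ "terms": { "field": "text", "split_size": 100 } }),
];
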
@@ -256,7 +251,7 @@ pub struct SegmentTermCollector {
    term_buckets: TermBuckets,
    req: TermsAggregationInternal,
    blueprint: Option<Box<dyn SegmentAggregationCollector>>,
    field_type: ColumnType,
    column_type: ColumnType,
    accessor_idx: usize,
}

@@ -315,7 +310,10 @@ impl SegmentAggregationCollector for SegmentTermCollector {
        }
        // has subagg
        if let Some(blueprint) = self.blueprint.as_ref() {
            for (doc, term_id) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
            for (doc, term_id) in bucket_agg_accessor
                .column_block_accessor
                .iter_docid_vals(docs, &bucket_agg_accessor.accessor)
            {
                let sub_aggregations = self
                    .term_buckets
                    .sub_aggs
@@ -326,9 +324,11 @@ impl SegmentAggregationCollector for SegmentTermCollector {
        }

        let mem_delta = self.get_memory_consumption() - mem_pre;
        bucket_agg_accessor
            .limits
            .add_memory_consumed(mem_delta as u64)?;
        if mem_delta > 0 {
            bucket_agg_accessor
                .limits
                .add_memory_consumed(mem_delta as u64)?;
        }

        Ok(())
    }

@@ -355,10 +355,9 @@ impl SegmentTermCollector {
        field_type: ColumnType,
        accessor_idx: usize,
    ) -> crate::Result<Self> {
        if field_type == ColumnType::Bytes || field_type == ColumnType::Bool {
        if field_type == ColumnType::Bytes {
            return Err(TantivyError::InvalidArgument(format!(
                "terms aggregation is not supported for column type {:?}",
                field_type
                "terms aggregation is not supported for column type {field_type:?}"
            )));
        }
        let term_buckets = TermBuckets::default();
@@ -389,7 +388,7 @@ impl SegmentTermCollector {
            req: TermsAggregationInternal::from_req(req),
            term_buckets,
            blueprint,
            field_type,
            column_type: field_type,
            accessor_idx,
        })
    }
@@ -466,7 +465,7 @@ impl SegmentTermCollector {
            Ok(intermediate_entry)
        };

        if self.field_type == ColumnType::Str {
        if self.column_type == ColumnType::Str {
            let term_dict = agg_with_accessor
                .str_dict_column
                .as_ref()
@@ -531,28 +530,55 @@ impl SegmentTermCollector {
                });
            }
        }
        } else if self.field_type == ColumnType::DateTime {
        } else if self.column_type == ColumnType::DateTime {
            for (val, doc_count) in entries {
                let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
                let val = i64::from_u64(val);
                let date = format_date(val)?;
                dict.insert(IntermediateKey::Str(date), intermediate_entry);
            }
        } else if self.column_type == ColumnType::Bool {
            for (val, doc_count) in entries {
                let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
                let val = bool::from_u64(val);
                dict.insert(IntermediateKey::Bool(val), intermediate_entry);
            }
        } else if self.column_type == ColumnType::IpAddr {
            let compact_space_accessor = agg_with_accessor
                .accessor
                .values
                .clone()
                .downcast_arc::<CompactSpaceU64Accessor>()
                .map_err(|_| {
                    TantivyError::AggregationError(
                        crate::aggregation::AggregationError::InternalError(
                            "Type mismatch: Could not downcast to CompactSpaceU64Accessor"
                                .to_string(),
                        ),
                    )
                })?;

            for (val, doc_count) in entries {
                let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
                let val: u128 = compact_space_accessor.compact_to_u128(val as u32);
                let val = Ipv6Addr::from_u128(val);
                dict.insert(IntermediateKey::IpAddr(val), intermediate_entry);
            }
        } else {
            for (val, doc_count) in entries {
                let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
                let val = f64_from_fastfield_u64(val, &self.field_type);
                let val = f64_from_fastfield_u64(val, &self.column_type);
                dict.insert(IntermediateKey::F64(val), intermediate_entry);
            }
        };

        Ok(IntermediateBucketResult::Terms(
            IntermediateTermBucketResult {
        Ok(IntermediateBucketResult::Terms {
            buckets: IntermediateTermBucketResult {
                entries: dict,
                sum_other_doc_count,
                doc_count_error_upper_bound: term_doc_count_before_cutoff,
            },
        ))
        })
    }
}

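// Editor's note: the new branches above decode the raw u64 fast-field value back into
// a typed bucket key. For bools the mapping is the trivial 0/1 one; for IPs the u64 is
// only an index into the column's compact space and has to be widened back to a u128
// before it is an address (IPv4 addresses are stored as mapped IPv6, which is why the
// tests below can assert on both "::1" and "127.0.0.1"). A condensed, std-only view of
// the two decode paths; `widen` stands in for `CompactSpaceU64Accessor::compact_to_u128`:
use std::net::Ipv6Addr;

fn bool_from_u64(v: u64) -> bool {
    v != 0 // stored as 0u64 / 1u64
}

fn ip_from_compact(v: u64, widen: impl Fn(u32) -> u128) -> Ipv6Addr {
    Ipv6Addr::from(widen(v as u32)) // widen the compact index, then build the address
}
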
@@ -590,6 +616,9 @@ pub(crate) fn cut_off_buckets<T: GetDocCount + Debug>(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::net::IpAddr;
|
||||
use std::str::FromStr;
|
||||
|
||||
use common::DateTime;
|
||||
use time::{Date, Month};
|
||||
|
||||
@@ -600,8 +629,8 @@ mod tests {
|
||||
};
|
||||
use crate::aggregation::AggregationLimits;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::schema::{Schema, FAST, STRING};
|
||||
use crate::Index;
|
||||
use crate::schema::{IntoIpv6Addr, Schema, FAST, STRING};
|
||||
use crate::{Index, IndexWriter};
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_test_single_segment() -> crate::Result<()> {
|
||||
@@ -1182,9 +1211,9 @@ mod tests {
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["key"], "terma");
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 4);
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termc");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termb");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 0);
|
||||
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termb");
|
||||
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termc");
|
||||
assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 0);
|
||||
assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
|
||||
assert_eq!(res["my_texts"]["doc_count_error_upper_bound"], 0);
|
||||
@@ -1365,7 +1394,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> {
|
||||
let terms = vec!["Hello Hello", "Hallo Hallo"];
|
||||
let terms = vec!["Hello Hello", "Hallo Hallo", "Hallo Hallo"];
|
||||
|
||||
let index = get_test_index_from_terms(true, &[terms])?;
|
||||
|
||||
@@ -1383,7 +1412,7 @@ mod tests {
|
||||
println!("{}", serde_json::to_string_pretty(&res).unwrap());
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["key"], "Hallo Hallo");
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 1);
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "Hello Hello");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 1);
|
||||
@@ -1473,7 +1502,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with empty json
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
@@ -1894,4 +1923,80 @@ mod tests {

         Ok(())
     }
+
+    #[test]
+    fn terms_aggregation_bool() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let field = schema_builder.add_bool_field("bool_field", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        {
+            let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
+            writer.add_document(doc!(field=>true))?;
+            writer.add_document(doc!(field=>false))?;
+            writer.add_document(doc!(field=>true))?;
+            writer.commit()?;
+        }
+
+        let agg_req: Aggregations = serde_json::from_value(json!({
+            "my_bool": {
+                "terms": {
+                    "field": "bool_field"
+                },
+            }
+        }))
+        .unwrap();
+
+        let res = exec_request_with_query(agg_req, &index, None)?;
+
+        assert_eq!(res["my_bool"]["buckets"][0]["key"], 1.0);
+        assert_eq!(res["my_bool"]["buckets"][0]["key_as_string"], "true");
+        assert_eq!(res["my_bool"]["buckets"][0]["doc_count"], 2);
+        assert_eq!(res["my_bool"]["buckets"][1]["key"], 0.0);
+        assert_eq!(res["my_bool"]["buckets"][1]["key_as_string"], "false");
+        assert_eq!(res["my_bool"]["buckets"][1]["doc_count"], 1);
+        assert_eq!(res["my_bool"]["buckets"][2]["key"], serde_json::Value::Null);
+
+        Ok(())
+    }
+
+    #[test]
+    fn terms_aggregation_ip_addr() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let field = schema_builder.add_ip_addr_field("ip_field", FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        {
+            let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
+            // IpV6 loopback
+            writer.add_document(doc!(field=>IpAddr::from_str("::1").unwrap().into_ipv6_addr()))?;
+            writer.add_document(doc!(field=>IpAddr::from_str("::1").unwrap().into_ipv6_addr()))?;
+            // IpV4
+            writer.add_document(
+                doc!(field=>IpAddr::from_str("127.0.0.1").unwrap().into_ipv6_addr()),
+            )?;
+            writer.commit()?;
+        }
+
+        let agg_req: Aggregations = serde_json::from_value(json!({
+            "my_bool": {
+                "terms": {
+                    "field": "ip_field"
+                },
+            }
+        }))
+        .unwrap();
+
+        let res = exec_request_with_query(agg_req, &index, None)?;
+        // print as json
+        // println!("{}", serde_json::to_string_pretty(&res).unwrap());
+
+        assert_eq!(res["my_bool"]["buckets"][0]["key"], "::1");
+        assert_eq!(res["my_bool"]["buckets"][0]["doc_count"], 2);
+        assert_eq!(res["my_bool"]["buckets"][1]["key"], "127.0.0.1");
+        assert_eq!(res["my_bool"]["buckets"][1]["doc_count"], 1);
+        assert_eq!(res["my_bool"]["buckets"][2]["key"], serde_json::Value::Null);
+
+        Ok(())
+    }
 }
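Outside the test harness, the same bool terms aggregation can be driven through the public collector API. A minimal sketch, assuming tantivy's `AggregationCollector::from_aggs` entry point, the `IndexWriter` type alias, and default `AggregationLimits` (the tests above use the crate-internal `exec_request_with_query` helper instead):

use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, FAST};
use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    // Index three bool values on a FAST field, mirroring the test above.
    let mut schema_builder = Schema::builder();
    let field = schema_builder.add_bool_field("bool_field", FAST);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer: IndexWriter = index.writer(15_000_000)?;
    writer.add_document(doc!(field => true))?;
    writer.add_document(doc!(field => false))?;
    writer.add_document(doc!(field => true))?;
    writer.commit()?;

    // Same JSON request shape as the test. Bool buckets come back with a
    // numeric key (1.0 / 0.0) plus a "true"/"false" key_as_string.
    let agg_req: Aggregations = serde_json::from_value(serde_json::json!({
        "my_bool": { "terms": { "field": "bool_field" } }
    }))
    .expect("valid aggregation request");
    let collector = AggregationCollector::from_aggs(agg_req, Default::default());

    let searcher = index.reader()?.searcher();
    let agg_res = searcher.search(&AllQuery, &collector)?;
    println!("{}", serde_json::to_string_pretty(&agg_res).expect("serializable"));
    Ok(())
}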
@@ -73,11 +73,13 @@ impl SegmentAggregationCollector for TermMissingAgg {

             entries.insert(missing.into(), missing_entry);

-            let bucket = IntermediateBucketResult::Terms(IntermediateTermBucketResult {
-                entries,
-                sum_other_doc_count: 0,
-                doc_count_error_upper_bound: 0,
-            });
+            let bucket = IntermediateBucketResult::Terms {
+                buckets: IntermediateTermBucketResult {
+                    entries,
+                    sum_other_doc_count: 0,
+                    doc_count_error_upper_bound: 0,
+                },
+            };

             results.push(name, IntermediateAggregationResult::Bucket(bucket))?;

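The hunk above turns `Terms` from a tuple variant into a struct variant. With serde's default externally tagged representation, that gives the serialized intermediate result a named `buckets` field instead of a bare payload. A standalone illustration of the difference (toy types, not tantivy's):

use serde::Serialize;

#[derive(Serialize)]
enum Before {
    Terms(Vec<u32>),
}

#[derive(Serialize)]
enum After {
    Terms { buckets: Vec<u32> },
}

fn main() {
    // Tuple variant: the payload is serialized bare.
    let before = serde_json::to_string(&Before::Terms(vec![1, 2])).unwrap();
    assert_eq!(before, r#"{"Terms":[1,2]}"#);

    // Struct variant: the payload is wrapped in a named "buckets" field.
    let after = serde_json::to_string(&After::Terms { buckets: vec![1, 2] }).unwrap();
    assert_eq!(after, r#"{"Terms":{"buckets":[1,2]}}"#);
}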
@@ -90,7 +92,10 @@ impl SegmentAggregationCollector for TermMissingAgg {
         agg_with_accessor: &mut AggregationsWithAccessor,
     ) -> crate::Result<()> {
         let agg = &mut agg_with_accessor.aggs.values[self.accessor_idx];
-        let has_value = agg.accessors.iter().any(|acc| acc.index.has_value(doc));
+        let has_value = agg
+            .accessors
+            .iter()
+            .any(|(acc, _)| acc.index.has_value(doc));
         if !has_value {
             self.missing_count += 1;
             if let Some(sub_agg) = self.sub_agg.as_mut() {
@@ -117,7 +122,7 @@ mod tests {
     use crate::aggregation::agg_req::Aggregations;
     use crate::aggregation::tests::exec_request_with_query;
     use crate::schema::{Schema, FAST};
-    use crate::Index;
+    use crate::{Index, IndexWriter};

     #[test]
     fn terms_aggregation_missing_mixed_type_mult_seg_sub_agg() -> crate::Result<()> {
@@ -126,7 +131,7 @@ mod tests {
         let score = schema_builder.add_f64_field("score", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
         // => Segment with all values numeric
         index_writer
             .add_document(doc!(score => 1.0, json => json!({"mixed_type": 10.0})))
@@ -186,7 +191,7 @@ mod tests {
         let score = schema_builder.add_f64_field("score", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
         // => Segment with all values numeric
         index_writer.add_document(doc!(score => 1.0, json => json!({"mixed_type": 10.0})))?;
         index_writer.add_document(doc!(score => 5.0))?;
@@ -231,7 +236,7 @@ mod tests {
         let score = schema_builder.add_f64_field("score", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();

         index_writer.add_document(doc!(score => 5.0))?;
         index_writer.commit().unwrap();
@@ -278,7 +283,7 @@ mod tests {
         let score = schema_builder.add_f64_field("score", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();

         index_writer.add_document(doc!(score => 5.0))?;
         index_writer.add_document(doc!(score => 5.0))?;
@@ -323,7 +328,7 @@ mod tests {
         let json = schema_builder.add_json_field("json", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
         // => Segment with all values numeric
         index_writer
             .add_document(doc!(json => json!({"mixed_type": 10.0})))
@@ -385,7 +390,7 @@ mod tests {
         let json = schema_builder.add_json_field("json", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
         // => Segment with all values numeric
         index_writer
             .add_document(doc!(json => json!({"mixed_type": 10.0})))
@@ -427,7 +432,7 @@ mod tests {
         let json = schema_builder.add_json_field("json", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
         // => Segment with all values numeric
         index_writer
             .add_document(doc!(json => json!({"mixed_type": 10.0})))

@@ -8,7 +8,8 @@ use super::segment_agg_result::{
 };
 use crate::aggregation::agg_req_with_accessor::get_aggs_with_segment_accessor_and_validate;
 use crate::collector::{Collector, SegmentCollector};
-use crate::{DocId, SegmentReader, TantivyError};
+use crate::index::SegmentReader;
+use crate::{DocId, SegmentOrdinal, TantivyError};

 /// The default max bucket count, before the aggregation fails.
 pub const DEFAULT_BUCKET_LIMIT: u32 = 65000;
@@ -64,10 +65,15 @@ impl Collector for DistributedAggregationCollector {

     fn for_segment(
         &self,
-        _segment_local_id: crate::SegmentOrdinal,
+        segment_local_id: crate::SegmentOrdinal,
         reader: &crate::SegmentReader,
     ) -> crate::Result<Self::Child> {
-        AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
+        AggregationSegmentCollector::from_agg_req_and_reader(
+            &self.agg,
+            reader,
+            segment_local_id,
+            &self.limits,
+        )
     }

     fn requires_scoring(&self) -> bool {
@@ -89,10 +95,15 @@ impl Collector for AggregationCollector {

     fn for_segment(
         &self,
-        _segment_local_id: crate::SegmentOrdinal,
+        segment_local_id: crate::SegmentOrdinal,
         reader: &crate::SegmentReader,
     ) -> crate::Result<Self::Child> {
-        AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
+        AggregationSegmentCollector::from_agg_req_and_reader(
+            &self.agg,
+            reader,
+            segment_local_id,
+            &self.limits,
+        )
     }

     fn requires_scoring(&self) -> bool {
@@ -135,10 +146,11 @@ impl AggregationSegmentCollector {
     pub fn from_agg_req_and_reader(
         agg: &Aggregations,
         reader: &SegmentReader,
+        segment_ordinal: SegmentOrdinal,
         limits: &AggregationLimits,
     ) -> crate::Result<Self> {
         let mut aggs_with_accessor =
-            get_aggs_with_segment_accessor_and_validate(agg, reader, limits)?;
+            get_aggs_with_segment_accessor_and_validate(agg, reader, segment_ordinal, limits)?;
         let result =
             BufAggregationCollector::new(build_segment_agg_collector(&mut aggs_with_accessor)?);
         Ok(AggregationSegmentCollector {

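Taken together, these three hunks thread the segment's local id from `Collector::for_segment` down into the per-segment aggregation state, where it was previously discarded via the `_segment_local_id` underscore binding. A toy reduction of that plumbing (all names hypothetical), just to show the shape of the change:

type SegmentOrdinal = u32;

struct SegmentAggState {
    // With the ordinal stored per segment, an aggregation such as top_hits
    // can address a hit globally as a (segment_ordinal, doc_id) pair.
    segment_ordinal: SegmentOrdinal,
}

struct RootCollector;

impl RootCollector {
    fn for_segment(&self, segment_local_id: SegmentOrdinal) -> SegmentAggState {
        // Forward the ordinal instead of ignoring it.
        SegmentAggState {
            segment_ordinal: segment_local_id,
        }
    }
}

fn main() {
    let root = RootCollector;
    let seg = root.for_segment(3);
    assert_eq!(seg.segment_ordinal, 3);
}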
@@ -5,6 +5,7 @@
 use std::cmp::Ordering;
 use std::collections::hash_map::Entry;
 use std::hash::Hash;
+use std::net::Ipv6Addr;

 use columnar::ColumnType;
 use itertools::Itertools;
@@ -18,8 +19,8 @@ use super::bucket::{
     GetDocCount, Order, OrderTarget, RangeAggregation, TermsAggregation,
 };
 use super::metric::{
-    IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
-    IntermediateSum, PercentilesCollector,
+    IntermediateAverage, IntermediateCount, IntermediateExtendedStats, IntermediateMax,
+    IntermediateMin, IntermediateStats, IntermediateSum, PercentilesCollector, TopHitsTopNComputer,
 };
 use super::segment_agg_result::AggregationLimits;
 use super::{format_date, AggregationError, Key, SerializedKey};
@@ -41,6 +42,10 @@ pub struct IntermediateAggregationResults {
 /// This might seem redundant with `Key`, but the point is to have a different
 /// Serialize implementation.
 pub enum IntermediateKey {
+    /// Ip Addr key
+    IpAddr(Ipv6Addr),
+    /// Bool key
+    Bool(bool),
     /// String key
     Str(String),
     /// `f64` key
@@ -58,7 +63,16 @@ impl From<IntermediateKey> for Key {
     fn from(value: IntermediateKey) -> Self {
         match value {
             IntermediateKey::Str(s) => Self::Str(s),
+            IntermediateKey::IpAddr(s) => {
+                // Prefer to use the IPv4 representation if possible
+                if let Some(ip) = s.to_ipv4_mapped() {
+                    Self::Str(ip.to_string())
+                } else {
+                    Self::Str(s.to_string())
+                }
+            }
             IntermediateKey::F64(f) => Self::F64(f),
+            IntermediateKey::Bool(f) => Self::F64(f as u64 as f64),
         }
     }
 }
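The new `IpAddr` arm prefers the dotted-quad form whenever the stored `Ipv6Addr` is an IPv4-mapped address; this is what makes the `127.0.0.1` bucket key in the test above print without an `::ffff:` prefix. The relevant `std::net` behavior in isolation:

use std::net::Ipv6Addr;
use std::str::FromStr;

fn main() {
    // An IPv4 address stored as an IPv4-mapped IPv6 address round-trips
    // back to dotted-quad form via to_ipv4_mapped().
    let mapped = Ipv6Addr::from_str("::ffff:127.0.0.1").unwrap();
    assert_eq!(mapped.to_ipv4_mapped().unwrap().to_string(), "127.0.0.1");

    // A genuine IPv6 address (here the loopback ::1) has no IPv4-mapped
    // form, so it keeps its IPv6 string representation.
    let loopback = Ipv6Addr::from_str("::1").unwrap();
    assert!(loopback.to_ipv4_mapped().is_none());
    assert_eq!(loopback.to_string(), "::1");
}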
@@ -71,6 +85,8 @@ impl std::hash::Hash for IntermediateKey {
         match self {
             IntermediateKey::Str(text) => text.hash(state),
             IntermediateKey::F64(val) => val.to_bits().hash(state),
+            IntermediateKey::Bool(val) => val.hash(state),
+            IntermediateKey::IpAddr(val) => val.hash(state),
         }
     }
 }
@@ -166,9 +182,9 @@ impl IntermediateAggregationResults {
 pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult {
     use AggregationVariants::*;
     match req.agg {
-        Terms(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Terms(
-            Default::default(),
-        )),
+        Terms(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Terms {
+            buckets: Default::default(),
+        }),
         Range(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Range(
             Default::default(),
         )),
@@ -199,12 +215,18 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
         Stats(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::Stats(
             IntermediateStats::default(),
         )),
+        ExtendedStats(_) => IntermediateAggregationResult::Metric(
+            IntermediateMetricResult::ExtendedStats(IntermediateExtendedStats::default()),
+        ),
         Sum(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::Sum(
             IntermediateSum::default(),
         )),
         Percentiles(_) => IntermediateAggregationResult::Metric(
             IntermediateMetricResult::Percentiles(PercentilesCollector::default()),
         ),
+        TopHits(ref req) => IntermediateAggregationResult::Metric(
+            IntermediateMetricResult::TopHits(TopHitsTopNComputer::new(req)),
+        ),
     }
 }

@@ -263,8 +285,12 @@ pub enum IntermediateMetricResult {
     Min(IntermediateMin),
     /// Intermediate stats result.
     Stats(IntermediateStats),
+    /// Intermediate stats result.
+    ExtendedStats(IntermediateExtendedStats),
     /// Intermediate sum result.
     Sum(IntermediateSum),
+    /// Intermediate top_hits result
+    TopHits(TopHitsTopNComputer),
 }

 impl IntermediateMetricResult {
@@ -285,6 +311,9 @@ impl IntermediateMetricResult {
             IntermediateMetricResult::Stats(intermediate_stats) => {
                 MetricResult::Stats(intermediate_stats.finalize())
             }
+            IntermediateMetricResult::ExtendedStats(intermediate_stats) => {
+                MetricResult::ExtendedStats(intermediate_stats.finalize())
+            }
             IntermediateMetricResult::Sum(intermediate_sum) => {
                 MetricResult::Sum(intermediate_sum.finalize().into())
             }
@@ -292,9 +321,13 @@
                 percentiles
                     .into_final_result(req.agg.as_percentile().expect("unexpected metric type")),
             ),
+            IntermediateMetricResult::TopHits(top_hits) => {
+                MetricResult::TopHits(top_hits.into_final_result())
+            }
         }
     }

+    // TODO: this is our top-of-the-chain fruit merge mech
     fn merge_fruits(&mut self, other: IntermediateMetricResult) -> crate::Result<()> {
         match (self, other) {
             (
@@ -321,6 +354,12 @@
             ) => {
                 stats_left.merge_fruits(stats_right);
             }
+            (
+                IntermediateMetricResult::ExtendedStats(extended_stats_left),
+                IntermediateMetricResult::ExtendedStats(extended_stats_right),
+            ) => {
+                extended_stats_left.merge_fruits(extended_stats_right);
+            }
             (IntermediateMetricResult::Sum(sum_left), IntermediateMetricResult::Sum(sum_right)) => {
                 sum_left.merge_fruits(sum_right);
             }
@@ -330,6 +369,9 @@
             ) => {
                 left.merge_fruits(right)?;
             }
+            (IntermediateMetricResult::TopHits(left), IntermediateMetricResult::TopHits(right)) => {
+                left.merge_fruits(right)?;
+            }
             _ => {
                 panic!("incompatible fruit types in tree or missing merge_fruits handler");
             }
@@ -351,11 +393,14 @@ pub enum IntermediateBucketResult {
     Histogram {
         /// The column_type of the underlying `Column` is DateTime
         is_date_agg: bool,
-        /// The buckets
+        /// The histogram buckets
         buckets: Vec<IntermediateHistogramBucketEntry>,
     },
     /// Term aggregation
-    Terms(IntermediateTermBucketResult),
+    Terms {
+        /// The term buckets
+        buckets: IntermediateTermBucketResult,
+    },
 }

 impl IntermediateBucketResult {
@@ -432,7 +477,7 @@
             };
             Ok(BucketResult::Histogram { buckets })
         }
-        IntermediateBucketResult::Terms(terms) => terms.into_final_result(
+        IntermediateBucketResult::Terms { buckets: terms } => terms.into_final_result(
             req.agg
                 .as_term()
                 .expect("unexpected aggregation, expected term aggregation"),
@@ -445,8 +490,12 @@
     fn merge_fruits(&mut self, other: IntermediateBucketResult) -> crate::Result<()> {
         match (self, other) {
             (
-                IntermediateBucketResult::Terms(term_res_left),
-                IntermediateBucketResult::Terms(term_res_right),
+                IntermediateBucketResult::Terms {
+                    buckets: term_res_left,
+                },
+                IntermediateBucketResult::Terms {
+                    buckets: term_res_right,
+                },
             ) => {
                 merge_maps(&mut term_res_left.entries, term_res_right.entries)?;
                 term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
@@ -530,8 +579,15 @@
             .into_iter()
             .filter(|bucket| bucket.1.doc_count as u64 >= req.min_doc_count)
             .map(|(key, entry)| {
+                let key_as_string = match key {
+                    IntermediateKey::Bool(key) => {
+                        let val = if key { "true" } else { "false" };
+                        Some(val.to_string())
+                    }
+                    _ => None,
+                };
                 Ok(BucketEntry {
-                    key_as_string: None,
+                    key_as_string,
                     key: key.into(),
                     doc_count: entry.doc_count as u64,
                     sub_aggregation: entry

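Per this hunk, only bool keys receive a `key_as_string`; all other key types keep `None`, which is why the bool test above asserts on `key_as_string` while the ip test does not. A self-contained reduction of the match (the `Key` enum here is a stand-in, not tantivy's):

enum Key {
    Bool(bool),
    F64(f64),
}

fn key_as_string(key: &Key) -> Option<String> {
    match key {
        Key::Bool(b) => {
            let val = if *b { "true" } else { "false" };
            Some(val.to_string())
        }
        // Numeric (and other) keys carry no string rendering here.
        _ => None,
    }
}

fn main() {
    assert_eq!(key_as_string(&Key::Bool(true)).as_deref(), Some("true"));
    assert_eq!(key_as_string(&Key::Bool(false)).as_deref(), Some("false"));
    assert!(key_as_string(&Key::F64(1.0)).is_none());
}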
@@ -2,7 +2,8 @@ use std::fmt::Debug;

 use serde::{Deserialize, Serialize};

-use super::{IntermediateStats, SegmentStatsCollector};
+use super::*;
+use crate::aggregation::*;

 /// A single-value metric aggregation that computes the average of numeric values that are
 /// extracted from the aggregated documents.
@@ -24,7 +25,7 @@ pub struct AverageAggregation {
     /// By default they will be ignored but it is also possible to treat them as if they had a
     /// value. Examples in JSON format:
     /// { "field": "my_numbers", "missing": "10.0" }
-    #[serde(default)]
+    #[serde(default, deserialize_with = "deserialize_option_f64")]
     pub missing: Option<f64>,
 }

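The swapped-in `deserialize_option_f64` is what lets `missing` accept JSON numbers as well as numeric strings such as "10.0" (exercised by the tests added below). The helper's implementation is not part of this diff; a plausible sketch of its behavior, including the NaN rejection the tests check:

use serde::{Deserialize, Deserializer};

// Plausible sketch of a `deserialize_option_f64` helper (the real one is not
// shown in this diff): accept JSON numbers or numeric strings like "10.0",
// pass through null/absent values, and reject NaN.
fn deserialize_option_f64<'de, D>(deserializer: D) -> Result<Option<f64>, D::Error>
where
    D: Deserializer<'de>,
{
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum NumOrStr {
        Num(f64),
        Str(String),
    }

    let num = match Option::<NumOrStr>::deserialize(deserializer)? {
        None => return Ok(None),
        Some(NumOrStr::Num(n)) => n,
        Some(NumOrStr::Str(s)) => s.parse::<f64>().map_err(|_| {
            serde::de::Error::custom(format!("Failed to parse f64 from string: {s:?}"))
        })?,
    };
    if num.is_nan() {
        return Err(serde::de::Error::custom("NaN is not a valid value for `missing`"));
    }
    Ok(Some(num))
}

#[derive(Deserialize)]
struct WithMissing {
    #[serde(default, deserialize_with = "deserialize_option_f64")]
    missing: Option<f64>,
}

fn main() {
    let v: WithMissing = serde_json::from_str(r#"{ "missing": "10.0" }"#).unwrap();
    assert_eq!(v.missing, Some(10.0));
    assert!(serde_json::from_str::<WithMissing>(r#"{ "missing": "NaN" }"#).is_err());
}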
@@ -65,3 +66,71 @@ impl IntermediateAverage {
         self.stats.finalize().avg
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn deserialization_with_missing_test1() {
+        let json = r#"{
+            "field": "score",
+            "missing": "10.0"
+        }"#;
+        let avg: AverageAggregation = serde_json::from_str(json).unwrap();
+        assert_eq!(avg.field, "score");
+        assert_eq!(avg.missing, Some(10.0));
+        // no dot
+        let json = r#"{
+            "field": "score",
+            "missing": "10"
+        }"#;
+        let avg: AverageAggregation = serde_json::from_str(json).unwrap();
+        assert_eq!(avg.field, "score");
+        assert_eq!(avg.missing, Some(10.0));
+
+        // from value
+        let avg: AverageAggregation = serde_json::from_value(json!({
+            "field": "score_f64",
+            "missing": 10u64,
+        }))
+        .unwrap();
+        assert_eq!(avg.missing, Some(10.0));
+        // from value
+        let avg: AverageAggregation = serde_json::from_value(json!({
+            "field": "score_f64",
+            "missing": 10u32,
+        }))
+        .unwrap();
+        assert_eq!(avg.missing, Some(10.0));
+        let avg: AverageAggregation = serde_json::from_value(json!({
+            "field": "score_f64",
+            "missing": 10i8,
+        }))
+        .unwrap();
+        assert_eq!(avg.missing, Some(10.0));
+    }
+
+    #[test]
+    fn deserialization_with_missing_test_fail() {
+        let json = r#"{
+            "field": "score",
+            "missing": "a"
+        }"#;
+        let avg: Result<AverageAggregation, _> = serde_json::from_str(json);
+        assert!(avg.is_err());
+        assert!(avg
+            .unwrap_err()
+            .to_string()
+            .contains("Failed to parse f64 from string: \"a\""));
+
+        // Disallow NaN
+        let json = r#"{
+            "field": "score",
+            "missing": "NaN"
+        }"#;
+        let avg: Result<AverageAggregation, _> = serde_json::from_str(json);
+        assert!(avg.is_err());
+        assert!(avg.unwrap_err().to_string().contains("NaN"));
+    }
+}

@@ -2,7 +2,8 @@ use std::fmt::Debug;

 use serde::{Deserialize, Serialize};

-use super::{IntermediateStats, SegmentStatsCollector};
+use super::*;
+use crate::aggregation::*;

 /// A single-value metric aggregation that counts the number of values that are
 /// extracted from the aggregated documents.
@@ -24,7 +25,7 @@ pub struct CountAggregation {
     /// By default they will be ignored but it is also possible to treat them as if they had a
     /// value. Examples in JSON format:
     /// { "field": "my_numbers", "missing": "10.0" }
-    #[serde(default)]
+    #[serde(default, deserialize_with = "deserialize_option_f64")]
     pub missing: Option<f64>,
 }

1181  src/aggregation/metric/extended_stats.rs  (new file; diff suppressed because it is too large)
Some files were not shown because too many files have changed in this diff.