Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-28 04:52:55 +00:00

Compare commits: index_writ...0.22.0 (93 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 17d5869ad6 | |
| | dfa3aed32d | |
| | 398817ce7b | |
| | 74940e9345 | |
| | 1e9fc51535 | |
| | 92c32979d2 | |
| | b644d78a32 | |
| | 4e79e11007 | |
| | 67ebba3c3c | |
| | 7ce950f141 | |
| | 0cffe5fb09 | |
| | b0e65560a1 | |
| | ec37295b2f | |
| | f6b0cc1aab | |
| | 7e41d31c6e | |
| | 40aa4abfe5 | |
| | 2650317622 | |
| | 6739357314 | |
| | d57622d54b | |
| | f745dbc054 | |
| | 79b041f81f | |
| | 0e16ed9ef7 | |
| | 88a3275dbb | |
| | 1223a87eb2 | |
| | 48630ceec9 | |
| | 72002e8a89 | |
| | 3c9297dd64 | |
| | 0e04ec3136 | |
| | 9b7f3a55cf | |
| | 1dacdb6c85 | |
| | 30483310ca | |
| | e1d18b5114 | |
| | 108f30ba23 | |
| | 5943ee46bd | |
| | f95a76293f | |
| | 014328e378 | |
| | 53f2fe1fbe | |
| | 9c75942aaf | |
| | bff7c58497 | |
| | 9ebc5ed053 | |
| | 0b56c88e69 | |
| | 24841f0b2a | |
| | 1a9fc10be9 | |
| | 07573a7f19 | |
| | daad2dc151 | |
| | 054f49dc31 | |
| | 47009ed2d3 | |
| | 0aae31d7d7 | |
| | 9caab45136 | |
| | 6d9a7b7eb0 | |
| | 7a2c5804b1 | |
| | 5319977171 | |
| | 828632e8c4 | |
| | 6b59ec6fd5 | |
| | b60d862150 | |
| | 4837c7811a | |
| | 5a2397d57e | |
| | 927b4432c9 | |
| | 7a0064db1f | |
| | 2e7327205d | |
| | 7bc5bf78e2 | |
| | ef603c8c7e | |
| | 28dd6b6546 | |
| | 1dda2bb537 | |
| | bf6544cf28 | |
| | ccecf946f7 | |
| | 19a859d6fd | |
| | 83af14caa4 | |
| | 4feeb2323d | |
| | 07bf66a197 | |
| | 0d4589219b | |
| | c2b0469180 | |
| | 7e1980b218 | |
| | ecb9a89a9f | |
| | 5e06e504e6 | |
| | 182f58cea6 | |
| | 337ffadefd | |
| | 22aa4daf19 | |
| | 493f9b2f2a | |
| | e246e5765d | |
| | 6097235eff | |
| | b700c42246 | |
| | 5b1bf1a993 | |
| | 041d4fced7 | |
| | 166fc15239 | |
| | 514a6e7fef | |
| | 82d9127191 | |
| | 03a1f40767 | |
| | 1c7c6fd591 | |
| | b525f653c0 | |
| | 90586bc1e2 | |
| | 832f1633de | |
| | 38db53c465 | |
.github/workflows/coverage.yml (vendored): 2 lines changed
@@ -3,8 +3,6 @@ name: Coverage
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

# Ensures that we cancel running jobs for the same PR / same workflow.
concurrency:
.github/workflows/test.yml (vendored): 7 lines changed
@@ -39,6 +39,13 @@ jobs:

      - name: Check Formatting
        run: cargo +nightly fmt --all -- --check

      - name: Check Stable Compilation
        run: cargo build --all-features

      - name: Check Bench Compilation
        run: cargo +nightly bench --no-run --profile=dev --all-features

      - uses: actions-rs/clippy-check@v1
        with:
CHANGELOG.md: 68 lines changed
@@ -1,3 +1,71 @@
Tantivy 0.22
================================

Tantivy 0.22 will be able to read indices created with Tantivy 0.21.

#### Bugfixes
- Fix null byte handling in JSON paths (null bytes in JSON keys caused a panic during indexing) [#2345](https://github.com/quickwit-oss/tantivy/pull/2345)(@PSeitz)
- Fix bug that can cause `get_docids_for_value_range` to panic. [#2295](https://github.com/quickwit-oss/tantivy/pull/2295)(@fulmicoton)
- Avoid single-document indices by increasing the minimum indexing memory to 15MB [#2176](https://github.com/quickwit-oss/tantivy/pull/2176)(@PSeitz)
- Fix merge panic for JSON fields [#2284](https://github.com/quickwit-oss/tantivy/pull/2284)(@PSeitz)
- Fix bug occurring when merging JSON objects indexed with positions. [#2253](https://github.com/quickwit-oss/tantivy/pull/2253)(@fulmicoton)
- Fix empty DateHistogram gap bug [#2183](https://github.com/quickwit-oss/tantivy/pull/2183)(@PSeitz)
- Fix range query end check (fields with less than one value per doc are affected) [#2226](https://github.com/quickwit-oss/tantivy/pull/2226)(@PSeitz)
- Handle exclusive out-of-bounds ranges on fastfield range queries [#2174](https://github.com/quickwit-oss/tantivy/pull/2174)(@PSeitz)

#### Breaking API Changes
- Rename ReloadPolicy OnCommit to OnCommitWithDelay [#2235](https://github.com/quickwit-oss/tantivy/pull/2235)(@giovannicuccu)
- Move exports from the root into modules [#2220](https://github.com/quickwit-oss/tantivy/pull/2220)(@PSeitz)
- Accept field name instead of `Field` in FilterCollector [#2196](https://github.com/quickwit-oss/tantivy/pull/2196)(@PSeitz)
- Remove deprecated IntOptions and DateTime [#2353](https://github.com/quickwit-oss/tantivy/pull/2353)(@PSeitz)

#### Features/Improvements
- Tantivy documents as a trait: index data directly without converting to tantivy types first [#2071](https://github.com/quickwit-oss/tantivy/pull/2071)(@ChillFish8)
- Encode some parts of the posting list as -1 instead of direct values (smaller inverted indices) [#2185](https://github.com/quickwit-oss/tantivy/pull/2185)(@trinity-1686a)
- **Aggregation**
    - Support deserializing f64 from string [#2311](https://github.com/quickwit-oss/tantivy/pull/2311)(@PSeitz)
    - Add a top_hits aggregator [#2198](https://github.com/quickwit-oss/tantivy/pull/2198)(@ditsuke)
    - Support bool type in term aggregation [#2318](https://github.com/quickwit-oss/tantivy/pull/2318)(@PSeitz)
    - Support IP addresses in term aggregation [#2319](https://github.com/quickwit-oss/tantivy/pull/2319)(@PSeitz)
    - Support date type in term aggregation [#2172](https://github.com/quickwit-oss/tantivy/pull/2172)(@PSeitz)
    - Support escaped dot when addressing a field [#2250](https://github.com/quickwit-oss/tantivy/pull/2250)(@PSeitz)
- Add ExistsQuery to match documents that have a value (see the sketch after this changelog excerpt) [#2160](https://github.com/quickwit-oss/tantivy/pull/2160)(@imotov)
- Expose TopDocs::order_by_u64_field again [#2282](https://github.com/quickwit-oss/tantivy/pull/2282)(@ditsuke)
- **Memory/Performance**
    - Faster TopN: replace BinaryHeap with TopNComputer [#2186](https://github.com/quickwit-oss/tantivy/pull/2186)(@PSeitz)
    - Reduce the number of allocations during indexing [#2257](https://github.com/quickwit-oss/tantivy/pull/2257)(@PSeitz)
    - Less memory while indexing: use docid deltas while indexing [#2249](https://github.com/quickwit-oss/tantivy/pull/2249)(@PSeitz)
    - Faster indexing: use the term hashmap in fastfield [#2243](https://github.com/quickwit-oss/tantivy/pull/2243)(@PSeitz)
    - Term hashmap: remove copy in is_empty and the unused unordered_id [#2229](https://github.com/quickwit-oss/tantivy/pull/2229)(@PSeitz)
    - Add a method to fetch a block of first values in columnar [#2330](https://github.com/quickwit-oss/tantivy/pull/2330)(@PSeitz)
    - Faster aggregations: add a fast path for full columns in fetch_block [#2328](https://github.com/quickwit-oss/tantivy/pull/2328)(@PSeitz)
    - Faster sstable loading: use fst for the sstable index [#2268](https://github.com/quickwit-oss/tantivy/pull/2268)(@trinity-1686a)
- **QueryParser**
    - Allow newline wherever space is allowed in the query parser [#2302](https://github.com/quickwit-oss/tantivy/pull/2302)(@trinity-1686a)
    - Allow some mixing of occur and bool in the strict query parser [#2323](https://github.com/quickwit-oss/tantivy/pull/2323)(@trinity-1686a)
    - Handle `*` inside a term in the lenient query parser [#2228](https://github.com/quickwit-oss/tantivy/pull/2228)(@trinity-1686a)
    - Add support for exists query syntax in the query parser [#2170](https://github.com/quickwit-oss/tantivy/pull/2170)(@trinity-1686a)
- Add shared search executor [#2312](https://github.com/quickwit-oss/tantivy/pull/2312)(@MochiXu)
- Truncate keys to u16::MAX in the term hashmap [#2299](https://github.com/quickwit-oss/tantivy/pull/2299)(@PSeitz)
- Report whether a term matched when warming up a posting list [#2309](https://github.com/quickwit-oss/tantivy/pull/2309)(@trinity-1686a)
- Support JSON fields in FuzzyTermQuery [#2173](https://github.com/quickwit-oss/tantivy/pull/2173)(@PingXia-at)
- Read the list of fields encoded in the term dictionary for JSON fields [#2184](https://github.com/quickwit-oss/tantivy/pull/2184)(@PSeitz)
- Add collect_block to BoxableSegmentCollector [#2331](https://github.com/quickwit-oss/tantivy/pull/2331)(@PSeitz)
- Expose the collect_block buffer size [#2326](https://github.com/quickwit-oss/tantivy/pull/2326)(@PSeitz)
- Forward regex parser errors [#2288](https://github.com/quickwit-oss/tantivy/pull/2288)(@adamreichold)
- Make FacetCounts defaultable and cloneable. [#2322](https://github.com/quickwit-oss/tantivy/pull/2322)(@adamreichold)
- Derive Debug for SchemaBuilder [#2254](https://github.com/quickwit-oss/tantivy/pull/2254)(@GodTamIt)
- Add missing inlines to tantivy options [#2245](https://github.com/quickwit-oss/tantivy/pull/2245)(@PSeitz)

Tantivy 0.21.1
================================
#### Bugfixes
- Range queries on fast fields with fewer values on that field than documents had an invalid end condition, leading to missing results. [#2226](https://github.com/quickwit-oss/tantivy/issues/2226)(@appaquet @PSeitz)
- Increase the minimum memory budget from 3MB to 15MB to avoid single-doc segments (API fix). [#2176](https://github.com/quickwit-oss/tantivy/issues/2176)(@PSeitz)

Tantivy 0.21
================================
#### Bugfixes
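To make the ExistsQuery entry above concrete, here is a minimal, hypothetical sketch; it is not taken from this diff. The schema, field names, and the exact constructor `ExistsQuery::new_exists_query` are assumptions about the 0.22 API.

```rust
use tantivy::collector::Count;
use tantivy::query::ExistsQuery;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    // Hypothetical schema: the probed field is declared FAST because
    // ExistsQuery works off the column (fast field) data.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let population = schema_builder.add_u64_field("population", INDEXED | FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer: IndexWriter = index.writer_with_num_threads(1, 50_000_000)?;
    writer.add_document(doc!(title => "tokyo", population => 37_000_000u64))?;
    // This document has no value for `population`.
    writer.add_document(doc!(title => "unnamed village"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // Assumed 0.22 constructor: takes the field name as a String.
    let query = ExistsQuery::new_exists_query("population".to_string());
    let docs_with_population = searcher.search(&query, &Count)?;
    assert_eq!(docs_with_population, 1);
    Ok(())
}
```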
Cargo.toml: 52 lines changed
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.21.0"
version = "0.22.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -11,58 +11,57 @@ repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2021"
rust-version = "1.62"
rust-version = "1.63"
exclude = ["benches/*.json", "benches/*.txt"]

[dependencies]
oneshot = "0.1.5"
base64 = "0.21.0"
base64 = "0.22.0"
byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
aho-corasick = "1.0"
tantivy-fst = "0.4.0"
memmap2 = { version = "0.7.1", optional = true }
tantivy-fst = "0.5"
memmap2 = { version = "0.9.0", optional = true }
lz4_flex = { version = "0.11", default-features = false, optional = true }
zstd = { version = "0.12", optional = true, default-features = false }
zstd = { version = "0.13", optional = true, default-features = false }
tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
fs4 = { version = "0.6.3", optional = true }
fs4 = { version = "0.8.0", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
census = "0.4.0"
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker4x"] }
census = "0.4.2"
rustc-hash = "1.1.0"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = { version = "0.5.0", optional = true }
murmurhash32 = "0.3.0"
time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.11.0"
lru = "0.12.0"
fastdivide = "0.4.0"
itertools = "0.11.0"
itertools = "0.12.0"
measure_time = "0.8.2"
async-trait = "0.1.53"
arc-swap = "1.5.0"

columnar = { version= "0.2", path="./columnar", package ="tantivy-columnar" }
sstable = { version= "0.2", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version= "0.2", path="./stacker", package ="tantivy-stacker" }
query-grammar = { version= "0.21.0", path="./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version= "0.5", path="./bitpacker" }
common = { version= "0.6", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version= "0.2", path="./tokenizer-api", package="tantivy-tokenizer-api" }
columnar = { version= "0.3", path="./columnar", package ="tantivy-columnar" }
sstable = { version= "0.3", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version= "0.3", path="./stacker", package ="tantivy-stacker" }
query-grammar = { version= "0.22.0", path="./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version= "0.6", path="./bitpacker" }
common = { version= "0.7", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version= "0.3", path="./tokenizer-api", package="tantivy-tokenizer-api" }
sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
futures-util = { version = "0.3.28", optional = true }
fnv = "1.0.7"

[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"
@@ -74,15 +73,17 @@ matches = "0.1.9"
pretty_assertions = "1.2.1"
proptest = "1.0.0"
test-log = "0.2.10"
env_logger = "0.10.0"
futures = "0.3.21"
paste = "1.0.11"
more-asserts = "0.3.1"
rand_distr = "0.4.3"
time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
postcard = { version = "1.0.4", features = [
    "use-std",
], default-features = false }

[target.'cfg(not(windows))'.dev-dependencies]
criterion = "0.5"
pprof = { git = "https://github.com/PSeitz/pprof-rs/", rev = "53af24b", features = ["flamegraph", "criterion"] } # temp fork that works with criterion 0.5
criterion = { version = "0.5", default-features = false }

[dev-dependencies.fail]
version = "0.5.0"
@@ -115,6 +116,11 @@ unstable = [] # useful for benches.

quickwit = ["sstable", "futures-util"]

# Compares only the hash of a string when indexing data.
# Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.
# Uses 64bit ahash.
compare_hash_only = ["stacker/compare_hash_only"]

[workspace]
members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
README.md: 33 lines changed
@@ -5,19 +5,18 @@
[](https://opensource.org/licenses/MIT)
[](https://crates.io/crates/tantivy)


<img src="https://tantivy-search.github.io/logo/tantivy-logo.png" alt="Tantivy, the fastest full-text search engine library written in Rust" height="250">

**Tantivy** is a **full-text search engine library** written in Rust.
## Fast full-text search engine library written in Rust

It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used
to build such a search engine.
**If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our distributed search engine built on top of Tantivy.**

Tantivy is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used to build such a search engine.

Tantivy is, in fact, strongly inspired by Lucene's design.

If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.

# Benchmark
## Benchmark

The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
performance for different types of queries/collections.
@@ -28,7 +27,7 @@ Your mileage WILL vary depending on the nature of queries and their load.

Details about the benchmark can be found at this [repository](https://github.com/quickwit-oss/search-benchmark-game).

# Features
## Features

- Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages) with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
@@ -54,11 +53,11 @@ Details about the benchmark can be found at this [repository](https://github.com
- Searcher Warmer API
- Cheesy logo with a horse

## Non-features
### Non-features

Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).

# Getting started
## Getting started

Tantivy works on stable Rust and supports Linux, macOS, and Windows.

@@ -68,7 +67,7 @@ index documents, and search via the CLI or a small server with a REST API.
It walks you through getting a Wikipedia search engine up and running in a few minutes.
- [Reference doc for the last released version](https://docs.rs/tantivy/)

# How can I support this project?
## How can I support this project?

There are many ways to support this project.

@@ -79,16 +78,16 @@ There are many ways to support this project.
- Contribute code (you can join [our Discord server](https://discord.gg/MT27AG5EVE))
- Talk about Tantivy around you

# Contributing code
## Contributing code

We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
Feel free to update CHANGELOG.md with your contribution.

## Tokenizer
### Tokenizer

When implementing a tokenizer for tantivy, depend on the `tantivy-tokenizer-api` crate (see the sketch below).
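As a rough illustration of the paragraph above, the sketch below registers a custom analyzer assembled from existing tokenizer components; a brand-new tokenizer type would instead implement the traits from the `tantivy-tokenizer-api` crate. The analyzer name `custom_en` and the field are made up for this example.

```rust
use tantivy::schema::{Schema, TextFieldIndexing, TextOptions};
use tantivy::tokenizer::{LowerCaser, RemoveLongFilter, SimpleTokenizer, TextAnalyzer};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // The schema references the analyzer by name; it must be registered
    // on the index before documents are indexed.
    let text_options = TextOptions::default()
        .set_indexing_options(TextFieldIndexing::default().set_tokenizer("custom_en"));
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", text_options);
    let index = Index::create_in_ram(schema_builder.build());

    // Compose an analyzer from existing building blocks and register it
    // under the name used in the schema above.
    let analyzer = TextAnalyzer::builder(SimpleTokenizer::default())
        .filter(RemoveLongFilter::limit(40))
        .filter(LowerCaser)
        .build();
    index.tokenizers().register("custom_en", analyzer);
    Ok(())
}
```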
## Clone and build locally
### Clone and build locally

Tantivy compiles on stable Rust.
To check out and run tests, you can simply run:
@@ -99,7 +98,7 @@ cd tantivy
cargo test
```

# Companies Using Tantivy
## Companies Using Tantivy

<p align="left">
<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
@@ -111,7 +110,7 @@ cargo test
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
</p>

# FAQ
## FAQ

### Can I use Tantivy in other languages?
@@ -1,14 +1,99 @@
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use pprof::criterion::{Output, PProfProfiler};
use tantivy::schema::{FAST, INDEXED, STORED, STRING, TEXT};
use tantivy::Index;
use criterion::{criterion_group, criterion_main, BatchSize, Bencher, Criterion, Throughput};
use tantivy::schema::{TantivyDocument, FAST, INDEXED, STORED, STRING, TEXT};
use tantivy::{tokenizer, Index, IndexWriter};

const HDFS_LOGS: &str = include_str!("hdfs.json");
const GH_LOGS: &str = include_str!("gh.json");
const WIKI: &str = include_str!("wiki.json");

fn get_lines(input: &str) -> Vec<&str> {
    input.trim().split('\n').collect()
fn benchmark(
    b: &mut Bencher,
    input: &str,
    schema: tantivy::schema::Schema,
    commit: bool,
    parse_json: bool,
    is_dynamic: bool,
) {
    if is_dynamic {
        benchmark_dynamic_json(b, input, schema, commit, parse_json)
    } else {
        _benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
            TantivyDocument::parse_json(&schema, doc_json).unwrap()
        })
    }
}

fn get_index(schema: tantivy::schema::Schema) -> Index {
    let mut index = Index::create_in_ram(schema.clone());
    let ff_tokenizer_manager = tokenizer::TokenizerManager::default();
    ff_tokenizer_manager.register(
        "raw",
        tokenizer::TextAnalyzer::builder(tokenizer::RawTokenizer::default())
            .filter(tokenizer::RemoveLongFilter::limit(255))
            .build(),
    );
    index.set_fast_field_tokenizers(ff_tokenizer_manager.clone());
    index
}

fn _benchmark(
    b: &mut Bencher,
    input: &str,
    schema: tantivy::schema::Schema,
    commit: bool,
    include_json_parsing: bool,
    create_doc: impl Fn(&tantivy::schema::Schema, &str) -> TantivyDocument,
) {
    if include_json_parsing {
        let lines: Vec<&str> = input.trim().split('\n').collect();
        b.iter(|| {
            let index = get_index(schema.clone());
            let mut index_writer: IndexWriter =
                index.writer_with_num_threads(1, 100_000_000).unwrap();
            for doc_json in &lines {
                let doc = create_doc(&schema, doc_json);
                index_writer.add_document(doc).unwrap();
            }
            if commit {
                index_writer.commit().unwrap();
            }
        })
    } else {
        let docs: Vec<_> = input
            .trim()
            .split('\n')
            .map(|doc_json| create_doc(&schema, doc_json))
            .collect();
        b.iter_batched(
            || docs.clone(),
            |docs| {
                let index = get_index(schema.clone());
                let mut index_writer: IndexWriter =
                    index.writer_with_num_threads(1, 100_000_000).unwrap();
                for doc in docs {
                    index_writer.add_document(doc).unwrap();
                }
                if commit {
                    index_writer.commit().unwrap();
                }
            },
            BatchSize::SmallInput,
        )
    }
}
fn benchmark_dynamic_json(
    b: &mut Bencher,
    input: &str,
    schema: tantivy::schema::Schema,
    commit: bool,
    parse_json: bool,
) {
    let json_field = schema.get_field("json").unwrap();
    _benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
        let json_val: serde_json::Map<String, serde_json::Value> =
            serde_json::from_str(doc_json).unwrap();
        tantivy::doc!(json_field=>json_val)
    })
}

pub fn hdfs_index_benchmark(c: &mut Criterion) {
@@ -19,7 +104,14 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
    schema_builder.add_text_field("severity", STRING);
    schema_builder.build()
};
let schema_with_store = {
let schema_only_fast = {
    let mut schema_builder = tantivy::schema::SchemaBuilder::new();
    schema_builder.add_u64_field("timestamp", FAST);
    schema_builder.add_text_field("body", FAST);
    schema_builder.add_text_field("severity", FAST);
    schema_builder.build()
};
let _schema_with_store = {
    let mut schema_builder = tantivy::schema::SchemaBuilder::new();
    schema_builder.add_u64_field("timestamp", INDEXED | STORED);
    schema_builder.add_text_field("body", TEXT | STORED);
@@ -28,74 +120,39 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
};
let dynamic_schema = {
    let mut schema_builder = tantivy::schema::SchemaBuilder::new();
    schema_builder.add_json_field("json", TEXT);
    schema_builder.add_json_field("json", TEXT | FAST);
    schema_builder.build()
};

let mut group = c.benchmark_group("index-hdfs");
group.throughput(Throughput::Bytes(HDFS_LOGS.len() as u64));
group.sample_size(20);
group.bench_function("index-hdfs-no-commit", |b| {
    let lines = get_lines(HDFS_LOGS);
    b.iter(|| {
        let index = Index::create_in_ram(schema.clone());
        let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let doc = schema.parse_document(doc_json).unwrap();
            index_writer.add_document(doc).unwrap();

let benches = [
    ("only-indexed-".to_string(), schema, false),
    //("stored-".to_string(), _schema_with_store, false),
    ("only-fast-".to_string(), schema_only_fast, false),
    ("dynamic-".to_string(), dynamic_schema, true),
];

for (prefix, schema, is_dynamic) in benches {
    for commit in [false, true] {
        let suffix = if commit { "with-commit" } else { "no-commit" };
        for parse_json in [false] {
            // for parse_json in [false, true] {
            let suffix = if parse_json {
                format!("{}-with-json-parsing", suffix)
            } else {
                format!("{}", suffix)
            };

            let bench_name = format!("{}{}", prefix, suffix);
            group.bench_function(bench_name, |b| {
                benchmark(b, HDFS_LOGS, schema.clone(), commit, parse_json, is_dynamic)
            });
        }
    })
});
group.bench_function("index-hdfs-with-commit", |b| {
    let lines = get_lines(HDFS_LOGS);
    b.iter(|| {
        let index = Index::create_in_ram(schema.clone());
        let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let doc = schema.parse_document(doc_json).unwrap();
            index_writer.add_document(doc).unwrap();
        }
        index_writer.commit().unwrap();
    })
});
group.bench_function("index-hdfs-no-commit-with-docstore", |b| {
    let lines = get_lines(HDFS_LOGS);
    b.iter(|| {
        let index = Index::create_in_ram(schema_with_store.clone());
        let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let doc = schema.parse_document(doc_json).unwrap();
            index_writer.add_document(doc).unwrap();
        }
    })
});
group.bench_function("index-hdfs-with-commit-with-docstore", |b| {
    let lines = get_lines(HDFS_LOGS);
    b.iter(|| {
        let index = Index::create_in_ram(schema_with_store.clone());
        let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let doc = schema.parse_document(doc_json).unwrap();
            index_writer.add_document(doc).unwrap();
        }
        index_writer.commit().unwrap();
    })
});
group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
    let lines = get_lines(HDFS_LOGS);
    b.iter(|| {
        let index = Index::create_in_ram(dynamic_schema.clone());
        let json_field = dynamic_schema.get_field("json").unwrap();
        let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let json_val: serde_json::Map<String, serde_json::Value> =
                serde_json::from_str(doc_json).unwrap();
            let doc = tantivy::doc!(json_field=>json_val);
            index_writer.add_document(doc).unwrap();
        }
        index_writer.commit().unwrap();
    })
});
        }
    }
}

pub fn gh_index_benchmark(c: &mut Criterion) {
@@ -104,38 +161,24 @@ pub fn gh_index_benchmark(c: &mut Criterion) {
    schema_builder.add_json_field("json", TEXT | FAST);
    schema_builder.build()
};
let dynamic_schema_fast = {
    let mut schema_builder = tantivy::schema::SchemaBuilder::new();
    schema_builder.add_json_field("json", FAST);
    schema_builder.build()
};

let mut group = c.benchmark_group("index-gh");
group.throughput(Throughput::Bytes(GH_LOGS.len() as u64));

group.bench_function("index-gh-no-commit", |b| {
    let lines = get_lines(GH_LOGS);
    b.iter(|| {
        let json_field = dynamic_schema.get_field("json").unwrap();
        let index = Index::create_in_ram(dynamic_schema.clone());
        let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let json_val: serde_json::Map<String, serde_json::Value> =
                serde_json::from_str(doc_json).unwrap();
            let doc = tantivy::doc!(json_field=>json_val);
            index_writer.add_document(doc).unwrap();
        }
    })
    benchmark_dynamic_json(b, GH_LOGS, dynamic_schema.clone(), false, false)
});
group.bench_function("index-gh-with-commit", |b| {
    let lines = get_lines(GH_LOGS);
    b.iter(|| {
        let json_field = dynamic_schema.get_field("json").unwrap();
        let index = Index::create_in_ram(dynamic_schema.clone());
        let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let json_val: serde_json::Map<String, serde_json::Value> =
                serde_json::from_str(doc_json).unwrap();
            let doc = tantivy::doc!(json_field=>json_val);
            index_writer.add_document(doc).unwrap();
        }
        index_writer.commit().unwrap();
    })
group.bench_function("index-gh-fast", |b| {
    benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), false, false)
});

group.bench_function("index-gh-fast-with-commit", |b| {
    benchmark_dynamic_json(b, GH_LOGS, dynamic_schema_fast.clone(), true, false)
});
}

@@ -150,33 +193,10 @@ pub fn wiki_index_benchmark(c: &mut Criterion) {
group.throughput(Throughput::Bytes(WIKI.len() as u64));

group.bench_function("index-wiki-no-commit", |b| {
    let lines = get_lines(WIKI);
    b.iter(|| {
        let json_field = dynamic_schema.get_field("json").unwrap();
        let index = Index::create_in_ram(dynamic_schema.clone());
        let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let json_val: serde_json::Map<String, serde_json::Value> =
                serde_json::from_str(doc_json).unwrap();
            let doc = tantivy::doc!(json_field=>json_val);
            index_writer.add_document(doc).unwrap();
        }
    })
    benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), false, false)
});
group.bench_function("index-wiki-with-commit", |b| {
    let lines = get_lines(WIKI);
    b.iter(|| {
        let json_field = dynamic_schema.get_field("json").unwrap();
        let index = Index::create_in_ram(dynamic_schema.clone());
        let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
        for doc_json in &lines {
            let json_val: serde_json::Map<String, serde_json::Value> =
                serde_json::from_str(doc_json).unwrap();
            let doc = tantivy::doc!(json_field=>json_val);
            index_writer.add_document(doc).unwrap();
        }
        index_writer.commit().unwrap();
    })
    benchmark_dynamic_json(b, WIKI, dynamic_schema.clone(), true, false)
});
}

@@ -187,12 +207,12 @@ criterion_group! {
}
criterion_group! {
    name = gh_benches;
    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
    config = Criterion::default();
    targets = gh_index_benchmark
}
criterion_group! {
    name = wiki_benches;
    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
    config = Criterion::default();
    targets = wiki_index_benchmark
}
criterion_main!(benches, gh_benches, wiki_benches);
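For context on the benchmark rewrite above: the dynamic variants index raw JSON through a single JSON field. The following standalone sketch (not part of the diff) reuses the calls that appear in the benchmark, namely `add_json_field`, `writer_with_num_threads`, and `tantivy::doc!`; the sample document and the memory budget are arbitrary, and `serde_json` is assumed to be available as a dependency.

```rust
use tantivy::schema::{SchemaBuilder, FAST, TEXT};
use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    // Dynamic schema with a single JSON field, as in the benchmark above.
    let mut schema_builder = SchemaBuilder::new();
    let json_field = schema_builder.add_json_field("json", TEXT | FAST);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer: IndexWriter = index.writer_with_num_threads(1, 100_000_000)?;

    // Parse an arbitrary JSON object and feed it to the JSON field.
    let json_val: serde_json::Map<String, serde_json::Value> =
        serde_json::from_str(r#"{"severity": "INFO", "timestamp": 1690000000}"#).unwrap();
    writer.add_document(doc!(json_field => json_val))?;
    writer.commit()?;
    Ok(())
}
```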
@@ -1,6 +1,6 @@
[package]
name = "tantivy-bitpacker"
version = "0.5.0"
version = "0.6.0"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
@@ -15,7 +15,7 @@ homepage = "https://github.com/quickwit-oss/tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
bitpacking = {version="0.8", default-features=false, features = ["bitpacker1x"]}
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker1x"] }

[dev-dependencies]
rand = "0.8"

@@ -1,4 +1,3 @@
use std::convert::TryInto;
use std::io;
use std::ops::{Range, RangeInclusive};

@@ -367,7 +366,7 @@ mod test {
        let mut output: Vec<u32> = Vec::new();
        for len in [0, 1, 2, 32, 33, 34, 64] {
            for start_idx in 0u32..32u32 {
                output.resize(len as usize, 0);
                output.resize(len, 0);
                bitunpacker.get_batch_u32s(start_idx, &buffer, &mut output);
                for i in 0..len {
                    let expected = (start_idx + i as u32) & mask;
cliff.toml: 83 lines changed
@@ -1,6 +1,10 @@
# configuration file for git-cliff
# see https://github.com/orhun/git-cliff#configuration-file

[remote.github]
owner = "quickwit-oss"
repo = "tantivy"

[changelog]
# changelog header
header = """
@@ -8,15 +12,43 @@ header = """
# template for the changelog body
# https://tera.netlify.app/docs/#introduction
body = """
{% if version %}\
{{ version | trim_start_matches(pat="v") }} ({{ timestamp | date(format="%Y-%m-%d") }})
==================
{% else %}\
## [unreleased]
{% endif %}\
## What's Changed

{%- if version %} in {{ version }}{%- endif -%}
{% for commit in commits %}
- {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | split(pat="\n") | first | trim | upper_first }}(@{{ commit.author.name }})\
{% endfor %}
{% if commit.github.pr_title -%}
{%- set commit_message = commit.github.pr_title -%}
{%- else -%}
{%- set commit_message = commit.message -%}
{%- endif -%}
- {{ commit_message | split(pat="\n") | first | trim }}\
{% if commit.github.pr_number %} \
[#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}){% if commit.github.username %}(@{{ commit.github.username }}){%- endif -%} \
{%- endif %}
{%- endfor -%}

{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
{% raw %}\n{% endraw -%}
## New Contributors
{%- endif %}\
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* @{{ contributor.username }} made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
{%- endfor -%}

{% if version %}
{% if previous.version %}
**Full Changelog**: {{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}
{% endif %}
{% else -%}
{% raw %}\n{% endraw %}
{% endif %}

{%- macro remote_url() -%}
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}
"""
# remove the leading and trailing whitespace from the template
trim = true
@@ -25,53 +57,24 @@ footer = """
"""

postprocessors = [
    { pattern = 'Paul Masurel', replace = "fulmicoton"}, # replace with github user
    { pattern = 'PSeitz', replace = "PSeitz"}, # replace with github user
    { pattern = 'Adam Reichold', replace = "adamreichold"}, # replace with github user
    { pattern = 'trinity-1686a', replace = "trinity-1686a"}, # replace with github user
    { pattern = 'Michael Kleen', replace = "mkleen"}, # replace with github user
    { pattern = 'Adrien Guillo', replace = "guilload"}, # replace with github user
    { pattern = 'François Massot', replace = "fmassot"}, # replace with github user
    { pattern = 'Naveen Aiathurai', replace = "naveenann"}, # replace with github user
    { pattern = '', replace = ""}, # replace with github user
]

[git]
# parse the commits based on https://www.conventionalcommits.org
# This is required or commit.message contains the whole commit message and not just the title
conventional_commits = true
conventional_commits = false
# filter out the commits that are not conventional
filter_unconventional = false
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for preprocessing the commit messages
commit_preprocessors = [
    { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "[#${2}](https://github.com/quickwit-oss/tantivy/issues/${2})"}, # replace issue numbers
    { pattern = '\((\w+\s)?#([0-9]+)\)', replace = ""},
]
#link_parsers = [
#{ pattern = "#(\\d+)", href = "https://github.com/quickwit-oss/tantivy/pulls/$1"},
#]
# regex for parsing and grouping commits
commit_parsers = [
    { message = "^feat", group = "Features"},
    { message = "^fix", group = "Bug Fixes"},
    { message = "^doc", group = "Documentation"},
    { message = "^perf", group = "Performance"},
    { message = "^refactor", group = "Refactor"},
    { message = "^style", group = "Styling"},
    { message = "^test", group = "Testing"},
    { message = "^chore\\(release\\): prepare for", skip = true},
    { message = "(?i)clippy", skip = true},
    { message = "(?i)dependabot", skip = true},
    { message = "(?i)fmt", skip = true},
    { message = "(?i)bump", skip = true},
    { message = "(?i)readme", skip = true},
    { message = "(?i)comment", skip = true},
    { message = "(?i)spelling", skip = true},
    { message = "^chore", group = "Miscellaneous Tasks"},
    { body = ".*security", group = "Security"},
    { message = ".*", group = "Other", default_scope = "other"},
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
@@ -1,6 +1,6 @@
[package]
name = "tantivy-columnar"
version = "0.2.0"
version = "0.3.0"
edition = "2021"
license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy"
@@ -9,15 +9,15 @@ description = "column oriented storage for tantivy"
categories = ["database-implementations", "data-structures", "compression"]

[dependencies]
itertools = "0.11.0"
fnv = "1.0.7"
itertools = "0.12.0"
fastdivide = "0.4.0"

stacker = { version= "0.2", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.2", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.6", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.5", path = "../bitpacker/" }
stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.3", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.7", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.6", path = "../bitpacker/" }
serde = "1.0.152"
downcast-rs = "1.2.0"

[dev-dependencies]
proptest = "1"

columnar/benches/bench_first_vals.rs (new file): 155 lines

@@ -0,0 +1,155 @@
#![feature(test)]
extern crate test;

use std::sync::Arc;

use rand::prelude::*;
use tantivy_columnar::column_values::{serialize_and_load_u64_based_column_values, CodecType};
use tantivy_columnar::*;
use test::{black_box, Bencher};

struct Columns {
    pub optional: Column,
    pub full: Column,
    pub multi: Column,
}

fn get_test_columns() -> Columns {
    let data = generate_permutation();
    let mut dataframe_writer = ColumnarWriter::default();
    for (idx, val) in data.iter().enumerate() {
        dataframe_writer.record_numerical(idx as u32, "full_values", NumericalValue::U64(*val));
        if idx % 2 == 0 {
            dataframe_writer.record_numerical(
                idx as u32,
                "optional_values",
                NumericalValue::U64(*val),
            );
        }
        dataframe_writer.record_numerical(idx as u32, "multi_values", NumericalValue::U64(*val));
        dataframe_writer.record_numerical(idx as u32, "multi_values", NumericalValue::U64(*val));
    }
    let mut buffer: Vec<u8> = Vec::new();
    dataframe_writer
        .serialize(data.len() as u32, None, &mut buffer)
        .unwrap();
    let columnar = ColumnarReader::open(buffer).unwrap();

    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("optional_values").unwrap();
    assert_eq!(cols.len(), 1);
    let optional = cols[0].open_u64_lenient().unwrap().unwrap();
    assert_eq!(optional.index.get_cardinality(), Cardinality::Optional);

    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("full_values").unwrap();
    assert_eq!(cols.len(), 1);
    let column_full = cols[0].open_u64_lenient().unwrap().unwrap();
    assert_eq!(column_full.index.get_cardinality(), Cardinality::Full);

    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("multi_values").unwrap();
    assert_eq!(cols.len(), 1);
    let multi = cols[0].open_u64_lenient().unwrap().unwrap();
    assert_eq!(multi.index.get_cardinality(), Cardinality::Multivalued);

    Columns {
        optional,
        full: column_full,
        multi,
    }
}

const NUM_VALUES: u64 = 100_000;
fn generate_permutation() -> Vec<u64> {
    let mut permutation: Vec<u64> = (0u64..NUM_VALUES).collect();
    permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
    permutation
}

pub fn serialize_and_load(column: &[u64], codec_type: CodecType) -> Arc<dyn ColumnValues<u64>> {
    serialize_and_load_u64_based_column_values(&column, &[codec_type])
}

fn run_bench_on_column_full_scan(b: &mut Bencher, column: Column) {
    let num_iter = black_box(NUM_VALUES);
    b.iter(|| {
        let mut sum = 0u64;
        for i in 0..num_iter as u32 {
            let val = column.first(i);
            sum += val.unwrap_or(0);
        }
        sum
    });
}
fn run_bench_on_column_block_fetch(b: &mut Bencher, column: Column) {
    let mut block: Vec<Option<u64>> = vec![None; 64];
    let fetch_docids = (0..64).collect::<Vec<_>>();
    b.iter(move || {
        column.first_vals(&fetch_docids, &mut block);
        block[0]
    });
}
fn run_bench_on_column_block_single_calls(b: &mut Bencher, column: Column) {
    let mut block: Vec<Option<u64>> = vec![None; 64];
    let fetch_docids = (0..64).collect::<Vec<_>>();
    b.iter(move || {
        for i in 0..fetch_docids.len() {
            block[i] = column.first(fetch_docids[i]);
        }
        block[0]
    });
}

/// Column first method
#[bench]
fn bench_get_first_on_full_column_full_scan(b: &mut Bencher) {
    let column = get_test_columns().full;
    run_bench_on_column_full_scan(b, column);
}

#[bench]
fn bench_get_first_on_optional_column_full_scan(b: &mut Bencher) {
    let column = get_test_columns().optional;
    run_bench_on_column_full_scan(b, column);
}

#[bench]
fn bench_get_first_on_multi_column_full_scan(b: &mut Bencher) {
    let column = get_test_columns().multi;
    run_bench_on_column_full_scan(b, column);
}

/// Block fetch column accessor
#[bench]
fn bench_get_block_first_on_optional_column(b: &mut Bencher) {
    let column = get_test_columns().optional;
    run_bench_on_column_block_fetch(b, column);
}

#[bench]
fn bench_get_block_first_on_multi_column(b: &mut Bencher) {
    let column = get_test_columns().multi;
    run_bench_on_column_block_fetch(b, column);
}

#[bench]
fn bench_get_block_first_on_full_column(b: &mut Bencher) {
    let column = get_test_columns().full;
    run_bench_on_column_block_fetch(b, column);
}

#[bench]
fn bench_get_block_first_on_optional_column_single_calls(b: &mut Bencher) {
    let column = get_test_columns().optional;
    run_bench_on_column_block_single_calls(b, column);
}

#[bench]
fn bench_get_block_first_on_multi_column_single_calls(b: &mut Bencher) {
    let column = get_test_columns().multi;
    run_bench_on_column_block_single_calls(b, column);
}

#[bench]
fn bench_get_block_first_on_full_column_single_calls(b: &mut Bencher) {
    let column = get_test_columns().full;
    run_bench_on_column_block_single_calls(b, column);
}
@@ -16,14 +16,6 @@ fn generate_permutation() -> Vec<u64> {
|
||||
permutation
|
||||
}
|
||||
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
@@ -8,7 +8,6 @@ license = "MIT"
|
||||
columnar = {path="../", package="tantivy-columnar"}
|
||||
serde_json = "1"
|
||||
serde_json_borrow = {git="https://github.com/PSeitz/serde_json_borrow/"}
|
||||
serde = "1"
|
||||
|
||||
[workspace]
|
||||
members = []
|
||||
|
||||
@@ -14,20 +14,32 @@ impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
|
||||
ColumnBlockAccessor<T>
|
||||
{
|
||||
#[inline]
|
||||
pub fn fetch_block(&mut self, docs: &[u32], accessor: &Column<T>) {
|
||||
self.docid_cache.clear();
|
||||
self.row_id_cache.clear();
|
||||
accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
|
||||
self.val_cache.resize(self.row_id_cache.len(), T::default());
|
||||
accessor
|
||||
.values
|
||||
.get_vals(&self.row_id_cache, &mut self.val_cache);
|
||||
pub fn fetch_block<'a>(&'a mut self, docs: &'a [u32], accessor: &Column<T>) {
|
||||
if accessor.index.get_cardinality().is_full() {
|
||||
self.val_cache.resize(docs.len(), T::default());
|
||||
accessor.values.get_vals(docs, &mut self.val_cache);
|
||||
} else {
|
||||
self.docid_cache.clear();
|
||||
self.row_id_cache.clear();
|
||||
accessor.row_ids_for_docs(docs, &mut self.docid_cache, &mut self.row_id_cache);
|
||||
self.val_cache.resize(self.row_id_cache.len(), T::default());
|
||||
accessor
|
||||
.values
|
||||
.get_vals(&self.row_id_cache, &mut self.val_cache);
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub fn fetch_block_with_missing(&mut self, docs: &[u32], accessor: &Column<T>, missing: T) {
|
||||
self.fetch_block(docs, accessor);
|
||||
// We can compare docid_cache with docs to find missing docs
|
||||
if docs.len() != self.docid_cache.len() || accessor.index.is_multivalue() {
|
||||
// no missing values
|
||||
if accessor.index.get_cardinality().is_full() {
|
||||
return;
|
||||
}
|
||||
|
||||
// We can compare docid_cache length with docs to find missing docs
|
||||
// For multi value columns we can't rely on the length and always need to scan
|
||||
if accessor.index.get_cardinality().is_multivalue() || docs.len() != self.docid_cache.len()
|
||||
{
|
||||
self.missing_docids_cache.clear();
|
||||
find_missing_docs(docs, &self.docid_cache, |doc| {
|
||||
self.missing_docids_cache.push(doc);
|
||||
@@ -44,11 +56,25 @@ impl<T: PartialOrd + Copy + std::fmt::Debug + Send + Sync + 'static + Default>
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn iter_docid_vals(&self) -> impl Iterator<Item = (DocId, T)> + '_ {
|
||||
self.docid_cache
|
||||
.iter()
|
||||
.cloned()
|
||||
.zip(self.val_cache.iter().cloned())
|
||||
/// Returns an iterator over the docids and values
|
||||
/// The passed in `docs` slice needs to be the same slice that was passed to `fetch_block` or
|
||||
/// `fetch_block_with_missing`.
|
||||
///
|
||||
/// The docs is used if the column is full (each docs has exactly one value), otherwise the
|
||||
/// internal docid vec is used for the iterator, which e.g. may contain duplicate docs.
|
||||
pub fn iter_docid_vals<'a>(
|
||||
&'a self,
|
||||
docs: &'a [u32],
|
||||
accessor: &Column<T>,
|
||||
) -> impl Iterator<Item = (DocId, T)> + '_ {
|
||||
if accessor.index.get_cardinality().is_full() {
|
||||
docs.iter().cloned().zip(self.val_cache.iter().cloned())
|
||||
} else {
|
||||
self.docid_cache
|
||||
.iter()
|
||||
.cloned()
|
||||
.zip(self.val_cache.iter().cloned())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,17 +3,17 @@ mod serialize;
|
||||
|
||||
use std::fmt::{self, Debug};
|
||||
use std::io::Write;
|
||||
use std::ops::{Deref, Range, RangeInclusive};
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::sync::Arc;
|
||||
|
||||
use common::BinarySerializable;
|
||||
pub use dictionary_encoded::{BytesColumn, StrColumn};
|
||||
pub use serialize::{
|
||||
open_column_bytes, open_column_str, open_column_u128, open_column_u64,
|
||||
serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
|
||||
open_column_bytes, open_column_str, open_column_u128, open_column_u128_as_compact_u64,
|
||||
open_column_u64, serialize_column_mappable_to_u128, serialize_column_mappable_to_u64,
|
||||
};
|
||||
|
||||
use crate::column_index::ColumnIndex;
|
||||
use crate::column_index::{ColumnIndex, Set};
|
||||
use crate::column_values::monotonic_mapping::StrictlyMonotonicMappingToInternal;
|
||||
use crate::column_values::{monotonic_map_column, ColumnValues};
|
||||
use crate::{Cardinality, DocId, EmptyColumnValues, MonotonicallyMappableToU64, RowId};
|
||||
@@ -83,10 +83,36 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
self.values.max_value()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn first(&self, row_id: RowId) -> Option<T> {
|
||||
self.values_for_doc(row_id).next()
|
||||
}
|
||||
|
||||
/// Load the first value for each docid in the provided slice.
|
||||
#[inline]
|
||||
pub fn first_vals(&self, docids: &[DocId], output: &mut [Option<T>]) {
|
||||
match &self.index {
|
||||
ColumnIndex::Empty { .. } => {}
|
||||
ColumnIndex::Full => self.values.get_vals_opt(docids, output),
|
||||
ColumnIndex::Optional(optional_index) => {
|
||||
for (i, docid) in docids.iter().enumerate() {
|
||||
output[i] = optional_index
|
||||
.rank_if_exists(*docid)
|
||||
.map(|rowid| self.values.get_val(rowid));
|
||||
}
|
||||
}
|
||||
ColumnIndex::Multivalued(multivalued_index) => {
|
||||
for (i, docid) in docids.iter().enumerate() {
|
||||
let range = multivalued_index.range(*docid);
|
||||
let is_empty = range.start == range.end;
|
||||
if !is_empty {
|
||||
output[i] = Some(self.values.get_val(range.start));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Translates a block of docis to row_ids.
|
||||
///
|
||||
/// returns the row_ids and the matching docids on the same index
|
||||
@@ -105,7 +131,8 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
}
|
||||
|
||||
pub fn values_for_doc(&self, doc_id: DocId) -> impl Iterator<Item = T> + '_ {
|
||||
self.value_row_ids(doc_id)
|
||||
self.index
|
||||
.value_row_ids(doc_id)
|
||||
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
|
||||
}
|
||||
|
||||
@@ -147,14 +174,6 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for Column<T> {
|
||||
type Target = ColumnIndex;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.index
|
||||
}
|
||||
}
|
||||
|
||||
impl BinarySerializable for Cardinality {
|
||||
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
|
||||
self.to_code().serialize(writer)
|
||||
@@ -176,6 +195,7 @@ struct FirstValueWithDefault<T: Copy> {
|
||||
impl<T: PartialOrd + Debug + Send + Sync + Copy + 'static> ColumnValues<T>
|
||||
for FirstValueWithDefault<T>
|
||||
{
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
self.column.first(idx).unwrap_or(self.default_value)
|
||||
}
|
||||
|
||||
@@ -76,6 +76,26 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
|
||||
})
|
||||
}
|
||||
|
||||
/// Open the column as u64.
|
||||
///
|
||||
/// See [`open_u128_as_compact_u64`] for more details.
|
||||
pub fn open_column_u128_as_compact_u64(bytes: OwnedBytes) -> io::Result<Column<u64>> {
|
||||
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
|
||||
let column_index_num_bytes = u32::from_le_bytes(
|
||||
column_index_num_bytes_payload
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
);
|
||||
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
|
||||
let column_index = crate::column_index::open_column_index(column_index_data)?;
|
||||
let column_values = crate::column_values::open_u128_as_compact_u64(column_values_data)?;
|
||||
Ok(Column {
|
||||
index: column_index,
|
||||
values: column_values,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
|
||||
let (body, dictionary_len_bytes) = data.rsplit(4);
|
||||
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
|
||||
|
||||
@@ -140,7 +140,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_merge_column_index_optional_shuffle() {
|
||||
let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
|
||||
let column_indexes = vec![optional_index, ColumnIndex::Full];
|
||||
let column_indexes = [optional_index, ColumnIndex::Full];
|
||||
let row_addrs = vec![
|
||||
RowAddr {
|
||||
segment_ord: 0u32,
|
||||
|
||||
@@ -111,10 +111,7 @@ fn stack_multivalued_indexes<'a>(
|
||||
let mut last_row_id = 0;
|
||||
let mut current_it = multivalued_indexes.next();
|
||||
Box::new(std::iter::from_fn(move || loop {
|
||||
let Some(multivalued_index) = current_it.as_mut() else {
|
||||
return None;
|
||||
};
|
||||
if let Some(row_id) = multivalued_index.next() {
|
||||
if let Some(row_id) = current_it.as_mut()?.next() {
|
||||
last_row_id = offset + row_id;
|
||||
return Some(last_row_id);
|
||||
}
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
//! # `column_index`
|
||||
//!
|
||||
//! `column_index` provides rank and select operations to associate positions when not all
|
||||
//! documents have exactly one element.
|
||||
|
||||
mod merge;
|
||||
mod multivalued_index;
|
||||
mod optional_index;
|
||||
@@ -37,14 +42,10 @@ impl From<MultiValueIndex> for ColumnIndex {
|
||||
}
|
||||
|
||||
impl ColumnIndex {
|
||||
#[inline]
|
||||
pub fn is_multivalue(&self) -> bool {
|
||||
matches!(self, ColumnIndex::Multivalued(_))
|
||||
}
|
||||
// Returns the cardinality of the column index.
|
||||
//
|
||||
// By convention, if the column contains no docs, we consider that it is
|
||||
// full.
|
||||
/// Returns the cardinality of the column index.
|
||||
///
|
||||
/// By convention, if the column contains no docs, we consider that it is
|
||||
/// full.
|
||||
#[inline]
|
||||
pub fn get_cardinality(&self) -> Cardinality {
|
||||
match self {
|
||||
@@ -121,18 +122,18 @@ impl ColumnIndex {
        }
    }

    pub fn docid_range_to_rowids(&self, doc_id: Range<DocId>) -> Range<RowId> {
    pub fn docid_range_to_rowids(&self, doc_id_range: Range<DocId>) -> Range<RowId> {
        match self {
            ColumnIndex::Empty { .. } => 0..0,
            ColumnIndex::Full => doc_id,
            ColumnIndex::Full => doc_id_range,
            ColumnIndex::Optional(optional_index) => {
                let row_start = optional_index.rank(doc_id.start);
                let row_end = optional_index.rank(doc_id.end);
                let row_start = optional_index.rank(doc_id_range.start);
                let row_end = optional_index.rank(doc_id_range.end);
                row_start..row_end
            }
            ColumnIndex::Multivalued(multivalued_index) => {
                let end_docid = doc_id.end.min(multivalued_index.num_docs() - 1) + 1;
                let start_docid = doc_id.start.min(end_docid);
                let end_docid = doc_id_range.end.min(multivalued_index.num_docs() - 1) + 1;
                let start_docid = doc_id_range.start.min(end_docid);

                let row_start = multivalued_index.start_index_column.get_val(start_docid);
                let row_end = multivalued_index.start_index_column.get_val(end_docid);
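For the `Optional` arm, `rank(d)` counts the non-null documents strictly before doc id `d`, so a doc id range maps to a row id range by ranking both endpoints. A tiny standalone illustration of that mapping, using a sorted `Vec` of non-null doc ids in place of the real `OptionalIndex` (function names invented):

    use std::ops::Range;

    // non_null_docs must be sorted; rank(d) = how many non-null doc ids are < d.
    fn rank(non_null_docs: &[u32], doc_id: u32) -> u32 {
        non_null_docs.partition_point(|&d| d < doc_id) as u32
    }

    fn docid_range_to_rowids(non_null_docs: &[u32], doc_ids: Range<u32>) -> Range<u32> {
        rank(non_null_docs, doc_ids.start)..rank(non_null_docs, doc_ids.end)
    }

    fn main() {
        // Docs 2, 5 and 9 carry a value; every other doc is null.
        let non_null = [2u32, 5, 9];
        assert_eq!(docid_range_to_rowids(&non_null, 0..3), 0..1); // only doc 2
        assert_eq!(docid_range_to_rowids(&non_null, 3..10), 1..3); // docs 5 and 9
    }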
@@ -21,8 +21,6 @@ const DENSE_BLOCK_THRESHOLD: u32 =
|
||||
|
||||
const ELEMENTS_PER_BLOCK: u32 = u16::MAX as u32 + 1;
|
||||
|
||||
const BLOCK_SIZE: RowId = 1 << 16;
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct BlockMeta {
|
||||
non_null_rows_before_block: u32,
|
||||
@@ -109,8 +107,8 @@ struct RowAddr {
|
||||
#[inline(always)]
|
||||
fn row_addr_from_row_id(row_id: RowId) -> RowAddr {
|
||||
RowAddr {
|
||||
block_id: (row_id / BLOCK_SIZE) as u16,
|
||||
in_block_row_id: (row_id % BLOCK_SIZE) as u16,
|
||||
block_id: (row_id / ELEMENTS_PER_BLOCK) as u16,
|
||||
in_block_row_id: (row_id % ELEMENTS_PER_BLOCK) as u16,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,8 +183,13 @@ impl Set<RowId> for OptionalIndex {
|
||||
}
|
||||
}
|
||||
|
||||
/// Any value doc_id is allowed.
|
||||
/// In particular, doc_id = num_rows.
|
||||
#[inline]
|
||||
fn rank(&self, doc_id: DocId) -> RowId {
|
||||
if doc_id >= self.num_docs() {
|
||||
return self.num_non_nulls();
|
||||
}
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
@@ -200,13 +203,15 @@ impl Set<RowId> for OptionalIndex {
|
||||
block_meta.non_null_rows_before_block + block_offset_row_id
|
||||
}
|
||||
|
||||
/// Any value doc_id is allowed.
|
||||
/// In particular, doc_id = num_rows.
|
||||
#[inline]
|
||||
fn rank_if_exists(&self, doc_id: DocId) -> Option<RowId> {
|
||||
let RowAddr {
|
||||
block_id,
|
||||
in_block_row_id,
|
||||
} = row_addr_from_row_id(doc_id);
|
||||
let block_meta = self.block_metas[block_id as usize];
|
||||
let block_meta = *self.block_metas.get(block_id as usize)?;
|
||||
let block = self.block(block_meta);
|
||||
let block_offset_row_id = match block {
|
||||
Block::Dense(dense_block) => dense_block.rank_if_exists(in_block_row_id),
|
||||
@@ -491,7 +496,7 @@ fn deserialize_optional_index_block_metadatas(
|
||||
non_null_rows_before_block += num_non_null_rows;
|
||||
}
|
||||
block_metas.resize(
|
||||
((num_rows + BLOCK_SIZE - 1) / BLOCK_SIZE) as usize,
|
||||
((num_rows + ELEMENTS_PER_BLOCK - 1) / ELEMENTS_PER_BLOCK) as usize,
|
||||
BlockMeta {
|
||||
non_null_rows_before_block,
|
||||
start_byte_offset,
|
||||
|
||||
@@ -39,7 +39,8 @@ pub trait Set<T> {
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if rank is greater than the number of elements in the Set.
|
||||
/// May panic if rank is greater or equal to the number of
|
||||
/// elements in the Set.
|
||||
fn select(&self, rank: T) -> T;
|
||||
|
||||
/// Creates a brand new select cursor.
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::BinarySerializable;
|
||||
|
||||
@@ -1,8 +1,31 @@
use proptest::prelude::{any, prop, *};
use proptest::strategy::Strategy;
use proptest::prelude::*;
use proptest::{prop_oneof, proptest};

use super::*;
use crate::{ColumnarReader, ColumnarWriter, DynamicColumnHandle};

#[test]
fn test_optional_index_bug_2293() {
    // tests for panic in docid_range_to_rowids for docid == num_docs
    test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK - 1);
    test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK);
    test_optional_index_with_num_docs(ELEMENTS_PER_BLOCK + 1);
}
fn test_optional_index_with_num_docs(num_docs: u32) {
    let mut dataframe_writer = ColumnarWriter::default();
    dataframe_writer.record_numerical(100, "score", 80i64);
    let mut buffer: Vec<u8> = Vec::new();
    dataframe_writer
        .serialize(num_docs, None, &mut buffer)
        .unwrap();
    let columnar = ColumnarReader::open(buffer).unwrap();
    assert_eq!(columnar.num_columns(), 1);
    let cols: Vec<DynamicColumnHandle> = columnar.read_columns("score").unwrap();
    assert_eq!(cols.len(), 1);

    let col = cols[0].open().unwrap();
    col.column_index().docid_range_to_rowids(0..num_docs);
}

#[test]
fn test_dense_block_threshold() {
@@ -35,7 +58,7 @@ proptest! {
|
||||
|
||||
#[test]
|
||||
fn test_with_random_sets_simple() {
|
||||
let vals = 10..BLOCK_SIZE * 2;
|
||||
let vals = 10..ELEMENTS_PER_BLOCK * 2;
|
||||
let mut out: Vec<u8> = Vec::new();
|
||||
serialize_optional_index(&vals, 100, &mut out).unwrap();
|
||||
let null_index = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
@@ -171,7 +194,7 @@ fn test_optional_index_rank() {
|
||||
test_optional_index_rank_aux(&[0u32, 1u32]);
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
|
||||
test_optional_index_rank_aux(&block);
|
||||
}
|
||||
|
||||
@@ -185,8 +208,8 @@ fn test_optional_index_iter_empty_one() {
|
||||
fn test_optional_index_iter_dense_block() {
|
||||
let mut block = Vec::new();
|
||||
block.push(3u32);
|
||||
block.extend((0..BLOCK_SIZE).map(|i| i + BLOCK_SIZE + 1));
|
||||
test_optional_index_iter_aux(&block, 3 * BLOCK_SIZE);
|
||||
block.extend((0..ELEMENTS_PER_BLOCK).map(|i| i + ELEMENTS_PER_BLOCK + 1));
|
||||
test_optional_index_iter_aux(&block, 3 * ELEMENTS_PER_BLOCK);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -215,12 +238,12 @@ mod bench {
|
||||
let vals: Vec<RowId> = (0..TOTAL_NUM_VALUES)
|
||||
.map(|_| rng.gen_bool(fill_ratio))
|
||||
.enumerate()
|
||||
.filter(|(pos, val)| *val)
|
||||
.filter(|(_pos, val)| *val)
|
||||
.map(|(pos, _)| pos as RowId)
|
||||
.collect();
|
||||
serialize_optional_index(&&vals[..], TOTAL_NUM_VALUES, &mut out).unwrap();
|
||||
let codec = open_optional_index(OwnedBytes::new(out)).unwrap();
|
||||
codec
|
||||
|
||||
open_optional_index(OwnedBytes::new(out)).unwrap()
|
||||
}
|
||||
|
||||
fn random_range_iterator(
|
||||
@@ -242,7 +265,7 @@ mod bench {
|
||||
}
|
||||
|
||||
fn n_percent_step_iterator(percent: f32, num_values: u32) -> impl Iterator<Item = u32> {
|
||||
let ratio = percent as f32 / 100.0;
|
||||
let ratio = percent / 100.0;
|
||||
let step_size = (1f32 / ratio) as u32;
|
||||
let deviation = step_size - 1;
|
||||
random_range_iterator(0, num_values, step_size, deviation)
|
||||
|
||||
@@ -30,6 +30,7 @@ impl<'a> SerializableColumnIndex<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize a column index.
|
||||
pub fn serialize_column_index(
|
||||
column_index: SerializableColumnIndex,
|
||||
output: &mut impl Write,
|
||||
@@ -51,6 +52,7 @@ pub fn serialize_column_index(
|
||||
Ok(column_index_num_bytes)
|
||||
}
|
||||
|
||||
/// Open a serialized column index.
|
||||
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
|
||||
if bytes.is_empty() {
|
||||
return Err(io::Error::new(
|
||||
|
||||
@@ -10,7 +10,7 @@ pub(crate) struct MergedColumnValues<'a, T> {
|
||||
pub(crate) merge_row_order: &'a MergeRowOrder,
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Debug> Iterable<T> for MergedColumnValues<'a, T> {
|
||||
impl<'a, T: Copy + PartialOrd + Debug + 'static> Iterable<T> for MergedColumnValues<'a, T> {
|
||||
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
match self.merge_row_order {
|
||||
MergeRowOrder::Stack(_) => Box::new(
|
||||
|
||||
@@ -10,6 +10,7 @@ use std::fmt::Debug;
|
||||
use std::ops::{Range, RangeInclusive};
|
||||
use std::sync::Arc;
|
||||
|
||||
use downcast_rs::DowncastSync;
|
||||
pub use monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
|
||||
pub use monotonic_mapping_u128::MonotonicallyMappableToU128;
|
||||
|
||||
@@ -25,7 +26,10 @@ mod monotonic_column;
|
||||
|
||||
pub(crate) use merge::MergedColumnValues;
|
||||
pub use stats::ColumnStats;
|
||||
pub use u128_based::{open_u128_mapped, serialize_column_values_u128};
|
||||
pub use u128_based::{
|
||||
open_u128_as_compact_u64, open_u128_mapped, serialize_column_values_u128,
|
||||
CompactSpaceU64Accessor,
|
||||
};
|
||||
pub use u64_based::{
|
||||
load_u64_based_column_values, serialize_and_load_u64_based_column_values,
|
||||
serialize_u64_based_column_values, CodecType, ALL_U64_CODEC_TYPES,
|
||||
@@ -41,7 +45,7 @@ use crate::RowId;
|
||||
///
|
||||
/// Any methods with a default and specialized implementation need to be called in the
|
||||
/// wrappers that implement the trait: Arc and MonotonicMappingColumn
|
||||
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
|
||||
/// Return the value associated with the given idx.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
@@ -68,11 +72,40 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
            out_x4[3] = self.get_val(idx_x4[3]);
        }

        let step_size = 4;
        let cutoff = indexes.len() - indexes.len() % step_size;
        let out_and_idx_chunks = output
            .chunks_exact_mut(4)
            .into_remainder()
            .iter_mut()
            .zip(indexes.chunks_exact(4).remainder());
        for (out, idx) in out_and_idx_chunks {
            *out = self.get_val(*idx);
        }
    }

        for idx in cutoff..indexes.len() {
            output[idx] = self.get_val(indexes[idx]);
    /// Allows to push down multiple fetch calls, to avoid dynamic dispatch overhead.
    /// The slightly weird `Option<T>` in output allows pushdown to full columns.
    ///
    /// idx and output should have the same length
    ///
    /// # Panics
    ///
    /// May panic if `idx` is greater than the column length.
    fn get_vals_opt(&self, indexes: &[u32], output: &mut [Option<T>]) {
        assert!(indexes.len() == output.len());
        let out_and_idx_chunks = output.chunks_exact_mut(4).zip(indexes.chunks_exact(4));
        for (out_x4, idx_x4) in out_and_idx_chunks {
            out_x4[0] = Some(self.get_val(idx_x4[0]));
            out_x4[1] = Some(self.get_val(idx_x4[1]));
            out_x4[2] = Some(self.get_val(idx_x4[2]));
            out_x4[3] = Some(self.get_val(idx_x4[3]));
        }
        let out_and_idx_chunks = output
            .chunks_exact_mut(4)
            .into_remainder()
            .iter_mut()
            .zip(indexes.chunks_exact(4).remainder());
        for (out, idx) in out_and_idx_chunks {
            *out = Some(self.get_val(*idx));
        }
    }

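The group-of-four gather plus remainder pattern used by `get_vals_opt` above can be tried in isolation; a small sketch over a plain `Vec<u64>` instead of a real column (the function name is invented):

    // Illustration: fetch values in groups of four, then handle the tail.
    fn gather(values: &[u64], indexes: &[u32], output: &mut [Option<u64>]) {
        assert_eq!(indexes.len(), output.len());
        let chunks = output.chunks_exact_mut(4).zip(indexes.chunks_exact(4));
        for (out_x4, idx_x4) in chunks {
            for (out, &idx) in out_x4.iter_mut().zip(idx_x4) {
                *out = Some(values[idx as usize]);
            }
        }
        let tail = output
            .chunks_exact_mut(4)
            .into_remainder()
            .iter_mut()
            .zip(indexes.chunks_exact(4).remainder());
        for (out, &idx) in tail {
            *out = Some(values[idx as usize]);
        }
    }

    fn main() {
        let values = vec![10u64, 20, 30, 40, 50, 60];
        let indexes = [0u32, 2, 4, 1, 5];
        let mut output = vec![None; indexes.len()];
        gather(&values, &indexes, &mut output);
        assert_eq!(output, vec![Some(10), Some(30), Some(50), Some(20), Some(60)]);
    }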
@@ -101,7 +134,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
row_id_hits: &mut Vec<RowId>,
|
||||
) {
|
||||
let row_id_range = row_id_range.start..row_id_range.end.min(self.num_vals());
|
||||
for idx in row_id_range.start..row_id_range.end {
|
||||
for idx in row_id_range {
|
||||
let val = self.get_val(idx);
|
||||
if value_range.contains(&val) {
|
||||
row_id_hits.push(idx);
|
||||
@@ -139,6 +172,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync {
|
||||
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
|
||||
}
|
||||
}
|
||||
downcast_rs::impl_downcast!(sync ColumnValues<T> where T: PartialOrd);
|
||||
|
||||
/// Empty column of values.
|
||||
pub struct EmptyColumnValues;
|
||||
@@ -161,12 +195,17 @@ impl<T: PartialOrd + Default> ColumnValues<T> for EmptyColumnValues {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy + PartialOrd + Debug> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
|
||||
impl<T: Copy + PartialOrd + Debug + 'static> ColumnValues<T> for Arc<dyn ColumnValues<T>> {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> T {
|
||||
self.as_ref().get_val(idx)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_vals_opt(&self, indexes: &[u32], output: &mut [Option<T>]) {
|
||||
self.as_ref().get_vals_opt(indexes, output)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn min_value(&self) -> T {
|
||||
self.as_ref().min_value()
|
||||
|
||||
@@ -31,10 +31,10 @@ pub fn monotonic_map_column<C, T, Input, Output>(
|
||||
monotonic_mapping: T,
|
||||
) -> impl ColumnValues<Output>
|
||||
where
|
||||
C: ColumnValues<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Debug + Send + Sync + Clone,
|
||||
Output: PartialOrd + Debug + Send + Sync + Clone,
|
||||
C: ColumnValues<Input> + 'static,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync + 'static,
|
||||
Input: PartialOrd + Debug + Send + Sync + Clone + 'static,
|
||||
Output: PartialOrd + Debug + Send + Sync + Clone + 'static,
|
||||
{
|
||||
MonotonicMappingColumn {
|
||||
from_column,
|
||||
@@ -45,10 +45,10 @@ where
|
||||
|
||||
impl<C, T, Input, Output> ColumnValues<Output> for MonotonicMappingColumn<C, T, Input>
|
||||
where
|
||||
C: ColumnValues<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Send + Debug + Sync + Clone,
|
||||
Output: PartialOrd + Send + Debug + Sync + Clone,
|
||||
C: ColumnValues<Input> + 'static,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync + 'static,
|
||||
Input: PartialOrd + Send + Debug + Sync + Clone + 'static,
|
||||
Output: PartialOrd + Send + Debug + Sync + Clone + 'static,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u32) -> Output {
|
||||
@@ -107,7 +107,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_monotonic_mapping_iter() {
|
||||
let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
|
||||
let col = VecColumn::from(&vals);
|
||||
let col = VecColumn::from(vals);
|
||||
let mapped = monotonic_map_column(
|
||||
col,
|
||||
StrictlyMonotonicMappingInverter::from(StrictlyMonotonicMappingToInternal::<i64>::new()),
|
||||
|
||||
@@ -22,7 +22,7 @@ mod build_compact_space;
|
||||
|
||||
use build_compact_space::get_compact_space;
|
||||
use common::{BinarySerializable, CountingWriter, OwnedBytes, VInt, VIntU128};
|
||||
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};
|
||||
use tantivy_bitpacker::{BitPacker, BitUnpacker};
|
||||
|
||||
use crate::column_values::ColumnValues;
|
||||
use crate::RowId;
|
||||
@@ -148,7 +148,7 @@ impl CompactSpace {
|
||||
.binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
|
||||
// Correctness: Overflow. The first range starts at compact space 0, the error from
|
||||
// binary search can never be 0
|
||||
.map_or_else(|e| e - 1, |v| v);
|
||||
.unwrap_or_else(|e| e - 1);
|
||||
|
||||
let range_mapping = &self.ranges_mapping[pos];
|
||||
let diff = compact - range_mapping.compact_start;
|
||||
@@ -292,6 +292,63 @@ impl BinarySerializable for IPCodecParams {
    }
}

/// Exposes the compact space compressed values as u64.
///
/// This allows faster access to the values, as u64 is faster to work with than u128.
/// It also allows to handle u128 values like u64, via the `open_u64_lenient` as a uniform
/// access interface.
///
/// When converting from the internal u64 to u128 `compact_to_u128` can be used.
pub struct CompactSpaceU64Accessor(CompactSpaceDecompressor);
impl CompactSpaceU64Accessor {
    pub(crate) fn open(data: OwnedBytes) -> io::Result<CompactSpaceU64Accessor> {
        let decompressor = CompactSpaceU64Accessor(CompactSpaceDecompressor::open(data)?);
        Ok(decompressor)
    }
    /// Convert a compact space value to u128
    pub fn compact_to_u128(&self, compact: u32) -> u128 {
        self.0.compact_to_u128(compact)
    }
}

impl ColumnValues<u64> for CompactSpaceU64Accessor {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
let compact = self.0.get_compact(doc);
|
||||
compact as u64
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u64 {
|
||||
self.0.u128_to_compact(self.0.min_value()).unwrap() as u64
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.0.u128_to_compact(self.0.max_value()).unwrap() as u64
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u32 {
|
||||
self.0.params.num_vals
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(self.0.iter_compact().map(|el| el as u64))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_row_ids_for_value_range(
|
||||
&self,
|
||||
value_range: RangeInclusive<u64>,
|
||||
position_range: Range<u32>,
|
||||
positions: &mut Vec<u32>,
|
||||
) {
|
||||
let value_range = self.0.compact_to_u128(*value_range.start() as u32)
|
||||
..=self.0.compact_to_u128(*value_range.end() as u32);
|
||||
self.0
|
||||
.get_row_ids_for_value_range(value_range, position_range, positions)
|
||||
}
|
||||
}
|
||||
|
||||
impl ColumnValues<u128> for CompactSpaceDecompressor {
|
||||
#[inline]
|
||||
fn get_val(&self, doc: u32) -> u128 {
|
||||
@@ -402,9 +459,14 @@ impl CompactSpaceDecompressor {
|
||||
.map(|compact| self.compact_to_u128(compact))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_compact(&self, idx: u32) -> u32 {
|
||||
self.params.bit_unpacker.get(idx, &self.data) as u32
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get(&self, idx: u32) -> u128 {
|
||||
let compact = self.params.bit_unpacker.get(idx, &self.data) as u32;
|
||||
let compact = self.get_compact(idx);
|
||||
self.compact_to_u128(compact)
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,9 @@ use std::sync::Arc;
|
||||
mod compact_space;
|
||||
|
||||
use common::{BinarySerializable, OwnedBytes, VInt};
|
||||
use compact_space::{CompactSpaceCompressor, CompactSpaceDecompressor};
|
||||
pub use compact_space::{
|
||||
CompactSpaceCompressor, CompactSpaceDecompressor, CompactSpaceU64Accessor,
|
||||
};
|
||||
|
||||
use crate::column_values::monotonic_map_column;
|
||||
use crate::column_values::monotonic_mapping::{
|
||||
@@ -108,6 +110,23 @@ pub fn open_u128_mapped<T: MonotonicallyMappableToU128 + Debug>(
        StrictlyMonotonicMappingToInternal::<T>::new().into();
    Ok(Arc::new(monotonic_map_column(reader, inverted)))
}

/// Returns the u64 representation of the u128 data.
/// The internal representation of the data as u64 is useful for faster processing.
///
/// In order to convert to u128 back cast to `CompactSpaceU64Accessor` and call
/// `compact_to_u128`.
///
/// # Notice
/// In case there are new codecs added, check for usages of `CompactSpaceDecompressorU64` and
/// also handle the new codecs.
pub fn open_u128_as_compact_u64(mut bytes: OwnedBytes) -> io::Result<Arc<dyn ColumnValues<u64>>> {
    let header = U128Header::deserialize(&mut bytes)?;
    assert_eq!(header.codec_type, U128FastFieldCodecType::CompactSpace);
    let reader = CompactSpaceU64Accessor::open(bytes)?;
    Ok(Arc::new(reader))
}

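A hedged sketch of how a caller could go back from the compact u64 view to the original u128 values. It assumes the `downcast_ref` helper generated by `downcast_rs` via `impl_downcast!` and is a fragment within this module, not part of the crate's public API:

    // Sketch: recover the original u128 from the compact u64 view.
    fn compact_val_to_u128(values: &std::sync::Arc<dyn ColumnValues<u64>>, doc: u32) -> u128 {
        let compact = values.get_val(doc);
        // downcast_ref is assumed to be provided by downcast_rs through `impl_downcast!`.
        let accessor = values
            .downcast_ref::<CompactSpaceU64Accessor>()
            .expect("compact space codec");
        accessor.compact_to_u128(compact as u32)
    }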
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -63,7 +63,6 @@ impl ColumnValues for BitpackedReader {
|
||||
fn get_val(&self, doc: u32) -> u64 {
|
||||
self.stats.min_value + self.stats.gcd.get() * self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
|
||||
@@ -63,7 +63,10 @@ impl BlockwiseLinearEstimator {
|
||||
if self.block.is_empty() {
|
||||
return;
|
||||
}
|
||||
let line = Line::train(&VecColumn::from(&self.block));
|
||||
let column = VecColumn::from(std::mem::take(&mut self.block));
|
||||
let line = Line::train(&column);
|
||||
self.block = column.into();
|
||||
|
||||
let mut max_value = 0u64;
|
||||
for (i, buffer_val) in self.block.iter().enumerate() {
|
||||
let interpolated_val = line.eval(i as u32);
|
||||
@@ -125,7 +128,7 @@ impl ColumnCodecEstimator for BlockwiseLinearEstimator {
|
||||
*buffer_val = gcd_divider.divide(*buffer_val - stats.min_value);
|
||||
}
|
||||
|
||||
let line = Line::train(&VecColumn::from(&buffer));
|
||||
let line = Line::train(&VecColumn::from(buffer.to_vec()));
|
||||
|
||||
assert!(!buffer.is_empty());
|
||||
|
||||
|
||||
@@ -184,7 +184,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn test_eval_max_err(ys: &[u64]) -> Option<u64> {
|
||||
let line = Line::train(&VecColumn::from(&ys));
|
||||
let line = Line::train(&VecColumn::from(ys.to_vec()));
|
||||
ys.iter()
|
||||
.enumerate()
|
||||
.map(|(x, y)| y.wrapping_sub(line.eval(x as u32)))
|
||||
|
||||
@@ -173,7 +173,9 @@ impl LinearCodecEstimator {
|
||||
fn collect_before_line_estimation(&mut self, value: u64) {
|
||||
self.block.push(value);
|
||||
if self.block.len() == LINE_ESTIMATION_BLOCK_LEN {
|
||||
let line = Line::train(&VecColumn::from(&self.block));
|
||||
let column = VecColumn::from(std::mem::take(&mut self.block));
|
||||
let line = Line::train(&column);
|
||||
self.block = column.into();
|
||||
let block = std::mem::take(&mut self.block);
|
||||
for val in block {
|
||||
self.collect_after_line_estimation(&line, val);
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use proptest::prelude::*;
|
||||
use proptest::strategy::Strategy;
|
||||
use proptest::{prop_oneof, proptest};
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -4,14 +4,14 @@ use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::ColumnValues;
|
||||
|
||||
/// VecColumn provides `Column` over a slice.
|
||||
pub struct VecColumn<'a, T = u64> {
|
||||
pub(crate) values: &'a [T],
|
||||
/// VecColumn provides `Column` over a `Vec<T>`.
|
||||
pub struct VecColumn<T = u64> {
|
||||
pub(crate) values: Vec<T>,
|
||||
pub(crate) min_value: T,
|
||||
pub(crate) max_value: T,
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColumn<'a, T> {
|
||||
impl<T: Copy + PartialOrd + Send + Sync + Debug + 'static> ColumnValues<T> for VecColumn<T> {
|
||||
fn get_val(&self, position: u32) -> T {
|
||||
self.values[position as usize]
|
||||
}
|
||||
@@ -37,11 +37,8 @@ impl<'a, T: Copy + PartialOrd + Send + Sync + Debug> ColumnValues<T> for VecColu
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Default, V> From<&'a V> for VecColumn<'a, T>
|
||||
where V: AsRef<[T]> + ?Sized
|
||||
{
|
||||
fn from(values: &'a V) -> Self {
|
||||
let values = values.as_ref();
|
||||
impl<T: Copy + PartialOrd + Default> From<Vec<T>> for VecColumn<T> {
|
||||
fn from(values: Vec<T>) -> Self {
|
||||
let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
|
||||
Self {
|
||||
values,
|
||||
@@ -50,3 +47,8 @@ where V: AsRef<[T]> + ?Sized
|
||||
}
|
||||
}
|
||||
}
|
||||
impl From<VecColumn> for Vec<u64> {
|
||||
fn from(column: VecColumn) -> Self {
|
||||
column.values
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use itertools::Itertools;
|
||||
|
||||
use super::*;
|
||||
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};
|
||||
|
||||
|
||||
@@ -269,7 +269,8 @@ impl StrOrBytesColumnWriter {
|
||||
dictionaries: &mut [DictionaryBuilder],
|
||||
arena: &mut MemoryArena,
|
||||
) {
|
||||
let unordered_id = dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes);
|
||||
let unordered_id =
|
||||
dictionaries[self.dictionary_id as usize].get_or_allocate_id(bytes, arena);
|
||||
self.column_writer.record(doc, unordered_id, arena);
|
||||
}
|
||||
|
||||
|
||||
@@ -13,9 +13,7 @@ pub(crate) use serializer::ColumnarSerializer;
|
||||
use stacker::{Addr, ArenaHashMap, MemoryArena};
|
||||
|
||||
use crate::column_index::SerializableColumnIndex;
|
||||
use crate::column_values::{
|
||||
ColumnValues, MonotonicallyMappableToU128, MonotonicallyMappableToU64, VecColumn,
|
||||
};
|
||||
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
|
||||
use crate::columnar::column_type::ColumnType;
|
||||
use crate::columnar::writer::column_writers::{
|
||||
ColumnWriter, NumericalColumnWriter, StrOrBytesColumnWriter,
|
||||
@@ -338,7 +336,7 @@ impl ColumnarWriter {
|
||||
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
|
||||
.numerical_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| {
|
||||
.map(|(column_name, addr)| {
|
||||
let numerical_column_writer: NumericalColumnWriter =
|
||||
self.numerical_field_hash_map.read(addr);
|
||||
let column_type = numerical_column_writer.numerical_type().into();
|
||||
@@ -348,27 +346,27 @@ impl ColumnarWriter {
|
||||
columns.extend(
|
||||
self.bytes_field_hash_map
|
||||
.iter()
|
||||
.map(|(term, addr, _)| (term, ColumnType::Bytes, addr)),
|
||||
.map(|(term, addr)| (term, ColumnType::Bytes, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.str_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Str, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::Str, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.bool_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::Bool, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::Bool, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.ip_addr_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::IpAddr, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::IpAddr, addr)),
|
||||
);
|
||||
columns.extend(
|
||||
self.datetime_field_hash_map
|
||||
.iter()
|
||||
.map(|(column_name, addr, _)| (column_name, ColumnType::DateTime, addr)),
|
||||
.map(|(column_name, addr)| (column_name, ColumnType::DateTime, addr)),
|
||||
);
|
||||
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
|
||||
|
||||
@@ -437,6 +435,7 @@ impl ColumnarWriter {
|
||||
&mut symbol_byte_buffer,
|
||||
),
|
||||
buffers,
|
||||
&self.arena,
|
||||
&mut column_serializer,
|
||||
)?;
|
||||
column_serializer.finalize()?;
|
||||
@@ -490,6 +489,7 @@ impl ColumnarWriter {
|
||||
|
||||
// Serialize [Dictionary, Column, dictionary num bytes U32::LE]
|
||||
// Column: [Column Index, Column Values, column index num bytes U32::LE]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn serialize_bytes_or_str_column(
|
||||
cardinality: Cardinality,
|
||||
num_docs: RowId,
|
||||
@@ -497,6 +497,7 @@ fn serialize_bytes_or_str_column(
|
||||
dictionary_builder: &DictionaryBuilder,
|
||||
operation_it: impl Iterator<Item = ColumnOperation<UnorderedId>>,
|
||||
buffers: &mut SpareBuffers,
|
||||
arena: &MemoryArena,
|
||||
wrt: impl io::Write,
|
||||
) -> io::Result<()> {
|
||||
let SpareBuffers {
|
||||
@@ -505,7 +506,8 @@ fn serialize_bytes_or_str_column(
|
||||
..
|
||||
} = buffers;
|
||||
let mut counting_writer = CountingWriter::wrap(wrt);
|
||||
let term_id_mapping: TermIdMapping = dictionary_builder.serialize(&mut counting_writer)?;
|
||||
let term_id_mapping: TermIdMapping =
|
||||
dictionary_builder.serialize(arena, &mut counting_writer)?;
|
||||
let dictionary_num_bytes: u32 = counting_writer.written_bytes() as u32;
|
||||
let mut wrt = counting_writer.finish();
|
||||
let operation_iterator = operation_it.map(|symbol: ColumnOperation<UnorderedId>| {
|
||||
@@ -641,10 +643,7 @@ fn send_to_serialize_column_mappable_to_u128<
|
||||
value_index_builders: &mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<T>,
|
||||
mut wrt: impl io::Write,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
for<'a> VecColumn<'a, T>: ColumnValues<T>,
|
||||
{
|
||||
) -> io::Result<()> {
|
||||
values.clear();
|
||||
// TODO: split index and values
|
||||
let serializable_column_index = match cardinality {
|
||||
@@ -697,10 +696,7 @@ fn send_to_serialize_column_mappable_to_u64(
|
||||
value_index_builders: &mut PreallocatedIndexBuilders,
|
||||
values: &mut Vec<u64>,
|
||||
mut wrt: impl io::Write,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
for<'a> VecColumn<'a, u64>: ColumnValues<u64>,
|
||||
{
|
||||
) -> io::Result<()> {
|
||||
values.clear();
|
||||
let serializable_column_index = match cardinality {
|
||||
Cardinality::Full => {
|
||||
|
||||
@@ -18,7 +18,12 @@ pub struct ColumnarSerializer<W: io::Write> {
/// code.
fn prepare_key(key: &[u8], column_type: ColumnType, buffer: &mut Vec<u8>) {
    buffer.clear();
    buffer.extend_from_slice(key);
    // Convert 0 bytes to '0' string, as 0 bytes are reserved for the end of the path.
    if key.contains(&0u8) {
        buffer.extend(key.iter().map(|&b| if b == 0 { b'0' } else { b }));
    } else {
        buffer.extend_from_slice(key);
    }
    buffer.push(0u8);
    buffer.push(column_type.to_code());
}
@@ -96,14 +101,13 @@ impl<'a, W: io::Write> io::Write for ColumnSerializer<'a, W> {
#[cfg(test)]
mod tests {
    use super::*;
    use crate::columnar::column_type::ColumnType;

    #[test]
    fn test_prepare_key_bytes() {
        let mut buffer: Vec<u8> = b"somegarbage".to_vec();
        prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
        assert_eq!(buffer.len(), 12);
        assert_eq!(&buffer[..10], b"root\0child");
        assert_eq!(&buffer[..10], b"root0child");
        assert_eq!(buffer[10], 0u8);
        assert_eq!(buffer[11], ColumnType::Str.to_code());
    }

@@ -1,7 +1,7 @@
|
||||
use std::io;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use sstable::SSTable;
|
||||
use stacker::{MemoryArena, SharedArenaHashMap};
|
||||
|
||||
pub(crate) struct TermIdMapping {
|
||||
unordered_to_ord: Vec<OrderedId>,
|
||||
@@ -31,29 +31,38 @@ pub struct OrderedId(pub u32);
/// mapping.
#[derive(Default)]
pub(crate) struct DictionaryBuilder {
    dict: FnvHashMap<Vec<u8>, UnorderedId>,
    memory_consumption: usize,
    dict: SharedArenaHashMap,
}

impl DictionaryBuilder {
    /// Get or allocate an unordered id.
    /// (This ID is simply an auto-incremented id.)
    pub fn get_or_allocate_id(&mut self, term: &[u8]) -> UnorderedId {
        if let Some(term_id) = self.dict.get(term) {
            return *term_id;
        }
        let new_id = UnorderedId(self.dict.len() as u32);
        self.dict.insert(term.to_vec(), new_id);
        self.memory_consumption += term.len();
        self.memory_consumption += 40; // Term Metadata + HashMap overhead
        new_id
    pub fn get_or_allocate_id(&mut self, term: &[u8], arena: &mut MemoryArena) -> UnorderedId {
        let next_id = self.dict.len() as u32;
        let unordered_id = self
            .dict
            .mutate_or_create(term, arena, |unordered_id: Option<u32>| {
                if let Some(unordered_id) = unordered_id {
                    unordered_id
                } else {
                    next_id
                }
            });
        UnorderedId(unordered_id)
    }

/// Serialize the dictionary into an fst, and returns the
|
||||
/// `UnorderedId -> TermOrdinal` map.
|
||||
pub fn serialize<'a, W: io::Write + 'a>(&self, wrt: &mut W) -> io::Result<TermIdMapping> {
|
||||
let mut terms: Vec<(&[u8], UnorderedId)> =
|
||||
self.dict.iter().map(|(k, v)| (k.as_slice(), *v)).collect();
|
||||
pub fn serialize<'a, W: io::Write + 'a>(
|
||||
&self,
|
||||
arena: &MemoryArena,
|
||||
wrt: &mut W,
|
||||
) -> io::Result<TermIdMapping> {
|
||||
let mut terms: Vec<(&[u8], UnorderedId)> = self
|
||||
.dict
|
||||
.iter(arena)
|
||||
.map(|(k, v)| (k, arena.read(v)))
|
||||
.collect();
|
||||
terms.sort_unstable_by_key(|(key, _)| *key);
|
||||
// TODO Remove the allocation.
|
||||
let mut unordered_to_ord: Vec<OrderedId> = vec![OrderedId(0u32); terms.len()];
|
||||
@@ -68,7 +77,7 @@ impl DictionaryBuilder {
|
||||
}
|
||||
|
||||
pub(crate) fn mem_usage(&self) -> usize {
|
||||
self.memory_consumption
|
||||
self.dict.mem_usage()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -78,12 +87,13 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_dictionary_builder() {
|
||||
let mut arena = MemoryArena::default();
|
||||
let mut dictionary_builder = DictionaryBuilder::default();
|
||||
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello");
|
||||
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy");
|
||||
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax");
|
||||
let hello_uid = dictionary_builder.get_or_allocate_id(b"hello", &mut arena);
|
||||
let happy_uid = dictionary_builder.get_or_allocate_id(b"happy", &mut arena);
|
||||
let tax_uid = dictionary_builder.get_or_allocate_id(b"tax", &mut arena);
|
||||
let mut buffer = Vec::new();
|
||||
let id_mapping = dictionary_builder.serialize(&mut buffer).unwrap();
|
||||
let id_mapping = dictionary_builder.serialize(&arena, &mut buffer).unwrap();
|
||||
assert_eq!(id_mapping.to_ord(hello_uid), OrderedId(1));
|
||||
assert_eq!(id_mapping.to_ord(happy_uid), OrderedId(0));
|
||||
assert_eq!(id_mapping.to_ord(tax_uid), OrderedId(2));
|
||||
|
||||
@@ -8,7 +8,7 @@ use common::{ByteCount, DateTime, HasLen, OwnedBytes};
|
||||
use crate::column::{BytesColumn, Column, StrColumn};
|
||||
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
|
||||
use crate::columnar::ColumnType;
|
||||
use crate::{Cardinality, ColumnIndex, NumericalType};
|
||||
use crate::{Cardinality, ColumnIndex, ColumnValues, NumericalType};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DynamicColumn {
|
||||
@@ -247,7 +247,12 @@ impl DynamicColumnHandle {
|
||||
}
|
||||
|
||||
/// Returns the `u64` fast field reader reader associated with `fields` of types
|
||||
/// Str, u64, i64, f64, bool, or datetime.
|
||||
/// Str, u64, i64, f64, bool, ip, or datetime.
|
||||
///
|
||||
/// Notice that for IpAddr, the fastfield reader will return the u64 representation of the
|
||||
/// IpAddr.
|
||||
/// In order to convert to u128 back cast to `CompactSpaceU64Accessor` and call
|
||||
/// `compact_to_u128`.
|
||||
///
|
||||
/// If not, the fastfield reader will returns the u64-value associated with the original
|
||||
/// FastValue.
|
||||
@@ -258,7 +263,10 @@ impl DynamicColumnHandle {
|
||||
let column: BytesColumn = crate::column::open_column_bytes(column_bytes)?;
|
||||
Ok(Some(column.term_ord_column))
|
||||
}
|
||||
ColumnType::IpAddr => Ok(None),
|
||||
ColumnType::IpAddr => {
|
||||
let column = crate::column::open_column_u128_as_compact_u64(column_bytes)?;
|
||||
Ok(Some(column))
|
||||
}
|
||||
ColumnType::Bool
|
||||
| ColumnType::I64
|
||||
| ColumnType::U64
|
||||
|
||||
@@ -1,3 +1,22 @@
//! # Tantivy-Columnar
//!
//! `tantivy-columnar` provides a columnar storage for tantivy.
//! The crate allows for efficient read operations on specific columns rather than entire records.
//!
//! ## Overview
//!
//! - **columnar**: Reading, writing, and merging multiple columns:
//!   - **[ColumnarWriter]**: Makes it possible to create a new columnar.
//!   - **[ColumnarReader]**: The ColumnarReader makes it possible to access a set of columns
//!     associated to field names.
//!   - **[merge_columnar]**: Contains the functionalities to merge multiple ColumnarReader or
//!     segments into a single one.
//!
//! - **column**: A single column, which contains
//!   - [column_index]: Resolves the rows for a document id. Manages the cardinality of the
//!     column.
//!   - [column_values]: Stores the values of a column in a dense format.

#![cfg_attr(all(feature = "unstable", test), feature(test))]
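A minimal end-to-end sketch of the writer/reader pair described above, using only calls that appear elsewhere in this changeset (`record_numerical`, `serialize`, `read_columns`). The crate import path `tantivy_columnar` is assumed; treat this as an illustration rather than the canonical API:

    use tantivy_columnar::{ColumnarReader, ColumnarWriter, DynamicColumnHandle};

    fn main() -> std::io::Result<()> {
        // Write a single numerical column with one value recorded for doc 0.
        let mut writer = ColumnarWriter::default();
        writer.record_numerical(0, "score", 42i64);
        let mut buffer: Vec<u8> = Vec::new();
        writer.serialize(1, None, &mut buffer)?;

        // Read it back: one column named "score".
        let reader = ColumnarReader::open(buffer)?;
        let cols: Vec<DynamicColumnHandle> = reader.read_columns("score")?;
        assert_eq!(cols.len(), 1);
        Ok(())
    }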
|
||||
#[cfg(test)]
|
||||
@@ -12,7 +31,7 @@ use std::io;
|
||||
|
||||
mod block_accessor;
|
||||
mod column;
|
||||
mod column_index;
|
||||
pub mod column_index;
|
||||
pub mod column_values;
|
||||
mod columnar;
|
||||
mod dictionary;
|
||||
@@ -94,6 +113,9 @@ impl Cardinality {
|
||||
pub fn is_multivalue(&self) -> bool {
|
||||
matches!(self, Cardinality::Multivalued)
|
||||
}
|
||||
pub fn is_full(&self) -> bool {
|
||||
matches!(self, Cardinality::Full)
|
||||
}
|
||||
pub(crate) fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ fn test_dataframe_writer_str() {
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 87);
|
||||
assert_eq!(cols[0].num_bytes(), 73);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -40,7 +40,7 @@ fn test_dataframe_writer_bytes() {
|
||||
assert_eq!(columnar.num_columns(), 1);
|
||||
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
|
||||
assert_eq!(cols.len(), 1);
|
||||
assert_eq!(cols[0].num_bytes(), 87);
|
||||
assert_eq!(cols[0].num_bytes(), 73);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -330,9 +330,9 @@ fn bytes_strategy() -> impl Strategy<Value = &'static [u8]> {
|
||||
// A random column value
|
||||
fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
|
||||
prop_oneof![
|
||||
10 => string_strategy().prop_map(|s| ColumnValue::Str(s)),
|
||||
1 => bytes_strategy().prop_map(|b| ColumnValue::Bytes(b)),
|
||||
40 => num_strategy().prop_map(|n| ColumnValue::Numerical(n)),
|
||||
10 => string_strategy().prop_map(ColumnValue::Str),
|
||||
1 => bytes_strategy().prop_map(ColumnValue::Bytes),
|
||||
40 => num_strategy().prop_map(ColumnValue::Numerical),
|
||||
1 => (1u16..3u16).prop_map(|ip_addr_byte| ColumnValue::IpAddr(Ipv6Addr::new(
|
||||
127,
|
||||
0,
|
||||
@@ -343,7 +343,7 @@ fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
|
||||
0,
|
||||
ip_addr_byte
|
||||
))),
|
||||
1 => any::<bool>().prop_map(|b| ColumnValue::Bool(b)),
|
||||
1 => any::<bool>().prop_map(ColumnValue::Bool),
|
||||
1 => (0_679_723_993i64..1_679_723_995i64)
|
||||
.prop_map(|val| { ColumnValue::DateTime(DateTime::from_timestamp_secs(val)) })
|
||||
]
|
||||
@@ -419,8 +419,8 @@ fn build_columnar_with_mapping(
|
||||
columnar_writer
|
||||
.serialize(num_docs, old_to_new_row_ids_opt, &mut buffer)
|
||||
.unwrap();
|
||||
let columnar_reader = ColumnarReader::open(buffer).unwrap();
|
||||
columnar_reader
|
||||
|
||||
ColumnarReader::open(buffer).unwrap()
|
||||
}
|
||||
|
||||
fn build_columnar(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
|
||||
@@ -746,7 +746,7 @@ proptest! {
|
||||
let stack_merge_order = StackMergeOrder::stack(&columnar_readers_arr[..]).into();
|
||||
crate::merge_columnar(&columnar_readers_arr[..], &[], stack_merge_order, &mut output).unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().cloned().flatten().collect();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> = columnar_docs.iter().flatten().cloned().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
@@ -772,7 +772,7 @@ fn test_columnar_merging_empty_columnar() {
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
|
||||
columnar_docs.iter().cloned().flatten().collect();
|
||||
columnar_docs.iter().flatten().cloned().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
@@ -809,7 +809,7 @@ fn test_columnar_merging_number_columns() {
|
||||
.unwrap();
|
||||
let merged_columnar = ColumnarReader::open(output).unwrap();
|
||||
let concat_rows: Vec<Vec<(&'static str, ColumnValue)>> =
|
||||
columnar_docs.iter().cloned().flatten().collect();
|
||||
columnar_docs.iter().flatten().cloned().collect();
|
||||
let expected_merged_columnar = build_columnar(&concat_rows[..]);
|
||||
assert_columnar_eq_strict(&merged_columnar, &expected_merged_columnar);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-common"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2021"
|
||||
@@ -14,7 +14,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
ownedbytes = { version= "0.6", path="../ownedbytes" }
|
||||
ownedbytes = { version= "0.7", path="../ownedbytes" }
|
||||
async-trait = "0.1"
|
||||
time = { version = "0.3.10", features = ["serde-well-known"] }
|
||||
serde = { version = "1.0.136", features = ["derive"] }
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::convert::TryInto;
|
||||
use std::io::Write;
|
||||
use std::{fmt, io, u64};
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::fmt;
|
||||
use std::io::{Read, Write};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use time::format_description::well_known::Rfc3339;
|
||||
use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};
|
||||
|
||||
use crate::BinarySerializable;
|
||||
|
||||
/// Precision with which datetimes are truncated when stored in fast fields. This setting is only
|
||||
/// relevant for fast fields. In the docstore, datetimes are always saved with nanosecond precision.
|
||||
#[derive(
|
||||
@@ -24,9 +25,6 @@ pub enum DateTimePrecision {
|
||||
Nanoseconds,
|
||||
}
|
||||
|
||||
#[deprecated(since = "0.20.0", note = "Use `DateTimePrecision` instead")]
|
||||
pub type DatePrecision = DateTimePrecision;
|
||||
|
||||
/// A date/time value with nanoseconds precision.
|
||||
///
|
||||
/// This timestamp does not carry any explicit time zone information.
|
||||
@@ -37,7 +35,7 @@ pub type DatePrecision = DateTimePrecision;
|
||||
/// All constructors and conversions are provided as explicit
|
||||
/// functions and not by implementing any `From`/`Into` traits
|
||||
/// to prevent unintended usage.
|
||||
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
pub struct DateTime {
|
||||
// Timestamp in nanoseconds.
|
||||
pub(crate) timestamp_nanos: i64,
|
||||
@@ -164,3 +162,15 @@ impl fmt::Debug for DateTime {
        f.write_str(&utc_rfc3339)
    }
}

impl BinarySerializable for DateTime {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> std::io::Result<()> {
        let timestamp_micros = self.into_timestamp_micros();
        <i64 as BinarySerializable>::serialize(&timestamp_micros, writer)
    }

    fn deserialize<R: Read>(reader: &mut R) -> std::io::Result<Self> {
        let timestamp_micros = <i64 as BinarySerializable>::deserialize(reader)?;
        Ok(Self::from_timestamp_micros(timestamp_micros))
    }
}

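Since serialization goes through a microsecond timestamp, a value built from microseconds round-trips exactly while sub-microsecond detail is dropped. A small sketch, assuming the crate is imported as `common` as elsewhere in this diff:

    use common::{BinarySerializable, DateTime};

    fn main() -> std::io::Result<()> {
        // Stored as an i64 of microseconds, so this round-trips exactly.
        let original = DateTime::from_timestamp_micros(1_655_899_200_123_456);
        let mut buf: Vec<u8> = Vec::new();
        original.serialize(&mut buf)?;
        let restored = DateTime::deserialize(&mut &buf[..])?;
        assert_eq!(restored, original);
        Ok(())
    }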
common/src/json_path_writer.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
use crate::replace_in_place;
|
||||
|
||||
/// Separates the different segments of a json path.
|
||||
pub const JSON_PATH_SEGMENT_SEP: u8 = 1u8;
|
||||
pub const JSON_PATH_SEGMENT_SEP_STR: &str =
|
||||
unsafe { std::str::from_utf8_unchecked(&[JSON_PATH_SEGMENT_SEP]) };
|
||||
|
||||
/// Create a new JsonPathWriter, that creates flattened json paths for tantivy.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct JsonPathWriter {
|
||||
path: String,
|
||||
indices: Vec<usize>,
|
||||
expand_dots: bool,
|
||||
}
|
||||
|
||||
impl JsonPathWriter {
|
||||
pub fn new() -> Self {
|
||||
JsonPathWriter {
|
||||
path: String::new(),
|
||||
indices: Vec::new(),
|
||||
expand_dots: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// When expand_dots is enabled, json object like
|
||||
/// `{"k8s.node.id": 5}` is processed as if it was
|
||||
/// `{"k8s": {"node": {"id": 5}}}`.
|
||||
/// This option has the merit of allowing users to
|
||||
/// write queries like `k8s.node.id:5`.
|
||||
/// On the other, enabling that feature can lead to
|
||||
/// ambiguity.
|
||||
#[inline]
|
||||
pub fn set_expand_dots(&mut self, expand_dots: bool) {
|
||||
self.expand_dots = expand_dots;
|
||||
}
|
||||
|
||||
/// Push a new segment to the path.
|
||||
#[inline]
|
||||
pub fn push(&mut self, segment: &str) {
|
||||
let len_path = self.path.len();
|
||||
self.indices.push(len_path);
|
||||
if !self.path.is_empty() {
|
||||
self.path.push_str(JSON_PATH_SEGMENT_SEP_STR);
|
||||
}
|
||||
self.path.push_str(segment);
|
||||
if self.expand_dots {
|
||||
// This might include the separation byte, which is ok because it is not a dot.
|
||||
let appended_segment = &mut self.path[len_path..];
|
||||
// The unsafe below is safe as long as b'.' and JSON_PATH_SEGMENT_SEP are
|
||||
// valid single byte ut8 strings.
|
||||
// By utf-8 design, they cannot be part of another codepoint.
|
||||
unsafe {
|
||||
replace_in_place(b'.', JSON_PATH_SEGMENT_SEP, appended_segment.as_bytes_mut())
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove the last segment. Does nothing if the path is empty.
|
||||
#[inline]
|
||||
pub fn pop(&mut self) {
|
||||
if let Some(last_idx) = self.indices.pop() {
|
||||
self.path.truncate(last_idx);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear the path.
|
||||
#[inline]
|
||||
pub fn clear(&mut self) {
|
||||
self.path.clear();
|
||||
self.indices.clear();
|
||||
}
|
||||
|
||||
/// Get the current path.
|
||||
#[inline]
|
||||
pub fn as_str(&self) -> &str {
|
||||
&self.path
|
||||
}
|
||||
}
|
||||
|
||||
impl From<JsonPathWriter> for String {
|
||||
#[inline]
|
||||
fn from(value: JsonPathWriter) -> Self {
|
||||
value.path
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn json_path_writer_test() {
|
||||
let mut writer = JsonPathWriter::new();
|
||||
|
||||
writer.push("root");
|
||||
assert_eq!(writer.as_str(), "root");
|
||||
|
||||
writer.push("child");
|
||||
assert_eq!(writer.as_str(), "root\u{1}child");
|
||||
|
||||
writer.pop();
|
||||
assert_eq!(writer.as_str(), "root");
|
||||
|
||||
writer.push("k8s.node.id");
|
||||
assert_eq!(writer.as_str(), "root\u{1}k8s.node.id");
|
||||
|
||||
writer.set_expand_dots(true);
|
||||
writer.pop();
|
||||
writer.push("k8s.node.id");
|
||||
assert_eq!(writer.as_str(), "root\u{1}k8s\u{1}node\u{1}id");
|
||||
}
|
||||
}
|
||||
@@ -9,15 +9,15 @@ mod byte_count;
|
||||
mod datetime;
|
||||
pub mod file_slice;
|
||||
mod group_by;
|
||||
mod json_path_writer;
|
||||
mod serialize;
|
||||
mod vint;
|
||||
mod writer;
|
||||
pub use bitset::*;
|
||||
pub use byte_count::ByteCount;
|
||||
#[allow(deprecated)]
|
||||
pub use datetime::DatePrecision;
|
||||
pub use datetime::{DateTime, DateTimePrecision};
|
||||
pub use group_by::GroupByIteratorExtended;
|
||||
pub use json_path_writer::JsonPathWriter;
|
||||
pub use ownedbytes::{OwnedBytes, StableDeref};
|
||||
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
|
||||
pub use vint::{
|
||||
@@ -116,6 +116,7 @@ pub fn u64_to_f64(val: u64) -> f64 {
|
||||
///
|
||||
/// This function assumes that the needle is rarely contained in the bytes string
|
||||
/// and offers a fast path if the needle is not present.
|
||||
#[inline]
|
||||
pub fn replace_in_place(needle: u8, replacement: u8, bytes: &mut [u8]) {
|
||||
if !bytes.contains(&needle) {
|
||||
return;
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::borrow::Cow;
|
||||
use std::io::{Read, Write};
|
||||
use std::{fmt, io};
|
||||
|
||||
@@ -249,11 +250,47 @@ impl BinarySerializable for String {
    }
}

impl<'a> BinarySerializable for Cow<'a, str> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        let data: &[u8] = self.as_bytes();
        VInt(data.len() as u64).serialize(writer)?;
        writer.write_all(data)
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, str>> {
        let string_length = VInt::deserialize(reader)?.val() as usize;
        let mut result = String::with_capacity(string_length);
        reader
            .take(string_length as u64)
            .read_to_string(&mut result)?;
        Ok(Cow::Owned(result))
    }
}

impl<'a> BinarySerializable for Cow<'a, [u8]> {
    fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.len() as u64).serialize(writer)?;
        for it in self.iter() {
            it.serialize(writer)?;
        }
        Ok(())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Cow<'a, [u8]>> {
        let num_items = VInt::deserialize(reader)?.val();
        let mut items: Vec<u8> = Vec::with_capacity(num_items as usize);
        for _ in 0..num_items {
            let item = u8::deserialize(reader)?;
            items.push(item);
        }
        Ok(Cow::Owned(items))
    }
}

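A quick round trip through the new `Cow<str>` impl shown above, a sketch assuming the crate is imported as `common` as elsewhere in this diff:

    use std::borrow::Cow;

    use common::BinarySerializable;

    fn main() -> std::io::Result<()> {
        let original: Cow<str> = Cow::Borrowed("hello");
        let mut buf: Vec<u8> = Vec::new();
        original.serialize(&mut buf)?;
        // The buffer holds a VInt length prefix followed by the raw UTF-8 bytes.
        let restored = <Cow<str> as BinarySerializable>::deserialize(&mut &buf[..])?;
        assert_eq!(restored, original);
        Ok(())
    }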
#[cfg(test)]
|
||||
pub mod test {
|
||||
|
||||
use super::{VInt, *};
|
||||
use crate::serialize::BinarySerializable;
|
||||
use super::*;
|
||||
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
|
||||
let mut buffer = Vec::new();
|
||||
O::default().serialize(&mut buffer).unwrap();
|
||||
|
||||
@@ -12,7 +12,7 @@ use tantivy::aggregation::agg_result::AggregationResults;
|
||||
use tantivy::aggregation::AggregationCollector;
|
||||
use tantivy::query::AllQuery;
|
||||
use tantivy::schema::{self, IndexRecordOption, Schema, TextFieldIndexing, FAST};
|
||||
use tantivy::Index;
|
||||
use tantivy::{Index, IndexWriter, TantivyDocument};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Create Schema
|
||||
@@ -132,10 +132,10 @@ fn main() -> tantivy::Result<()> {
|
||||
|
||||
let stream = Deserializer::from_str(data).into_iter::<Value>();
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
let mut num_indexed = 0;
|
||||
for value in stream {
|
||||
let doc = schema.parse_document(&serde_json::to_string(&value.unwrap())?)?;
|
||||
let doc = TantivyDocument::parse_json(&schema, &serde_json::to_string(&value.unwrap())?)?;
|
||||
index_writer.add_document(doc)?;
|
||||
num_indexed += 1;
|
||||
if num_indexed > 4 {
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::{doc, Index, ReloadPolicy};
|
||||
use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
@@ -75,7 +75,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// Here we give tantivy a budget of `50MB`.
|
||||
// Using a bigger memory_arena for the indexer may increase
|
||||
// throughput, but 50 MB is already plenty.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
|
||||
// Let's index our documents!
|
||||
// We first need a handle on the title and the body field.
|
||||
@@ -87,7 +87,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let title = schema.get_field("title").unwrap();
|
||||
let body = schema.get_field("body").unwrap();
|
||||
|
||||
let mut old_man_doc = Document::default();
|
||||
let mut old_man_doc = TantivyDocument::default();
|
||||
old_man_doc.add_text(title, "The Old Man and the Sea");
|
||||
old_man_doc.add_text(
|
||||
body,
|
||||
@@ -164,7 +164,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// will reload the index automatically after each commit.
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.reload_policy(ReloadPolicy::OnCommitWithDelay)
|
||||
.try_into()?;
|
||||
|
||||
// We now need to acquire a searcher.
|
||||
@@ -217,8 +217,8 @@ fn main() -> tantivy::Result<()> {
|
||||
// the document returned will only contain
|
||||
// a title.
|
||||
for (_score, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("{}", retrieved_doc.to_json(&schema));
|
||||
}
|
||||
|
||||
// We can also get an explanation to understand
|
||||
|
||||
@@ -13,7 +13,7 @@ use columnar::Column;
|
||||
use tantivy::collector::{Collector, SegmentCollector};
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index, Score, SegmentReader};
|
||||
use tantivy::{doc, Index, IndexWriter, Score, SegmentReader};
|
||||
|
||||
#[derive(Default)]
|
||||
struct Stats {
|
||||
@@ -142,7 +142,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// this example.
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
index_writer.add_document(doc!(
|
||||
product_name => "Super Broom 2000",
|
||||
product_description => "While it is ok for short distance travel, this broom \
|
||||
|
||||
@@ -6,7 +6,7 @@ use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::*;
|
||||
use tantivy::tokenizer::NgramTokenizer;
|
||||
use tantivy::{doc, Index};
|
||||
use tantivy::{doc, Index, IndexWriter};
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
@@ -62,7 +62,7 @@ fn main() -> tantivy::Result<()> {
|
||||
//
|
||||
// Here we use a buffer of 50MB per thread. Using a bigger
|
||||
// memory arena for the indexer can increase its throughput.
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let mut index_writer: IndexWriter = index.writer(50_000_000)?;
|
||||
index_writer.add_document(doc!(
|
||||
title => "The Old Man and the Sea",
|
||||
body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
|
||||
@@ -103,8 +103,8 @@ fn main() -> tantivy::Result<()> {
|
||||
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
|
||||
|
||||
for (_, doc_address) in top_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
println!("{}", schema.to_json(&retrieved_doc));
|
||||
let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
|
||||
println!("{}", retrieved_doc.to_json(&schema));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -4,8 +4,8 @@

use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
-use tantivy::schema::{DateOptions, Schema, Value, INDEXED, STORED, STRING};
-use tantivy::Index;
+use tantivy::schema::{DateOptions, Document, OwnedValue, Schema, INDEXED, STORED, STRING};
+use tantivy::{Index, IndexWriter, TantivyDocument};

fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -22,16 +22,18 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_ram(schema.clone());

-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;
// The dates are passed as string in the RFC3339 format
-let doc = schema.parse_document(
+let doc = TantivyDocument::parse_json(
+&schema,
r#"{
"occurred_at": "2022-06-22T12:53:50.53Z",
"event": "pull-request"
}"#,
)?;
index_writer.add_document(doc)?;
-let doc = schema.parse_document(
+let doc = TantivyDocument::parse_json(
+&schema,
r#"{
"occurred_at": "2022-06-22T13:00:00.22Z",
"event": "comment"
@@ -58,13 +60,13 @@ fn main() -> tantivy::Result<()> {
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
assert_eq!(count_docs.len(), 1);
for (_score, doc_address) in count_docs {
-let retrieved_doc = searcher.doc(doc_address)?;
+let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
assert!(matches!(
retrieved_doc.get_first(occurred_at),
-Some(Value::Date(_))
+Some(OwnedValue::Date(_))
));
assert_eq!(
-schema.to_json(&retrieved_doc),
+retrieved_doc.to_json(&schema),
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
);
}
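For orientation, here is a minimal, self-contained sketch (not part of the diff; the field name and sample document are illustrative) of the 0.22-style document API that the hunks above migrate to: `TantivyDocument::parse_json` replaces `Schema::parse_document`, `Searcher::doc` becomes generic over the document type, and `to_json` moves from the schema onto the document.

use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{Index, IndexWriter, TantivyDocument};

fn demo() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema.clone());

    // The writer is generic over the document type in 0.22, hence the annotation.
    let mut writer: IndexWriter = index.writer(50_000_000)?;
    let doc = TantivyDocument::parse_json(&schema, r#"{"title": "The Old Man and the Sea"}"#)?;
    writer.add_document(doc)?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("sea")?;
    for (_score, addr) in searcher.search(&query, &TopDocs::with_limit(1))? {
        // `Searcher::doc` now takes the concrete document type as a parameter.
        let retrieved: TantivyDocument = searcher.doc(addr)?;
        println!("{}", retrieved.to_json(&schema));
    }
    Ok(())
}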
@@ -11,7 +11,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
-use tantivy::{doc, Index, IndexReader};
+use tantivy::{doc, Index, IndexReader, IndexWriter};

// A simple helper function to fetch a single document
// given its id from our index.
@@ -19,7 +19,7 @@ use tantivy::{doc, Index, IndexReader};
fn extract_doc_given_isbn(
reader: &IndexReader,
isbn_term: &Term,
-) -> tantivy::Result<Option<Document>> {
+) -> tantivy::Result<Option<TantivyDocument>> {
let searcher = reader.searcher();

// This is the simplest query you can think of.
@@ -69,10 +69,10 @@ fn main() -> tantivy::Result<()> {

let index = Index::create_in_ram(schema.clone());

-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;

// Let's add a couple of documents, for the sake of the example.
-let mut old_man_doc = Document::default();
+let mut old_man_doc = TantivyDocument::default();
old_man_doc.add_text(title, "The Old Man and the Sea");
index_writer.add_document(doc!(
isbn => "978-0099908401",
@@ -94,7 +94,7 @@ fn main() -> tantivy::Result<()> {
// Oops our frankenstein doc seems misspelled
let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!(
-schema.to_json(&frankenstein_doc_misspelled),
+frankenstein_doc_misspelled.to_json(&schema),
r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
);

@@ -136,7 +136,7 @@ fn main() -> tantivy::Result<()> {
// No more typo!
let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!(
-schema.to_json(&frankenstein_new_doc),
+frankenstein_new_doc.to_json(&schema),
r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,
);
@@ -17,7 +17,7 @@
use tantivy::collector::FacetCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::*;
-use tantivy::{doc, Index};
+use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the sake of this example
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

-let mut index_writer = index.writer(30_000_000)?;
+let mut index_writer: IndexWriter = index.writer(30_000_000)?;

// For convenience, tantivy also comes with a macro to
// reduce the boilerplate above.
@@ -12,7 +12,7 @@ use std::collections::HashSet;
use tantivy::collector::TopDocs;
use tantivy::query::BooleanQuery;
use tantivy::schema::*;
-use tantivy::{doc, DocId, Index, Score, SegmentReader};
+use tantivy::{doc, DocId, Index, IndexWriter, Score, SegmentReader};

fn main() -> tantivy::Result<()> {
let mut schema_builder = Schema::builder();
@@ -23,7 +23,7 @@ fn main() -> tantivy::Result<()> {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

-let mut index_writer = index.writer(30_000_000)?;
+let mut index_writer: IndexWriter = index.writer(30_000_000)?;

index_writer.add_document(doc!(
title => "Fried egg",
@@ -91,11 +91,10 @@ fn main() -> tantivy::Result<()> {
.iter()
.map(|(_, doc_id)| {
searcher
-.doc(*doc_id)
+.doc::<TantivyDocument>(*doc_id)
.unwrap()
.get_first(title)
-.unwrap()
-.as_text()
+.and_then(|v| v.as_str())
.unwrap()
.to_owned()
})
@@ -14,7 +14,7 @@
use tantivy::collector::{Count, TopDocs};
use tantivy::query::FuzzyTermQuery;
use tantivy::schema::*;
-use tantivy::{doc, Index, ReloadPolicy};
+use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
@@ -66,7 +66,7 @@ fn main() -> tantivy::Result<()> {
// Here we give tantivy a budget of `50MB`.
// Using a bigger memory_arena for the indexer may increase
// throughput, but 50 MB is already plenty.
-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;

// Let's index our documents!
// We first need a handle on the title and the body field.
@@ -123,7 +123,7 @@ fn main() -> tantivy::Result<()> {
// will reload the index automatically after each commit.
let reader = index
.reader_builder()
-.reload_policy(ReloadPolicy::OnCommit)
+.reload_policy(ReloadPolicy::OnCommitWithDelay)
.try_into()?;

// We now need to acquire a searcher.
@@ -151,10 +151,10 @@ fn main() -> tantivy::Result<()> {
assert_eq!(count, 3);
assert_eq!(top_docs.len(), 3);
for (score, doc_address) in top_docs {
-let retrieved_doc = searcher.doc(doc_address)?;
// Note that the score is not lower for the fuzzy hit.
// There's an issue open for that: https://github.com/quickwit-oss/tantivy/issues/563
-println!("score {score:?} doc {}", schema.to_json(&retrieved_doc));
+let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
+println!("score {score:?} doc {}", retrieved_doc.to_json(&schema));
// score 1.0 doc {"title":["The Diary of Muadib"]}
//
// score 1.0 doc {"title":["The Diary of a Young Girl"]}
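As a usage note on the `ReloadPolicy` rename seen above, here is a small sketch (an assumption-level illustration, not taken from the diff) of building a reader under the 0.22 name; `OnCommitWithDelay` replaces the old `OnCommit` variant.

use tantivy::{Index, IndexReader, ReloadPolicy};

// Build a reader whose searchers are refreshed shortly after each commit.
fn build_reader(index: &Index) -> tantivy::Result<IndexReader> {
    index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommitWithDelay)
        .try_into()
}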
@@ -21,7 +21,7 @@ fn main() -> tantivy::Result<()> {
}"#;

// We can parse our document
-let _mice_and_men_doc = schema.parse_document(mice_and_men_doc_json)?;
+let _mice_and_men_doc = TantivyDocument::parse_json(&schema, mice_and_men_doc_json)?;

// Multi-valued field are allowed, they are
// expressed in JSON by an array.
@@ -30,7 +30,7 @@ fn main() -> tantivy::Result<()> {
"title": ["Frankenstein", "The Modern Prometheus"],
"year": 1818
}"#;
-let _frankenstein_doc = schema.parse_document(frankenstein_json)?;
+let _frankenstein_doc = TantivyDocument::parse_json(&schema, frankenstein_json)?;

// Note that the schema is saved in your index directory.
//
@@ -5,7 +5,7 @@
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
-use tantivy::{doc, Index, Result};
+use tantivy::{doc, Index, IndexWriter, Result};

fn main() -> Result<()> {
// For the sake of simplicity, this schema will only have 1 field
@@ -17,7 +17,7 @@ fn main() -> Result<()> {
let index = Index::create_in_ram(schema);
let reader = index.reader()?;
{
-let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
+let mut index_writer: IndexWriter = index.writer_with_num_threads(1, 6_000_000)?;
for year in 1950u64..2019u64 {
index_writer.add_document(doc!(year_field => year))?;
}
@@ -6,7 +6,7 @@
use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, STORED, STRING};
-use tantivy::Index;
+use tantivy::{Index, IndexWriter, TantivyDocument};

fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -22,20 +22,22 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_ram(schema.clone());

-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;

// ### IPv4
// Adding documents that contain an IPv4 address. Notice that the IP addresses are passed as
// `String`. Since the field is of type ip, we parse the IP address from the string and store it
// internally as IPv6.
-let doc = schema.parse_document(
+let doc = TantivyDocument::parse_json(
+&schema,
r#"{
"ip": "192.168.0.33",
"event_type": "login"
}"#,
)?;
index_writer.add_document(doc)?;
-let doc = schema.parse_document(
+let doc = TantivyDocument::parse_json(
+&schema,
r#"{
"ip": "192.168.0.80",
"event_type": "checkout"
@@ -44,7 +46,8 @@ fn main() -> tantivy::Result<()> {
index_writer.add_document(doc)?;
// ### IPv6
// Adding a document that contains an IPv6 address.
-let doc = schema.parse_document(
+let doc = TantivyDocument::parse_json(
+&schema,
r#"{
"ip": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
"event_type": "checkout"
@@ -10,7 +10,7 @@
// ---
// Importing tantivy...
use tantivy::schema::*;
-use tantivy::{doc, DocSet, Index, Postings, TERMINATED};
+use tantivy::{doc, DocSet, Index, IndexWriter, Postings, TERMINATED};

fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the
@@ -24,7 +24,7 @@ fn main() -> tantivy::Result<()> {

let index = Index::create_in_ram(schema);

-let mut index_writer = index.writer_with_num_threads(1, 50_000_000)?;
+let mut index_writer: IndexWriter = index.writer_with_num_threads(1, 50_000_000)?;
index_writer.add_document(doc!(title => "The Old Man and the Sea"))?;
index_writer.add_document(doc!(title => "Of Mice and Men"))?;
index_writer.add_document(doc!(title => "The modern Promotheus"))?;
@@ -7,7 +7,7 @@
use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, STORED, STRING, TEXT};
-use tantivy::Index;
+use tantivy::{Index, IndexWriter, TantivyDocument};

fn main() -> tantivy::Result<()> {
// # Defining the schema
@@ -20,8 +20,9 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_ram(schema.clone());

-let mut index_writer = index.writer(50_000_000)?;
-let doc = schema.parse_document(
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;
+let doc = TantivyDocument::parse_json(
+&schema,
r#"{
"timestamp": "2022-02-22T23:20:50.53Z",
"event_type": "click",
@@ -33,7 +34,8 @@ fn main() -> tantivy::Result<()> {
}"#,
)?;
index_writer.add_document(doc)?;
-let doc = schema.parse_document(
+let doc = TantivyDocument::parse_json(
+&schema,
r#"{
"timestamp": "2022-02-22T23:20:51.53Z",
"event_type": "click",
@@ -1,7 +1,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
-use tantivy::{doc, Index, ReloadPolicy, Result};
+use tantivy::{doc, Index, IndexWriter, ReloadPolicy, Result};
use tempfile::TempDir;

fn main() -> Result<()> {
@@ -17,7 +17,7 @@ fn main() -> Result<()> {

let index = Index::create_in_dir(&index_path, schema)?;

-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;

index_writer.add_document(doc!(
title => "The Old Man and the Sea",
@@ -51,7 +51,7 @@ fn main() -> Result<()> {

let reader = index
.reader_builder()
-.reload_policy(ReloadPolicy::OnCommit)
+.reload_policy(ReloadPolicy::OnCommitWithDelay)
.try_into()?;

let searcher = reader.searcher();
@@ -67,8 +67,12 @@ fn main() -> Result<()> {
let mut titles = top_docs
.into_iter()
.map(|(_score, doc_address)| {
-let doc = searcher.doc(doc_address)?;
-let title = doc.get_first(title).unwrap().as_text().unwrap().to_owned();
+let doc = searcher.doc::<TantivyDocument>(doc_address)?;
+let title = doc
+.get_first(title)
+.and_then(|v| v.as_str())
+.unwrap()
+.to_owned();
Ok(title)
})
.collect::<Result<Vec<_>>>()?;
@@ -13,7 +13,7 @@ use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
-use tantivy::{doc, Index, ReloadPolicy};
+use tantivy::{doc, Index, IndexWriter, ReloadPolicy};
use tempfile::TempDir;

fn pre_tokenize_text(text: &str) -> Vec<Token> {
@@ -38,7 +38,7 @@ fn main() -> tantivy::Result<()> {

let index = Index::create_in_dir(&index_path, schema.clone())?;

-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;

// We can create a document manually, by setting the fields
// one by one in a Document object.
@@ -83,7 +83,7 @@ fn main() -> tantivy::Result<()> {
}]
}"#;

-let short_man_doc = schema.parse_document(short_man_json)?;
+let short_man_doc = TantivyDocument::parse_json(&schema, short_man_json)?;

index_writer.add_document(short_man_doc)?;

@@ -94,7 +94,7 @@ fn main() -> tantivy::Result<()> {

let reader = index
.reader_builder()
-.reload_policy(ReloadPolicy::OnCommit)
+.reload_policy(ReloadPolicy::OnCommitWithDelay)
.try_into()?;

let searcher = reader.searcher();
@@ -115,8 +115,8 @@ fn main() -> tantivy::Result<()> {
// Note that the tokens are not stored along with the original text
// in the document store
for (_score, doc_address) in top_docs {
-let retrieved_doc = searcher.doc(doc_address)?;
-println!("Document: {}", schema.to_json(&retrieved_doc));
+let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
+println!("{}", retrieved_doc.to_json(&schema));
}

// In contrary to the previous query, when we search for the "man" term we
@@ -10,7 +10,8 @@
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
-use tantivy::{doc, Index, Snippet, SnippetGenerator};
+use tantivy::snippet::{Snippet, SnippetGenerator};
+use tantivy::{doc, Index, IndexWriter};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
@@ -27,7 +28,7 @@ fn main() -> tantivy::Result<()> {
// # Indexing documents
let index = Index::create_in_dir(&index_path, schema)?;

-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;

// we'll only need one doc for this example.
index_writer.add_document(doc!(
@@ -54,13 +55,10 @@ fn main() -> tantivy::Result<()> {
let snippet_generator = SnippetGenerator::create(&searcher, &*query, body)?;

for (score, doc_address) in top_docs {
-let doc = searcher.doc(doc_address)?;
+let doc = searcher.doc::<TantivyDocument>(doc_address)?;
let snippet = snippet_generator.snippet_from_doc(&doc);
println!("Document score {score}:");
-println!(
-"title: {}",
-doc.get_first(title).unwrap().as_text().unwrap()
-);
+println!("title: {}", doc.get_first(title).unwrap().as_str().unwrap());
println!("snippet: {}", snippet.to_html());
println!("custom highlighting: {}", highlight(snippet));
}
@@ -15,7 +15,7 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
-use tantivy::{doc, Index};
+use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
// this example assumes you understand the content in `basic_search`
@@ -60,7 +60,7 @@ fn main() -> tantivy::Result<()> {

index.tokenizers().register("stoppy", tokenizer);

-let mut index_writer = index.writer(50_000_000)?;
+let mut index_writer: IndexWriter = index.writer(50_000_000)?;

let title = schema.get_field("title").unwrap();
let body = schema.get_field("body").unwrap();
@@ -105,9 +105,9 @@ fn main() -> tantivy::Result<()> {
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

for (score, doc_address) in top_docs {
-let retrieved_doc = searcher.doc(doc_address)?;
+let retrieved_doc: TantivyDocument = searcher.doc(doc_address)?;
println!("\n==\nDocument score {score}:");
-println!("{}", schema.to_json(&retrieved_doc));
+println!("{}", retrieved_doc.to_json(&schema));
}

Ok(())
@@ -6,8 +6,8 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{
-doc, DocAddress, DocId, Index, Opstamp, Searcher, SearcherGeneration, SegmentId, SegmentReader,
-Warmer,
+doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration, SegmentId,
+SegmentReader, Warmer,
};

// This example shows how warmers can be used to
@@ -143,7 +143,7 @@ fn main() -> tantivy::Result<()> {
const SNEAKERS: ProductId = 23222;

let index = Index::create_in_ram(schema);
-let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
+let mut writer: IndexWriter = index.writer_with_num_threads(1, 15_000_000)?;
writer.add_document(doc!(product_id=>OLIVE_OIL, text=>"cooking olive oil from greece"))?;
writer.add_document(doc!(product_id=>GLOVES, text=>"kitchen gloves, perfect for cooking"))?;
writer.add_document(doc!(product_id=>SNEAKERS, text=>"uber sweet sneakers"))?;
@@ -1,7 +1,7 @@
[package]
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
-version = "0.6.0"
+version = "0.7.0"
edition = "2021"
description = "Expose data as static slice"
license = "MIT"

@@ -1,4 +1,3 @@
-use std::convert::TryInto;
use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io};

@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
-version = "0.21.0"
+version = "0.22.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -81,8 +81,8 @@ where
T: InputTakeAtPosition + Clone,
<T as InputTakeAtPosition>::Item: AsChar + Clone,
{
-opt_i(nom::character::complete::space0)(input)
-.map(|(left, (spaces, errors))| (left, (spaces.expect("space0 can't fail"), errors)))
+opt_i(nom::character::complete::multispace0)(input)
+.map(|(left, (spaces, errors))| (left, (spaces.expect("multispace0 can't fail"), errors)))
}

pub(crate) fn space1_infallible<T>(input: T) -> JResult<T, Option<T>>
@@ -90,7 +90,7 @@ where
T: InputTakeAtPosition + Clone + InputLength,
<T as InputTakeAtPosition>::Item: AsChar + Clone,
{
-opt_i(nom::character::complete::space1)(input).map(|(left, (spaces, mut errors))| {
+opt_i(nom::character::complete::multispace1)(input).map(|(left, (spaces, mut errors))| {
if spaces.is_none() {
errors.push(LenientErrorInternal {
pos: left.input_len(),
@@ -3,11 +3,11 @@ use std::iter::once;
use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::character::complete::{
-anychar, char, digit1, none_of, one_of, satisfy, space0, space1, u32,
+anychar, char, digit1, multispace0, multispace1, none_of, one_of, satisfy, u32,
};
use nom::combinator::{eof, map, map_res, opt, peek, recognize, value, verify};
use nom::error::{Error, ErrorKind};
-use nom::multi::{many0, many1, separated_list0, separated_list1};
+use nom::multi::{many0, many1, separated_list0};
use nom::sequence::{delimited, preceded, separated_pair, terminated, tuple};
use nom::IResult;

@@ -65,7 +65,7 @@ fn word_infallible(delimiter: &str) -> impl Fn(&str) -> JResult<&str, Option<&st
|inp| {
opt_i_err(
preceded(
-space0,
+multispace0,
recognize(many1(satisfy(|c| {
!c.is_whitespace() && !delimiter.contains(c)
}))),
@@ -185,7 +185,7 @@ fn term_or_phrase(inp: &str) -> IResult<&str, UserInputLeaf> {
fn term_or_phrase_infallible(inp: &str) -> JResult<&str, Option<UserInputLeaf>> {
map(
// ~* for slop/prefix, ) inside group or ast tree, ^ if boost
-tuple_infallible((simple_term_infallible("*)^"), slop_or_prefix_val)),
+tuple_infallible((simple_term_infallible(")^"), slop_or_prefix_val)),
|((delimiter_phrase, (slop, prefix)), errors)| {
let leaf = if let Some((delimiter, phrase)) = delimiter_phrase {
Some(
@@ -225,10 +225,10 @@ fn term_group(inp: &str) -> IResult<&str, UserInputAst> {

map(
tuple((
-terminated(field_name, space0),
+terminated(field_name, multispace0),
delimited(
-tuple((char('('), space0)),
-separated_list0(space1, tuple((opt(occur_symbol), term_or_phrase))),
+tuple((char('('), multispace0)),
+separated_list0(multispace1, tuple((opt(occur_symbol), term_or_phrase))),
char(')'),
),
)),
@@ -250,7 +250,7 @@ fn term_group_precond(inp: &str) -> IResult<&str, (), ()> {
(),
peek(tuple((
field_name,
-space0,
+multispace0,
char('('), // when we are here, we know it can't be anything but a term group
))),
)(inp)
@@ -259,7 +259,7 @@ fn term_group_precond(inp: &str) -> IResult<&str, (), ()> {

fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
let (mut inp, (field_name, _, _, _)) =
-tuple((field_name, space0, char('('), space0))(inp).expect("precondition failed");
+tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");

let mut terms = Vec::new();
let mut errs = Vec::new();
@@ -305,7 +305,7 @@ fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
UserInputLeaf::Exists {
field: String::new(),
},
-tuple((space0, char('*'))),
+tuple((multispace0, char('*'))),
)(inp)
}

@@ -314,7 +314,7 @@ fn exists_precond(inp: &str) -> IResult<&str, (), ()> {
(),
peek(tuple((
field_name,
-space0,
+multispace0,
char('*'), // when we are here, we know it can't be anything but a exists
))),
)(inp)
@@ -323,7 +323,7 @@ fn exists_precond(inp: &str) -> IResult<&str, (), ()> {

fn exists_infallible(inp: &str) -> JResult<&str, UserInputAst> {
let (inp, (field_name, _, _)) =
-tuple((field_name, space0, char('*')))(inp).expect("precondition failed");
+tuple((field_name, multispace0, char('*')))(inp).expect("precondition failed");

let exists = UserInputLeaf::Exists { field: field_name }.into();
Ok((inp, (exists, Vec::new())))
@@ -349,7 +349,7 @@ fn literal_no_group_infallible(inp: &str) -> JResult<&str, Option<UserInputAst>>
alt_infallible(
(
(
-value((), tuple((tag("IN"), space0, char('[')))),
+value((), tuple((tag("IN"), multispace0, char('[')))),
map(set_infallible, |(set, errs)| (Some(set), errs)),
),
(
@@ -430,8 +430,8 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
// check for unbounded range in the form of <5, <=10, >5, >=5
let elastic_unbounded_range = map(
tuple((
-preceded(space0, alt((tag(">="), tag("<="), tag("<"), tag(">")))),
-preceded(space0, range_term_val()),
+preceded(multispace0, alt((tag(">="), tag("<="), tag("<"), tag(">")))),
+preceded(multispace0, range_term_val()),
)),
|(comparison_sign, bound)| match comparison_sign {
">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
@@ -444,7 +444,7 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
);

let lower_bound = map(
-separated_pair(one_of("{["), space0, range_term_val()),
+separated_pair(one_of("{["), multispace0, range_term_val()),
|(boundary_char, lower_bound)| {
if lower_bound == "*" {
UserInputBound::Unbounded
@@ -457,7 +457,7 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
);

let upper_bound = map(
-separated_pair(range_term_val(), space0, one_of("}]")),
+separated_pair(range_term_val(), multispace0, one_of("}]")),
|(upper_bound, boundary_char)| {
if upper_bound == "*" {
UserInputBound::Unbounded
@@ -469,8 +469,11 @@ fn range(inp: &str) -> IResult<&str, UserInputLeaf> {
},
);

-let lower_to_upper =
-separated_pair(lower_bound, tuple((space1, tag("TO"), space1)), upper_bound);
+let lower_to_upper = separated_pair(
+lower_bound,
+tuple((multispace1, tag("TO"), multispace1)),
+upper_bound,
+);

map(
alt((elastic_unbounded_range, lower_to_upper)),
@@ -490,13 +493,16 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
word_infallible("]}"),
space1_infallible,
opt_i_err(
-terminated(tag("TO"), alt((value((), space1), value((), eof)))),
+terminated(tag("TO"), alt((value((), multispace1), value((), eof)))),
"missing keyword TO",
),
word_infallible("]}"),
opt_i_err(one_of("]}"), "missing range delimiter"),
)),
-|((lower_bound_kind, _space0, lower, _space1, to, upper, upper_bound_kind), errs)| {
+|(
+(lower_bound_kind, _multispace0, lower, _multispace1, to, upper, upper_bound_kind),
+errs,
+)| {
let lower_bound = match (lower_bound_kind, lower) {
(_, Some("*")) => UserInputBound::Unbounded,
(_, None) => UserInputBound::Unbounded,
@@ -596,10 +602,10 @@ fn range_infallible(inp: &str) -> JResult<&str, UserInputLeaf> {
fn set(inp: &str) -> IResult<&str, UserInputLeaf> {
map(
preceded(
-tuple((space0, tag("IN"), space1)),
+tuple((multispace0, tag("IN"), multispace1)),
delimited(
-tuple((char('['), space0)),
-separated_list0(space1, map(simple_term, |(_, term)| term)),
+tuple((char('['), multispace0)),
+separated_list0(multispace1, map(simple_term, |(_, term)| term)),
char(']'),
),
),
@@ -667,7 +673,7 @@ fn leaf(inp: &str) -> IResult<&str, UserInputAst> {
alt((
delimited(char('('), ast, char(')')),
map(char('*'), |_| UserInputAst::from(UserInputLeaf::All)),
-map(preceded(tuple((tag("NOT"), space1)), leaf), negate),
+map(preceded(tuple((tag("NOT"), multispace1)), leaf), negate),
literal,
))(inp)
}
@@ -780,27 +786,23 @@ fn binary_operand(inp: &str) -> IResult<&str, BinaryOperand> {
}

fn aggregate_binary_expressions(
-left: UserInputAst,
-others: Vec<(BinaryOperand, UserInputAst)>,
-) -> UserInputAst {
-let mut dnf: Vec<Vec<UserInputAst>> = vec![vec![left]];
-for (operator, operand_ast) in others {
-match operator {
-BinaryOperand::And => {
-if let Some(last) = dnf.last_mut() {
-last.push(operand_ast);
-}
-}
-BinaryOperand::Or => {
-dnf.push(vec![operand_ast]);
-}
-}
-}
-if dnf.len() == 1 {
-UserInputAst::and(dnf.into_iter().next().unwrap()) //< safe
+left: (Option<Occur>, UserInputAst),
+others: Vec<(Option<BinaryOperand>, Option<Occur>, UserInputAst)>,
+) -> Result<UserInputAst, LenientErrorInternal> {
+let mut leafs = Vec::with_capacity(others.len() + 1);
+leafs.push((None, left.0, Some(left.1)));
+leafs.extend(
+others
+.into_iter()
+.map(|(operand, occur, ast)| (operand, occur, Some(ast))),
+);
+// the parameters we pass should statically guarantee we can't get errors
+// (no prefix BinaryOperand is provided)
+let (res, mut errors) = aggregate_infallible_expressions(leafs);
+if errors.is_empty() {
+Ok(res)
} else {
-let conjunctions = dnf.into_iter().map(UserInputAst::and).collect();
-UserInputAst::or(conjunctions)
+Err(errors.swap_remove(0))
}
}

@@ -816,30 +818,10 @@ fn aggregate_infallible_expressions(
return (UserInputAst::empty_query(), err);
}

-let use_operand = leafs.iter().any(|(operand, _, _)| operand.is_some());
-let all_operand = leafs
-.iter()
-.skip(1)
-.all(|(operand, _, _)| operand.is_some());
let early_operand = leafs
.iter()
.take(1)
.all(|(operand, _, _)| operand.is_some());
-let use_occur = leafs.iter().any(|(_, occur, _)| occur.is_some());

-if use_operand && use_occur {
-err.push(LenientErrorInternal {
-pos: 0,
-message: "Use of mixed occur and boolean operator".to_string(),
-});
-}

-if use_operand && !all_operand {
-err.push(LenientErrorInternal {
-pos: 0,
-message: "Missing boolean operator".to_string(),
-});
-}

if early_operand {
err.push(LenientErrorInternal {
@@ -866,7 +848,15 @@ fn aggregate_infallible_expressions(
Some(BinaryOperand::And) => Some(Occur::Must),
_ => Some(Occur::Should),
};
-clauses.push(vec![(occur.or(default_op), ast.clone())]);
+if occur == &Some(Occur::MustNot) && default_op == Some(Occur::Should) {
+// if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
+clauses.push(vec![(
+Some(Occur::Should),
+ast.clone().unary(Occur::MustNot),
+)])
+} else {
+clauses.push(vec![(occur.or(default_op), ast.clone())]);
+}
}
None => {
let default_op = match next_operator {
@@ -874,7 +864,15 @@ fn aggregate_infallible_expressions(
Some(BinaryOperand::Or) => Some(Occur::Should),
None => None,
};
-clauses.push(vec![(occur.or(default_op), ast.clone())])
+if occur == &Some(Occur::MustNot) && default_op == Some(Occur::Should) {
+// if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
+clauses.push(vec![(
+Some(Occur::Should),
+ast.clone().unary(Occur::MustNot),
+)])
+} else {
+clauses.push(vec![(occur.or(default_op), ast.clone())])
+}
}
}
}
@@ -891,7 +889,12 @@ fn aggregate_infallible_expressions(
}
}
Some(BinaryOperand::Or) => {
-clauses.push(vec![(last_occur.or(Some(Occur::Should)), last_ast)]);
+if last_occur == Some(Occur::MustNot) {
+// if occur is MustNot *and* operation is OR, we synthetize a ShouldNot
+clauses.push(vec![(Some(Occur::Should), last_ast.unary(Occur::MustNot))]);
+} else {
+clauses.push(vec![(last_occur.or(Some(Occur::Should)), last_ast)]);
+}
}
None => clauses.push(vec![(last_occur, last_ast)]),
}
@@ -917,35 +920,29 @@ fn aggregate_infallible_expressions(
}
}

-fn operand_leaf(inp: &str) -> IResult<&str, (BinaryOperand, UserInputAst)> {
-tuple((
-terminated(binary_operand, space0),
-terminated(boosted_leaf, space0),
-))(inp)
+fn operand_leaf(inp: &str) -> IResult<&str, (Option<BinaryOperand>, Option<Occur>, UserInputAst)> {
+map(
+tuple((
+terminated(opt(binary_operand), multispace0),
+terminated(occur_leaf, multispace0),
+)),
+|(operand, (occur, ast))| (operand, occur, ast),
+)(inp)
}

fn ast(inp: &str) -> IResult<&str, UserInputAst> {
-let boolean_expr = map(
-separated_pair(boosted_leaf, space1, many1(operand_leaf)),
+let boolean_expr = map_res(
+separated_pair(occur_leaf, multispace1, many1(operand_leaf)),
|(left, right)| aggregate_binary_expressions(left, right),
);
-let whitespace_separated_leaves = map(separated_list1(space1, occur_leaf), |subqueries| {
-if subqueries.len() == 1 {
-let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
-match occur_opt.unwrap_or(Occur::Should) {
-Occur::Must | Occur::Should => ast,
-Occur::MustNot => UserInputAst::Clause(vec![(Some(Occur::MustNot), ast)]),
-}
+let single_leaf = map(occur_leaf, |(occur, ast)| {
+if occur == Some(Occur::MustNot) {
+ast.unary(Occur::MustNot)
} else {
-UserInputAst::Clause(subqueries.into_iter().collect())
+ast
}
});

-delimited(
-space0,
-alt((boolean_expr, whitespace_separated_leaves)),
-space0,
-)(inp)
+delimited(multispace0, alt((boolean_expr, single_leaf)), multispace0)(inp)
}

fn ast_infallible(inp: &str) -> JResult<&str, UserInputAst> {
@@ -969,7 +966,7 @@ fn ast_infallible(inp: &str) -> JResult<&str, UserInputAst> {
}

pub fn parse_to_ast(inp: &str) -> IResult<&str, UserInputAst> {
-map(delimited(space0, opt(ast), eof), |opt_ast| {
+map(delimited(multispace0, opt(ast), eof), |opt_ast| {
rewrite_ast(opt_ast.unwrap_or_else(UserInputAst::empty_query))
})(inp)
}
@@ -1113,6 +1110,9 @@ mod test {
test_parse_query_to_ast_helper("'www-form-encoded'", "'www-form-encoded'");
test_parse_query_to_ast_helper("www-form-encoded", "www-form-encoded");
test_parse_query_to_ast_helper("www-form-encoded", "www-form-encoded");
+test_parse_query_to_ast_helper("mr james bo?d", "(*mr *james *bo?d)");
+test_parse_query_to_ast_helper("mr james bo*", "(*mr *james *bo*)");
+test_parse_query_to_ast_helper("mr james b*d", "(*mr *james *b*d)");
}

#[test]
@@ -1142,24 +1142,43 @@ mod test {
#[test]
fn test_parse_query_to_ast_binary_op() {
test_parse_query_to_ast_helper("a AND b", "(+a +b)");
+test_parse_query_to_ast_helper("a\nAND b", "(+a +b)");
test_parse_query_to_ast_helper("a OR b", "(?a ?b)");
test_parse_query_to_ast_helper("a OR b AND c", "(?a ?(+b +c))");
test_parse_query_to_ast_helper("a AND b AND c", "(+a +b +c)");
-test_is_parse_err("a OR b aaa", "(?a ?b *aaa)");
-test_is_parse_err("a AND b aaa", "(?(+a +b) *aaa)");
-test_is_parse_err("aaa a OR b ", "(*aaa ?a ?b)");
-test_is_parse_err("aaa ccc a OR b ", "(*aaa *ccc ?a ?b)");
-test_is_parse_err("aaa a AND b ", "(*aaa ?(+a +b))");
-test_is_parse_err("aaa ccc a AND b ", "(*aaa *ccc ?(+a +b))");
+test_parse_query_to_ast_helper("a OR b aaa", "(?a ?b *aaa)");
+test_parse_query_to_ast_helper("a AND b aaa", "(?(+a +b) *aaa)");
+test_parse_query_to_ast_helper("aaa a OR b ", "(*aaa ?a ?b)");
+test_parse_query_to_ast_helper("aaa ccc a OR b ", "(*aaa *ccc ?a ?b)");
+test_parse_query_to_ast_helper("aaa a AND b ", "(*aaa ?(+a +b))");
+test_parse_query_to_ast_helper("aaa ccc a AND b ", "(*aaa *ccc ?(+a +b))");
}

#[test]
fn test_parse_mixed_bool_occur() {
-test_is_parse_err("a OR b +aaa", "(?a ?b +aaa)");
-test_is_parse_err("a AND b -aaa", "(?(+a +b) -aaa)");
-test_is_parse_err("+a OR +b aaa", "(+a +b *aaa)");
-test_is_parse_err("-a AND -b aaa", "(?(-a -b) *aaa)");
-test_is_parse_err("-aaa +ccc -a OR b ", "(-aaa +ccc -a ?b)");
test_parse_query_to_ast_helper("+a OR +b", "(+a +b)");

+test_parse_query_to_ast_helper("a AND -b", "(+a -b)");
+test_parse_query_to_ast_helper("-a AND b", "(-a +b)");
+test_parse_query_to_ast_helper("a AND NOT b", "(+a +(-b))");
+test_parse_query_to_ast_helper("NOT a AND b", "(+(-a) +b)");
+
+test_parse_query_to_ast_helper("a AND NOT b AND c", "(+a +(-b) +c)");
+test_parse_query_to_ast_helper("a AND -b AND c", "(+a -b +c)");
+
+test_parse_query_to_ast_helper("a OR -b", "(?a ?(-b))");
+test_parse_query_to_ast_helper("-a OR b", "(?(-a) ?b)");
+test_parse_query_to_ast_helper("a OR NOT b", "(?a ?(-b))");
+test_parse_query_to_ast_helper("NOT a OR b", "(?(-a) ?b)");
+
+test_parse_query_to_ast_helper("a OR NOT b OR c", "(?a ?(-b) ?c)");
+test_parse_query_to_ast_helper("a OR -b OR c", "(?a ?(-b) ?c)");
+
+test_parse_query_to_ast_helper("a OR b +aaa", "(?a ?b +aaa)");
+test_parse_query_to_ast_helper("a AND b -aaa", "(?(+a +b) -aaa)");
+test_parse_query_to_ast_helper("+a OR +b aaa", "(+a +b *aaa)");
+test_parse_query_to_ast_helper("-a AND -b aaa", "(?(-a -b) *aaa)");
+test_parse_query_to_ast_helper("-aaa +ccc -a OR b ", "(-aaa +ccc ?(-a) ?b)");
}

#[test]
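Judging from the `space0`/`space1` to `multispace0`/`multispace1` swaps and the new `"a\nAND b"` test above, the 0.22 grammar treats newlines like any other whitespace between clauses. A hedged illustration through tantivy's `QueryParser` (the index and the `title` field are assumed to exist; this snippet is not part of the diff):

use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::Index;

fn parse_across_lines(index: &Index, title: Field) -> tantivy::Result<()> {
    let query_parser = QueryParser::for_index(index, vec![title]);
    // Both forms should now parse to the same boolean query.
    let _single_line = query_parser.parse_query("title:diary AND title:girl")?;
    let _multi_line = query_parser.parse_query("title:diary\nAND title:girl")?;
    Ok(())
}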
@@ -48,7 +48,7 @@ mod bench {
|
||||
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
|
||||
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
|
||||
let index = Index::create_from_tempdir(schema_builder.build())?;
|
||||
let few_terms_data = vec!["INFO", "ERROR", "WARN", "DEBUG"];
|
||||
let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
|
||||
|
||||
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
|
||||
|
||||
@@ -85,7 +85,7 @@ mod bench {
|
||||
if cardinality == Cardinality::Sparse {
|
||||
doc_with_value /= 20;
|
||||
}
|
||||
let val_max = 1_000_000.0;
|
||||
let _val_max = 1_000_000.0;
|
||||
for _ in 0..doc_with_value {
|
||||
let val: f64 = rng.gen_range(0.0..1_000_000.0);
|
||||
let json = if rng.gen_bool(0.1) {
|
||||
@@ -290,6 +290,41 @@ mod bench {
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_many_with_top_hits_agg);
|
||||
|
||||
fn bench_aggregation_terms_many_with_top_hits_agg_card(
|
||||
b: &mut Bencher,
|
||||
cardinality: Cardinality,
|
||||
) {
|
||||
let index = get_test_index_bench(cardinality).unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_texts": {
|
||||
"terms": { "field": "text_many_terms" },
|
||||
"aggs": {
|
||||
"top_hits": { "top_hits":
|
||||
{
|
||||
"sort": [
|
||||
{ "score": "desc" }
|
||||
],
|
||||
"size": 2,
|
||||
"doc_value_fields": ["score_f64"]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = get_collector(agg_req);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
searcher.search(&AllQuery, &collector).unwrap()
|
||||
});
|
||||
}
|
||||
|
||||
bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);
|
||||
|
||||
fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
|
||||
|
||||
@@ -73,9 +73,9 @@ impl AggregationLimits {
|
||||
/// Create a new ResourceLimitGuard, that will release the memory when dropped.
|
||||
pub fn new_guard(&self) -> ResourceLimitGuard {
|
||||
ResourceLimitGuard {
|
||||
/// The counter which is shared between the aggregations for one request.
|
||||
// The counter which is shared between the aggregations for one request.
|
||||
memory_consumption: Arc::clone(&self.memory_consumption),
|
||||
/// The memory_limit in bytes
|
||||
// The memory_limit in bytes
|
||||
memory_limit: self.memory_limit,
|
||||
allocated_with_the_guard: 0,
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ use super::bucket::{
|
||||
};
|
||||
use super::metric::{
|
||||
AverageAggregation, CountAggregation, MaxAggregation, MinAggregation,
|
||||
PercentilesAggregationReq, StatsAggregation, SumAggregation,
|
||||
PercentilesAggregationReq, StatsAggregation, SumAggregation, TopHitsAggregation,
|
||||
};
|
||||
|
||||
/// The top-level aggregation request structure, which contains [`Aggregation`] and their user
|
||||
@@ -93,7 +93,12 @@ impl Aggregation {
|
||||
}
|
||||
|
||||
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
|
||||
fast_field_names.insert(self.agg.get_fast_field_name().to_string());
|
||||
fast_field_names.extend(
|
||||
self.agg
|
||||
.get_fast_field_names()
|
||||
.iter()
|
||||
.map(|s| s.to_string()),
|
||||
);
|
||||
fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
|
||||
}
|
||||
}
|
||||
@@ -147,23 +152,27 @@ pub enum AggregationVariants {
|
||||
/// Computes the sum of the extracted values.
|
||||
#[serde(rename = "percentiles")]
|
||||
Percentiles(PercentilesAggregationReq),
|
||||
/// Finds the top k values matching some order
|
||||
#[serde(rename = "top_hits")]
|
||||
TopHits(TopHitsAggregation),
|
||||
}
|
||||
|
||||
impl AggregationVariants {
|
||||
/// Returns the name of the field used by the aggregation.
|
||||
pub fn get_fast_field_name(&self) -> &str {
|
||||
/// Returns the name of the fields used by the aggregation.
|
||||
pub fn get_fast_field_names(&self) -> Vec<&str> {
|
||||
match self {
|
||||
AggregationVariants::Terms(terms) => terms.field.as_str(),
|
||||
AggregationVariants::Range(range) => range.field.as_str(),
|
||||
AggregationVariants::Histogram(histogram) => histogram.field.as_str(),
|
||||
AggregationVariants::DateHistogram(histogram) => histogram.field.as_str(),
|
||||
AggregationVariants::Average(avg) => avg.field_name(),
|
||||
AggregationVariants::Count(count) => count.field_name(),
|
||||
AggregationVariants::Max(max) => max.field_name(),
|
||||
AggregationVariants::Min(min) => min.field_name(),
|
||||
AggregationVariants::Stats(stats) => stats.field_name(),
|
||||
AggregationVariants::Sum(sum) => sum.field_name(),
|
||||
AggregationVariants::Percentiles(per) => per.field_name(),
|
||||
AggregationVariants::Terms(terms) => vec![terms.field.as_str()],
|
||||
AggregationVariants::Range(range) => vec![range.field.as_str()],
|
||||
AggregationVariants::Histogram(histogram) => vec![histogram.field.as_str()],
|
||||
AggregationVariants::DateHistogram(histogram) => vec![histogram.field.as_str()],
|
||||
AggregationVariants::Average(avg) => vec![avg.field_name()],
|
||||
AggregationVariants::Count(count) => vec![count.field_name()],
|
||||
AggregationVariants::Max(max) => vec![max.field_name()],
|
||||
AggregationVariants::Min(min) => vec![min.field_name()],
|
||||
AggregationVariants::Stats(stats) => vec![stats.field_name()],
|
||||
AggregationVariants::Sum(sum) => vec![sum.field_name()],
|
||||
AggregationVariants::Percentiles(per) => vec![per.field_name()],
|
||||
AggregationVariants::TopHits(top_hits) => top_hits.field_names(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
//! This will enhance the request tree with access to the fastfield and metadata.
|
||||
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType, StrColumn};
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
|
||||
use columnar::{Column, ColumnBlockAccessor, ColumnType, DynamicColumn, StrColumn};
|
||||
|
||||
use super::agg_limits::ResourceLimitGuard;
|
||||
use super::agg_req::{Aggregation, AggregationVariants, Aggregations};
|
||||
@@ -14,7 +17,7 @@ use super::metric::{
|
||||
use super::segment_agg_result::AggregationLimits;
|
||||
use super::VecWithNames;
|
||||
use crate::aggregation::{f64_to_fastfield_u64, Key};
|
||||
use crate::SegmentReader;
|
||||
use crate::{SegmentOrdinal, SegmentReader};
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct AggregationsWithAccessor {
|
||||
@@ -32,6 +35,7 @@ impl AggregationsWithAccessor {
|
||||
}
|
||||
|
||||
pub struct AggregationWithAccessor {
|
||||
pub(crate) segment_ordinal: SegmentOrdinal,
|
||||
/// In general there can be buckets without fast field access, e.g. buckets that are created
|
||||
/// based on search terms. That is not that case currently, but eventually this needs to be
|
||||
/// Option or moved.
|
||||
@@ -44,10 +48,16 @@ pub struct AggregationWithAccessor {
|
||||
pub(crate) limits: ResourceLimitGuard,
|
||||
pub(crate) column_block_accessor: ColumnBlockAccessor<u64>,
|
||||
/// Used for missing term aggregation, which checks all columns for existence.
|
||||
/// And also for `top_hits` aggregation, which may sort on multiple fields.
|
||||
/// By convention the missing aggregation is chosen, when this property is set
|
||||
/// (instead bein set in `agg`).
|
||||
/// If this needs to used by other aggregations, we need to refactor this.
|
||||
pub(crate) accessors: Vec<Column<u64>>,
|
||||
// NOTE: we can make all other aggregations use this instead of the `accessor` and `field_type`
|
||||
// (making them obsolete) But will it have a performance impact?
|
||||
pub(crate) accessors: Vec<(Column<u64>, ColumnType)>,
|
||||
/// Map field names to all associated column accessors.
|
||||
/// This field is used for `docvalue_fields`, which is currently only supported for `top_hits`.
|
||||
pub(crate) value_accessors: HashMap<String, Vec<DynamicColumn>>,
|
||||
pub(crate) agg: Aggregation,
|
||||
}
|
||||
|
||||
@@ -57,19 +67,55 @@ impl AggregationWithAccessor {
|
||||
agg: &Aggregation,
|
||||
sub_aggregation: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
limits: AggregationLimits,
|
||||
) -> crate::Result<Vec<AggregationWithAccessor>> {
|
||||
let add_agg_with_accessor = |accessor: Column<u64>,
|
||||
let mut agg = agg.clone();
|
||||
|
||||
let add_agg_with_accessor = |agg: &Aggregation,
|
||||
accessor: Column<u64>,
|
||||
column_type: ColumnType,
|
||||
aggs: &mut Vec<AggregationWithAccessor>|
|
||||
-> crate::Result<()> {
|
||||
let res = AggregationWithAccessor {
|
||||
segment_ordinal,
|
||||
accessor,
|
||||
accessors: Vec::new(),
|
||||
accessors: Default::default(),
|
||||
value_accessors: Default::default(),
|
||||
field_type: column_type,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
segment_ordinal,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
limits: limits.new_guard(),
|
||||
missing_value_for_accessor: None,
|
||||
str_dict_column: None,
|
||||
column_block_accessor: Default::default(),
|
||||
};
|
||||
aggs.push(res);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let add_agg_with_accessors = |agg: &Aggregation,
|
||||
accessors: Vec<(Column<u64>, ColumnType)>,
|
||||
aggs: &mut Vec<AggregationWithAccessor>,
|
||||
value_accessors: HashMap<String, Vec<DynamicColumn>>|
|
||||
-> crate::Result<()> {
|
||||
let (accessor, field_type) = accessors.first().expect("at least one accessor");
|
||||
let res = AggregationWithAccessor {
|
||||
segment_ordinal,
|
||||
// TODO: We should do away with the `accessor` field altogether
|
||||
accessor: accessor.clone(),
|
||||
value_accessors,
|
||||
field_type: *field_type,
|
||||
accessors,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
segment_ordinal,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
@@ -84,32 +130,36 @@ impl AggregationWithAccessor {
|
||||
|
||||
let mut res: Vec<AggregationWithAccessor> = Vec::new();
|
||||
use AggregationVariants::*;
|
||||
match &agg.agg {
|
||||
|
||||
match agg.agg {
|
||||
Range(RangeAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
Histogram(HistogramAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
DateHistogram(DateHistogramAggregationReq {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
// Only DateTime is supported for DateHistogram
|
||||
get_ff_reader(reader, field_name, Some(&[ColumnType::DateTime]))?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
Terms(TermsAggregation {
|
||||
field: field_name,
|
||||
missing,
|
||||
field: ref field_name,
|
||||
ref missing,
|
||||
..
|
||||
}) => {
|
||||
let str_dict_column = reader.fast_fields().str(field_name)?;
|
||||
@@ -119,9 +169,9 @@ impl AggregationWithAccessor {
|
||||
ColumnType::F64,
|
||||
ColumnType::Str,
|
||||
ColumnType::DateTime,
|
||||
ColumnType::Bool,
|
||||
ColumnType::IpAddr,
|
||||
// ColumnType::Bytes Unsupported
|
||||
// ColumnType::Bool Unsupported
|
||||
// ColumnType::IpAddr Unsupported
|
||||
];
|
||||
|
||||
// In case the column is empty we want the shim column to match the missing type
|
||||
@@ -162,24 +212,11 @@ impl AggregationWithAccessor {
|
||||
let column_and_types =
|
||||
get_all_ff_reader_or_empty(reader, field_name, None, fallback_type)?;
|
||||
|
||||
let accessors: Vec<Column> =
|
||||
column_and_types.iter().map(|(a, _)| a.clone()).collect();
|
||||
let agg_wit_acc = AggregationWithAccessor {
|
||||
missing_value_for_accessor: None,
|
||||
accessor: accessors[0].clone(),
|
||||
accessors,
|
||||
field_type: ColumnType::U64,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
str_dict_column: str_dict_column.clone(),
|
||||
limits: limits.new_guard(),
|
||||
column_block_accessor: Default::default(),
|
||||
};
|
||||
res.push(agg_wit_acc);
|
||||
let accessors = column_and_types
|
||||
.iter()
|
||||
.map(|c_t| (c_t.0.clone(), c_t.1))
|
||||
.collect();
|
||||
add_agg_with_accessors(&agg, accessors, &mut res, Default::default())?;
|
||||
}
|
||||
|
||||
for (accessor, column_type) in column_and_types {
|
||||
@@ -189,21 +226,25 @@ impl AggregationWithAccessor {
|
||||
missing.clone()
|
||||
};
|
||||
|
||||
let missing_value_for_accessor =
|
||||
if let Some(missing) = missing_value_term_agg.as_ref() {
|
||||
get_missing_val(column_type, missing, agg.agg.get_fast_field_name())?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let missing_value_for_accessor = if let Some(missing) =
|
||||
missing_value_term_agg.as_ref()
|
||||
{
|
||||
get_missing_val(column_type, missing, agg.agg.get_fast_field_names()[0])?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let agg = AggregationWithAccessor {
|
||||
segment_ordinal,
|
||||
missing_value_for_accessor,
|
||||
accessor,
|
||||
accessors: Vec::new(),
|
||||
accessors: Default::default(),
|
||||
value_accessors: Default::default(),
|
||||
field_type: column_type,
|
||||
sub_aggregation: get_aggs_with_segment_accessor_and_validate(
|
||||
sub_aggregation,
|
||||
reader,
|
||||
segment_ordinal,
|
||||
&limits,
|
||||
)?,
|
||||
agg: agg.clone(),
|
||||
@@ -215,34 +256,63 @@ impl AggregationWithAccessor {
|
||||
}
|
||||
}
|
||||
Average(AverageAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
})
|
||||
| Count(CountAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
})
|
||||
| Max(MaxAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
})
|
||||
| Min(MinAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
})
|
||||
| Stats(StatsAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
})
|
||||
| Sum(SumAggregation {
|
||||
field: field_name, ..
|
||||
field: ref field_name,
|
||||
..
|
||||
}) => {
|
||||
let (accessor, column_type) =
|
||||
get_ff_reader(reader, field_name, Some(get_numeric_or_date_column_types()))?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
Percentiles(percentiles) => {
|
||||
Percentiles(ref percentiles) => {
|
||||
let (accessor, column_type) = get_ff_reader(
|
||||
reader,
|
||||
percentiles.field_name(),
|
||||
Some(get_numeric_or_date_column_types()),
|
||||
)?;
|
||||
add_agg_with_accessor(accessor, column_type, &mut res)?;
|
||||
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
|
||||
}
|
||||
TopHits(ref mut top_hits) => {
|
||||
top_hits.validate_and_resolve_field_names(reader.fast_fields().columnar())?;
|
||||
let accessors: Vec<(Column<u64>, ColumnType)> = top_hits
|
||||
.field_names()
|
||||
.iter()
|
||||
.map(|field| {
|
||||
get_ff_reader(reader, field, Some(get_numeric_or_date_column_types()))
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
let value_accessors = top_hits
|
||||
.value_field_names()
|
||||
.iter()
|
||||
.map(|field_name| {
|
||||
Ok((
|
||||
field_name.to_string(),
|
||||
get_dynamic_columns(reader, field_name)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
add_agg_with_accessors(&agg, accessors, &mut res, value_accessors)?;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -284,6 +354,7 @@ fn get_numeric_or_date_column_types() -> &'static [ColumnType] {
|
||||
pub(crate) fn get_aggs_with_segment_accessor_and_validate(
|
||||
aggs: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
limits: &AggregationLimits,
|
||||
) -> crate::Result<AggregationsWithAccessor> {
|
||||
let mut aggss = Vec::new();
|
||||
@@ -292,6 +363,7 @@ pub(crate) fn get_aggs_with_segment_accessor_and_validate(
|
||||
agg,
|
||||
agg.sub_aggregation(),
|
||||
reader,
|
||||
segment_ordinal,
|
||||
limits.clone(),
|
||||
)?;
|
||||
for agg in aggs {
|
||||
@@ -321,6 +393,19 @@ fn get_ff_reader(
|
||||
Ok(ff_field_with_type)
|
||||
}
|
||||
|
||||
fn get_dynamic_columns(
|
||||
reader: &SegmentReader,
|
||||
field_name: &str,
|
||||
) -> crate::Result<Vec<columnar::DynamicColumn>> {
|
||||
let ff_fields = reader.fast_fields().dynamic_column_handles(field_name)?;
|
||||
let cols = ff_fields
|
||||
.iter()
|
||||
.map(|h| h.open())
|
||||
.collect::<io::Result<_>>()?;
|
||||
assert!(!ff_fields.is_empty(), "field {} not found", field_name);
|
||||
Ok(cols)
|
||||
}
|
||||
|
||||
/// Get all fast field reader or empty as default.
|
||||
///
|
||||
/// Is guaranteed to return at least one column.
|
||||
|
||||
@@ -8,7 +8,7 @@ use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::bucket::GetDocCount;
|
||||
use super::metric::{PercentilesMetricResult, SingleMetricResult, Stats};
|
||||
use super::metric::{PercentilesMetricResult, SingleMetricResult, Stats, TopHitsMetricResult};
|
||||
use super::{AggregationError, Key};
|
||||
use crate::TantivyError;
|
||||
|
||||
@@ -90,8 +90,10 @@ pub enum MetricResult {
|
||||
Stats(Stats),
|
||||
/// Sum metric result.
|
||||
Sum(SingleMetricResult),
|
||||
/// Sum metric result.
|
||||
/// Percentiles metric result.
|
||||
Percentiles(PercentilesMetricResult),
|
||||
/// Top hits metric result
|
||||
TopHits(TopHitsMetricResult),
|
||||
}
|
||||
|
||||
impl MetricResult {
|
||||
@@ -106,6 +108,9 @@ impl MetricResult {
|
||||
MetricResult::Percentiles(_) => Err(TantivyError::AggregationError(
|
||||
AggregationError::InvalidRequest("percentiles can't be used to order".to_string()),
|
||||
)),
|
||||
MetricResult::TopHits(_) => Err(TantivyError::AggregationError(
|
||||
AggregationError::InvalidRequest("top_hits can't be used to order".to_string()),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,12 +4,13 @@ use crate::aggregation::agg_req::{Aggregation, Aggregations};
|
||||
use crate::aggregation::agg_result::AggregationResults;
|
||||
use crate::aggregation::buf_collector::DOC_BLOCK_SIZE;
|
||||
use crate::aggregation::collector::AggregationCollector;
|
||||
use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
|
||||
use crate::aggregation::segment_agg_result::AggregationLimits;
|
||||
use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values_and_terms};
|
||||
use crate::aggregation::DistributedAggregationCollector;
|
||||
use crate::query::{AllQuery, TermQuery};
|
||||
use crate::schema::{IndexRecordOption, Schema, FAST};
|
||||
use crate::{Index, Term};
|
||||
use crate::{Index, IndexWriter, Term};
|
||||
|
||||
fn get_avg_req(field_name: &str) -> Aggregation {
|
||||
serde_json::from_value(json!({
|
||||
@@ -66,6 +67,22 @@ fn test_aggregation_flushing(
|
||||
}
|
||||
}
|
||||
},
|
||||
"top_hits_test":{
|
||||
"terms": {
|
||||
"field": "string_id"
|
||||
},
|
||||
"aggs": {
|
||||
"bucketsL2": {
|
||||
"top_hits": {
|
||||
"size": 2,
|
||||
"sort": [
|
||||
{ "score": "asc" }
|
||||
],
|
||||
"docvalue_fields": ["score"]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"histogram_test":{
|
||||
"histogram": {
|
||||
"field": "score",
|
||||
@@ -108,6 +125,16 @@ fn test_aggregation_flushing(
let searcher = reader.searcher();
|
||||
let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
// Test postcard roundtrip serialization
|
||||
let intermediate_agg_result_bytes = postcard::to_allocvec(&intermediate_agg_result).expect(
|
||||
"Postcard Serialization failed, flatten etc. is not supported in the intermediate \
|
||||
result",
|
||||
);
|
||||
let intermediate_agg_result: IntermediateAggregationResults =
|
||||
postcard::from_bytes(&intermediate_agg_result_bytes)
|
||||
.expect("Post deserialization failed");
intermediate_agg_result
|
||||
.into_final_result(agg_req, &Default::default())
|
||||
.unwrap()
|
||||
@@ -586,7 +613,10 @@ fn test_aggregation_on_json_object() {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color": "red"})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color": "red"})))
|
||||
.unwrap();
|
||||
@@ -614,12 +644,74 @@ fn test_aggregation_on_json_object() {
|
||||
&serde_json::json!({
|
||||
"jsonagg": {
|
||||
"buckets": [
|
||||
{"doc_count": 2, "key": "red"},
|
||||
{"doc_count": 1, "key": "blue"},
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_on_nested_json_object() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let json = schema_builder.add_json_field("json.blub", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color.dot": "red", "color": {"nested":"red"} })))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color.dot": "blue", "color": {"nested":"blue"} })))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"color.dot": "blue", "color": {"nested":"blue"} })))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
let reader = index.reader().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
|
||||
let agg: Aggregations = serde_json::from_value(json!({
|
||||
"jsonagg1": {
|
||||
"terms": {
|
||||
"field": "json\\.blub.color\\.dot",
|
||||
}
|
||||
},
|
||||
"jsonagg2": {
|
||||
"terms": {
|
||||
"field": "json\\.blub.color.nested",
|
||||
}
|
||||
}
|
||||
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let aggregation_collector = get_collector(agg);
|
||||
let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
|
||||
let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
|
||||
assert_eq!(
|
||||
&aggregation_res_json,
|
||||
&serde_json::json!({
|
||||
"jsonagg1": {
|
||||
"buckets": [
|
||||
{"doc_count": 2, "key": "blue"},
|
||||
{"doc_count": 1, "key": "red"}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
},
|
||||
"jsonagg2": {
|
||||
"buckets": [
|
||||
{"doc_count": 2, "key": "blue"},
|
||||
{"doc_count": 1, "key": "red"}
|
||||
],
|
||||
"doc_count_error_upper_bound": 0,
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
|
||||
})
|
||||
);
|
||||
}
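The escaping convention exercised by the test above, stated explicitly: in an aggregation field name a literal dot inside a JSON key is written as `\.`, while an unescaped dot descends one level into the nested object. A small sketch reusing the field names from the test (the aggregation names are invented):

    // "json\.blub"    -> the schema field literally named "json.blub"
    // ".color\.dot"   -> the JSON key "color.dot" (the dot is part of the key)
    // ".color.nested" -> the nested path color -> nested
    let agg: Aggregations = serde_json::from_value(json!({
        "by_dotted_key": { "terms": { "field": "json\\.blub.color\\.dot" } },
        "by_nested_key": { "terms": { "field": "json\\.blub.color.nested" } }
    }))
    .unwrap();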
@@ -630,7 +722,7 @@ fn test_aggregation_on_json_object_empty_columns() {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Empty column when accessing color
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"price": 10.0})))
|
||||
@@ -748,32 +840,41 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0, "mixed_price": 10.0})))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
// => Segment with all values text
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "blue"})))
|
||||
.add_document(doc!(json => json!({"mixed_type": "blue", "mixed_price": 5.0})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "blue", "mixed_price": 5.0})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "blue", "mixed_price": 5.0})))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
// => Segment with all boolean values
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": true})))
|
||||
.add_document(doc!(json => json!({"mixed_type": true, "mixed_price": "no_price"})))
|
||||
.unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
// => Segment with mixed values
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": "red"})))
|
||||
.add_document(doc!(json => json!({"mixed_type": "red", "mixed_price": 1.0})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": -20.5})))
|
||||
.add_document(doc!(json => json!({"mixed_type": "red", "mixed_price": 1.0})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": true})))
|
||||
.add_document(doc!(json => json!({"mixed_type": -20.5, "mixed_price": -20.5})))
|
||||
.unwrap();
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": true, "mixed_price": "no_price"})))
|
||||
.unwrap();
|
||||
|
||||
index_writer.commit().unwrap();
|
||||
@@ -787,7 +888,7 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
"order": { "min_price": "desc" }
|
||||
},
|
||||
"aggs": {
|
||||
"min_price": { "min": { "field": "json.mixed_type" } }
|
||||
"min_price": { "min": { "field": "json.mixed_price" } }
|
||||
}
|
||||
},
|
||||
"rangeagg": {
|
||||
@@ -811,6 +912,7 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
|
||||
let aggregation_results = searcher.search(&AllQuery, &aggregation_collector).unwrap();
|
||||
let aggregation_res_json = serde_json::to_value(aggregation_results).unwrap();
|
||||
use pretty_assertions::assert_eq;
|
||||
assert_eq!(
|
||||
&aggregation_res_json,
|
||||
&serde_json::json!({
|
||||
@@ -825,10 +927,10 @@ fn test_aggregation_on_json_object_mixed_types() {
|
||||
"termagg": {
|
||||
"buckets": [
|
||||
{ "doc_count": 1, "key": 10.0, "min_price": { "value": 10.0 } },
|
||||
{ "doc_count": 3, "key": "blue", "min_price": { "value": 5.0 } },
|
||||
{ "doc_count": 2, "key": "red", "min_price": { "value": 1.0 } },
|
||||
{ "doc_count": 1, "key": -20.5, "min_price": { "value": -20.5 } },
|
||||
// TODO bool is also not yet handled in aggregation
|
||||
{ "doc_count": 1, "key": "blue", "min_price": { "value": null } },
|
||||
{ "doc_count": 1, "key": "red", "min_price": { "value": null } },
|
||||
{ "doc_count": 2, "key": 1.0, "key_as_string": "true", "min_price": { "value": null } },
|
||||
],
|
||||
"sum_other_doc_count": 0
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{HistogramAggregation, HistogramBounds};
|
||||
use crate::aggregation::AggregationError;
|
||||
use crate::aggregation::*;
|
||||
|
||||
/// DateHistogramAggregation is similar to `HistogramAggregation`, but it can only be used with date
|
||||
/// type.
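A minimal `date_histogram` request sketch for the aggregation documented above, assuming the `fixed_interval` parameter used by the tests later in this file (the `date` field name comes from the test schema):

    let agg: Aggregations = serde_json::from_value(json!({
        "sales_over_time": {
            "date_histogram": { "field": "date", "fixed_interval": "1d" }
        }
    }))
    .unwrap();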
@@ -252,7 +252,7 @@ pub mod tests {
|
||||
use crate::aggregation::tests::exec_request;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::schema::{Schema, FAST, STRING};
|
||||
use crate::Index;
|
||||
use crate::{Index, IndexWriter, TantivyDocument};
|
||||
|
||||
#[test]
|
||||
fn test_parse_into_millisecs() {
|
||||
@@ -307,6 +307,7 @@ pub mod tests {
|
||||
) -> crate::Result<Index> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_date_field("date", FAST);
|
||||
schema_builder.add_json_field("mixed", FAST);
|
||||
schema_builder.add_text_field("text", FAST | STRING);
|
||||
schema_builder.add_text_field("text2", FAST | STRING);
|
||||
let schema = schema_builder.build();
|
||||
@@ -316,7 +317,7 @@ pub mod tests {
|
||||
index_writer.set_merge_policy(Box::new(NoMergePolicy));
|
||||
for values in segment_and_docs {
|
||||
for doc_str in values {
|
||||
let doc = schema.parse_document(doc_str)?;
|
||||
let doc = TantivyDocument::parse_json(&schema, doc_str)?;
|
||||
index_writer.add_document(doc)?;
|
||||
}
|
||||
// writing the segment
|
||||
@@ -328,7 +329,7 @@ pub mod tests {
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
if segment_ids.len() > 1 {
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests()?;
|
||||
index_writer.merge(&segment_ids).wait()?;
|
||||
index_writer.wait_merging_threads()?;
|
||||
}
|
||||
@@ -351,8 +352,10 @@ pub mod tests {
|
||||
let docs = vec![
|
||||
vec![r#"{ "date": "2015-01-01T12:10:30Z", "text": "aaa" }"#],
|
||||
vec![r#"{ "date": "2015-01-01T11:11:30Z", "text": "bbb" }"#],
|
||||
vec![r#"{ "date": "2015-01-01T11:11:30Z", "text": "bbb" }"#],
|
||||
vec![r#"{ "date": "2015-01-02T00:00:00Z", "text": "bbb" }"#],
|
||||
vec![r#"{ "date": "2015-01-06T00:00:00Z", "text": "ccc" }"#],
|
||||
vec![r#"{ "date": "2015-01-06T00:00:00Z", "text": "ccc" }"#],
|
||||
];
|
||||
let index = get_test_index_from_docs(merge_segments, &docs).unwrap();
|
||||
|
||||
@@ -381,7 +384,7 @@ pub mod tests {
|
||||
{
|
||||
"key_as_string" : "2015-01-01T00:00:00Z",
|
||||
"key" : 1420070400000.0,
|
||||
"doc_count" : 4
|
||||
"doc_count" : 6
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -419,15 +422,15 @@ pub mod tests {
|
||||
{
|
||||
"key_as_string" : "2015-01-01T00:00:00Z",
|
||||
"key" : 1420070400000.0,
|
||||
"doc_count" : 4,
|
||||
"doc_count" : 6,
|
||||
"texts": {
|
||||
"buckets": [
|
||||
{
|
||||
"doc_count": 2,
|
||||
"doc_count": 3,
|
||||
"key": "bbb"
|
||||
},
|
||||
{
|
||||
"doc_count": 1,
|
||||
"doc_count": 2,
|
||||
"key": "ccc"
|
||||
},
|
||||
{
|
||||
@@ -466,7 +469,7 @@ pub mod tests {
|
||||
"sales_over_time": {
|
||||
"buckets": [
|
||||
{
|
||||
"doc_count": 2,
|
||||
"doc_count": 3,
|
||||
"key": 1420070400000.0,
|
||||
"key_as_string": "2015-01-01T00:00:00Z"
|
||||
},
|
||||
@@ -491,7 +494,7 @@ pub mod tests {
|
||||
"key_as_string": "2015-01-05T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"doc_count": 1,
|
||||
"doc_count": 2,
|
||||
"key": 1420502400000.0,
|
||||
"key_as_string": "2015-01-06T00:00:00Z"
|
||||
}
|
||||
@@ -532,7 +535,7 @@ pub mod tests {
|
||||
"key_as_string": "2014-12-31T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"doc_count": 2,
|
||||
"doc_count": 3,
|
||||
"key": 1420070400000.0,
|
||||
"key_as_string": "2015-01-01T00:00:00Z"
|
||||
},
|
||||
@@ -557,7 +560,7 @@ pub mod tests {
|
||||
"key_as_string": "2015-01-05T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"doc_count": 1,
|
||||
"doc_count": 2,
|
||||
"key": 1420502400000.0,
|
||||
"key_as_string": "2015-01-06T00:00:00Z"
|
||||
},
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::fmt::Display;
|
||||
|
||||
use columnar::ColumnType;
|
||||
use itertools::Itertools;
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tantivy_bitpacker::minmax;
|
||||
@@ -18,9 +15,9 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateHistogramBucketEntry,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
|
||||
build_segment_agg_collector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::{f64_from_fastfield_u64, format_date};
|
||||
use crate::aggregation::*;
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Histogram is a bucket aggregation, where buckets are created dynamically for given `interval`.
|
||||
@@ -73,6 +70,7 @@ pub struct HistogramAggregation {
|
||||
pub field: String,
|
||||
/// The interval to chunk your data range. Each bucket spans a value range of [0..interval).
|
||||
/// Must be a positive value.
|
||||
#[serde(deserialize_with = "deserialize_f64")]
|
||||
pub interval: f64,
|
||||
/// The interval implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
/// 1))`.
|
||||
@@ -85,6 +83,7 @@ pub struct HistogramAggregation {
|
||||
/// fall into the buckets with the keys 0 and 10.
/// With offset 5 and interval 10, they would both fall into the bucket with the key 5 and the
/// range [5..15) (a request sketch follows this hunk).
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
pub offset: Option<f64>,
|
||||
/// The minimum number of documents in a bucket to be returned. Defaults to 0.
|
||||
pub min_doc_count: Option<u64>,
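The request sketch referenced in the doc comment above: with `interval: 10.0` and `offset: 5.0`, bucket boundaries land on 5, 15, 25, and so on. The `score` field name is taken from the tests in this module; this is only a sketch of the request shape, not part of the change itself.

    let agg: Aggregations = serde_json::from_value(json!({
        "myhisto": {
            "histogram": { "field": "score", "interval": 10.0, "offset": 5.0 }
        }
    }))
    .unwrap();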
@@ -308,7 +307,10 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &bucket_agg_accessor.accessor);
|
||||
|
||||
for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
|
||||
for (doc, val) in bucket_agg_accessor
|
||||
.column_block_accessor
|
||||
.iter_docid_vals(docs, &bucket_agg_accessor.accessor)
|
||||
{
|
||||
let val = self.f64_from_fastfield_u64(val);
|
||||
|
||||
let bucket_pos = get_bucket_pos(val);
|
||||
@@ -595,11 +597,12 @@ mod tests {
|
||||
use serde_json::Value;
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::agg_result::AggregationResults;
|
||||
use crate::aggregation::tests::{
|
||||
exec_request, exec_request_with_query, exec_request_with_query_and_memory_limit,
|
||||
get_test_index_2_segments, get_test_index_from_values, get_test_index_with_num_docs,
|
||||
};
|
||||
use crate::query::AllQuery;
|
||||
|
||||
#[test]
|
||||
fn histogram_test_crooked_values() -> crate::Result<()> {
|
||||
@@ -1351,6 +1354,35 @@ mod tests {
|
||||
})
|
||||
);
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_aggregation_histogram_empty_index() -> crate::Result<()> {
|
||||
// test index without segments
|
||||
let values = vec![];
|
||||
|
||||
let index = get_test_index_from_values(false, &values)?;
|
||||
|
||||
let agg_req_1: Aggregations = serde_json::from_value(json!({
|
||||
"myhisto": {
|
||||
"histogram": {
|
||||
"field": "score",
|
||||
"interval": 10.0
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, Default::default());
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
|
||||
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
// Make sure the result structure is correct
|
||||
assert_eq!(res["myhisto"]["buckets"].as_array().unwrap().len(), 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Range;
|
||||
|
||||
use columnar::{ColumnType, MonotonicallyMappableToU64};
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -14,9 +13,7 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
build_segment_agg_collector, SegmentAggregationCollector,
|
||||
};
|
||||
use crate::aggregation::{
|
||||
f64_from_fastfield_u64, f64_to_fastfield_u64, format_date, Key, SerializedKey,
|
||||
};
|
||||
use crate::aggregation::*;
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Provide user-defined buckets to aggregate on.
|
||||
@@ -72,11 +69,19 @@ pub struct RangeAggregationRange {
|
||||
pub key: Option<String>,
|
||||
/// The from range value, which is inclusive in the range.
|
||||
/// `None` equals to an open ended interval.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
#[serde(
|
||||
skip_serializing_if = "Option::is_none",
|
||||
default,
|
||||
deserialize_with = "deserialize_option_f64"
|
||||
)]
|
||||
pub from: Option<f64>,
|
||||
/// The to range value, which is not inclusive in the range.
|
||||
/// `None` equals to an open ended interval.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
#[serde(
|
||||
skip_serializing_if = "Option::is_none",
|
||||
default,
|
||||
deserialize_with = "deserialize_option_f64"
|
||||
)]
|
||||
pub to: Option<f64>,
|
||||
}
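A range request sketch matching the `from`/`to` semantics above; either bound may be omitted for an open-ended interval, and with the `deserialize_option_f64` attributes a bound may also arrive as a JSON string. The `score` field name is assumed from the tests:

    let agg: Aggregations = serde_json::from_value(json!({
        "rangeagg": {
            "range": {
                "field": "score",
                "ranges": [
                    { "to": 3.0 },                // (-inf, 3)
                    { "from": 3.0, "to": "7.0" }, // [3, 7), upper bound given as a string
                    { "from": 7.0 }               // [7, +inf)
                ]
            }
        }
    }))
    .unwrap();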
@@ -230,7 +235,10 @@ impl SegmentAggregationCollector for SegmentRangeCollector {
|
||||
.column_block_accessor
|
||||
.fetch_block(docs, &bucket_agg_accessor.accessor);
|
||||
|
||||
for (doc, val) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
|
||||
for (doc, val) in bucket_agg_accessor
|
||||
.column_block_accessor
|
||||
.iter_docid_vals(docs, &bucket_agg_accessor.accessor)
|
||||
{
|
||||
let bucket_pos = self.get_bucket_pos(val);
|
||||
|
||||
let bucket = &mut self.buckets[bucket_pos];
|
||||
@@ -441,7 +449,6 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &ColumnType) -> crate
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use columnar::MonotonicallyMappableToU64;
|
||||
use serde_json::Value;
|
||||
|
||||
use super::*;
|
||||
@@ -450,7 +457,6 @@ mod tests {
|
||||
exec_request, exec_request_with_query, get_test_index_2_segments,
|
||||
get_test_index_with_num_docs,
|
||||
};
|
||||
use crate::aggregation::AggregationLimits;
|
||||
|
||||
pub fn get_collector_from_ranges(
|
||||
ranges: Vec<RangeAggregationRange>,
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::fmt::Debug;
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
use columnar::{BytesColumn, ColumnType, MonotonicallyMappableToU64, StrColumn};
|
||||
use columnar::column_values::CompactSpaceU64Accessor;
|
||||
use columnar::{
|
||||
BytesColumn, ColumnType, MonotonicallyMappableToU128, MonotonicallyMappableToU64, StrColumn,
|
||||
};
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -99,23 +103,14 @@ pub struct TermsAggregation {
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub size: Option<u32>,
|
||||
|
||||
/// Unused by tantivy.
|
||||
///
|
||||
/// Since tantivy doesn't know shards, this parameter is merely there to be used by consumers
|
||||
/// of tantivy. shard_size is the number of terms returned by each shard.
|
||||
/// The default value in elasticsearch is size * 1.5 + 10.
|
||||
///
|
||||
/// Should never be smaller than size.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
#[serde(alias = "shard_size")]
|
||||
pub split_size: Option<u32>,
|
||||
|
||||
/// The get more accurate results, we fetch more than `size` from each segment.
|
||||
/// To get more accurate results, we fetch more than `size` from each segment.
|
||||
///
|
||||
/// Increasing this value will increase the cost for more accuracy.
///
|
||||
/// Defaults to 10 * size.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
#[serde(alias = "shard_size")]
|
||||
#[serde(alias = "split_size")]
|
||||
pub segment_size: Option<u32>,
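A terms request sketch using `segment_size` as documented above (the per-segment fetch budget; `shard_size` and `split_size` are accepted as aliases per the serde attributes). The `string_id` field name is assumed from the tests; this is only an illustrative request shape:

    let agg: Aggregations = serde_json::from_value(json!({
        "my_terms": {
            "terms": { "field": "string_id", "size": 5, "segment_size": 50 }
        }
    }))
    .unwrap();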
/// If you set the `show_term_doc_count_error` parameter to true, the terms aggregation will
|
||||
@@ -256,7 +251,7 @@ pub struct SegmentTermCollector {
|
||||
term_buckets: TermBuckets,
|
||||
req: TermsAggregationInternal,
|
||||
blueprint: Option<Box<dyn SegmentAggregationCollector>>,
|
||||
field_type: ColumnType,
|
||||
column_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
}
|
||||
|
||||
@@ -315,7 +310,10 @@ impl SegmentAggregationCollector for SegmentTermCollector {
|
||||
}
|
||||
// has subagg
|
||||
if let Some(blueprint) = self.blueprint.as_ref() {
|
||||
for (doc, term_id) in bucket_agg_accessor.column_block_accessor.iter_docid_vals() {
|
||||
for (doc, term_id) in bucket_agg_accessor
|
||||
.column_block_accessor
|
||||
.iter_docid_vals(docs, &bucket_agg_accessor.accessor)
|
||||
{
|
||||
let sub_aggregations = self
|
||||
.term_buckets
|
||||
.sub_aggs
|
||||
@@ -355,7 +353,7 @@ impl SegmentTermCollector {
|
||||
field_type: ColumnType,
|
||||
accessor_idx: usize,
|
||||
) -> crate::Result<Self> {
|
||||
if field_type == ColumnType::Bytes || field_type == ColumnType::Bool {
|
||||
if field_type == ColumnType::Bytes {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"terms aggregation is not supported for column type {:?}",
|
||||
field_type
|
||||
@@ -389,7 +387,7 @@ impl SegmentTermCollector {
|
||||
req: TermsAggregationInternal::from_req(req),
|
||||
term_buckets,
|
||||
blueprint,
|
||||
field_type,
|
||||
column_type: field_type,
|
||||
accessor_idx,
|
||||
})
|
||||
}
|
||||
@@ -466,7 +464,7 @@ impl SegmentTermCollector {
|
||||
Ok(intermediate_entry)
|
||||
};
|
||||
|
||||
if self.field_type == ColumnType::Str {
|
||||
if self.column_type == ColumnType::Str {
|
||||
let term_dict = agg_with_accessor
|
||||
.str_dict_column
|
||||
.as_ref()
|
||||
@@ -531,28 +529,55 @@ impl SegmentTermCollector {
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if self.field_type == ColumnType::DateTime {
|
||||
} else if self.column_type == ColumnType::DateTime {
|
||||
for (val, doc_count) in entries {
|
||||
let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
|
||||
let val = i64::from_u64(val);
|
||||
let date = format_date(val)?;
|
||||
dict.insert(IntermediateKey::Str(date), intermediate_entry);
|
||||
}
|
||||
} else if self.column_type == ColumnType::Bool {
|
||||
for (val, doc_count) in entries {
|
||||
let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
|
||||
let val = bool::from_u64(val);
|
||||
dict.insert(IntermediateKey::Bool(val), intermediate_entry);
|
||||
}
|
||||
} else if self.column_type == ColumnType::IpAddr {
|
||||
let compact_space_accessor = agg_with_accessor
|
||||
.accessor
|
||||
.values
|
||||
.clone()
|
||||
.downcast_arc::<CompactSpaceU64Accessor>()
|
||||
.map_err(|_| {
|
||||
TantivyError::AggregationError(
|
||||
crate::aggregation::AggregationError::InternalError(
|
||||
"Type mismatch: Could not downcast to CompactSpaceU64Accessor"
|
||||
.to_string(),
|
||||
),
|
||||
)
|
||||
})?;
|
||||
|
||||
for (val, doc_count) in entries {
|
||||
let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
|
||||
let val: u128 = compact_space_accessor.compact_to_u128(val as u32);
|
||||
let val = Ipv6Addr::from_u128(val);
|
||||
dict.insert(IntermediateKey::IpAddr(val), intermediate_entry);
|
||||
}
|
||||
} else {
|
||||
for (val, doc_count) in entries {
|
||||
let intermediate_entry = into_intermediate_bucket_entry(val, doc_count)?;
|
||||
let val = f64_from_fastfield_u64(val, &self.field_type);
|
||||
let val = f64_from_fastfield_u64(val, &self.column_type);
|
||||
dict.insert(IntermediateKey::F64(val), intermediate_entry);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(IntermediateBucketResult::Terms(
|
||||
IntermediateTermBucketResult {
|
||||
Ok(IntermediateBucketResult::Terms {
|
||||
buckets: IntermediateTermBucketResult {
|
||||
entries: dict,
|
||||
sum_other_doc_count,
|
||||
doc_count_error_upper_bound: term_doc_count_before_cutoff,
|
||||
},
|
||||
))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -590,6 +615,9 @@ pub(crate) fn cut_off_buckets<T: GetDocCount + Debug>(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::net::IpAddr;
|
||||
use std::str::FromStr;
|
||||
|
||||
use common::DateTime;
|
||||
use time::{Date, Month};
|
||||
|
||||
@@ -600,8 +628,8 @@ mod tests {
|
||||
};
|
||||
use crate::aggregation::AggregationLimits;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::schema::{Schema, FAST, STRING};
|
||||
use crate::Index;
|
||||
use crate::schema::{IntoIpv6Addr, Schema, FAST, STRING};
|
||||
use crate::{Index, IndexWriter};
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_test_single_segment() -> crate::Result<()> {
|
||||
@@ -1182,9 +1210,9 @@ mod tests {
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["key"], "terma");
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 4);
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termc");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "termb");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 0);
|
||||
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termb");
|
||||
assert_eq!(res["my_texts"]["buckets"][2]["key"], "termc");
|
||||
assert_eq!(res["my_texts"]["buckets"][2]["doc_count"], 0);
|
||||
assert_eq!(res["my_texts"]["sum_other_doc_count"], 0);
|
||||
assert_eq!(res["my_texts"]["doc_count_error_upper_bound"], 0);
|
||||
@@ -1365,7 +1393,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_different_tokenizer_on_ff_test() -> crate::Result<()> {
|
||||
let terms = vec!["Hello Hello", "Hallo Hallo"];
|
||||
let terms = vec!["Hello Hello", "Hallo Hallo", "Hallo Hallo"];
|
||||
|
||||
let index = get_test_index_from_terms(true, &[terms])?;
|
||||
|
||||
@@ -1383,7 +1411,7 @@ mod tests {
|
||||
println!("{}", serde_json::to_string_pretty(&res).unwrap());
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["key"], "Hallo Hallo");
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 1);
|
||||
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);
|
||||
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["key"], "Hello Hello");
|
||||
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 1);
|
||||
@@ -1473,7 +1501,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with empty json
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
@@ -1894,4 +1922,80 @@ mod tests {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_bool() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_bool_field("bool_field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
|
||||
writer.add_document(doc!(field=>true))?;
|
||||
writer.add_document(doc!(field=>false))?;
|
||||
writer.add_document(doc!(field=>true))?;
|
||||
writer.commit()?;
|
||||
}
|
||||
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_bool": {
|
||||
"terms": {
|
||||
"field": "bool_field"
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["key"], 1.0);
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["key_as_string"], "true");
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["doc_count"], 2);
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["key"], 0.0);
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["key_as_string"], "false");
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["doc_count"], 1);
|
||||
assert_eq!(res["my_bool"]["buckets"][2]["key"], serde_json::Value::Null);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_ip_addr() -> crate::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let field = schema_builder.add_ip_addr_field("ip_field", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut writer = index.writer_with_num_threads(1, 15_000_000)?;
|
||||
// IpV6 loopback
|
||||
writer.add_document(doc!(field=>IpAddr::from_str("::1").unwrap().into_ipv6_addr()))?;
|
||||
writer.add_document(doc!(field=>IpAddr::from_str("::1").unwrap().into_ipv6_addr()))?;
|
||||
// IpV4
|
||||
writer.add_document(
|
||||
doc!(field=>IpAddr::from_str("127.0.0.1").unwrap().into_ipv6_addr()),
|
||||
)?;
|
||||
writer.commit()?;
|
||||
}
|
||||
|
||||
let agg_req: Aggregations = serde_json::from_value(json!({
|
||||
"my_bool": {
|
||||
"terms": {
|
||||
"field": "ip_field"
|
||||
},
|
||||
}
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
// print as json
|
||||
// println!("{}", serde_json::to_string_pretty(&res).unwrap());
|
||||
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["key"], "::1");
|
||||
assert_eq!(res["my_bool"]["buckets"][0]["doc_count"], 2);
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["key"], "127.0.0.1");
|
||||
assert_eq!(res["my_bool"]["buckets"][1]["doc_count"], 1);
|
||||
assert_eq!(res["my_bool"]["buckets"][2]["key"], serde_json::Value::Null);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,11 +73,13 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
|
||||
entries.insert(missing.into(), missing_entry);
|
||||
|
||||
let bucket = IntermediateBucketResult::Terms(IntermediateTermBucketResult {
|
||||
entries,
|
||||
sum_other_doc_count: 0,
|
||||
doc_count_error_upper_bound: 0,
|
||||
});
|
||||
let bucket = IntermediateBucketResult::Terms {
|
||||
buckets: IntermediateTermBucketResult {
|
||||
entries,
|
||||
sum_other_doc_count: 0,
|
||||
doc_count_error_upper_bound: 0,
|
||||
},
|
||||
};
|
||||
|
||||
results.push(name, IntermediateAggregationResult::Bucket(bucket))?;
|
||||
|
||||
@@ -90,7 +92,10 @@ impl SegmentAggregationCollector for TermMissingAgg {
|
||||
agg_with_accessor: &mut AggregationsWithAccessor,
|
||||
) -> crate::Result<()> {
|
||||
let agg = &mut agg_with_accessor.aggs.values[self.accessor_idx];
|
||||
let has_value = agg.accessors.iter().any(|acc| acc.index.has_value(doc));
|
||||
let has_value = agg
|
||||
.accessors
|
||||
.iter()
|
||||
.any(|(acc, _)| acc.index.has_value(doc));
|
||||
if !has_value {
|
||||
self.missing_count += 1;
|
||||
if let Some(sub_agg) = self.sub_agg.as_mut() {
|
||||
@@ -117,7 +122,7 @@ mod tests {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::tests::exec_request_with_query;
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::Index;
|
||||
use crate::{Index, IndexWriter};
|
||||
|
||||
#[test]
|
||||
fn terms_aggregation_missing_mixed_type_mult_seg_sub_agg() -> crate::Result<()> {
|
||||
@@ -126,7 +131,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(score => 1.0, json => json!({"mixed_type": 10.0})))
|
||||
@@ -186,7 +191,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer.add_document(doc!(score => 1.0, json => json!({"mixed_type": 10.0})))?;
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
@@ -231,7 +236,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
index_writer.commit().unwrap();
|
||||
@@ -278,7 +283,7 @@ mod tests {
|
||||
let score = schema_builder.add_f64_field("score", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
index_writer.add_document(doc!(score => 5.0))?;
|
||||
@@ -323,7 +328,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
@@ -385,7 +390,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
@@ -427,7 +432,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with all values numeric
|
||||
index_writer
|
||||
.add_document(doc!(json => json!({"mixed_type": 10.0})))
|
||||
|
||||
@@ -8,7 +8,7 @@ use super::segment_agg_result::{
|
||||
};
|
||||
use crate::aggregation::agg_req_with_accessor::get_aggs_with_segment_accessor_and_validate;
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::{DocId, SegmentReader, TantivyError};
|
||||
use crate::{DocId, SegmentOrdinal, SegmentReader, TantivyError};
|
||||
|
||||
/// The default max bucket count, before the aggregation fails.
|
||||
pub const DEFAULT_BUCKET_LIMIT: u32 = 65000;
|
||||
@@ -64,10 +64,15 @@ impl Collector for DistributedAggregationCollector {
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
_segment_local_id: crate::SegmentOrdinal,
|
||||
segment_local_id: crate::SegmentOrdinal,
|
||||
reader: &crate::SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(
|
||||
&self.agg,
|
||||
reader,
|
||||
segment_local_id,
|
||||
&self.limits,
|
||||
)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
@@ -89,10 +94,15 @@ impl Collector for AggregationCollector {
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
_segment_local_id: crate::SegmentOrdinal,
|
||||
segment_local_id: crate::SegmentOrdinal,
|
||||
reader: &crate::SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader, &self.limits)
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(
|
||||
&self.agg,
|
||||
reader,
|
||||
segment_local_id,
|
||||
&self.limits,
|
||||
)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
@@ -135,10 +145,11 @@ impl AggregationSegmentCollector {
|
||||
pub fn from_agg_req_and_reader(
|
||||
agg: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
segment_ordinal: SegmentOrdinal,
|
||||
limits: &AggregationLimits,
|
||||
) -> crate::Result<Self> {
|
||||
let mut aggs_with_accessor =
|
||||
get_aggs_with_segment_accessor_and_validate(agg, reader, limits)?;
|
||||
get_aggs_with_segment_accessor_and_validate(agg, reader, segment_ordinal, limits)?;
|
||||
let result =
|
||||
BufAggregationCollector::new(build_segment_agg_collector(&mut aggs_with_accessor)?);
|
||||
Ok(AggregationSegmentCollector {
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::hash::Hash;
|
||||
use std::net::Ipv6Addr;
|
||||
|
||||
use columnar::ColumnType;
|
||||
use itertools::Itertools;
|
||||
@@ -19,7 +20,7 @@ use super::bucket::{
|
||||
};
|
||||
use super::metric::{
|
||||
IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
|
||||
IntermediateSum, PercentilesCollector,
|
||||
IntermediateSum, PercentilesCollector, TopHitsTopNComputer,
|
||||
};
|
||||
use super::segment_agg_result::AggregationLimits;
|
||||
use super::{format_date, AggregationError, Key, SerializedKey};
|
||||
@@ -41,6 +42,10 @@ pub struct IntermediateAggregationResults {
|
||||
/// This might seem redundant with `Key`, but the point is to have a different
|
||||
/// Serialize implementation.
|
||||
pub enum IntermediateKey {
|
||||
/// Ip Addr key
|
||||
IpAddr(Ipv6Addr),
|
||||
/// Bool key
|
||||
Bool(bool),
|
||||
/// String key
|
||||
Str(String),
|
||||
/// `f64` key
|
||||
@@ -58,7 +63,16 @@ impl From<IntermediateKey> for Key {
|
||||
fn from(value: IntermediateKey) -> Self {
|
||||
match value {
|
||||
IntermediateKey::Str(s) => Self::Str(s),
|
||||
IntermediateKey::IpAddr(s) => {
|
||||
// Prefer to use the IPv4 representation if possible
|
||||
if let Some(ip) = s.to_ipv4_mapped() {
|
||||
Self::Str(ip.to_string())
|
||||
} else {
|
||||
Self::Str(s.to_string())
|
||||
}
|
||||
}
|
||||
IntermediateKey::F64(f) => Self::F64(f),
|
||||
IntermediateKey::Bool(f) => Self::F64(f as u64 as f64),
|
||||
}
|
||||
}
|
||||
}
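A small illustration of the IPv4-mapped preference implemented above; `to_ipv4_mapped` is standard-library behavior, everything else is just a sketch and not part of the change:

    use std::net::Ipv6Addr;
    use std::str::FromStr;

    // An IPv4-mapped address renders back as dotted IPv4, which is what ends up
    // as the bucket key for such values.
    let mapped = Ipv6Addr::from_str("::ffff:127.0.0.1").unwrap();
    assert_eq!(mapped.to_ipv4_mapped().unwrap().to_string(), "127.0.0.1");
    // The IPv6 loopback has no IPv4 mapping, so the full IPv6 form is kept.
    assert_eq!(Ipv6Addr::from_str("::1").unwrap().to_ipv4_mapped(), None);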
@@ -71,6 +85,8 @@ impl std::hash::Hash for IntermediateKey {
|
||||
match self {
|
||||
IntermediateKey::Str(text) => text.hash(state),
|
||||
IntermediateKey::F64(val) => val.to_bits().hash(state),
|
||||
IntermediateKey::Bool(val) => val.hash(state),
|
||||
IntermediateKey::IpAddr(val) => val.hash(state),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -166,9 +182,9 @@ impl IntermediateAggregationResults {
|
||||
pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult {
|
||||
use AggregationVariants::*;
|
||||
match req.agg {
|
||||
Terms(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Terms(
|
||||
Default::default(),
|
||||
)),
|
||||
Terms(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Terms {
|
||||
buckets: Default::default(),
|
||||
}),
|
||||
Range(_) => IntermediateAggregationResult::Bucket(IntermediateBucketResult::Range(
|
||||
Default::default(),
|
||||
)),
|
||||
@@ -205,6 +221,9 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
|
||||
Percentiles(_) => IntermediateAggregationResult::Metric(
|
||||
IntermediateMetricResult::Percentiles(PercentilesCollector::default()),
|
||||
),
|
||||
TopHits(ref req) => IntermediateAggregationResult::Metric(
|
||||
IntermediateMetricResult::TopHits(TopHitsTopNComputer::new(req.clone())),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -265,6 +284,8 @@ pub enum IntermediateMetricResult {
|
||||
Stats(IntermediateStats),
|
||||
/// Intermediate sum result.
|
||||
Sum(IntermediateSum),
|
||||
/// Intermediate top_hits result
|
||||
TopHits(TopHitsTopNComputer),
|
||||
}
|
||||
|
||||
impl IntermediateMetricResult {
|
||||
@@ -292,9 +313,13 @@ impl IntermediateMetricResult {
|
||||
percentiles
|
||||
.into_final_result(req.agg.as_percentile().expect("unexpected metric type")),
|
||||
),
|
||||
IntermediateMetricResult::TopHits(top_hits) => {
|
||||
MetricResult::TopHits(top_hits.into_final_result())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this is our top-of-the-chain fruit merge mech
|
||||
fn merge_fruits(&mut self, other: IntermediateMetricResult) -> crate::Result<()> {
|
||||
match (self, other) {
|
||||
(
|
||||
@@ -330,6 +355,9 @@ impl IntermediateMetricResult {
|
||||
) => {
|
||||
left.merge_fruits(right)?;
|
||||
}
|
||||
(IntermediateMetricResult::TopHits(left), IntermediateMetricResult::TopHits(right)) => {
|
||||
left.merge_fruits(right)?;
|
||||
}
|
||||
_ => {
|
||||
panic!("incompatible fruit types in tree or missing merge_fruits handler");
|
||||
}
|
||||
@@ -351,11 +379,14 @@ pub enum IntermediateBucketResult {
|
||||
Histogram {
|
||||
/// The column_type of the underlying `Column` is DateTime
|
||||
is_date_agg: bool,
|
||||
/// The buckets
|
||||
/// The histogram buckets
|
||||
buckets: Vec<IntermediateHistogramBucketEntry>,
|
||||
},
|
||||
/// Term aggregation
|
||||
Terms(IntermediateTermBucketResult),
|
||||
Terms {
|
||||
/// The term buckets
|
||||
buckets: IntermediateTermBucketResult,
|
||||
},
|
||||
}
|
||||
|
||||
impl IntermediateBucketResult {
|
||||
@@ -432,7 +463,7 @@ impl IntermediateBucketResult {
|
||||
};
|
||||
Ok(BucketResult::Histogram { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
|
||||
IntermediateBucketResult::Terms { buckets: terms } => terms.into_final_result(
|
||||
req.agg
|
||||
.as_term()
|
||||
.expect("unexpected aggregation, expected term aggregation"),
|
||||
@@ -445,8 +476,12 @@ impl IntermediateBucketResult {
|
||||
fn merge_fruits(&mut self, other: IntermediateBucketResult) -> crate::Result<()> {
|
||||
match (self, other) {
|
||||
(
|
||||
IntermediateBucketResult::Terms(term_res_left),
|
||||
IntermediateBucketResult::Terms(term_res_right),
|
||||
IntermediateBucketResult::Terms {
|
||||
buckets: term_res_left,
|
||||
},
|
||||
IntermediateBucketResult::Terms {
|
||||
buckets: term_res_right,
|
||||
},
|
||||
) => {
|
||||
merge_maps(&mut term_res_left.entries, term_res_right.entries)?;
|
||||
term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
|
||||
@@ -530,8 +565,15 @@ impl IntermediateTermBucketResult {
|
||||
.into_iter()
|
||||
.filter(|bucket| bucket.1.doc_count as u64 >= req.min_doc_count)
|
||||
.map(|(key, entry)| {
|
||||
let key_as_string = match key {
|
||||
IntermediateKey::Bool(key) => {
|
||||
let val = if key { "true" } else { "false" };
|
||||
Some(val.to_string())
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
Ok(BucketEntry {
|
||||
key_as_string: None,
|
||||
key_as_string,
|
||||
key: key.into(),
|
||||
doc_count: entry.doc_count as u64,
|
||||
sub_aggregation: entry
|
||||
|
||||
@@ -2,7 +2,8 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
|
||||
/// A single-value metric aggregation that computes the average of numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -24,7 +25,7 @@ pub struct AverageAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default)]
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -65,3 +66,71 @@ impl IntermediateAverage {
|
||||
self.stats.finalize().avg
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn deserialization_with_missing_test1() {
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "10.0"
|
||||
}"#;
|
||||
let avg: AverageAggregation = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(avg.field, "score");
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
// no dot
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "10"
|
||||
}"#;
|
||||
let avg: AverageAggregation = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(avg.field, "score");
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
|
||||
// from value
|
||||
let avg: AverageAggregation = serde_json::from_value(json!({
|
||||
"field": "score_f64",
|
||||
"missing": 10u64,
|
||||
}))
|
||||
.unwrap();
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
// from value
|
||||
let avg: AverageAggregation = serde_json::from_value(json!({
|
||||
"field": "score_f64",
|
||||
"missing": 10u32,
|
||||
}))
|
||||
.unwrap();
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
let avg: AverageAggregation = serde_json::from_value(json!({
|
||||
"field": "score_f64",
|
||||
"missing": 10i8,
|
||||
}))
|
||||
.unwrap();
|
||||
assert_eq!(avg.missing, Some(10.0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialization_with_missing_test_fail() {
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "a"
|
||||
}"#;
|
||||
let avg: Result<AverageAggregation, _> = serde_json::from_str(json);
|
||||
assert!(avg.is_err());
|
||||
assert!(avg
|
||||
.unwrap_err()
|
||||
.to_string()
|
||||
.contains("Failed to parse f64 from string: \"a\""));
|
||||
|
||||
// Disallow NaN
|
||||
let json = r#"{
|
||||
"field": "score",
|
||||
"missing": "NaN"
|
||||
}"#;
|
||||
let avg: Result<AverageAggregation, _> = serde_json::from_str(json);
|
||||
assert!(avg.is_err());
|
||||
assert!(avg.unwrap_err().to_string().contains("NaN"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,8 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
|
||||
/// A single-value metric aggregation that counts the number of values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -24,7 +25,7 @@ pub struct CountAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default)]
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,8 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
|
||||
/// A single-value metric aggregation that computes the maximum of numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -24,7 +25,7 @@ pub struct MaxAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default)]
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
@@ -71,7 +72,7 @@ mod tests {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
use crate::aggregation::tests::exec_request_with_query;
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::Index;
|
||||
use crate::{Index, IndexWriter};
|
||||
|
||||
#[test]
|
||||
fn test_max_agg_with_missing() -> crate::Result<()> {
|
||||
@@ -79,7 +80,7 @@ mod tests {
|
||||
let json = schema_builder.add_json_field("json", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
// => Segment with empty json
|
||||
index_writer.add_document(doc!()).unwrap();
|
||||
index_writer.commit().unwrap();
|
||||
|
||||
@@ -2,7 +2,8 @@ use std::fmt::Debug;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{IntermediateStats, SegmentStatsCollector};
|
||||
use super::*;
|
||||
use crate::aggregation::*;
|
||||
|
||||
/// A single-value metric aggregation that computes the minimum of numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
@@ -24,7 +25,7 @@ pub struct MinAggregation {
|
||||
/// By default they will be ignored but it is also possible to treat them as if they had a
|
||||
/// value. Examples in JSON format:
|
||||
/// { "field": "my_numbers", "missing": "10.0" }
|
||||
#[serde(default)]
|
||||
#[serde(default, deserialize_with = "deserialize_option_f64")]
|
||||
pub missing: Option<f64>,
|
||||
}
|
||||
|
||||
|
||||
@@ -23,6 +23,10 @@ mod min;
|
||||
mod percentiles;
|
||||
mod stats;
|
||||
mod sum;
|
||||
mod top_hits;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub use average::*;
|
||||
pub use count::*;
|
||||
pub use max::*;
|
||||
@@ -32,6 +36,9 @@ use rustc_hash::FxHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
pub use stats::*;
|
||||
pub use sum::*;
|
||||
pub use top_hits::*;
|
||||
|
||||
use crate::schema::OwnedValue;
|
||||
|
||||
/// Single-metric aggregations use this common result structure.
|
||||
///
|
||||
@@ -81,6 +88,28 @@ pub struct PercentilesMetricResult {
|
||||
pub values: PercentileValues,
|
||||
}
|
||||
|
||||
/// The top_hits metric results entry
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct TopHitsVecEntry {
|
||||
/// The sort values of the document, depending on the sort criteria in the request.
|
||||
pub sort: Vec<Option<u64>>,
|
||||
|
||||
/// Search results, for queries that include field retrieval requests
|
||||
/// (`docvalue_fields`).
|
||||
#[serde(rename = "docvalue_fields")]
|
||||
#[serde(skip_serializing_if = "HashMap::is_empty")]
|
||||
pub doc_value_fields: HashMap<String, OwnedValue>,
|
||||
}
|
||||
|
||||
/// The top_hits metric aggregation returns a list of top hits, ordered by the sort criteria.
///
|
||||
/// The main reason for wrapping it in `hits` is to match elasticsearch output structure.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct TopHitsMetricResult {
|
||||
/// The result of the top_hits metric.
|
||||
pub hits: Vec<TopHitsVecEntry>,
|
||||
}
|
|
||||
mod tests {
|
||||
use crate::aggregation::agg_req::Aggregations;
|
||||
@@ -88,7 +117,7 @@ mod tests {
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::query::AllQuery;
|
||||
use crate::schema::{NumericOptions, Schema};
|
||||
use crate::Index;
|
||||
use crate::{Index, IndexWriter};
|
||||
|
||||
#[test]
|
||||
fn test_metric_aggregations() {
|
||||
@@ -96,7 +125,7 @@ mod tests {
|
||||
let field_options = NumericOptions::default().set_fast();
|
||||
let field = schema_builder.add_f64_field("price", field_options);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
|
||||
|
||||
for i in 0..3 {
|
||||
index_writer
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use std::fmt::Debug;
|
||||
|
||||
use columnar::ColumnType;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
@@ -11,7 +10,7 @@ use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
|
||||
};
|
||||
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
|
||||
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, AggregationError};
use crate::aggregation::*;
use crate::{DocId, TantivyError};

/// # Percentiles
@@ -84,7 +83,11 @@ pub struct PercentilesAggregationReq {
    /// By default they will be ignored but it is also possible to treat them as if they had a
    /// value. Examples in JSON format:
    /// { "field": "my_numbers", "missing": "10.0" }
    #[serde(skip_serializing_if = "Option::is_none", default)]
    #[serde(
        skip_serializing_if = "Option::is_none",
        default,
        deserialize_with = "deserialize_option_f64"
    )]
    pub missing: Option<f64>,
}
fn default_percentiles() -> &'static [f64] {
@@ -133,7 +136,6 @@ pub(crate) struct SegmentPercentilesCollector {
    field_type: ColumnType,
    pub(crate) percentiles: PercentilesCollector,
    pub(crate) accessor_idx: usize,
    val_cache: Vec<u64>,
    missing: Option<u64>,
}

@@ -243,7 +245,6 @@ impl SegmentPercentilesCollector {
            field_type,
            percentiles: PercentilesCollector::new(),
            accessor_idx,
            val_cache: Default::default(),
            missing,
        })
    }

@@ -1,4 +1,3 @@
use columnar::ColumnType;
use serde::{Deserialize, Serialize};

use super::*;
@@ -9,7 +8,7 @@ use crate::aggregation::intermediate_agg_result::{
    IntermediateAggregationResult, IntermediateAggregationResults, IntermediateMetricResult,
};
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64};
use crate::aggregation::*;
use crate::{DocId, TantivyError};

/// A multi-value metric aggregation that computes a collection of statistics on numeric values that
@@ -33,7 +32,7 @@ pub struct StatsAggregation {
    /// By default they will be ignored but it is also possible to treat them as if they had a
    /// value. Examples in JSON format:
    /// { "field": "my_numbers", "missing": "10.0" }
    #[serde(default)]
    #[serde(default, deserialize_with = "deserialize_option_f64")]
    pub missing: Option<f64>,
}

@@ -300,7 +299,7 @@ mod tests {
    use crate::aggregation::AggregationCollector;
    use crate::query::{AllQuery, TermQuery};
    use crate::schema::{IndexRecordOption, Schema, FAST};
    use crate::{Index, Term};
    use crate::{Index, IndexWriter, Term};

    #[test]
    fn test_aggregation_stats_empty_index() -> crate::Result<()> {
@@ -494,7 +493,7 @@ mod tests {
        let json = schema_builder.add_json_field("json", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();
        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
        // => Segment with empty json
        index_writer.add_document(doc!()).unwrap();
        index_writer.commit().unwrap();
@@ -541,7 +540,7 @@ mod tests {
        let json = schema_builder.add_json_field("json", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();
        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
        // => Segment with empty json
        index_writer.add_document(doc!()).unwrap();
        index_writer.commit().unwrap();
@@ -580,6 +579,30 @@ mod tests {
            })
        );

        // From string
        let agg_req: Aggregations = serde_json::from_value(json!({
            "my_stats": {
                "stats": {
                    "field": "json.partially_empty",
                    "missing": "0.0"
                },
            }
        }))
        .unwrap();

        let res = exec_request_with_query(agg_req, &index, None)?;

        assert_eq!(
            res["my_stats"],
            json!({
                "avg": 2.5,
                "count": 4,
                "max": 10.0,
                "min": 0.0,
                "sum": 10.0
            })
        );

        Ok(())
    }


@@ -2,7 +2,8 @@ use std::fmt::Debug;

use serde::{Deserialize, Serialize};

use super::{IntermediateStats, SegmentStatsCollector};
use super::*;
use crate::aggregation::*;

/// A single-value metric aggregation that sums up numeric values that are
/// extracted from the aggregated documents.
@@ -24,7 +25,7 @@ pub struct SumAggregation {
    /// By default they will be ignored but it is also possible to treat them as if they had a
    /// value. Examples in JSON format:
    /// { "field": "my_numbers", "missing": "10.0" }
    #[serde(default)]
    #[serde(default, deserialize_with = "deserialize_option_f64")]
    pub missing: Option<f64>,
}


src/aggregation/metric/top_hits.rs (new file, 897 lines)
@@ -0,0 +1,897 @@
use std::collections::HashMap;
use std::net::Ipv6Addr;

use columnar::{ColumnarReader, DynamicColumn};
use common::DateTime;
use regex::Regex;
use serde::ser::SerializeMap;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use super::{TopHitsMetricResult, TopHitsVecEntry};
use crate::aggregation::bucket::Order;
use crate::aggregation::intermediate_agg_result::{
    IntermediateAggregationResult, IntermediateMetricResult,
};
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
use crate::aggregation::AggregationError;
use crate::collector::TopNComputer;
use crate::schema::term::JSON_PATH_SEGMENT_SEP_STR;
use crate::schema::OwnedValue;
use crate::{DocAddress, DocId, SegmentOrdinal};

/// # Top Hits
///
/// The top hits aggregation is a useful tool to answer questions like:
/// - "What are the most recent posts by each author?"
/// - "What are the most popular items in each category?"
///
/// It does so by keeping track of the most relevant document being aggregated,
/// in terms of a sort criterion that can consist of multiple fields and their
/// sort-orders (ascending or descending).
///
/// `top_hits` should not be used as a top-level aggregation. It is intended to be
/// used as a sub-aggregation, inside a `terms` aggregation or a `filters` aggregation,
/// for example.
///
/// Note that this aggregator does not return the actual document addresses, but
/// rather a list of the values of the fields that were requested to be retrieved.
/// These values can be specified in the `docvalue_fields` parameter, which can include
/// a list of fast fields to be retrieved. At the moment, only fast fields are supported
/// but it is possible that we support the `fields` parameter to retrieve any stored
/// field in the future.
///
/// The following example demonstrates a request for the top_hits aggregation:
/// ```JSON
/// {
///   "aggs": {
///     "top_authors": {
///       "terms": {
///         "field": "author",
///         "size": 5
///       }
///     },
///     "aggs": {
///       "top_hits": {
///         "size": 2,
///         "from": 0,
///         "sort": [
///           { "date": "desc" }
///         ],
///         "docvalue_fields": ["date", "title", "iden"]
///       }
///     }
///   }
/// }
/// ```
///
/// This request will return an object containing the top two documents, sorted
/// by the `date` field in descending order. You can also sort by multiple fields, which
/// helps to resolve ties. The aggregation object for each bucket will look like:
/// ```JSON
/// {
///   "hits": [
///     {
///       "score": [<time_u64>],
///       "docvalue_fields": {
///         "date": "<date_RFC3339>",
///         "title": "<title>",
///         "iden": "<iden>"
///       }
///     },
///     {
///       "score": [<time_u64>],
///       "docvalue_fields": {
///         "date": "<date_RFC3339>",
///         "title": "<title>",
///         "iden": "<iden>"
///       }
///     }
///   ]
/// }
/// ```
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub struct TopHitsAggregation {
    sort: Vec<KeyOrder>,
    size: usize,
    from: Option<usize>,

    #[serde(rename = "docvalue_fields")]
    #[serde(default)]
    doc_value_fields: Vec<String>,

    // Not supported
    _source: Option<serde_json::Value>,
    fields: Option<serde_json::Value>,
    script_fields: Option<serde_json::Value>,
    highlight: Option<serde_json::Value>,
    explain: Option<serde_json::Value>,
    version: Option<serde_json::Value>,
}

#[derive(Debug, Clone, PartialEq, Default)]
struct KeyOrder {
    field: String,
    order: Order,
}

impl Serialize for KeyOrder {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let KeyOrder { field, order } = self;
        let mut map = serializer.serialize_map(Some(1))?;
        map.serialize_entry(field, order)?;
        map.end()
    }
}

impl<'de> Deserialize<'de> for KeyOrder {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where D: Deserializer<'de> {
        let mut key_order = <HashMap<String, Order>>::deserialize(deserializer)?.into_iter();
        let (field, order) = key_order.next().ok_or(serde::de::Error::custom(
            "Expected exactly one key-value pair in sort parameter of top_hits, found none",
        ))?;
        if key_order.next().is_some() {
            return Err(serde::de::Error::custom(format!(
                "Expected exactly one key-value pair in sort parameter of top_hits, found {:?}",
                key_order
            )));
        }
        Ok(Self { field, order })
    }
}
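
As a reference, here is a minimal standalone sketch of the single-entry-map convention that each `sort` entry follows; the local `Order` enum is an illustrative stand-in for the tantivy type, not the real one:

```rust
// Hedged sketch: each `sort` entry is a one-entry JSON map such as {"date": "desc"}.
// `Order` below is a local stand-in, not the tantivy type.
use std::collections::HashMap;

use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum Order {
    Asc,
    Desc,
}

fn main() {
    let entry: HashMap<String, Order> =
        serde_json::from_str(r#"{ "date": "desc" }"#).unwrap();
    let (field, order) = entry.into_iter().next().unwrap();
    assert_eq!(field, "date");
    assert_eq!(order, Order::Desc);

    // A plain map happily accepts several keys; the custom `Deserialize` for
    // `KeyOrder` above exists precisely to reject anything but exactly one pair.
    let too_many: HashMap<String, Order> =
        serde_json::from_str(r#"{ "date": "desc", "title": "asc" }"#).unwrap();
    assert_eq!(too_many.len(), 2);
}
```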

// Transform a glob (`pattern*`, for example) into a regex::Regex (`^pattern.*$`)
fn globbed_string_to_regex(glob: &str) -> Result<Regex, crate::TantivyError> {
    // Replace `*` glob with `.*` regex
    let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
    Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
        crate::TantivyError::SchemaError(format!(
            "Invalid regex '{}' in docvalue_fields: {}",
            glob, e
        ))
    })
}
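
For illustration, a self-contained sketch of the same glob expansion used for `docvalue_fields` (only the `regex` crate is assumed; the field names are made up):

```rust
// Hedged sketch of the glob-to-regex expansion: "tex*" becomes "^tex.*$".
use regex::Regex;

fn glob_to_regex(glob: &str) -> Regex {
    let pattern = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
    Regex::new(&pattern).expect("valid regex")
}

fn main() {
    let re = glob_to_regex("tex*");
    assert!(re.is_match("text"));
    assert!(re.is_match("text2"));
    assert!(!re.is_match("date"));
    // Without a `*`, the pattern only matches the exact column name.
    assert!(glob_to_regex("date").is_match("date"));
}
```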

fn use_doc_value_fields_err(parameter: &str) -> crate::Result<()> {
    Err(crate::TantivyError::AggregationError(
        AggregationError::InvalidRequest(format!(
            "The `{}` parameter is not supported, only `docvalue_fields` is supported in \
             `top_hits` aggregation",
            parameter
        )),
    ))
}
fn unsupported_err(parameter: &str) -> crate::Result<()> {
    Err(crate::TantivyError::AggregationError(
        AggregationError::InvalidRequest(format!(
            "The `{}` parameter is not supported in the `top_hits` aggregation",
            parameter
        )),
    ))
}

impl TopHitsAggregation {
    /// Validate and resolve field retrieval parameters
    pub fn validate_and_resolve_field_names(
        &mut self,
        reader: &ColumnarReader,
    ) -> crate::Result<()> {
        if self._source.is_some() {
            use_doc_value_fields_err("_source")?;
        }
        if self.fields.is_some() {
            use_doc_value_fields_err("fields")?;
        }
        if self.script_fields.is_some() {
            use_doc_value_fields_err("script_fields")?;
        }
        if self.explain.is_some() {
            unsupported_err("explain")?;
        }
        if self.highlight.is_some() {
            unsupported_err("highlight")?;
        }
        if self.version.is_some() {
            unsupported_err("version")?;
        }

        self.doc_value_fields = self
            .doc_value_fields
            .iter()
            .map(|field| {
                if !field.contains('*')
                    && reader
                        .iter_columns()?
                        .any(|(name, _)| name.as_str() == field)
                {
                    return Ok(vec![field.to_owned()]);
                }

                let pattern = globbed_string_to_regex(field)?;
                let fields = reader
                    .iter_columns()?
                    .map(|(name, _)| {
                        // normalize path from internal fast field repr
                        name.replace(JSON_PATH_SEGMENT_SEP_STR, ".")
                    })
                    .filter(|name| pattern.is_match(name))
                    .collect::<Vec<_>>();
                assert!(
                    !fields.is_empty(),
                    "No fields matched the glob '{}' in docvalue_fields",
                    field
                );
                Ok(fields)
            })
            .collect::<crate::Result<Vec<_>>>()?
            .into_iter()
            .flatten()
            .collect();

        Ok(())
    }

    /// Return fields accessed by the aggregator, in order.
    pub fn field_names(&self) -> Vec<&str> {
        self.sort
            .iter()
            .map(|KeyOrder { field, .. }| field.as_str())
            .collect()
    }

    /// Return fields accessed by the aggregator's value retrieval.
    pub fn value_field_names(&self) -> Vec<&str> {
        self.doc_value_fields.iter().map(|s| s.as_str()).collect()
    }

    fn get_document_field_data(
        &self,
        accessors: &HashMap<String, Vec<DynamicColumn>>,
        doc_id: DocId,
    ) -> HashMap<String, FastFieldValue> {
        let doc_value_fields = self
            .doc_value_fields
            .iter()
            .map(|field| {
                let accessors = accessors
                    .get(field)
                    .unwrap_or_else(|| panic!("field '{}' not found in accessors", field));

                let values: Vec<FastFieldValue> = accessors
                    .iter()
                    .flat_map(|accessor| match accessor {
                        DynamicColumn::U64(accessor) => accessor
                            .values_for_doc(doc_id)
                            .map(FastFieldValue::U64)
                            .collect::<Vec<_>>(),
                        DynamicColumn::I64(accessor) => accessor
                            .values_for_doc(doc_id)
                            .map(FastFieldValue::I64)
                            .collect::<Vec<_>>(),
                        DynamicColumn::F64(accessor) => accessor
                            .values_for_doc(doc_id)
                            .map(FastFieldValue::F64)
                            .collect::<Vec<_>>(),
                        DynamicColumn::Bytes(accessor) => accessor
                            .term_ords(doc_id)
                            .map(|term_ord| {
                                let mut buffer = vec![];
                                assert!(
                                    accessor
                                        .ord_to_bytes(term_ord, &mut buffer)
                                        .expect("could not read term dictionary"),
                                    "term corresponding to term_ord does not exist"
                                );
                                FastFieldValue::Bytes(buffer)
                            })
                            .collect::<Vec<_>>(),
                        DynamicColumn::Str(accessor) => accessor
                            .term_ords(doc_id)
                            .map(|term_ord| {
                                let mut buffer = vec![];
                                assert!(
                                    accessor
                                        .ord_to_bytes(term_ord, &mut buffer)
                                        .expect("could not read term dictionary"),
                                    "term corresponding to term_ord does not exist"
                                );
                                FastFieldValue::Str(String::from_utf8(buffer).unwrap())
                            })
                            .collect::<Vec<_>>(),
                        DynamicColumn::Bool(accessor) => accessor
                            .values_for_doc(doc_id)
                            .map(FastFieldValue::Bool)
                            .collect::<Vec<_>>(),
                        DynamicColumn::IpAddr(accessor) => accessor
                            .values_for_doc(doc_id)
                            .map(FastFieldValue::IpAddr)
                            .collect::<Vec<_>>(),
                        DynamicColumn::DateTime(accessor) => accessor
                            .values_for_doc(doc_id)
                            .map(FastFieldValue::Date)
                            .collect::<Vec<_>>(),
                    })
                    .collect();

                (field.to_owned(), FastFieldValue::Array(values))
            })
            .collect();
        doc_value_fields
    }
}

/// A retrieved value from a fast field.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum FastFieldValue {
    /// The str type is used for any text information.
    Str(String),
    /// Unsigned 64-bits Integer `u64`
    U64(u64),
    /// Signed 64-bits Integer `i64`
    I64(i64),
    /// 64-bits Float `f64`
    F64(f64),
    /// Bool value
    Bool(bool),
    /// Date/time with nanoseconds precision
    Date(DateTime),
    /// Arbitrarily sized byte array
    Bytes(Vec<u8>),
    /// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
    IpAddr(Ipv6Addr),
    /// A list of values.
    Array(Vec<Self>),
}

impl From<FastFieldValue> for OwnedValue {
    fn from(value: FastFieldValue) -> Self {
        match value {
            FastFieldValue::Str(s) => OwnedValue::Str(s),
            FastFieldValue::U64(u) => OwnedValue::U64(u),
            FastFieldValue::I64(i) => OwnedValue::I64(i),
            FastFieldValue::F64(f) => OwnedValue::F64(f),
            FastFieldValue::Bool(b) => OwnedValue::Bool(b),
            FastFieldValue::Date(d) => OwnedValue::Date(d),
            FastFieldValue::Bytes(b) => OwnedValue::Bytes(b),
            FastFieldValue::IpAddr(ip) => OwnedValue::IpAddr(ip),
            FastFieldValue::Array(a) => {
                OwnedValue::Array(a.into_iter().map(OwnedValue::from).collect())
            }
        }
    }
}

/// Holds a fast field value in its u64 representation, and the order in which it should be sorted.
#[derive(Clone, Serialize, Deserialize, Debug)]
struct DocValueAndOrder {
    /// A fast field value in its u64 representation.
    value: Option<u64>,
    /// Sort order for the value
    order: Order,
}

impl Ord for DocValueAndOrder {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let invert = |cmp: std::cmp::Ordering| match self.order {
            Order::Asc => cmp,
            Order::Desc => cmp.reverse(),
        };

        match (self.value, other.value) {
            (Some(self_value), Some(other_value)) => invert(self_value.cmp(&other_value)),
            (Some(_), None) => std::cmp::Ordering::Greater,
            (None, Some(_)) => std::cmp::Ordering::Less,
            (None, None) => std::cmp::Ordering::Equal,
        }
    }
}

impl PartialOrd for DocValueAndOrder {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for DocValueAndOrder {
    fn eq(&self, other: &Self) -> bool {
        self.value.cmp(&other.value) == std::cmp::Ordering::Equal
    }
}

impl Eq for DocValueAndOrder {}

#[derive(Clone, Serialize, Deserialize, Debug)]
struct DocSortValuesAndFields {
    sorts: Vec<DocValueAndOrder>,

    #[serde(rename = "docvalue_fields")]
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    doc_value_fields: HashMap<String, FastFieldValue>,
}

impl Ord for DocSortValuesAndFields {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        for (self_feature, other_feature) in self.sorts.iter().zip(other.sorts.iter()) {
            let cmp = self_feature.cmp(other_feature);
            if cmp != std::cmp::Ordering::Equal {
                return cmp;
            }
        }
        std::cmp::Ordering::Equal
    }
}

impl PartialOrd for DocSortValuesAndFields {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for DocSortValuesAndFields {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == std::cmp::Ordering::Equal
    }
}

impl Eq for DocSortValuesAndFields {}

/// The TopHitsCollector used for collecting over segments and merging results.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct TopHitsTopNComputer {
    req: TopHitsAggregation,
    top_n: TopNComputer<DocSortValuesAndFields, DocAddress, false>,
}

impl std::cmp::PartialEq for TopHitsTopNComputer {
    fn eq(&self, _other: &Self) -> bool {
        false
    }
}

impl TopHitsTopNComputer {
    /// Create a new TopHitsCollector
    pub fn new(req: TopHitsAggregation) -> Self {
        Self {
            top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
            req,
        }
    }

    fn collect(&mut self, features: DocSortValuesAndFields, doc: DocAddress) {
        self.top_n.push(features, doc);
    }

    pub(crate) fn merge_fruits(&mut self, other_fruit: Self) -> crate::Result<()> {
        for doc in other_fruit.top_n.into_vec() {
            self.collect(doc.feature, doc.doc);
        }
        Ok(())
    }

    /// Finalize by converting self into the final result form
    pub fn into_final_result(self) -> TopHitsMetricResult {
        let mut hits: Vec<TopHitsVecEntry> = self
            .top_n
            .into_sorted_vec()
            .into_iter()
            .map(|doc| TopHitsVecEntry {
                sort: doc.feature.sorts.iter().map(|f| f.value).collect(),
                doc_value_fields: doc
                    .feature
                    .doc_value_fields
                    .into_iter()
                    .map(|(k, v)| (k, v.into()))
                    .collect(),
            })
            .collect();

        // Remove the first `from` elements
        // Truncating from end would be more efficient, but we need to truncate from the front
        // because `into_sorted_vec` gives us a descending order because of the inverted
        // `Ord` semantics of the heap elements.
        hits.drain(..self.req.from.unwrap_or(0));
        TopHitsMetricResult { hits }
    }
}
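
A hedged sketch of the `from`/`size` bookkeeping performed by `into_final_result`, using a plain `Vec` instead of the tantivy types: the computer keeps `size + from` hits, sorts them, then drops the first `from` entries to implement pagination.

```rust
// Illustration only; "hit_N" values are made up.
fn main() {
    let size = 2;
    let from = 1;

    // Pretend these hits are already sorted by the requested sort key.
    let mut hits = vec!["hit_0", "hit_1", "hit_2"]; // size + from = 3 kept
    assert_eq!(hits.len(), size + from);

    // Drop the first `from` hits, keep the next `size`.
    hits.drain(..from);
    assert_eq!(hits, vec!["hit_1", "hit_2"]);
}
```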

#[derive(Clone, Debug)]
pub(crate) struct TopHitsSegmentCollector {
    segment_ordinal: SegmentOrdinal,
    accessor_idx: usize,
    req: TopHitsAggregation,
    top_n: TopNComputer<Vec<DocValueAndOrder>, DocAddress, false>,
}

impl TopHitsSegmentCollector {
    pub fn from_req(
        req: &TopHitsAggregation,
        accessor_idx: usize,
        segment_ordinal: SegmentOrdinal,
    ) -> Self {
        Self {
            req: req.clone(),
            top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
            segment_ordinal,
            accessor_idx,
        }
    }
    fn into_top_hits_collector(
        self,
        value_accessors: &HashMap<String, Vec<DynamicColumn>>,
    ) -> TopHitsTopNComputer {
        let mut top_hits_computer = TopHitsTopNComputer::new(self.req.clone());
        let top_results = self.top_n.into_vec();

        for res in top_results {
            let doc_value_fields = self
                .req
                .get_document_field_data(value_accessors, res.doc.doc_id);
            top_hits_computer.collect(
                DocSortValuesAndFields {
                    sorts: res.feature,
                    doc_value_fields,
                },
                res.doc,
            );
        }

        top_hits_computer
    }
}

impl SegmentAggregationCollector for TopHitsSegmentCollector {
    fn add_intermediate_aggregation_result(
        self: Box<Self>,
        agg_with_accessor: &crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
        results: &mut crate::aggregation::intermediate_agg_result::IntermediateAggregationResults,
    ) -> crate::Result<()> {
        let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();

        let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;

        let intermediate_result =
            IntermediateMetricResult::TopHits(self.into_top_hits_collector(value_accessors));
        results.push(
            name,
            IntermediateAggregationResult::Metric(intermediate_result),
        )
    }

    fn collect(
        &mut self,
        doc_id: crate::DocId,
        agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
    ) -> crate::Result<()> {
        let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
        let sorts: Vec<DocValueAndOrder> = self
            .req
            .sort
            .iter()
            .enumerate()
            .map(|(idx, KeyOrder { order, .. })| {
                let order = *order;
                let value = accessors
                    .get(idx)
                    .expect("could not find field in accessors")
                    .0
                    .values_for_doc(doc_id)
                    .next();
                DocValueAndOrder { value, order }
            })
            .collect();

        self.top_n.push(
            sorts,
            DocAddress {
                segment_ord: self.segment_ordinal,
                doc_id,
            },
        );
        Ok(())
    }

    fn collect_block(
        &mut self,
        docs: &[crate::DocId],
        agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
    ) -> crate::Result<()> {
        // TODO: Consider getting fields with the column block accessor.
        for doc in docs {
            self.collect(*doc, agg_with_accessor)?;
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use common::DateTime;
    use pretty_assertions::assert_eq;
    use serde_json::Value;
    use time::macros::datetime;

    use super::{DocSortValuesAndFields, DocValueAndOrder, Order};
    use crate::aggregation::agg_req::Aggregations;
    use crate::aggregation::agg_result::AggregationResults;
    use crate::aggregation::bucket::tests::get_test_index_from_docs;
    use crate::aggregation::tests::get_test_index_from_values;
    use crate::aggregation::AggregationCollector;
    use crate::collector::ComparableDoc;
    use crate::query::AllQuery;
    use crate::schema::OwnedValue;

    fn invert_order(cmp_feature: DocValueAndOrder) -> DocValueAndOrder {
        let DocValueAndOrder { value, order } = cmp_feature;
        let order = match order {
            Order::Asc => Order::Desc,
            Order::Desc => Order::Asc,
        };
        DocValueAndOrder { value, order }
    }

    fn collector_with_capacity(capacity: usize) -> super::TopHitsTopNComputer {
        super::TopHitsTopNComputer {
            top_n: super::TopNComputer::new(capacity),
            req: Default::default(),
        }
    }

    fn invert_order_features(mut cmp_features: DocSortValuesAndFields) -> DocSortValuesAndFields {
        cmp_features.sorts = cmp_features
            .sorts
            .into_iter()
            .map(invert_order)
            .collect::<Vec<_>>();
        cmp_features
    }

    #[test]
    fn test_comparable_doc_feature() -> crate::Result<()> {
        let small = DocValueAndOrder {
            value: Some(1),
            order: Order::Asc,
        };
        let big = DocValueAndOrder {
            value: Some(2),
            order: Order::Asc,
        };
        let none = DocValueAndOrder {
            value: None,
            order: Order::Asc,
        };

        assert!(small < big);
        assert!(none < small);
        assert!(none < big);

        let small = invert_order(small);
        let big = invert_order(big);
        let none = invert_order(none);

        assert!(small > big);
        assert!(none < small);
        assert!(none < big);

        Ok(())
    }

    #[test]
    fn test_comparable_doc_features() -> crate::Result<()> {
        let features_1 = DocSortValuesAndFields {
            sorts: vec![DocValueAndOrder {
                value: Some(1),
                order: Order::Asc,
            }],
            doc_value_fields: Default::default(),
        };

        let features_2 = DocSortValuesAndFields {
            sorts: vec![DocValueAndOrder {
                value: Some(2),
                order: Order::Asc,
            }],
            doc_value_fields: Default::default(),
        };

        assert!(features_1 < features_2);

        assert!(invert_order_features(features_1.clone()) > invert_order_features(features_2));

        Ok(())
    }

    #[test]
    fn test_aggregation_top_hits_empty_index() -> crate::Result<()> {
        let values = vec![];

        let index = get_test_index_from_values(false, &values)?;

        let d: Aggregations = serde_json::from_value(json!({
            "top_hits_req": {
                "top_hits": {
                    "size": 2,
                    "sort": [
                        { "date": "desc" }
                    ],
                    "from": 0,
                }
            }
        }))
        .unwrap();

        let collector = AggregationCollector::from_aggs(d, Default::default());

        let reader = index.reader()?;
        let searcher = reader.searcher();
        let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();

        let res: Value = serde_json::from_str(
            &serde_json::to_string(&agg_res).expect("JSON serialization failed"),
        )
        .expect("JSON parsing failed");

        assert_eq!(
            res,
            json!({
                "top_hits_req": {
                    "hits": []
                }
            })
        );

        Ok(())
    }

    #[test]
    fn test_top_hits_collector_single_feature() -> crate::Result<()> {
        let docs = vec![
            ComparableDoc::<_, _, false> {
                doc: crate::DocAddress {
                    segment_ord: 0,
                    doc_id: 0,
                },
                feature: DocSortValuesAndFields {
                    sorts: vec![DocValueAndOrder {
                        value: Some(1),
                        order: Order::Asc,
                    }],
                    doc_value_fields: Default::default(),
                },
            },
            ComparableDoc {
                doc: crate::DocAddress {
                    segment_ord: 0,
                    doc_id: 2,
                },
                feature: DocSortValuesAndFields {
                    sorts: vec![DocValueAndOrder {
                        value: Some(3),
                        order: Order::Asc,
                    }],
                    doc_value_fields: Default::default(),
                },
            },
            ComparableDoc {
                doc: crate::DocAddress {
                    segment_ord: 0,
                    doc_id: 1,
                },
                feature: DocSortValuesAndFields {
                    sorts: vec![DocValueAndOrder {
                        value: Some(5),
                        order: Order::Asc,
                    }],
                    doc_value_fields: Default::default(),
                },
            },
        ];

        let mut collector = collector_with_capacity(3);
        for doc in docs.clone() {
            collector.collect(doc.feature, doc.doc);
        }

        let res = collector.into_final_result();

        assert_eq!(
            res,
            super::TopHitsMetricResult {
                hits: vec![
                    super::TopHitsVecEntry {
                        sort: vec![docs[0].feature.sorts[0].value],
                        doc_value_fields: Default::default(),
                    },
                    super::TopHitsVecEntry {
                        sort: vec![docs[1].feature.sorts[0].value],
                        doc_value_fields: Default::default(),
                    },
                    super::TopHitsVecEntry {
                        sort: vec![docs[2].feature.sorts[0].value],
                        doc_value_fields: Default::default(),
                    },
                ]
            }
        );

        Ok(())
    }

    fn test_aggregation_top_hits(merge_segments: bool) -> crate::Result<()> {
        let docs = vec![
            vec![
                r#"{ "date": "2015-01-02T00:00:00Z", "text": "bbb", "text2": "bbb", "mixed": { "dyn_arr": [1, "2"] } }"#,
                r#"{ "date": "2017-06-15T00:00:00Z", "text": "ccc", "text2": "ddd", "mixed": { "dyn_arr": [3, "4"] } }"#,
            ],
            vec![
                r#"{ "text": "aaa", "text2": "bbb", "date": "2018-01-02T00:00:00Z", "mixed": { "dyn_arr": ["9", 8] } }"#,
                r#"{ "text": "aaa", "text2": "bbb", "date": "2016-01-02T00:00:00Z", "mixed": { "dyn_arr": ["7", 6] } }"#,
            ],
        ];

        let index = get_test_index_from_docs(merge_segments, &docs)?;

        let d: Aggregations = serde_json::from_value(json!({
            "top_hits_req": {
                "top_hits": {
                    "size": 2,
                    "sort": [
                        { "date": "desc" }
                    ],
                    "from": 1,
                    "docvalue_fields": [
                        "date",
                        "tex*",
                        "mixed.*",
                    ],
                }
            }
        }))?;

        let collector = AggregationCollector::from_aggs(d, Default::default());
        let reader = index.reader()?;
        let searcher = reader.searcher();

        let agg_res =
            serde_json::to_value(searcher.search(&AllQuery, &collector).unwrap()).unwrap();

        let date_2017 = datetime!(2017-06-15 00:00:00 UTC);
        let date_2016 = datetime!(2016-01-02 00:00:00 UTC);

        assert_eq!(
            agg_res["top_hits_req"],
            json!({
                "hits": [
                    {
                        "sort": [common::i64_to_u64(date_2017.unix_timestamp_nanos() as i64)],
                        "docvalue_fields": {
                            "date": [ OwnedValue::Date(DateTime::from_utc(date_2017)) ],
                            "text": [ "ccc" ],
                            "text2": [ "ddd" ],
                            "mixed.dyn_arr": [ 3, "4" ],
                        }
                    },
                    {
                        "sort": [common::i64_to_u64(date_2016.unix_timestamp_nanos() as i64)],
                        "docvalue_fields": {
                            "date": [ OwnedValue::Date(DateTime::from_utc(date_2016)) ],
                            "text": [ "aaa" ],
                            "text2": [ "bbb" ],
                            "mixed.dyn_arr": [ 6, "7" ],
                        }
                    }
                ]
            }),
        );

        Ok(())
    }

    #[test]
    fn test_aggregation_top_hits_single_segment() -> crate::Result<()> {
        test_aggregation_top_hits(true)
    }

    #[test]
    fn test_aggregation_top_hits_multi_segment() -> crate::Result<()> {
        test_aggregation_top_hits(false)
    }
}
@@ -145,6 +145,8 @@ mod agg_tests

mod agg_bench;

use core::fmt;

pub use agg_limits::AggregationLimits;
pub use collector::{
    AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
@@ -154,7 +156,110 @@ use columnar::{ColumnType, MonotonicallyMappableToU64};
pub(crate) use date::format_date;
pub use error::AggregationError;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer, Serialize};

pub(crate) fn invalid_agg_request(message: String) -> crate::TantivyError {
    crate::TantivyError::AggregationError(AggregationError::InvalidRequest(message))
}

fn parse_str_into_f64<E: de::Error>(value: &str) -> Result<f64, E> {
    let parsed = value.parse::<f64>().map_err(|_err| {
        de::Error::custom(format!("Failed to parse f64 from string: {:?}", value))
    })?;

    // Check if the parsed value is NaN or infinity
    if parsed.is_nan() || parsed.is_infinite() {
        Err(de::Error::custom(format!(
            "Value is not a valid f64 (NaN or Infinity): {:?}",
            value
        )))
    } else {
        Ok(parsed)
    }
}

/// deserialize Option<f64> from string or float
pub(crate) fn deserialize_option_f64<'de, D>(deserializer: D) -> Result<Option<f64>, D::Error>
where D: Deserializer<'de> {
    struct StringOrFloatVisitor;

    impl<'de> Visitor<'de> for StringOrFloatVisitor {
        type Value = Option<f64>;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("a string or a float")
        }

        fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
        where E: de::Error {
            parse_str_into_f64(value).map(Some)
        }

        fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(Some(value))
        }

        fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(Some(value as f64))
        }

        fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(Some(value as f64))
        }

        fn visit_none<E>(self) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(None)
        }

        fn visit_unit<E>(self) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(None)
        }
    }

    deserializer.deserialize_any(StringOrFloatVisitor)
}

/// deserialize f64 from string or float
pub(crate) fn deserialize_f64<'de, D>(deserializer: D) -> Result<f64, D::Error>
where D: Deserializer<'de> {
    struct StringOrFloatVisitor;

    impl<'de> Visitor<'de> for StringOrFloatVisitor {
        type Value = f64;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("a string or a float")
        }

        fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
        where E: de::Error {
            parse_str_into_f64(value)
        }

        fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(value)
        }

        fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(value as f64)
        }

        fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
        where E: de::Error {
            Ok(value as f64)
        }
    }

    deserializer.deserialize_any(StringOrFloatVisitor)
}
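
For illustration, a hedged, self-contained sketch of the string-or-float behavior that the visitors above implement, written with plain serde/serde_json rather than the tantivy helpers; the `MissingReq` struct is illustrative, not a tantivy type:

```rust
// Accepts `"missing": "10.0"` as well as `"missing": 10.0`.
use serde::{Deserialize, Deserializer};

fn option_f64_from_str_or_num<'de, D>(deserializer: D) -> Result<Option<f64>, D::Error>
where D: Deserializer<'de> {
    // Accept either a JSON number or a string holding a number.
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum NumOrStr {
        Num(f64),
        Str(String),
    }
    match Option::<NumOrStr>::deserialize(deserializer)? {
        None => Ok(None),
        Some(NumOrStr::Num(n)) => Ok(Some(n)),
        Some(NumOrStr::Str(s)) => s.parse::<f64>().map(Some).map_err(serde::de::Error::custom),
    }
}

#[derive(Deserialize, Debug)]
struct MissingReq {
    field: String,
    #[serde(default, deserialize_with = "option_f64_from_str_or_num")]
    missing: Option<f64>,
}

fn main() {
    let from_str: MissingReq =
        serde_json::from_str(r#"{ "field": "my_numbers", "missing": "10.0" }"#).unwrap();
    let from_num: MissingReq =
        serde_json::from_str(r#"{ "field": "my_numbers", "missing": 10.0 }"#).unwrap();
    assert_eq!(from_str.missing, Some(10.0));
    assert_eq!(from_num.missing, Some(10.0));
    println!("{:?} {:?}", from_str, from_num);
}
```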

/// Represents an associative array `(key => values)` in a very efficient manner.
#[derive(PartialEq, Serialize, Deserialize)]
@@ -281,6 +386,7 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &ColumnType) -> f64 {
        ColumnType::U64 => val as f64,
        ColumnType::I64 | ColumnType::DateTime => i64::from_u64(val) as f64,
        ColumnType::F64 => f64::from_u64(val),
        ColumnType::Bool => val as f64,
        _ => {
            panic!("unexpected type {field_type:?}. This should not happen")
        }
@@ -301,6 +407,7 @@ pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &ColumnType) -> Option<
        ColumnType::U64 => Some(val as u64),
        ColumnType::I64 | ColumnType::DateTime => Some((val as i64).to_u64()),
        ColumnType::F64 => Some(val.to_u64()),
        ColumnType::Bool => Some(val as u64),
        _ => None,
    }
}
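
The `I64`/`DateTime` arms rely on an order-preserving i64 <-> u64 mapping (`MonotonicallyMappableToU64`). Below is a hedged, from-scratch sketch of the usual sign-bit-flip construction behind such a mapping; it is an illustration, not the tantivy implementation:

```rust
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    // Round-trips exactly...
    assert_eq!(u64_to_i64(i64_to_u64(-3)), -3);
    // ...and preserves ordering, so range scans over the u64 fast field
    // still make sense for signed values.
    assert!(i64_to_u64(-3) < i64_to_u64(0));
    assert!(i64_to_u64(0) < i64_to_u64(42));
}
```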
@@ -314,12 +421,11 @@ mod tests {
    use time::OffsetDateTime;

    use super::agg_req::Aggregations;
    use super::segment_agg_result::AggregationLimits;
    use super::*;
    use crate::indexer::NoMergePolicy;
    use crate::query::{AllQuery, TermQuery};
    use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
    use crate::{Index, Term};
    use crate::{Index, IndexWriter, Term};

    pub fn get_test_index_with_num_docs(
        merge_segments: bool,
@@ -451,7 +557,7 @@ mod tests {
            .searchable_segment_ids()
            .expect("Searchable segments failed.");
        if segment_ids.len() > 1 {
            let mut index_writer = index.writer_for_tests()?;
            let mut index_writer: IndexWriter = index.writer_for_tests()?;
            index_writer.merge(&segment_ids).wait()?;
            index_writer.wait_merging_threads()?;
        }
@@ -565,7 +671,7 @@ mod tests {
        let segment_ids = index
            .searchable_segment_ids()
            .expect("Searchable segments failed.");
        let mut index_writer = index.writer_for_tests()?;
        let mut index_writer: IndexWriter = index.writer_for_tests()?;
        index_writer.merge(&segment_ids).wait()?;
        index_writer.wait_merging_threads()?;
    }

@@ -16,6 +16,7 @@ use super::metric::{
    SumAggregation,
};
use crate::aggregation::bucket::TermMissingAgg;
use crate::aggregation::metric::TopHitsSegmentCollector;

pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
    fn add_intermediate_aggregation_result(
@@ -160,6 +161,11 @@ pub(crate) fn build_single_agg_segment_collector(
            accessor_idx,
        )?,
    )),
    TopHits(top_hits_req) => Ok(Box::new(TopHitsSegmentCollector::from_req(
        top_hits_req,
        accessor_idx,
        req.segment_ordinal,
    ))),
    }
}

@@ -410,6 +410,7 @@ impl SegmentCollector for FacetSegmentCollector {

/// Intermediary result of the `FacetCollector` that stores
/// the facet counts for all the segments.
#[derive(Default, Clone)]
pub struct FacetCounts {
    facet_counts: BTreeMap<Facet, u64>,
}
@@ -493,10 +494,10 @@ mod tests {
    use super::{FacetCollector, FacetCounts};
    use crate::collector::facet_collector::compress_mapping;
    use crate::collector::Count;
    use crate::core::Index;
    use crate::index::Index;
    use crate::query::{AllQuery, QueryParser, TermQuery};
    use crate::schema::{Document, Facet, FacetOptions, IndexRecordOption, Schema};
    use crate::Term;
    use crate::schema::{Facet, FacetOptions, IndexRecordOption, Schema, TantivyDocument};
    use crate::{IndexWriter, Term};

    fn test_collapse_mapping_aux(
        facet_terms: &[&str],
@@ -559,7 +560,7 @@ mod tests {
        let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_for_tests().unwrap();
        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
        index_writer
            .add_document(doc!(facet_field=>Facet::from("/facet/a")))
            .unwrap();
@@ -588,7 +589,7 @@ mod tests {
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);

        let mut index_writer = index.writer_for_tests().unwrap();
        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
        let num_facets: usize = 3 * 4 * 5;
        let facets: Vec<Facet> = (0..num_facets)
            .map(|mut n| {
@@ -601,7 +602,7 @@ mod tests {
            })
            .collect();
        for i in 0..num_facets * 10 {
            let mut doc = Document::new();
            let mut doc = TantivyDocument::new();
            doc.add_facet(facet_field, facets[i % num_facets].clone());
            index_writer.add_document(doc).unwrap();
        }
@@ -732,24 +733,25 @@ mod tests {
        let index = Index::create_in_ram(schema);

        let uniform = Uniform::new_inclusive(1, 100_000);
        let mut docs: Vec<Document> = vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
            .into_iter()
            .flat_map(|(c, count)| {
                let facet = Facet::from(&format!("/facet/{}", c));
                let doc = doc!(facet_field => facet);
                iter::repeat(doc).take(count)
            })
            .map(|mut doc| {
                doc.add_facet(
                    facet_field,
                    &format!("/facet/{}", thread_rng().sample(uniform)),
                );
                doc
            })
            .collect();
        let mut docs: Vec<TantivyDocument> =
            vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
                .into_iter()
                .flat_map(|(c, count)| {
                    let facet = Facet::from(&format!("/facet/{}", c));
                    let doc = doc!(facet_field => facet);
                    iter::repeat(doc).take(count)
                })
                .map(|mut doc| {
                    doc.add_facet(
                        facet_field,
                        &format!("/facet/{}", thread_rng().sample(uniform)),
                    );
                    doc
                })
                .collect();
        docs[..].shuffle(&mut thread_rng());

        let mut index_writer = index.writer_for_tests().unwrap();
        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
        for doc in docs {
            index_writer.add_document(doc).unwrap();
        }
@@ -780,7 +782,7 @@ mod tests {
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);

        let docs: Vec<Document> = vec![("b", 2), ("a", 2), ("c", 4)]
        let docs: Vec<TantivyDocument> = vec![("b", 2), ("a", 2), ("c", 4)]
            .into_iter()
            .flat_map(|(c, count)| {
                let facet = Facet::from(&format!("/facet/{}", c));
@@ -828,7 +830,7 @@ mod bench {
    use crate::collector::FacetCollector;
    use crate::query::AllQuery;
    use crate::schema::{Facet, Schema, INDEXED};
    use crate::Index;
    use crate::{Index, IndexWriter};

    #[bench]
    fn bench_facet_collector(b: &mut Bencher) {
@@ -847,7 +849,7 @@ mod bench {
        // 40425 docs
        docs[..].shuffle(&mut thread_rng());

        let mut index_writer = index.writer_for_tests().unwrap();
        let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
        for doc in docs {
            index_writer.add_document(doc).unwrap();
        }

@@ -12,8 +12,7 @@ use std::marker::PhantomData;
use columnar::{BytesColumn, Column, DynamicColumn, HasAssociatedColumnType};

use crate::collector::{Collector, SegmentCollector};
use crate::schema::Field;
use crate::{DocId, Score, SegmentReader, TantivyError};
use crate::{DocId, Score, SegmentReader};

/// The `FilterCollector` filters docs using a fast field value and a predicate.
///
@@ -50,13 +49,13 @@ use crate::{DocId, Score, SegmentReader, TantivyError};
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let no_filter_collector = FilterCollector::new(price, |value: u64| value > 20_120u64, TopDocs::with_limit(2));
/// let no_filter_collector = FilterCollector::new("price".to_string(), |value: u64| value > 20_120u64, TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &no_filter_collector)?;
///
/// assert_eq!(top_docs.len(), 1);
/// assert_eq!(top_docs[0].1, DocAddress::new(0, 1));
///
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, |value| value < 5u64, TopDocs::with_limit(2));
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new("price".to_string(), |value| value < 5u64, TopDocs::with_limit(2));
/// let filtered_top_docs = searcher.search(&query, &filter_all_collector)?;
///
/// assert_eq!(filtered_top_docs.len(), 0);
@@ -70,7 +69,7 @@ use crate::{DocId, Score, SegmentReader, TantivyError};
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue>
where TPredicate: 'static + Clone
{
    field: Field,
    field: String,
    collector: TCollector,
    predicate: TPredicate,
    t_predicate_value: PhantomData<TPredicateValue>,
@@ -83,7 +82,7 @@ where
    TPredicate: Fn(TPredicateValue) -> bool + Send + Sync + Clone,
{
    /// Create a new `FilterCollector`.
    pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self {
    pub fn new(field: String, predicate: TPredicate, collector: TCollector) -> Self {
        Self {
            field,
            predicate,
@@ -110,18 +109,7 @@ where
        segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> crate::Result<Self::Child> {
        let schema = segment_reader.schema();
        let field_entry = schema.get_field_entry(self.field);
        if !field_entry.is_fast() {
            return Err(TantivyError::SchemaError(format!(
                "Field {:?} is not a fast field.",
                field_entry.name()
            )));
        }

        let column_opt = segment_reader
            .fast_fields()
            .column_opt(field_entry.name())?;
        let column_opt = segment_reader.fast_fields().column_opt(&self.field)?;

        let segment_collector = self
            .collector
@@ -229,7 +217,7 @@ where
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let filter_collector = BytesFilterCollector::new(barcode, |bytes: &[u8]| bytes.starts_with(b"01"), TopDocs::with_limit(2));
/// let filter_collector = BytesFilterCollector::new("barcode".to_string(), |bytes: &[u8]| bytes.starts_with(b"01"), TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &filter_collector)?;
///
/// assert_eq!(top_docs.len(), 1);
@@ -240,7 +228,7 @@ where
pub struct BytesFilterCollector<TCollector, TPredicate>
where TPredicate: 'static + Clone
{
    field: Field,
    field: String,
    collector: TCollector,
    predicate: TPredicate,
}
@@ -251,7 +239,7 @@ where
    TPredicate: Fn(&[u8]) -> bool + Send + Sync + Clone,
{
    /// Create a new `BytesFilterCollector`.
    pub fn new(field: Field, predicate: TPredicate, collector: TCollector) -> Self {
    pub fn new(field: String, predicate: TPredicate, collector: TCollector) -> Self {
        Self {
            field,
            predicate,
@@ -274,10 +262,7 @@ where
        segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> crate::Result<Self::Child> {
        let schema = segment_reader.schema();
        let field_name = schema.get_field_name(self.field);

        let column_opt = segment_reader.fast_fields().bytes(field_name)?;
        let column_opt = segment_reader.fast_fields().bytes(&self.field)?;

        let segment_collector = self
            .collector
Some files were not shown because too many files have changed in this diff.