Compare commits

...

35 Commits

Author SHA1 Message Date
Adam Reichold
083aec5125 Provide StoreReader::enumerate to simplify creation of secondary indexes
For secondary indexes, it is often necessary to read all documents, compute
some function on them, and associate the result with a document ID.

Currently, this requires something like

```rust
let reader = segment.get_store_reader(1)?;

for doc_id in segment.doc_ids_alive() {
    let doc = reader.get(doc_id)?;

    // Use doc and doc_id here ...
}
```

which can be simplified to

```rust
let reader = segment.get_store_reader(1)?;

for res in reader.enumerate() {
    let (doc_id, doc) = res?;

    // Use doc and doc_id here ...
}
```

using the method proposed here.

(I added a new method instead of modifying `StoreReader::iter` to make the
change backwards compatible, i.e. possible to include in a point release.)
2024-05-23 08:30:30 +02:00
Paul Masurel
b806122c81 Fixing flaky test (#2407) 2024-05-22 10:10:55 +09:00
PSeitz
e1679f3fb9 compact doc (#2402)
* compact doc

* add any value type

* pass references when building CompactDoc

* remove OwnedValue from API

* clippy

* clippy

* fail on large documents

* fmt

* cleanup

* cleanup

* implement Value for different types

fix serde_json date Value implementation

* fmt

* cleanup

* fmt

* cleanup

* store positions instead of pos+len

* remove nodes array

* remove mediumvec

* cleanup

* infallible serialize into vec

* remove positions indirection

* remove 24MB limitation in document

use u32 for Addr
Remove the 3 byte addressing limitation and use VInt instead

* cleanup

* extend test

* cleanup, add comments

* rename, remove pub
2024-05-21 10:16:08 +02:00
dependabot[bot]
5a80420b10 --- (#2406)
updated-dependencies:
- dependency-name: binggan
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-21 04:36:32 +02:00
dependabot[bot]
aa26ff5029 Update binggan requirement from 0.6.2 to 0.7.0 (#2401)
---
updated-dependencies:
- dependency-name: binggan
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-17 02:53:25 +02:00
dependabot[bot]
e197b59258 Update itertools requirement from 0.12.0 to 0.13.0 (#2400)
Updates the requirements on [itertools](https://github.com/rust-itertools/itertools) to permit the latest version.
- [Changelog](https://github.com/rust-itertools/itertools/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-itertools/itertools/compare/v0.12.0...v0.13.0)

---
updated-dependencies:
- dependency-name: itertools
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-17 02:53:02 +02:00
PSeitz
5b7cca13e5 lower contention on AggregationLimits (#2394)
PR https://github.com/quickwit-oss/quickwit/pull/4962 fixes an issue
where the AggregationLimits are not passed correctly. Now that the
AggregationLimits are shared properly, we run into contention issues.

This PR includes some straightforward improvements to reduce contention,
by only calling into the shared limits when the memory usage changed and
by avoiding the second read.

We probably need some sharding with multiple counters, or local caching
that only updates the global counter after some threshold.
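
A minimal sketch of the local-caching idea suggested above, with made-up names and thresholds rather than tantivy's actual implementation: each worker accumulates a local delta and only updates the shared atomic counter once the delta crosses a threshold (a final flush when the worker finishes is omitted for brevity).

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

/// Per-worker tracker that batches updates to a shared memory counter.
struct LocalMemoryTracker {
    shared: Arc<AtomicU64>,
    local_delta: u64,
    flush_threshold: u64,
}

impl LocalMemoryTracker {
    fn new(shared: Arc<AtomicU64>, flush_threshold: u64) -> Self {
        LocalMemoryTracker { shared, local_delta: 0, flush_threshold }
    }

    /// Record additional memory usage; the shared counter is only touched
    /// once the locally accumulated delta crosses the threshold.
    fn add(&mut self, bytes: u64) {
        self.local_delta += bytes;
        if self.local_delta >= self.flush_threshold {
            self.shared.fetch_add(self.local_delta, Ordering::Relaxed);
            self.local_delta = 0;
        }
    }
}

fn main() {
    let shared = Arc::new(AtomicU64::new(0));
    let mut tracker = LocalMemoryTracker::new(Arc::clone(&shared), 1024);
    for _ in 0..100 {
        tracker.add(64);
    }
    // Most of the 6400 bytes were flushed in batches of at least 1024 bytes.
    println!("shared counter: {}", shared.load(Ordering::Relaxed));
}
```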
2024-05-15 12:25:40 +02:00
dependabot[bot]
a79590477e Update binggan requirement from 0.5.2 to 0.6.2 (#2399)
---
updated-dependencies:
- dependency-name: binggan
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-15 05:40:37 +02:00
Paul Masurel
6181c1eb5e Small changes in the Executor API. (#2391)
Warning: this change is mildly backward incompatible,
so I bumped tantivy's version.
2024-05-10 17:19:12 +09:00
Adam Reichold
1ee5f90761 Give allocation control to the caller instead of forcing a clone (#2389)
Achieved by moving the boxes out of the temporary reference wrappers, which are
themselves cloneable; if required, the caller can clone them or consume them to
reuse the existing allocations.
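
A hypothetical illustration of the pattern described above (the types are invented for the example, not tantivy's): the API hands out a cheaply cloneable reference wrapper, and the caller decides whether to clone it or consume it into an owned allocation.

```rust
/// Illustrative reference wrapper; cloning it is cheap, and the caller can
/// consume it to obtain an owned allocation only when actually needed.
#[derive(Clone)]
struct ValueRef<'a> {
    text: &'a str,
}

impl<'a> ValueRef<'a> {
    /// The allocation happens here, under the caller's control.
    fn into_owned(self) -> String {
        self.text.to_string()
    }
}

fn main() {
    let data = String::from("hello");
    let value_ref = ValueRef { text: &data };
    let cloned = value_ref.clone(); // cheap: copies only the reference wrapper
    let owned: String = cloned.into_owned(); // the data is cloned only here
    println!("{owned} / {}", value_ref.text);
}
```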
2024-05-09 16:01:13 +09:00
PSeitz
71f3b4e4e3 fix ReferenceValue API flaw (#2372)
* fix ReferenceValue API flaw

Remove `Facet` and `TokenizedString` values from the `ReferenceValue` API,
as keeping them would require the trait value to have them stored somewhere.

Since `TokenizedString` is quite niche, I just copy it into a `Box`
instead of designing a reference API around it.

* fix comment link
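
A hypothetical sketch of the trade-off described above (the types are illustrative, not tantivy's actual `ReferenceValue` definition): common cases stay borrowed, while the niche `TokenizedString` case is copied into a `Box` rather than getting a dedicated reference API.

```rust
/// Illustrative niche value type that is owned rather than borrowed.
struct TokenizedString {
    text: String,
}

/// Illustrative reference enum: common cases borrow, the niche case is boxed.
enum RefValue<'a> {
    Str(&'a str),
    U64(u64),
    Tokenized(Box<TokenizedString>),
}

fn main() {
    let owned = String::from("hello");
    let values = vec![
        RefValue::Str(&owned),
        RefValue::U64(42),
        RefValue::Tokenized(Box::new(TokenizedString { text: owned.clone() })),
    ];
    for value in &values {
        match value {
            RefValue::Str(s) => println!("str: {s}"),
            RefValue::U64(n) => println!("u64: {n}"),
            RefValue::Tokenized(t) => println!("tokenized: {}", t.text),
        }
    }
}
```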
2024-05-09 06:14:42 +02:00
trinity-1686a
8cd7ddc535 run block decompression from executor (#2386)
* run block decompression from executor

* add a wrapper with is_closed to oneshot channel

* add cancellation test to Executor::spawn_blocking
2024-05-08 12:22:44 +02:00
Paul Masurel
2b76335a95 Removed usage of num_cpus (#2387)
* Removed usage of num_cpus
* handling error
2024-05-08 13:32:52 +09:00
PSeitz
c6b213d8f0 use binggan for agg benchmark (#2378)
* use binggan for agg benchmark

use binggan for the agg benchmark, which includes memory consumption reporting

Output:
```
full
histogram                     Memory: 15.8 KB              Avg: 10.9322ms  (+5.44%)    Median: 10.8790ms  (+9.28%)     Min: 10.7470ms    Max: 11.3263ms
histogram_hard_bounds         Memory: 15.5 KB              Avg: 5.1939ms  (+6.61%)     Median: 5.1722ms  (+10.98%)     Min: 5.0432ms     Max: 5.3910ms
histogram_with_avg_sub_agg    Memory: 48.7 KB              Avg: 23.8165ms  (+4.57%)    Median: 23.7264ms  (+10.06%)    Min: 23.4995ms    Max: 24.8107ms
dense
histogram                     Memory: 17.3 KB              Avg: 15.6810ms  (-8.54%)    Median: 15.6174ms  (-8.89%)    Min: 15.4953ms    Max: 16.0702ms
histogram_hard_bounds         Memory: 15.4 KB              Avg: 10.0720ms  (-7.33%)    Median: 10.0572ms  (-7.06%)    Min: 9.8500ms     Max: 10.4819ms
histogram_with_avg_sub_agg    Memory: 50.1 KB              Avg: 33.0993ms  (-7.04%)    Median: 32.9499ms  (-6.86%)    Min: 32.8284ms    Max: 34.0529ms
sparse
histogram                     Memory: 16.3 KB              Avg: 19.2325ms  (-0.44%)    Median: 19.1211ms  (-1.26%)    Min: 19.0348ms    Max: 19.7902ms
histogram_hard_bounds         Memory: 16.1 KB              Avg: 18.5179ms  (-0.61%)    Median: 18.4552ms  (-0.90%)    Min: 18.3799ms    Max: 19.0535ms
histogram_with_avg_sub_agg    Memory: 34.7 KB              Avg: 21.2589ms  (-0.69%)    Median: 21.1867ms  (-1.05%)    Min: 21.0342ms    Max: 21.9900ms
```

* add more bench with term as sub agg
2024-05-07 11:29:49 +02:00
PSeitz
eea70030bf cleanup top level exports (#2382)
remove some top level exports
2024-05-07 09:59:41 +02:00
PSeitz
92b5526310 allow more JSON values, fix i64 special case (#2383)
This changes three things:
- Reuse the positions_per_path hashmap instead of allocating one per
  indexed JSON value
- Try to cast u64 values to i64 to align with search behaviour
- Allow top-level JSON values to be of any type, instead of limiting them
  to JSON objects. Remove the special JSON object handling method.

TODO: We should probably also try to convert f64 values to i64 or u64 when
indexing, as values may get converted to f64 by the JSON parser
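
A small standalone sketch of the u64-to-i64 cast mentioned above, assuming the `serde_json` crate and not taken from the PR itself: values that fit into i64 are stored as i64 so that indexing lines up with search behaviour.

```rust
/// Sketch only: store a JSON number as i64 when it fits, otherwise keep u64.
fn normalize_number(val: u64) -> serde_json::Value {
    match i64::try_from(val) {
        Ok(as_i64) => serde_json::Value::from(as_i64),
        Err(_) => serde_json::Value::from(val),
    }
}

fn main() {
    assert_eq!(normalize_number(42), serde_json::json!(42i64));
    assert_eq!(normalize_number(u64::MAX), serde_json::json!(u64::MAX));
    println!("ok");
}
```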
2024-05-01 12:08:12 +02:00
PSeitz
99a59ad37e remove zero byte check (#2379)
Remove zero byte checks in columnar; zero bytes are now converted during serialization.
Unify code paths.
Extend the test for expected column names.
2024-04-26 06:03:28 +02:00
trinity-1686a
6a66a71cbb modify fastfield range query heuristic (#2375) 2024-04-25 10:06:11 +02:00
PSeitz
ff40764204 make convert_to_fast_value_and_append_to_json_term pub (#2370)
* make convert_to_fast_value_and_append_to_json_term pub

* clippy
2024-04-23 04:05:41 +02:00
PSeitz
047da20b5b add json path constructor to term (#2367) 2024-04-22 12:23:35 +02:00
PSeitz
1417eaf3a7 fix coverage (#2368) 2024-04-22 12:23:15 +02:00
PSeitz
4f8493d2de improve document docs (#2359) 2024-04-22 12:05:16 +02:00
Paul Masurel
8861366137 Owned value relying on Vec instead of BTreeMap (#2364)
* Owned value relying on Vec instead of BTreeMap

* fmt

* fix build

* fix serialization

---------

Co-authored-by: Pascal Seitz <pascal.seitz@gmail.com>
2024-04-22 09:38:05 +02:00
PSeitz
0e9fced336 remove JsonTermWriter (#2238)
* remove JsonTermWriter

remove JsonTermWriter
remove path truncation logic, add assertion

* fix json_path_writer add sep logic
2024-04-18 16:28:05 +02:00
PSeitz
b257b960b3 validate sort by field type (#2336)
* validate sort by field type

* Update src/index/index.rs

Co-authored-by: Adam Reichold <adamreichold@users.noreply.github.com>

---------

Co-authored-by: Adam Reichold <adamreichold@users.noreply.github.com>
2024-04-16 04:42:24 +02:00
Adam Reichold
4708171a32 Fix some of the things current Clippy complains about (#2363) 2024-04-16 04:27:06 +02:00
Adam Reichold
b493743f8d Fix trait bound of StoreReader::iter (#2360)
* Fix trait bound of StoreReader::iter

Similar to `StoreReader::get`, `StoreReader::iter` should only require
`DocumentDeserialize` and not `Document`.

* Mark the iterator returned by SegmentReader::doc_ids_alive as Send so it can be used in impls of Stream/AsyncIterator.
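
A minimal illustration of why the `Send` marker matters, shown as assumed usage rather than code from this PR: only a `Send` iterator can be moved into another thread (or an async task).

```rust
use std::thread;

/// The `Send` bound is what allows the iterator to be moved to another thread.
fn consume_on_thread<I>(iter: I) -> thread::JoinHandle<u32>
where
    I: Iterator<Item = u32> + Send + 'static,
{
    thread::spawn(move || iter.sum())
}

fn main() {
    // Stand-in for the doc id iterator; the real one comes from a SegmentReader.
    let alive_doc_ids = (0u32..10).filter(|doc_id| doc_id % 2 == 0);
    let handle = consume_on_thread(alive_doc_ids);
    println!("sum of alive doc ids: {}", handle.join().unwrap());
}
```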
2024-04-15 15:50:02 +02:00
trinity-1686a
d2955a3fd2 extend field grouping (#2333)
* extend field grouping
2024-04-15 10:36:32 +02:00
PSeitz
17d5869ad6 update CHANGELOG, use github API in cliff (#2354)
* update CHANGELOG, use github API in cliff

* reset version to 0.21.1, before release

* chore: Release

* remove unreleased from CHANGELOG
2024-04-15 10:07:20 +02:00
PSeitz
dfa3aed32d check unsupported parameters top_hits (#2351)
* check unsupported parameters top_hits

* move to function
2024-04-10 08:20:52 +02:00
PSeitz
398817ce7b add index sorting deprecation warning (#2353)
* add index sorting deprecation warning

* remove deprecated IntOptions and DatePrecision
2024-04-10 08:09:09 +02:00
PSeitz
74940e9345 clippy (#2349)
* fix clippy

* fix clippy

* fix duplicate imports
2024-04-09 07:54:44 +02:00
PSeitz
1e9fc51535 update ahash (#2344) 2024-04-09 06:35:39 +02:00
PSeitz
92c32979d2 fix postcard compatibility for top_hits, add postcard test (#2346)
* fix postcard compatibility for top_hits, add postcard test

* fix top_hits naming, delay data fetch

closes #2347

* fix import
2024-04-09 06:17:25 +02:00
PSeitz
b644d78a32 fix null byte handling in JSON paths (#2345)
* fix null byte handling in JSON paths

closes https://github.com/quickwit-oss/tantivy/issues/2193
closes https://github.com/quickwit-oss/tantivy/issues/2340

* avoid repeated term truncation

* fix test

* Apply suggestions from code review

Co-authored-by: Paul Masurel <paul@quickwit.io>

* add comment

---------

Co-authored-by: Paul Masurel <paul@quickwit.io>
2024-04-05 09:53:35 +02:00
127 changed files with 2846 additions and 2041 deletions

View File

@@ -15,11 +15,11 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Install Rust
run: rustup toolchain install nightly-2023-09-10 --profile minimal --component llvm-tools-preview
run: rustup toolchain install nightly-2024-04-10 --profile minimal --component llvm-tools-preview
- uses: Swatinem/rust-cache@v2
- uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo +nightly-2023-09-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
run: cargo +nightly-2024-04-10 llvm-cov --all-features --workspace --doctests --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
continue-on-error: true

View File

@@ -1,3 +1,65 @@
Tantivy 0.22
================================
Tantivy 0.22 will be able to read indices created with Tantivy 0.21.
#### Bugfixes
- Fix null byte handling in JSON paths (null bytes in json keys caused panic during indexing) [#2345](https://github.com/quickwit-oss/tantivy/pull/2345)(@PSeitz)
- Fix bug that can cause `get_docids_for_value_range` to panic. [#2295](https://github.com/quickwit-oss/tantivy/pull/2295)(@fulmicoton)
- Avoid single-document indices by increasing the minimum indexing memory to 15MB [#2176](https://github.com/quickwit-oss/tantivy/pull/2176)(@PSeitz)
- Fix merge panic for JSON fields [#2284](https://github.com/quickwit-oss/tantivy/pull/2284)(@PSeitz)
- Fix bug occurring when merging JSON object indexed with positions. [#2253](https://github.com/quickwit-oss/tantivy/pull/2253)(@fulmicoton)
- Fix empty DateHistogram gap bug [#2183](https://github.com/quickwit-oss/tantivy/pull/2183)(@PSeitz)
- Fix range query end check (fields with fewer than one value per doc are affected) [#2226](https://github.com/quickwit-oss/tantivy/pull/2226)(@PSeitz)
- Handle exclusive out of bounds ranges on fastfield range queries [#2174](https://github.com/quickwit-oss/tantivy/pull/2174)(@PSeitz)
#### Breaking API Changes
- rename ReloadPolicy onCommit to onCommitWithDelay [#2235](https://github.com/quickwit-oss/tantivy/pull/2235)(@giovannicuccu)
- Move exports from the root into modules [#2220](https://github.com/quickwit-oss/tantivy/pull/2220)(@PSeitz)
- Accept field name instead of `Field` in FilterCollector [#2196](https://github.com/quickwit-oss/tantivy/pull/2196)(@PSeitz)
- remove deprecated IntOptions and DatePrecision [#2353](https://github.com/quickwit-oss/tantivy/pull/2353)(@PSeitz)
#### Features/Improvements
- Tantivy documents as a trait: Index data directly without converting to tantivy types first [#2071](https://github.com/quickwit-oss/tantivy/pull/2071)(@ChillFish8)
- encode some part of posting list as -1 instead of direct values (smaller inverted indices) [#2185](https://github.com/quickwit-oss/tantivy/pull/2185)(@trinity-1686a)
- **Aggregation**
- Support to deserialize f64 from string [#2311](https://github.com/quickwit-oss/tantivy/pull/2311)(@PSeitz)
- Add a top_hits aggregator [#2198](https://github.com/quickwit-oss/tantivy/pull/2198)(@ditsuke)
- Support bool type in term aggregation [#2318](https://github.com/quickwit-oss/tantivy/pull/2318)(@PSeitz)
- Support ip addresses in term aggregation [#2319](https://github.com/quickwit-oss/tantivy/pull/2319)(@PSeitz)
- Support date type in term aggregation [#2172](https://github.com/quickwit-oss/tantivy/pull/2172)(@PSeitz)
- Support escaped dot when addressing field [#2250](https://github.com/quickwit-oss/tantivy/pull/2250)(@PSeitz)
- Add ExistsQuery to check documents that have a value [#2160](https://github.com/quickwit-oss/tantivy/pull/2160)(@imotov)
- Expose TopDocs::order_by_u64_field again [#2282](https://github.com/quickwit-oss/tantivy/pull/2282)(@ditsuke)
- **Memory/Performance**
- Faster TopN: replace BinaryHeap with TopNComputer [#2186](https://github.com/quickwit-oss/tantivy/pull/2186)(@PSeitz)
- reduce number of allocations during indexing [#2257](https://github.com/quickwit-oss/tantivy/pull/2257)(@PSeitz)
- Less Memory while indexing: docid deltas while indexing [#2249](https://github.com/quickwit-oss/tantivy/pull/2249)(@PSeitz)
- Faster indexing: use term hashmap in fastfield [#2243](https://github.com/quickwit-oss/tantivy/pull/2243)(@PSeitz)
- term hashmap remove copy in is_empty, unused unordered_id [#2229](https://github.com/quickwit-oss/tantivy/pull/2229)(@PSeitz)
- add method to fetch block of first values in columnar [#2330](https://github.com/quickwit-oss/tantivy/pull/2330)(@PSeitz)
- Faster aggregations: add fast path for full columns in fetch_block [#2328](https://github.com/quickwit-oss/tantivy/pull/2328)(@PSeitz)
- Faster sstable loading: use fst for sstable index [#2268](https://github.com/quickwit-oss/tantivy/pull/2268)(@trinity-1686a)
- **QueryParser**
- allow newline where we allow space in query parser [#2302](https://github.com/quickwit-oss/tantivy/pull/2302)(@trinity-1686a)
- allow some mixing of occur and bool in strict query parser [#2323](https://github.com/quickwit-oss/tantivy/pull/2323)(@trinity-1686a)
- handle * inside term in lenient query parser [#2228](https://github.com/quickwit-oss/tantivy/pull/2228)(@trinity-1686a)
- add support for exists query syntax in query parser [#2170](https://github.com/quickwit-oss/tantivy/pull/2170)(@trinity-1686a)
- Add shared search executor [#2312](https://github.com/quickwit-oss/tantivy/pull/2312)(@MochiXu)
- Truncate keys to u16::MAX in term hashmap [#2299](https://github.com/quickwit-oss/tantivy/pull/2299)(@PSeitz)
- report if a term matched when warming up posting list [#2309](https://github.com/quickwit-oss/tantivy/pull/2309)(@trinity-1686a)
- Support json fields in FuzzyTermQuery [#2173](https://github.com/quickwit-oss/tantivy/pull/2173)(@PingXia-at)
- Read list of fields encoded in term dictionary for JSON fields [#2184](https://github.com/quickwit-oss/tantivy/pull/2184)(@PSeitz)
- add collect_block to BoxableSegmentCollector [#2331](https://github.com/quickwit-oss/tantivy/pull/2331)(@PSeitz)
- expose collect_block buffer size [#2326](https://github.com/quickwit-oss/tantivy/pull/2326)(@PSeitz)
- Forward regex parser errors [#2288](https://github.com/quickwit-oss/tantivy/pull/2288)(@adamreichold)
- Make FacetCounts defaultable and cloneable. [#2322](https://github.com/quickwit-oss/tantivy/pull/2322)(@adamreichold)
- Derive Debug for SchemaBuilder [#2254](https://github.com/quickwit-oss/tantivy/pull/2254)(@GodTamIt)
- add missing inlines to tantivy options [#2245](https://github.com/quickwit-oss/tantivy/pull/2245)(@PSeitz)
Tantivy 0.21.1
================================
#### Bugfixes

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.22.0-dev"
version = "0.23.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -11,16 +11,20 @@ repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2021"
rust-version = "1.62"
rust-version = "1.63"
exclude = ["benches/*.json", "benches/*.txt"]
[dependencies]
oneshot = "0.1.5"
# Switch back to the non-forked oneshot crate once https://github.com/faern/oneshot/pull/35 is merged
oneshot = { git = "https://github.com/fulmicoton/oneshot.git", rev = "c10a3ba" }
base64 = "0.22.0"
byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
regex = { version = "1.5.5", default-features = false, features = [
"std",
"unicode",
] }
aho-corasick = "1.0"
tantivy-fst = "0.5"
memmap2 = { version = "0.9.0", optional = true }
@@ -30,14 +34,15 @@ tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
fs4 = { version = "0.8.0", optional = true }
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker4x"] }
bitpacking = { version = "0.9.2", default-features = false, features = [
"bitpacker4x",
] }
census = "0.4.2"
rustc-hash = "1.1.0"
thiserror = "1.0.30"
@@ -48,17 +53,17 @@ smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.12.0"
fastdivide = "0.4.0"
itertools = "0.12.0"
itertools = "0.13.0"
measure_time = "0.8.2"
arc-swap = "1.5.0"
columnar = { version= "0.2", path="./columnar", package ="tantivy-columnar" }
sstable = { version= "0.2", path="./sstable", package ="tantivy-sstable", optional = true }
stacker = { version= "0.2", path="./stacker", package ="tantivy-stacker" }
query-grammar = { version= "0.21.0", path="./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version= "0.5", path="./bitpacker" }
common = { version= "0.6", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version= "0.2", path="./tokenizer-api", package="tantivy-tokenizer-api" }
columnar = { version = "0.3", path = "./columnar", package = "tantivy-columnar" }
sstable = { version = "0.3", path = "./sstable", package = "tantivy-sstable", optional = true }
stacker = { version = "0.3", path = "./stacker", package = "tantivy-stacker" }
query-grammar = { version = "0.22.0", path = "./query-grammar", package = "tantivy-query-grammar" }
tantivy-bitpacker = { version = "0.6", path = "./bitpacker" }
common = { version = "0.7", path = "./common/", package = "tantivy-common" }
tokenizer-api = { version = "0.3", path = "./tokenizer-api", package = "tantivy-tokenizer-api" }
sketches-ddsketch = { version = "0.2.1", features = ["use_serde"] }
futures-util = { version = "0.3.28", optional = true }
fnv = "1.0.7"
@@ -67,6 +72,7 @@ fnv = "1.0.7"
winapi = "0.3.9"
[dev-dependencies]
binggan = "0.8.0"
rand = "0.8.5"
maplit = "1.0.2"
matches = "0.1.9"
@@ -78,6 +84,9 @@ paste = "1.0.11"
more-asserts = "0.3.1"
rand_distr = "0.4.3"
time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
postcard = { version = "1.0.4", features = [
"use-std",
], default-features = false }
[target.'cfg(not(windows))'.dev-dependencies]
criterion = { version = "0.5", default-features = false }
@@ -109,17 +118,26 @@ lz4-compression = ["lz4_flex"]
zstd-compression = ["zstd"]
failpoints = ["fail", "fail/failpoints"]
unstable = [] # useful for benches.
unstable = [] # useful for benches.
quickwit = ["sstable", "futures-util"]
# Compares only the hash of a string when indexing data.
# Compares only the hash of a string when indexing data.
# Increases indexing speed, but may lead to extremely rare missing terms, when there's a hash collision.
# Uses 64bit ahash.
compare_hash_only = ["stacker/compare_hash_only"]
[workspace]
members = ["query-grammar", "bitpacker", "common", "ownedbytes", "stacker", "sstable", "tokenizer-api", "columnar"]
members = [
"query-grammar",
"bitpacker",
"common",
"ownedbytes",
"stacker",
"sstable",
"tokenizer-api",
"columnar",
]
# Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points
@@ -140,3 +158,7 @@ harness = false
[[bench]]
name = "index-bench"
harness = false
[[bench]]
name = "agg_bench"
harness = false

413
benches/agg_bench.rs Normal file
View File

@@ -0,0 +1,413 @@
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand_distr::Distribution;
use serde_json::json;
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use tantivy::{doc, Index, Term};
#[global_allocator]
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;
/// Mini macro to register a function via its name
/// runner.register("average_u64", move |index| average_u64(index));
macro_rules! register {
($runner:expr, $func:ident) => {
$runner.register(stringify!($func), move |index| $func(index))
};
}
fn main() {
let inputs = vec![
("full", get_test_index_bench(Cardinality::Full).unwrap()),
(
"dense",
get_test_index_bench(Cardinality::OptionalDense).unwrap(),
),
(
"sparse",
get_test_index_bench(Cardinality::OptionalSparse).unwrap(),
),
(
"multivalue",
get_test_index_bench(Cardinality::Multivalued).unwrap(),
),
];
bench_agg(InputGroup::new_with_inputs(inputs));
}
fn bench_agg(mut group: InputGroup<Index>) {
group.set_alloc(GLOBAL); // Set the peak mem allocator. This will enable peak memory reporting.
register!(group, average_u64);
register!(group, average_f64);
register!(group, average_f64_u64);
register!(group, stats_f64);
register!(group, percentiles_f64);
register!(group, terms_few);
register!(group, terms_many);
register!(group, terms_many_order_by_term);
register!(group, terms_many_with_top_hits);
register!(group, terms_many_with_avg_sub_agg);
register!(group, terms_many_json_mixed_type_with_sub_agg_card);
register!(group, range_agg);
register!(group, range_agg_with_avg_sub_agg);
register!(group, range_agg_with_term_agg_few);
register!(group, range_agg_with_term_agg_many);
register!(group, histogram);
register!(group, histogram_hard_bounds);
register!(group, histogram_with_avg_sub_agg);
register!(group, avg_and_range_with_avg_sub_agg);
group.run();
}
fn exec_term_with_agg(index: &Index, agg_req: serde_json::Value) {
let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let collector = get_collector(agg_req);
let searcher = reader.searcher();
black_box(searcher.search(&term_query, &collector).unwrap());
}
fn average_u64(index: &Index) {
let agg_req = json!({
"average": { "avg": { "field": "score", } }
});
exec_term_with_agg(index, agg_req)
}
fn average_f64(index: &Index) {
let agg_req = json!({
"average": { "avg": { "field": "score_f64", } }
});
exec_term_with_agg(index, agg_req)
}
fn average_f64_u64(index: &Index) {
let agg_req = json!({
"average_f64": { "avg": { "field": "score_f64" } },
"average": { "avg": { "field": "score" } },
});
exec_term_with_agg(index, agg_req)
}
fn stats_f64(index: &Index) {
let agg_req = json!({
"average_f64": { "stats": { "field": "score_f64", } }
});
exec_term_with_agg(index, agg_req)
}
fn percentiles_f64(index: &Index) {
let agg_req = json!({
"mypercentiles": {
"percentiles": {
"field": "score_f64",
"percents": [ 95, 99, 99.9 ]
}
}
});
execute_agg(index, agg_req);
}
fn terms_few(index: &Index) {
let agg_req = json!({
"my_texts": { "terms": { "field": "text_few_terms" } },
});
execute_agg(index, agg_req);
}
fn terms_many(index: &Index) {
let agg_req = json!({
"my_texts": { "terms": { "field": "text_many_terms" } },
});
execute_agg(index, agg_req);
}
fn terms_many_order_by_term(index: &Index) {
let agg_req = json!({
"my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
});
execute_agg(index, agg_req);
}
fn terms_many_with_top_hits(index: &Index) {
let agg_req = json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"top_hits": { "top_hits":
{
"sort": [
{ "score": "desc" }
],
"size": 2,
"doc_value_fields": ["score_f64"]
}
}
}
},
});
execute_agg(index, agg_req);
}
fn terms_many_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
});
execute_agg(index, agg_req);
}
fn terms_many_json_mixed_type_with_sub_agg_card(index: &Index) {
let agg_req = json!({
"my_texts": {
"terms": { "field": "json.mixed_type" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
});
execute_agg(index, agg_req);
}
fn execute_agg(index: &Index, agg_req: serde_json::Value) {
let agg_req: Aggregations = serde_json::from_value(agg_req).unwrap();
let collector = get_collector(agg_req);
let reader = index.reader().unwrap();
let searcher = reader.searcher();
black_box(searcher.search(&AllQuery, &collector).unwrap());
}
fn range_agg(index: &Index) {
let agg_req = json!({
"range_f64": { "range": { "field": "score_f64", "ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
] } },
});
execute_agg(index, agg_req);
}
fn range_agg_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
});
execute_agg(index, agg_req);
}
fn range_agg_with_term_agg_few(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"my_texts": { "terms": { "field": "text_few_terms" } },
}
},
});
execute_agg(index, agg_req);
}
fn range_agg_with_term_agg_many(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"my_texts": { "terms": { "field": "text_many_terms" } },
}
},
});
execute_agg(index, agg_req);
}
fn histogram(index: &Index) {
let agg_req = json!({
"rangef64": {
"histogram": {
"field": "score_f64",
"interval": 100 // 1000 buckets
},
}
});
execute_agg(index, agg_req);
}
fn histogram_hard_bounds(index: &Index) {
let agg_req = json!({
"rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
});
execute_agg(index, agg_req);
}
fn histogram_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"rangef64": {
"histogram": { "field": "score_f64", "interval": 100 },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
}
});
execute_agg(index, agg_req);
}
fn avg_and_range_with_avg_sub_agg(index: &Index) {
let agg_req = json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 60000 }
]
},
"aggs": {
"average_in_range": { "avg": { "field": "score" } }
}
},
"average": { "avg": { "field": "score" } }
});
execute_agg(index, agg_req);
}
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Cardinality {
/// All documents contain exactly one value.
/// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
#[default]
Full = 0,
/// All documents contain at most one value.
OptionalDense = 1,
/// All documents may contain any number of values.
Multivalued = 2,
/// 1 / 20 documents has a value
OptionalSparse = 3,
}
fn get_collector(agg_req: Aggregations) -> AggregationCollector {
AggregationCollector::from_aggs(agg_req, Default::default())
}
fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
let mut schema_builder = Schema::builder();
let text_fieldtype = tantivy::schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let json_field = schema_builder.add_json_field("json", FAST);
let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
let score_fieldtype = tantivy::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
let index = Index::create_from_tempdir(schema_builder.build())?;
let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
let many_terms_data = (0..150_000)
.map(|num| format!("author{}", num))
.collect::<Vec<_>>();
{
let mut rng = StdRng::from_seed([1u8; 32]);
let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
// To make the different test cases comparable we just change one doc to force the
// cardinality
if cardinality == Cardinality::OptionalDense {
index_writer.add_document(doc!())?;
}
if cardinality == Cardinality::Multivalued {
index_writer.add_document(doc!(
json_field => json!({"mixed_type": 10.0}),
json_field => json!({"mixed_type": 10.0}),
text_field => "cool",
text_field => "cool",
text_field_many_terms => "cool",
text_field_many_terms => "cool",
text_field_few_terms => "cool",
text_field_few_terms => "cool",
score_field => 1u64,
score_field => 1u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => 1i64,
score_field_i64 => 1i64,
))?;
}
let mut doc_with_value = 1_000_000;
if cardinality == Cardinality::OptionalSparse {
doc_with_value /= 20;
}
let _val_max = 1_000_000.0;
for _ in 0..doc_with_value {
let val: f64 = rng.gen_range(0.0..1_000_000.0);
let json = if rng.gen_bool(0.1) {
// 10% are numeric values
json!({ "mixed_type": val })
} else {
json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
};
index_writer.add_document(doc!(
text_field => "cool",
json_field => json,
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
score_field => val as u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => val as i64,
))?;
if cardinality == Cardinality::OptionalSparse {
for _ in 0..20 {
index_writer.add_document(doc!(text_field => "cool"))?;
}
}
}
// writing the segment
index_writer.commit()?;
}
Ok(index)
}

View File

@@ -18,7 +18,7 @@ fn benchmark(
benchmark_dynamic_json(b, input, schema, commit, parse_json)
} else {
_benchmark(b, input, schema, commit, parse_json, |schema, doc_json| {
TantivyDocument::parse_json(&schema, doc_json).unwrap()
TantivyDocument::parse_json(schema, doc_json).unwrap()
})
}
}
@@ -90,8 +90,7 @@ fn benchmark_dynamic_json(
) {
let json_field = schema.get_field("json").unwrap();
_benchmark(b, input, schema, commit, parse_json, |_schema, doc_json| {
let json_val: serde_json::Map<String, serde_json::Value> =
serde_json::from_str(doc_json).unwrap();
let json_val: serde_json::Value = serde_json::from_str(doc_json).unwrap();
tantivy::doc!(json_field=>json_val)
})
}
@@ -138,12 +137,13 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
for (prefix, schema, is_dynamic) in benches {
for commit in [false, true] {
let suffix = if commit { "with-commit" } else { "no-commit" };
for parse_json in [false] {
{
let parse_json = false;
// for parse_json in [false, true] {
let suffix = if parse_json {
format!("{}-with-json-parsing", suffix)
} else {
format!("{}", suffix)
suffix.to_string()
};
let bench_name = format!("{}{}", prefix, suffix);

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy-bitpacker"
version = "0.5.0"
version = "0.6.0"
edition = "2021"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"

View File

@@ -1,6 +1,10 @@
# configuration file for git-cliff{ pattern = "foo", replace = "bar"}
# see https://github.com/orhun/git-cliff#configuration-file
[remote.github]
owner = "quickwit-oss"
repo = "tantivy"
[changelog]
# changelog header
header = """
@@ -8,15 +12,43 @@ header = """
# template for the changelog body
# https://tera.netlify.app/docs/#introduction
body = """
{% if version %}\
{{ version | trim_start_matches(pat="v") }} ({{ timestamp | date(format="%Y-%m-%d") }})
==================
{% else %}\
## [unreleased]
{% endif %}\
## What's Changed
{%- if version %} in {{ version }}{%- endif -%}
{% for commit in commits %}
- {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | split(pat="\n") | first | trim | upper_first }}(@{{ commit.author.name }})\
{% endfor %}
{% if commit.github.pr_title -%}
{%- set commit_message = commit.github.pr_title -%}
{%- else -%}
{%- set commit_message = commit.message -%}
{%- endif -%}
- {{ commit_message | split(pat="\n") | first | trim }}\
{% if commit.github.pr_number %} \
[#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}){% if commit.github.username %}(@{{ commit.github.username }}){%- endif -%} \
{%- endif %}
{%- endfor -%}
{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
{% raw %}\n{% endraw -%}
## New Contributors
{%- endif %}\
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
* @{{ contributor.username }} made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
{%- endfor -%}
{% if version %}
{% if previous.version %}
**Full Changelog**: {{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}
{% endif %}
{% else -%}
{% raw %}\n{% endraw %}
{% endif %}
{%- macro remote_url() -%}
https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}
"""
# remove the leading and trailing whitespace from the template
trim = true
@@ -25,53 +57,24 @@ footer = """
"""
postprocessors = [
{ pattern = 'Paul Masurel', replace = "fulmicoton"}, # replace with github user
{ pattern = 'PSeitz', replace = "PSeitz"}, # replace with github user
{ pattern = 'Adam Reichold', replace = "adamreichold"}, # replace with github user
{ pattern = 'trinity-1686a', replace = "trinity-1686a"}, # replace with github user
{ pattern = 'Michael Kleen', replace = "mkleen"}, # replace with github user
{ pattern = 'Adrien Guillo', replace = "guilload"}, # replace with github user
{ pattern = 'François Massot', replace = "fmassot"}, # replace with github user
{ pattern = 'Naveen Aiathurai', replace = "naveenann"}, # replace with github user
{ pattern = '', replace = ""}, # replace with github user
]
[git]
# parse the commits based on https://www.conventionalcommits.org
# This is required or commit.message contains the whole commit message and not just the title
conventional_commits = true
conventional_commits = false
# filter out the commits that are not conventional
filter_unconventional = false
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for preprocessing the commit messages
commit_preprocessors = [
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "[#${2}](https://github.com/quickwit-oss/tantivy/issues/${2})"}, # replace issue numbers
{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = ""},
]
#link_parsers = [
#{ pattern = "#(\\d+)", href = "https://github.com/quickwit-oss/tantivy/pulls/$1"},
#]
# regex for parsing and grouping commits
commit_parsers = [
{ message = "^feat", group = "Features"},
{ message = "^fix", group = "Bug Fixes"},
{ message = "^doc", group = "Documentation"},
{ message = "^perf", group = "Performance"},
{ message = "^refactor", group = "Refactor"},
{ message = "^style", group = "Styling"},
{ message = "^test", group = "Testing"},
{ message = "^chore\\(release\\): prepare for", skip = true},
{ message = "(?i)clippy", skip = true},
{ message = "(?i)dependabot", skip = true},
{ message = "(?i)fmt", skip = true},
{ message = "(?i)bump", skip = true},
{ message = "(?i)readme", skip = true},
{ message = "(?i)comment", skip = true},
{ message = "(?i)spelling", skip = true},
{ message = "^chore", group = "Miscellaneous Tasks"},
{ body = ".*security", group = "Security"},
{ message = ".*", group = "Other", default_scope = "other"},
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy-columnar"
version = "0.2.0"
version = "0.3.0"
edition = "2021"
license = "MIT"
homepage = "https://github.com/quickwit-oss/tantivy"
@@ -9,13 +9,13 @@ description = "column oriented storage for tantivy"
categories = ["database-implementations", "data-structures", "compression"]
[dependencies]
itertools = "0.12.0"
itertools = "0.13.0"
fastdivide = "0.4.0"
stacker = { version= "0.2", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.2", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.6", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.5", path = "../bitpacker/" }
stacker = { version= "0.3", path = "../stacker", package="tantivy-stacker"}
sstable = { version= "0.3", path = "../sstable", package = "tantivy-sstable" }
common = { version= "0.7", path = "../common", package = "tantivy-common" }
tantivy-bitpacker = { version= "0.6", path = "../bitpacker/" }
serde = "1.0.152"
downcast-rs = "1.2.0"

View File

@@ -140,7 +140,7 @@ mod tests {
#[test]
fn test_merge_column_index_optional_shuffle() {
let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
let column_indexes = vec![optional_index, ColumnIndex::Full];
let column_indexes = [optional_index, ColumnIndex::Full];
let row_addrs = vec![
RowAddr {
segment_ord: 0u32,

View File

@@ -75,7 +75,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
let out_and_idx_chunks = output
.chunks_exact_mut(4)
.into_remainder()
.into_iter()
.iter_mut()
.zip(indexes.chunks_exact(4).remainder());
for (out, idx) in out_and_idx_chunks {
*out = self.get_val(*idx);
@@ -102,7 +102,7 @@ pub trait ColumnValues<T: PartialOrd = u64>: Send + Sync + DowncastSync {
let out_and_idx_chunks = output
.chunks_exact_mut(4)
.into_remainder()
.into_iter()
.iter_mut()
.zip(indexes.chunks_exact(4).remainder());
for (out, idx) in out_and_idx_chunks {
*out = Some(self.get_val(*idx));

View File

@@ -148,7 +148,7 @@ impl CompactSpace {
.binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
// Correctness: Overflow. The first range starts at compact space 0, the error from
// binary search can never be 0
.map_or_else(|e| e - 1, |v| v);
.unwrap_or_else(|e| e - 1);
let range_mapping = &self.ranges_mapping[pos];
let diff = compact - range_mapping.compact_start;

View File

@@ -59,22 +59,6 @@ pub struct ColumnarWriter {
buffers: SpareBuffers,
}
#[inline]
fn mutate_or_create_column<V, TMutator>(
arena_hash_map: &mut ArenaHashMap,
column_name: &str,
updater: TMutator,
) where
V: Copy + 'static,
TMutator: FnMut(Option<V>) -> V,
{
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
arena_hash_map.mutate_or_create(column_name.as_bytes(), updater);
}
impl ColumnarWriter {
pub fn mem_usage(&self) -> usize {
self.arena.mem_usage()
@@ -175,9 +159,8 @@ impl ColumnarWriter {
},
&mut self.dictionaries,
);
mutate_or_create_column(
hash_map,
column_name,
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<StrOrBytesColumnWriter>| {
let mut column_writer = if let Some(column_writer) = column_opt {
column_writer
@@ -192,24 +175,21 @@ impl ColumnarWriter {
);
}
ColumnType::Bool => {
mutate_or_create_column(
&mut self.bool_field_hash_map,
column_name,
self.bool_field_hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::DateTime => {
mutate_or_create_column(
&mut self.datetime_field_hash_map,
column_name,
self.datetime_field_hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
);
}
ColumnType::I64 | ColumnType::F64 | ColumnType::U64 => {
let numerical_type = column_type.numerical_type().unwrap();
mutate_or_create_column(
&mut self.numerical_field_hash_map,
column_name,
self.numerical_field_hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.force_numerical_type(numerical_type);
@@ -217,9 +197,8 @@ impl ColumnarWriter {
},
);
}
ColumnType::IpAddr => mutate_or_create_column(
&mut self.ip_addr_field_hash_map,
column_name,
ColumnType::IpAddr => self.ip_addr_field_hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| column_opt.unwrap_or_default(),
),
}
@@ -232,9 +211,8 @@ impl ColumnarWriter {
numerical_value: T,
) {
let (hash_map, arena) = (&mut self.numerical_field_hash_map, &mut self.arena);
mutate_or_create_column(
hash_map,
column_name,
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<NumericalColumnWriter>| {
let mut column: NumericalColumnWriter = column_opt.unwrap_or_default();
column.record_numerical_value(doc, numerical_value.into(), arena);
@@ -244,10 +222,6 @@ impl ColumnarWriter {
}
pub fn record_ip_addr(&mut self, doc: RowId, column_name: &str, ip_addr: Ipv6Addr) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena) = (&mut self.ip_addr_field_hash_map, &mut self.arena);
hash_map.mutate_or_create(
column_name.as_bytes(),
@@ -261,24 +235,30 @@ impl ColumnarWriter {
pub fn record_bool(&mut self, doc: RowId, column_name: &str, val: bool) {
let (hash_map, arena) = (&mut self.bool_field_hash_map, &mut self.arena);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
});
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(doc, val, arena);
column
},
);
}
pub fn record_datetime(&mut self, doc: RowId, column_name: &str, datetime: common::DateTime) {
let (hash_map, arena) = (&mut self.datetime_field_hash_map, &mut self.arena);
mutate_or_create_column(hash_map, column_name, |column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(
doc,
NumericalValue::I64(datetime.into_timestamp_nanos()),
arena,
);
column
});
hash_map.mutate_or_create(
column_name.as_bytes(),
|column_opt: Option<ColumnWriter>| {
let mut column: ColumnWriter = column_opt.unwrap_or_default();
column.record(
doc,
NumericalValue::I64(datetime.into_timestamp_nanos()),
arena,
);
column
},
);
}
pub fn record_str(&mut self, doc: RowId, column_name: &str, value: &str) {
@@ -303,10 +283,6 @@ impl ColumnarWriter {
}
pub fn record_bytes(&mut self, doc: RowId, column_name: &str, value: &[u8]) {
assert!(
!column_name.as_bytes().contains(&0u8),
"key may not contain the 0 byte"
);
let (hash_map, arena, dictionaries) = (
&mut self.bytes_field_hash_map,
&mut self.arena,

View File

@@ -18,7 +18,12 @@ pub struct ColumnarSerializer<W: io::Write> {
/// code.
fn prepare_key(key: &[u8], column_type: ColumnType, buffer: &mut Vec<u8>) {
buffer.clear();
buffer.extend_from_slice(key);
// Convert 0 bytes to '0' string, as 0 bytes are reserved for the end of the path.
if key.contains(&0u8) {
buffer.extend(key.iter().map(|&b| if b == 0 { b'0' } else { b }));
} else {
buffer.extend_from_slice(key);
}
buffer.push(0u8);
buffer.push(column_type.to_code());
}
@@ -102,7 +107,7 @@ mod tests {
let mut buffer: Vec<u8> = b"somegarbage".to_vec();
prepare_key(b"root\0child", ColumnType::Str, &mut buffer);
assert_eq!(buffer.len(), 12);
assert_eq!(&buffer[..10], b"root\0child");
assert_eq!(&buffer[..10], b"root0child");
assert_eq!(buffer[10], 0u8);
assert_eq!(buffer[11], ColumnType::Str.to_code());
}

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy-common"
version = "0.6.0"
version = "0.7.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2021"
@@ -14,7 +14,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
[dependencies]
byteorder = "1.4.3"
ownedbytes = { version= "0.6", path="../ownedbytes" }
ownedbytes = { version= "0.7", path="../ownedbytes" }
async-trait = "0.1"
time = { version = "0.3.10", features = ["serde-well-known"] }
serde = { version = "1.0.136", features = ["derive"] }

View File

@@ -1,5 +1,5 @@
use std::io::Write;
use std::{fmt, io, u64};
use std::{fmt, io};
use ownedbytes::OwnedBytes;

View File

@@ -1,5 +1,3 @@
#![allow(deprecated)]
use std::fmt;
use std::io::{Read, Write};
@@ -27,9 +25,6 @@ pub enum DateTimePrecision {
Nanoseconds,
}
#[deprecated(since = "0.20.0", note = "Use `DateTimePrecision` instead")]
pub type DatePrecision = DateTimePrecision;
/// A date/time value with nanoseconds precision.
///
/// This timestamp does not carry any explicit time zone information.
@@ -40,7 +35,7 @@ pub type DatePrecision = DateTimePrecision;
/// All constructors and conversions are provided as explicit
/// functions and not by implementing any `From`/`Into` traits
/// to prevent unintended usage.
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Clone, Default, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct DateTime {
// Timestamp in nanoseconds.
pub(crate) timestamp_nanos: i64,

View File

@@ -5,6 +5,12 @@ pub const JSON_PATH_SEGMENT_SEP: u8 = 1u8;
pub const JSON_PATH_SEGMENT_SEP_STR: &str =
unsafe { std::str::from_utf8_unchecked(&[JSON_PATH_SEGMENT_SEP]) };
/// Separates the json path and the value in
/// a JSON term binary representation.
pub const JSON_END_OF_PATH: u8 = 0u8;
pub const JSON_END_OF_PATH_STR: &str =
unsafe { std::str::from_utf8_unchecked(&[JSON_END_OF_PATH]) };
/// Create a new JsonPathWriter, that creates flattened json paths for tantivy.
#[derive(Clone, Debug, Default)]
pub struct JsonPathWriter {
@@ -14,6 +20,14 @@ pub struct JsonPathWriter {
}
impl JsonPathWriter {
pub fn with_expand_dots(expand_dots: bool) -> Self {
JsonPathWriter {
path: String::new(),
indices: Vec::new(),
expand_dots,
}
}
pub fn new() -> Self {
JsonPathWriter {
path: String::new(),
@@ -39,8 +53,8 @@ impl JsonPathWriter {
pub fn push(&mut self, segment: &str) {
let len_path = self.path.len();
self.indices.push(len_path);
if !self.path.is_empty() {
self.path.push_str(JSON_PATH_SEGMENT_SEP_STR);
if self.indices.len() > 1 {
self.path.push(JSON_PATH_SEGMENT_SEP as char);
}
self.path.push_str(segment);
if self.expand_dots {
@@ -55,6 +69,12 @@ impl JsonPathWriter {
}
}
/// Set the end of JSON path marker.
#[inline]
pub fn set_end(&mut self) {
self.path.push_str(JSON_END_OF_PATH_STR);
}
/// Remove the last segment. Does nothing if the path is empty.
#[inline]
pub fn pop(&mut self) {
@@ -91,6 +111,7 @@ mod tests {
#[test]
fn json_path_writer_test() {
let mut writer = JsonPathWriter::new();
writer.set_expand_dots(false);
writer.push("root");
assert_eq!(writer.as_str(), "root");
@@ -109,4 +130,15 @@ mod tests {
writer.push("k8s.node.id");
assert_eq!(writer.as_str(), "root\u{1}k8s\u{1}node\u{1}id");
}
#[test]
fn test_json_path_expand_dots_enabled_pop_segment() {
let mut json_writer = JsonPathWriter::with_expand_dots(true);
json_writer.push("hello");
assert_eq!(json_writer.as_str(), "hello");
json_writer.push("color.hue");
assert_eq!(json_writer.as_str(), "hello\x01color\x01hue");
json_writer.pop();
assert_eq!(json_writer.as_str(), "hello");
}
}

View File

@@ -9,14 +9,12 @@ mod byte_count;
mod datetime;
pub mod file_slice;
mod group_by;
mod json_path_writer;
pub mod json_path_writer;
mod serialize;
mod vint;
mod writer;
pub use bitset::*;
pub use byte_count::ByteCount;
#[allow(deprecated)]
pub use datetime::DatePrecision;
pub use datetime::{DateTime, DateTimePrecision};
pub use group_by::GroupByIteratorExtended;
pub use json_path_writer::JsonPathWriter;

View File

@@ -290,8 +290,7 @@ impl<'a> BinarySerializable for Cow<'a, [u8]> {
#[cfg(test)]
pub mod test {
use super::{VInt, *};
use crate::serialize::BinarySerializable;
use super::*;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();

View File

@@ -151,7 +151,7 @@ pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
(result, vlen)
}
/// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
pub fn write_u32_vint<W: io::Write + ?Sized>(val: u32, writer: &mut W) -> io::Result<()> {
let mut buf = [0u8; 8];
let data = serialize_vint_u32(val, &mut buf);
writer.write_all(data)

View File

@@ -11,9 +11,10 @@ use columnar::Column;
// ---
// Importing tantivy...
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::index::SegmentReader;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, IndexWriter, Score, SegmentReader};
use tantivy::{doc, Index, IndexWriter, Score};
#[derive(Default)]
struct Stats {

View File

@@ -4,7 +4,7 @@
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{DateOptions, Document, OwnedValue, Schema, INDEXED, STORED, STRING};
use tantivy::schema::{DateOptions, Document, Schema, INDEXED, STORED, STRING};
use tantivy::{Index, IndexWriter, TantivyDocument};
fn main() -> tantivy::Result<()> {
@@ -13,7 +13,7 @@ fn main() -> tantivy::Result<()> {
let opts = DateOptions::from(INDEXED)
.set_stored()
.set_fast()
.set_precision(tantivy::DateTimePrecision::Seconds);
.set_precision(tantivy::schema::DateTimePrecision::Seconds);
// Add `occurred_at` date field type
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
let event_type = schema_builder.add_text_field("event", STRING | STORED);
@@ -61,10 +61,11 @@ fn main() -> tantivy::Result<()> {
assert_eq!(count_docs.len(), 1);
for (_score, doc_address) in count_docs {
let retrieved_doc = searcher.doc::<TantivyDocument>(doc_address)?;
assert!(matches!(
retrieved_doc.get_first(occurred_at),
Some(OwnedValue::Date(_))
));
assert!(retrieved_doc
.get_first(occurred_at)
.unwrap()
.as_datetime()
.is_some(),);
assert_eq!(
retrieved_doc.to_json(&schema),
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#

View File

@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
let reader = index.reader()?;
let searcher = reader.searcher();
{
let facets = vec![
let facets = [
Facet::from("/ingredient/egg"),
Facet::from("/ingredient/oil"),
Facet::from("/ingredient/garlic"),
@@ -94,9 +94,8 @@ fn main() -> tantivy::Result<()> {
.doc::<TantivyDocument>(*doc_id)
.unwrap()
.get_first(title)
.and_then(|v| v.as_str())
.and_then(|v| v.as_str().map(|el| el.to_string()))
.unwrap()
.to_owned()
})
.collect();
assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);

View File

@@ -7,10 +7,11 @@
// the list of documents containing a term, getting
// its term frequency, and accessing its positions.
use tantivy::postings::Postings;
// ---
// Importing tantivy...
use tantivy::schema::*;
use tantivy::{doc, DocSet, Index, IndexWriter, Postings, TERMINATED};
use tantivy::{doc, DocSet, Index, IndexWriter, TERMINATED};
fn main() -> tantivy::Result<()> {
// We first create a schema for the sake of the

View File

@@ -3,10 +3,11 @@ use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock, Weak};
use tantivy::collector::TopDocs;
use tantivy::index::SegmentId;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{
doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration, SegmentId,
doc, DocAddress, DocId, Index, IndexWriter, Opstamp, Searcher, SearcherGeneration,
SegmentReader, Warmer,
};

View File

@@ -1,7 +1,7 @@
[package]
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
version = "0.6.0"
version = "0.7.0"
edition = "2021"
description = "Expose data as static slice"
license = "MIT"

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
version = "0.21.0"
version = "0.22.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]

View File

@@ -218,27 +218,14 @@ fn term_or_phrase_infallible(inp: &str) -> JResult<&str, Option<UserInputLeaf>>
}
fn term_group(inp: &str) -> IResult<&str, UserInputAst> {
let occur_symbol = alt((
value(Occur::MustNot, char('-')),
value(Occur::Must, char('+')),
));
map(
tuple((
terminated(field_name, multispace0),
delimited(
tuple((char('('), multispace0)),
separated_list0(multispace1, tuple((opt(occur_symbol), term_or_phrase))),
char(')'),
),
delimited(tuple((char('('), multispace0)), ast, char(')')),
)),
|(field_name, terms)| {
UserInputAst::Clause(
terms
.into_iter()
.map(|(occur, leaf)| (occur, leaf.set_field(Some(field_name.clone())).into()))
.collect(),
)
|(field_name, mut ast)| {
ast.set_default_field(field_name);
ast
},
)(inp)
}
@@ -258,46 +245,18 @@ fn term_group_precond(inp: &str) -> IResult<&str, (), ()> {
}
fn term_group_infallible(inp: &str) -> JResult<&str, UserInputAst> {
let (mut inp, (field_name, _, _, _)) =
let (inp, (field_name, _, _, _)) =
tuple((field_name, multispace0, char('('), multispace0))(inp).expect("precondition failed");
let mut terms = Vec::new();
let mut errs = Vec::new();
let mut first_round = true;
loop {
let mut space_error = if first_round {
first_round = false;
Vec::new()
} else {
let (rest, (_, err)) = space1_infallible(inp)?;
inp = rest;
err
};
if inp.is_empty() {
errs.push(LenientErrorInternal {
pos: inp.len(),
message: "missing )".to_string(),
});
break Ok((inp, (UserInputAst::Clause(terms), errs)));
}
if let Some(inp) = inp.strip_prefix(')') {
break Ok((inp, (UserInputAst::Clause(terms), errs)));
}
// only append missing space error if we did not reach the end of group
errs.append(&mut space_error);
// here we do the assumption term_or_phrase_infallible always consume something if the
// first byte is not `)` or ' '. If it did not, we would end up looping.
let (rest, ((occur, leaf), mut err)) =
tuple_infallible((occur_symbol, term_or_phrase_infallible))(inp)?;
errs.append(&mut err);
if let Some(leaf) = leaf {
terms.push((occur, leaf.set_field(Some(field_name.clone())).into()));
}
inp = rest;
}
let res = delimited_infallible(
nothing,
map(ast_infallible, |(mut ast, errors)| {
ast.set_default_field(field_name.to_string());
(ast, errors)
}),
opt_i_err(char(')'), "expected ')'"),
)(inp);
res
}
fn exists(inp: &str) -> IResult<&str, UserInputLeaf> {
@@ -1468,8 +1427,18 @@ mod test {
#[test]
fn test_parse_query_term_group() {
test_parse_query_to_ast_helper(r#"field:(abc)"#, r#"(*"field":abc)"#);
test_parse_query_to_ast_helper(r#"field:(abc)"#, r#""field":abc"#);
test_parse_query_to_ast_helper(r#"field:(+a -"b c")"#, r#"(+"field":a -"field":"b c")"#);
test_parse_query_to_ast_helper(r#"field:(a AND "b c")"#, r#"(+"field":a +"field":"b c")"#);
test_parse_query_to_ast_helper(r#"field:(a OR "b c")"#, r#"(?"field":a ?"field":"b c")"#);
test_parse_query_to_ast_helper(
r#"field:(a OR (b AND c))"#,
r#"(?"field":a ?(+"field":b +"field":c))"#,
);
test_parse_query_to_ast_helper(
r#"field:(a [b TO c])"#,
r#"(*"field":a *"field":["b" TO "c"])"#,
);
test_is_parse_err(r#"field:(+a -"b c""#, r#"(+"field":a -"field":"b c")"#);
}
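
As a rough sketch of what the reworked term-group grammar enables at the user level (using tantivy's public `QueryParser` rather than the crate-internal test helper; the field name `title` and the query string are only illustrative), a parenthesized group scoped to one field is now parsed as a full sub-query, so boolean operators and ranges work inside it:

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let parser = QueryParser::for_index(&index, vec![title]);

    // With this grammar change, the group is parsed as a nested boolean query
    // and `title` is applied as the default field of every leaf inside it.
    let query = parser.parse_query("title:(diary OR (old AND sea))")?;
    println!("{query:?}");
    Ok(())
}
```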

View File

@@ -44,6 +44,26 @@ impl UserInputLeaf {
},
}
}
pub(crate) fn set_default_field(&mut self, default_field: String) {
match self {
UserInputLeaf::Literal(ref mut literal) if literal.field_name.is_none() => {
literal.field_name = Some(default_field)
}
UserInputLeaf::All => {
*self = UserInputLeaf::Exists {
field: default_field,
}
}
UserInputLeaf::Range { ref mut field, .. } if field.is_none() => {
*field = Some(default_field)
}
UserInputLeaf::Set { ref mut field, .. } if field.is_none() => {
*field = Some(default_field)
}
_ => (), // field was already set, do nothing
}
}
}
impl Debug for UserInputLeaf {
@@ -205,6 +225,16 @@ impl UserInputAst {
pub fn or(asts: Vec<UserInputAst>) -> UserInputAst {
UserInputAst::compose(Occur::Should, asts)
}
pub(crate) fn set_default_field(&mut self, field: String) {
match self {
UserInputAst::Clause(clauses) => clauses
.iter_mut()
.for_each(|(_, ast)| ast.set_default_field(field.clone())),
UserInputAst::Leaf(leaf) => leaf.set_default_field(field),
UserInputAst::Boost(ref mut ast, _) => ast.set_default_field(field),
}
}
}
impl From<UserInputLiteral> for UserInputLeaf {

View File

@@ -1,585 +0,0 @@
#[cfg(all(test, feature = "unstable"))]
mod bench {
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rand_distr::Distribution;
use serde_json::json;
use test::{self, Bencher};
use crate::aggregation::agg_req::Aggregations;
use crate::aggregation::AggregationCollector;
use crate::query::{AllQuery, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, FAST, STRING};
use crate::{Index, Term};
#[derive(Clone, Copy, Hash, Default, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Cardinality {
/// All documents contain exactly one value.
/// `Full` is the default for auto-detecting the Cardinality, since it is the most strict.
#[default]
Full = 0,
/// All documents contain at most one value.
Optional = 1,
/// All documents may contain any number of values.
Multivalued = 2,
/// 1 / 20 documents has a value
Sparse = 3,
}
fn get_collector(agg_req: Aggregations) -> AggregationCollector {
AggregationCollector::from_aggs(agg_req, Default::default())
}
fn get_test_index_bench(cardinality: Cardinality) -> crate::Result<Index> {
let mut schema_builder = Schema::builder();
let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let json_field = schema_builder.add_json_field("json", FAST);
let text_field_many_terms = schema_builder.add_text_field("text_many_terms", STRING | FAST);
let text_field_few_terms = schema_builder.add_text_field("text_few_terms", STRING | FAST);
let score_fieldtype = crate::schema::NumericOptions::default().set_fast();
let score_field = schema_builder.add_u64_field("score", score_fieldtype.clone());
let score_field_f64 = schema_builder.add_f64_field("score_f64", score_fieldtype.clone());
let score_field_i64 = schema_builder.add_i64_field("score_i64", score_fieldtype);
let index = Index::create_from_tempdir(schema_builder.build())?;
let few_terms_data = ["INFO", "ERROR", "WARN", "DEBUG"];
let lg_norm = rand_distr::LogNormal::new(2.996f64, 0.979f64).unwrap();
let many_terms_data = (0..150_000)
.map(|num| format!("author{}", num))
.collect::<Vec<_>>();
{
let mut rng = StdRng::from_seed([1u8; 32]);
let mut index_writer = index.writer_with_num_threads(1, 200_000_000)?;
// To make the different test cases comparable we just change one doc to force the
// cardinality
if cardinality == Cardinality::Optional {
index_writer.add_document(doc!())?;
}
if cardinality == Cardinality::Multivalued {
index_writer.add_document(doc!(
json_field => json!({"mixed_type": 10.0}),
json_field => json!({"mixed_type": 10.0}),
text_field => "cool",
text_field => "cool",
text_field_many_terms => "cool",
text_field_many_terms => "cool",
text_field_few_terms => "cool",
text_field_few_terms => "cool",
score_field => 1u64,
score_field => 1u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => 1i64,
score_field_i64 => 1i64,
))?;
}
let mut doc_with_value = 1_000_000;
if cardinality == Cardinality::Sparse {
doc_with_value /= 20;
}
let _val_max = 1_000_000.0;
for _ in 0..doc_with_value {
let val: f64 = rng.gen_range(0.0..1_000_000.0);
let json = if rng.gen_bool(0.1) {
// 10% are numeric values
json!({ "mixed_type": val })
} else {
json!({"mixed_type": many_terms_data.choose(&mut rng).unwrap().to_string()})
};
index_writer.add_document(doc!(
text_field => "cool",
json_field => json,
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
text_field_few_terms => few_terms_data.choose(&mut rng).unwrap().to_string(),
score_field => val as u64,
score_field_f64 => lg_norm.sample(&mut rng),
score_field_i64 => val as i64,
))?;
if cardinality == Cardinality::Sparse {
for _ in 0..20 {
index_writer.add_document(doc!(text_field => "cool"))?;
}
}
}
// writing the segment
index_writer.commit()?;
}
Ok(index)
}
use paste::paste;
#[macro_export]
macro_rules! bench_all_cardinalities {
( $x:ident ) => {
paste! {
#[bench]
fn $x(b: &mut Bencher) {
[<$x _card>](b, Cardinality::Full)
}
#[bench]
fn [<$x _opt>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Optional)
}
#[bench]
fn [<$x _multi>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Multivalued)
}
#[bench]
fn [<$x _sparse>](b: &mut Bencher) {
[<$x _card>](b, Cardinality::Sparse)
}
}
};
}
bench_all_cardinalities!(bench_aggregation_average_u64);
fn bench_aggregation_average_u64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average": { "avg": { "field": "score", } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_stats_f64);
fn bench_aggregation_stats_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average_f64": { "stats": { "field": "score_f64", } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_average_f64);
fn bench_aggregation_average_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average_f64": { "avg": { "field": "score_f64", } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_percentiles_f64);
fn bench_aggregation_percentiles_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_str = r#"
{
"mypercentiles": {
"percentiles": {
"field": "score_f64",
"percents": [ 95, 99, 99.9 ]
}
}
} "#;
let agg_req_1: Aggregations = serde_json::from_str(agg_req_str).unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_average_u64_and_f64);
fn bench_aggregation_average_u64_and_f64_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"average_f64": { "avg": { "field": "score_f64" } },
"average": { "avg": { "field": "score" } },
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_few);
fn bench_aggregation_terms_few_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": { "terms": { "field": "text_few_terms" } },
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_with_top_hits_agg);
fn bench_aggregation_terms_many_with_top_hits_agg_card(
b: &mut Bencher,
cardinality: Cardinality,
) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"top_hits": { "top_hits":
{
"sort": [
{ "score": "desc" }
],
"size": 2,
"doc_value_fields": ["score_f64"]
}
}
}
},
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_with_sub_agg);
fn bench_aggregation_terms_many_with_sub_agg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": {
"terms": { "field": "text_many_terms" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_json_mixed_type_with_sub_agg);
fn bench_aggregation_terms_many_json_mixed_type_with_sub_agg_card(
b: &mut Bencher,
cardinality: Cardinality,
) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": {
"terms": { "field": "json.mixed_type" },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many2);
fn bench_aggregation_terms_many2_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": { "terms": { "field": "text_many_terms" } },
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_terms_many_order_by_term);
fn bench_aggregation_terms_many_order_by_term_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req: Aggregations = serde_json::from_value(json!({
"my_texts": { "terms": { "field": "text_many_terms", "order": { "_key": "desc" } } },
}))
.unwrap();
let collector = get_collector(agg_req);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_range_only);
fn bench_aggregation_range_only_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"range_f64": { "range": { "field": "score_f64", "ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
] } },
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_range_with_avg);
fn bench_aggregation_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 30000 },
{ "from": 30000, "to": 40000 },
{ "from": 40000, "to": 50000 },
{ "from": 50000, "to": 60000 }
]
},
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
},
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
// hard bounds has a different algorithm, because it actually limits collection range
//
bench_all_cardinalities!(bench_aggregation_histogram_only_hard_bounds);
fn bench_aggregation_histogram_only_hard_bounds_card(
b: &mut Bencher,
cardinality: Cardinality,
) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": { "histogram": { "field": "score_f64", "interval": 100, "hard_bounds": { "min": 1000, "max": 300000 } } },
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_histogram_with_avg);
fn bench_aggregation_histogram_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"histogram": { "field": "score_f64", "interval": 100 },
"aggs": {
"average_f64": { "avg": { "field": "score_f64" } }
}
}
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_histogram_only);
fn bench_aggregation_histogram_only_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"histogram": {
"field": "score_f64",
"interval": 100 // 1000 buckets
},
}
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
});
}
bench_all_cardinalities!(bench_aggregation_avg_and_range_with_avg);
fn bench_aggregation_avg_and_range_with_avg_card(b: &mut Bencher, cardinality: Cardinality) {
let index = get_test_index_bench(cardinality).unwrap();
let reader = index.reader().unwrap();
let text_field = reader.searcher().schema().get_field("text").unwrap();
b.iter(|| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, "cool"),
IndexRecordOption::Basic,
);
let agg_req_1: Aggregations = serde_json::from_value(json!({
"rangef64": {
"range": {
"field": "score_f64",
"ranges": [
{ "from": 3, "to": 7000 },
{ "from": 7000, "to": 20000 },
{ "from": 20000, "to": 60000 }
]
},
"aggs": {
"average_in_range": { "avg": { "field": "score" } }
}
},
"average": { "avg": { "field": "score" } }
}))
.unwrap();
let collector = get_collector(agg_req_1);
let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
});
}
}

View File

@@ -81,10 +81,11 @@ impl AggregationLimits {
}
}
pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
self.memory_consumption
.fetch_add(num_bytes, Ordering::Relaxed);
validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
let prev_value = self
.memory_consumption
.fetch_add(add_num_bytes, Ordering::Relaxed);
validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
Ok(())
}
@@ -94,11 +95,11 @@ impl AggregationLimits {
}
fn validate_memory_consumption(
memory_consumption: &AtomicU64,
memory_consumption: u64,
memory_limit: ByteCount,
) -> Result<(), AggregationError> {
// Load the estimated memory consumed by the aggregations
let memory_consumed: ByteCount = memory_consumption.load(Ordering::Relaxed).into();
let memory_consumed: ByteCount = memory_consumption.into();
if memory_consumed > memory_limit {
return Err(AggregationError::MemoryExceeded {
limit: memory_limit,
@@ -118,10 +119,11 @@ pub struct ResourceLimitGuard {
}
impl ResourceLimitGuard {
pub(crate) fn add_memory_consumed(&self, num_bytes: u64) -> crate::Result<()> {
self.memory_consumption
.fetch_add(num_bytes, Ordering::Relaxed);
validate_memory_consumption(&self.memory_consumption, self.memory_limit)?;
pub(crate) fn add_memory_consumed(&self, add_num_bytes: u64) -> crate::Result<()> {
let prev_value = self
.memory_consumption
.fetch_add(add_num_bytes, Ordering::Relaxed);
validate_memory_consumption(prev_value + add_num_bytes, self.memory_limit)?;
Ok(())
}
}
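
The core of this change is a standard atomics pattern: `fetch_add` already returns the previous value, so the new total can be computed locally instead of re-reading the shared counter. A minimal stand-alone sketch of that pattern (illustrative names, not tantivy's API):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Adds `add_num_bytes` to the shared counter and checks the limit using the
/// value returned by `fetch_add`, avoiding a second atomic load.
fn add_memory_consumed(counter: &AtomicU64, add_num_bytes: u64, limit: u64) -> Result<(), String> {
    let prev_value = counter.fetch_add(add_num_bytes, Ordering::Relaxed);
    let consumed = prev_value + add_num_bytes;
    if consumed > limit {
        return Err(format!("aggregation memory limit exceeded: {consumed} > {limit} bytes"));
    }
    Ok(())
}

fn main() {
    let counter = AtomicU64::new(0);
    assert!(add_memory_consumed(&counter, 512, 1024).is_ok());
    assert!(add_memory_consumed(&counter, 1024, 1024).is_err());
}
```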

View File

@@ -17,7 +17,8 @@ use super::metric::{
use super::segment_agg_result::AggregationLimits;
use super::VecWithNames;
use crate::aggregation::{f64_to_fastfield_u64, Key};
use crate::{SegmentOrdinal, SegmentReader};
use crate::index::SegmentReader;
use crate::SegmentOrdinal;
#[derive(Default)]
pub(crate) struct AggregationsWithAccessor {
@@ -292,7 +293,7 @@ impl AggregationWithAccessor {
add_agg_with_accessor(&agg, accessor, column_type, &mut res)?;
}
TopHits(ref mut top_hits) => {
top_hits.validate_and_resolve(reader.fast_fields().columnar())?;
top_hits.validate_and_resolve_field_names(reader.fast_fields().columnar())?;
let accessors: Vec<(Column<u64>, ColumnType)> = top_hits
.field_names()
.iter()

View File

@@ -4,6 +4,7 @@ use crate::aggregation::agg_req::{Aggregation, Aggregations};
use crate::aggregation::agg_result::AggregationResults;
use crate::aggregation::buf_collector::DOC_BLOCK_SIZE;
use crate::aggregation::collector::AggregationCollector;
use crate::aggregation::intermediate_agg_result::IntermediateAggregationResults;
use crate::aggregation::segment_agg_result::AggregationLimits;
use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values_and_terms};
use crate::aggregation::DistributedAggregationCollector;
@@ -66,6 +67,22 @@ fn test_aggregation_flushing(
}
}
},
"top_hits_test":{
"terms": {
"field": "string_id"
},
"aggs": {
"bucketsL2": {
"top_hits": {
"size": 2,
"sort": [
{ "score": "asc" }
],
"docvalue_fields": ["score"]
}
}
}
},
"histogram_test":{
"histogram": {
"field": "score",
@@ -108,6 +125,16 @@ fn test_aggregation_flushing(
let searcher = reader.searcher();
let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
// Test postcard roundtrip serialization
let intermediate_agg_result_bytes = postcard::to_allocvec(&intermediate_agg_result).expect(
"Postcard Serialization failed, flatten etc. is not supported in the intermediate \
result",
);
let intermediate_agg_result: IntermediateAggregationResults =
postcard::from_bytes(&intermediate_agg_result_bytes)
.expect("Post deserialization failed");
intermediate_agg_result
.into_final_result(agg_req, &Default::default())
.unwrap()
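
The new assertion guards the intermediate result against serde attributes such as `flatten` that postcard, as a non-self-describing format, cannot handle. A self-contained sketch of the same round trip on a stand-in struct (the real test serializes `IntermediateAggregationResults`; assumes the `postcard` crate with its `alloc` feature and `serde` with `derive`):

```rust
use serde::{Deserialize, Serialize};

// Stand-in for the intermediate aggregation result used in the test above.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Intermediate {
    count: u64,
    sum: f64,
}

fn main() {
    let value = Intermediate { count: 3, sum: 1.5 };
    // Serialize to a Vec<u8> and back; any unsupported serde attribute would
    // surface here rather than at query time.
    let bytes = postcard::to_allocvec(&value).expect("postcard serialization failed");
    let back: Intermediate = postcard::from_bytes(&bytes).expect("postcard deserialization failed");
    assert_eq!(value, back);
}
```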

View File

@@ -1,7 +1,5 @@
use std::cmp::Ordering;
use columnar::ColumnType;
use itertools::Itertools;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use tantivy_bitpacker::minmax;
@@ -17,7 +15,7 @@ use crate::aggregation::intermediate_agg_result::{
IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::{
build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
build_segment_agg_collector, SegmentAggregationCollector,
};
use crate::aggregation::*;
use crate::TantivyError;
@@ -333,9 +331,11 @@ impl SegmentAggregationCollector for SegmentHistogramCollector {
}
let mem_delta = self.get_memory_consumption() - mem_pre;
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
if mem_delta > 0 {
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
}
Ok(())
}
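
The same guard appears in the term aggregation further down: the shared limit counter is only touched when the segment-local estimate actually grew, so idle collectors no longer contend on the global atomic. A compact sketch of the idea (names are illustrative, not tantivy's API):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Reports a segment-local memory delta to the shared counter only when it is
/// positive, so unchanged collectors skip the contended atomic entirely.
fn report_memory_delta(shared: &AtomicU64, mem_delta: i64) {
    if mem_delta > 0 {
        shared.fetch_add(mem_delta as u64, Ordering::Relaxed);
    }
}

fn main() {
    let shared = AtomicU64::new(0);
    report_memory_delta(&shared, 0);   // no atomic write
    report_memory_delta(&shared, 128); // counted
    assert_eq!(shared.load(Ordering::Relaxed), 128);
}
```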

View File

@@ -28,6 +28,7 @@ mod term_agg;
mod term_missing_agg;
use std::collections::HashMap;
use std::fmt;
pub use histogram::*;
pub use range::*;
@@ -72,12 +73,12 @@ impl From<&str> for OrderTarget {
}
}
impl ToString for OrderTarget {
fn to_string(&self) -> String {
impl fmt::Display for OrderTarget {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
OrderTarget::Key => "_key".to_string(),
OrderTarget::Count => "_count".to_string(),
OrderTarget::SubAggregation(agg) => agg.to_string(),
OrderTarget::Key => f.write_str("_key"),
OrderTarget::Count => f.write_str("_count"),
OrderTarget::SubAggregation(agg) => agg.fmt(f),
}
}
}
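
Implementing `fmt::Display` instead of `ToString` is the usual idiom here: the standard library's blanket impl then provides `to_string()` for free. A minimal sketch of the same pattern on a stand-in enum:

```rust
use std::fmt;

enum Target {
    Key,
    Count,
}

impl fmt::Display for Target {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Target::Key => f.write_str("_key"),
            Target::Count => f.write_str("_count"),
        }
    }
}

fn main() {
    // `to_string()` comes from the blanket `impl<T: Display> ToString for T`.
    assert_eq!(Target::Key.to_string(), "_key");
}
```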

View File

@@ -1,7 +1,6 @@
use std::fmt::Debug;
use std::ops::Range;
use columnar::{ColumnType, MonotonicallyMappableToU64};
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
@@ -450,7 +449,6 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &ColumnType) -> crate
#[cfg(test)]
mod tests {
use columnar::MonotonicallyMappableToU64;
use serde_json::Value;
use super::*;
@@ -459,7 +457,6 @@ mod tests {
exec_request, exec_request_with_query, get_test_index_2_segments,
get_test_index_with_num_docs,
};
use crate::aggregation::AggregationLimits;
pub fn get_collector_from_ranges(
ranges: Vec<RangeAggregationRange>,

View File

@@ -324,9 +324,11 @@ impl SegmentAggregationCollector for SegmentTermCollector {
}
let mem_delta = self.get_memory_consumption() - mem_pre;
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
if mem_delta > 0 {
bucket_agg_accessor
.limits
.add_memory_consumed(mem_delta as u64)?;
}
Ok(())
}

View File

@@ -8,7 +8,8 @@ use super::segment_agg_result::{
};
use crate::aggregation::agg_req_with_accessor::get_aggs_with_segment_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::{DocId, SegmentOrdinal, SegmentReader, TantivyError};
use crate::index::SegmentReader;
use crate::{DocId, SegmentOrdinal, TantivyError};
/// The default max bucket count, before the aggregation fails.
pub const DEFAULT_BUCKET_LIMIT: u32 = 65000;

View File

@@ -20,7 +20,7 @@ use super::bucket::{
};
use super::metric::{
IntermediateAverage, IntermediateCount, IntermediateMax, IntermediateMin, IntermediateStats,
IntermediateSum, PercentilesCollector, TopHitsCollector,
IntermediateSum, PercentilesCollector, TopHitsTopNComputer,
};
use super::segment_agg_result::AggregationLimits;
use super::{format_date, AggregationError, Key, SerializedKey};
@@ -221,9 +221,9 @@ pub(crate) fn empty_from_req(req: &Aggregation) -> IntermediateAggregationResult
Percentiles(_) => IntermediateAggregationResult::Metric(
IntermediateMetricResult::Percentiles(PercentilesCollector::default()),
),
TopHits(_) => IntermediateAggregationResult::Metric(IntermediateMetricResult::TopHits(
TopHitsCollector::default(),
)),
TopHits(ref req) => IntermediateAggregationResult::Metric(
IntermediateMetricResult::TopHits(TopHitsTopNComputer::new(req.clone())),
),
}
}
@@ -285,7 +285,7 @@ pub enum IntermediateMetricResult {
/// Intermediate sum result.
Sum(IntermediateSum),
/// Intermediate top_hits result
TopHits(TopHitsCollector),
TopHits(TopHitsTopNComputer),
}
impl IntermediateMetricResult {
@@ -314,7 +314,7 @@ impl IntermediateMetricResult {
.into_final_result(req.agg.as_percentile().expect("unexpected metric type")),
),
IntermediateMetricResult::TopHits(top_hits) => {
MetricResult::TopHits(top_hits.finalize())
MetricResult::TopHits(top_hits.into_final_result())
}
}
}

View File

@@ -25,6 +25,8 @@ mod stats;
mod sum;
mod top_hits;
use std::collections::HashMap;
pub use average::*;
pub use count::*;
pub use max::*;
@@ -36,6 +38,8 @@ pub use stats::*;
pub use sum::*;
pub use top_hits::*;
use crate::schema::OwnedValue;
/// Single-metric aggregations use this common result structure.
///
/// Main reason to wrap it in value is to match elasticsearch output structure.
@@ -92,8 +96,9 @@ pub struct TopHitsVecEntry {
/// Search results, for queries that include field retrieval requests
/// (`docvalue_fields`).
#[serde(flatten)]
pub search_results: FieldRetrivalResult,
#[serde(rename = "docvalue_fields")]
#[serde(skip_serializing_if = "HashMap::is_empty")]
pub doc_value_fields: HashMap<String, OwnedValue>,
}
/// The top_hits metric aggregation returns a list of top hits by sort criteria.
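
With `#[serde(skip_serializing_if = "HashMap::is_empty")]`, the `docvalue_fields` key is omitted from the JSON output whenever no fields were requested. A small illustration on a stand-in struct (assumes `serde` with `derive` and `serde_json`; the types are simplified for the sketch):

```rust
use std::collections::HashMap;

use serde::Serialize;

#[derive(Serialize)]
struct Hit {
    sort: Vec<Option<u64>>,
    #[serde(rename = "docvalue_fields")]
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    doc_value_fields: HashMap<String, String>,
}

fn main() {
    let hit = Hit {
        sort: vec![Some(3)],
        doc_value_fields: HashMap::new(),
    };
    // The empty map is skipped entirely, keeping the response compact.
    assert_eq!(serde_json::to_string(&hit).unwrap(), r#"{"sort":[3]}"#);
}
```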

View File

@@ -1,6 +1,5 @@
use std::fmt::Debug;
use columnar::ColumnType;
use serde::{Deserialize, Serialize};
use super::*;

View File

@@ -1,4 +1,3 @@
use columnar::ColumnType;
use serde::{Deserialize, Serialize};
use super::*;

View File

@@ -1,7 +1,9 @@
use std::collections::HashMap;
use std::fmt::Formatter;
use std::net::Ipv6Addr;
use columnar::{ColumnarReader, DynamicColumn};
use common::json_path_writer::JSON_PATH_SEGMENT_SEP_STR;
use common::DateTime;
use regex::Regex;
use serde::ser::SerializeMap;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -12,8 +14,8 @@ use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResult, IntermediateMetricResult,
};
use crate::aggregation::segment_agg_result::SegmentAggregationCollector;
use crate::aggregation::AggregationError;
use crate::collector::TopNComputer;
use crate::schema::term::JSON_PATH_SEGMENT_SEP_STR;
use crate::schema::OwnedValue;
use crate::{DocAddress, DocId, SegmentOrdinal};
@@ -92,53 +94,106 @@ pub struct TopHitsAggregation {
size: usize,
from: Option<usize>,
#[serde(flatten)]
retrieval: RetrievalFields,
}
const fn default_doc_value_fields() -> Vec<String> {
Vec::new()
}
/// Search query spec for each matched document
/// TODO: move this to a common module
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub struct RetrievalFields {
/// The fast fields to return for each hit.
/// This is the only variant supported for now.
/// TODO: support the {field, format} variant for custom formatting.
#[serde(rename = "docvalue_fields")]
#[serde(default = "default_doc_value_fields")]
pub doc_value_fields: Vec<String>,
#[serde(default)]
doc_value_fields: Vec<String>,
// Not supported
_source: Option<serde_json::Value>,
fields: Option<serde_json::Value>,
script_fields: Option<serde_json::Value>,
highlight: Option<serde_json::Value>,
explain: Option<serde_json::Value>,
version: Option<serde_json::Value>,
}
/// Search query result for each matched document
/// TODO: move this to a common module
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub struct FieldRetrivalResult {
/// The fast fields returned for each hit.
#[serde(rename = "docvalue_fields")]
#[serde(skip_serializing_if = "HashMap::is_empty")]
pub doc_value_fields: HashMap<String, OwnedValue>,
#[derive(Debug, Clone, PartialEq, Default)]
struct KeyOrder {
field: String,
order: Order,
}
impl RetrievalFields {
fn get_field_names(&self) -> Vec<&str> {
self.doc_value_fields.iter().map(|s| s.as_str()).collect()
impl Serialize for KeyOrder {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let KeyOrder { field, order } = self;
let mut map = serializer.serialize_map(Some(1))?;
map.serialize_entry(field, order)?;
map.end()
}
}
impl<'de> Deserialize<'de> for KeyOrder {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> {
let mut key_order = <HashMap<String, Order>>::deserialize(deserializer)?.into_iter();
let (field, order) = key_order.next().ok_or(serde::de::Error::custom(
"Expected exactly one key-value pair in sort parameter of top_hits, found none",
))?;
if key_order.next().is_some() {
return Err(serde::de::Error::custom(format!(
"Expected exactly one key-value pair in sort parameter of top_hits, found {:?}",
key_order
)));
}
Ok(Self { field, order })
}
}
// Transform a glob (`pattern*`, for example) into a regex::Regex (`^pattern.*$`)
fn globbed_string_to_regex(glob: &str) -> Result<Regex, crate::TantivyError> {
// Replace `*` glob with `.*` regex
let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
crate::TantivyError::SchemaError(format!(
"Invalid regex '{}' in docvalue_fields: {}",
glob, e
))
})
}
fn use_doc_value_fields_err(parameter: &str) -> crate::Result<()> {
Err(crate::TantivyError::AggregationError(
AggregationError::InvalidRequest(format!(
"The `{}` parameter is not supported, only `docvalue_fields` is supported in \
`top_hits` aggregation",
parameter
)),
))
}
fn unsupported_err(parameter: &str) -> crate::Result<()> {
Err(crate::TantivyError::AggregationError(
AggregationError::InvalidRequest(format!(
"The `{}` parameter is not supported in the `top_hits` aggregation",
parameter
)),
))
}
impl TopHitsAggregation {
/// Validate and resolve field retrieval parameters
pub fn validate_and_resolve_field_names(
&mut self,
reader: &ColumnarReader,
) -> crate::Result<()> {
if self._source.is_some() {
use_doc_value_fields_err("_source")?;
}
if self.fields.is_some() {
use_doc_value_fields_err("fields")?;
}
if self.script_fields.is_some() {
use_doc_value_fields_err("script_fields")?;
}
if self.explain.is_some() {
unsupported_err("explain")?;
}
if self.highlight.is_some() {
unsupported_err("highlight")?;
}
if self.version.is_some() {
unsupported_err("version")?;
}
fn resolve_field_names(&mut self, reader: &ColumnarReader) -> crate::Result<()> {
// Transform a glob (`pattern*`, for example) into a regex::Regex (`^pattern.*$`)
let globbed_string_to_regex = |glob: &str| {
// Replace `*` glob with `.*` regex
let sanitized = format!("^{}$", regex::escape(glob).replace(r"\*", ".*"));
Regex::new(&sanitized.replace('*', ".*")).map_err(|e| {
crate::TantivyError::SchemaError(format!(
"Invalid regex '{}' in docvalue_fields: {}",
glob, e
))
})
};
self.doc_value_fields = self
.doc_value_fields
.iter()
@@ -175,12 +230,25 @@ impl RetrievalFields {
Ok(())
}
/// Return fields accessed by the aggregator, in order.
pub fn field_names(&self) -> Vec<&str> {
self.sort
.iter()
.map(|KeyOrder { field, .. }| field.as_str())
.collect()
}
/// Return fields accessed by the aggregator's value retrieval.
pub fn value_field_names(&self) -> Vec<&str> {
self.doc_value_fields.iter().map(|s| s.as_str()).collect()
}
fn get_document_field_data(
&self,
accessors: &HashMap<String, Vec<DynamicColumn>>,
doc_id: DocId,
) -> FieldRetrivalResult {
let dvf = self
) -> HashMap<String, FastFieldValue> {
let doc_value_fields = self
.doc_value_fields
.iter()
.map(|field| {
@@ -188,20 +256,20 @@ impl RetrievalFields {
.get(field)
.unwrap_or_else(|| panic!("field '{}' not found in accessors", field));
let values: Vec<OwnedValue> = accessors
let values: Vec<FastFieldValue> = accessors
.iter()
.flat_map(|accessor| match accessor {
DynamicColumn::U64(accessor) => accessor
.values_for_doc(doc_id)
.map(OwnedValue::U64)
.map(FastFieldValue::U64)
.collect::<Vec<_>>(),
DynamicColumn::I64(accessor) => accessor
.values_for_doc(doc_id)
.map(OwnedValue::I64)
.map(FastFieldValue::I64)
.collect::<Vec<_>>(),
DynamicColumn::F64(accessor) => accessor
.values_for_doc(doc_id)
.map(OwnedValue::F64)
.map(FastFieldValue::F64)
.collect::<Vec<_>>(),
DynamicColumn::Bytes(accessor) => accessor
.term_ords(doc_id)
@@ -213,7 +281,7 @@ impl RetrievalFields {
.expect("could not read term dictionary"),
"term corresponding to term_ord does not exist"
);
OwnedValue::Bytes(buffer)
FastFieldValue::Bytes(buffer)
})
.collect::<Vec<_>>(),
DynamicColumn::Str(accessor) => accessor
@@ -226,94 +294,82 @@ impl RetrievalFields {
.expect("could not read term dictionary"),
"term corresponding to term_ord does not exist"
);
OwnedValue::Str(String::from_utf8(buffer).unwrap())
FastFieldValue::Str(String::from_utf8(buffer).unwrap())
})
.collect::<Vec<_>>(),
DynamicColumn::Bool(accessor) => accessor
.values_for_doc(doc_id)
.map(OwnedValue::Bool)
.map(FastFieldValue::Bool)
.collect::<Vec<_>>(),
DynamicColumn::IpAddr(accessor) => accessor
.values_for_doc(doc_id)
.map(OwnedValue::IpAddr)
.map(FastFieldValue::IpAddr)
.collect::<Vec<_>>(),
DynamicColumn::DateTime(accessor) => accessor
.values_for_doc(doc_id)
.map(OwnedValue::Date)
.map(FastFieldValue::Date)
.collect::<Vec<_>>(),
})
.collect();
(field.to_owned(), OwnedValue::Array(values))
(field.to_owned(), FastFieldValue::Array(values))
})
.collect();
FieldRetrivalResult {
doc_value_fields: dvf,
doc_value_fields
}
}
/// A retrieved value from a fast field.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum FastFieldValue {
/// The str type is used for any text information.
Str(String),
/// Unsigned 64-bits Integer `u64`
U64(u64),
/// Signed 64-bits Integer `i64`
I64(i64),
/// 64-bits Float `f64`
F64(f64),
/// Bool value
Bool(bool),
/// Date/time with nanoseconds precision
Date(DateTime),
/// Arbitrarily sized byte array
Bytes(Vec<u8>),
/// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
IpAddr(Ipv6Addr),
/// A list of values.
Array(Vec<Self>),
}
impl From<FastFieldValue> for OwnedValue {
fn from(value: FastFieldValue) -> Self {
match value {
FastFieldValue::Str(s) => OwnedValue::Str(s),
FastFieldValue::U64(u) => OwnedValue::U64(u),
FastFieldValue::I64(i) => OwnedValue::I64(i),
FastFieldValue::F64(f) => OwnedValue::F64(f),
FastFieldValue::Bool(b) => OwnedValue::Bool(b),
FastFieldValue::Date(d) => OwnedValue::Date(d),
FastFieldValue::Bytes(b) => OwnedValue::Bytes(b),
FastFieldValue::IpAddr(ip) => OwnedValue::IpAddr(ip),
FastFieldValue::Array(a) => {
OwnedValue::Array(a.into_iter().map(OwnedValue::from).collect())
}
}
}
}
#[derive(Debug, Clone, PartialEq, Default)]
struct KeyOrder {
field: String,
order: Order,
}
impl Serialize for KeyOrder {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let KeyOrder { field, order } = self;
let mut map = serializer.serialize_map(Some(1))?;
map.serialize_entry(field, order)?;
map.end()
}
}
impl<'de> Deserialize<'de> for KeyOrder {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> {
let mut k_o = <HashMap<String, Order>>::deserialize(deserializer)?.into_iter();
let (k, v) = k_o.next().ok_or(serde::de::Error::custom(
"Expected exactly one key-value pair in KeyOrder, found none",
))?;
if k_o.next().is_some() {
return Err(serde::de::Error::custom(
"Expected exactly one key-value pair in KeyOrder, found more",
));
}
Ok(Self { field: k, order: v })
}
}
impl TopHitsAggregation {
/// Validate and resolve field retrieval parameters
pub fn validate_and_resolve(&mut self, reader: &ColumnarReader) -> crate::Result<()> {
self.retrieval.resolve_field_names(reader)
}
/// Return fields accessed by the aggregator, in order.
pub fn field_names(&self) -> Vec<&str> {
self.sort
.iter()
.map(|KeyOrder { field, .. }| field.as_str())
.collect()
}
/// Return fields accessed by the aggregator's value retrieval.
pub fn value_field_names(&self) -> Vec<&str> {
self.retrieval.get_field_names()
}
}
/// Holds a single comparable doc feature, and the order in which it should be sorted.
/// Holds a fast field value in its u64 representation, and the order in which it should be sorted.
#[derive(Clone, Serialize, Deserialize, Debug)]
struct ComparableDocFeature {
/// Stores any u64-mappable feature.
struct DocValueAndOrder {
/// A fast field value in its u64 representation.
value: Option<u64>,
/// Sort order for the doc feature
/// Sort order for the value
order: Order,
}
impl Ord for ComparableDocFeature {
impl Ord for DocValueAndOrder {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let invert = |cmp: std::cmp::Ordering| match self.order {
Order::Asc => cmp,
@@ -329,26 +385,32 @@ impl Ord for ComparableDocFeature {
}
}
impl PartialOrd for ComparableDocFeature {
impl PartialOrd for DocValueAndOrder {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for ComparableDocFeature {
impl PartialEq for DocValueAndOrder {
fn eq(&self, other: &Self) -> bool {
self.value.cmp(&other.value) == std::cmp::Ordering::Equal
}
}
impl Eq for ComparableDocFeature {}
impl Eq for DocValueAndOrder {}
#[derive(Clone, Serialize, Deserialize, Debug)]
struct ComparableDocFeatures(Vec<ComparableDocFeature>, FieldRetrivalResult);
struct DocSortValuesAndFields {
sorts: Vec<DocValueAndOrder>,
impl Ord for ComparableDocFeatures {
#[serde(rename = "docvalue_fields")]
#[serde(skip_serializing_if = "HashMap::is_empty")]
doc_value_fields: HashMap<String, FastFieldValue>,
}
impl Ord for DocSortValuesAndFields {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
for (self_feature, other_feature) in self.0.iter().zip(other.0.iter()) {
for (self_feature, other_feature) in self.sorts.iter().zip(other.sorts.iter()) {
let cmp = self_feature.cmp(other_feature);
if cmp != std::cmp::Ordering::Equal {
return cmp;
@@ -358,53 +420,43 @@ impl Ord for ComparableDocFeatures {
}
}
impl PartialOrd for ComparableDocFeatures {
impl PartialOrd for DocSortValuesAndFields {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for ComparableDocFeatures {
impl PartialEq for DocSortValuesAndFields {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == std::cmp::Ordering::Equal
}
}
impl Eq for ComparableDocFeatures {}
impl Eq for DocSortValuesAndFields {}
/// The TopHitsCollector used for collecting over segments and merging results.
#[derive(Clone, Serialize, Deserialize)]
pub struct TopHitsCollector {
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct TopHitsTopNComputer {
req: TopHitsAggregation,
top_n: TopNComputer<ComparableDocFeatures, DocAddress, false>,
top_n: TopNComputer<DocSortValuesAndFields, DocAddress, false>,
}
impl Default for TopHitsCollector {
fn default() -> Self {
Self {
req: TopHitsAggregation::default(),
top_n: TopNComputer::new(1),
}
}
}
impl std::fmt::Debug for TopHitsCollector {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TopHitsCollector")
.field("req", &self.req)
.field("top_n_threshold", &self.top_n.threshold)
.finish()
}
}
impl std::cmp::PartialEq for TopHitsCollector {
impl std::cmp::PartialEq for TopHitsTopNComputer {
fn eq(&self, _other: &Self) -> bool {
false
}
}
impl TopHitsCollector {
fn collect(&mut self, features: ComparableDocFeatures, doc: DocAddress) {
impl TopHitsTopNComputer {
/// Create a new TopHitsCollector
pub fn new(req: TopHitsAggregation) -> Self {
Self {
top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
req,
}
}
fn collect(&mut self, features: DocSortValuesAndFields, doc: DocAddress) {
self.top_n.push(features, doc);
}
@@ -416,14 +468,19 @@ impl TopHitsCollector {
}
/// Finalize by converting self into the final result form
pub fn finalize(self) -> TopHitsMetricResult {
pub fn into_final_result(self) -> TopHitsMetricResult {
let mut hits: Vec<TopHitsVecEntry> = self
.top_n
.into_sorted_vec()
.into_iter()
.map(|doc| TopHitsVecEntry {
sort: doc.feature.0.iter().map(|f| f.value).collect(),
search_results: doc.feature.1,
sort: doc.feature.sorts.iter().map(|f| f.value).collect(),
doc_value_fields: doc
.feature
.doc_value_fields
.into_iter()
.map(|(k, v)| (k, v.into()))
.collect(),
})
.collect();
@@ -436,48 +493,63 @@ impl TopHitsCollector {
}
}
#[derive(Clone)]
pub(crate) struct SegmentTopHitsCollector {
#[derive(Clone, Debug)]
pub(crate) struct TopHitsSegmentCollector {
segment_ordinal: SegmentOrdinal,
accessor_idx: usize,
inner_collector: TopHitsCollector,
req: TopHitsAggregation,
top_n: TopNComputer<Vec<DocValueAndOrder>, DocAddress, false>,
}
impl SegmentTopHitsCollector {
impl TopHitsSegmentCollector {
pub fn from_req(
req: &TopHitsAggregation,
accessor_idx: usize,
segment_ordinal: SegmentOrdinal,
) -> Self {
Self {
inner_collector: TopHitsCollector {
req: req.clone(),
top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
},
req: req.clone(),
top_n: TopNComputer::new(req.size + req.from.unwrap_or(0)),
segment_ordinal,
accessor_idx,
}
}
}
fn into_top_hits_collector(
self,
value_accessors: &HashMap<String, Vec<DynamicColumn>>,
) -> TopHitsTopNComputer {
let mut top_hits_computer = TopHitsTopNComputer::new(self.req.clone());
let top_results = self.top_n.into_vec();
impl std::fmt::Debug for SegmentTopHitsCollector {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SegmentTopHitsCollector")
.field("segment_id", &self.segment_ordinal)
.field("accessor_idx", &self.accessor_idx)
.field("inner_collector", &self.inner_collector)
.finish()
for res in top_results {
let doc_value_fields = self
.req
.get_document_field_data(value_accessors, res.doc.doc_id);
top_hits_computer.collect(
DocSortValuesAndFields {
sorts: res.feature,
doc_value_fields,
},
res.doc,
);
}
top_hits_computer
}
}
impl SegmentAggregationCollector for SegmentTopHitsCollector {
impl SegmentAggregationCollector for TopHitsSegmentCollector {
fn add_intermediate_aggregation_result(
self: Box<Self>,
agg_with_accessor: &crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
results: &mut crate::aggregation::intermediate_agg_result::IntermediateAggregationResults,
) -> crate::Result<()> {
let name = agg_with_accessor.aggs.keys[self.accessor_idx].to_string();
let intermediate_result = IntermediateMetricResult::TopHits(self.inner_collector);
let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
let intermediate_result =
IntermediateMetricResult::TopHits(self.into_top_hits_collector(value_accessors));
results.push(
name,
IntermediateAggregationResult::Metric(intermediate_result),
@@ -490,9 +562,7 @@ impl SegmentAggregationCollector for SegmentTopHitsCollector {
agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
) -> crate::Result<()> {
let accessors = &agg_with_accessor.aggs.values[self.accessor_idx].accessors;
let value_accessors = &agg_with_accessor.aggs.values[self.accessor_idx].value_accessors;
let features: Vec<ComparableDocFeature> = self
.inner_collector
let sorts: Vec<DocValueAndOrder> = self
.req
.sort
.iter()
@@ -505,18 +575,12 @@ impl SegmentAggregationCollector for SegmentTopHitsCollector {
.0
.values_for_doc(doc_id)
.next();
ComparableDocFeature { value, order }
DocValueAndOrder { value, order }
})
.collect();
let retrieval_result = self
.inner_collector
.req
.retrieval
.get_document_field_data(value_accessors, doc_id);
self.inner_collector.collect(
ComparableDocFeatures(features, retrieval_result),
self.top_n.push(
sorts,
DocAddress {
segment_ord: self.segment_ordinal,
doc_id,
@@ -530,11 +594,7 @@ impl SegmentAggregationCollector for SegmentTopHitsCollector {
docs: &[crate::DocId],
agg_with_accessor: &mut crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor,
) -> crate::Result<()> {
// TODO: Consider getting fields with the column block accessor and refactor this.
// ---
// Would the additional complexity of getting fields with the column_block_accessor
// make sense here? Probably yes, but I want to get a first-pass review first
// before proceeding.
// TODO: Consider getting fields with the column block accessor.
for doc in docs {
self.collect(*doc, agg_with_accessor)?;
}
@@ -549,7 +609,7 @@ mod tests {
use serde_json::Value;
use time::macros::datetime;
use super::{ComparableDocFeature, ComparableDocFeatures, Order};
use super::{DocSortValuesAndFields, DocValueAndOrder, Order};
use crate::aggregation::agg_req::Aggregations;
use crate::aggregation::agg_result::AggregationResults;
use crate::aggregation::bucket::tests::get_test_index_from_docs;
@@ -557,44 +617,44 @@ mod tests {
use crate::aggregation::AggregationCollector;
use crate::collector::ComparableDoc;
use crate::query::AllQuery;
use crate::schema::OwnedValue as SchemaValue;
use crate::schema::OwnedValue;
fn invert_order(cmp_feature: ComparableDocFeature) -> ComparableDocFeature {
let ComparableDocFeature { value, order } = cmp_feature;
fn invert_order(cmp_feature: DocValueAndOrder) -> DocValueAndOrder {
let DocValueAndOrder { value, order } = cmp_feature;
let order = match order {
Order::Asc => Order::Desc,
Order::Desc => Order::Asc,
};
ComparableDocFeature { value, order }
DocValueAndOrder { value, order }
}
fn collector_with_capacity(capacity: usize) -> super::TopHitsCollector {
super::TopHitsCollector {
fn collector_with_capacity(capacity: usize) -> super::TopHitsTopNComputer {
super::TopHitsTopNComputer {
top_n: super::TopNComputer::new(capacity),
..Default::default()
req: Default::default(),
}
}
fn invert_order_features(cmp_features: ComparableDocFeatures) -> ComparableDocFeatures {
let ComparableDocFeatures(cmp_features, search_results) = cmp_features;
let cmp_features = cmp_features
fn invert_order_features(mut cmp_features: DocSortValuesAndFields) -> DocSortValuesAndFields {
cmp_features.sorts = cmp_features
.sorts
.into_iter()
.map(invert_order)
.collect::<Vec<_>>();
ComparableDocFeatures(cmp_features, search_results)
cmp_features
}
#[test]
fn test_comparable_doc_feature() -> crate::Result<()> {
let small = ComparableDocFeature {
let small = DocValueAndOrder {
value: Some(1),
order: Order::Asc,
};
let big = ComparableDocFeature {
let big = DocValueAndOrder {
value: Some(2),
order: Order::Asc,
};
let none = ComparableDocFeature {
let none = DocValueAndOrder {
value: None,
order: Order::Asc,
};
@@ -616,21 +676,21 @@ mod tests {
#[test]
fn test_comparable_doc_features() -> crate::Result<()> {
let features_1 = ComparableDocFeatures(
vec![ComparableDocFeature {
let features_1 = DocSortValuesAndFields {
sorts: vec![DocValueAndOrder {
value: Some(1),
order: Order::Asc,
}],
Default::default(),
);
doc_value_fields: Default::default(),
};
let features_2 = ComparableDocFeatures(
vec![ComparableDocFeature {
let features_2 = DocSortValuesAndFields {
sorts: vec![DocValueAndOrder {
value: Some(2),
order: Order::Asc,
}],
Default::default(),
);
doc_value_fields: Default::default(),
};
assert!(features_1 < features_2);
@@ -689,39 +749,39 @@ mod tests {
segment_ord: 0,
doc_id: 0,
},
feature: ComparableDocFeatures(
vec![ComparableDocFeature {
feature: DocSortValuesAndFields {
sorts: vec![DocValueAndOrder {
value: Some(1),
order: Order::Asc,
}],
Default::default(),
),
doc_value_fields: Default::default(),
},
},
ComparableDoc {
doc: crate::DocAddress {
segment_ord: 0,
doc_id: 2,
},
feature: ComparableDocFeatures(
vec![ComparableDocFeature {
feature: DocSortValuesAndFields {
sorts: vec![DocValueAndOrder {
value: Some(3),
order: Order::Asc,
}],
Default::default(),
),
doc_value_fields: Default::default(),
},
},
ComparableDoc {
doc: crate::DocAddress {
segment_ord: 0,
doc_id: 1,
},
feature: ComparableDocFeatures(
vec![ComparableDocFeature {
feature: DocSortValuesAndFields {
sorts: vec![DocValueAndOrder {
value: Some(5),
order: Order::Asc,
}],
Default::default(),
),
doc_value_fields: Default::default(),
},
},
];
@@ -730,23 +790,23 @@ mod tests {
collector.collect(doc.feature, doc.doc);
}
let res = collector.finalize();
let res = collector.into_final_result();
assert_eq!(
res,
super::TopHitsMetricResult {
hits: vec![
super::TopHitsVecEntry {
sort: vec![docs[0].feature.0[0].value],
search_results: Default::default(),
sort: vec![docs[0].feature.sorts[0].value],
doc_value_fields: Default::default(),
},
super::TopHitsVecEntry {
sort: vec![docs[1].feature.0[0].value],
search_results: Default::default(),
sort: vec![docs[1].feature.sorts[0].value],
doc_value_fields: Default::default(),
},
super::TopHitsVecEntry {
sort: vec![docs[2].feature.0[0].value],
search_results: Default::default(),
sort: vec![docs[2].feature.sorts[0].value],
doc_value_fields: Default::default(),
},
]
}
@@ -803,7 +863,7 @@ mod tests {
{
"sort": [common::i64_to_u64(date_2017.unix_timestamp_nanos() as i64)],
"docvalue_fields": {
"date": [ SchemaValue::Date(DateTime::from_utc(date_2017)) ],
"date": [ OwnedValue::Date(DateTime::from_utc(date_2017)) ],
"text": [ "ccc" ],
"text2": [ "ddd" ],
"mixed.dyn_arr": [ 3, "4" ],
@@ -812,7 +872,7 @@ mod tests {
{
"sort": [common::i64_to_u64(date_2016.unix_timestamp_nanos() as i64)],
"docvalue_fields": {
"date": [ SchemaValue::Date(DateTime::from_utc(date_2016)) ],
"date": [ OwnedValue::Date(DateTime::from_utc(date_2016)) ],
"text": [ "aaa" ],
"text2": [ "bbb" ],
"mixed.dyn_arr": [ 6, "7" ],

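
The custom `KeyOrder` (de)serialization above accepts Elasticsearch-style sort entries such as `{ "score": "desc" }` and rejects anything other than exactly one key-value pair. A stand-alone sketch of that wire format, using illustrative types rather than tantivy's internal ones (assumes `serde` with `derive` and `serde_json`):

```rust
use std::collections::HashMap;

use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum Order {
    Asc,
    Desc,
}

/// Parses a single sort entry of the form `{ "<field>": "asc" | "desc" }`.
fn parse_key_order(json: &str) -> Result<(String, Order), String> {
    let mut entries: HashMap<String, Order> =
        serde_json::from_str(json).map_err(|e| e.to_string())?;
    if entries.len() != 1 {
        return Err("expected exactly one key-value pair in sort parameter".to_string());
    }
    Ok(entries.drain().next().unwrap())
}

fn main() {
    assert_eq!(
        parse_key_order(r#"{ "score": "desc" }"#).unwrap(),
        ("score".to_string(), Order::Desc)
    );
    assert!(parse_key_order(r#"{ "score": "desc", "date": "asc" }"#).is_err());
}
```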
View File

@@ -143,8 +143,6 @@ use std::fmt::Display;
#[cfg(test)]
mod agg_tests;
mod agg_bench;
use core::fmt;
pub use agg_limits::AggregationLimits;
@@ -417,7 +415,6 @@ mod tests {
use time::OffsetDateTime;
use super::agg_req::Aggregations;
use super::segment_agg_result::AggregationLimits;
use super::*;
use crate::indexer::NoMergePolicy;
use crate::query::{AllQuery, TermQuery};

View File

@@ -16,7 +16,7 @@ use super::metric::{
SumAggregation,
};
use crate::aggregation::bucket::TermMissingAgg;
use crate::aggregation::metric::SegmentTopHitsCollector;
use crate::aggregation::metric::TopHitsSegmentCollector;
pub(crate) trait SegmentAggregationCollector: CollectorClone + Debug {
fn add_intermediate_aggregation_result(
@@ -161,7 +161,7 @@ pub(crate) fn build_single_agg_segment_collector(
accessor_idx,
)?,
)),
TopHits(top_hits_req) => Ok(Box::new(SegmentTopHitsCollector::from_req(
TopHits(top_hits_req) => Ok(Box::new(TopHitsSegmentCollector::from_req(
top_hits_req,
accessor_idx,
req.segment_ordinal,

View File

@@ -1,7 +1,7 @@
use std::cmp::Ordering;
use std::collections::{btree_map, BTreeMap, BTreeSet, BinaryHeap};
use std::io;
use std::ops::Bound;
use std::{io, u64, usize};
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::FacetReader;

View File

@@ -160,7 +160,7 @@ mod tests {
use super::{add_vecs, HistogramCollector, HistogramComputer};
use crate::schema::{Schema, FAST};
use crate::time::{Date, Month};
use crate::{doc, query, DateTime, Index};
use crate::{query, DateTime, Index};
#[test]
fn test_add_histograms_simple() {

View File

@@ -1,15 +1,11 @@
use columnar::{BytesColumn, Column};
use super::*;
use crate::collector::{Count, FilterCollector, TopDocs};
use crate::index::SegmentReader;
use crate::query::{AllQuery, QueryParser};
use crate::schema::{Schema, FAST, TEXT};
use crate::time::format_description::well_known::Rfc3339;
use crate::time::OffsetDateTime;
use crate::{
doc, DateTime, DocAddress, DocId, Index, Score, Searcher, SegmentOrdinal, TantivyDocument,
};
use crate::{DateTime, DocAddress, Index, Searcher, TantivyDocument};
pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
compute_score: true,

View File

@@ -4,7 +4,8 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use super::top_score_collector::TopNComputer;
use crate::{DocAddress, DocId, SegmentOrdinal, SegmentReader};
use crate::index::SegmentReader;
use crate::{DocAddress, DocId, SegmentOrdinal};
/// Contains a feature (field, score, etc.) of a document along with the document address.
///

View File

@@ -732,6 +732,19 @@ pub struct TopNComputer<Score, D, const REVERSE_ORDER: bool = true> {
top_n: usize,
pub(crate) threshold: Option<Score>,
}
impl<Score: std::fmt::Debug, D, const REVERSE_ORDER: bool> std::fmt::Debug
for TopNComputer<Score, D, REVERSE_ORDER>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TopNComputer")
.field("buffer_len", &self.buffer.len())
.field("top_n", &self.top_n)
.field("current_threshold", &self.threshold)
.finish()
}
}
// Intermediate struct for TopNComputer for deserialization, to keep vec capacity
#[derive(Deserialize)]
struct TopNComputerDeser<Score, D, const REVERSE_ORDER: bool> {

View File

@@ -1,19 +1,25 @@
use rayon::{ThreadPool, ThreadPoolBuilder};
use std::sync::Arc;
#[cfg(feature = "quickwit")]
use futures_util::{future::Either, FutureExt};
use crate::TantivyError;
/// Search executor whether search request are single thread or multithread.
///
/// We don't expose Rayon thread pool directly here for several reasons.
///
/// First dependency hell. It is not a good idea to expose the
/// API of a dependency, knowing it might conflict with a different version
/// used by the client. Second, we may stop using rayon in the future.
/// Executor makes it possible to run tasks in single thread or
/// in a thread pool.
#[derive(Clone)]
pub enum Executor {
/// Single thread variant of an Executor
SingleThread,
/// Thread pool variant of an Executor
ThreadPool(ThreadPool),
ThreadPool(Arc<rayon::ThreadPool>),
}
#[cfg(feature = "quickwit")]
impl From<Arc<rayon::ThreadPool>> for Executor {
fn from(thread_pool: Arc<rayon::ThreadPool>) -> Self {
Executor::ThreadPool(thread_pool)
}
}
impl Executor {
@@ -24,11 +30,11 @@ impl Executor {
/// Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> crate::Result<Executor> {
let pool = ThreadPoolBuilder::new()
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(move |num| format!("{prefix}{num}"))
.build()?;
Ok(Executor::ThreadPool(pool))
Ok(Executor::ThreadPool(Arc::new(pool)))
}
/// Perform a map in the thread pool.
@@ -91,11 +97,36 @@ impl Executor {
}
}
}
/// Spawn a task on the pool, returning a future completing on task success.
///
/// If the task panics, returns `Err(())`.
#[cfg(feature = "quickwit")]
pub fn spawn_blocking<T: Send + 'static>(
&self,
cpu_intensive_task: impl FnOnce() -> T + Send + 'static,
) -> impl std::future::Future<Output = Result<T, ()>> {
match self {
Executor::SingleThread => Either::Left(std::future::ready(Ok(cpu_intensive_task()))),
Executor::ThreadPool(pool) => {
let (sender, receiver) = oneshot::channel();
pool.spawn(|| {
if sender.is_closed() {
return;
}
let task_result = cpu_intensive_task();
let _ = sender.send(task_result);
});
let res = receiver.map(|res| res.map_err(|_| ()));
Either::Right(res)
}
}
}
}
#[cfg(test)]
mod tests {
use super::Executor;
#[test]
@@ -147,4 +178,62 @@ mod tests {
assert_eq!(result[i], i * 2);
}
}
#[cfg(feature = "quickwit")]
#[test]
fn test_cancel_cpu_intensive_tasks() {
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
let counter: Arc<AtomicU64> = Default::default();
let other_counter: Arc<AtomicU64> = Default::default();
let mut futures = Vec::new();
let mut other_futures = Vec::new();
let (tx, rx) = crossbeam_channel::bounded::<()>(0);
let rx = Arc::new(rx);
let executor = Executor::multi_thread(3, "search-test").unwrap();
for i in 0..1000 {
let counter_clone: Arc<AtomicU64> = counter.clone();
let other_counter_clone: Arc<AtomicU64> = other_counter.clone();
let rx_clone = rx.clone();
let rx_clone2 = rx.clone();
let fut = executor.spawn_blocking(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
let () = rx_clone.recv().unwrap();
});
futures.push(fut);
let other_fut = executor.spawn_blocking(move || {
other_counter_clone.fetch_add(1, Ordering::SeqCst);
let () = rx_clone2.recv().unwrap();
});
other_futures.push(other_fut);
}
// We execute 100 futures.
for i in 0..100 {
tx.send(()).unwrap();
}
let counter_val = counter.load(Ordering::SeqCst);
let other_counter_val = other_counter.load(Ordering::SeqCst);
assert!(counter_val >= 30);
assert!(other_counter_val >= 30);
drop(other_futures);
// We execute 100 futures.
for i in 0..100 {
tx.send(()).unwrap();
}
let counter_val2 = counter.load(Ordering::SeqCst);
assert!(counter_val2 >= counter_val + 100 - 6);
let other_counter_val2 = other_counter.load(Ordering::SeqCst);
assert!(other_counter_val2 <= other_counter_val + 6);
}
}

View File

@@ -1,12 +1,10 @@
use columnar::MonotonicallyMappableToU64;
use common::json_path_writer::JSON_PATH_SEGMENT_SEP;
use common::{replace_in_place, JsonPathWriter};
use rustc_hash::FxHashMap;
use crate::fastfield::FastValue;
use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
use crate::schema::document::{ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::term::JSON_PATH_SEGMENT_SEP;
use crate::schema::{Field, Type, DATE_TIME_PRECISION_INDEXED};
use crate::schema::Type;
use crate::time::format_description::well_known::Rfc3339;
use crate::time::{OffsetDateTime, UtcOffset};
use crate::tokenizer::TextAnalyzer;
@@ -33,7 +31,7 @@ use crate::{DateTime, DocId, Term};
/// position 1.
/// As a result, with lemmatization, "The Smiths" will match our object.
///
/// Worse, if a same term is appears in the second object, a non increasing value would be pushed
/// Worse, if a same term appears in the second object, a non increasing value would be pushed
/// to the position recorder probably provoking a panic.
///
/// This problem is solved for regular multivalued objects by offsetting the position
@@ -52,7 +50,7 @@ use crate::{DateTime, DocId, Term};
/// We can therefore afford working with a map that is not perfect. It is fine if several
/// paths map to the same index position as long as the probability is relatively low.
#[derive(Default)]
struct IndexingPositionsPerPath {
pub(crate) struct IndexingPositionsPerPath {
positions_per_path: FxHashMap<u32, IndexingPosition>,
}
@@ -60,6 +58,9 @@ impl IndexingPositionsPerPath {
fn get_position_from_id(&mut self, id: u32) -> &mut IndexingPosition {
self.positions_per_path.entry(id).or_default()
}
pub fn clear(&mut self) {
self.positions_per_path.clear();
}
}
/// Convert JSON_PATH_SEGMENT_SEP to a dot.
@@ -70,36 +71,6 @@ pub fn json_path_sep_to_dot(path: &mut str) {
}
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn index_json_values<'a, V: Value<'a>>(
doc: DocId,
json_visitors: impl Iterator<Item = crate::Result<V::ObjectIter>>,
text_analyzer: &mut TextAnalyzer,
expand_dots_enabled: bool,
term_buffer: &mut Term,
postings_writer: &mut dyn PostingsWriter,
json_path_writer: &mut JsonPathWriter,
ctx: &mut IndexingContext,
) -> crate::Result<()> {
json_path_writer.clear();
json_path_writer.set_expand_dots(expand_dots_enabled);
let mut positions_per_path: IndexingPositionsPerPath = Default::default();
for json_visitor_res in json_visitors {
let json_visitor = json_visitor_res?;
index_json_object::<V>(
doc,
json_visitor,
text_analyzer,
term_buffer,
json_path_writer,
postings_writer,
ctx,
&mut positions_per_path,
);
}
Ok(())
}
#[allow(clippy::too_many_arguments)]
fn index_json_object<'a, V: Value<'a>>(
doc: DocId,
@@ -128,7 +99,7 @@ fn index_json_object<'a, V: Value<'a>>(
}
#[allow(clippy::too_many_arguments)]
fn index_json_value<'a, V: Value<'a>>(
pub(crate) fn index_json_value<'a, V: Value<'a>>(
doc: DocId,
json_value: V,
text_analyzer: &mut TextAnalyzer,
@@ -168,12 +139,18 @@ fn index_json_value<'a, V: Value<'a>>(
);
}
ReferenceValueLeaf::U64(val) => {
// try to parse to i64, since when querying we will apply the same logic and prefer
// i64 values
set_path_id(
term_buffer,
ctx.path_to_unordered_id
.get_or_allocate_unordered_id(json_path_writer.as_str()),
);
term_buffer.append_type_and_fast_value(val);
if let Ok(i64_val) = val.try_into() {
term_buffer.append_type_and_fast_value::<i64>(i64_val);
} else {
term_buffer.append_type_and_fast_value(val);
}
postings_writer.subscribe(doc, 0u32, term_buffer, ctx);
}
ReferenceValueLeaf::I64(val) => {
@@ -256,71 +233,42 @@ fn index_json_value<'a, V: Value<'a>>(
}
}
// Tries to infer a JSON type from a string.
pub fn convert_to_fast_value_and_get_term(
json_term_writer: &mut JsonTermWriter,
phrase: &str,
) -> Option<Term> {
/// Tries to infer a JSON type from a string and append it to the term.
///
/// The term must already contain the JSON type and the JSON path, with no value bytes appended yet.
pub fn convert_to_fast_value_and_append_to_json_term(mut term: Term, phrase: &str) -> Option<Term> {
assert_eq!(
term.value()
.as_json_value_bytes()
.expect("expecting a Term with a json type and json path")
.as_serialized()
.len(),
0,
"JSON value bytes should be empty"
);
if let Ok(dt) = OffsetDateTime::parse(phrase, &Rfc3339) {
let dt_utc = dt.to_offset(UtcOffset::UTC);
return Some(set_fastvalue_and_get_term(
json_term_writer,
DateTime::from_utc(dt_utc),
));
term.append_type_and_fast_value(DateTime::from_utc(dt_utc));
return Some(term);
}
if let Ok(i64_val) = str::parse::<i64>(phrase) {
return Some(set_fastvalue_and_get_term(json_term_writer, i64_val));
term.append_type_and_fast_value(i64_val);
return Some(term);
}
if let Ok(u64_val) = str::parse::<u64>(phrase) {
return Some(set_fastvalue_and_get_term(json_term_writer, u64_val));
term.append_type_and_fast_value(u64_val);
return Some(term);
}
if let Ok(f64_val) = str::parse::<f64>(phrase) {
return Some(set_fastvalue_and_get_term(json_term_writer, f64_val));
term.append_type_and_fast_value(f64_val);
return Some(term);
}
if let Ok(bool_val) = str::parse::<bool>(phrase) {
return Some(set_fastvalue_and_get_term(json_term_writer, bool_val));
term.append_type_and_fast_value(bool_val);
return Some(term);
}
None
}
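
A minimal sketch of how a caller might combine this helper with the new `Term::from_field_json_path` API (field id and path are placeholders; the `tantivy::json_utils` path relies on the doc-hidden re-export visible further down in this diff):

```rust
use tantivy::json_utils::convert_to_fast_value_and_append_to_json_term;
use tantivy::schema::Field;
use tantivy::Term;

fn numeric_json_term() -> Option<Term> {
    // Placeholder field id; normally obtained from the schema.
    let field = Field::from_field_id(0);
    // The term carries only the JSON type and path so far, no value bytes.
    let term = Term::from_field_json_path(field, "attributes.count", false);
    // "75" parses as an integer, so an i64 fast value is appended.
    convert_to_fast_value_and_append_to_json_term(term, "75")
}
```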
// helper function to generate a Term from a json fastvalue
pub(crate) fn set_fastvalue_and_get_term<T: FastValue>(
json_term_writer: &mut JsonTermWriter,
value: T,
) -> Term {
json_term_writer.set_fast_value(value);
json_term_writer.term().clone()
}
// helper function to generate a list of terms with their positions from a textual json value
pub(crate) fn set_string_and_get_terms(
json_term_writer: &mut JsonTermWriter,
value: &str,
text_analyzer: &mut TextAnalyzer,
) -> Vec<(usize, Term)> {
let mut positions_and_terms = Vec::<(usize, Term)>::new();
json_term_writer.close_path_and_set_type(Type::Str);
let term_num_bytes = json_term_writer.term_buffer.len_bytes();
let mut token_stream = text_analyzer.token_stream(value);
token_stream.process(&mut |token| {
json_term_writer
.term_buffer
.truncate_value_bytes(term_num_bytes);
json_term_writer
.term_buffer
.append_bytes(token.text.as_bytes());
positions_and_terms.push((token.position, json_term_writer.term().clone()));
});
positions_and_terms
}
/// Writes a value of a JSON field to a `Term`.
/// The Term format is as follows:
/// `[JSON_TYPE][JSON_PATH][JSON_END_OF_PATH][VALUE_BYTES]`
pub struct JsonTermWriter<'a> {
term_buffer: &'a mut Term,
path_stack: Vec<usize>,
expand_dots_enabled: bool,
}
/// Splits a json path supplied to the query parser in such a way that
/// `.` can be escaped.
@@ -377,158 +325,48 @@ pub(crate) fn encode_column_name(
path.into()
}
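
For reference, a small sketch of the escaping rule mentioned above: an unescaped `.` separates path segments, while `\.` keeps a literal dot inside a single JSON key (field id, path and value are placeholders):

```rust
use tantivy::schema::Field;
use tantivy::Term;

fn dotted_key_term() -> Term {
    let field = Field::from_field_id(0);
    // Targets {"server.name": {"port": ...}}, not {"server": {"name": {"port": ...}}}.
    let mut term = Term::from_field_json_path(field, "server\\.name.port", false);
    term.append_type_and_fast_value(8080i64);
    term
}
```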
impl<'a> JsonTermWriter<'a> {
pub fn from_field_and_json_path(
field: Field,
json_path: &str,
expand_dots_enabled: bool,
term_buffer: &'a mut Term,
) -> Self {
term_buffer.set_field_and_type(field, Type::Json);
let mut json_term_writer = Self::wrap(term_buffer, expand_dots_enabled);
for segment in split_json_path(json_path) {
json_term_writer.push_path_segment(&segment);
}
json_term_writer
}
pub fn wrap(term_buffer: &'a mut Term, expand_dots_enabled: bool) -> Self {
term_buffer.clear_with_type(Type::Json);
let mut path_stack = Vec::with_capacity(10);
path_stack.push(0);
Self {
term_buffer,
path_stack,
expand_dots_enabled,
}
}
fn trim_to_end_of_path(&mut self) {
let end_of_path = *self.path_stack.last().unwrap();
self.term_buffer.truncate_value_bytes(end_of_path);
}
pub fn close_path_and_set_type(&mut self, typ: Type) {
self.trim_to_end_of_path();
self.term_buffer.set_json_path_end();
self.term_buffer.append_bytes(&[typ.to_code()]);
}
// TODO: Remove this function and use JsonPathWriter instead.
pub fn push_path_segment(&mut self, segment: &str) {
// the path stack should never be empty.
self.trim_to_end_of_path();
if self.path_stack.len() > 1 {
self.term_buffer.set_json_path_separator();
}
let appended_segment = self.term_buffer.append_bytes(segment.as_bytes());
if self.expand_dots_enabled {
// We need to replace `.` by JSON_PATH_SEGMENT_SEP.
replace_in_place(b'.', JSON_PATH_SEGMENT_SEP, appended_segment);
}
self.term_buffer.add_json_path_separator();
self.path_stack.push(self.term_buffer.len_bytes());
}
pub fn pop_path_segment(&mut self) {
self.path_stack.pop();
assert!(!self.path_stack.is_empty());
self.trim_to_end_of_path();
}
/// Returns the json path of the term being currently built.
#[cfg(test)]
pub(crate) fn path(&self) -> &[u8] {
let end_of_path = self.path_stack.last().cloned().unwrap_or(1);
&self.term().serialized_value_bytes()[..end_of_path - 1]
}
pub(crate) fn set_fast_value<T: FastValue>(&mut self, val: T) {
self.close_path_and_set_type(T::to_type());
let value = if T::to_type() == Type::Date {
DateTime::from_u64(val.to_u64())
.truncate(DATE_TIME_PRECISION_INDEXED)
.to_u64()
} else {
val.to_u64()
};
self.term_buffer
.append_bytes(value.to_be_bytes().as_slice());
}
pub fn set_str(&mut self, text: &str) {
self.close_path_and_set_type(Type::Str);
self.term_buffer.append_bytes(text.as_bytes());
}
pub fn term(&self) -> &Term {
self.term_buffer
}
}
#[cfg(test)]
mod tests {
use super::{split_json_path, JsonTermWriter};
use crate::schema::{Field, Type};
use super::split_json_path;
use crate::schema::Field;
use crate::Term;
#[test]
fn test_json_writer() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("attributes");
json_writer.push_path_segment("color");
json_writer.set_str("red");
let mut term = Term::from_field_json_path(field, "attributes.color", false);
term.append_type_and_str("red");
assert_eq!(
format!("{:?}", json_writer.term()),
format!("{:?}", term),
"Term(field=1, type=Json, path=attributes.color, type=Str, \"red\")"
);
json_writer.set_str("blue");
let mut term = Term::from_field_json_path(field, "attributes.dimensions.width", false);
term.append_type_and_fast_value(400i64);
assert_eq!(
format!("{:?}", json_writer.term()),
"Term(field=1, type=Json, path=attributes.color, type=Str, \"blue\")"
);
json_writer.pop_path_segment();
json_writer.push_path_segment("dimensions");
json_writer.push_path_segment("width");
json_writer.set_fast_value(400i64);
assert_eq!(
format!("{:?}", json_writer.term()),
format!("{:?}", term),
"Term(field=1, type=Json, path=attributes.dimensions.width, type=I64, 400)"
);
json_writer.pop_path_segment();
json_writer.push_path_segment("height");
json_writer.set_fast_value(300i64);
assert_eq!(
format!("{:?}", json_writer.term()),
"Term(field=1, type=Json, path=attributes.dimensions.height, type=I64, 300)"
);
}
#[test]
fn test_string_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_str("red");
assert_eq!(
json_writer.term().serialized_term(),
b"\x00\x00\x00\x01jcolor\x00sred"
)
let mut term = Term::from_field_json_path(field, "color", false);
term.append_type_and_str("red");
assert_eq!(term.serialized_term(), b"\x00\x00\x00\x01jcolor\x00sred")
}
#[test]
fn test_i64_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(-4i64);
let mut term = Term::from_field_json_path(field, "color", false);
term.append_type_and_fast_value(-4i64);
assert_eq!(
json_writer.term().serialized_term(),
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00i\x7f\xff\xff\xff\xff\xff\xff\xfc"
)
}
@@ -536,12 +374,11 @@ mod tests {
#[test]
fn test_u64_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(4u64);
let mut term = Term::from_field_json_path(field, "color", false);
term.append_type_and_fast_value(4u64);
assert_eq!(
json_writer.term().serialized_term(),
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00u\x00\x00\x00\x00\x00\x00\x00\x04"
)
}
@@ -549,12 +386,10 @@ mod tests {
#[test]
fn test_f64_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(4.0f64);
let mut term = Term::from_field_json_path(field, "color", false);
term.append_type_and_fast_value(4.0f64);
assert_eq!(
json_writer.term().serialized_term(),
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00f\xc0\x10\x00\x00\x00\x00\x00\x00"
)
}
@@ -562,90 +397,14 @@ mod tests {
#[test]
fn test_bool_term() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.set_fast_value(true);
let mut term = Term::from_field_json_path(field, "color", false);
term.append_type_and_fast_value(true);
assert_eq!(
json_writer.term().serialized_term(),
term.serialized_term(),
b"\x00\x00\x00\x01jcolor\x00o\x00\x00\x00\x00\x00\x00\x00\x01"
)
}
#[test]
fn test_push_after_set_path_segment() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("attribute");
json_writer.set_str("something");
json_writer.push_path_segment("color");
json_writer.set_str("red");
assert_eq!(
json_writer.term().serialized_term(),
b"\x00\x00\x00\x01jattribute\x01color\x00sred"
)
}
#[test]
fn test_pop_segment() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
json_writer.push_path_segment("hue");
json_writer.pop_path_segment();
json_writer.set_str("red");
assert_eq!(
json_writer.term().serialized_term(),
b"\x00\x00\x00\x01jcolor\x00sred"
)
}
#[test]
fn test_json_writer_path() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color");
assert_eq!(json_writer.path(), b"color");
json_writer.push_path_segment("hue");
assert_eq!(json_writer.path(), b"color\x01hue");
json_writer.set_str("pink");
assert_eq!(json_writer.path(), b"color\x01hue");
}
#[test]
fn test_json_path_expand_dots_disabled() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, false);
json_writer.push_path_segment("color.hue");
assert_eq!(json_writer.path(), b"color.hue");
}
#[test]
fn test_json_path_expand_dots_enabled() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, true);
json_writer.push_path_segment("color.hue");
assert_eq!(json_writer.path(), b"color\x01hue");
}
#[test]
fn test_json_path_expand_dots_enabled_pop_segment() {
let field = Field::from_field_id(1);
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_writer = JsonTermWriter::wrap(&mut term, true);
json_writer.push_path_segment("hello");
assert_eq!(json_writer.path(), b"hello");
json_writer.push_path_segment("color.hue");
assert_eq!(json_writer.path(), b"hello\x01color\x01hue");
json_writer.pop_path_segment();
assert_eq!(json_writer.path(), b"hello");
}
#[test]
fn test_split_json_path_simple() {
let json_path = split_json_path("titi.toto");

View File

@@ -4,13 +4,13 @@ use std::{fmt, io};
use crate::collector::Collector;
use crate::core::Executor;
use crate::index::SegmentReader;
use crate::index::{SegmentId, SegmentReader};
use crate::query::{Bm25StatisticsProvider, EnableScoring, Query};
use crate::schema::document::DocumentDeserialize;
use crate::schema::{Schema, Term};
use crate::space_usage::SearcherSpaceUsage;
use crate::store::{CacheStats, StoreReader};
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};
use crate::{DocAddress, Index, Opstamp, TrackedObject};
/// Identifies the searcher generation accessed by a [`Searcher`].
///
@@ -109,8 +109,9 @@ impl Searcher {
&self,
doc_address: DocAddress,
) -> crate::Result<D> {
let executor = self.inner.index.search_executor();
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
store_reader.get_async(doc_address.doc_id).await
store_reader.get_async(doc_address.doc_id, executor).await
}
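
A minimal caller-side sketch of fetching a stored document asynchronously (assumes the `quickwit` feature, which gates the async store reads, an async runtime, and a valid `DocAddress`):

```rust
use tantivy::{DocAddress, Searcher, TantivyDocument};

// Hypothetical helper: fetch the first document of the first segment.
async fn first_doc(searcher: &Searcher) -> tantivy::Result<TantivyDocument> {
    searcher.doc_async(DocAddress::new(0, 0)).await
}
```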
/// Access the schema associated with the index of this searcher.

View File

@@ -1,13 +1,14 @@
use crate::collector::Count;
use crate::directory::{RamDirectory, WatchCallback};
use crate::index::SegmentId;
use crate::indexer::{LogMergePolicy, NoMergePolicy};
use crate::json_utils::JsonTermWriter;
use crate::postings::Postings;
use crate::query::TermQuery;
use crate::schema::{Field, IndexRecordOption, Schema, Type, INDEXED, STRING, TEXT};
use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, STRING, TEXT};
use crate::tokenizer::TokenizerManager;
use crate::{
Directory, DocSet, Index, IndexBuilder, IndexReader, IndexSettings, IndexWriter, Postings,
ReloadPolicy, SegmentId, TantivyDocument, Term,
Directory, DocSet, Index, IndexBuilder, IndexReader, IndexSettings, IndexWriter, ReloadPolicy,
TantivyDocument, Term,
};
#[test]
@@ -416,16 +417,12 @@ fn test_non_text_json_term_freq() {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let inv_idx = segment_reader.inverted_index(field).unwrap();
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("tenant_id");
json_term_writer.close_path_and_set_type(Type::U64);
json_term_writer.set_fast_value(75u64);
let mut term = Term::from_field_json_path(field, "tenant_id", false);
term.append_type_and_fast_value(75i64);
let postings = inv_idx
.read_postings(
json_term_writer.term(),
IndexRecordOption::WithFreqsAndPositions,
)
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
.unwrap()
.unwrap();
assert_eq!(postings.doc(), 0);
@@ -454,16 +451,12 @@ fn test_non_text_json_term_freq_bitpacked() {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let inv_idx = segment_reader.inverted_index(field).unwrap();
let mut term = Term::with_type_and_field(Type::Json, field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("tenant_id");
json_term_writer.close_path_and_set_type(Type::U64);
json_term_writer.set_fast_value(75u64);
let mut term = Term::from_field_json_path(field, "tenant_id", false);
term.append_type_and_fast_value(75i64);
let mut postings = inv_idx
.read_postings(
json_term_writer.term(),
IndexRecordOption::WithFreqsAndPositions,
)
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
.unwrap()
.unwrap();
assert_eq!(postings.doc(), 0);

View File

@@ -1,6 +1,5 @@
use std::collections::HashMap;
use std::io::{self, Read, Write};
use std::iter::ExactSizeIterator;
use std::ops::Range;
use common::{BinarySerializable, CountingWriter, HasLen, VInt};

View File

@@ -1,5 +1,4 @@
use std::io::Write;
use std::marker::{Send, Sync};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
@@ -40,6 +39,7 @@ impl RetryPolicy {
/// The `DirectoryLock` is an object that represents a file lock.
///
/// It is associated with a lock file that gets deleted on `Drop`.
#[allow(dead_code)]
pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);
struct DirectoryLockGuard {

View File

@@ -1,6 +1,6 @@
use std::io::Write;
use std::mem;
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;

View File

@@ -32,6 +32,7 @@ pub struct WatchCallbackList {
/// file change is detected.
#[must_use = "This `WatchHandle` controls the lifetime of the watch and should therefore be used."]
#[derive(Clone)]
#[allow(dead_code)]
pub struct WatchHandle(Arc<WatchCallback>);
impl WatchHandle {

View File

@@ -62,7 +62,6 @@ impl FacetReader {
#[cfg(test)]
mod tests {
use crate::schema::document::Value;
use crate::schema::{Facet, FacetOptions, SchemaBuilder, STORED};
use crate::{DocAddress, Index, IndexWriter, TantivyDocument};
@@ -146,8 +145,11 @@ mod tests {
facet_ords.extend(facet_reader.facet_ords(0u32));
assert_eq!(&facet_ords, &[0u64]);
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0u32, 0u32))?;
let value: Option<&Facet> = doc.get_first(facet_field).and_then(|v| v.as_facet());
assert_eq!(value, Facet::from_text("/a/b").ok().as_ref());
let value: Option<Facet> = doc
.get_first(facet_field)
.and_then(|v| v.as_facet())
.map(|facet| Facet::from_encoded_string(facet.to_string()));
assert_eq!(value, Facet::from_text("/a/b").ok());
Ok(())
}

View File

@@ -79,8 +79,8 @@ mod tests {
use std::ops::{Range, RangeInclusive};
use std::path::Path;
use columnar::{Column, MonotonicallyMappableToU64, StrColumn};
use common::{ByteCount, HasLen, TerminatingWrite};
use columnar::StrColumn;
use common::{ByteCount, DateTimePrecision, HasLen, TerminatingWrite};
use once_cell::sync::Lazy;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
@@ -88,14 +88,15 @@ mod tests {
use super::*;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::index::SegmentId;
use crate::merge_policy::NoMergePolicy;
use crate::schema::{
Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder, TantivyDocument,
TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
DateOptions, Facet, FacetOptions, Field, JsonObjectOptions, Schema, SchemaBuilder,
TantivyDocument, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
};
use crate::time::OffsetDateTime;
use crate::tokenizer::{LowerCaser, RawTokenizer, TextAnalyzer, TokenizerManager};
use crate::{DateOptions, DateTimePrecision, Index, IndexWriter, SegmentId, SegmentReader};
use crate::{Index, IndexWriter, SegmentReader};
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
let mut schema_builder = Schema::builder();

View File

@@ -1,14 +1,14 @@
use std::io;
use columnar::{ColumnarWriter, NumericalValue};
use common::JsonPathWriter;
use common::{DateTimePrecision, JsonPathWriter};
use tokenizer_api::Token;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::document::{Document, ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::{value_type_to_column_type, Field, FieldType, Schema, Type};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::{DateTimePrecision, DocId, TantivyError};
use crate::{DocId, TantivyError};
/// Only index JSON down to a depth of 20.
/// This is mostly to guard us from a stack overflow triggered by malicious input.
@@ -183,8 +183,7 @@ impl FastFieldsWriter {
.record_datetime(doc_id, field_name, truncated_datetime);
}
ReferenceValueLeaf::Facet(val) => {
self.columnar_writer
.record_str(doc_id, field_name, val.encoded_str());
self.columnar_writer.record_str(doc_id, field_name, val);
}
ReferenceValueLeaf::Bytes(val) => {
self.columnar_writer.record_bytes(doc_id, field_name, val);

View File

@@ -1,9 +1,12 @@
#![allow(deprecated)] // Remove with index sorting
use std::collections::HashSet;
use rand::{thread_rng, Rng};
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::schema::*;
#[allow(deprecated)]
use crate::{doc, schema, Index, IndexSettings, IndexSortByField, IndexWriter, Order, Searcher};
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {

View File

@@ -3,7 +3,7 @@ use std::fmt;
#[cfg(feature = "mmap")]
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::thread::available_parallelism;
use super::segment::Segment;
use super::segment_reader::merge_field_meta_data;
@@ -20,7 +20,7 @@ use crate::indexer::segment_updater::save_metas;
use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::document::Document;
use crate::schema::{Field, FieldType, Schema};
use crate::schema::{Field, FieldType, Schema, Type};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::SegmentReader;
@@ -83,7 +83,7 @@ fn save_new_metas(
///
/// ```
/// use tantivy::schema::*;
/// use tantivy::{Index, IndexSettings, IndexSortByField, Order};
/// use tantivy::{Index, IndexSettings};
///
/// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING);
@@ -96,10 +96,7 @@ fn save_new_metas(
///
/// let schema = schema_builder.build();
/// let settings = IndexSettings{
/// sort_by_field: Some(IndexSortByField{
/// field: "number".to_string(),
/// order: Order::Asc
/// }),
/// docstore_blocksize: 100_000,
/// ..Default::default()
/// };
/// let index = Index::builder().schema(schema).settings(settings).create_in_ram();
@@ -251,6 +248,15 @@ impl IndexBuilder {
sort_by_field.field
)));
}
let supported_field_types = [Type::I64, Type::U64, Type::F64, Type::Date];
let field_type = entry.field_type().value_type();
if !supported_field_types.contains(&field_type) {
return Err(TantivyError::InvalidArgument(format!(
"Unsupported field type in sort_by_field: {:?}. Supported field types: \
{:?} ",
field_type, supported_field_types,
)));
}
}
Ok(())
} else {
@@ -287,7 +293,7 @@ pub struct Index {
directory: ManagedDirectory,
schema: Schema,
settings: IndexSettings,
executor: Arc<Executor>,
executor: Executor,
tokenizers: TokenizerManager,
fast_field_tokenizers: TokenizerManager,
inventory: SegmentMetaInventory,
@@ -312,29 +318,25 @@ impl Index {
///
/// By default the executor is single thread, and simply runs in the calling thread.
pub fn search_executor(&self) -> &Executor {
self.executor.as_ref()
&self.executor
}
/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
self.executor = Arc::new(Executor::multi_thread(num_threads, "tantivy-search-")?);
self.executor = Executor::multi_thread(num_threads, "tantivy-search-")?;
Ok(())
}
/// Custom thread pool by a outer thread pool.
pub fn set_shared_multithread_executor(
&mut self,
shared_thread_pool: Arc<Executor>,
) -> crate::Result<()> {
self.executor = shared_thread_pool.clone();
Ok(())
pub fn set_executor(&mut self, executor: Executor) {
self.executor = executor;
}
/// Replace the default single thread search executor pool
/// by a thread pool with as many threads as there are CPUs on the system.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
let default_num_threads = num_cpus::get();
let default_num_threads = available_parallelism()?.get();
self.set_multithread_executor(default_num_threads)
}
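
A rough sketch of the resulting API surface in place of the removed `set_shared_multithread_executor`: since `Executor` is `Clone` (the pool sits behind an `Arc`), one pool can be installed on several indexes (placeholder schema, thread count arbitrary):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{Executor, Index};

fn share_executor_across_indexes() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let mut index_a = Index::create_in_ram(schema.clone());
    let mut index_b = Index::create_in_ram(schema);

    // One rayon-backed executor, cloned onto both indexes.
    let executor = Executor::multi_thread(4, "search-")?;
    index_a.set_executor(executor.clone());
    index_b.set_executor(executor);
    Ok(())
}
```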
@@ -412,7 +414,7 @@ impl Index {
schema,
tokenizers: TokenizerManager::default(),
fast_field_tokenizers: TokenizerManager::default(),
executor: Arc::new(Executor::single_thread()),
executor: Executor::single_thread(),
inventory,
}
}
@@ -615,7 +617,7 @@ impl Index {
&self,
memory_budget_in_bytes: usize,
) -> crate::Result<IndexWriter<D>> {
let mut num_threads = std::cmp::min(num_cpus::get(), MAX_NUM_THREAD);
let mut num_threads = std::cmp::min(available_parallelism()?.get(), MAX_NUM_THREAD);
let memory_budget_num_bytes_per_thread = memory_budget_in_bytes / num_threads;
if memory_budget_num_bytes_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
num_threads = (memory_budget_in_bytes / MEMORY_BUDGET_NUM_BYTES_MIN).max(1);

View File

@@ -288,6 +288,10 @@ impl Default for IndexSettings {
/// Presorting documents can greatly improve performance
/// in some scenarios, by applying top n
/// optimizations.
#[deprecated(
since = "0.22.0",
note = "We plan to remove index sorting in `0.23`. If you need index sorting, please comment on the related issue https://github.com/quickwit-oss/tantivy/issues/2352 and explain your use case."
)]
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexSortByField {
/// The field to sort the documents by

View File

@@ -1,12 +1,13 @@
use std::io;
use common::json_path_writer::JSON_END_OF_PATH;
use common::BinarySerializable;
use fnv::FnvHashSet;
use crate::directory::FileSlice;
use crate::positions::PositionReader;
use crate::postings::{BlockSegmentPostings, SegmentPostings, TermInfo};
use crate::schema::{IndexRecordOption, Term, Type, JSON_END_OF_PATH};
use crate::schema::{IndexRecordOption, Term, Type};
use crate::termdict::TermDictionary;
/// The inverted index reader is in charge of accessing

View File

@@ -1,5 +1,3 @@
//! # Index Module
//!
//! The `index` module in Tantivy contains core components to read and write indexes.
//!
//! It contains `Index` and `Segment`, where a `Index` consists of one or more `Segment`s.

View File

@@ -1,4 +1,4 @@
use std::cmp::{Ord, Ordering};
use std::cmp::Ordering;
use std::error::Error;
use std::fmt;
use std::str::FromStr;

View File

@@ -406,7 +406,7 @@ impl SegmentReader {
}
/// Returns an iterator that will iterate over the alive document ids
pub fn doc_ids_alive(&self) -> Box<dyn Iterator<Item = DocId> + '_> {
pub fn doc_ids_alive(&self) -> Box<dyn Iterator<Item = DocId> + Send + '_> {
if let Some(alive_bitset) = &self.alive_bitset_opt {
Box::new(alive_bitset.iter_alive())
} else {
@@ -516,8 +516,8 @@ impl fmt::Debug for SegmentReader {
mod test {
use super::*;
use crate::index::Index;
use crate::schema::{Schema, SchemaBuilder, Term, STORED, TEXT};
use crate::{DocId, IndexWriter};
use crate::schema::{SchemaBuilder, Term, STORED, TEXT};
use crate::IndexWriter;
#[test]
fn test_merge_field_meta_data_same() {

View File

@@ -246,8 +246,9 @@ impl DeleteCursor {
mod tests {
use super::{DeleteOperation, DeleteQueue};
use crate::index::SegmentReader;
use crate::query::{Explanation, Scorer, Weight};
use crate::{DocId, Score, SegmentReader};
use crate::{DocId, Score};
struct DummyWeight;
impl Weight for DummyWeight {

View File

@@ -159,7 +159,7 @@ mod tests_indexsorting {
use crate::indexer::NoMergePolicy;
use crate::query::QueryParser;
use crate::schema::*;
use crate::{DocAddress, Index, IndexSettings, IndexSortByField, Order};
use crate::{DocAddress, Index, IndexBuilder, IndexSettings, IndexSortByField, Order};
fn create_test_index(
index_settings: Option<IndexSettings>,
@@ -306,12 +306,10 @@ mod tests_indexsorting {
let my_string_field = index.schema().get_field("string_field").unwrap();
let searcher = index.reader()?.searcher();
{
assert_eq!(
searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))?
.get_first(my_string_field),
None
);
assert!(searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))?
.get_first(my_string_field)
.is_none());
assert_eq!(
searcher
.doc::<TantivyDocument>(DocAddress::new(0, 3))?
@@ -344,7 +342,7 @@ mod tests_indexsorting {
Some("blublub")
);
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 4))?;
assert_eq!(doc.get_first(my_string_field), None);
assert!(doc.get_first(my_string_field).is_none());
}
// sort by field desc
let index = create_test_index(
@@ -557,4 +555,28 @@ mod tests_indexsorting {
&[2000, 8000, 3000]
);
}
#[test]
fn test_text_sort() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::new();
schema_builder.add_text_field("id", STRING | FAST | STORED);
schema_builder.add_text_field("name", TEXT | STORED);
let resp = IndexBuilder::new()
.schema(schema_builder.build())
.settings(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Asc,
}),
..Default::default()
})
.create_in_ram();
assert!(resp
.unwrap_err()
.to_string()
.contains("Unsupported field type"));
Ok(())
}
}

View File

@@ -22,6 +22,7 @@ where
}
}
#[allow(dead_code)]
pub trait FlatMapWithBufferIter: Iterator {
/// Function similar to `flat_map`, but allows reusing a shared `Vec`.
fn flat_map_with_buffer<F, T>(self, fill_buffer: F) -> FlatMapWithBuffer<T, F, Self>

View File

@@ -814,7 +814,6 @@ mod tests {
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::indexer::NoMergePolicy;
use crate::query::{BooleanQuery, Occur, Query, QueryParser, TermQuery};
use crate::schema::document::Value;
use crate::schema::{
self, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, NumericOptions, Schema,
TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
@@ -2013,7 +2012,7 @@ mod tests {
let mut bool2 = doc.get_all(multi_bools);
assert_eq!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_ne!(bool, bool2.next().unwrap().as_bool().unwrap());
assert_eq!(None, bool2.next())
assert!(bool2.next().is_none())
}
}
}

View File

@@ -144,10 +144,9 @@ mod tests {
use once_cell::sync::Lazy;
use super::*;
use crate::index::SegmentMetaInventory;
use crate::indexer::merge_policy::MergePolicy;
use crate::index::{SegmentId, SegmentMetaInventory};
use crate::schema;
use crate::schema::INDEXED;
use crate::{schema, SegmentId};
static INVENTORY: Lazy<SegmentMetaInventory> = Lazy::new(SegmentMetaInventory::default);

View File

@@ -1,7 +1,8 @@
use std::collections::HashSet;
use std::ops::Deref;
use crate::{Inventory, Opstamp, SegmentId, TrackedObject};
use crate::index::SegmentId;
use crate::{Inventory, Opstamp, TrackedObject};
#[derive(Default)]
pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);

View File

@@ -39,7 +39,6 @@ impl MergePolicy for NoMergePolicy {
pub mod tests {
use super::*;
use crate::index::{SegmentId, SegmentMeta};
/// `MergePolicy` useful for test purposes.
///

View File

@@ -13,7 +13,7 @@ use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{AliveBitSet, FastFieldNotAvailableError};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::index::{Segment, SegmentReader};
use crate::index::{Segment, SegmentComponent, SegmentReader};
use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping};
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
@@ -21,8 +21,7 @@ use crate::schema::{value_type_to_column_type, Field, FieldType, Schema};
use crate::store::StoreWriter;
use crate::termdict::{TermMerger, TermOrdinal};
use crate::{
DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
SegmentComponent, SegmentOrdinal,
DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order, SegmentOrdinal,
};
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -576,7 +575,7 @@ impl IndexMerger {
//
// Overall the reliable way to know if we have actual frequencies loaded or not
// is to check whether the actual decoded array is empty or not.
if has_term_freq != !postings.block_cursor.freqs().is_empty() {
if has_term_freq == postings.block_cursor.freqs().is_empty() {
return Err(DataCorruption::comment_only(
"Term freqs are inconsistent across segments",
)
@@ -697,7 +696,7 @@ impl IndexMerger {
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let doc_bytes_it = &mut document_iterators[old_doc_addr.segment_ord as usize];
if let Some(doc_bytes_res) = doc_bytes_it.next() {
let doc_bytes = doc_bytes_res?;
let (_, doc_bytes) = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
} else {
return Err(DataCorruption::comment_only(format!(
@@ -729,7 +728,7 @@ impl IndexMerger {
|| store_reader.decompressor() != store_writer.compressor().into()
{
for doc_bytes_res in store_reader.iter_raw(reader.alive_bitset()) {
let doc_bytes = doc_bytes_res?;
let (_, doc_bytes) = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
}
} else {
@@ -794,9 +793,8 @@ mod tests {
BytesFastFieldTestCollector, FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE,
};
use crate::collector::{Count, FacetCollector};
use crate::index::Index;
use crate::index::{Index, SegmentId};
use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery};
use crate::schema::document::Value;
use crate::schema::{
Facet, FacetOptions, IndexRecordOption, NumericOptions, TantivyDocument, Term,
TextFieldIndexing, INDEXED, TEXT,
@@ -804,7 +802,7 @@ mod tests {
use crate::time::OffsetDateTime;
use crate::{
assert_nearly_equals, schema, DateTime, DocAddress, DocId, DocSet, IndexSettings,
IndexSortByField, IndexWriter, Order, Searcher, SegmentId,
IndexSortByField, IndexWriter, Order, Searcher,
};
#[test]

View File

@@ -3,15 +3,15 @@ mod tests {
use crate::collector::TopDocs;
use crate::fastfield::AliveBitSet;
use crate::index::Index;
use crate::postings::Postings;
use crate::query::QueryParser;
use crate::schema::document::Value;
use crate::schema::{
self, BytesOptions, Facet, FacetOptions, IndexRecordOption, NumericOptions,
TextFieldIndexing, TextOptions,
};
use crate::{
DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, Postings,
TantivyDocument, Term,
DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, TantivyDocument,
Term,
};
fn create_test_index_posting_list_issue(index_settings: Option<IndexSettings>) -> Index {

View File

@@ -144,6 +144,181 @@ mod tests_mmap {
assert_eq!(num_docs, 256);
}
}
#[test]
fn test_json_field_null_byte() {
// Test when field name contains a zero byte, which has special meaning in tantivy.
// As a workaround, we convert the zero byte to the ASCII character '0'.
// https://github.com/quickwit-oss/tantivy/issues/2340
// https://github.com/quickwit-oss/tantivy/issues/2193
let field_name_in = "\u{0000}";
let field_name_out = "0";
test_json_field_name(field_name_in, field_name_out);
}
#[test]
fn test_json_field_1byte() {
// Test when the field name contains a \u{0001} byte, which has special meaning in tantivy
// (JSON path segment separator). The \u{0001} byte can be addressed as a literal \u{0001} or as '.'.
let field_name_in = "\u{0001}";
let field_name_out = "\u{0001}";
test_json_field_name(field_name_in, field_name_out);
// Test when the field name contains a \u{0001} byte and it is addressed as '.' instead.
let field_name_in = "\u{0001}";
let field_name_out = ".";
test_json_field_name(field_name_in, field_name_out);
}
#[test]
fn test_json_field_dot() {
// Test when field name contains a '.'
let field_name_in = ".";
let field_name_out = ".";
test_json_field_name(field_name_in, field_name_out);
}
fn test_json_field_name(field_name_in: &str, field_name_out: &str) {
let mut schema_builder = Schema::builder();
let options = JsonObjectOptions::from(TEXT | FAST).set_expand_dots_enabled();
let field = schema_builder.add_json_field("json", options);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
index_writer
.add_document(doc!(field=>json!({format!("{field_name_in}"): "test1", format!("num{field_name_in}"): 10})))
.unwrap();
index_writer
.add_document(doc!(field=>json!({format!("a{field_name_in}"): "test2"})))
.unwrap();
index_writer
.add_document(doc!(field=>json!({format!("a{field_name_in}a"): "test3"})))
.unwrap();
index_writer
.add_document(
doc!(field=>json!({format!("a{field_name_in}a{field_name_in}"): "test4"})),
)
.unwrap();
index_writer
.add_document(
doc!(field=>json!({format!("a{field_name_in}.ab{field_name_in}"): "test5"})),
)
.unwrap();
index_writer
.add_document(
doc!(field=>json!({format!("a{field_name_in}"): json!({format!("a{field_name_in}"): "test6"}) })),
)
.unwrap();
index_writer
.add_document(doc!(field=>json!({format!("{field_name_in}a" ): "test7"})))
.unwrap();
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let parse_query = QueryParser::for_index(&index, Vec::new());
let test_query = |query_str: &str| {
let query = parse_query.parse_query(query_str).unwrap();
let num_docs = searcher.search(&query, &Count).unwrap();
assert_eq!(num_docs, 1, "{}", query_str);
};
test_query(format!("json.{field_name_out}:test1").as_str());
test_query(format!("json.a{field_name_out}:test2").as_str());
test_query(format!("json.a{field_name_out}a:test3").as_str());
test_query(format!("json.a{field_name_out}a{field_name_out}:test4").as_str());
test_query(format!("json.a{field_name_out}.ab{field_name_out}:test5").as_str());
test_query(format!("json.a{field_name_out}.a{field_name_out}:test6").as_str());
test_query(format!("json.{field_name_out}a:test7").as_str());
let test_agg = |field_name: &str, expected: &str| {
let agg_req_str = json!(
{
"termagg": {
"terms": {
"field": field_name,
}
}
});
let agg_req: Aggregations = serde_json::from_value(agg_req_str).unwrap();
let collector = AggregationCollector::from_aggs(agg_req, Default::default());
let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
let res = serde_json::to_value(agg_res).unwrap();
assert_eq!(res["termagg"]["buckets"][0]["doc_count"], 1);
assert_eq!(res["termagg"]["buckets"][0]["key"], expected);
};
test_agg(format!("json.{field_name_out}").as_str(), "test1");
test_agg(format!("json.a{field_name_out}").as_str(), "test2");
test_agg(format!("json.a{field_name_out}a").as_str(), "test3");
test_agg(
format!("json.a{field_name_out}a{field_name_out}").as_str(),
"test4",
);
test_agg(
format!("json.a{field_name_out}.ab{field_name_out}").as_str(),
"test5",
);
test_agg(
format!("json.a{field_name_out}.a{field_name_out}").as_str(),
"test6",
);
test_agg(format!("json.{field_name_out}a").as_str(), "test7");
// `.` is stored as `\u{0001}` internally in tantivy
let field_name_out_internal = if field_name_out == "." {
"\u{0001}"
} else {
field_name_out
};
let mut fields = reader.searcher().segment_readers()[0]
.inverted_index(field)
.unwrap()
.list_encoded_fields()
.unwrap();
assert_eq!(fields.len(), 8);
fields.sort();
let mut expected_fields = vec![
(format!("a{field_name_out_internal}"), Type::Str),
(format!("a{field_name_out_internal}a"), Type::Str),
(
format!("a{field_name_out_internal}a{field_name_out_internal}"),
Type::Str,
),
(
format!("a{field_name_out_internal}\u{1}ab{field_name_out_internal}"),
Type::Str,
),
(
format!("a{field_name_out_internal}\u{1}a{field_name_out_internal}"),
Type::Str,
),
(format!("{field_name_out_internal}a"), Type::Str),
(format!("{field_name_out_internal}"), Type::Str),
(format!("num{field_name_out_internal}"), Type::I64),
];
expected_fields.sort();
assert_eq!(fields, expected_fields);
// Check columnar reader
let mut columns = reader.searcher().segment_readers()[0]
.fast_fields()
.columnar()
.list_columns()
.unwrap()
.into_iter()
.map(|(name, _)| name)
.collect::<Vec<_>>();
let mut expected_columns = vec![
format!("json\u{1}{field_name_out_internal}"),
format!("json\u{1}{field_name_out_internal}a"),
format!("json\u{1}a{field_name_out_internal}"),
format!("json\u{1}a{field_name_out_internal}a"),
format!("json\u{1}a{field_name_out_internal}a{field_name_out_internal}"),
format!("json\u{1}a{field_name_out_internal}\u{1}ab{field_name_out_internal}"),
format!("json\u{1}a{field_name_out_internal}\u{1}a{field_name_out_internal}"),
format!("json\u{1}num{field_name_out_internal}"),
];
columns.sort();
expected_columns.sort();
assert_eq!(columns, expected_columns);
}
#[test]
fn test_json_field_expand_dots_enabled_dot_escape_not_required() {

View File

@@ -5,20 +5,20 @@ use tokenizer_api::BoxTokenStream;
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
use super::operation::AddOperation;
use crate::core::json_utils::index_json_values;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
use crate::index::Segment;
use crate::index::{Segment, SegmentComponent};
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::json_utils::{index_json_value, IndexingPositionsPerPath};
use crate::postings::{
compute_table_memory_size, serialize_postings, IndexingContext, IndexingPosition,
PerFieldPostingsWriter, PostingsWriter,
};
use crate::schema::document::{Document, ReferenceValue, Value};
use crate::schema::document::{Document, Value};
use crate::schema::{FieldEntry, FieldType, Schema, Term, DATE_TIME_PRECISION_INDEXED};
use crate::store::{StoreReader, StoreWriter};
use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer};
use crate::{DocId, Opstamp, SegmentComponent, TantivyError};
use crate::{DocId, Opstamp, TantivyError};
/// Computes the initial size of the hash table.
///
@@ -68,6 +68,7 @@ pub struct SegmentWriter {
pub(crate) fast_field_writers: FastFieldsWriter,
pub(crate) fieldnorms_writer: FieldNormsWriter,
pub(crate) json_path_writer: JsonPathWriter,
pub(crate) json_positions_per_path: IndexingPositionsPerPath,
pub(crate) doc_opstamps: Vec<Opstamp>,
per_field_text_analyzers: Vec<TextAnalyzer>,
term_buffer: Term,
@@ -119,6 +120,7 @@ impl SegmentWriter {
per_field_postings_writers,
fieldnorms_writer: FieldNormsWriter::for_schema(&schema),
json_path_writer: JsonPathWriter::default(),
json_positions_per_path: IndexingPositionsPerPath::default(),
segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema_and_tokenizer_manager(
&schema,
@@ -204,8 +206,7 @@ impl SegmentWriter {
// Used to help with linting and type checking.
let value = value_access as D::Value<'_>;
let facet = value.as_facet().ok_or_else(make_schema_error)?;
let facet_str = facet.encoded_str();
let facet_str = value.as_facet().ok_or_else(make_schema_error)?;
let mut facet_tokenizer = facet_tokenizer.token_stream(facet_str);
let mut indexing_position = IndexingPosition::default();
postings_writer.index_text(
@@ -228,7 +229,7 @@ impl SegmentWriter {
&mut self.per_field_text_analyzers[field.field_id() as usize];
text_analyzer.token_stream(text)
} else if let Some(tok_str) = value.as_pre_tokenized_text() {
BoxTokenStream::new(PreTokenizedStream::from(tok_str.clone()))
BoxTokenStream::new(PreTokenizedStream::from(*tok_str.clone()))
} else {
continue;
};
@@ -342,26 +343,24 @@ impl SegmentWriter {
FieldType::JsonObject(json_options) => {
let text_analyzer =
&mut self.per_field_text_analyzers[field.field_id() as usize];
let json_values_it = values.map(|value_access| {
// Used to help with linting and type checking.
let value_access = value_access as D::Value<'_>;
let value = value_access.as_value();
match value {
ReferenceValue::Object(object_iter) => Ok(object_iter),
_ => Err(make_schema_error()),
}
});
index_json_values::<D::Value<'_>>(
doc_id,
json_values_it,
text_analyzer,
json_options.is_expand_dots_enabled(),
term_buffer,
postings_writer,
&mut self.json_path_writer,
ctx,
)?;
self.json_positions_per_path.clear();
self.json_path_writer
.set_expand_dots(json_options.is_expand_dots_enabled());
for json_value in values {
self.json_path_writer.clear();
index_json_value(
doc_id,
json_value,
text_analyzer,
term_buffer,
&mut self.json_path_writer,
postings_writer,
ctx,
&mut self.json_positions_per_path,
);
}
}
FieldType::IpAddr(_) => {
let mut num_vals = 0;
@@ -496,22 +495,21 @@ mod tests {
use tempfile::TempDir;
use crate::collector::{Count, TopDocs};
use crate::core::json_utils::JsonTermWriter;
use crate::directory::RamDirectory;
use crate::postings::TermInfo;
use crate::fastfield::FastValue;
use crate::postings::{Postings, TermInfo};
use crate::query::{PhraseQuery, QueryParser};
use crate::schema::document::Value;
use crate::schema::{
Document, IndexRecordOption, Schema, TextFieldIndexing, TextOptions, Type, STORED, STRING,
TEXT,
Document, IndexRecordOption, OwnedValue, Schema, TextFieldIndexing, TextOptions, STORED,
STRING, TEXT,
};
use crate::store::{Compressor, StoreReader, StoreWriter};
use crate::time::format_description::well_known::Rfc3339;
use crate::time::OffsetDateTime;
use crate::tokenizer::{PreTokenizedString, Token};
use crate::{
DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, Postings, TantivyDocument,
Term, TERMINATED,
DateTime, Directory, DocAddress, DocSet, Index, IndexWriter, TantivyDocument, Term,
TERMINATED,
};
#[test]
@@ -556,9 +554,12 @@ mod tests {
let reader = StoreReader::open(directory.open_read(path).unwrap(), 0).unwrap();
let doc = reader.get::<TantivyDocument>(0).unwrap();
assert_eq!(doc.field_values().len(), 2);
assert_eq!(doc.field_values()[0].value().as_str(), Some("A"));
assert_eq!(doc.field_values()[1].value().as_str(), Some("title"));
assert_eq!(doc.field_values().count(), 2);
assert_eq!(doc.get_all(text_field).next().unwrap().as_str(), Some("A"));
assert_eq!(
doc.get_all(text_field).nth(1).unwrap().as_str(),
Some("title")
);
}
#[test]
fn test_simple_json_indexing() {
@@ -598,12 +599,51 @@ mod tests {
assert_eq!(score_docs.len(), 2);
}
#[test]
fn test_flat_json_indexing() {
// A JSON Object that contains mixed values on the first level
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STORED | STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let mut writer = index.writer_for_tests().unwrap();
// Text, i64, u64
writer.add_document(doc!(json_field=>"b")).unwrap();
writer
.add_document(doc!(json_field=>OwnedValue::I64(10i64)))
.unwrap();
writer
.add_document(doc!(json_field=>OwnedValue::U64(55u64)))
.unwrap();
writer
.add_document(doc!(json_field=>json!({"my_field": "a"})))
.unwrap();
writer.commit().unwrap();
let search_and_expect = |query| {
let query_parser = QueryParser::for_index(&index, vec![json_field]);
let text_query = query_parser.parse_query(query).unwrap();
let score_docs: Vec<(_, DocAddress)> = index
.reader()
.unwrap()
.searcher()
.search(&text_query, &TopDocs::with_limit(4))
.unwrap();
assert_eq!(score_docs.len(), 1);
};
search_and_expect("my_field:a");
search_and_expect("b");
search_and_expect("10");
search_and_expect("55");
}
#[test]
fn test_json_indexing() {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STORED | TEXT);
let schema = schema_builder.build();
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
let json_val: serde_json::Value = serde_json::from_str(
r#"{
"toto": "titi",
"float": -0.2,
@@ -631,129 +671,125 @@ mod tests {
doc_id: 0u32,
})
.unwrap();
let serdeser_json_val = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(
&doc.to_json(&schema),
)
.unwrap()
.get("json")
.unwrap()[0]
.as_object()
let serdeser_json_val = serde_json::from_str::<serde_json::Value>(&doc.to_json(&schema))
.unwrap()
.get("json")
.unwrap()[0]
.clone();
assert_eq!(json_val, serdeser_json_val);
let segment_reader = searcher.segment_reader(0u32);
let inv_idx = segment_reader.inverted_index(json_field).unwrap();
let term_dict = inv_idx.terms();
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut term_stream = term_dict.stream().unwrap();
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
let term_from_path =
|path: &str| -> Term { Term::from_field_json_path(json_field, path, false) };
json_term_writer.push_path_segment("bool");
json_term_writer.set_fast_value(true);
fn set_fast_val<T: FastValue>(val: T, mut term: Term) -> Term {
term.append_type_and_fast_value(val);
term
}
fn set_str(val: &str, mut term: Term) -> Term {
term.append_type_and_str(val);
term
}
let term = term_from_path("bool");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(true, term).serialized_value_bytes()
);
json_term_writer.pop_path_segment();
json_term_writer.push_path_segment("complexobject");
json_term_writer.push_path_segment("field.with.dot");
json_term_writer.set_fast_value(1i64);
let term = term_from_path("complexobject.field\\.with\\.dot");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(1i64, term).serialized_value_bytes()
);
json_term_writer.pop_path_segment();
json_term_writer.pop_path_segment();
json_term_writer.push_path_segment("date");
json_term_writer.set_fast_value(DateTime::from_utc(
OffsetDateTime::parse("1985-04-12T23:20:50.52Z", &Rfc3339).unwrap(),
));
// Date
let term = term_from_path("date");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(
DateTime::from_utc(
OffsetDateTime::parse("1985-04-12T23:20:50.52Z", &Rfc3339).unwrap(),
),
term
)
.serialized_value_bytes()
);
json_term_writer.pop_path_segment();
json_term_writer.push_path_segment("float");
json_term_writer.set_fast_value(-0.2f64);
// Float
let term = term_from_path("float");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(-0.2f64, term).serialized_value_bytes()
);
json_term_writer.pop_path_segment();
json_term_writer.push_path_segment("my_arr");
json_term_writer.set_fast_value(2i64);
// Number In Array
let term = term_from_path("my_arr");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(2i64, term).serialized_value_bytes()
);
json_term_writer.set_fast_value(3i64);
let term = term_from_path("my_arr");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(3i64, term).serialized_value_bytes()
);
json_term_writer.set_fast_value(4i64);
let term = term_from_path("my_arr");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(4i64, term).serialized_value_bytes()
);
json_term_writer.push_path_segment("my_key");
json_term_writer.set_str("tokens");
// El in Array
let term = term_from_path("my_arr.my_key");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_str("tokens", term).serialized_value_bytes()
);
json_term_writer.set_str("two");
let term = term_from_path("my_arr.my_key");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_str("two", term).serialized_value_bytes()
);
json_term_writer.pop_path_segment();
json_term_writer.pop_path_segment();
json_term_writer.push_path_segment("signed");
json_term_writer.set_fast_value(-2i64);
// Signed
let term = term_from_path("signed");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(-2i64, term).serialized_value_bytes()
);
json_term_writer.pop_path_segment();
json_term_writer.push_path_segment("toto");
json_term_writer.set_str("titi");
let term = term_from_path("toto");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_str("titi", term).serialized_value_bytes()
);
json_term_writer.pop_path_segment();
json_term_writer.push_path_segment("unsigned");
json_term_writer.set_fast_value(1i64);
// Unsigned
let term = term_from_path("unsigned");
assert!(term_stream.advance());
assert_eq!(
term_stream.key(),
json_term_writer.term().serialized_value_bytes()
set_fast_val(1i64, term).serialized_value_bytes()
);
assert!(!term_stream.advance());
}
@@ -774,14 +810,9 @@ mod tests {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let inv_index = segment_reader.inverted_index(json_field).unwrap();
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("mykey");
json_term_writer.set_str("token");
let term_info = inv_index
.get_term_info(json_term_writer.term())
.unwrap()
.unwrap();
let mut term = Term::from_field_json_path(json_field, "mykey", false);
term.append_type_and_str("token");
let term_info = inv_index.get_term_info(&term).unwrap().unwrap();
assert_eq!(
term_info,
TermInfo {
@@ -807,7 +838,7 @@ mod tests {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STRING);
let schema = schema_builder.build();
let json_val: serde_json::Map<String, serde_json::Value> =
let json_val: serde_json::Value =
serde_json::from_str(r#"{"mykey": "two tokens"}"#).unwrap();
let doc = doc!(json_field=>json_val);
let index = Index::create_in_ram(schema);
@@ -818,14 +849,9 @@ mod tests {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);
let inv_index = segment_reader.inverted_index(json_field).unwrap();
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("mykey");
json_term_writer.set_str("two tokens");
let term_info = inv_index
.get_term_info(json_term_writer.term())
.unwrap()
.unwrap();
let mut term = Term::from_field_json_path(json_field, "mykey", false);
term.append_type_and_str("two tokens");
let term_info = inv_index.get_term_info(&term).unwrap().unwrap();
assert_eq!(
term_info,
TermInfo {
@@ -852,7 +878,7 @@ mod tests {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", TEXT);
let schema = schema_builder.build();
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
let json_val: serde_json::Value = serde_json::from_str(
r#"{"mykey": [{"field": "hello happy tax payer"}, {"field": "nothello"}]}"#,
)
.unwrap();
@@ -863,16 +889,18 @@ mod tests {
writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut term = Term::with_type_and_field(Type::Json, json_field);
let mut json_term_writer = JsonTermWriter::wrap(&mut term, false);
json_term_writer.push_path_segment("mykey");
json_term_writer.push_path_segment("field");
json_term_writer.set_str("hello");
let hello_term = json_term_writer.term().clone();
json_term_writer.set_str("nothello");
let nothello_term = json_term_writer.term().clone();
json_term_writer.set_str("happy");
let happy_term = json_term_writer.term().clone();
let term = Term::from_field_json_path(json_field, "mykey.field", false);
let mut hello_term = term.clone();
hello_term.append_type_and_str("hello");
let mut nothello_term = term.clone();
nothello_term.append_type_and_str("nothello");
let mut happy_term = term.clone();
happy_term.append_type_and_str("happy");
let phrase_query = PhraseQuery::new(vec![hello_term, happy_term.clone()]);
assert_eq!(searcher.search(&phrase_query, &Count).unwrap(), 1);
let phrase_query = PhraseQuery::new(vec![nothello_term, happy_term]);
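The tests above exercise the new `Term::from_field_json_path` / `append_type_and_str` API directly. As a rough illustration of the same API from the caller's side, the sketch below (hypothetical field contents and path, not taken from the diff) builds a term for a JSON path and counts matching documents with a `TermQuery`:

```rust
use tantivy::collector::Count;
use tantivy::query::TermQuery;
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::{Searcher, Term};

/// Counts documents whose JSON field contains {"attributes": {"color": "red"}}.
fn count_red(searcher: &Searcher, json_field: Field) -> tantivy::Result<usize> {
    let mut term = Term::from_field_json_path(json_field, "attributes.color", /* expand_dots */ false);
    term.append_type_and_str("red");
    let query = TermQuery::new(term, IndexRecordOption::Basic);
    searcher.search(&query, &Count)
}
```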


@@ -178,6 +178,7 @@ pub use crate::future_result::FutureResult;
pub type Result<T> = std::result::Result<T, TantivyError>;
mod core;
#[allow(deprecated)] // Remove with index sorting
pub mod indexer;
#[allow(unused_doc_comments)]
@@ -189,6 +190,7 @@ pub mod collector;
pub mod directory;
pub mod fastfield;
pub mod fieldnorm;
#[allow(deprecated)] // Remove with index sorting
pub mod index;
pub mod positions;
pub mod postings;
@@ -214,29 +216,17 @@ use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
pub use self::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN, TERMINATED};
#[deprecated(
since = "0.22.0",
note = "Will be removed in tantivy 0.23. Use export from snippet module instead"
)]
pub use self::snippet::{Snippet, SnippetGenerator};
#[doc(hidden)]
pub use crate::core::json_utils;
pub use crate::core::{Executor, Searcher, SearcherGeneration};
pub use crate::directory::Directory;
#[allow(deprecated)] // Remove with index sorting
pub use crate::index::{
Index, IndexBuilder, IndexMeta, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
Segment, SegmentComponent, SegmentId, SegmentMeta, SegmentReader,
Segment, SegmentMeta, SegmentReader,
};
#[deprecated(
since = "0.22.0",
note = "Will be removed in tantivy 0.23. Use export from indexer module instead"
)]
pub use crate::indexer::PreparedCommit;
pub use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
pub use crate::postings::Postings;
#[allow(deprecated)]
pub use crate::schema::DatePrecision;
pub use crate::schema::{DateOptions, DateTimePrecision, Document, TantivyDocument, Term};
pub use crate::schema::{Document, TantivyDocument, Term};
/// Index format version.
const INDEX_FORMAT_VERSION: u32 = 6;
@@ -254,7 +244,7 @@ pub struct Version {
impl fmt::Debug for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
fmt::Display::fmt(self, f)
}
}
@@ -265,9 +255,10 @@ static VERSION: Lazy<Version> = Lazy::new(|| Version {
index_format_version: INDEX_FORMAT_VERSION,
});
impl ToString for Version {
fn to_string(&self) -> String {
format!(
impl fmt::Display for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"tantivy v{}.{}.{}, index_format v{}",
self.major, self.minor, self.patch, self.index_format_version
)
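This hunk replaces the manual `ToString` impl with `fmt::Display`, the more idiomatic direction: the standard library's blanket `impl<T: fmt::Display> ToString for T` then supplies `to_string()` automatically, and `Debug` can simply delegate with `fmt::Display::fmt(self, f)`. A minimal standalone sketch of the idiom (hypothetical `SemVer` type, not from the diff):

```rust
use std::fmt;

struct SemVer {
    major: u32,
    minor: u32,
    patch: u32,
}

impl fmt::Display for SemVer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "v{}.{}.{}", self.major, self.minor, self.patch)
    }
}

fn main() {
    let v = SemVer { major: 0, minor: 22, patch: 0 };
    // `to_string()` comes for free from the blanket `ToString` impl.
    assert_eq!(v.to_string(), "v0.22.0");
    println!("{v}");
}
```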
@@ -390,9 +381,10 @@ pub mod tests {
use crate::docset::{DocSet, TERMINATED};
use crate::index::SegmentReader;
use crate::merge_policy::NoMergePolicy;
use crate::postings::Postings;
use crate::query::BooleanQuery;
use crate::schema::*;
use crate::{DateTime, DocAddress, Index, IndexWriter, Postings, ReloadPolicy};
use crate::{DateTime, DocAddress, Index, IndexWriter, ReloadPolicy};
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
@@ -444,7 +436,6 @@ pub mod tests {
}
#[test]
#[cfg(not(feature = "lz4"))]
fn test_version_string() {
use regex::Regex;
let regex_ptn = Regex::new(
@@ -944,7 +935,7 @@ pub mod tests {
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", STORED | TEXT);
let schema = schema_builder.build();
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
let json_val: serde_json::Value = serde_json::from_str(
r#"{
"signed": 2,
"float": 2.0,
@@ -1034,13 +1025,16 @@ pub mod tests {
text_field => "some other value",
other_text_field => "short");
assert_eq!(document.len(), 3);
let values: Vec<&OwnedValue> = document.get_all(text_field).collect();
let values: Vec<OwnedValue> = document.get_all(text_field).map(OwnedValue::from).collect();
assert_eq!(values.len(), 2);
assert_eq!(values[0].as_str(), Some("tantivy"));
assert_eq!(values[1].as_str(), Some("some other value"));
let values: Vec<&OwnedValue> = document.get_all(other_text_field).collect();
assert_eq!(values[0].as_ref().as_str(), Some("tantivy"));
assert_eq!(values[1].as_ref().as_str(), Some("some other value"));
let values: Vec<OwnedValue> = document
.get_all(other_text_field)
.map(OwnedValue::from)
.collect();
assert_eq!(values.len(), 1);
assert_eq!(values[0].as_str(), Some("short"));
assert_eq!(values[0].as_ref().as_str(), Some("short"));
}
#[test]
@@ -1107,9 +1101,9 @@ pub mod tests {
#[test]
fn test_update_via_delete_insert() -> crate::Result<()> {
use crate::collector::Count;
use crate::index::SegmentId;
use crate::indexer::NoMergePolicy;
use crate::query::AllQuery;
use crate::SegmentId;
const DOC_COUNT: u64 = 2u64;


@@ -41,6 +41,7 @@
/// );
/// # }
/// ```
#[macro_export]
macro_rules! doc(
() => {
@@ -52,7 +53,7 @@ macro_rules! doc(
{
let mut document = $crate::TantivyDocument::default();
$(
document.add_field_value($field, $value);
document.add_field_value($field, &$value);
)*
document
}


@@ -14,7 +14,6 @@ pub fn compressed_block_size(num_bits: u8) -> usize {
pub struct BlockEncoder {
bitpacker: BitPacker4x,
pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
pub output_len: usize,
}
impl Default for BlockEncoder {
@@ -28,7 +27,6 @@ impl BlockEncoder {
BlockEncoder {
bitpacker: BitPacker4x::new(),
output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
output_len: 0,
}
}


@@ -1,5 +1,6 @@
use std::io;
use common::json_path_writer::JSON_END_OF_PATH;
use stacker::Addr;
use crate::indexer::doc_id_mapping::DocIdMapping;
@@ -7,7 +8,7 @@ use crate::indexer::path_to_unordered_id::OrderedPathId;
use crate::postings::postings_writer::SpecializedPostingsWriter;
use crate::postings::recorder::{BufferLender, DocIdRecorder, Recorder};
use crate::postings::{FieldSerializer, IndexingContext, IndexingPosition, PostingsWriter};
use crate::schema::{Field, Type, JSON_END_OF_PATH};
use crate::schema::{Field, Type};
use crate::tokenizer::TokenStream;
use crate::{DocId, Term};
@@ -67,10 +68,18 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
) -> io::Result<()> {
let mut term_buffer = Term::with_capacity(48);
let mut buffer_lender = BufferLender::default();
term_buffer.clear_with_field_and_type(Type::Json, Field::from_field_id(0));
let mut prev_term_id = u32::MAX;
let mut term_path_len = 0; // this will be set in the first iteration
for (_field, path_id, term, addr) in term_addrs {
term_buffer.clear_with_field_and_type(Type::Json, Field::from_field_id(0));
term_buffer.append_bytes(ordered_id_to_path[path_id.path_id() as usize].as_bytes());
term_buffer.append_bytes(&[JSON_END_OF_PATH]);
if prev_term_id != path_id.path_id() {
term_buffer.truncate_value_bytes(0);
term_buffer.append_path(ordered_id_to_path[path_id.path_id() as usize].as_bytes());
term_buffer.append_bytes(&[JSON_END_OF_PATH]);
term_path_len = term_buffer.len_bytes();
prev_term_id = path_id.path_id();
}
term_buffer.truncate_value_bytes(term_path_len);
term_buffer.append_bytes(term);
if let Some(json_value) = term_buffer.value().as_json_value_bytes() {
let typ = json_value.typ();
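The rewritten loop above avoids re-serializing the JSON path for every term: the path prefix is written once per `path_id`, and the buffer is truncated back to the prefix length before each term is appended. A standalone sketch of that prefix-reuse pattern (plain `Vec<u8>` stand-ins, `0u8` in place of `JSON_END_OF_PATH`, not the actual tantivy types):

```rust
/// For (path_id, path, term_bytes) tuples sorted by path_id, produce the
/// serialized `path \0 term` keys while rebuilding the prefix only when the
/// path changes.
fn append_terms(entries: &[(u32, &str, &[u8])]) -> Vec<Vec<u8>> {
    let mut buffer: Vec<u8> = Vec::new();
    let mut keys = Vec::new();
    let mut prev_path_id = u32::MAX;
    let mut prefix_len = 0;
    for (path_id, path, term_bytes) in entries {
        if prev_path_id != *path_id {
            buffer.clear();
            buffer.extend_from_slice(path.as_bytes());
            buffer.push(0u8); // stand-in for JSON_END_OF_PATH
            prefix_len = buffer.len();
            prev_path_id = *path_id;
        }
        buffer.truncate(prefix_len);
        buffer.extend_from_slice(term_bytes);
        keys.push(buffer.clone());
    }
    keys
}
```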


@@ -56,7 +56,7 @@ pub struct InvertedIndexSerializer {
impl InvertedIndexSerializer {
/// Open a new `InvertedIndexSerializer` for the given segment
pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> {
use crate::SegmentComponent::{Positions, Postings, Terms};
use crate::index::SegmentComponent::{Positions, Postings, Terms};
let inv_index_serializer = InvertedIndexSerializer {
terms_write: CompositeWrite::wrap(segment.open_write(Terms)?),
postings_write: CompositeWrite::wrap(segment.open_write(Postings)?),


@@ -1,5 +1,3 @@
use std::convert::TryInto;
use crate::directory::OwnedBytes;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::Bm25Weight;


@@ -1,5 +1,4 @@
use std::io;
use std::iter::ExactSizeIterator;
use std::ops::Range;
use common::{BinarySerializable, FixedSize};


@@ -1,8 +1,9 @@
use super::Scorer;
use crate::docset::TERMINATED;
use crate::index::SegmentReader;
use crate::query::explanation::does_not_match;
use crate::query::{EnableScoring, Explanation, Query, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader};
use crate::{DocId, DocSet, Score, Searcher};
/// `EmptyQuery` is a dummy `Query` in which no document matches.
///


@@ -149,7 +149,7 @@ mod tests {
use crate::query::exist_query::ExistsQuery;
use crate::query::{BooleanQuery, RangeQuery};
use crate::schema::{Facet, FacetOptions, Schema, FAST, INDEXED, STRING, TEXT};
use crate::{doc, Index, Searcher};
use crate::{Index, Searcher};
#[test]
fn test_exists_query_simple() -> crate::Result<()> {


@@ -84,7 +84,7 @@ pub struct FuzzyTermQuery {
distance: u8,
/// Should a transposition cost 1 or 2?
transposition_cost_one: bool,
///
/// is a starts with query
prefix: bool,
}


@@ -180,7 +180,7 @@ impl MoreLikeThis {
let facets: Vec<&str> = values
.iter()
.map(|value| {
value.as_facet().map(|f| f.encoded_str()).ok_or_else(|| {
value.as_facet().ok_or_else(|| {
TantivyError::InvalidArgument("invalid field value".to_string())
})
})
@@ -220,7 +220,7 @@ impl MoreLikeThis {
let mut token_stream = tokenizer.token_stream(text);
token_stream.process(sink);
} else if let Some(tok_str) = value.as_pre_tokenized_text() {
let mut token_stream = PreTokenizedStream::from(tok_str.clone());
let mut token_stream = PreTokenizedStream::from(*tok_str.clone());
token_stream.process(sink);
}
}


@@ -10,10 +10,8 @@ use query_grammar::{UserInputAst, UserInputBound, UserInputLeaf, UserInputLitera
use rustc_hash::FxHashMap;
use super::logical_ast::*;
use crate::core::json_utils::{
convert_to_fast_value_and_get_term, set_string_and_get_terms, JsonTermWriter,
};
use crate::index::Index;
use crate::json_utils::convert_to_fast_value_and_append_to_json_term;
use crate::query::range_query::{is_type_valid_for_fastfield_range_query, RangeQuery};
use crate::query::{
AllQuery, BooleanQuery, BoostQuery, EmptyQuery, FuzzyTermQuery, Occur, PhrasePrefixQuery,
@@ -965,20 +963,27 @@ fn generate_literals_for_json_object(
})?;
let index_record_option = text_options.index_option();
let mut logical_literals = Vec::new();
let mut term = Term::with_capacity(100);
let mut json_term_writer = JsonTermWriter::from_field_and_json_path(
field,
json_path,
json_options.is_expand_dots_enabled(),
&mut term,
);
if let Some(term) = convert_to_fast_value_and_get_term(&mut json_term_writer, phrase) {
let get_term_with_path =
|| Term::from_field_json_path(field, json_path, json_options.is_expand_dots_enabled());
// Try to convert the phrase to a fast value
if let Some(term) = convert_to_fast_value_and_append_to_json_term(get_term_with_path(), phrase)
{
logical_literals.push(LogicalLiteral::Term(term));
}
let terms = set_string_and_get_terms(&mut json_term_writer, phrase, &mut text_analyzer);
drop(json_term_writer);
if terms.len() <= 1 {
for (_, term) in terms {
// Try to tokenize the phrase and create Terms.
let mut positions_and_terms = Vec::<(usize, Term)>::new();
let mut token_stream = text_analyzer.token_stream(phrase);
token_stream.process(&mut |token| {
let mut term = get_term_with_path();
term.append_type_and_str(&token.text);
positions_and_terms.push((token.position, term.clone()));
});
if positions_and_terms.len() <= 1 {
for (_, term) in positions_and_terms {
logical_literals.push(LogicalLiteral::Term(term));
}
return Ok(logical_literals);
@@ -989,7 +994,7 @@ fn generate_literals_for_json_object(
));
}
logical_literals.push(LogicalLiteral::Phrase {
terms,
terms: positions_and_terms,
slop: 0,
prefix: false,
});
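The new code splits literal generation for a JSON path into two passes: first try to reinterpret the text as a fast value, then tokenize it into string terms. A hedged sketch of that shape using the helpers named in the diff (`json_utils` is re-exported but `#[doc(hidden)]`; the plain-string fallback below skips the analyzer tokenization the parser actually performs):

```rust
use tantivy::json_utils::convert_to_fast_value_and_append_to_json_term;
use tantivy::schema::Field;
use tantivy::Term;

/// Build candidate terms for a user literal typed against a JSON path.
fn json_terms_for(field: Field, path: &str, text: &str) -> Vec<Term> {
    let base = Term::from_field_json_path(field, path, /* expand_dots */ false);
    let mut terms = Vec::new();
    // e.g. "75" or "true" may become a typed fast-value term.
    if let Some(term) = convert_to_fast_value_and_append_to_json_term(base.clone(), text) {
        terms.push(term);
    }
    // Fallback: a plain string term (the parser tokenizes `text` instead).
    let mut str_term = base;
    str_term.append_type_and_str(text);
    terms.push(str_term);
    terms
}
```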


@@ -174,7 +174,7 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> DocSet for RangeDocSe
}
fn size_hint(&self) -> u32 {
0 // heuristic possible by checking number of hits when fetching a block
self.column.num_docs()
}
}


@@ -477,7 +477,7 @@ mod tests {
use crate::schema::{
Field, IntoIpv6Addr, Schema, TantivyDocument, FAST, INDEXED, STORED, TEXT,
};
use crate::{doc, Index, IndexWriter};
use crate::{Index, IndexWriter};
#[test]
fn test_range_query_simple() -> crate::Result<()> {


@@ -139,7 +139,7 @@ mod tests {
use crate::collector::{Count, TopDocs};
use crate::query::{Query, QueryParser, TermQuery};
use crate::schema::{IndexRecordOption, IntoIpv6Addr, Schema, INDEXED, STORED};
use crate::{doc, Index, IndexWriter, Term};
use crate::{Index, IndexWriter, Term};
#[test]
fn search_ip_test() {


@@ -127,6 +127,7 @@ impl Scorer for TermScorer {
mod tests {
use proptest::prelude::*;
use crate::index::SegmentId;
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::merge_policy::NoMergePolicy;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
@@ -134,8 +135,7 @@ mod tests {
use crate::query::{Bm25Weight, EnableScoring, Scorer, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TEXT};
use crate::{
assert_nearly_equals, DocId, DocSet, Index, IndexWriter, Score, Searcher, SegmentId, Term,
TERMINATED,
assert_nearly_equals, DocId, DocSet, Index, IndexWriter, Score, Searcher, Term, TERMINATED,
};
#[test]


@@ -53,8 +53,7 @@ impl HasLen for VecDocSet {
pub mod tests {
use super::*;
use crate::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN};
use crate::DocId;
use crate::docset::COLLECT_BLOCK_BUFFER_LEN;
#[test]
pub fn test_vec_postings() {


@@ -179,9 +179,10 @@ mod tests {
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::index::SegmentId;
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher};
#[derive(Default)]
struct TestWarmer {


@@ -1,7 +1,5 @@
use std::ops::BitOr;
#[allow(deprecated)]
pub use common::DatePrecision;
pub use common::DateTimePrecision;
use serde::{Deserialize, Serialize};


@@ -160,7 +160,7 @@ pub enum ValueType {
/// A dynamic object value.
Object,
/// A JSON object value. Deprecated.
#[deprecated]
#[deprecated(note = "We keep this for backwards compatibility, use Object instead")]
JSONObject,
}
@@ -819,7 +819,6 @@ mod tests {
use crate::schema::document::existing_type_impls::JsonObjectIter;
use crate::schema::document::se::BinaryValueSerializer;
use crate::schema::document::{ReferenceValue, ReferenceValueLeaf};
use crate::schema::OwnedValue;
fn serialize_value<'a>(value: ReferenceValue<'a, &'a serde_json::Value>) -> Vec<u8> {
let mut writer = Vec::new();
@@ -874,7 +873,7 @@ mod tests {
);
let facet = Facet::from_text("/hello/world").unwrap();
let result = serialize_value(ReferenceValueLeaf::Facet(&facet).into());
let result = serialize_value(ReferenceValueLeaf::Facet(facet.encoded_str()).into());
let value = deserialize_value(result);
assert_eq!(value, crate::schema::OwnedValue::Facet(facet));
@@ -882,7 +881,8 @@ mod tests {
text: "hello, world".to_string(),
tokens: vec![Token::default(), Token::default()],
};
let result = serialize_value(ReferenceValueLeaf::PreTokStr(&pre_tok_str).into());
let result =
serialize_value(ReferenceValueLeaf::PreTokStr(pre_tok_str.clone().into()).into());
let value = deserialize_value(result);
assert_eq!(value, crate::schema::OwnedValue::PreTokStr(pre_tok_str));
}
@@ -961,13 +961,19 @@ mod tests {
"my-third-key".to_string(),
crate::schema::OwnedValue::F64(123.0),
);
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
let object = serde_json::Map::new();
let result = serialize_value(ReferenceValue::Object(JsonObjectIter(object.iter())));
let value = deserialize_value(result);
let expected_object = BTreeMap::new();
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
let mut object = serde_json::Map::new();
object.insert("my-first-key".into(), serde_json::Value::Null);
@@ -979,7 +985,10 @@ mod tests {
expected_object.insert("my-first-key".to_string(), crate::schema::OwnedValue::Null);
expected_object.insert("my-second-key".to_string(), crate::schema::OwnedValue::Null);
expected_object.insert("my-third-key".to_string(), crate::schema::OwnedValue::Null);
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
}
#[test]
@@ -1056,7 +1065,10 @@ mod tests {
.collect(),
),
);
assert_eq!(value, crate::schema::OwnedValue::Object(expected_object));
assert_eq!(
value,
crate::schema::OwnedValue::Object(expected_object.into_iter().collect())
);
// Some more extreme nesting that might behave weirdly
let mut object = serde_json::Map::new();
@@ -1078,6 +1090,9 @@ mod tests {
OwnedValue::Array(vec![OwnedValue::Null]),
])]),
);
assert_eq!(value, OwnedValue::Object(expected_object));
assert_eq!(
value,
OwnedValue::Object(expected_object.into_iter().collect())
);
}
}


@@ -1,93 +1,64 @@
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::{self, Read, Write};
use std::net::Ipv6Addr;
use common::DateTime;
use columnar::MonotonicallyMappableToU128;
use common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, DateTime, VInt};
use serde_json::Map;
pub use CompactDoc as TantivyDocument;
use super::{ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::document::{
DeserializeError, Document, DocumentDeserialize, DocumentDeserializer,
};
use crate::schema::field_type::ValueParsingError;
use crate::schema::field_value::FieldValueIter;
use crate::schema::{Facet, Field, FieldValue, NamedFieldDocument, OwnedValue, Schema};
use crate::schema::{Facet, Field, NamedFieldDocument, OwnedValue, Schema};
use crate::tokenizer::PreTokenizedString;
/// TantivyDocument provides a default implementation of the `Document` trait.
/// It is the object that can be indexed and then searched for.
///
/// Documents are fundamentally a collection of unordered couples `(field, value)`.
/// In this list, one field may appear more than once.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)]
pub struct TantivyDocument {
field_values: Vec<FieldValue>,
#[repr(packed)]
#[derive(Debug, Clone)]
/// A field value pair in the compact tantivy document
struct FieldValueAddr {
pub field: u16,
pub value_addr: ValueAddr,
}
impl Document for TantivyDocument {
type Value<'a> = &'a OwnedValue;
type FieldsValuesIter<'a> = FieldValueIter<'a>;
#[derive(Debug, Clone)]
/// The default document in tantivy. It encodes data in a compact form.
pub struct CompactDoc {
/// `node_data` is a vec of bytes, where each value is serialized into bytes and stored. It
/// includes all the data of the document and also metadata like where the nodes are located
/// in an object or array.
pub node_data: Vec<u8>,
/// The root (Field, Value) pairs
field_values: Vec<FieldValueAddr>,
}
fn iter_fields_and_values(&self) -> Self::FieldsValuesIter<'_> {
FieldValueIter(self.field_values.iter())
impl Default for CompactDoc {
fn default() -> Self {
Self::new()
}
}
impl DocumentDeserialize for TantivyDocument {
fn deserialize<'de, D>(mut deserializer: D) -> Result<Self, DeserializeError>
where D: DocumentDeserializer<'de> {
let mut field_values = Vec::with_capacity(deserializer.size_hint());
while let Some((field, value)) = deserializer.next_field()? {
field_values.push(FieldValue::new(field, value));
}
Ok(Self { field_values })
}
}
impl From<Vec<FieldValue>> for TantivyDocument {
fn from(field_values: Vec<FieldValue>) -> Self {
Self { field_values }
}
}
impl PartialEq for TantivyDocument {
fn eq(&self, other: &Self) -> bool {
// super slow, but only here for tests
let convert_to_comparable_map = |field_values: &[FieldValue]| {
let mut field_value_set: HashMap<Field, HashSet<String>> = Default::default();
for field_value in field_values.iter() {
let value = serde_json::to_string(field_value.value()).unwrap();
field_value_set
.entry(field_value.field())
.or_default()
.insert(value);
}
field_value_set
};
let self_field_values: HashMap<Field, HashSet<String>> =
convert_to_comparable_map(&self.field_values);
let other_field_values: HashMap<Field, HashSet<String>> =
convert_to_comparable_map(&other.field_values);
self_field_values.eq(&other_field_values)
}
}
impl Eq for TantivyDocument {}
impl IntoIterator for TantivyDocument {
type Item = FieldValue;
type IntoIter = std::vec::IntoIter<FieldValue>;
fn into_iter(self) -> Self::IntoIter {
self.field_values.into_iter()
}
}
impl TantivyDocument {
impl CompactDoc {
/// Creates a new, empty document object
pub fn new() -> TantivyDocument {
TantivyDocument::default()
/// The reserved capacity is for the total serialized data
pub fn with_capacity(bytes: usize) -> CompactDoc {
CompactDoc {
node_data: Vec::with_capacity(bytes),
field_values: Vec::with_capacity(4),
}
}
/// Creates a new, empty document object
pub fn new() -> CompactDoc {
CompactDoc::with_capacity(1024)
}
/// Shrinks the capacity of the document to fit the data
pub fn shrink_to_fit(&mut self) {
self.node_data.shrink_to_fit();
self.field_values.shrink_to_fit();
}
/// Returns the length of the document.
@@ -99,83 +70,116 @@ impl TantivyDocument {
pub fn add_facet<F>(&mut self, field: Field, path: F)
where Facet: From<F> {
let facet = Facet::from(path);
let value = OwnedValue::Facet(facet);
self.add_field_value(field, value);
self.add_leaf_field_value(field, ReferenceValueLeaf::Facet(facet.encoded_str()));
}
/// Add a text field.
pub fn add_text<S: ToString>(&mut self, field: Field, text: S) {
let value = OwnedValue::Str(text.to_string());
self.add_field_value(field, value);
pub fn add_text<S: AsRef<str>>(&mut self, field: Field, text: S) {
self.add_leaf_field_value(field, ReferenceValueLeaf::Str(text.as_ref()));
}
/// Add a pre-tokenized text field.
pub fn add_pre_tokenized_text(&mut self, field: Field, pre_tokenized_text: PreTokenizedString) {
self.add_field_value(field, pre_tokenized_text);
self.add_leaf_field_value(field, pre_tokenized_text);
}
/// Add a u64 field
pub fn add_u64(&mut self, field: Field, value: u64) {
self.add_field_value(field, value);
self.add_leaf_field_value(field, value);
}
/// Add a IP address field. Internally only Ipv6Addr is used.
pub fn add_ip_addr(&mut self, field: Field, value: Ipv6Addr) {
self.add_field_value(field, value);
self.add_leaf_field_value(field, value);
}
/// Add a i64 field
pub fn add_i64(&mut self, field: Field, value: i64) {
self.add_field_value(field, value);
self.add_leaf_field_value(field, value);
}
/// Add a f64 field
pub fn add_f64(&mut self, field: Field, value: f64) {
self.add_field_value(field, value);
self.add_leaf_field_value(field, value);
}
/// Add a bool field
pub fn add_bool(&mut self, field: Field, value: bool) {
self.add_field_value(field, value);
self.add_leaf_field_value(field, value);
}
/// Add a date field with unspecified time zone offset
pub fn add_date(&mut self, field: Field, value: DateTime) {
self.add_field_value(field, value);
self.add_leaf_field_value(field, value);
}
/// Add a bytes field
pub fn add_bytes<T: Into<Vec<u8>>>(&mut self, field: Field, value: T) {
self.add_field_value(field, value.into());
pub fn add_bytes(&mut self, field: Field, value: &[u8]) {
self.add_leaf_field_value(field, value);
}
/// Add a dynamic object field
pub fn add_object(&mut self, field: Field, object: BTreeMap<String, OwnedValue>) {
self.add_field_value(field, object);
self.add_field_value(field, &OwnedValue::from(object));
}
/// Add a (field, value) to the document.
pub fn add_field_value<T: Into<OwnedValue>>(&mut self, field: Field, typed_val: T) {
///
/// `OwnedValue` implements Value, which should be easiest to use, but is not the most
/// performant.
pub fn add_field_value<'a, V: Value<'a>>(&mut self, field: Field, value: V) {
let field_value = FieldValueAddr {
field: field
.field_id()
.try_into()
.expect("support only up to u16::MAX field ids"),
value_addr: self.add_value(value),
};
self.field_values.push(field_value);
}
/// Add a (field, leaf value) to the document.
/// Leaf values don't have nested values.
pub fn add_leaf_field_value<'a, T: Into<ReferenceValueLeaf<'a>>>(
&mut self,
field: Field,
typed_val: T,
) {
let value = typed_val.into();
let field_value = FieldValue { field, value };
let field_value = FieldValueAddr {
field: field
.field_id()
.try_into()
.expect("support only up to u16::MAX field ids"),
value_addr: self.add_value_leaf(value),
};
self.field_values.push(field_value);
}
/// field_values accessor
pub fn field_values(&self) -> &[FieldValue] {
&self.field_values
pub fn field_values(
&self,
) -> impl Iterator<Item = (Field, ReferenceValue<'_, CompactDocValue<'_>>)> {
self.field_values.iter().map(|field_val| {
let field = Field::from_field_id(field_val.field as u32);
let val = self.extract_value(field_val.value_addr).unwrap();
(field, val)
})
}
/// Returns all of the `FieldValue`s associated the given field
pub fn get_all(&self, field: Field) -> impl Iterator<Item = &OwnedValue> {
/// Returns all of the `ReferenceValue`s associated with the given field
pub fn get_all(
&self,
field: Field,
) -> impl Iterator<Item = ReferenceValue<'_, CompactDocValue<'_>>> + '_ {
self.field_values
.iter()
.filter(move |field_value| field_value.field() == field)
.map(FieldValue::value)
.filter(move |field_value| Field::from_field_id(field_value.field as u32) == field)
.map(|val| self.extract_value(val.value_addr).unwrap())
}
/// Returns the first `FieldValue` associated the given field
pub fn get_first(&self, field: Field) -> Option<&OwnedValue> {
/// Returns the first `ReferenceValue` associated with the given field
pub fn get_first(&self, field: Field) -> Option<ReferenceValue<'_, CompactDocValue<'_>>> {
self.get_all(field).next()
}
@@ -183,12 +187,12 @@ impl TantivyDocument {
pub fn convert_named_doc(
schema: &Schema,
named_doc: NamedFieldDocument,
) -> Result<TantivyDocument, DocParsingError> {
let mut document = TantivyDocument::new();
) -> Result<Self, DocParsingError> {
let mut document = Self::new();
for (field_name, values) in named_doc.0 {
if let Ok(field) = schema.get_field(&field_name) {
for value in values {
document.add_field_value(field, value);
document.add_field_value(field, &value);
}
}
}
@@ -196,7 +200,7 @@ impl TantivyDocument {
}
/// Build a document object from a json-object.
pub fn parse_json(schema: &Schema, doc_json: &str) -> Result<TantivyDocument, DocParsingError> {
pub fn parse_json(schema: &Schema, doc_json: &str) -> Result<Self, DocParsingError> {
let json_obj: Map<String, serde_json::Value> =
serde_json::from_str(doc_json).map_err(|_| DocParsingError::invalid_json(doc_json))?;
Self::from_json_object(schema, json_obj)
@@ -206,8 +210,8 @@ impl TantivyDocument {
pub fn from_json_object(
schema: &Schema,
json_obj: Map<String, serde_json::Value>,
) -> Result<TantivyDocument, DocParsingError> {
let mut doc = TantivyDocument::default();
) -> Result<Self, DocParsingError> {
let mut doc = Self::default();
for (field_name, json_value) in json_obj {
if let Ok(field) = schema.get_field(&field_name) {
let field_entry = schema.get_field_entry(field);
@@ -218,20 +222,457 @@ impl TantivyDocument {
let value = field_type
.value_from_json(json_item)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add_field_value(field, value);
doc.add_field_value(field, &value);
}
}
_ => {
let value = field_type
.value_from_json(json_value)
.map_err(|e| DocParsingError::ValueError(field_name.clone(), e))?;
doc.add_field_value(field, value);
doc.add_field_value(field, &value);
}
}
}
}
Ok(doc)
}
fn add_value_leaf(&mut self, leaf: ReferenceValueLeaf) -> ValueAddr {
let type_id = ValueType::from(&leaf);
// Write into `node_data` and return u32 position as its address
// Null and bool are inlined into the address
let val_addr = match leaf {
ReferenceValueLeaf::Null => 0,
ReferenceValueLeaf::Str(bytes) => {
write_bytes_into(&mut self.node_data, bytes.as_bytes())
}
ReferenceValueLeaf::Facet(bytes) => {
write_bytes_into(&mut self.node_data, bytes.as_bytes())
}
ReferenceValueLeaf::Bytes(bytes) => write_bytes_into(&mut self.node_data, bytes),
ReferenceValueLeaf::U64(num) => write_into(&mut self.node_data, num),
ReferenceValueLeaf::I64(num) => write_into(&mut self.node_data, num),
ReferenceValueLeaf::F64(num) => write_into(&mut self.node_data, num),
ReferenceValueLeaf::Bool(b) => b as u32,
ReferenceValueLeaf::Date(date) => {
write_into(&mut self.node_data, date.into_timestamp_nanos())
}
ReferenceValueLeaf::IpAddr(num) => write_into(&mut self.node_data, num.to_u128()),
ReferenceValueLeaf::PreTokStr(pre_tok) => write_into(&mut self.node_data, *pre_tok),
};
ValueAddr { type_id, val_addr }
}
/// Adds a value and returns its address within the container
fn add_value<'a, V: Value<'a>>(&mut self, value: V) -> ValueAddr {
let value = value.as_value();
let type_id = ValueType::from(&value);
match value {
ReferenceValue::Leaf(leaf) => self.add_value_leaf(leaf),
ReferenceValue::Array(elements) => {
// addresses of the elements in node_data
// Reusing a vec would be nicer, but it's not easy because of the recursion
// A global vec would work if every writer got its own discriminator
let mut addresses = Vec::new();
for elem in elements {
let value_addr = self.add_value(elem);
write_into(&mut addresses, value_addr);
}
ValueAddr {
type_id,
val_addr: write_bytes_into(&mut self.node_data, &addresses),
}
}
ReferenceValue::Object(entries) => {
// addresses of the elements in node_data
let mut addresses = Vec::new();
for (key, value) in entries {
let key_addr = self.add_value_leaf(ReferenceValueLeaf::Str(key));
let value_addr = self.add_value(value);
write_into(&mut addresses, key_addr);
write_into(&mut addresses, value_addr);
}
ValueAddr {
type_id,
val_addr: write_bytes_into(&mut self.node_data, &addresses),
}
}
}
}
fn extract_value(
&self,
ref_value: ValueAddr,
) -> io::Result<ReferenceValue<'_, CompactDocValue<'_>>> {
match ref_value.type_id {
ValueType::Null => Ok(ReferenceValueLeaf::Null.into()),
ValueType::Str => {
let str_ref = self.extract_str(ref_value.val_addr);
Ok(ReferenceValueLeaf::Str(str_ref).into())
}
ValueType::Facet => {
let str_ref = self.extract_str(ref_value.val_addr);
Ok(ReferenceValueLeaf::Facet(str_ref).into())
}
ValueType::Bytes => {
let data = self.extract_bytes(ref_value.val_addr);
Ok(ReferenceValueLeaf::Bytes(data).into())
}
ValueType::U64 => self
.read_from::<u64>(ref_value.val_addr)
.map(ReferenceValueLeaf::U64)
.map(Into::into),
ValueType::I64 => self
.read_from::<i64>(ref_value.val_addr)
.map(ReferenceValueLeaf::I64)
.map(Into::into),
ValueType::F64 => self
.read_from::<f64>(ref_value.val_addr)
.map(ReferenceValueLeaf::F64)
.map(Into::into),
ValueType::Bool => Ok(ReferenceValueLeaf::Bool(ref_value.val_addr != 0).into()),
ValueType::Date => self
.read_from::<i64>(ref_value.val_addr)
.map(|ts| ReferenceValueLeaf::Date(DateTime::from_timestamp_nanos(ts)))
.map(Into::into),
ValueType::IpAddr => self
.read_from::<u128>(ref_value.val_addr)
.map(|num| ReferenceValueLeaf::IpAddr(Ipv6Addr::from_u128(num)))
.map(Into::into),
ValueType::PreTokStr => self
.read_from::<PreTokenizedString>(ref_value.val_addr)
.map(Into::into)
.map(ReferenceValueLeaf::PreTokStr)
.map(Into::into),
ValueType::Object => Ok(ReferenceValue::Object(CompactDocObjectIter::new(
self,
ref_value.val_addr,
)?)),
ValueType::Array => Ok(ReferenceValue::Array(CompactDocArrayIter::new(
self,
ref_value.val_addr,
)?)),
}
}
/// get &[u8] reference from node_data
fn extract_bytes(&self, addr: Addr) -> &[u8] {
binary_deserialize_bytes(self.get_slice(addr))
}
/// get &str reference from node_data
fn extract_str(&self, addr: Addr) -> &str {
let data = self.extract_bytes(addr);
// Utf-8 checks would have a noticeable performance overhead here
unsafe { std::str::from_utf8_unchecked(data) }
}
/// deserialized owned value from node_data
fn read_from<T: BinarySerializable>(&self, addr: Addr) -> io::Result<T> {
let data_slice = &self.node_data[addr as usize..];
let mut cursor = std::io::Cursor::new(data_slice);
T::deserialize(&mut cursor)
}
/// get slice from address. The returned slice is open ended
fn get_slice(&self, addr: Addr) -> &[u8] {
&self.node_data[addr as usize..]
}
}
/// BinarySerializable alternative to read references
fn binary_deserialize_bytes(data: &[u8]) -> &[u8] {
let (len, bytes_read) = read_u32_vint_no_advance(data);
&data[bytes_read..bytes_read + len as usize]
}
/// Write bytes and return the position of the written data.
///
/// BinarySerializable alternative to write references
fn write_bytes_into(vec: &mut Vec<u8>, data: &[u8]) -> u32 {
let pos = vec.len() as u32;
let mut buf = [0u8; 8];
let len_vint_bytes = serialize_vint_u32(data.len() as u32, &mut buf);
vec.extend_from_slice(len_vint_bytes);
vec.extend_from_slice(data);
pos
}
/// Serialize and return the position
fn write_into<T: BinarySerializable>(vec: &mut Vec<u8>, value: T) -> u32 {
let pos = vec.len() as u32;
value.serialize(vec).unwrap();
pos
}
impl PartialEq for CompactDoc {
fn eq(&self, other: &Self) -> bool {
// super slow, but only here for tests
let convert_to_comparable_map = |doc: &CompactDoc| {
let mut field_value_set: HashMap<Field, HashSet<String>> = Default::default();
for field_value in doc.field_values.iter() {
let value: OwnedValue = doc.extract_value(field_value.value_addr).unwrap().into();
let value = serde_json::to_string(&value).unwrap();
field_value_set
.entry(Field::from_field_id(field_value.field as u32))
.or_default()
.insert(value);
}
field_value_set
};
let self_field_values: HashMap<Field, HashSet<String>> = convert_to_comparable_map(self);
let other_field_values: HashMap<Field, HashSet<String>> = convert_to_comparable_map(other);
self_field_values.eq(&other_field_values)
}
}
impl Eq for CompactDoc {}
impl DocumentDeserialize for CompactDoc {
fn deserialize<'de, D>(mut deserializer: D) -> Result<Self, DeserializeError>
where D: DocumentDeserializer<'de> {
let mut doc = CompactDoc::default();
// TODO: Deserializing into OwnedValue is wasteful. The deserializer should be able to work
// on slices and referenced data.
while let Some((field, value)) = deserializer.next_field::<OwnedValue>()? {
doc.add_field_value(field, &value);
}
Ok(doc)
}
}
/// A value of Compact Doc needs a reference to the container to extract its payload
#[derive(Debug, Clone, Copy)]
pub struct CompactDocValue<'a> {
container: &'a CompactDoc,
value: ValueAddr,
}
impl<'a> Value<'a> for CompactDocValue<'a> {
type ArrayIter = CompactDocArrayIter<'a>;
type ObjectIter = CompactDocObjectIter<'a>;
fn as_value(&self) -> ReferenceValue<'a, Self> {
self.container.extract_value(self.value).unwrap()
}
}
/// The address in the vec
type Addr = u32;
#[derive(Clone, Copy, Default)]
#[repr(packed)]
/// The value type and the address to its payload in the container.
struct ValueAddr {
type_id: ValueType,
/// This is the address to the value in the vec, except for bool and null, which are inlined
val_addr: Addr,
}
impl BinarySerializable for ValueAddr {
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
self.type_id.serialize(writer)?;
VInt(self.val_addr as u64).serialize(writer)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let type_id = ValueType::deserialize(reader)?;
let val_addr = VInt::deserialize(reader)?.0 as u32;
Ok(ValueAddr { type_id, val_addr })
}
}
impl std::fmt::Debug for ValueAddr {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let val_addr = self.val_addr;
f.write_fmt(format_args!("{:?} at {:?}", self.type_id, val_addr))
}
}
/// A enum representing a value for tantivy to index.
///
/// Any changes need to be reflected in `BinarySerializable` for `ValueType`
///
/// We can't use [schema::Type] or [columnar::ColumnType] here, because they are missing
/// some items like Array and PreTokStr.
#[derive(Default, Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ValueType {
/// A null value.
#[default]
Null = 0,
/// The str type is used for any text information.
Str = 1,
/// Unsigned 64-bits Integer `u64`
U64 = 2,
/// Signed 64-bits Integer `i64`
I64 = 3,
/// 64-bits Float `f64`
F64 = 4,
/// Date/time with nanoseconds precision
Date = 5,
/// Facet
Facet = 6,
/// Arbitrarily sized byte array
Bytes = 7,
/// IpV6 Address. Internally there is no IpV4, it needs to be converted to `Ipv6Addr`.
IpAddr = 8,
/// Bool value
Bool = 9,
/// Pre-tokenized str type,
PreTokStr = 10,
/// Object
Object = 11,
/// Array of values
Array = 12,
}
impl BinarySerializable for ValueType {
fn serialize<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
(*self as u8).serialize(writer)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let num = u8::deserialize(reader)?;
let type_id = if (0..=12).contains(&num) {
unsafe { std::mem::transmute(num) }
} else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid value type id: {}", num),
));
};
Ok(type_id)
}
}
impl<'a, V: Value<'a>> From<&ReferenceValue<'a, V>> for ValueType {
fn from(value: &ReferenceValue<'a, V>) -> Self {
match value {
ReferenceValue::Leaf(leaf) => leaf.into(),
ReferenceValue::Array(_) => ValueType::Array,
ReferenceValue::Object(_) => ValueType::Object,
}
}
}
impl<'a> From<&ReferenceValueLeaf<'a>> for ValueType {
fn from(value: &ReferenceValueLeaf<'a>) -> Self {
match value {
ReferenceValueLeaf::Null => ValueType::Null,
ReferenceValueLeaf::Str(_) => ValueType::Str,
ReferenceValueLeaf::U64(_) => ValueType::U64,
ReferenceValueLeaf::I64(_) => ValueType::I64,
ReferenceValueLeaf::F64(_) => ValueType::F64,
ReferenceValueLeaf::Bool(_) => ValueType::Bool,
ReferenceValueLeaf::Date(_) => ValueType::Date,
ReferenceValueLeaf::IpAddr(_) => ValueType::IpAddr,
ReferenceValueLeaf::PreTokStr(_) => ValueType::PreTokStr,
ReferenceValueLeaf::Facet(_) => ValueType::Facet,
ReferenceValueLeaf::Bytes(_) => ValueType::Bytes,
}
}
}
#[derive(Debug, Clone)]
/// The Iterator for the object values in the compact document
pub struct CompactDocObjectIter<'a> {
container: &'a CompactDoc,
node_addresses_slice: &'a [u8],
}
impl<'a> CompactDocObjectIter<'a> {
fn new(container: &'a CompactDoc, addr: Addr) -> io::Result<Self> {
// Objects are `&[ValueAddr]` serialized into bytes
let node_addresses_slice = container.extract_bytes(addr);
Ok(Self {
container,
node_addresses_slice,
})
}
}
impl<'a> Iterator for CompactDocObjectIter<'a> {
type Item = (&'a str, CompactDocValue<'a>);
fn next(&mut self) -> Option<Self::Item> {
if self.node_addresses_slice.is_empty() {
return None;
}
let key_addr = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
let key = self.container.extract_str(key_addr.val_addr);
let value = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
let value = CompactDocValue {
container: self.container,
value,
};
return Some((key, value));
}
}
#[derive(Debug, Clone)]
/// The Iterator for the array values in the compact document
pub struct CompactDocArrayIter<'a> {
container: &'a CompactDoc,
node_addresses_slice: &'a [u8],
}
impl<'a> CompactDocArrayIter<'a> {
fn new(container: &'a CompactDoc, addr: Addr) -> io::Result<Self> {
// Arrays are &[ValueAddr] serialized into bytes
let node_addresses_slice = container.extract_bytes(addr);
Ok(Self {
container,
node_addresses_slice,
})
}
}
impl<'a> Iterator for CompactDocArrayIter<'a> {
type Item = CompactDocValue<'a>;
fn next(&mut self) -> Option<Self::Item> {
if self.node_addresses_slice.is_empty() {
return None;
}
let value = ValueAddr::deserialize(&mut self.node_addresses_slice).ok()?;
let value = CompactDocValue {
container: self.container,
value,
};
return Some(value);
}
}
impl Document for CompactDoc {
type Value<'a> = CompactDocValue<'a>;
type FieldsValuesIter<'a> = FieldValueIterRef<'a>;
fn iter_fields_and_values(&self) -> Self::FieldsValuesIter<'_> {
FieldValueIterRef {
slice: self.field_values.iter(),
container: self,
}
}
}
/// A helper wrapper for creating an iterator over the field values
pub struct FieldValueIterRef<'a> {
slice: std::slice::Iter<'a, FieldValueAddr>,
container: &'a CompactDoc,
}
impl<'a> Iterator for FieldValueIterRef<'a> {
type Item = (Field, CompactDocValue<'a>);
fn next(&mut self) -> Option<Self::Item> {
self.slice.next().map(|field_value| {
(
Field::from_field_id(field_value.field as u32),
CompactDocValue::<'a> {
container: self.container,
value: field_value.value_addr,
},
)
})
}
}
/// Error that may happen when deserializing
@@ -256,7 +697,6 @@ impl DocParsingError {
#[cfg(test)]
mod tests {
use crate::schema::document::default_document::TantivyDocument;
use crate::schema::*;
#[test]
@@ -265,7 +705,40 @@ mod tests {
let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = TantivyDocument::default();
doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1);
assert_eq!(doc.field_values().count(), 1);
let schema = schema_builder.build();
let _val = doc.get_first(text_field).unwrap();
let _json = doc.to_named_doc(&schema);
}
#[test]
fn test_json_value() {
let json_str = r#"{
"toto": "titi",
"float": -0.2,
"bool": true,
"unsigned": 1,
"signed": -2,
"complexobject": {
"field.with.dot": 1
},
"date": "1985-04-12T23:20:50.52Z",
"my_arr": [2, 3, {"my_key": "two tokens"}, 4, {"nested_array": [2, 5, 6, [7, 8, {"a": [{"d": {"e":[99]}}, 9000]}, 9, 10], [5, 5]]}]
}"#;
let json_val: std::collections::BTreeMap<String, OwnedValue> =
serde_json::from_str(json_str).unwrap();
let mut schema_builder = Schema::builder();
let json_field = schema_builder.add_json_field("json", TEXT);
let mut doc = TantivyDocument::default();
doc.add_object(json_field, json_val);
let schema = schema_builder.build();
let json = doc.to_json(&schema);
let actual_json: serde_json::Value = serde_json::from_str(&json).unwrap();
let expected_json: serde_json::Value = serde_json::from_str(json_str).unwrap();
assert_eq!(actual_json["json"][0], expected_json);
}
// TODO: Should this be re-added with the serialize method
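Pulling the pieces of this file together: `CompactDoc` (re-exported as `TantivyDocument`) stores every value serialized into one `node_data` buffer and keeps only `(field, ValueAddr)` pairs at the top level, so accessors now yield borrowed `ReferenceValue`s instead of `&OwnedValue`. A minimal usage sketch, mirroring the tests in this diff and materializing `OwnedValue`s only where owned data is needed:

```rust
use tantivy::schema::{OwnedValue, Schema, TEXT};
use tantivy::TantivyDocument;

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let _schema = schema_builder.build();

    let mut doc = TantivyDocument::default();
    doc.add_text(title, "My title");

    // Top-level field values are exposed as an iterator rather than a slice.
    assert_eq!(doc.field_values().count(), 1);

    // Borrowed values can be converted into OwnedValue when ownership is needed.
    let values: Vec<OwnedValue> = doc.get_all(title).map(OwnedValue::from).collect();
    assert_eq!(values[0].as_ref().as_str(), Some("My title"));
}
```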

Some files were not shown because too many files have changed in this diff.