Compare commits
317 Commits
issue/1251...column-tra
.github/workflows/coverage.yml (13 changed lines, vendored)
@@ -10,15 +10,16 @@ jobs:
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
run: rustup toolchain install nightly --component llvm-tools-preview
- name: Install cargo-llvm-cov
run: curl -LsSf https://github.com/taiki-e/cargo-llvm-cov/releases/latest/download/cargo-llvm-cov-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
- uses: Swatinem/rust-cache@v2
- uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
uses: codecov/codecov-action@v3
continue-on-error: true
with:
token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
files: lcov.info
.github/workflows/long_running.yml (18 changed lines, vendored)
@@ -9,16 +9,20 @@ env:
NUM_FUNCTIONAL_TEST_ITERATIONS: 20000

jobs:
functional_test_unsorted:
test:

runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true

- name: Run indexing_unsorted
run: cargo test indexing_unsorted -- --ignored
functional_test_sorted:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run indexing_sorted
run: cargo test indexing_sorted -- --ignored
.github/workflows/test.yml (54 changed lines, vendored)
@@ -10,33 +10,65 @@ env:
CARGO_TERM_COLOR: always

jobs:
test:
check:

runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- name: Build
run: cargo build --verbose --workspace
- name: Install latest nightly to test also against unstable feature flag
- uses: actions/checkout@v3

- name: Install nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
profile: minimal
components: rustfmt
- name: Install latest nightly to test also against unstable feature flag
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
components: rustfmt, clippy
- name: Run tests
run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,failpoints --verbose --workspace
profile: minimal
components: clippy

- uses: Swatinem/rust-cache@v2

- name: Check Formatting
run: cargo +nightly fmt --all -- --check

- uses: actions-rs/clippy-check@v1
with:
toolchain: stable
token: ${{ secrets.GITHUB_TOKEN }}
args: --tests

test:

runs-on: ubuntu-latest

strategy:
matrix:
features: [
{ label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
{ label: "quickwit", flags: "mmap,quickwit,failpoints" }
]

name: test-${{ matrix.features.label}}

steps:
- uses: actions/checkout@v3

- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true

- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2

- name: Run tests
run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace

- name: Run doctests
run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace
@@ -10,6 +10,7 @@ Tantivy's bread and butter is to address the problem of full-text search :
Given a large set of textual documents, and a text query, return the K-most relevant documents in a very efficient way. To execute these queries rapidly, the tantivy needs to build an index beforehand. The relevance score implemented in the tantivy is not configurable. Tantivy uses the same score as the default similarity used in Lucene / Elasticsearch, called [BM25](https://en.wikipedia.org/wiki/Okapi_BM25).

But tantivy's scope does not stop there. Numerous features are required to power rich-search applications. For instance, one may want to:

- compute the count of documents matching a query in the different section of an e-commerce website,
- display an average price per meter square for a real estate search engine,
- take into account historical user data to rank documents in a specific way,

@@ -22,27 +23,28 @@ rapidly select all documents matching a given predicate (also known as a query)
collect some information about them ([See collector](#collector-define-what-to-do-with-matched-documents)).

Roughly speaking the design is following these guiding principles:

- Search should be O(1) in memory.
- Indexing should be O(1) in memory. (In practice it is just sublinear)
- Search should be as fast as possible

This comes at the cost of the dynamicity of the index: while it is possible to add, and delete documents from our corpus, the tantivy is designed to handle these updates in large batches.

## [core/](src/core): Index, segments, searchers.
## [core/](src/core): Index, segments, searchers

Core contains all of the high-level code to make it possible to create an index, add documents, delete documents and commit.

This is both the most high-level part of tantivy, the least performance-sensitive one, the seemingly most mundane code... And paradoxically the most complicated part.

### Index and Segments...
### Index and Segments

A tantivy index is a collection of smaller independent immutable segments.
A tantivy index is a collection of smaller independent immutable segments.
Each segment contains its own independent set of data structures.

A segment is identified by a segment id that is in fact a UUID.
The file of a segment has the format

```segment-id . ext ```
```segment-id . ext```

The extension signals which data structure (or [`SegmentComponent`](src/core/segment_component.rs)) is stored in the file.

@@ -52,17 +54,15 @@ On commit, one segment per indexing thread is written to disk, and the `meta.jso

For a better idea of how indexing works, you may read the [following blog post](https://fulmicoton.com/posts/behold-tantivy-part2/).

### Deletes

Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it was a primary id, and delete the associated term if they want to delete only one specific document.

On commit, tantivy will find all of the segments with documents matching this existing term and create a [tombstone file](src/fastfield/delete.rs) that represents the bitset of the document that are deleted.
Like all segment files, this file is immutable. Because it is possible to have more than one tombstone file at a given instant, the tombstone filename has the format ``` segment_id . commit_opstamp . del```.
On commit, tantivy will find all of the segments with documents matching this existing term and remove from [alive bitset file](src/fastfield/alive_bitset.rs) that represents the bitset of the alive document ids.
Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ```segment_id . commit_opstamp . del```.

An opstamp is simply an incremental id that identifies any operation applied to the index. For instance, performing a commit or adding a document.
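As a concrete illustration of the delete flow described above, here is a minimal sketch against tantivy's public `IndexWriter` API; the schema and the `id` field are assumptions made for the example, not something defined by this change.

```rust
use tantivy::schema::{Schema, STORED, STRING};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    // A field used *as if* it were a primary id; tantivy itself has no such notion.
    let mut schema_builder = Schema::builder();
    let id_field = schema_builder.add_text_field("id", STRING | STORED);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(id_field => "doc-1"))?;
    writer.commit()?;

    // A delete is expressed as "delete every document containing this term".
    writer.delete_term(Term::from_field_text(id_field, "doc-1"));
    // The delete only takes effect (and the alive bitset is written) on commit.
    writer.commit()?;
    Ok(())
}
```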
### DocId

Within a segment, all documents are identified by a DocId that ranges within `[0, max_doc)`.
@@ -74,6 +74,7 @@ The DocIds are simply allocated in the order documents are added to the index.

In separate threads, tantivy's index writer search for opportunities to merge segments.
The point of segment merge is to:

- eventually get rid of tombstoned documents
- reduce the otherwise ever-growing number of segments.

@@ -104,6 +105,7 @@ Tantivy's document follows a very strict schema, decided before building any ind
The schema defines all of the fields that the indexes [`Document`](src/schema/document.rs) may and should contain, their types (`text`, `i64`, `u64`, `Date`, ...) as well as how it should be indexed / represented in tantivy.

Depending on the type of the field, you can decide to

- put it in the docstore
- store it as a fast field
- index it
@@ -117,9 +119,10 @@ As of today, tantivy's schema imposes a 1:1 relationship between a field that is

This is not something tantivy supports, and it is up to the user to duplicate field / concatenate fields before feeding them to tantivy.

## General information about these data structures.
## General information about these data structures

All data structures in tantivy, have:

- a writer
- a serializer
- a reader
@@ -132,7 +135,7 @@ This conversion is done by the serializer.
Finally, the reader is in charge of offering an API to read on this on-disk read-only representation.
In tantivy, readers are designed to require very little anonymous memory. The data is read straight from an mmapped file, and loading an index is as fast as mmapping its files.

## [store/](src/store): Here is my DocId, Gimme my document!
## [store/](src/store): Here is my DocId, Gimme my document

The docstore is a row-oriented storage that, for each document, stores a subset of the fields
that are marked as stored in the schema. The docstore is compressed using a general-purpose algorithm
@@ -146,6 +149,7 @@ Once the top 10 documents have been identified, we fetch them from the store, an
**Not useful for**

Fetching a document from the store is typically a "slow" operation. It usually consists in

- searching into a compact tree-like data structure to find the position of the right block.
- decompressing a small block
- returning the document from this block.
@@ -154,8 +158,7 @@ It is NOT meant to be called for every document matching a query.

As a rule of thumb, if you hit the docstore more than 100 times per search query, you are probably misusing tantivy.

## [fastfield/](src/fastfield): Here is my DocId, Gimme my value!
## [fastfield/](src/fastfield): Here is my DocId, Gimme my value

Fast fields are stored in a column-oriented storage that allows for random access.
The only compression applied is bitpacking. The column comes with two meta data.
@@ -163,7 +166,7 @@ The minimum value in the column and the number of bits per doc.

Fetching a value for a `DocId` is then as simple as computing

```
```rust
min_value + fetch_bits(num_bits * doc_id..num_bits * (doc_id+1))
```
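To make that formula concrete, here is a self-contained toy version of a bitpacked column lookup; it is an illustration of the idea only (little-endian packing, `num_bits` at most 56), not tantivy's actual fast-field reader.

```rust
/// Toy bitpacked column: `num_bits` bits per value, values offset by `min_value`.
/// Assumes `num_bits <= 56` so a single value never spans more than 8 bytes.
struct PackedColumn {
    min_value: u64,
    num_bits: u64,
    data: Vec<u8>,
}

impl PackedColumn {
    fn get(&self, doc_id: u64) -> u64 {
        if self.num_bits == 0 {
            // All documents share the same value.
            return self.min_value;
        }
        let addr_in_bits = doc_id * self.num_bits;
        let byte_addr = (addr_in_bits / 8) as usize;
        // Read up to 8 bytes starting at the byte holding the first bit (zero-padded at the end).
        let mut bytes = [0u8; 8];
        for (i, b) in self.data[byte_addr..].iter().take(8).enumerate() {
            bytes[i] = *b;
        }
        let word = u64::from_le_bytes(bytes);
        let bit_shift = addr_in_bits % 8;
        let mask = (1u64 << self.num_bits) - 1;
        self.min_value + ((word >> bit_shift) & mask)
    }
}

fn main() {
    // With 8 bits per value, each byte stores one delta above `min_value`.
    let column = PackedColumn { min_value: 100, num_bits: 8, data: vec![0, 5, 17] };
    assert_eq!(column.get(0), 100);
    assert_eq!(column.get(2), 117);
    println!("doc 1 -> {}", column.get(1)); // prints 105
}
```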
@@ -190,7 +193,7 @@ For advanced search engine, it is possible to store all of the features required

Finally facets are a specific kind of fast field, and the associated source code is in [`fastfield/facet_reader.rs`](src/fastfield/facet_reader.rs).

# The inverted search index.
# The inverted search index

The inverted index is the core part of full-text search.
When presented a new document with the text field "Hello, happy tax payer!", tantivy breaks it into a list of so-called tokens. In addition to just splitting these strings into tokens, it might also do different kinds of operations like dropping the punctuation, converting the character to lowercase, apply stemming, etc. Tantivy makes it possible to configure the operations to be applied in the schema (tokenizer/ is the place where these operations are implemented).
@@ -215,19 +218,18 @@ The inverted index actually consists of two data structures chained together.

Where [TermInfo](src/postings/term_info.rs) is an object containing some meta data about a term.

## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)!
## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)

Tantivy's term dictionary is mainly in charge of supplying the function

[Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs)

It is itself broken into two parts.

- [Term](src/schema/term.rs) ⟶ [TermOrdinal](src/termdict/mod.rs) is addressed by a finite state transducer, implemented by the fst crate.
- [TermOrdinal](src/termdict/mod.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term info store.
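The two bullet points above describe a two-step indirection; as a toy illustration (ordinary containers standing in for the FST and the term info store, with made-up field names), it looks like this:

```rust
use std::collections::BTreeMap;

/// Toy stand-ins: in tantivy the first map is an FST (tantivy-fst) and the
/// second is the compact term info store; here both are ordinary containers.
struct TermInfo {
    doc_freq: u32,
    postings_offset: u64,
}

struct ToyTermDict {
    term_to_ordinal: BTreeMap<Vec<u8>, u64>, // Term -> TermOrdinal
    term_infos: Vec<TermInfo>,               // TermOrdinal -> TermInfo
}

impl ToyTermDict {
    fn get(&self, term: &[u8]) -> Option<&TermInfo> {
        let ord = *self.term_to_ordinal.get(term)?;
        self.term_infos.get(ord as usize)
    }
}

fn main() {
    let dict = ToyTermDict {
        term_to_ordinal: BTreeMap::from([(b"hello".to_vec(), 0u64)]),
        term_infos: vec![TermInfo { doc_freq: 3, postings_offset: 0 }],
    };
    let info = dict.get(b"hello").unwrap();
    println!("doc_freq={} postings_offset={}", info.doc_freq, info.postings_offset);
}
```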
## [postings/](src/postings): Iterate over documents... very fast!
## [postings/](src/postings): Iterate over documents... very fast

A posting list makes it possible to store a sorted list of doc ids and for each doc store
a term frequency as well.
@@ -249,7 +251,7 @@ For instance, when the phrase query "the art of war" does not match "the war of
To make it possible, it is possible to specify in the schema that a field should store positions in addition to being indexed.

The token positions of all of the terms are then stored in a separate file with the extension `.pos`.
The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate throught the docset,
The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate through the docset,
we advance the position reader by the number of term frequencies of the current document.

## [fieldnorms/](src/fieldnorms): Here is my doc, how many tokens in this field?
@@ -257,7 +259,6 @@ we advance the position reader by the number of term frequencies of the current

The [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) formula also requires to know the number of tokens stored in a specific field for a given document. We store this information on one byte per document in the fieldnorm.
The fieldnorm is therefore compressed. Values up to 40 are encoded unchanged.
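As a toy sketch of such a one-byte encoding: values up to 40 (the cutoff mentioned above) are kept exact, while the bucketing used for larger values below is invented purely for illustration and is not tantivy's actual table.

```rust
// Toy one-byte fieldnorm codec: token counts up to 40 are stored exactly,
// larger counts fall into coarse doubling buckets (lossy, illustrative only).
fn encode_fieldnorm(num_tokens: u32) -> u8 {
    if num_tokens <= 40 {
        num_tokens as u8
    } else {
        // One code per doubling above 40.
        let extra = (32 - (num_tokens - 40).leading_zeros()) as u8;
        40 + extra
    }
}

fn decode_fieldnorm(code: u8) -> u32 {
    if code <= 40 {
        code as u32
    } else {
        40 + (1u32 << (code - 41))
    }
}

fn main() {
    for n in [0u32, 7, 40, 41, 100, 10_000] {
        let c = encode_fieldnorm(n);
        println!("{n} tokens -> code {c} -> approx {}", decode_fieldnorm(c));
    }
}
```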
## [tokenizer/](src/tokenizer): How should we process text?

Text processing is key to a good search experience.
@@ -268,7 +269,6 @@ Text processing can be configured by selecting an off-the-shelf [`Tokenizer`](./

Tantivy's comes with few tokenizers, but external crates are offering advanced tokenizers, such as [Lindera](https://crates.io/crates/lindera) for Japanese.

## [query/](src/query): Define and compose queries

The [Query](src/query/query.rs) trait defines what a query is.
139
CHANGELOG.md
@@ -1,5 +1,30 @@
|
||||
Tantivy 0.19
|
||||
================================
|
||||
|
||||
- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
|
||||
The `DateTime` type has been updated to hold timestamps with microseconds precision.
|
||||
`DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing).
|
||||
- Remove Searcher pool and make `Searcher` cloneable.
|
||||
|
||||
Tantivy 0.18
|
||||
================================
|
||||
|
||||
- For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
|
||||
- The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
|
||||
- The type alias `tantivy::DateTime` has been removed.
|
||||
- `Value::Date` wraps `time::PrimitiveDateTime` without time zone information.
|
||||
- Internally date/time values are stored as seconds since UNIX epoch in UTC.
|
||||
- Converting a `time::OffsetDateTime` to `Value::Date` implicitly converts the value into UTC.
|
||||
If this is not desired do the time zone conversion yourself and use `time::PrimitiveDateTime`
|
||||
directly instead.
|
||||
- Add [histogram](https://github.com/quickwit-oss/tantivy/pull/1306) aggregation (@PSeitz)
|
||||
- Add support for fastfield on text fields (@PSeitz)
|
||||
- Add terms aggregation (@PSeitz)
|
||||
- Add support for zstd compression (@kryesh)
|
||||
|
||||
Tantivy 0.17
|
||||
================================
|
||||
|
||||
- LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115)
|
||||
- Adds a searcher Warmer API (@shikhar @fulmicoton)
|
||||
- Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211
|
||||
@@ -8,37 +33,45 @@ Tantivy 0.17
|
||||
- Schema now offers not indexing fieldnorms (@lpouget) [#922](https://github.com/quickwit-oss/tantivy/issues/922)
|
||||
- Reduce the number of fsync calls [#1225](https://github.com/quickwit-oss/tantivy/issues/1225)
|
||||
- Fix opening bytes index with dynamic codec (@PSeitz) [#1278](https://github.com/quickwit-oss/tantivy/issues/1278)
|
||||
- Added an aggregation collector compatible with Elasticsearch (@PSeitz)
|
||||
- Added an aggregation collector for range, average and stats compatible with Elasticsearch. (@PSeitz)
|
||||
- Added a JSON schema type @fulmicoton [#1251](https://github.com/quickwit-oss/tantivy/issues/1251)
|
||||
- Added support for slop in phrase queries @halvorboe [#1068](https://github.com/quickwit-oss/tantivy/issues/1068)
|
||||
|
||||
Tantivy 0.16.2
|
||||
================================
|
||||
- Bugfix in FuzzyTermQuery. (tranposition_cost_one was not doing anything)
|
||||
|
||||
- Bugfix in FuzzyTermQuery. (transposition_cost_one was not doing anything)
|
||||
|
||||
Tantivy 0.16.1
|
||||
========================
|
||||
|
||||
- Major Bugfix on multivalued fastfield. #1151
|
||||
- Demux operation (@PSeitz)
|
||||
|
||||
Tantivy 0.16.0
|
||||
=========================
|
||||
|
||||
- Bugfix in the filesum check. (@evanxg852000) #1127
|
||||
- Bugfix in positions when the index is sorted by a field. (@appaquet) #1125
|
||||
|
||||
Tantivy 0.15.3
|
||||
=========================
|
||||
- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
|
||||
|
||||
- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
|
||||
|
||||
Tantivy 0.15.2
|
||||
========================
|
||||
|
||||
- Major bugfix. DocStore still panics when a deleted doc is at the beginning of a block. (@appaquet) #1088
|
||||
|
||||
Tantivy 0.15.1
|
||||
=========================
|
||||
|
||||
- Major bugfix. DocStore panics when first block is deleted. (@appaquet) #1077
|
||||
|
||||
Tantivy 0.15.0
|
||||
=========================
|
||||
|
||||
- API Changes. Using Range instead of (start, end) in the API and internals (`FileSlice`, `OwnedBytes`, `Snippets`, ...)
|
||||
This change is breaking but migration is trivial.
|
||||
- Added an Histogram collector. (@fulmicoton) #994
|
||||
@@ -60,9 +93,9 @@ Tantivy 0.15.0
|
||||
- Updated TermMerger implementation to rely on the union feature of the FST (@scampi) #469
|
||||
- Add boolean marking whether position is required in the query_terms API call (@fulmicoton). #1070
|
||||
|
||||
|
||||
Tantivy 0.14.0
|
||||
=========================
|
||||
|
||||
- Remove dependency to atomicwrites #833 .Implemented by @fulmicoton upon suggestion and research from @asafigan).
|
||||
- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
|
||||
- API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
|
||||
@@ -81,16 +114,19 @@ This version breaks compatibility and requires users to reindex everything.
|
||||
|
||||
Tantivy 0.13.2
|
||||
===================
|
||||
|
||||
Bugfix. Acquiring a facet reader on a segment that does not contain any
|
||||
doc with this facet returns `None`. (#896)
|
||||
|
||||
Tantivy 0.13.1
|
||||
===================
|
||||
|
||||
Made `Query` and `Collector` `Send + Sync`.
|
||||
Updated misc dependency versions.
|
||||
|
||||
Tantivy 0.13.0
|
||||
======================
|
||||
|
||||
Tantivy 0.13 introduce a change in the index format that will require
|
||||
you to reindex your index (BlockWAND information are added in the skiplist).
|
||||
The index size increase is minor as this information is only added for
|
||||
@@ -105,6 +141,7 @@ so that we can discuss possible solutions.
|
||||
A freshly created DocSet point directly to their first doc. A sentinel value called TERMINATED marks the end of a DocSet.
|
||||
`.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
|
||||
As a result, iterating through DocSet now looks as follows
|
||||
|
||||
```rust
|
||||
let mut doc = docset.doc();
|
||||
while doc != TERMINATED {
|
||||
@@ -112,7 +149,9 @@ while doc != TERMINATED {
|
||||
doc = docset.advance();
|
||||
}
|
||||
```
|
||||
|
||||
The change made it possible to greatly simplify a lot of the docset's code.
|
||||
|
||||
- Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
|
||||
- Added an offset option to the Top(.*)Collectors. (@robyoung)
|
||||
- Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
|
||||
@@ -120,6 +159,7 @@ to the PISA team for answering all my questions!)
|
||||
|
||||
Tantivy 0.12.0
|
||||
======================
|
||||
|
||||
- Removing static dispatch in tokenizers for simplicity. (#762)
|
||||
- Added backward iteration for `TermDictionary` stream. (@halvorboe)
|
||||
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
|
||||
@@ -130,30 +170,32 @@ Tantivy 0.12.0
|
||||
## How to update?
|
||||
|
||||
Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
|
||||
minor changes. Check https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs
|
||||
minor changes. Check <https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs>
|
||||
to check for some code sample.
|
||||
|
||||
Tantivy 0.11.3
|
||||
=======================
|
||||
|
||||
- Fixed DateTime as a fast field (#735)
|
||||
|
||||
Tantivy 0.11.2
|
||||
=======================
|
||||
|
||||
- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
|
||||
- Exposing a constructor for `WatchHandle` (#731)
|
||||
|
||||
Tantivy 0.11.1
|
||||
=====================
|
||||
- Bug fix #729
|
||||
|
||||
- Bug fix #729
|
||||
|
||||
Tantivy 0.11.0
|
||||
=====================
|
||||
|
||||
- Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
|
||||
- Various bugfixes in the query parser.
|
||||
- Better handling of hyphens in query parser. (#609)
|
||||
- Better handling of whitespaces.
|
||||
- Better handling of hyphens in query parser. (#609)
|
||||
- Better handling of whitespaces.
|
||||
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
|
||||
- API change around `Box<BoxableTokenizer>`. See detail in #629
|
||||
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
|
||||
@@ -184,7 +226,6 @@ Tantivy 0.10.1
|
||||
Avoid watching the mmap directory until someone effectively creates a reader that uses
|
||||
this functionality.
|
||||
|
||||
|
||||
Tantivy 0.10.0
|
||||
=====================
|
||||
|
||||
@@ -200,6 +241,7 @@ Tantivy 0.10.0
|
||||
|
||||
Minor
|
||||
---------
|
||||
|
||||
- Switched to Rust 2018 (@uvd)
|
||||
- Small simplification of the code.
|
||||
Calling .freq() or .doc() when .advance() has never been called
|
||||
@@ -207,8 +249,7 @@ on segment postings should panic from now on.
|
||||
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
|
||||
- Fast fields are now preloaded when the `SegmentReader` is created.
|
||||
- `IndexMeta` is now public. (@hntd187)
|
||||
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
|
||||
Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
|
||||
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
|
||||
only require a read lock. (@fulmicoton)
|
||||
- Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
|
||||
- Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
|
||||
@@ -224,16 +265,17 @@ Your program should be usable as is.
|
||||
Fast fields used to be accessed directly from the `SegmentReader`.
|
||||
The API changed, you are now required to acquire your fast field reader via the
|
||||
`segment_reader.fast_fields()`, and use one of the typed method:
|
||||
|
||||
- `.u64()`, `.i64()` if your field is single-valued ;
|
||||
- `.u64s()`, `.i64s()` if your field is multi-valued ;
|
||||
- `.bytes()` if your field is bytes fast field.
|
||||
|
||||
|
||||
|
||||
Tantivy 0.9.0
|
||||
=====================
|
||||
|
||||
*0.9.0 index format is not compatible with the
|
||||
previous index format.*
|
||||
|
||||
- MAJOR BUGFIX :
|
||||
Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
|
||||
- Removed most unsafe (@fulmicoton)
|
||||
@@ -277,37 +319,40 @@ To update from tantivy 0.8, you will need to go through the following steps.
|
||||
|
||||
```
|
||||
|
||||
|
||||
Tantivy 0.8.2
|
||||
=====================
|
||||
|
||||
Fixing build for x86_64 platforms. (#496)
|
||||
No need to update from 0.8.1 if tantivy
|
||||
is building on your platform.
|
||||
|
||||
|
||||
Tantivy 0.8.1
|
||||
=====================
|
||||
|
||||
Hotfix of #476.
|
||||
|
||||
Merge was reflecting deletes before commit was passed.
|
||||
Thanks @barrotsteindev for reporting the bug.
|
||||
|
||||
|
||||
Tantivy 0.8.0
|
||||
=====================
|
||||
|
||||
*No change in the index format*
|
||||
|
||||
- API Breaking change in the collector API. (@jwolfe, @fulmicoton)
|
||||
- Multithreaded search (@jwolfe, @fulmicoton)
|
||||
|
||||
|
||||
Tantivy 0.7.1
|
||||
=====================
|
||||
|
||||
*No change in the index format*
|
||||
|
||||
- Bugfix: NGramTokenizer panics on non ascii chars
|
||||
- Added a space usage API
|
||||
|
||||
Tantivy 0.7
|
||||
=====================
|
||||
|
||||
- Skip data for doc ids and positions (@fulmicoton),
|
||||
greatly improving performance
|
||||
- Tantivy error now rely on the failure crate (@drusellers)
|
||||
@@ -317,15 +362,15 @@ Tantivy 0.7
|
||||
|
||||
Tantivy 0.6.1
|
||||
=========================
|
||||
|
||||
- Bugfix #324. GC removing was removing file that were still in useful
|
||||
- Added support for parsing AllQuery and RangeQuery via QueryParser
|
||||
- AllQuery: `*`
|
||||
- RangeQuery:
|
||||
- Inclusive `field:[startIncl to endIncl]`
|
||||
- Exclusive `field:{startExcl to endExcl}`
|
||||
- Mixed `field:[startIncl to endExcl}` and vice versa
|
||||
- Unbounded `field:[start to *]`, `field:[* to end]`
|
||||
|
||||
- AllQuery: `*`
|
||||
- RangeQuery:
|
||||
- Inclusive `field:[startIncl to endIncl]`
|
||||
- Exclusive `field:{startExcl to endExcl}`
|
||||
- Mixed `field:[startIncl to endExcl}` and vice versa
|
||||
- Unbounded `field:[start to *]`, `field:[* to end]`
|
||||
|
||||
Tantivy 0.6
|
||||
==========================
|
||||
@@ -338,58 +383,53 @@ to this release!
|
||||
- Approximate field norms encoded over 1 byte. (@fulmicoton)
|
||||
- Compiles on stable rust (@fulmicoton)
|
||||
- Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
|
||||
- Completely uncompressed
|
||||
- Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
|
||||
- Completely uncompressed
|
||||
- Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
|
||||
- Add NGram token support (@drusellers)
|
||||
- Add Stopword Filter support (@drusellers)
|
||||
- Add a FuzzyTermQuery (@drusellers)
|
||||
- Add a RegexQuery (@drusellers)
|
||||
- Various performance improvements (@fulmicoton)_
|
||||
|
||||
|
||||
Tantivy 0.5.2
|
||||
===========================
|
||||
|
||||
- bugfix #274
|
||||
- bugfix #280
|
||||
- bugfix #289
|
||||
|
||||
|
||||
Tantivy 0.5.1
|
||||
==========================
|
||||
- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
|
||||
|
||||
- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
|
||||
|
||||
Tantivy 0.5
|
||||
==========================
|
||||
|
||||
- Faceting
|
||||
- RangeQuery
|
||||
- Configurable tokenization pipeline
|
||||
- Bugfix in PhraseQuery
|
||||
- Various query optimisation
|
||||
- Allowing very large indexes
|
||||
- 64 bits file address
|
||||
- Smarter encoding of the `TermInfo` objects
|
||||
|
||||
|
||||
- 64 bits file address
|
||||
- Smarter encoding of the `TermInfo` objects
|
||||
|
||||
Tantivy 0.4.3
|
||||
==========================
|
||||
|
||||
- Bugfix race condition when deleting files. (#198)
|
||||
|
||||
|
||||
Tantivy 0.4.2
|
||||
==========================
|
||||
|
||||
- Prevent usage of AVX2 instructions (#201)
|
||||
|
||||
|
||||
Tantivy 0.4.1
|
||||
==========================
|
||||
|
||||
- Bugfix for non-indexed fields. (#199)
|
||||
|
||||
|
||||
Tantivy 0.4.0
|
||||
==========================
|
||||
|
||||
@@ -404,37 +444,31 @@ Tantivy 0.4.0
|
||||
- Searching for a non-indexed field returns an explicit Error
|
||||
- Phrase query for non-tokenized field are not tokenized by the query parser.
|
||||
- Faster/Better indexing (@fulmicoton)
|
||||
- using murmurhash2
|
||||
- faster merging
|
||||
- more memory efficient fast field writer (@lnicola )
|
||||
- better handling of collisions
|
||||
- lesser memory usage
|
||||
- using murmurhash2
|
||||
- faster merging
|
||||
- more memory efficient fast field writer (@lnicola )
|
||||
- better handling of collisions
|
||||
- lesser memory usage
|
||||
- Added API, most notably to iterate over ranges of terms (@fulmicoton)
|
||||
- Bugfix that was preventing to unmap segment files, on index drop (@fulmicoton)
|
||||
- Made the doc! macro public (@fulmicoton)
|
||||
- Added an alternative implementation of the streaming dictionary (@fulmicoton)
|
||||
|
||||
|
||||
|
||||
Tantivy 0.3.1
|
||||
==========================
|
||||
|
||||
- Expose a method to trigger files garbage collection
|
||||
|
||||
|
||||
|
||||
Tantivy 0.3
|
||||
==========================
|
||||
|
||||
|
||||
Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
|
||||
for their contribution to this release.
|
||||
|
||||
Thanks also to everyone in tantivy gitter chat
|
||||
for their advise and company :)
|
||||
|
||||
https://gitter.im/tantivy-search/tantivy
|
||||
|
||||
<https://gitter.im/tantivy-search/tantivy>
|
||||
|
||||
Warning:
|
||||
|
||||
@@ -443,19 +477,16 @@ code and index format.
|
||||
You should not expect backward compatibility before
|
||||
tantivy 1.0.
|
||||
|
||||
|
||||
|
||||
New Features
|
||||
------------
|
||||
|
||||
- Delete. You can now delete documents from an index.
|
||||
- Support for windows (Thanks to @lnicola)
|
||||
|
||||
|
||||
Various Bugfixes & small improvements
|
||||
----------------------------------------
|
||||
|
||||
- Added CI for Windows (https://ci.appveyor.com/project/fulmicoton/tantivy)
|
||||
- Added CI for Windows (<https://ci.appveyor.com/project/fulmicoton/tantivy>)
|
||||
Thanks to @KodrAus ! (#108)
|
||||
- Various dependy version update (Thanks to @Ameobea) #76
|
||||
- Fixed several race conditions in `Index.wait_merge_threads`
|
||||
@@ -467,7 +498,3 @@ Thanks to @KodrAus ! (#108)
|
||||
- Building binary targets for tantivy-cli (Thanks to @KodrAus)
|
||||
- Misc invisible bug fixes, and code cleanup.
|
||||
- Use
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Cargo.toml (98 changed lines)
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.17.0-dev"
version = "0.18.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -10,68 +10,73 @@ homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
edition = "2021"

[dependencies]
base64 = "0.13"
oneshot = "0.1.3"
base64 = "0.13.0"
byteorder = "1.4.3"
crc32fast = "1.2.1"
once_cell = "1.7.2"
regex ={ version = "1.5.4", default-features = false, features = ["std"] }
tantivy-fst = "0.3"
memmap2 = {version = "0.5", optional=true}
lz4_flex = { version = "0.9", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3", optional = true }
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
tantivy-fst = "0.3.0"
memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.11", optional = true }
snap = { version = "1.0.5", optional = true }
tempfile = { version = "3.2", optional = true }
log = "0.4.14"
serde = { version = "1.0.126", features = ["derive"] }
serde_json = "1.0.64"
num_cpus = "1.13"
tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
num_cpus = "1.13.1"
fs2={ version = "0.4.3", optional = true }
levenshtein_automata = "0.2"
uuid = { version = "0.8.2", features = ["v4", "serde"] }
crossbeam = "0.8.1"
futures = { version = "0.3.15", features = ["thread-pool"] }
tantivy-query-grammar = { version="0.15.0", path="./query-grammar" }
tantivy-bitpacker = { version="0.1", path="./bitpacker" }
common = { version = "0.1", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version="0.1", path="./fastfield_codecs", default-features = false }
ownedbytes = { version="0.2", path="./ownedbytes" }
stable_deref_trait = "1.2"
rust-stemmers = "1.2"
downcast-rs = "1.2"
levenshtein_automata = "0.2.1"
uuid = { version = "1.0.0", features = ["v4", "serde"] }
crossbeam-channel = "0.5.4"
tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
tantivy-bitpacker = { version="0.2", path="./bitpacker" }
common = { version = "0.3", path = "./common/", package = "tantivy-common" }
fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
ownedbytes = { version="0.3", path="./ownedbytes" }
stable_deref_trait = "1.2.0"
rust-stemmers = "1.2.0"
downcast-rs = "1.2.0"
bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
census = "0.4"
census = "0.4.0"
fnv = "1.0.7"
thiserror = "1.0.24"
thiserror = "1.0.30"
htmlescape = "0.3.1"
fail = "0.5"
murmurhash32 = "0.2"
chrono = "0.4.19"
smallvec = "1.6.1"
rayon = "1.5"
lru = "0.7.0"
fastdivide = "0.4"
itertools = "0.10.0"
measure_time = "0.8.0"
pretty_assertions = "1.1.0"
fail = "0.5.0"
murmurhash32 = "0.2.0"
time = { version = "0.3.10", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.7.5"
fastdivide = "0.4.0"
itertools = "0.10.3"
measure_time = "0.8.2"
pretty_assertions = "1.2.1"
serde_cbor = { version = "0.11.2", optional = true }
async-trait = "0.1.53"
arc-swap = "1.5.0"

[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"

[dev-dependencies]
rand = "0.8.3"
rand = "0.8.5"
maplit = "1.0.2"
matches = "0.1.8"
proptest = "1.0"
matches = "0.1.9"
proptest = "1.0.0"
criterion = "0.3.5"
test-log = "0.2.8"
test-log = "0.2.10"
env_logger = "0.9.0"
pprof = {version= "0.6", features=["flamegraph", "criterion"]}
pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
futures = "0.3.21"

[dev-dependencies.fail]
version = "0.5"
version = "0.5.0"
features = ["failpoints"]

[profile.release]
@@ -90,10 +95,13 @@ mmap = ["fs2", "tempfile", "memmap2"]
brotli-compression = ["brotli"]
lz4-compression = ["lz4_flex"]
snappy-compression = ["snap"]
zstd-compression = ["zstd"]

failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.

quickwit = ["serde_cbor"]

[workspace]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
README.md (71 changed lines)
@@ -1,4 +1,3 @@
[](https://docs.rs/crate/tantivy/)
[](https://github.com/quickwit-oss/tantivy/actions/workflows/test.yml)
[](https://codecov.io/gh/quickwit-oss/tantivy)
@@ -8,7 +7,7 @@



**Tantivy** is a **full text search engine library** written in Rust.
**Tantivy** is a **full-text search engine library** written in Rust.

It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used
@@ -16,19 +15,23 @@ to build such a search engine.

Tantivy is, in fact, strongly inspired by Lucene's design.

If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.

# Benchmark

The following [benchmark](https://tantivy-search.github.io/bench/) break downs
performance for different type of queries / collection.
The following [benchmark](https://tantivy-search.github.io/bench/) breakdowns
performance for different types of queries/collections.

Your mileage WILL vary depending on the nature of queries and their load.

<img src="doc/assets/images/searchbenchmark.png">

# Features

- Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools
- Tiny startup time (<10ms), perfect for command-line tools
- BM25 scoring (the same as Lucene)
- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
- Phrase queries search (e.g. `"michael jackson"`)
@@ -43,23 +46,24 @@ Your mileage WILL vary depending on the nature of queries and their load.
- Range queries
- Faceted search
- Configurable indexing (optional term frequency and position indexing)
- JSON Field
- Aggregation Collector: range buckets, average, and stats metrics
- LogMergePolicy with deletes
- Searcher Warmer API
- Cheesy logo with a horse

## Non-features

- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
are within the scope of Tantivy.

Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).

# Getting started

Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.

- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
index documents, and search via the CLI or a small server with a REST API.
It walks you through getting a wikipedia search engine up and running in a few minutes.
It walks you through getting a Wikipedia search engine up and running in a few minutes.
- [Reference doc for the last released version](https://docs.rs/tantivy/)
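For orientation, here is a minimal index-and-search sketch in the spirit of the basic_search example linked above; the schema, field name, and in-RAM index are illustrative choices for the example, not something prescribed by the README.

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Define a schema with a single indexed and stored text field.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    // Index a document in RAM (a directory-backed index works the same way).
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "The Old Man and the Sea"))?;
    writer.commit()?;

    // Search: parse a query against the `title` field and collect the top 10 hits.
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("sea")?;
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    println!("{} matching document(s)", top_docs.len());
    Ok(())
}
```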
# How can I support this project?
@@ -119,3 +123,46 @@ By default, `rustc` compiles everything in the `examples/` directory in debug mo
rust-gdb target/debug/examples/$EXAMPLE_NAME
$ gdb run
```

# Companies Using Tantivy

<p align="left">
<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" />
<img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
<img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />
<img align="center" src="doc/assets/images/nuclia-dark-theme.png#gh-dark-mode-only" alt="Nuclia" height="35" width="auto" />
<img align="center" src="doc/assets/images/humanfirst.ai-dark-theme.png#gh-dark-mode-only" alt="Humanfirst.ai" height="25" width="auto" />
<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
</p>

# FAQ

### Can I use Tantivy in other languages?

- Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py)
- Ruby → [tantiny](https://github.com/baygeldin/tantiny)

You can also find other bindings on [GitHub](https://github.com/search?q=tantivy) but they may be less maintained.

### What are some examples of Tantivy use?

- [seshat](https://github.com/matrix-org/seshat/): A matrix message database/indexer
- [tantiny](https://github.com/baygeldin/tantiny): Tiny full-text search for Ruby
- [lnx](https://github.com/lnx-search/lnx): adaptable, typo tolerant search engine with a REST API
- and [more](https://github.com/search?q=tantivy)!

### On average, how much faster is Tantivy compared to Lucene?

- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.

### Does tantivy support incremental indexing?

- Yes.

### How can I edit documents?

- Data in tantivy is immutable. To edit a document, the document needs to be deleted and reindexed.

### When will my documents be searchable during indexing?

- Documents will be searchable after a `commit` is called on an `IndexWriter`. Existing `IndexReader`s will also need to be reloaded in order to reflect the changes. Finally, changes are only visible to newly acquired `Searcher`.
@@ -4,6 +4,7 @@ use tantivy::schema::{INDEXED, STORED, STRING, TEXT};
|
||||
use tantivy::Index;
|
||||
|
||||
const HDFS_LOGS: &str = include_str!("hdfs.json");
|
||||
const NUM_REPEATS: usize = 2;
|
||||
|
||||
pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
let schema = {
|
||||
@@ -20,6 +21,11 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
schema_builder.add_text_field("severity", STRING | STORED);
|
||||
schema_builder.build()
|
||||
};
|
||||
let dynamic_schema = {
|
||||
let mut schema_builder = tantivy::schema::SchemaBuilder::new();
|
||||
schema_builder.add_json_field("json", TEXT);
|
||||
schema_builder.build()
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group("index-hdfs");
|
||||
group.sample_size(20);
|
||||
@@ -27,7 +33,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..10 {
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
@@ -39,7 +45,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..10 {
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
@@ -52,9 +58,11 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
@@ -62,9 +70,43 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(schema_with_store.clone());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let doc = schema.parse_document(doc_json).unwrap();
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
});
|
||||
group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
|
||||
b.iter(|| {
|
||||
let index = Index::create_in_ram(dynamic_schema.clone());
|
||||
let json_field = dynamic_schema.get_field("json").unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
|
||||
for _ in 0..NUM_REPEATS {
|
||||
for doc_json in HDFS_LOGS.trim().split("\n") {
|
||||
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||
serde_json::from_str(doc_json).unwrap();
|
||||
let doc = tantivy::doc!(json_field=>json_val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
}
|
||||
}
|
||||
index_writer.commit().unwrap();
|
||||
})
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "tantivy-bitpacker"
|
||||
version = "0.1.1"
|
||||
edition = "2018"
|
||||
version = "0.2.0"
|
||||
edition = "2021"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
categories = []
|
||||
|
||||
@@ -6,6 +6,7 @@ extern crate test;
|
||||
mod tests {
|
||||
use tantivy_bitpacker::BlockedBitpacker;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
fn bench_blockedbitp_read(b: &mut Bencher) {
|
||||
let mut blocked_bitpacker = BlockedBitpacker::new();
|
||||
@@ -20,6 +21,7 @@ mod tests {
|
||||
out
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_blockedbitp_create(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
|
||||
@@ -82,14 +82,16 @@ impl BitUnpacker {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bit_width(&self) -> u8 {
|
||||
self.num_bits as u8
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
|
||||
if self.num_bits == 0 {
|
||||
return 0u64;
|
||||
}
|
||||
let num_bits = self.num_bits;
|
||||
let mask = self.mask;
|
||||
let addr_in_bits = idx * num_bits;
|
||||
let addr_in_bits = idx * self.num_bits;
|
||||
let addr = addr_in_bits >> 3;
|
||||
let bit_shift = addr_in_bits & 7;
|
||||
debug_assert!(
|
||||
@@ -101,7 +103,7 @@ impl BitUnpacker {
|
||||
.unwrap();
|
||||
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
|
||||
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
|
||||
val_shifted & mask
|
||||
val_shifted & self.mask
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -58,6 +58,10 @@ fn metadata_test() {
|
||||
assert_eq!(meta.num_bits(), 6);
|
||||
}
|
||||
|
||||
fn mem_usage<T>(items: &Vec<T>) -> usize {
|
||||
items.capacity() * std::mem::size_of::<T>()
|
||||
}
|
||||
|
||||
impl BlockedBitpacker {
|
||||
pub fn new() -> Self {
|
||||
let mut compressed_blocks = vec![];
|
||||
@@ -73,10 +77,8 @@ impl BlockedBitpacker {
|
||||
pub fn mem_usage(&self) -> usize {
|
||||
std::mem::size_of::<BlockedBitpacker>()
|
||||
+ self.compressed_blocks.capacity()
|
||||
+ self.offset_and_bits.capacity()
|
||||
* std::mem::size_of_val(&self.offset_and_bits.get(0).cloned().unwrap_or_default())
|
||||
+ self.buffer.capacity()
|
||||
* std::mem::size_of_val(&self.buffer.get(0).cloned().unwrap_or_default())
|
||||
+ mem_usage(&self.offset_and_bits)
|
||||
+ mem_usage(&self.buffer)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
[package]
|
||||
name = "tantivy-common"
|
||||
version = "0.1.0"
|
||||
version = "0.3.0"
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
description = "common traits and utility functions used by multiple tantivy subcrates"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
byteorder = "1.4.3"
|
||||
ownedbytes = { version="0.2", path="../ownedbytes" }
|
||||
ownedbytes = { version="0.3", path="../ownedbytes" }
|
||||
|
||||
[dev-dependencies]
|
||||
proptest = "1.0.0"
|
||||
|
||||
@@ -104,8 +104,6 @@ pub fn u64_to_f64(val: u64) -> f64 {
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
|
||||
use std::f64;
|
||||
|
||||
use proptest::prelude::*;
|
||||
|
||||
use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
|
||||
@@ -135,11 +133,11 @@ pub mod test {
|
||||
|
||||
#[test]
|
||||
fn test_i64_converter() {
|
||||
assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
|
||||
assert_eq!(i64_to_u64(i64::max_value()), u64::max_value());
|
||||
assert_eq!(i64_to_u64(i64::MIN), u64::MIN);
|
||||
assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
|
||||
test_i64_converter_helper(0i64);
|
||||
test_i64_converter_helper(i64::min_value());
|
||||
test_i64_converter_helper(i64::max_value());
|
||||
test_i64_converter_helper(i64::MIN);
|
||||
test_i64_converter_helper(i64::MAX);
|
||||
for i in -1000i64..1000i64 {
|
||||
test_i64_converter_helper(i);
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ pub trait DeserializeFrom<T: BinarySerializable> {
|
||||
|
||||
/// Implement deserialize from &[u8] for all types which implement BinarySerializable.
|
||||
///
|
||||
/// TryFrom would actually be preferrable, but not possible because of the orphan
|
||||
/// TryFrom would actually be preferable, but not possible because of the orphan
|
||||
/// rules (not completely sure if this could be resolved)
|
||||
impl<T: BinarySerializable> DeserializeFrom<T> for &[u8] {
|
||||
fn deserialize(&mut self) -> io::Result<T> {
|
||||
@@ -229,7 +229,7 @@ pub mod test {
|
||||
fixed_size_test::<u32>();
|
||||
assert_eq!(4, serialize_test(3u32));
|
||||
assert_eq!(4, serialize_test(5u32));
|
||||
assert_eq!(4, serialize_test(u32::max_value()));
|
||||
assert_eq!(4, serialize_test(u32::MAX));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -247,6 +247,11 @@ pub mod test {
|
||||
fixed_size_test::<u64>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_bool() {
|
||||
fixed_size_test::<bool>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_string() {
|
||||
assert_eq!(serialize_test(String::from("")), 1);
|
||||
@@ -272,6 +277,6 @@ pub mod test {
|
||||
assert_eq!(serialize_test(VInt(1234u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(16_383u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(16_384u64)), 3);
|
||||
assert_eq!(serialize_test(VInt(u64::max_value())), 10);
|
||||
assert_eq!(serialize_test(VInt(u64::MAX)), 10);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,7 +199,7 @@ mod tests {
|
||||
aux_test_vint(0);
|
||||
aux_test_vint(1);
|
||||
aux_test_vint(5);
|
||||
aux_test_vint(u64::max_value());
|
||||
aux_test_vint(u64::MAX);
|
||||
for i in 1..9 {
|
||||
let power_of_128 = 1u64 << (7 * i);
|
||||
aux_test_vint(power_of_128 - 1u64);
|
||||
@@ -228,6 +228,6 @@ mod tests {
|
||||
aux_test_serialize_vint_u32(power_of_128);
|
||||
aux_test_serialize_vint_u32(power_of_128 + 1u32);
|
||||
}
|
||||
aux_test_serialize_vint_u32(u32::max_value());
|
||||
aux_test_serialize_vint_u32(u32::MAX);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
|
||||
pub struct AntiCallToken(());
|
||||
|
||||
/// Trait used to indicate when no more write need to be done on a writer
|
||||
pub trait TerminatingWrite: Write {
|
||||
pub trait TerminatingWrite: Write + Send {
|
||||
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
|
||||
fn terminate(mut self) -> io::Result<()>
|
||||
where Self: Sized {
|
||||
|
||||
BIN doc/assets/images/Nuclia.png (new file, 3.1 KiB)
BIN doc/assets/images/element-dark-theme.png (new file, 56 KiB)
8 doc/assets/images/element.io.svg (new file, 5.2 KiB)
@@ -0,0 +1,8 @@
|
||||
<svg width="518" height="112" viewBox="0 0 518 112" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M56 112C86.9279 112 112 86.9279 112 56C112 25.0721 86.9279 0 56 0C25.0721 0 0 25.0721 0 56C0 86.9279 25.0721 112 56 112Z" fill="#0DBD8B"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M45.7615 26.093C45.7615 23.8325 47.5977 22.0001 49.8629 22.0001C65.2154 22.0001 77.6611 34.4199 77.6611 49.7406C77.6611 52.001 75.8248 53.8335 73.5597 53.8335C71.2945 53.8335 69.4583 52.001 69.4583 49.7406C69.4583 38.9408 60.6851 30.1859 49.8629 30.1859C47.5977 30.1859 45.7615 28.3534 45.7615 26.093Z" fill="white"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M85.8986 45.6477C88.1637 45.6477 89.9999 47.4801 89.9999 49.7406C89.9999 65.0612 77.5543 77.4811 62.2017 77.4811C59.9366 77.4811 58.1003 75.6486 58.1003 73.3882C58.1003 71.1277 59.9366 69.2953 62.2017 69.2953C73.024 69.2953 81.7972 60.5403 81.7972 49.7406C81.7972 47.4801 83.6334 45.6477 85.8986 45.6477Z" fill="white"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M66.3031 85.907C66.3031 88.1675 64.4668 89.9999 62.2017 89.9999C46.8492 89.9999 34.4035 77.58 34.4035 62.2594C34.4035 59.9989 36.2398 58.1665 38.5049 58.1665C40.77 58.1665 42.6063 59.9989 42.6063 62.2594C42.6063 73.0592 51.3795 81.8141 62.2017 81.8141C64.4668 81.8141 66.3031 83.6466 66.3031 85.907Z" fill="white"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M26.1014 66.3523C23.8363 66.3523 22.0001 64.5199 22.0001 62.2594C22 46.9388 34.4457 34.5189 49.7983 34.5189C52.0634 34.5189 53.8997 36.3514 53.8997 38.6118C53.8997 40.8723 52.0634 42.7047 49.7983 42.7047C38.976 42.7047 30.2028 51.4597 30.2028 62.2594C30.2028 64.5199 28.3666 66.3523 26.1014 66.3523Z" fill="white"/>
|
||||
<path d="M197 63.5H157.5C157.967 67.6333 159.467 70.9333 162 73.4C164.533 75.8 167.867 77 172 77C174.733 77 177.2 76.3333 179.4 75C181.6 73.6667 183.167 71.8667 184.1 69.6H196.1C194.5 74.8667 191.5 79.1333 187.1 82.4C182.767 85.6 177.633 87.2 171.7 87.2C163.967 87.2 157.7 84.6333 152.9 79.5C148.167 74.3667 145.8 67.8667 145.8 60C145.8 52.3333 148.2 45.9 153 40.7C157.8 35.5 164 32.9 171.6 32.9C179.2 32.9 185.333 35.4667 190 40.6C194.733 45.6667 197.1 52.0667 197.1 59.8L197 63.5ZM171.6 42.6C167.867 42.6 164.767 43.7 162.3 45.9C159.833 48.1 158.3 51.0333 157.7 54.7H185.3C184.767 51.0333 183.3 48.1 180.9 45.9C178.5 43.7 175.4 42.6 171.6 42.6ZM205.289 70.5V11H217.189V70.7C217.189 73.3667 218.656 74.7 221.589 74.7L223.689 74.6V85.9C222.556 86.1 221.356 86.2 220.089 86.2C214.956 86.2 211.189 84.9 208.789 82.3C206.456 79.7 205.289 75.7667 205.289 70.5ZM279.109 63.5H239.609C240.076 67.6333 241.576 70.9333 244.109 73.4C246.643 75.8 249.976 77 254.109 77C256.843 77 259.309 76.3333 261.509 75C263.709 73.6667 265.276 71.8667 266.209 69.6H278.209C276.609 74.8667 273.609 79.1333 269.209 82.4C264.876 85.6 259.743 87.2 253.809 87.2C246.076 87.2 239.809 84.6333 235.009 79.5C230.276 74.3667 227.909 67.8667 227.909 60C227.909 52.3333 230.309 45.9 235.109 40.7C239.909 35.5 246.109 32.9 253.709 32.9C261.309 32.9 267.443 35.4667 272.109 40.6C276.843 45.6667 279.209 52.0667 279.209 59.8L279.109 63.5ZM253.709 42.6C249.976 42.6 246.876 43.7 244.409 45.9C241.943 48.1 240.409 51.0333 239.809 54.7H267.409C266.876 51.0333 265.409 48.1 263.009 45.9C260.609 43.7 257.509 42.6 253.709 42.6ZM332.798 56.2V86H320.898V54.9C320.898 47.0333 317.632 43.1 311.098 43.1C307.565 43.1 304.732 44.2333 302.598 46.5C300.532 48.7667 299.498 51.8667 299.498 55.8V86H287.598V34.1H298.598V41C299.865 38.6667 301.798 36.7333 304.398 35.2C306.998 33.6667 310.232 32.9 314.098 32.9C321.298 32.9 326.498 35.6333 329.698 41.1C334.098 35.6333 339.965 32.9 347.298 32.9C353.365 32.9 358.032 34.8 361.298 38.6C364.565 42.3333 366.198 47.2667 366.198 53.4V86H354.298V54.9C354.298 47.0333 351.032 43.1 344.498 43.1C340.898 43.1 338.032 44.2667 335.898 46.6C333.832 48.8667 332.798 52.0667 332.798 56.2ZM425.379 63.5H385.879C386.346 67.6333 387.846 70.9333 390.379 73.4C392.912 75.8 396.246 77 400.379 77C403.112 77 405.579 76.3333 407.779 75C409.979 73.6667 411.546 71.8667 412.479 69.6H424.479C422.879 74.8667 419.879 79.1333 415.479 82.4C411.146 85.6 406.012 87.2 400.079 87.2C392.346 87.2 386.079 84.6333 381.279 79.5C376.546 74.3667 374.179 67.8667 374.179 60C374.179 52.3333 376.579 45.9 381.379 40.7C386.179 35.5 392.379 32.9 399.979 32.9C407.579 32.9 413.712 35.4667 418.379 40.6C423.112 45.6667 425.479 52.0667 425.479 59.8L425.379 63.5ZM399.979 42.6C396.246 42.6 393.146 43.7 390.679 45.9C388.212 48.1 386.679 51.0333 386.079 54.7H413.679C413.146 51.0333 411.679 48.1 409.279 45.9C406.879 43.7 403.779 42.6 399.979 42.6ZM444.868 34.1V41C446.068 38.7333 448.035 36.8333 450.768 35.3C453.568 33.7 456.935 32.9 460.868 32.9C467.001 32.9 471.735 34.7667 475.068 38.5C478.468 42.2333 480.168 47.2 480.168 53.4V86H468.268V54.9C468.268 51.2333 467.401 48.3667 465.668 46.3C464.001 44.1667 461.435 43.1 457.968 43.1C454.168 43.1 451.168 44.2333 448.968 46.5C446.835 48.7667 445.768 51.9 445.768 55.9V86H433.868V34.1H444.868ZM514.922 75.4V85.7C513.455 86.1 511.389 86.3 508.722 86.3C498.589 86.3 493.522 81.2 493.522 71V43.6H485.622V34.1H493.522V20.6H505.422V34.1H515.122V43.6H505.422V69.8C505.422 73.8667 507.355 75.9 511.222 75.9L514.922 75.4Z" fill="black"/>
|
||||
</svg>
|
||||
|
BIN doc/assets/images/humanfirst.ai-dark-theme.png (new file, 23 KiB)
BIN doc/assets/images/humanfirst.png (new file, 102 KiB)
BIN doc/assets/images/nuclia-dark-theme.png (new file, 7.8 KiB)
BIN doc/assets/images/searchbenchmark.png (new file, 653 KiB)
@@ -1,7 +1,5 @@
|
||||
# Summary
|
||||
|
||||
|
||||
|
||||
[Avant Propos](./avant-propos.md)
|
||||
|
||||
- [Segments](./basis.md)
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
> Tantivy is a **search** engine **library** for Rust.
|
||||
|
||||
If you are familiar with Lucene, it's an excellent approximation to consider tantivy as Lucene for rust. tantivy is heavily inspired by Lucene's design and
|
||||
they both have the same scope and targetted use cases.
|
||||
they both have the same scope and targeted use cases.
|
||||
|
||||
If you are not familiar with Lucene, let's break down our little tagline.
|
||||
|
||||
@@ -31,4 +31,4 @@ relevancy, collapsing, highlighting, spatial search.
|
||||
index from a different format.
|
||||
|
||||
Tantivy exposes a lot of low level API to do all of these things.
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ directory shipped with tantivy is the `MmapDirectory`.
|
||||
While this design has some downsides, this greatly simplifies the source code of
|
||||
tantivy. Caching is also entirely delegated to the OS.
|
||||
|
||||
`tantivy` works entirely (or almost) by directly reading the datastructures as they are layed on disk. As a result, the act of opening an indexing does not involve loading different datastructures from the disk into random access memory : starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
|
||||
`tantivy` works entirely (or almost) by directly reading the datastructures as they are laid out on disk. As a result, the act of opening an index does not involve loading datastructures from disk into random access memory: starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
|
||||
|
||||
This is an interesting property for a command line search engine, or for a multi-tenant log search engine: spawning a new process for each new query can be a perfectly sensible solution in some use cases.
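As a concrete illustration of that property, here is a minimal sketch of such a short-lived "open, query, exit" process. The index path, field name and query string are placeholders, and it assumes an index was previously created on disk (backed by the `MmapDirectory`); this is not code from this diff.

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::Index;

// Opens an existing on-disk index and runs a single query. Because the segment files
// are simply mmapped, nothing is bulk-loaded here; the OS page cache does the work.
fn count_matches(
    index_dir: &std::path::Path,
    field_name: &str,
    query_str: &str,
) -> tantivy::Result<usize> {
    let index = Index::open_in_dir(index_dir)?;
    let field = index
        .schema()
        .get_field(field_name)
        .expect("field must exist in the schema");
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![field]).parse_query(query_str)?;
    searcher.search(&*query, &Count)
}
```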
|
||||
|
||||
@@ -22,7 +22,6 @@ Of course this is crucial to reduce IO, and ensure that as much of our index can
|
||||
Also, whenever possible its data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access the data from your spinning hard disk, but this is also
|
||||
critical for performance if your data is read from an `SSD` or is even already in your page cache.
|
||||
|
||||
|
||||
## Segments, and the log method
|
||||
|
||||
That kind of compact layout comes at one cost: it prevents our datastructures from being dynamic.
|
||||
@@ -53,11 +52,7 @@ to get tantivy to fit your use case:
|
||||
|
||||
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Merging
|
||||
## Merging
|
||||
|
||||
As you index more and more data, your index will accumulate more and more segments.
|
||||
Having a lot of small segments is not really optimal. There is a bit of redundancy in having
|
||||
@@ -66,11 +61,7 @@ all these term dictionary. Also when searching, we will need to do term lookups
|
||||
That's where merging or compacting comes into place. Tantivy will continuously consider merge
|
||||
opportunities and start merging segments in the background.
|
||||
|
||||
|
||||
# Indexing throughput, number of indexing threads
|
||||
|
||||
|
||||
|
||||
## Indexing throughput, number of indexing threads
|
||||
|
||||
[^1]: This may eventually change.
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
# Examples
|
||||
|
||||
- [Basic search](/examples/basic_search.html)
|
||||
- [Basic search](/examples/basic_search.html)
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
|
||||
- [Index Sorting](#index-sorting)
|
||||
+ [Why Sorting](#why-sorting)
|
||||
* [Compression](#compression)
|
||||
* [Top-N Optimization](#top-n-optimization)
|
||||
* [Pruning](#pruning)
|
||||
* [Other](#other)
|
||||
+ [Usage](#usage)
|
||||
- [Why Sorting](#why-sorting)
|
||||
- [Compression](#compression)
|
||||
- [Top-N Optimization](#top-n-optimization)
|
||||
- [Pruning](#pruning)
|
||||
- [Other](#other)
|
||||
- [Usage](#usage)
|
||||
|
||||
# Index Sorting
|
||||
|
||||
@@ -15,32 +15,34 @@ Tantivy allows you to sort the index according to a property.
|
||||
|
||||
Presorting an index has several advantages:
|
||||
|
||||
###### Compression
|
||||
### Compression
|
||||
|
||||
When data is sorted it is easier to compress the data. E.g. the numbers sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5].
|
||||
When data is sorted it is easier to compress the data. E.g. the numbers sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5].
|
||||
If we apply delta encoding, the unsorted list becomes [5, -3, 1, -2, 3], while the sorted one becomes [1, 1, 1, 1, 1].
|
||||
Compression ratio is mainly affected on the fast field of the sorted property, every thing else is likely unaffected.
|
||||
###### Top-N Optimization
|
||||
The compression ratio is mainly affected for the fast field of the sorted property; everything else is likely unaffected.
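A tiny, self-contained sketch of that point (plain Rust, not tantivy internals): delta-encoding the sorted sequence produces a constant run that bitpacks far better than the unsorted one.

```rust
/// Delta-encodes a sequence: the first value is kept as-is, every following value is
/// replaced by its difference with the previous one.
fn delta_encode(vals: &[u64]) -> Vec<i64> {
    let mut prev = 0i64;
    vals.iter()
        .map(|&v| {
            let delta = v as i64 - prev;
            prev = v as i64;
            delta
        })
        .collect()
}

fn main() {
    assert_eq!(delta_encode(&[5, 2, 3, 1, 4]), vec![5, -3, 1, -2, 3]); // unsorted
    assert_eq!(delta_encode(&[1, 2, 3, 4, 5]), vec![1, 1, 1, 1, 1]); // sorted
}
```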
|
||||
|
||||
When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
|
||||
### Top-N Optimization
|
||||
|
||||
When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
|
||||
E.g. if the data is sorted by timestamp and we want the top-n newest docs containing a term, we can simply leverage the order of the docids.
|
||||
|
||||
Note: Tantivy 0.16 does not do this optimization yet.
|
||||
|
||||
###### Pruning
|
||||
### Pruning
|
||||
|
||||
Let's say we want all documents and want to apply the filter `>= 2010-08-11`. When the data is sorted, we could make a lookup in the fast field to find the docid range and use this as the filter.
|
||||
|
||||
Note: Tantivy 0.16 does not do this optimization yet.
|
||||
|
||||
###### Other?
|
||||
### Other?
|
||||
|
||||
In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?)
|
||||
|
||||
## Usage
|
||||
The index sorting can be configured setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of tantvy 0.16 only fast fields are allowed to be used.
|
||||
|
||||
```
|
||||
The index sorting can be configured by setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to an `IndexBuilder`. As of Tantivy 0.16 only fast fields are allowed to be used.
|
||||
|
||||
```rust
|
||||
let settings = IndexSettings {
|
||||
sort_by_field: Some(IndexSortByField {
|
||||
field: "intval".to_string(),
|
||||
@@ -58,4 +60,3 @@ let index = index_builder.create_in_ram().unwrap();
|
||||
Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073).
|
||||
|
||||
In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets).
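For reference, a minimal, self-contained sketch of the configuration described above. The u64 fast field `intval` mirrors the snippet earlier on this page; the crate-root re-exports (`IndexSettings`, `IndexSortByField`, `Order`) and the `IndexBuilder` API are assumed from the tantivy version this diff targets.

```rust
use tantivy::schema::{Schema, FAST, STORED};
use tantivy::{Index, IndexSettings, IndexSortByField, Order};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // The sort field must be a fast field.
    schema_builder.add_u64_field("intval", FAST | STORED);
    let schema = schema_builder.build();

    let settings = IndexSettings {
        sort_by_field: Some(IndexSortByField {
            field: "intval".to_string(),
            order: Order::Desc,
        }),
        ..Default::default()
    };

    // The settings are handed to the IndexBuilder; sorting itself is applied later,
    // during segment serialization and merging, via the docid mapping mentioned above.
    let index = Index::builder()
        .schema(schema)
        .settings(settings)
        .create_in_ram()?;
    let _ = index;
    Ok(())
}
```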
|
||||
|
||||
|
||||
130 doc/src/json.md (new file)
@@ -0,0 +1,130 @@
|
||||
# Json
|
||||
|
||||
As of tantivy 0.17, tantivy supports a json object type.
|
||||
This type can be used to allow for a schema-less search index.
|
||||
|
||||
When indexing a json object, we "flatten" the JSON. This operation emits terms that represent a triplet `(json_path, value_type, value)`
|
||||
|
||||
For instance, if user is a json field, the following document:
|
||||
|
||||
```json
|
||||
{
|
||||
"user": {
|
||||
"name": "Paul Masurel",
|
||||
"address": {
|
||||
"city": "Tokyo",
|
||||
"country": "Japan"
|
||||
},
|
||||
"created_at": "2018-11-12T23:20:50.52Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
emits the following tokens:
|
||||
|
||||
- ("name", Text, "Paul")
|
||||
- ("name", Text, "Masurel")
|
||||
- ("address.city", Text, "Tokyo")
|
||||
- ("address.country", Text, "Japan")
|
||||
- ("created_at", Date, 15420648505)
|
||||
|
||||
## Bytes-encoding and lexicographical sort
|
||||
|
||||
Like any other terms, these triplets are encoded into a binary format as follows.
|
||||
|
||||
- `json_path`: the json path is a sequence of "segments". In the example above, `address.city`
|
||||
is just a debug representation of the json path `["address", "city"]`.
|
||||
Its representation is done by separating segments by a unicode char `\x01`, and ending the path by `\x00`.
|
||||
- `value type`: One byte represents the `Value` type.
|
||||
- `value`: The value representation is just the regular Value representation.
|
||||
|
||||
This representation is designed to align the natural sort of Terms with the lexicographical sort
|
||||
of their binary representation (Tantivy's dictionary (whether fst or sstable) is sorted and does prefix encoding).
|
||||
|
||||
In the example above, the terms will be sorted as
|
||||
|
||||
- ("address.city", Text, "Tokyo")
|
||||
- ("address.country", Text, "Japan")
|
||||
- ("name", Text, "Masurel")
|
||||
- ("name", Text, "Paul")
|
||||
- ("created_at", Date, 15420648505)
|
||||
|
||||
As seen in "pitfalls", we may end up having to search for a value for a same path in several different fields. Putting the field code after the path makes it maximizes compression opportunities but also increases the chances for the two terms to end up in the actual same term dictionary block.
|
||||
|
||||
## Pitfalls, limitation and corner cases
|
||||
|
||||
Json gives very little information about the type of the literals it stores.
|
||||
All numeric types end up mapped as a "Number" and there are no types for dates.
|
||||
|
||||
At indexing time, tantivy will try to interpret numbers and strings as different types, following a priority order.
|
||||
|
||||
Numbers will be interpreted as u64, i64 and f64 in that order.
|
||||
Strings will be interpreted as rfc3339 dates or simple strings.
|
||||
|
||||
The first working type is picked and is the only term that is emitted for indexing.
|
||||
Note this interpretation happens on a per-document basis, and there is no effort to try to sniff
|
||||
a consistent field type at the scale of a segment.
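A sketch of that priority order using `serde_json` (the enum and function names here are illustrative, not tantivy's):

```rust
use serde_json::Number;

#[derive(Debug, PartialEq)]
enum InterpretedNumber {
    U64(u64),
    I64(i64),
    F64(f64),
}

/// Mirrors the priority order described above: u64 first, then i64, then f64.
fn interpret_number(n: &Number) -> InterpretedNumber {
    if let Some(v) = n.as_u64() {
        InterpretedNumber::U64(v)
    } else if let Some(v) = n.as_i64() {
        InterpretedNumber::I64(v)
    } else {
        InterpretedNumber::F64(n.as_f64().unwrap_or(f64::NAN))
    }
}

fn main() {
    let positive: Number = serde_json::from_str("233").unwrap();
    let negative: Number = serde_json::from_str("-3").unwrap();
    let fractional: Number = serde_json::from_str("2.5").unwrap();
    assert_eq!(interpret_number(&positive), InterpretedNumber::U64(233));
    assert_eq!(interpret_number(&negative), InterpretedNumber::I64(-3));
    assert_eq!(interpret_number(&fractional), InterpretedNumber::F64(2.5));
}
```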
|
||||
|
||||
On the query parser side on the other hand, we may end up emitting more than one type.
|
||||
For instance, we do not even know whether the value is a number or a string.
|
||||
|
||||
So the query
|
||||
|
||||
```rust
|
||||
my_path.my_segment:233
|
||||
```
|
||||
|
||||
Will be interpreted as
|
||||
|
||||
```rust
|
||||
(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)
|
||||
```
|
||||
|
||||
Likewise, we need to emit two tokens if the query contains an rfc3339 date.
|
||||
Indeed the date could have been actually a single token inside the text of a document at ingestion time. Generally speaking, we will always at least emit a string token in query parsing, and sometimes more.
|
||||
|
||||
If one more json field is defined, things get even more complicated.
|
||||
|
||||
## Default json field
|
||||
|
||||
If the schema contains a text field called "text" and a json field called `json_dynamic` that is set as a default field:
|
||||
`text:hello` could be reasonably interpreted as targeting the text field or as targeting the json field called `json_dynamic` with the json_path "text".
|
||||
|
||||
If there is such an ambiguity, we decide to only search in the "text" field: `text:hello`.
|
||||
|
||||
In other words, the parser will not search in default json fields if there is a schema hit.
|
||||
This is a product decision.
|
||||
|
||||
The user can still target the JSON field by specifying its name explicitly:
|
||||
`json_dynamic.text:hello`.
|
||||
|
||||
## Range queries are not supported
|
||||
|
||||
Json fields do not support range queries.
|
||||
|
||||
## Arrays do not work like nested object
|
||||
|
||||
If a json object contains an array, a search query might return more documents than expected.
|
||||
|
||||
Let's take an example.
|
||||
|
||||
```json
|
||||
{
|
||||
"cart_id": 3234234 ,
|
||||
"cart": [
|
||||
{"product_type": "sneakers", "attributes": {"color": "white"} },
|
||||
{"product_type": "t-shirt", "attributes": {"color": "red"}},
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Despite the array structure, a document in tantivy is a bag of terms.
|
||||
The query:
|
||||
|
||||
```rust
|
||||
cart.product_type:sneakers AND cart.attributes.color:red
|
||||
```
|
||||
|
||||
actually matches the document above.
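A sketch of how this can be observed (the json field name `data` and the writer budget are arbitrary choices here, not part of this document):

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let data = schema_builder.add_json_field("data", TEXT);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());
    let mut index_writer = index.writer(50_000_000)?;
    let doc = schema.parse_document(
        r#"{"data": {
            "cart_id": 3234234,
            "cart": [
                {"product_type": "sneakers", "attributes": {"color": "white"}},
                {"product_type": "t-shirt", "attributes": {"color": "red"}}
            ]
        }}"#,
    )?;
    index_writer.add_document(doc)?;
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![data]);
    // Both terms exist somewhere in the flattened bag of terms of this document,
    // so it matches, even though no single cart entry is a red sneaker.
    let query = query_parser
        .parse_query("data.cart.product_type:sneakers AND data.cart.attributes.color:red")?;
    assert_eq!(searcher.search(&*query, &Count)?, 1);
    Ok(())
}
```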
|
||||
@@ -20,13 +20,12 @@ fn main() -> tantivy::Result<()> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_fieldtype = schema::TextOptions::default()
|
||||
.set_indexing_options(
|
||||
TextFieldIndexing::default()
|
||||
.set_tokenizer("default")
|
||||
.set_index_option(IndexRecordOption::WithFreqs),
|
||||
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
|
||||
)
|
||||
.set_stored();
|
||||
let text_field = schema_builder.add_text_field("text", text_fieldtype);
|
||||
let score_fieldtype = crate::schema::IntOptions::default().set_fast(Cardinality::SingleValue);
|
||||
let score_fieldtype =
|
||||
crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
|
||||
let highscore_field = schema_builder.add_f64_field("highscore", score_fieldtype.clone());
|
||||
let price_field = schema_builder.add_f64_field("price", score_fieldtype.clone());
|
||||
|
||||
@@ -111,6 +110,7 @@ fn main() -> tantivy::Result<()> {
|
||||
(9f64..14f64).into(),
|
||||
(14f64..20f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: sub_agg_req_1.clone(),
|
||||
}),
|
||||
@@ -118,12 +118,12 @@ fn main() -> tantivy::Result<()> {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
|
||||
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
let res: Value = serde_json::to_value(&agg_res)?;
|
||||
println!("{}", serde_json::to_string_pretty(&res)?);
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -7,10 +7,11 @@
|
||||
// Of course, you can have a look at the tantivy's built-in collectors
|
||||
// such as the `CountCollector` for more examples.
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
// ---
|
||||
// Importing tantivy...
|
||||
use tantivy::collector::{Collector, SegmentCollector};
|
||||
use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
|
||||
use tantivy::fastfield::DynamicFastFieldReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index, Score, SegmentReader};
|
||||
@@ -103,7 +104,7 @@ impl SegmentCollector for StatsSegmentCollector {
|
||||
type Fruit = Option<Stats>;
|
||||
|
||||
fn collect(&mut self, doc: u32, _score: Score) {
|
||||
let value = self.fast_field_reader.get(doc) as f64;
|
||||
let value = self.fast_field_reader.get_val(doc as u64) as f64;
|
||||
self.stats.count += 1;
|
||||
self.stats.sum += value;
|
||||
self.stats.squared_sum += value * value;
|
||||
|
||||
@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// for your unit tests... Or this example.
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
// here we are registering our custome tokenizer
|
||||
// here we are registering our custom tokenizer
|
||||
// this will store tokens of 3 characters each
|
||||
index
|
||||
.tokenizers()
|
||||
|
||||
69 examples/date_time_field.rs (new file)
@@ -0,0 +1,69 @@
|
||||
// # DateTime field example
|
||||
//
|
||||
// This example shows how the DateTime field can be used
|
||||
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Cardinality, DateOptions, Schema, Value, INDEXED, STORED, STRING};
|
||||
use tantivy::Index;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
let mut schema_builder = Schema::builder();
|
||||
let opts = DateOptions::from(INDEXED)
|
||||
.set_stored()
|
||||
.set_fast(Cardinality::SingleValue)
|
||||
.set_precision(tantivy::DatePrecision::Seconds);
|
||||
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
|
||||
let event_type = schema_builder.add_text_field("event", STRING | STORED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
// # Indexing documents
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let doc = schema.parse_document(
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T12:53:50.53Z",
|
||||
"event": "pull-request"
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
let doc = schema.parse_document(
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T13:00:00.22Z",
|
||||
"event": "comment"
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
index_writer.commit()?;
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// # Default fields: event_type
|
||||
let query_parser = QueryParser::for_index(&index, vec![event_type]);
|
||||
{
|
||||
let query = query_parser.parse_query("event:comment")?;
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
|
||||
assert_eq!(count_docs.len(), 1);
|
||||
}
|
||||
{
|
||||
let query = query_parser
|
||||
.parse_query(r#"occurred_at:[2022-06-22T12:58:00Z TO 2022-06-23T00:00:00Z}"#)?;
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
|
||||
assert_eq!(count_docs.len(), 1);
|
||||
for (_score, doc_address) in count_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
assert!(matches!(
|
||||
retrieved_doc.get_first(occurred_at),
|
||||
Some(Value::Date(_))
|
||||
));
|
||||
assert_eq!(
|
||||
schema.to_json(&retrieved_doc),
|
||||
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
105 examples/json_field.rs (new file)
@@ -0,0 +1,105 @@
|
||||
// # Json field example
|
||||
//
|
||||
// This example shows how the json field can be used
|
||||
// to make tantivy partially schemaless by setting it as
|
||||
// default query parser field.
|
||||
|
||||
use tantivy::collector::{Count, TopDocs};
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Schema, FAST, STORED, STRING, TEXT};
|
||||
use tantivy::Index;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_date_field("timestamp", FAST | STORED);
|
||||
let event_type = schema_builder.add_text_field("event_type", STRING | STORED);
|
||||
let attributes = schema_builder.add_json_field("attributes", STORED | TEXT);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
// # Indexing documents
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let doc = schema.parse_document(
|
||||
r#"{
|
||||
"timestamp": "2022-02-22T23:20:50.53Z",
|
||||
"event_type": "click",
|
||||
"attributes": {
|
||||
"target": "submit-button",
|
||||
"cart": {"product_id": 103},
|
||||
"description": "the best vacuum cleaner ever"
|
||||
}
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
let doc = schema.parse_document(
|
||||
r#"{
|
||||
"timestamp": "2022-02-22T23:20:51.53Z",
|
||||
"event_type": "click",
|
||||
"attributes": {
|
||||
"target": "submit-button",
|
||||
"cart": {"product_id": 133},
|
||||
"description": "das keyboard",
|
||||
"event_type": "holiday-sale"
|
||||
}
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
index_writer.commit()?;
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// # Default fields: event_type and attributes
|
||||
// By setting attributes as a default field it allows omitting attributes itself, e.g. "target",
|
||||
// instead of "attributes.target"
|
||||
let query_parser = QueryParser::for_index(&index, vec![event_type, attributes]);
|
||||
{
|
||||
let query = query_parser.parse_query("target:submit-button")?;
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
|
||||
assert_eq!(count_docs.len(), 2);
|
||||
}
|
||||
{
|
||||
let query = query_parser.parse_query("target:submit")?;
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
|
||||
assert_eq!(count_docs.len(), 2);
|
||||
}
|
||||
{
|
||||
let query = query_parser.parse_query("cart.product_id:103")?;
|
||||
let count_docs = searcher.search(&*query, &Count)?;
|
||||
assert_eq!(count_docs, 1);
|
||||
}
|
||||
{
|
||||
let query = query_parser.parse_query("click AND cart.product_id:133")?;
|
||||
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
|
||||
assert_eq!(hits.len(), 1);
|
||||
}
|
||||
{
|
||||
// The sub-fields in the json field marked as default field still need to be explicitly
|
||||
// addressed
|
||||
let query = query_parser.parse_query("click AND 133")?;
|
||||
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
|
||||
assert_eq!(hits.len(), 0);
|
||||
}
|
||||
{
|
||||
// Default json fields are ignored if they collide with the schema
|
||||
let query = query_parser.parse_query("event_type:holiday-sale")?;
|
||||
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
|
||||
assert_eq!(hits.len(), 0);
|
||||
}
|
||||
// # Query via full attribute path
|
||||
{
|
||||
// This only searches in our schema's `event_type` field
|
||||
let query = query_parser.parse_query("event_type:click")?;
|
||||
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
|
||||
assert_eq!(hits.len(), 2);
|
||||
}
|
||||
{
|
||||
// Default json fields can still be accessed by full path
|
||||
let query = query_parser.parse_query("attributes.event_type:holiday-sale")?;
|
||||
let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
|
||||
assert_eq!(hits.len(), 1);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -2,8 +2,8 @@ use std::cmp::Reverse;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::{Arc, RwLock, Weak};
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::fastfield::FastFieldReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Field, Schema, FAST, TEXT};
|
||||
use tantivy::{
|
||||
@@ -52,7 +52,7 @@ impl Warmer for DynamicPriceColumn {
|
||||
let product_id_reader = segment.fast_fields().u64(self.field)?;
|
||||
let product_ids: Vec<ProductId> = segment
|
||||
.doc_ids_alive()
|
||||
.map(|doc| product_id_reader.get(doc))
|
||||
.map(|doc| product_id_reader.get_val(doc as u64))
|
||||
.collect();
|
||||
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
|
||||
let mut price_vals: Vec<Price> = Vec::new();
|
||||
@@ -145,11 +145,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade(
|
||||
&(price_dynamic_column.clone() as Arc<dyn Warmer>),
|
||||
)];
|
||||
let reader: IndexReader = index
|
||||
.reader_builder()
|
||||
.warmers(warmers)
|
||||
.num_searchers(1)
|
||||
.try_into()?;
|
||||
let reader: IndexReader = index.reader_builder().warmers(warmers).try_into()?;
|
||||
reader.reload()?;
|
||||
|
||||
let query_parser = QueryParser::for_index(&index, vec![text]);
|
||||
|
||||
@@ -1,21 +1,23 @@
|
||||
[package]
|
||||
name = "fastfield_codecs"
|
||||
version = "0.1.0"
|
||||
version = "0.2.0"
|
||||
authors = ["Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
description = "Fast field codecs used by tantivy"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
common = { version = "0.1", path = "../common/", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version="0.1.1", path = "../bitpacker/" }
|
||||
prettytable-rs = {version="0.8.0", optional= true}
|
||||
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
|
||||
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
|
||||
prettytable-rs = {version="0.9.0", optional= true}
|
||||
rand = {version="0.8.3", optional= true}
|
||||
|
||||
[dev-dependencies]
|
||||
more-asserts = "0.2.1"
|
||||
more-asserts = "0.3.0"
|
||||
proptest = "1.0.0"
|
||||
rand = "0.8.3"
|
||||
|
||||
[features]
|
||||
|
||||
@@ -4,13 +4,9 @@ extern crate test;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use fastfield_codecs::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
|
||||
use fastfield_codecs::linearinterpol::{
|
||||
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::multilinearinterpol::{
|
||||
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::bitpacked::BitpackedCodec;
|
||||
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
|
||||
use fastfield_codecs::linear::LinearCodec;
|
||||
use fastfield_codecs::*;
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
@@ -29,72 +25,59 @@ mod tests {
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
fn bench_get<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
|
||||
b: &mut Bencher,
|
||||
data: &[u64],
|
||||
) {
|
||||
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let mut bytes = vec![];
|
||||
S::serialize(
|
||||
&mut bytes,
|
||||
&data,
|
||||
stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
let reader = R::open_from_bytes(&bytes).unwrap();
|
||||
Codec::serialize(&mut bytes, &data).unwrap();
|
||||
let reader = Codec::open_from_bytes(OwnedBytes::new(bytes)).unwrap();
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
reader.get_u64(pos as u64, &bytes);
|
||||
let val = reader.get_val(pos as u64);
|
||||
debug_assert_eq!(data[pos as usize], val);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
fn bench_create<S: FastFieldCodecSerializer>(b: &mut Bencher, data: &[u64]) {
|
||||
let mut bytes = vec![];
|
||||
fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let mut bytes = Vec::new();
|
||||
b.iter(|| {
|
||||
S::serialize(
|
||||
&mut bytes,
|
||||
&data,
|
||||
stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
bytes.clear();
|
||||
Codec::serialize(&mut bytes, &data).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
use ownedbytes::OwnedBytes;
|
||||
use test::Bencher;
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BitpackedFastFieldSerializer>(b, &data);
|
||||
bench_create::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<LinearInterpolFastFieldSerializer>(b, &data);
|
||||
bench_create::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<MultiLinearInterpolFastFieldSerializer>(b, &data);
|
||||
bench_create::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(b, &data);
|
||||
bench_get::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>(b, &data);
|
||||
bench_get::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>(
|
||||
b, &data,
|
||||
);
|
||||
bench_get::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
|
||||
let min_value = data.iter().cloned().min().unwrap_or(0);
|
||||
|
||||
@@ -1,37 +1,26 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::BinarySerializable;
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct BitpackedFastFieldReader {
|
||||
pub struct BitpackedReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
pub min_value_u64: u64,
|
||||
pub max_value_u64: u64,
|
||||
min_value_u64: u64,
|
||||
max_value_u64: u64,
|
||||
num_vals: u64,
|
||||
}
|
||||
|
||||
impl<'data> FastFieldCodecReader for BitpackedFastFieldReader {
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
|
||||
let (_data, mut footer) = bytes.split_at(bytes.len() - 16);
|
||||
let min_value = u64::deserialize(&mut footer)?;
|
||||
let amplitude = u64::deserialize(&mut footer)?;
|
||||
let max_value = min_value + amplitude;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedFastFieldReader {
|
||||
min_value_u64: min_value,
|
||||
max_value_u64: max_value,
|
||||
bit_unpacker,
|
||||
})
|
||||
}
|
||||
impl Column for BitpackedReader {
|
||||
#[inline]
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
|
||||
self.min_value_u64 + self.bit_unpacker.get(doc, data)
|
||||
fn get_val(&self, doc: u64) -> u64 {
|
||||
self.min_value_u64 + self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
@@ -41,16 +30,21 @@ impl<'data> FastFieldCodecReader for BitpackedFastFieldReader {
|
||||
fn max_value(&self) -> u64 {
|
||||
self.max_value_u64
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.num_vals
|
||||
}
|
||||
}
|
||||
pub struct BitpackedFastFieldSerializerLegacy<'a, W: 'a + Write> {
|
||||
pub struct BitpackedSerializerLegacy<'a, W: 'a + Write> {
|
||||
bit_packer: BitPacker,
|
||||
write: &'a mut W,
|
||||
min_value: u64,
|
||||
num_vals: u64,
|
||||
amplitude: u64,
|
||||
num_bits: u8,
|
||||
}
|
||||
|
||||
impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
|
||||
impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
|
||||
/// Creates a new fast field serializer.
|
||||
///
|
||||
/// The serializer in fact encode the values by bitpacking
|
||||
@@ -63,15 +57,16 @@ impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
|
||||
write: &'a mut W,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
) -> io::Result<BitpackedFastFieldSerializerLegacy<'a, W>> {
|
||||
) -> io::Result<BitpackedSerializerLegacy<'a, W>> {
|
||||
assert!(min_value <= max_value);
|
||||
let amplitude = max_value - min_value;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_packer = BitPacker::new();
|
||||
Ok(BitpackedFastFieldSerializerLegacy {
|
||||
Ok(BitpackedSerializerLegacy {
|
||||
bit_packer,
|
||||
write,
|
||||
min_value,
|
||||
num_vals: 0,
|
||||
amplitude,
|
||||
num_bits,
|
||||
})
|
||||
@@ -82,21 +77,45 @@ impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
|
||||
let val_to_write: u64 = val - self.min_value;
|
||||
self.bit_packer
|
||||
.write(val_to_write, self.num_bits, &mut self.write)?;
|
||||
self.num_vals += 1;
|
||||
Ok(())
|
||||
}
|
||||
pub fn close_field(mut self) -> io::Result<()> {
|
||||
self.bit_packer.close(&mut self.write)?;
|
||||
self.min_value.serialize(&mut self.write)?;
|
||||
self.amplitude.serialize(&mut self.write)?;
|
||||
self.num_vals.serialize(&mut self.write)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitpackedFastFieldSerializer {}
|
||||
pub struct BitpackedCodec;
|
||||
|
||||
impl FastFieldCodec for BitpackedCodec {
|
||||
/// The CODEC_TYPE is an enum value used for serialization.
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
|
||||
|
||||
type Reader = BitpackedReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let footer_offset = bytes.len() - 24;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let min_value = u64::deserialize(&mut footer)?;
|
||||
let amplitude = u64::deserialize(&mut footer)?;
|
||||
let num_vals = u64::deserialize(&mut footer)?;
|
||||
let max_value = min_value + amplitude;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedReader {
|
||||
data,
|
||||
bit_unpacker,
|
||||
min_value_u64: min_value,
|
||||
max_value_u64: max_value,
|
||||
num_vals,
|
||||
})
|
||||
}
|
||||
|
||||
impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
|
||||
const NAME: &'static str = "Bitpacked";
|
||||
const ID: u8 = 1;
|
||||
/// Serializes data with the BitpackedFastFieldSerializer.
|
||||
///
|
||||
/// The serializer in fact encode the values by bitpacking
|
||||
@@ -105,51 +124,41 @@ impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
|
||||
/// It requires a `min_value` and a `max_value` to compute
|
||||
/// compute the minimum number of bits required to encode
|
||||
/// values.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
_data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
let mut serializer =
|
||||
BitpackedFastFieldSerializerLegacy::open(write, stats.min_value, stats.max_value)?;
|
||||
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
|
||||
let mut serializer = BitpackedSerializerLegacy::open(
|
||||
write,
|
||||
fastfield_accessor.min_value(),
|
||||
fastfield_accessor.max_value(),
|
||||
)?;
|
||||
|
||||
for val in data_iter {
|
||||
for val in fastfield_accessor.iter() {
|
||||
serializer.add_val(val)?;
|
||||
}
|
||||
serializer.close_field()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
fn is_applicable(
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
_stats: FastFieldStats,
|
||||
) -> bool {
|
||||
true
|
||||
}
|
||||
fn estimate(_fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
|
||||
let amplitude = stats.max_value - stats.min_value;
|
||||
|
||||
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
|
||||
let amplitude = fastfield_accessor.max_value() - fastfield_accessor.min_value();
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let num_bits_uncompressed = 64;
|
||||
num_bits as f32 / num_bits_uncompressed as f32
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tests::get_codec_test_data_sets;
|
||||
use crate::tests::get_codec_test_datasets;
|
||||
|
||||
fn create_and_validate(data: &[u64], name: &str) {
|
||||
crate::tests::create_and_validate::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(
|
||||
data, name,
|
||||
);
|
||||
crate::tests::create_and_validate::<BitpackedCodec>(data, name);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = get_codec_test_data_sets();
|
||||
let data_sets = get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate(&data, name);
|
||||
data.reverse();
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//! MultiLinearInterpol compressor uses linear interpolation to guess a values and stores the
|
||||
//! The BlockwiseLinear codec uses linear interpolation to guess a value and stores the
|
||||
//! offset, but in blocks of 512.
|
||||
//!
|
||||
//! With a CHUNK_SIZE of 512 and 29 byte metadata per block, we get a overhead for metadata of 232 /
|
||||
@@ -14,22 +14,25 @@ use std::io::{self, Read, Write};
|
||||
use std::ops::Sub;
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, DeserializeFrom};
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use crate::linear::{get_calculated_value, get_slope};
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
const CHUNK_SIZE: u64 = 512;
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct MultiLinearInterpolFastFieldReader {
|
||||
pub footer: MultiLinearInterpolFooter,
|
||||
pub struct BlockwiseLinearReader {
|
||||
data: OwnedBytes,
|
||||
pub footer: BlockwiseLinearFooter,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
struct Function {
|
||||
// The offset in the data is required, because we have diffrent bit_widths per block
|
||||
// The offset in the data is required, because we have different bit_widths per block
|
||||
data_start_offset: u64,
|
||||
// start_pos in the block will be CHUNK_SIZE * BLOCK_NUM
|
||||
start_pos: u64,
|
||||
@@ -99,14 +102,14 @@ impl BinarySerializable for Function {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct MultiLinearInterpolFooter {
|
||||
pub struct BlockwiseLinearFooter {
|
||||
pub num_vals: u64,
|
||||
pub min_value: u64,
|
||||
pub max_value: u64,
|
||||
interpolations: Vec<Function>,
|
||||
}
|
||||
|
||||
impl BinarySerializable for MultiLinearInterpolFooter {
|
||||
impl BinarySerializable for BlockwiseLinearFooter {
|
||||
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
|
||||
let mut out = vec![];
|
||||
self.num_vals.serialize(&mut out)?;
|
||||
@@ -118,8 +121,8 @@ impl BinarySerializable for MultiLinearInterpolFooter {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<MultiLinearInterpolFooter> {
|
||||
let mut footer = MultiLinearInterpolFooter {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<BlockwiseLinearFooter> {
|
||||
let mut footer = BlockwiseLinearFooter {
|
||||
num_vals: u64::deserialize(reader)?,
|
||||
min_value: u64::deserialize(reader)?,
|
||||
max_value: u64::deserialize(reader)?,
|
||||
@@ -143,26 +146,20 @@ fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Functio
|
||||
&interpolations[get_interpolation_position(doc)]
|
||||
}
|
||||
|
||||
impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
|
||||
let (_data, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
|
||||
let footer = MultiLinearInterpolFooter::deserialize(&mut footer)?;
|
||||
|
||||
Ok(MultiLinearInterpolFastFieldReader { footer })
|
||||
}
|
||||
|
||||
impl Column for BlockwiseLinearReader {
|
||||
#[inline]
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
|
||||
let interpolation = get_interpolation_function(doc, &self.footer.interpolations);
|
||||
let doc = doc - interpolation.start_pos;
|
||||
let calculated_value =
|
||||
get_calculated_value(interpolation.value_start_pos, doc, interpolation.slope);
|
||||
let diff = interpolation
|
||||
.bit_unpacker
|
||||
.get(doc, &data[interpolation.data_start_offset as usize..]);
|
||||
fn get_val(&self, idx: u64) -> u64 {
|
||||
let interpolation = get_interpolation_function(idx, &self.footer.interpolations);
|
||||
let in_block_idx = idx - interpolation.start_pos;
|
||||
let calculated_value = get_calculated_value(
|
||||
interpolation.value_start_pos,
|
||||
in_block_idx,
|
||||
interpolation.slope,
|
||||
);
|
||||
let diff = interpolation.bit_unpacker.get(
|
||||
in_block_idx,
|
||||
&self.data[interpolation.data_start_offset as usize..],
|
||||
);
|
||||
(calculated_value + diff) - interpolation.positive_val_offset
|
||||
}
|
||||
|
||||
@@ -174,39 +171,38 @@ impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
|
||||
fn max_value(&self) -> u64 {
|
||||
self.footer.max_value
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.footer.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
|
||||
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
|
||||
}
|
||||
/// Same as LinearSerializer, but working on chunks of CHUNK_SIZE elements.
|
||||
pub struct BlockwiseLinearCodec;
|
||||
|
||||
#[inline]
|
||||
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
|
||||
(first_val as i64 + (pos as f32 * slope) as i64) as u64
|
||||
}
|
||||
impl FastFieldCodec for BlockwiseLinearCodec {
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
|
||||
|
||||
/// Same as LinearInterpolFastFieldSerializer, but working on chunks of CHUNK_SIZE elements.
|
||||
pub struct MultiLinearInterpolFastFieldSerializer {}
|
||||
type Reader = BlockwiseLinearReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
let footer_offset = bytes.len() - 4 - footer_len as usize;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let footer = BlockwiseLinearFooter::deserialize(&mut footer)?;
|
||||
Ok(BlockwiseLinearReader { data, footer })
|
||||
}
|
||||
|
||||
impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
const NAME: &'static str = "MultiLinearInterpol";
|
||||
const ID: u8 = 3;
|
||||
/// Creates a new fast field serializer.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
_data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
assert!(stats.min_value <= stats.max_value);
|
||||
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
|
||||
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
|
||||
|
||||
let first_val = fastfield_accessor.get_val(0);
|
||||
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
|
||||
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
|
||||
|
||||
let mut first_function = Function {
|
||||
end_pos: stats.num_vals,
|
||||
end_pos: fastfield_accessor.num_vals(),
|
||||
value_start_pos: first_val,
|
||||
value_end_pos: last_val,
|
||||
..Default::default()
|
||||
@@ -217,7 +213,7 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
// Since we potentially apply multiple passes over the data, the data is cached.
|
||||
// Multiple iteration can be expensive (merge with index sorting can add lot of overhead per
|
||||
// iteration)
|
||||
let data = data_iter.collect::<Vec<_>>();
|
||||
let data = fastfield_accessor.iter().collect::<Vec<_>>();
|
||||
|
||||
//// let's split this into chunks of CHUNK_SIZE
|
||||
for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
|
||||
@@ -280,49 +276,46 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
}
|
||||
bit_packer.close(write)?;
|
||||
|
||||
let footer = MultiLinearInterpolFooter {
|
||||
num_vals: stats.num_vals,
|
||||
min_value: stats.min_value,
|
||||
max_value: stats.max_value,
|
||||
let footer = BlockwiseLinearFooter {
|
||||
num_vals: fastfield_accessor.num_vals(),
|
||||
min_value: fastfield_accessor.min_value(),
|
||||
max_value: fastfield_accessor.max_value(),
|
||||
interpolations,
|
||||
};
|
||||
footer.serialize(write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_applicable(
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
) -> bool {
|
||||
if stats.num_vals < 5_000 {
|
||||
return false;
|
||||
}
|
||||
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset = stats.max_value - stats.min_value;
|
||||
if stats
|
||||
.max_value
|
||||
.checked_add(theorethical_maximum_offset)
|
||||
.is_none()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
true
|
||||
}
|
||||
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima are for the deviation of the calculated value, and
/// the offset is also unknown.
|
||||
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
|
||||
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
|
||||
if fastfield_accessor.num_vals() < 10 * CHUNK_SIZE {
|
||||
return None;
|
||||
}
|
||||
|
||||
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset =
    fastfield_accessor.max_value() - fastfield_accessor.min_value();
|
||||
if fastfield_accessor
|
||||
.max_value()
|
||||
.checked_add(theorethical_maximum_offset)
|
||||
.is_none()
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
let first_val_in_first_block = fastfield_accessor.get_val(0);
|
||||
let last_elem_in_first_chunk = CHUNK_SIZE.min(stats.num_vals);
|
||||
let last_elem_in_first_chunk = CHUNK_SIZE.min(fastfield_accessor.num_vals());
|
||||
let last_val_in_first_block =
|
||||
fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
|
||||
let slope = get_slope(
|
||||
first_val_in_first_block,
|
||||
last_val_in_first_block,
|
||||
stats.num_vals,
|
||||
fastfield_accessor.num_vals(),
|
||||
);
|
||||
|
||||
// let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
|
||||
@@ -349,11 +342,11 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
//
|
||||
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
|
||||
|
||||
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
|
||||
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * fastfield_accessor.num_vals() as u64
|
||||
// function metadata per block
|
||||
+ 29 * (stats.num_vals / CHUNK_SIZE);
|
||||
let num_bits_uncompressed = 64 * stats.num_vals;
|
||||
num_bits as f32 / num_bits_uncompressed as f32
|
||||
+ 29 * (fastfield_accessor.num_vals() / CHUNK_SIZE);
|
||||
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -368,20 +361,35 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tests::get_codec_test_data_sets;
|
||||
use crate::tests::get_codec_test_datasets;
|
||||
|
||||
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
|
||||
crate::tests::create_and_validate::<
|
||||
MultiLinearInterpolFastFieldSerializer,
|
||||
MultiLinearInterpolFastFieldReader,
|
||||
>(data, name)
|
||||
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
|
||||
crate::tests::create_and_validate::<BlockwiseLinearCodec>(data, name)
|
||||
}
|
||||
|
||||
const HIGHEST_BIT: u64 = 1 << 63;
|
||||
pub fn i64_to_u64(val: i64) -> u64 {
|
||||
(val as u64) ^ HIGHEST_BIT
|
||||
}
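The `i64_to_u64` helper above is the order-preserving mapping used for signed fast field values: flipping the sign bit sends `i64::MIN..=i64::MAX` onto `0..=u64::MAX` monotonically, which is why very large mapped values show up in the compression tests. A small self-contained sketch; the inverse helper `u64_to_i64` and the `main` checks are additions for illustration, not part of the diff.

    const HIGHEST_BIT: u64 = 1 << 63;

    pub fn i64_to_u64(val: i64) -> u64 {
        (val as u64) ^ HIGHEST_BIT
    }

    // Illustrative inverse mapping (an assumption here, not shown in the diff).
    pub fn u64_to_i64(val: u64) -> i64 {
        (val ^ HIGHEST_BIT) as i64
    }

    fn main() {
        // Round trip and order preservation.
        assert_eq!(u64_to_i64(i64_to_u64(-5)), -5);
        assert_eq!(i64_to_u64(i64::MIN), 0);
        assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
        assert!(i64_to_u64(-1) < i64_to_u64(0));
    }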
|
||||
|
||||
#[test]
|
||||
fn test_compression_i64() {
|
||||
let data = (i64::MAX - 600_000..=i64::MAX - 550_000)
|
||||
.map(i64_to_u64)
|
||||
.collect::<Vec<_>>();
|
||||
let (estimate, actual_compression) =
|
||||
create_and_validate(&data, "simple monotonically large i64").unwrap();
|
||||
assert!(actual_compression < 0.2);
|
||||
assert!(estimate < 0.20);
|
||||
assert!(estimate > 0.15);
|
||||
assert!(actual_compression > 0.01);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compression() {
|
||||
let data = (10..=6_000_u64).collect::<Vec<_>>();
|
||||
let (estimate, actual_compression) =
|
||||
create_and_validate(&data, "simple monotonically large");
|
||||
create_and_validate(&data, "simple monotonically large").unwrap();
|
||||
assert!(actual_compression < 0.2);
|
||||
assert!(estimate < 0.20);
|
||||
assert!(estimate > 0.15);
|
||||
@@ -390,7 +398,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = get_codec_test_data_sets();
|
||||
let data_sets = get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate(&data, name);
|
||||
data.reverse();
|
||||
49
fastfield_codecs/src/column.rs
Normal file
@@ -0,0 +1,49 @@
pub trait Column<T = u64> {
    /// Return the value associated to the given idx.
    ///
    /// This accessor should return as fast as possible.
    ///
    /// # Panics
    ///
    /// May panic if `idx` is greater than the column length.
    fn get_val(&self, idx: u64) -> T;

    /// Fills an output buffer with the fast field values
    /// associated with the `DocId` going from
    /// `start` to `start + output.len()`.
    ///
    /// Regardless of the type of `Item`, this method works
    /// - transmuting the output array
    /// - extracting the `Item`s as if they were `u64`
    /// - possibly converting the `u64` value to the right type.
    ///
    /// # Panics
    ///
    /// May panic if `start + output.len()` is greater than
    /// the segment's `maxdoc`.
    fn get_range(&self, start: u64, output: &mut [T]) {
        for (out, idx) in output.iter_mut().zip(start..) {
            *out = self.get_val(idx);
        }
    }

    /// Returns the minimum value for this fast field.
    ///
    /// The min value does not take possibly deleted documents into account,
    /// and should be considered as a lower bound of the actual minimum value.
    fn min_value(&self) -> T;

    /// Returns the maximum value for this fast field.
    ///
    /// The max value does not take possibly deleted documents into account,
    /// and should be considered as an upper bound of the actual maximum value.
    fn max_value(&self) -> T;

    fn num_vals(&self) -> u64;

    /// Returns an iterator over the data.
    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
        Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
    }
}
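To make the trait concrete, here is a minimal sketch (not part of the diff) of a hypothetical constant-valued column. Only the four required methods are supplied; `get_range` and `iter` come from the default implementations above, which is the same pattern the real readers such as `BlockwiseLinearReader` follow, with `get_val` doing the actual decoding.

    // Hypothetical example column in which every entry holds the same value.
    struct ConstColumn {
        value: u64,
        len: u64,
    }

    impl Column for ConstColumn {
        fn get_val(&self, _idx: u64) -> u64 {
            self.value
        }
        fn min_value(&self) -> u64 {
            self.value
        }
        fn max_value(&self) -> u64 {
            self.value
        }
        fn num_vals(&self) -> u64 {
            self.len
        }
    }

    fn demo() {
        let col = ConstColumn { value: 42, len: 3 };
        let mut out = [0u64; 3];
        col.get_range(0, &mut out); // filled by the trait's default implementation
        assert_eq!(out, [42, 42, 42]);
    }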
@@ -5,125 +5,188 @@ extern crate more_asserts;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use common::BinarySerializable;
|
||||
use ownedbytes::OwnedBytes;
|
||||
|
||||
pub mod bitpacked;
|
||||
pub mod linearinterpol;
|
||||
pub mod multilinearinterpol;
|
||||
pub mod blockwise_linear;
|
||||
pub mod linear;
|
||||
|
||||
pub trait FastFieldCodecReader: Sized {
|
||||
/// reads the metadata and returns the CodecReader
|
||||
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;
|
||||
mod column;
|
||||
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64;
|
||||
pub use self::column::Column;
|
||||
|
||||
fn min_value(&self) -> u64;
|
||||
fn max_value(&self) -> u64;
|
||||
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
#[repr(u8)]
pub enum FastFieldCodecType {
    Bitpacked = 1,
    Linear = 2,
    BlockwiseLinear = 3,
    Gcd = 4,
}

impl BinarySerializable for FastFieldCodecType {
    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
        self.to_code().serialize(wrt)
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let code = u8::deserialize(reader)?;
        let codec_type: Self = Self::from_code(code)
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}.`")))?;
        Ok(codec_type)
    }
}

impl FastFieldCodecType {
    pub fn to_code(self) -> u8 {
        self as u8
    }

    pub fn from_code(code: u8) -> Option<Self> {
        match code {
            1 => Some(Self::Bitpacked),
            2 => Some(Self::Linear),
            3 => Some(Self::BlockwiseLinear),
            4 => Some(Self::Gcd),
            _ => None,
        }
    }
}
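With the codec type stored as a single byte, opening a column becomes a dispatch on `FastFieldCodecType`. The following is a hedged sketch, not taken from the diff: it assumes the `FastFieldCodec` implementations introduced by this change (`BitpackedCodec`, `LinearCodec`, `BlockwiseLinearCodec`) and the `Column`/`OwnedBytes` types are in scope, and it sidesteps the `Gcd` variant, whose handling is not shown in this section.

    // Hedged sketch: dispatch on the serialized codec type and box the concrete
    // reader behind the object-safe `Column` trait.
    fn open_column(
        codec_type: FastFieldCodecType,
        bytes: OwnedBytes,
    ) -> io::Result<Box<dyn Column<u64>>> {
        match codec_type {
            FastFieldCodecType::Bitpacked => Ok(Box::new(BitpackedCodec::open_from_bytes(bytes)?)),
            FastFieldCodecType::Linear => Ok(Box::new(LinearCodec::open_from_bytes(bytes)?)),
            FastFieldCodecType::BlockwiseLinear => {
                Ok(Box::new(BlockwiseLinearCodec::open_from_bytes(bytes)?))
            }
            FastFieldCodecType::Gcd => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "GCD handling is out of scope for this sketch",
            )),
        }
    }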
|
||||
|
||||
/// The FastFieldSerializerEstimate trait is required on all variants
|
||||
/// of fast field compressions, to decide which one to choose.
|
||||
pub trait FastFieldCodecSerializer {
|
||||
pub trait FastFieldCodec {
|
||||
/// A codec needs to provide a unique name and id, which is
|
||||
/// used for debugging and de/serialization.
|
||||
const NAME: &'static str;
|
||||
const ID: u8;
|
||||
const CODEC_TYPE: FastFieldCodecType;
|
||||
|
||||
/// Check if the Codec is able to compress the data
|
||||
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> bool;
|
||||
type Reader: Column<u64>;
|
||||
|
||||
/// Reads the metadata and returns the CodecReader
|
||||
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader>;
|
||||
|
||||
/// Serializes the data using the serializer into write.
|
||||
///
|
||||
/// The fastfield_accessor iterator should be preferred over using fastfield_accessor for
|
||||
/// performance reasons.
|
||||
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column<u64>) -> io::Result<()>;
|
||||
|
||||
/// Returns an estimate of the compression ratio.
|
||||
/// If the codec is not applicable, returns `None`.
|
||||
///
|
||||
/// The baseline is uncompressed 64bit data.
|
||||
///
|
||||
/// It could make sense to also return a value representing
|
||||
/// computational complexity.
|
||||
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32;
|
||||
|
||||
/// Serializes the data using the serializer into write.
|
||||
/// There are multiple iterators, in case the codec needs to read the data multiple times.
|
||||
/// The iterators should be preferred over using fastfield_accessor for performance reasons.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()>;
|
||||
}
|
||||
|
||||
/// FastFieldDataAccess is the trait to access fast field data during serialization and estimation.
|
||||
pub trait FastFieldDataAccess {
|
||||
/// Return the value associated to the given position.
|
||||
///
|
||||
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
|
||||
/// reasons.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `position` is greater than the index.
|
||||
fn get_val(&self, position: u64) -> u64;
|
||||
fn estimate(fastfield_accessor: &impl Column) -> Option<f32>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
/// Statistics are used in codec detection and stored in the fast field footer.
|
||||
pub struct FastFieldStats {
|
||||
pub min_value: u64,
|
||||
pub max_value: u64,
|
||||
pub num_vals: u64,
|
||||
}
|
||||
|
||||
impl<'a> FastFieldDataAccess for &'a [u64] {
|
||||
impl<'a> Column for &'a [u64] {
|
||||
fn get_val(&self, position: u64) -> u64 {
|
||||
self[position as usize]
|
||||
}
|
||||
|
||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
|
||||
Box::new((self as &[u64]).iter().cloned())
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u64 {
|
||||
self.iter().min().unwrap_or(0)
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.iter().max().unwrap_or(0)
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.len() as u64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastFieldDataAccess for Vec<u64> {
|
||||
impl Column for Vec<u64> {
|
||||
fn get_val(&self, position: u64) -> u64 {
|
||||
self[position as usize]
|
||||
}
|
||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
|
||||
Box::new((self as &[u64]).iter().cloned())
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
self.iter().min().unwrap_or(0)
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.iter().max().unwrap_or(0)
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.len() as u64
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
|
||||
use crate::linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer};
|
||||
use crate::multilinearinterpol::{
|
||||
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use proptest::arbitrary::any;
|
||||
use proptest::proptest;
|
||||
|
||||
pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
|
||||
use crate::bitpacked::BitpackedCodec;
|
||||
use crate::blockwise_linear::BlockwiseLinearCodec;
|
||||
use crate::linear::LinearCodec;
|
||||
|
||||
pub fn create_and_validate<Codec: FastFieldCodec>(
|
||||
data: &[u64],
|
||||
name: &str,
|
||||
) -> (f32, f32) {
|
||||
if !S::is_applicable(&data, crate::tests::stats_from_vec(data)) {
|
||||
return (f32::MAX, 0.0);
|
||||
}
|
||||
let estimation = S::estimate(&data, crate::tests::stats_from_vec(data));
|
||||
let mut out = vec![];
|
||||
S::serialize(
|
||||
&mut out,
|
||||
&data,
|
||||
crate::tests::stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
) -> Option<(f32, f32)> {
|
||||
let estimation = Codec::estimate(&data)?;
|
||||
|
||||
let mut out: Vec<u8> = Vec::new();
|
||||
Codec::serialize(&mut out, &data).unwrap();
|
||||
|
||||
let reader = R::open_from_bytes(&out).unwrap();
|
||||
for (doc, orig_val) in data.iter().enumerate() {
|
||||
let val = reader.get_u64(doc as u64, &out);
|
||||
if val != *orig_val {
|
||||
panic!(
|
||||
"val {:?} does not match orig_val {:?}, in data set {}, data {:?}",
|
||||
val, orig_val, name, data
|
||||
);
|
||||
}
|
||||
}
|
||||
let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
|
||||
(estimation, actual_compression)
|
||||
|
||||
let reader = Codec::open_from_bytes(OwnedBytes::new(out)).unwrap();
|
||||
assert_eq!(reader.num_vals(), data.len() as u64);
|
||||
for (doc, orig_val) in data.iter().copied().enumerate() {
|
||||
let val = reader.get_val(doc as u64);
|
||||
assert_eq!(
|
||||
val, orig_val,
|
||||
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
|
||||
`{data:?}`",
|
||||
);
|
||||
}
|
||||
Some((estimation, actual_compression))
|
||||
}
|
||||
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
|
||||
proptest! {
|
||||
#[test]
|
||||
fn test_proptest_small(data in proptest::collection::vec(any::<u64>(), 1..10)) {
|
||||
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
|
||||
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_proptest_large(data in proptest::collection::vec(any::<u64>(), 1..6000)) {
|
||||
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
|
||||
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
let mut data_and_names = vec![];
|
||||
|
||||
let data = (10..=20_u64).collect::<Vec<_>>();
|
||||
let data = (10..=10_000_u64).collect::<Vec<_>>();
|
||||
data_and_names.push((data, "simple monotonically increasing"));
|
||||
|
||||
data_and_names.push((
|
||||
@@ -136,89 +199,82 @@ mod tests {
|
||||
data_and_names
|
||||
}
|
||||
|
||||
fn test_codec<S: FastFieldCodecSerializer, R: FastFieldCodecReader>() {
|
||||
let codec_name = S::NAME;
|
||||
for (data, data_set_name) in get_codec_test_data_sets() {
|
||||
let (estimate, actual) =
|
||||
crate::tests::create_and_validate::<S, R>(&data, data_set_name);
|
||||
let result = if estimate == f32::MAX {
|
||||
"Disabled".to_string()
|
||||
fn test_codec<C: FastFieldCodec>() {
|
||||
let codec_name = format!("{:?}", C::CODEC_TYPE);
|
||||
for (data, dataset_name) in get_codec_test_datasets() {
|
||||
let estimate_actual_opt: Option<(f32, f32)> =
|
||||
crate::tests::create_and_validate::<C>(&data, dataset_name);
|
||||
let result = if let Some((estimate, actual)) = estimate_actual_opt {
|
||||
format!("Estimate `{estimate}` Actual `{actual}`")
|
||||
} else {
|
||||
format!("Estimate {:?} Actual {:?} ", estimate, actual)
|
||||
"Disabled".to_string()
|
||||
};
|
||||
println!(
|
||||
"Codec {}, DataSet {}, {}",
|
||||
codec_name, data_set_name, result
|
||||
);
|
||||
println!("Codec {codec_name}, DataSet {dataset_name}, {result}");
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn test_codec_bitpacking() {
|
||||
test_codec::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>();
|
||||
test_codec::<BitpackedCodec>();
|
||||
}
|
||||
#[test]
|
||||
fn test_codec_interpolation() {
|
||||
test_codec::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>();
|
||||
test_codec::<LinearCodec>();
|
||||
}
|
||||
#[test]
|
||||
fn test_codec_multi_interpolation() {
|
||||
test_codec::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>();
|
||||
test_codec::<BlockwiseLinearCodec>();
|
||||
}
|
||||
|
||||
use super::*;
|
||||
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
|
||||
let min_value = data.iter().cloned().min().unwrap_or(0);
|
||||
let max_value = data.iter().cloned().max().unwrap_or(0);
|
||||
FastFieldStats {
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals: data.len() as u64,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn estimation_good_interpolation_case() {
|
||||
let data = (10..=20000_u64).collect::<Vec<_>>();
|
||||
|
||||
let linear_interpol_estimation =
|
||||
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.01);
|
||||
|
||||
let multi_linear_interpol_estimation =
|
||||
MultiLinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(multi_linear_interpol_estimation, 0.2);
|
||||
assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation);
|
||||
|
||||
let bitpacked_estimation =
|
||||
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, bitpacked_estimation);
|
||||
}
|
||||
#[test]
|
||||
fn estimation_test_bad_interpolation_case() {
|
||||
let data = vec![200, 10, 10, 10, 10, 1000, 20];
|
||||
|
||||
let linear_interpol_estimation =
|
||||
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.32);
|
||||
|
||||
let bitpacked_estimation =
|
||||
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_le!(bitpacked_estimation, linear_interpol_estimation);
|
||||
}
|
||||
#[test]
|
||||
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
|
||||
let mut data = (200..=20000_u64).collect::<Vec<_>>();
|
||||
let mut data: Vec<u64> = (200..=20000_u64).collect();
|
||||
data.push(1_000_000);
|
||||
|
||||
// In this case the linear interpolation can in fact not be worse than bitpacking,
// but the estimator adds some threshold, which leads to an estimated worse behavior.
|
||||
let linear_interpol_estimation =
|
||||
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.35);
|
||||
|
||||
let bitpacked_estimation =
|
||||
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_le!(bitpacked_estimation, 0.32);
|
||||
assert_le!(bitpacked_estimation, linear_interpol_estimation);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fast_field_codec_type_to_code() {
|
||||
let mut count_codec = 0;
|
||||
for code in 0..=255 {
|
||||
if let Some(codec_type) = FastFieldCodecType::from_code(code) {
|
||||
assert_eq!(codec_type.to_code(), code);
|
||||
count_codec += 1;
|
||||
}
|
||||
}
|
||||
assert_eq!(count_codec, 4);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,21 +2,23 @@ use std::io::{self, Read, Write};
|
||||
use std::ops::Sub;
|
||||
|
||||
use common::{BinarySerializable, FixedSize};
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct LinearInterpolFastFieldReader {
|
||||
pub struct LinearReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
pub footer: LinearInterpolFooter,
|
||||
pub footer: LinearFooter,
|
||||
pub slope: f32,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LinearInterpolFooter {
|
||||
pub struct LinearFooter {
|
||||
pub relative_max_value: u64,
|
||||
pub offset: u64,
|
||||
pub first_val: u64,
|
||||
@@ -26,7 +28,7 @@ pub struct LinearInterpolFooter {
|
||||
pub max_value: u64,
|
||||
}
|
||||
|
||||
impl BinarySerializable for LinearInterpolFooter {
|
||||
impl BinarySerializable for LinearFooter {
|
||||
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
|
||||
self.relative_max_value.serialize(write)?;
|
||||
self.offset.serialize(write)?;
|
||||
@@ -38,8 +40,8 @@ impl BinarySerializable for LinearInterpolFooter {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
|
||||
Ok(LinearInterpolFooter {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearFooter> {
|
||||
Ok(LinearFooter {
|
||||
relative_max_value: u64::deserialize(reader)?,
|
||||
offset: u64::deserialize(reader)?,
|
||||
first_val: u64::deserialize(reader)?,
|
||||
@@ -51,29 +53,15 @@ impl BinarySerializable for LinearInterpolFooter {
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for LinearInterpolFooter {
|
||||
impl FixedSize for LinearFooter {
|
||||
const SIZE_IN_BYTES: usize = 56;
|
||||
}
|
||||
|
||||
impl FastFieldCodecReader for LinearInterpolFastFieldReader {
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
|
||||
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
|
||||
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
|
||||
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
|
||||
|
||||
let num_bits = compute_num_bits(footer.relative_max_value);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(LinearInterpolFastFieldReader {
|
||||
bit_unpacker,
|
||||
footer,
|
||||
slope,
|
||||
})
|
||||
}
|
||||
impl Column for LinearReader {
|
||||
#[inline]
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
|
||||
fn get_val(&self, doc: u64) -> u64 {
|
||||
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
|
||||
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
|
||||
(calculated_value + self.bit_unpacker.get(doc, &self.data)) - self.footer.offset
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -84,47 +72,87 @@ impl FastFieldCodecReader for LinearInterpolFastFieldReader {
|
||||
fn max_value(&self) -> u64 {
|
||||
self.footer.max_value
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.footer.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
/// Fastfield serializer, which tries to guess values by linear interpolation
|
||||
/// and stores the difference bitpacked.
|
||||
pub struct LinearInterpolFastFieldSerializer {}
|
||||
pub struct LinearCodec;
|
||||
|
||||
#[inline]
|
||||
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
|
||||
pub(crate) fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
|
||||
if num_vals <= 1 {
|
||||
return 0.0;
|
||||
}
|
||||
// We calculate the slope with f64 high precision and use the result in lower precision f32
|
||||
// This is done in order to handle estimations for very large values like i64::MAX
|
||||
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
|
||||
let diff = diff(last_val, first_val);
|
||||
(diff / (num_vals - 1) as f64) as f32
|
||||
}
|
||||
|
||||
/// Delay the cast, to improve precision for very large u64 values.
|
||||
///
|
||||
/// Since i64 is mapped monotonically to u64 space, 0i64 is after the mapping i64::MAX.
|
||||
/// So very large values are not uncommon.
|
||||
///
|
||||
/// ```rust
|
||||
/// let val1 = i64::MAX;
|
||||
/// let val2 = i64::MAX - 100;
|
||||
/// assert_eq!(val1 - val2, 100);
|
||||
/// assert_eq!(val1 as f64 - val2 as f64, 0.0);
|
||||
/// ```
|
||||
fn diff(val1: u64, val2: u64) -> f64 {
|
||||
if val1 >= val2 {
|
||||
(val1 - val2) as f64
|
||||
} else {
|
||||
(val2 - val1) as f64 * -1.0
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
|
||||
first_val + (pos as f32 * slope) as u64
|
||||
pub fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
|
||||
if slope < 0.0 {
|
||||
first_val - (pos as f32 * -slope) as u64
|
||||
} else {
|
||||
first_val + (pos as f32 * slope) as u64
|
||||
}
|
||||
}
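Taken together, `get_slope`, the offset computed during serialization, and `get_calculated_value` implement a predict-and-store-the-difference scheme: the writer keeps only the non-negative, offset-shifted difference to a linear prediction, and the reader adds it back. A self-contained sketch of that round trip (simplified: no bit packing or footer handling, and `predict` stands in for `get_calculated_value`):

    // Standalone sketch of the linear-interpolation idea: predict each value from
    // a line through the first and last value, store only the offset-shifted
    // difference to the prediction, and add it back on read.
    fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
        if num_vals <= 1 {
            return 0.0;
        }
        ((last_val as f64 - first_val as f64) / (num_vals - 1) as f64) as f32
    }

    fn predict(first_val: u64, pos: u64, slope: f32) -> u64 {
        if slope < 0.0 {
            first_val - (pos as f32 * -slope) as u64
        } else {
            first_val + (pos as f32 * slope) as u64
        }
    }

    fn main() {
        let data: Vec<u64> = vec![10, 21, 29, 40, 51];
        let slope = get_slope(data[0], *data.last().unwrap(), data.len() as u64);

        // Offset that shifts every (value - prediction) difference to be >= 0.
        let offset = (0..data.len() as u64)
            .map(|pos| predict(data[0], pos, slope).saturating_sub(data[pos as usize]))
            .max()
            .unwrap();

        // "Serialize": keep only the small non-negative diffs.
        let diffs: Vec<u64> = (0..data.len() as u64)
            .map(|pos| (data[pos as usize] + offset) - predict(data[0], pos, slope))
            .collect();

        // "Deserialize": prediction + diff - offset restores the original value.
        for (pos, expected) in data.iter().enumerate() {
            let restored = (predict(data[0], pos as u64, slope) + diffs[pos]) - offset;
            assert_eq!(restored, *expected);
        }
    }

The overflow checks on `theorethical_maximum_offset` in `is_applicable`/`estimate` exist precisely because this offset gets added to every value before the diff is taken.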
|
||||
|
||||
impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
|
||||
const NAME: &'static str = "LinearInterpol";
|
||||
const ID: u8 = 2;
|
||||
impl FastFieldCodec for LinearCodec {
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear;
|
||||
|
||||
type Reader = LinearReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let footer_offset = bytes.len() - LinearFooter::SIZE_IN_BYTES;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let footer = LinearFooter::deserialize(&mut footer)?;
|
||||
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
|
||||
let num_bits = compute_num_bits(footer.relative_max_value);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(LinearReader {
|
||||
data,
|
||||
bit_unpacker,
|
||||
footer,
|
||||
slope,
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a new fast field serializer.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
assert!(stats.min_value <= stats.max_value);
|
||||
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
|
||||
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
|
||||
|
||||
let first_val = fastfield_accessor.get_val(0);
|
||||
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
|
||||
let slope = get_slope(first_val, last_val, stats.num_vals);
|
||||
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
|
||||
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
|
||||
// calculate offset to ensure all values are positive
|
||||
let mut offset = 0;
|
||||
let mut rel_positive_max = 0;
|
||||
for (pos, actual_value) in data_iter1.enumerate() {
|
||||
for (pos, actual_value) in fastfield_accessor.iter().enumerate() {
|
||||
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
|
||||
if calculated_value > actual_value {
|
||||
// negative value we need to apply an offset
|
||||
@@ -142,56 +170,54 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
|
||||
|
||||
let num_bits = compute_num_bits(relative_max_value);
|
||||
let mut bit_packer = BitPacker::new();
|
||||
for (pos, val) in data_iter.enumerate() {
|
||||
for (pos, val) in fastfield_accessor.iter().enumerate() {
|
||||
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
|
||||
let diff = (val + offset) - calculated_value;
|
||||
bit_packer.write(diff, num_bits, write)?;
|
||||
}
|
||||
bit_packer.close(write)?;
|
||||
|
||||
let footer = LinearInterpolFooter {
|
||||
let footer = LinearFooter {
|
||||
relative_max_value,
|
||||
offset,
|
||||
first_val,
|
||||
last_val,
|
||||
num_vals: stats.num_vals,
|
||||
min_value: stats.min_value,
|
||||
max_value: stats.max_value,
|
||||
num_vals: fastfield_accessor.num_vals(),
|
||||
min_value: fastfield_accessor.min_value(),
|
||||
max_value: fastfield_accessor.max_value(),
|
||||
};
|
||||
footer.serialize(write)?;
|
||||
Ok(())
|
||||
}
|
||||
fn is_applicable(
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
) -> bool {
|
||||
if stats.num_vals < 3 {
|
||||
return false; // disable compressor for this case
|
||||
}
|
||||
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset = stats.max_value - stats.min_value;
|
||||
if stats
|
||||
.max_value
|
||||
.checked_add(theorethical_maximum_offset)
|
||||
.is_none()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima for the deviation of the calculated value are, and
/// the offset to shift all values to >= 0 is also unknown.
|
||||
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
|
||||
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
|
||||
if fastfield_accessor.num_vals() < 3 {
|
||||
return None; // disable compressor for this case
|
||||
}
|
||||
|
||||
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset =
    fastfield_accessor.max_value() - fastfield_accessor.min_value();
|
||||
if fastfield_accessor
|
||||
.max_value()
|
||||
.checked_add(theorethical_maximum_offset)
|
||||
.is_none()
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
let first_val = fastfield_accessor.get_val(0);
|
||||
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
|
||||
let slope = get_slope(first_val, last_val, stats.num_vals);
|
||||
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
|
||||
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
|
||||
|
||||
// let's sample at 0%, 5%, 10% .. 95%, 100%
|
||||
let num_vals = stats.num_vals as f32 / 100.0;
|
||||
let num_vals = fastfield_accessor.num_vals() as f32 / 100.0;
|
||||
let sample_positions = (0..20)
|
||||
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
|
||||
.collect::<Vec<_>>();
|
||||
@@ -213,10 +239,11 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
|
||||
//
|
||||
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
|
||||
|
||||
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
|
||||
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
|
||||
let num_bits_uncompressed = 64 * stats.num_vals;
|
||||
num_bits as f32 / num_bits_uncompressed as f32
|
||||
let num_bits = compute_num_bits(relative_max_value as u64) as u64
|
||||
* fastfield_accessor.num_vals()
|
||||
+ LinearFooter::SIZE_IN_BYTES as u64;
|
||||
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,28 +259,45 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tests::get_codec_test_data_sets;
|
||||
use crate::tests::get_codec_test_datasets;
|
||||
|
||||
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
|
||||
crate::tests::create_and_validate::<
|
||||
LinearInterpolFastFieldSerializer,
|
||||
LinearInterpolFastFieldReader,
|
||||
>(data, name)
|
||||
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
|
||||
crate::tests::create_and_validate::<LinearCodec>(data, name)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_calculated_value_test() {
|
||||
// pos slope
|
||||
assert_eq!(get_calculated_value(100, 10, 5.0), 150);
|
||||
|
||||
// neg slope
|
||||
assert_eq!(get_calculated_value(100, 10, -5.0), 50);
|
||||
|
||||
// pos slope, very high values
|
||||
assert_eq!(
|
||||
get_calculated_value(i64::MAX as u64, 10, 5.0),
|
||||
i64::MAX as u64 + 50
|
||||
);
|
||||
// neg slope, very high values
|
||||
assert_eq!(
|
||||
get_calculated_value(i64::MAX as u64, 10, -5.0),
|
||||
i64::MAX as u64 - 50
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compression() {
|
||||
let data = (10..=6_000_u64).collect::<Vec<_>>();
|
||||
let (estimate, actual_compression) =
|
||||
create_and_validate(&data, "simple monotonically large");
|
||||
create_and_validate(&data, "simple monotonically large").unwrap();
|
||||
|
||||
assert!(actual_compression < 0.01);
|
||||
assert!(estimate < 0.01);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = get_codec_test_data_sets();
|
||||
fn test_with_codec_datasets() {
|
||||
let data_sets = get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate(&data, name);
|
||||
data.reverse();
|
||||
@@ -290,9 +334,10 @@ mod tests {
|
||||
#[test]
|
||||
fn linear_interpol_fast_field_rand() {
|
||||
for _ in 0..5000 {
|
||||
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
|
||||
let mut data = (0..10_000)
|
||||
.map(|_| rand::random::<u64>())
|
||||
.collect::<Vec<_>>();
|
||||
create_and_validate(&data, "random");
|
||||
|
||||
data.reverse();
|
||||
create_and_validate(&data, "random");
|
||||
}
|
||||
@@ -1,8 +1,9 @@
|
||||
#[macro_use]
|
||||
extern crate prettytable;
|
||||
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
|
||||
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
|
||||
use fastfield_codecs::{FastFieldCodecSerializer, FastFieldStats};
|
||||
use fastfield_codecs::bitpacked::BitpackedCodec;
|
||||
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
|
||||
use fastfield_codecs::linear::LinearCodec;
|
||||
use fastfield_codecs::{FastFieldCodec, FastFieldCodecType, FastFieldStats};
|
||||
use prettytable::{Cell, Row, Table};
|
||||
|
||||
fn main() {
|
||||
@@ -12,41 +13,32 @@ fn main() {
|
||||
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
|
||||
|
||||
for (data, data_set_name) in get_codec_test_data_sets() {
|
||||
let mut results = vec![];
|
||||
let res = serialize_with_codec::<LinearInterpolFastFieldSerializer>(&data);
|
||||
results.push(res);
|
||||
let res = serialize_with_codec::<MultiLinearInterpolFastFieldSerializer>(&data);
|
||||
results.push(res);
|
||||
let res = serialize_with_codec::<fastfield_codecs::bitpacked::BitpackedFastFieldSerializer>(
|
||||
&data,
|
||||
);
|
||||
results.push(res);
|
||||
|
||||
// let best_estimation_codec = results
|
||||
//.iter()
|
||||
//.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
|
||||
//.unwrap();
|
||||
let results: Vec<(f32, f32, FastFieldCodecType)> = [
|
||||
serialize_with_codec::<LinearCodec>(&data),
|
||||
serialize_with_codec::<BlockwiseLinearCodec>(&data),
|
||||
serialize_with_codec::<BlockwiseLinearCodec>(&data),
|
||||
serialize_with_codec::<BitpackedCodec>(&data),
|
||||
]
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect();
|
||||
let best_compression_ratio_codec = results
|
||||
.iter()
|
||||
.min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
|
||||
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
|
||||
.cloned()
|
||||
.unwrap();
|
||||
|
||||
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
|
||||
for (is_applicable, est, comp, name) in results {
|
||||
let (est_cell, ratio_cell) = if !is_applicable {
|
||||
("Codec Disabled".to_string(), "".to_string())
|
||||
} else {
|
||||
(est.to_string(), comp.to_string())
|
||||
};
|
||||
for (est, comp, codec_type) in results {
|
||||
let est_cell = est.to_string();
|
||||
let ratio_cell = comp.to_string();
|
||||
let style = if comp == best_compression_ratio_codec.1 {
|
||||
"Fb"
|
||||
} else {
|
||||
""
|
||||
};
|
||||
|
||||
table.add_row(Row::new(vec![
|
||||
Cell::new(name).style_spec("bFg"),
|
||||
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
|
||||
Cell::new(&ratio_cell).style_spec(style),
|
||||
Cell::new(&est_cell).style_spec(""),
|
||||
]));
|
||||
@@ -91,26 +83,14 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
data_and_names
|
||||
}
|
||||
|
||||
pub fn serialize_with_codec<S: FastFieldCodecSerializer>(
|
||||
pub fn serialize_with_codec<C: FastFieldCodec>(
|
||||
data: &[u64],
|
||||
) -> (bool, f32, f32, &'static str) {
|
||||
let is_applicable = S::is_applicable(&data, stats_from_vec(data));
|
||||
if !is_applicable {
|
||||
return (false, 0.0, 0.0, S::NAME);
|
||||
}
|
||||
let estimation = S::estimate(&data, stats_from_vec(data));
|
||||
let mut out = vec![];
|
||||
S::serialize(
|
||||
&mut out,
|
||||
&data,
|
||||
stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
) -> Option<(f32, f32, FastFieldCodecType)> {
|
||||
let estimation = C::estimate(&data)?;
|
||||
let mut out = Vec::new();
|
||||
C::serialize(&mut out, &data).unwrap();
|
||||
let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
|
||||
(true, estimation, actual_compression, S::NAME)
|
||||
Some((estimation, actual_compression, C::CODEC_TYPE))
|
||||
}
|
||||
|
||||
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
[package]
|
||||
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
|
||||
name = "ownedbytes"
|
||||
version = "0.2.0"
|
||||
edition = "2018"
|
||||
version = "0.3.0"
|
||||
edition = "2021"
|
||||
description = "Expose data as static slice"
|
||||
license = "MIT"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
@@ -21,7 +21,7 @@ impl OwnedBytes {
|
||||
OwnedBytes::new(&[][..])
|
||||
}
|
||||
|
||||
/// Creates an `OwnedBytes` intance given a `StableDeref` object.
|
||||
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
|
||||
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
|
||||
data_holder: T,
|
||||
) -> OwnedBytes {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tantivy-query-grammar"
|
||||
version = "0.15.0"
|
||||
version = "0.18.0"
|
||||
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
|
||||
license = "MIT"
|
||||
categories = ["database-implementations", "data-structures"]
|
||||
@@ -9,9 +9,9 @@ homepage = "https://github.com/quickwit-oss/tantivy"
|
||||
repository = "https://github.com/quickwit-oss/tantivy"
|
||||
readme = "README.md"
|
||||
keywords = ["search", "information", "retrieval"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
combine = {version="4", default-features=false, features=[] }
|
||||
once_cell = "1.7.2"
|
||||
regex ={ version = "1.5.4", default-features = false, features = ["std"] }
|
||||
regex ={ version = "1.5.4", default-features = false, features = ["std", "unicode"] }
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![allow(clippy::derive_partial_eq_without_eq)]
|
||||
|
||||
mod occur;
|
||||
mod query_grammar;
|
||||
mod user_input_ast;
|
||||
|
||||
@@ -2,11 +2,11 @@ use std::fmt;
|
||||
use std::fmt::Write;
|
||||
|
||||
/// Defines whether a term in a query must be present,
|
||||
/// should be present or must be not present.
|
||||
/// should be present or must not be present.
|
||||
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
|
||||
pub enum Occur {
|
||||
/// For a given document to be considered for scoring,
|
||||
/// at least one of the document with the Should or the Must
|
||||
/// at least one of the terms with the Should or the Must
|
||||
/// Occur constraint must be within the document.
|
||||
Should,
|
||||
/// Document without the term are excluded from the search.
|
||||
|
||||
@@ -16,9 +16,9 @@ use crate::Occur;
|
||||
// Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to
|
||||
// special characters.
|
||||
const SPECIAL_CHARS: &[char] = &[
|
||||
'+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ',
|
||||
'+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '!', '\\', '*', ' ',
|
||||
];
|
||||
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|\~|!|\\|\*| )"#;
|
||||
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|!|\\|\*|\s)"#;
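Dropping '~' from the special characters (so that the slop syntax introduced below can be parsed) also removes it from the escaped-characters pattern. A small sketch of the unescaping step used by `field_name()`; only the pattern itself comes from the diff, while the `Lazy` static and helper names are assumptions for illustration.

    use once_cell::sync::Lazy;
    use regex::Regex;

    // Recompiles the pattern from the diff above; `Lazy` is an assumption about
    // how the real `ESCAPED_SPECIAL_CHARS_RE` static is built.
    static ESCAPED_SPECIAL_CHARS_RE: Lazy<Regex> = Lazy::new(|| {
        Regex::new(r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|!|\\|\*|\s)"#).unwrap()
    });

    fn unescape(field: &str) -> String {
        ESCAPED_SPECIAL_CHARS_RE.replace_all(field, "$1").to_string()
    }

    fn main() {
        // An escaped space collapses to a plain space...
        assert_eq!(unescape(r"my\ field\ name"), "my field name");
        // ...while '~' is no longer special, so a preceding backslash is kept.
        assert_eq!(unescape(r"\~my\~field"), r"\~my\~field");
    }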
|
||||
|
||||
/// Parses a field_name
|
||||
/// A field name must have at least one character and be followed by a colon.
|
||||
@@ -34,7 +34,8 @@ fn field_name<'a>() -> impl Parser<&'a str, Output = String> {
|
||||
take_while(|c| !SPECIAL_CHARS.contains(&c)),
|
||||
),
|
||||
'\\',
|
||||
satisfy(|c| SPECIAL_CHARS.contains(&c)),
|
||||
satisfy(|_| true), /* if the next character is not a special char, the \ will be treated
|
||||
* as the \ character. */
|
||||
))
|
||||
.skip(char(':'))
|
||||
.map(|s| ESCAPED_SPECIAL_CHARS_RE.replace_all(&s, "$1").to_string())
|
||||
@@ -66,8 +67,8 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
|
||||
/// 2021-04-13T19:46:26.266051969+00:00
|
||||
///
|
||||
/// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99
|
||||
/// We delegate rejecting such invalid dates to the logical AST compuation code
|
||||
/// which invokes chrono::DateTime::parse_from_rfc3339 on the value to actually parse
|
||||
/// We delegate rejecting such invalid dates to the logical AST computation code
|
||||
/// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse
|
||||
/// it (instead of merely extracting the datetime value as string as done here).
|
||||
fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
|
||||
let two_digits = || recognize::<String, _, _>((digit(), digit()));
|
||||
@@ -119,22 +120,36 @@ fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
|
||||
|
||||
fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
|
||||
let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
|
||||
phrase.or(word())
|
||||
negative_number().or(phrase.or(word()))
|
||||
}
|
||||
|
||||
fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
|
||||
let term_val_with_field = negative_number().or(term_val());
|
||||
(field_name(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
|
||||
(field_name(), term_val(), slop_val()).map(|(field_name, phrase, slop)| UserInputLiteral {
|
||||
field_name: Some(field_name),
|
||||
phrase,
|
||||
slop,
|
||||
})
|
||||
}
|
||||
|
||||
fn slop_val<'a>() -> impl Parser<&'a str, Output = u32> {
|
||||
let slop =
|
||||
(char('~'), many1(digit())).and_then(|(_, slop): (_, String)| match slop.parse::<u32>() {
|
||||
Ok(d) => Ok(d),
|
||||
_ => Err(StringStreamError::UnexpectedParse),
|
||||
});
|
||||
optional(slop).map(|slop| match slop {
|
||||
Some(d) => d,
|
||||
_ => 0,
|
||||
})
|
||||
}
|
||||
|
||||
fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
|
||||
let term_default_field = term_val().map(|phrase| UserInputLiteral {
|
||||
let term_default_field = (term_val(), slop_val()).map(|(phrase, slop)| UserInputLiteral {
|
||||
field_name: None,
|
||||
phrase,
|
||||
slop,
|
||||
});
|
||||
|
||||
attempt(term_query())
|
||||
.or(term_default_field)
|
||||
.map(UserInputLeaf::from)
|
||||
@@ -284,7 +299,7 @@ fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
|
||||
|
||||
fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
|
||||
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
|
||||
Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
|
||||
Some(boost) if (boost - 1.0).abs() > f64::EPSILON => {
|
||||
UserInputAst::Boost(Box::new(leaf), boost)
|
||||
}
|
||||
_ => leaf,
|
||||
@@ -516,14 +531,18 @@ mod test {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_field_name() -> TestParseResult {
|
||||
fn test_field_name() {
|
||||
assert_eq!(
|
||||
super::field_name().parse(".my.field.name:a"),
|
||||
Ok((".my.field.name".to_string(), "a"))
|
||||
);
|
||||
assert_eq!(
|
||||
super::field_name().parse("my\\ field\\ name:a"),
|
||||
Ok(("my field name".to_string(), "a"))
|
||||
super::field_name().parse(r#"にんじん:a"#),
|
||||
Ok(("にんじん".to_string(), "a"))
|
||||
);
|
||||
assert_eq!(
|
||||
super::field_name().parse(r#"my\field:a"#),
|
||||
Ok((r#"my\field"#.to_string(), "a"))
|
||||
);
|
||||
assert!(super::field_name().parse("my field:a").is_err());
|
||||
assert_eq!(
|
||||
@@ -534,14 +553,32 @@ mod test {
|
||||
super::field_name().parse("my_field_name:a"),
|
||||
Ok(("my_field_name".to_string(), "a"))
|
||||
);
|
||||
assert_eq!(
|
||||
super::field_name().parse("myfield.b:hello").unwrap(),
|
||||
("myfield.b".to_string(), "hello")
|
||||
);
|
||||
assert_eq!(
|
||||
super::field_name().parse(r#"myfield\.b:hello"#).unwrap(),
|
||||
(r#"myfield\.b"#.to_string(), "hello")
|
||||
);
|
||||
assert!(super::field_name().parse("my_field_name").is_err());
|
||||
assert!(super::field_name().parse(":a").is_err());
|
||||
assert!(super::field_name().parse("-my_field:a").is_err());
|
||||
assert_eq!(
|
||||
super::field_name().parse("_my_field:a")?,
|
||||
("_my_field".to_string(), "a")
|
||||
super::field_name().parse("_my_field:a"),
|
||||
Ok(("_my_field".to_string(), "a"))
|
||||
);
|
||||
Ok(())
|
||||
assert_eq!(
|
||||
super::field_name().parse("~my~field:a"),
|
||||
Ok(("~my~field".to_string(), "a"))
|
||||
);
|
||||
for special_char in SPECIAL_CHARS.iter() {
|
||||
let query = &format!("\\{special_char}my\\{special_char}field:a");
|
||||
assert_eq!(
|
||||
super::field_name().parse(query),
|
||||
Ok((format!("{special_char}my{special_char}field"), "a"))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -694,4 +731,22 @@ mod test {
|
||||
);
|
||||
test_is_parse_err("abc + ");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slop() {
|
||||
assert!(parse_to_ast().parse("\"a b\"~").is_err());
|
||||
assert!(parse_to_ast().parse("foo:\"a b\"~").is_err());
|
||||
assert!(parse_to_ast().parse("\"a b\"~a").is_err());
|
||||
assert!(parse_to_ast().parse("\"a b\"~100000000000000000").is_err());
|
||||
|
||||
test_parse_query_to_ast_helper("\"a b\"^2~4", "(*(\"a b\")^2 *\"~4\")");
|
||||
test_parse_query_to_ast_helper("\"~Document\"", "\"~Document\"");
|
||||
test_parse_query_to_ast_helper("~Document", "\"~Document\"");
|
||||
test_parse_query_to_ast_helper("a~2", "\"a~2\"");
|
||||
test_parse_query_to_ast_helper("\"a b\"~0", "\"a b\"");
|
||||
test_parse_query_to_ast_helper("\"a b\"~1", "\"a b\"~1");
|
||||
test_parse_query_to_ast_helper("\"a b\"~3", "\"a b\"~3");
|
||||
test_parse_query_to_ast_helper("foo:\"a b\"~300", "\"foo\":\"a b\"~300");
|
||||
test_parse_query_to_ast_helper("\"a b\"~300^2", "(\"a b\"~300)^2");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,14 +40,19 @@ impl Debug for UserInputLeaf {
|
||||
pub struct UserInputLiteral {
|
||||
pub field_name: Option<String>,
|
||||
pub phrase: String,
|
||||
pub slop: u32,
|
||||
}
|
||||
|
||||
impl fmt::Debug for UserInputLiteral {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
match self.field_name {
|
||||
Some(ref field_name) => write!(formatter, "\"{}\":\"{}\"", field_name, self.phrase),
|
||||
None => write!(formatter, "\"{}\"", self.phrase),
|
||||
if let Some(ref field) = self.field_name {
|
||||
write!(formatter, "\"{}\":", field)?;
|
||||
}
|
||||
write!(formatter, "\"{}\"", self.phrase)?;
|
||||
if self.slop > 0 {
|
||||
write!(formatter, "~{}", self.slop)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +64,7 @@ pub enum UserInputBound {
|
||||
}
|
||||
|
||||
impl UserInputBound {
|
||||
fn display_lower(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
fn display_lower(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
match *self {
|
||||
UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word),
|
||||
UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word),
|
||||
@@ -67,7 +72,7 @@ impl UserInputBound {
|
||||
}
|
||||
}
|
||||
|
||||
fn display_upper(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
fn display_upper(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
match *self {
|
||||
UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word),
|
||||
UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word),
|
||||
|
||||
@@ -11,10 +11,10 @@ Tantivy's aggregations have been designed to mimic the
|
||||
|
||||
The code is organized in submodules:
|
||||
|
||||
##bucket
|
||||
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggegations.
|
||||
## bucket
|
||||
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggregations.
|
||||
|
||||
##metric
|
||||
## metric
|
||||
Contains all metric aggregations, like average aggregation. Metric aggregations do not have sub aggregations.
|
||||
|
||||
#### agg_req
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
//!
|
||||
//! [Aggregations] is the top level entry point to create a request, which is a `HashMap<String,
|
||||
//! Aggregation>`.
|
||||
//!
|
||||
//! Requests are compatible with the json format of elasticsearch.
|
||||
//!
|
||||
//! # Example
|
||||
@@ -19,6 +20,7 @@
|
||||
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{
|
||||
//! field: "score".to_string(),
|
||||
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
|
||||
//! keyed: false,
|
||||
//! }),
|
||||
//! sub_aggregation: Default::default(),
|
||||
//! }),
|
||||
@@ -43,19 +45,100 @@
|
||||
//! assert_eq!(agg_req1, agg_req2);
|
||||
//! ```
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub use super::bucket::RangeAggregation;
|
||||
use super::bucket::{HistogramAggregation, TermsAggregation};
|
||||
use super::metric::{AverageAggregation, StatsAggregation};
|
||||
use super::VecWithNames;
|
||||
|
||||
/// The top-level aggregation request structure, which contains [Aggregation] and their user defined
|
||||
/// names.
|
||||
/// names. It is also used in [buckets](BucketAggregation) to define sub-aggregations.
|
||||
///
|
||||
/// The key is the user defined name of the aggregation.
|
||||
pub type Aggregations = HashMap<String, Aggregation>;
|
||||
|
||||
/// Like Aggregations, but optimized to work with the aggregation result
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct AggregationsInternal {
|
||||
pub(crate) metrics: VecWithNames<MetricAggregation>,
|
||||
pub(crate) buckets: VecWithNames<BucketAggregationInternal>,
|
||||
}
|
||||
|
||||
impl From<Aggregations> for AggregationsInternal {
|
||||
fn from(aggs: Aggregations) -> Self {
|
||||
let mut metrics = vec![];
|
||||
let mut buckets = vec![];
|
||||
for (key, agg) in aggs {
|
||||
match agg {
|
||||
Aggregation::Bucket(bucket) => buckets.push((
|
||||
key,
|
||||
BucketAggregationInternal {
|
||||
bucket_agg: bucket.bucket_agg,
|
||||
sub_aggregation: bucket.sub_aggregation.into(),
|
||||
},
|
||||
)),
|
||||
Aggregation::Metric(metric) => metrics.push((key, metric)),
|
||||
}
|
||||
}
|
||||
Self {
|
||||
metrics: VecWithNames::from_entries(metrics),
|
||||
buckets: VecWithNames::from_entries(buckets),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
// Like BucketAggregation, but optimized to work with the result
|
||||
pub(crate) struct BucketAggregationInternal {
|
||||
/// Bucket aggregation strategy to group documents.
|
||||
pub bucket_agg: BucketAggregationType,
|
||||
/// The sub_aggregations in the buckets. Each bucket will aggregate on the document set in the
|
||||
/// bucket.
|
||||
pub sub_aggregation: AggregationsInternal,
|
||||
}
|
||||
|
||||
impl BucketAggregationInternal {
|
||||
pub(crate) fn as_range(&self) -> Option<&RangeAggregation> {
|
||||
match &self.bucket_agg {
|
||||
BucketAggregationType::Range(range) => Some(range),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub(crate) fn as_histogram(&self) -> Option<&HistogramAggregation> {
|
||||
match &self.bucket_agg {
|
||||
BucketAggregationType::Histogram(histogram) => Some(histogram),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub(crate) fn as_term(&self) -> Option<&TermsAggregation> {
|
||||
match &self.bucket_agg {
|
||||
BucketAggregationType::Terms(terms) => Some(terms),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract all fields where the term dictionary is used in the tree.
|
||||
pub fn get_term_dict_field_names(aggs: &Aggregations) -> HashSet<String> {
|
||||
let mut term_dict_field_names = Default::default();
|
||||
for el in aggs.values() {
|
||||
el.get_term_dict_field_names(&mut term_dict_field_names)
|
||||
}
|
||||
term_dict_field_names
|
||||
}
|
||||
|
||||
/// Extract all fast field names used in the tree.
|
||||
pub fn get_fast_field_names(aggs: &Aggregations) -> HashSet<String> {
|
||||
let mut fast_field_names = Default::default();
|
||||
for el in aggs.values() {
|
||||
el.get_fast_field_names(&mut fast_field_names)
|
||||
}
|
||||
fast_field_names
|
||||
}
|
||||
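A short usage note: given a request like `agg_req` from the sketch above, these helpers report which fields the aggregation will touch, for example to check the schema up front (caller-side code, assumed context):

```rust
use std::collections::HashSet;

let fast_fields: HashSet<String> = get_fast_field_names(&agg_req);
let term_dict_fields: HashSet<String> = get_term_dict_field_names(&agg_req);
assert!(fast_fields.contains("score"));
assert!(term_dict_fields.is_empty()); // no terms aggregation in that request
```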
|
||||
/// Aggregation request of [BucketAggregation] or [MetricAggregation].
|
||||
///
|
||||
/// An aggregation is either a bucket or a metric.
|
||||
@@ -68,6 +151,21 @@ pub enum Aggregation {
|
||||
Metric(MetricAggregation),
|
||||
}
|
||||
|
||||
impl Aggregation {
|
||||
fn get_term_dict_field_names(&self, term_field_names: &mut HashSet<String>) {
|
||||
if let Aggregation::Bucket(bucket) = self {
|
||||
bucket.get_term_dict_field_names(term_field_names)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
|
||||
match self {
|
||||
Aggregation::Bucket(bucket) => bucket.get_fast_field_names(fast_field_names),
|
||||
Aggregation::Metric(metric) => metric.get_fast_field_names(fast_field_names),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// BucketAggregations create buckets of documents. Each bucket is associated with a rule which
|
||||
/// determines whether or not a document falls into it. In other words, the buckets
|
||||
/// effectively define document sets. Buckets are not necessarily disjunct, therefore a document can
|
||||
@@ -91,12 +189,43 @@ pub struct BucketAggregation {
|
||||
pub sub_aggregation: Aggregations,
|
||||
}
|
||||
|
||||
impl BucketAggregation {
|
||||
fn get_term_dict_field_names(&self, term_dict_field_names: &mut HashSet<String>) {
|
||||
if let BucketAggregationType::Terms(terms) = &self.bucket_agg {
|
||||
term_dict_field_names.insert(terms.field.to_string());
|
||||
}
|
||||
term_dict_field_names.extend(get_term_dict_field_names(&self.sub_aggregation));
|
||||
}
|
||||
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
|
||||
self.bucket_agg.get_fast_field_names(fast_field_names);
|
||||
fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
|
||||
}
|
||||
}
|
||||
|
||||
/// The bucket aggregation types.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub enum BucketAggregationType {
|
||||
/// Put data into buckets of user-defined ranges.
|
||||
#[serde(rename = "range")]
|
||||
Range(RangeAggregation),
|
||||
/// Put data into histogram buckets with equal intervals.
|
||||
#[serde(rename = "histogram")]
|
||||
Histogram(HistogramAggregation),
|
||||
/// Put data into buckets of terms.
|
||||
#[serde(rename = "terms")]
|
||||
Terms(TermsAggregation),
|
||||
}
|
||||
|
||||
impl BucketAggregationType {
|
||||
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
|
||||
match self {
|
||||
BucketAggregationType::Terms(terms) => fast_field_names.insert(terms.field.to_string()),
|
||||
BucketAggregationType::Range(range) => fast_field_names.insert(range.field.to_string()),
|
||||
BucketAggregationType::Histogram(histogram) => {
|
||||
fast_field_names.insert(histogram.field.to_string())
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// The aggregations in this family compute metrics based on values extracted
|
||||
@@ -116,6 +245,15 @@ pub enum MetricAggregation {
|
||||
Stats(StatsAggregation),
|
||||
}
|
||||
|
||||
impl MetricAggregation {
|
||||
fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
|
||||
match self {
|
||||
MetricAggregation::Average(avg) => fast_field_names.insert(avg.field.to_string()),
|
||||
MetricAggregation::Stats(stats) => fast_field_names.insert(stats.field.to_string()),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -133,6 +271,7 @@ mod tests {
|
||||
(7f64..20f64).into(),
|
||||
(20f64..f64::MAX).into(),
|
||||
],
|
||||
keyed: true,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
@@ -159,11 +298,72 @@ mod tests {
|
||||
{
|
||||
"from": 20.0
|
||||
}
|
||||
]
|
||||
],
|
||||
"keyed": true
|
||||
}
|
||||
}
|
||||
}"#;
|
||||
let agg_req2: String = serde_json::to_string_pretty(&agg_req1).unwrap();
|
||||
assert_eq!(agg_req2, elasticsearch_compatible_json_req);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_fast_field_names() {
|
||||
let agg_req2: Aggregations = vec![
|
||||
(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score2".to_string(),
|
||||
ranges: vec![
|
||||
(f64::MIN..3f64).into(),
|
||||
(3f64..7f64).into(),
|
||||
(7f64..20f64).into(),
|
||||
(20f64..f64::MAX).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
),
|
||||
(
|
||||
"metric".to_string(),
|
||||
Aggregation::Metric(MetricAggregation::Average(
|
||||
AverageAggregation::from_field_name("field123".to_string()),
|
||||
)),
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let agg_req1: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score".to_string(),
|
||||
ranges: vec![
|
||||
(f64::MIN..3f64).into(),
|
||||
(3f64..7f64).into(),
|
||||
(7f64..20f64).into(),
|
||||
(20f64..f64::MAX).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: agg_req2,
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
assert_eq!(
|
||||
get_fast_field_names(&agg_req1),
|
||||
vec![
|
||||
"score".to_string(),
|
||||
"score2".to_string(),
|
||||
"field123".to_string()
|
||||
]
|
||||
.into_iter()
|
||||
.collect()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,19 @@
|
||||
//! This module enhances the request tree with access to fast fields and metadata.
|
||||
|
||||
use std::rc::Rc;
|
||||
use std::sync::atomic::AtomicU32;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
|
||||
use super::bucket::RangeAggregation;
|
||||
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
|
||||
use super::metric::{AverageAggregation, StatsAggregation};
|
||||
use super::segment_agg_result::BucketCount;
|
||||
use super::VecWithNames;
|
||||
use crate::fastfield::DynamicFastFieldReader;
|
||||
use crate::schema::Type;
|
||||
use crate::{SegmentReader, TantivyError};
|
||||
use crate::fastfield::{
|
||||
type_and_cardinality, DynamicFastFieldReader, FastType, MultiValuedFastFieldReader,
|
||||
};
|
||||
use crate::schema::{Cardinality, Type};
|
||||
use crate::{InvertedIndexReader, SegmentReader, TantivyError};
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct AggregationsWithAccessor {
|
||||
@@ -27,34 +34,81 @@ impl AggregationsWithAccessor {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) enum FastFieldAccessor {
|
||||
Multi(MultiValuedFastFieldReader<u64>),
|
||||
Single(DynamicFastFieldReader<u64>),
|
||||
}
|
||||
impl FastFieldAccessor {
|
||||
pub fn as_single(&self) -> Option<&DynamicFastFieldReader<u64>> {
|
||||
match self {
|
||||
FastFieldAccessor::Multi(_) => None,
|
||||
FastFieldAccessor::Single(reader) => Some(reader),
|
||||
}
|
||||
}
|
||||
pub fn as_multi(&self) -> Option<&MultiValuedFastFieldReader<u64>> {
|
||||
match self {
|
||||
FastFieldAccessor::Multi(reader) => Some(reader),
|
||||
FastFieldAccessor::Single(_) => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BucketAggregationWithAccessor {
|
||||
/// In general there can be buckets without fast field access, e.g. buckets that are created
|
||||
/// based on search terms. So eventually this needs to be Option or moved.
|
||||
pub(crate) accessor: DynamicFastFieldReader<u64>,
|
||||
pub(crate) accessor: FastFieldAccessor,
|
||||
pub(crate) inverted_index: Option<Arc<InvertedIndexReader>>,
|
||||
pub(crate) field_type: Type,
|
||||
pub(crate) bucket_agg: BucketAggregationType,
|
||||
pub(crate) sub_aggregation: AggregationsWithAccessor,
|
||||
pub(crate) bucket_count: BucketCount,
|
||||
}
|
||||
|
||||
impl BucketAggregationWithAccessor {
|
||||
fn from_bucket(
|
||||
fn try_from_bucket(
|
||||
bucket: &BucketAggregationType,
|
||||
sub_aggregation: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
bucket_count: Rc<AtomicU32>,
|
||||
max_bucket_count: u32,
|
||||
) -> crate::Result<BucketAggregationWithAccessor> {
|
||||
let mut inverted_index = None;
|
||||
let (accessor, field_type) = match &bucket {
|
||||
BucketAggregationType::Range(RangeAggregation {
|
||||
field: field_name,
|
||||
ranges: _,
|
||||
}) => get_ff_reader_and_validate(reader, field_name)?,
|
||||
field: field_name, ..
|
||||
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
|
||||
BucketAggregationType::Histogram(HistogramAggregation {
|
||||
field: field_name, ..
|
||||
}) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
|
||||
BucketAggregationType::Terms(TermsAggregation {
|
||||
field: field_name, ..
|
||||
}) => {
|
||||
let field = reader
|
||||
.schema()
|
||||
.get_field(field_name)
|
||||
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
|
||||
inverted_index = Some(reader.inverted_index(field)?);
|
||||
get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
|
||||
}
|
||||
};
|
||||
let sub_aggregation = sub_aggregation.clone();
|
||||
Ok(BucketAggregationWithAccessor {
|
||||
accessor,
|
||||
field_type,
|
||||
sub_aggregation: get_aggregations_with_accessor(&sub_aggregation, reader)?,
|
||||
sub_aggregation: get_aggs_with_accessor_and_validate(
|
||||
&sub_aggregation,
|
||||
reader,
|
||||
bucket_count.clone(),
|
||||
max_bucket_count,
|
||||
)?,
|
||||
bucket_agg: bucket.clone(),
|
||||
inverted_index,
|
||||
bucket_count: BucketCount {
|
||||
bucket_count,
|
||||
max_bucket_count,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -68,17 +122,21 @@ pub struct MetricAggregationWithAccessor {
|
||||
}
|
||||
|
||||
impl MetricAggregationWithAccessor {
|
||||
fn from_metric(
|
||||
fn try_from_metric(
|
||||
metric: &MetricAggregation,
|
||||
reader: &SegmentReader,
|
||||
) -> crate::Result<MetricAggregationWithAccessor> {
|
||||
match &metric {
|
||||
MetricAggregation::Average(AverageAggregation { field: field_name })
|
||||
| MetricAggregation::Stats(StatsAggregation { field: field_name }) => {
|
||||
let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?;
|
||||
let (accessor, field_type) =
|
||||
get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?;
|
||||
|
||||
Ok(MetricAggregationWithAccessor {
|
||||
accessor,
|
||||
accessor: accessor
|
||||
.as_single()
|
||||
.expect("unexpected fast field cardinality")
|
||||
.clone(),
|
||||
field_type,
|
||||
metric: metric.clone(),
|
||||
})
|
||||
@@ -87,9 +145,11 @@ impl MetricAggregationWithAccessor {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_aggregations_with_accessor(
|
||||
pub(crate) fn get_aggs_with_accessor_and_validate(
|
||||
aggs: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
bucket_count: Rc<AtomicU32>,
|
||||
max_bucket_count: u32,
|
||||
) -> crate::Result<AggregationsWithAccessor> {
|
||||
let mut metrics = vec![];
|
||||
let mut buckets = vec![];
|
||||
@@ -97,15 +157,17 @@ pub(crate) fn get_aggregations_with_accessor(
|
||||
match agg {
|
||||
Aggregation::Bucket(bucket) => buckets.push((
|
||||
key.to_string(),
|
||||
BucketAggregationWithAccessor::from_bucket(
|
||||
BucketAggregationWithAccessor::try_from_bucket(
|
||||
&bucket.bucket_agg,
|
||||
&bucket.sub_aggregation,
|
||||
reader,
|
||||
Rc::clone(&bucket_count),
|
||||
max_bucket_count,
|
||||
)?,
|
||||
)),
|
||||
Aggregation::Metric(metric) => metrics.push((
|
||||
key.to_string(),
|
||||
MetricAggregationWithAccessor::from_metric(metric, reader)?,
|
||||
MetricAggregationWithAccessor::try_from_metric(metric, reader)?,
|
||||
)),
|
||||
}
|
||||
}
|
||||
@@ -115,26 +177,45 @@ pub(crate) fn get_aggregations_with_accessor(
|
||||
))
|
||||
}
|
||||
|
||||
/// Get the fast field reader with the given cardinality.
|
||||
fn get_ff_reader_and_validate(
|
||||
reader: &SegmentReader,
|
||||
field_name: &str,
|
||||
) -> crate::Result<(DynamicFastFieldReader<u64>, Type)> {
|
||||
cardinality: Cardinality,
|
||||
) -> crate::Result<(FastFieldAccessor, Type)> {
|
||||
let field = reader
|
||||
.schema()
|
||||
.get_field(field_name)
|
||||
.ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
|
||||
let field_type = reader.schema().get_field_entry(field).field_type();
|
||||
if field_type.value_type() != Type::I64
|
||||
&& field_type.value_type() != Type::U64
|
||||
&& field_type.value_type() != Type::F64
|
||||
{
|
||||
|
||||
if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) {
|
||||
if ff_type == FastType::Date {
|
||||
return Err(TantivyError::InvalidArgument(
|
||||
"Unsupported field type date in aggregation".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if cardinality != field_cardinality {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Invalid field cardinality on field {} expected {:?}, but got {:?}",
|
||||
field_name, cardinality, field_cardinality
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Invalid field type in aggregation {:?}, only f64, u64, i64 is supported",
|
||||
"Only fast fields of type f64, u64, i64 are supported, but got {:?} ",
|
||||
field_type.value_type()
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let ff_fields = reader.fast_fields();
|
||||
ff_fields
|
||||
.u64_lenient(field)
|
||||
.map(|field| (field, field_type.value_type()))
|
||||
match cardinality {
|
||||
Cardinality::SingleValue => ff_fields
|
||||
.u64_lenient(field)
|
||||
.map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
|
||||
Cardinality::MultiValues => ff_fields
|
||||
.u64s_lenient(field)
|
||||
.map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,31 +4,37 @@
|
||||
//! intermediate average results, which is the sum and the number of values. The actual average is
|
||||
//! calculated on the step from intermediate to final aggregation result tree.
|
||||
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use itertools::Itertools;
|
||||
use fnv::FnvHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::intermediate_agg_result::{
|
||||
IntermediateAggregationResult, IntermediateAggregationResults, IntermediateBucketResult,
|
||||
IntermediateMetricResult, IntermediateRangeBucketEntry,
|
||||
};
|
||||
use super::agg_req::BucketAggregationInternal;
|
||||
use super::bucket::GetDocCount;
|
||||
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
|
||||
use super::metric::{SingleMetricResult, Stats};
|
||||
use super::Key;
|
||||
use crate::TantivyError;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||
/// The final aggregation result.
|
||||
pub struct AggregationResults(pub HashMap<String, AggregationResult>);
|
||||
|
||||
impl From<IntermediateAggregationResults> for AggregationResults {
|
||||
fn from(tree: IntermediateAggregationResults) -> Self {
|
||||
Self(
|
||||
tree.0
|
||||
.into_iter()
|
||||
.map(|(key, agg)| (key, agg.into()))
|
||||
.collect(),
|
||||
)
|
||||
impl AggregationResults {
|
||||
pub(crate) fn get_value_from_aggregation(
|
||||
&self,
|
||||
name: &str,
|
||||
agg_property: &str,
|
||||
) -> crate::Result<Option<f64>> {
|
||||
if let Some(agg) = self.0.get(name) {
|
||||
agg.get_value_from_aggregation(name, agg_property)
|
||||
} else {
|
||||
// Validation is done during request parsing, so we can't reach this state.
|
||||
Err(TantivyError::InternalError(format!(
|
||||
"Can't find aggregation {:?} in sub_aggregations",
|
||||
name
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,15 +47,20 @@ pub enum AggregationResult {
|
||||
/// Metric result variant.
|
||||
MetricResult(MetricResult),
|
||||
}
|
||||
impl From<IntermediateAggregationResult> for AggregationResult {
|
||||
fn from(tree: IntermediateAggregationResult) -> Self {
|
||||
match tree {
|
||||
IntermediateAggregationResult::Bucket(bucket) => {
|
||||
AggregationResult::BucketResult(bucket.into())
|
||||
}
|
||||
IntermediateAggregationResult::Metric(metric) => {
|
||||
AggregationResult::MetricResult(metric.into())
|
||||
}
|
||||
|
||||
impl AggregationResult {
|
||||
pub(crate) fn get_value_from_aggregation(
|
||||
&self,
|
||||
_name: &str,
|
||||
agg_property: &str,
|
||||
) -> crate::Result<Option<f64>> {
|
||||
match self {
|
||||
AggregationResult::BucketResult(_bucket) => Err(TantivyError::InternalError(
|
||||
"Tried to retrieve value from bucket aggregation. This is not supported and \
|
||||
should not happen during collection phase, but should be caught during validation"
|
||||
.to_string(),
|
||||
)),
|
||||
AggregationResult::MetricResult(metric) => metric.get_value(agg_property),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -64,6 +75,14 @@ pub enum MetricResult {
|
||||
Stats(Stats),
|
||||
}
|
||||
|
||||
impl MetricResult {
|
||||
fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
|
||||
match self {
|
||||
MetricResult::Average(avg) => Ok(avg.value),
|
||||
MetricResult::Stats(stats) => stats.get_value(agg_property),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl From<IntermediateMetricResult> for MetricResult {
|
||||
fn from(metric: IntermediateMetricResult) -> Self {
|
||||
match metric {
|
||||
@@ -81,37 +100,131 @@ impl From<IntermediateMetricResult> for MetricResult {
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum BucketResult {
|
||||
/// This is the default entry for a bucket, which contains a key, count, and optionally
|
||||
/// This is the range entry for a bucket, which contains a key, count, from, to, and optionally
|
||||
/// sub_aggregations.
|
||||
Range {
|
||||
/// The range buckets sorted by range.
|
||||
buckets: Vec<RangeBucketEntry>,
|
||||
buckets: BucketEntries<RangeBucketEntry>,
|
||||
},
|
||||
/// This is the histogram entry for a bucket, which contains a key, count, and optionally
|
||||
/// sub_aggregations.
|
||||
Histogram {
|
||||
/// The buckets.
|
||||
///
|
||||
/// Whether there are holes depends on the request; if min_doc_count is 0, then there are no
|
||||
/// holes between the first and last bucket.
|
||||
/// See [HistogramAggregation](super::bucket::HistogramAggregation)
|
||||
buckets: BucketEntries<BucketEntry>,
|
||||
},
|
||||
/// This is the term result
|
||||
Terms {
|
||||
/// The buckets.
|
||||
///
|
||||
/// See [TermsAggregation](super::bucket::TermsAggregation)
|
||||
buckets: Vec<BucketEntry>,
|
||||
/// The number of documents that didn’t make it into the top N due to shard_size or size.
|
||||
sum_other_doc_count: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// The upper bound error for the doc count of each term.
|
||||
doc_count_error_upper_bound: Option<u64>,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<IntermediateBucketResult> for BucketResult {
|
||||
fn from(result: IntermediateBucketResult) -> Self {
|
||||
match result {
|
||||
IntermediateBucketResult::Range(range_map) => {
|
||||
let mut buckets: Vec<RangeBucketEntry> = range_map
|
||||
.into_iter()
|
||||
.map(|(_, bucket)| bucket.into())
|
||||
.collect_vec();
|
||||
impl BucketResult {
|
||||
pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
|
||||
let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
|
||||
empty_bucket.into_final_bucket_result(req)
|
||||
}
|
||||
}
|
||||
|
||||
buckets.sort_by(|a, b| {
|
||||
a.from
|
||||
.unwrap_or(f64::MIN)
|
||||
.partial_cmp(&b.from.unwrap_or(f64::MIN))
|
||||
.unwrap_or(Ordering::Equal)
|
||||
});
|
||||
BucketResult::Range { buckets }
|
||||
}
|
||||
}
|
||||
/// This is the wrapper for bucket entries, which can be a vector or a hashmap,
|
||||
/// depending on whether the request is keyed or not.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum BucketEntries<T> {
|
||||
/// Vector format bucket entries
|
||||
Vec(Vec<T>),
|
||||
/// HashMap format bucket entries
|
||||
HashMap(FnvHashMap<String, T>),
|
||||
}
|
||||
|
||||
/// This is the default entry for a bucket, which contains a key, count, and optionally
|
||||
/// sub_aggregations.
|
||||
///
|
||||
/// # JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
/// ...
|
||||
/// "my_histogram": {
|
||||
/// "buckets": [
|
||||
/// {
|
||||
/// "key": "2.0",
|
||||
/// "doc_count": 5
|
||||
/// },
|
||||
/// {
|
||||
/// "key": "4.0",
|
||||
/// "doc_count": 2
|
||||
/// },
|
||||
/// {
|
||||
/// "key": "6.0",
|
||||
/// "doc_count": 3
|
||||
/// }
|
||||
/// ]
|
||||
/// }
|
||||
/// ...
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct BucketEntry {
|
||||
/// The identifier of the bucket.
|
||||
pub key: Key,
|
||||
/// Number of documents in the bucket.
|
||||
pub doc_count: u64,
|
||||
#[serde(flatten)]
|
||||
/// Sub-aggregations in this bucket.
|
||||
pub sub_aggregation: AggregationResults,
|
||||
}
|
||||
impl GetDocCount for &BucketEntry {
|
||||
fn doc_count(&self) -> u64 {
|
||||
self.doc_count
|
||||
}
|
||||
}
|
||||
impl GetDocCount for BucketEntry {
|
||||
fn doc_count(&self) -> u64 {
|
||||
self.doc_count
|
||||
}
|
||||
}
|
||||
|
||||
/// This is the range entry for a bucket, which contains a key, count, and optionally
|
||||
/// sub_aggregations.
|
||||
///
|
||||
/// # JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
/// ...
|
||||
/// "my_ranges": {
|
||||
/// "buckets": [
|
||||
/// {
|
||||
/// "key": "*-10",
|
||||
/// "to": 10,
|
||||
/// "doc_count": 5
|
||||
/// },
|
||||
/// {
|
||||
/// "key": "10-20",
|
||||
/// "from": 10,
|
||||
/// "to": 20,
|
||||
/// "doc_count": 2
|
||||
/// },
|
||||
/// {
|
||||
/// "key": "20-*",
|
||||
/// "from": 20,
|
||||
/// "doc_count": 3
|
||||
/// }
|
||||
/// ]
|
||||
/// }
|
||||
/// ...
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RangeBucketEntry {
|
||||
/// The identifier of the bucket.
|
||||
@@ -128,15 +241,3 @@ pub struct RangeBucketEntry {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
impl From<IntermediateRangeBucketEntry> for RangeBucketEntry {
|
||||
fn from(entry: IntermediateRangeBucketEntry) -> Self {
|
||||
RangeBucketEntry {
|
||||
key: entry.key,
|
||||
doc_count: entry.doc_count,
|
||||
sub_aggregation: entry.sub_aggregation.into(),
|
||||
to: entry.to,
|
||||
from: entry.from,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
1442  src/aggregation/bucket/histogram/histogram.rs  Normal file
2  src/aggregation/bucket/histogram/mod.rs  Normal file
@@ -0,0 +1,2 @@
|
||||
mod histogram;
|
||||
pub use histogram::*;
|
||||
@@ -1,10 +1,140 @@
|
||||
//! Module for all bucket aggregations.
|
||||
//!
|
||||
//! Results of final buckets are [BucketEntry](super::agg_result::BucketEntry).
|
||||
//! BucketAggregations create buckets of documents
|
||||
//! [BucketAggregation](super::agg_req::BucketAggregation).
|
||||
//!
|
||||
//! Results of final buckets are [BucketResult](super::agg_result::BucketResult).
|
||||
//! Results of intermediate buckets are
|
||||
//! [IntermediateBucketEntry](super::intermediate_agg_result::IntermediateBucketEntry)
|
||||
//! [IntermediateBucketResult](super::intermediate_agg_result::IntermediateBucketResult)
|
||||
|
||||
mod histogram;
|
||||
mod range;
|
||||
mod term_agg;
|
||||
|
||||
pub use range::RangeAggregation;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub(crate) use histogram::SegmentHistogramCollector;
|
||||
pub use histogram::*;
|
||||
pub(crate) use range::SegmentRangeCollector;
|
||||
pub use range::*;
|
||||
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
|
||||
pub use term_agg::*;
|
||||
|
||||
/// Order for buckets in a bucket aggregation.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub enum Order {
|
||||
/// Asc order
|
||||
#[serde(rename = "asc")]
|
||||
Asc,
|
||||
/// Desc order
|
||||
#[serde(rename = "desc")]
|
||||
Desc,
|
||||
}
|
||||
|
||||
impl Default for Order {
|
||||
fn default() -> Self {
|
||||
Order::Desc
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
/// Order property by which to apply the order
|
||||
pub enum OrderTarget {
|
||||
/// The key of the bucket
|
||||
Key,
|
||||
/// The doc count of the bucket
|
||||
Count,
|
||||
/// Order by the value of the sub-aggregation metric identified by the given `String`.
|
||||
///
|
||||
/// Only single-value metrics are currently supported.
|
||||
SubAggregation(String),
|
||||
}
|
||||
|
||||
impl Default for OrderTarget {
|
||||
fn default() -> Self {
|
||||
OrderTarget::Count
|
||||
}
|
||||
}
|
||||
impl From<&str> for OrderTarget {
|
||||
fn from(val: &str) -> Self {
|
||||
match val {
|
||||
"_key" => OrderTarget::Key,
|
||||
"_count" => OrderTarget::Count,
|
||||
_ => OrderTarget::SubAggregation(val.to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ToString for OrderTarget {
|
||||
fn to_string(&self) -> String {
|
||||
match self {
|
||||
OrderTarget::Key => "_key".to_string(),
|
||||
OrderTarget::Count => "_count".to_string(),
|
||||
OrderTarget::SubAggregation(agg) => agg.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the order. target is either "_count", "_key", or the name of
|
||||
/// a metric sub_aggregation.
|
||||
///
|
||||
/// De/Serializes to elasticsearch compatible JSON.
|
||||
///
|
||||
/// Examples in JSON format:
|
||||
/// { "_count": "asc" }
|
||||
/// { "_key": "asc" }
|
||||
/// { "average_price": "asc" }
|
||||
#[derive(Clone, Default, Debug, PartialEq)]
|
||||
pub struct CustomOrder {
|
||||
/// The target property to sort by.
|
||||
pub target: OrderTarget,
|
||||
/// The order asc or desc
|
||||
pub order: Order,
|
||||
}
|
||||
|
||||
impl Serialize for CustomOrder {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where S: Serializer {
|
||||
let map: HashMap<String, Order> =
|
||||
std::iter::once((self.target.to_string(), self.order)).collect();
|
||||
map.serialize(serializer)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for CustomOrder {
|
||||
fn deserialize<D>(deserializer: D) -> Result<CustomOrder, D::Error>
|
||||
where D: Deserializer<'de> {
|
||||
HashMap::<String, Order>::deserialize(deserializer).and_then(|map| {
|
||||
if let Some((key, value)) = map.into_iter().next() {
|
||||
Ok(CustomOrder {
|
||||
target: key.as_str().into(),
|
||||
order: value,
|
||||
})
|
||||
} else {
|
||||
Err(de::Error::custom(
|
||||
"unexpected empty map in order".to_string(),
|
||||
))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_order_serde_test() {
|
||||
let order = CustomOrder {
|
||||
target: OrderTarget::Key,
|
||||
order: Order::Desc,
|
||||
};
|
||||
|
||||
let order_str = serde_json::to_string(&order).unwrap();
|
||||
assert_eq!(order_str, "{\"_key\":\"desc\"}");
|
||||
let order_deser = serde_json::from_str(&order_str).unwrap();
|
||||
|
||||
assert_eq!(order, order_deser);
|
||||
|
||||
let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("{}");
|
||||
assert!(order_deser.is_err());
|
||||
|
||||
let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("[]");
|
||||
assert!(order_deser.is_err());
|
||||
}
|
||||
|
||||
@@ -1,48 +1,78 @@
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Range;
|
||||
|
||||
use itertools::Itertools;
|
||||
use fastfield_codecs::Column;
|
||||
use fnv::FnvHashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::aggregation::agg_req_with_accessor::{
|
||||
AggregationsWithAccessor, BucketAggregationWithAccessor,
|
||||
};
|
||||
use crate::aggregation::intermediate_agg_result::IntermediateBucketResult;
|
||||
use crate::aggregation::segment_agg_result::{
|
||||
SegmentAggregationResultsCollector, SegmentRangeBucketEntry,
|
||||
use crate::aggregation::intermediate_agg_result::{
|
||||
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
|
||||
};
|
||||
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key};
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
|
||||
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
|
||||
use crate::schema::Type;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
/// Provide user-defined buckets to aggregate on.
|
||||
/// Two special buckets will automatically be created to cover the whole range of values.
|
||||
/// The provided buckets have to be continous.
|
||||
/// During the aggregation, the values extracted from the fast_field `field_name` will be checked
|
||||
/// The provided buckets have to be continuous.
|
||||
/// During the aggregation, the values extracted from the fast_field `field` will be checked
|
||||
/// against each bucket range. Note that this aggregation includes the from value and excludes the
|
||||
/// to value for each range.
|
||||
///
|
||||
/// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
|
||||
/// [BucketEntryKeyCount](crate::aggregation::agg_result::BucketEntryKeyCount) on the
|
||||
/// [RangeBucketEntry](crate::aggregation::agg_result::RangeBucketEntry) on the
|
||||
/// AggregationCollector.
|
||||
///
|
||||
/// Result type is
|
||||
/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
|
||||
/// [crate::aggregation::intermediate_agg_result::IntermediateBucketEntryKeyCount] on the
|
||||
/// [crate::aggregation::intermediate_agg_result::IntermediateRangeBucketEntry] on the
|
||||
/// DistributedAggregationCollector.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
///
|
||||
/// # Limitations/Compatibility
|
||||
/// Overlapping ranges are not yet supported.
|
||||
///
|
||||
/// # Request JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
/// "my_ranges": {
|
||||
/// "field": "score",
|
||||
/// "ranges": [
|
||||
/// { "to": 3.0 },
|
||||
/// { "from": 3.0, "to": 7.0 },
|
||||
/// { "from": 7.0, "to": 20.0 },
|
||||
/// { "from": 20.0 }
|
||||
/// ]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RangeAggregation {
|
||||
/// The field to aggregate on.
|
||||
pub field: String,
|
||||
/// Note that this aggregation includes the from value and excludes the to value for each
|
||||
/// range. If necessary, extra buckets are created to cover the values before the first and after the last provided range.
|
||||
pub ranges: Vec<RangeAggregationRange>,
|
||||
/// Whether to return the buckets as a hash map
|
||||
#[serde(default)]
|
||||
pub keyed: bool,
|
||||
}
|
||||
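For comparison with the JSON above, a sketch of building the same request on the Rust side. The open-ended ranges use `f64::MIN`/`f64::MAX`, which the `From<Range<f64>>` conversion shown further below turns into `None` bounds, and `keyed` stays `false` via `Default`:

```rust
let my_ranges = RangeAggregation {
    field: "score".to_string(),
    ranges: vec![
        (f64::MIN..3.0).into(),  // { "to": 3.0 }
        (3.0..7.0).into(),       // { "from": 3.0, "to": 7.0 }
        (7.0..20.0).into(),      // { "from": 7.0, "to": 20.0 }
        (20.0..f64::MAX).into(), // { "from": 20.0 }
    ],
    ..Default::default()
};
```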
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
/// The range for one range bucket.
|
||||
pub struct RangeAggregationRange {
|
||||
/// Custom key for the range bucket
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub key: Option<String>,
|
||||
/// The from range value, which is inclusive in the range.
|
||||
/// None means an open-ended interval.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub from: Option<f64>,
|
||||
/// The to range value, which is not inclusive in the range.
|
||||
/// None means an open-ended interval.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
@@ -59,12 +89,31 @@ impl From<Range<f64>> for RangeAggregationRange {
|
||||
} else {
|
||||
Some(range.end)
|
||||
};
|
||||
RangeAggregationRange { from, to }
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
from,
|
||||
to,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct SegmentRangeAndBucketEntry {
|
||||
/// Internally used u64 range for one range bucket.
|
||||
pub(crate) struct InternalRangeAggregationRange {
|
||||
/// Custom key for the range bucket
|
||||
key: Option<String>,
|
||||
/// u64 range value
|
||||
range: Range<u64>,
|
||||
}
|
||||
|
||||
impl From<Range<u64>> for InternalRangeAggregationRange {
|
||||
fn from(range: Range<u64>) -> Self {
|
||||
InternalRangeAggregationRange { key: None, range }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub(crate) struct SegmentRangeAndBucketEntry {
|
||||
range: Range<u64>,
|
||||
bucket: SegmentRangeBucketEntry,
|
||||
}
|
||||
@@ -78,58 +127,114 @@ pub struct SegmentRangeCollector {
|
||||
field_type: Type,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq)]
|
||||
pub(crate) struct SegmentRangeBucketEntry {
|
||||
pub key: Key,
|
||||
pub doc_count: u64,
|
||||
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
|
||||
/// The from range of the bucket. Equals f64::MIN when None.
|
||||
pub from: Option<f64>,
|
||||
/// The to range of the bucket. Equals f64::MAX when None. Open interval, `to` is not
|
||||
/// inclusive.
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
impl Debug for SegmentRangeBucketEntry {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("SegmentRangeBucketEntry")
|
||||
.field("key", &self.key)
|
||||
.field("doc_count", &self.doc_count)
|
||||
.field("from", &self.from)
|
||||
.field("to", &self.to)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
impl SegmentRangeBucketEntry {
|
||||
pub(crate) fn into_intermediate_bucket_entry(
|
||||
self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<IntermediateRangeBucketEntry> {
|
||||
let sub_aggregation = if let Some(sub_aggregation) = self.sub_aggregation {
|
||||
sub_aggregation.into_intermediate_aggregations_result(agg_with_accessor)?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
Ok(IntermediateRangeBucketEntry {
|
||||
key: self.key,
|
||||
doc_count: self.doc_count,
|
||||
sub_aggregation,
|
||||
from: self.from,
|
||||
to: self.to,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentRangeCollector {
|
||||
pub fn into_intermediate_bucket_result(self) -> IntermediateBucketResult {
|
||||
pub fn into_intermediate_bucket_result(
|
||||
self,
|
||||
agg_with_accessor: &BucketAggregationWithAccessor,
|
||||
) -> crate::Result<IntermediateBucketResult> {
|
||||
let field_type = self.field_type;
|
||||
|
||||
let buckets = self
|
||||
let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
|
||||
.buckets
|
||||
.into_iter()
|
||||
.map(move |range_bucket| {
|
||||
(
|
||||
range_to_key(&range_bucket.range, &field_type),
|
||||
range_bucket.bucket.into(),
|
||||
)
|
||||
Ok((
|
||||
range_to_string(&range_bucket.range, &field_type),
|
||||
range_bucket
|
||||
.bucket
|
||||
.into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
|
||||
))
|
||||
})
|
||||
.collect();
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
IntermediateBucketResult::Range(buckets)
|
||||
Ok(IntermediateBucketResult::Range(
|
||||
IntermediateRangeBucketResult { buckets },
|
||||
))
|
||||
}
|
||||
|
||||
pub(crate) fn from_req(
|
||||
pub(crate) fn from_req_and_validate(
|
||||
req: &RangeAggregation,
|
||||
sub_aggregation: &AggregationsWithAccessor,
|
||||
bucket_count: &BucketCount,
|
||||
field_type: Type,
|
||||
) -> crate::Result<Self> {
|
||||
// The range input on the request is f64.
|
||||
// We need to convert to u64 ranges, because we read the values as u64.
|
||||
// The mapping from the conversion is monotonic so ordering is preserved.
|
||||
let buckets = extend_validate_ranges(&req.ranges, &field_type)?
|
||||
let buckets: Vec<_> = extend_validate_ranges(&req.ranges, &field_type)?
|
||||
.iter()
|
||||
.map(|range| {
|
||||
let to = if range.end == u64::MAX {
|
||||
let key = range
|
||||
.key
|
||||
.clone()
|
||||
.map(Key::Str)
|
||||
.unwrap_or_else(|| range_to_key(&range.range, &field_type));
|
||||
let to = if range.range.end == u64::MAX {
|
||||
None
|
||||
} else {
|
||||
Some(f64_from_fastfield_u64(range.end, &field_type))
|
||||
Some(f64_from_fastfield_u64(range.range.end, &field_type))
|
||||
};
|
||||
let from = if range.start == u64::MIN {
|
||||
let from = if range.range.start == u64::MIN {
|
||||
None
|
||||
} else {
|
||||
Some(f64_from_fastfield_u64(range.start, &field_type))
|
||||
Some(f64_from_fastfield_u64(range.range.start, &field_type))
|
||||
};
|
||||
let sub_aggregation = if sub_aggregation.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(SegmentAggregationResultsCollector::from_req(
|
||||
Some(SegmentAggregationResultsCollector::from_req_and_validate(
|
||||
sub_aggregation,
|
||||
)?)
|
||||
};
|
||||
Ok(SegmentRangeAndBucketEntry {
|
||||
range: range.clone(),
|
||||
range: range.range.clone(),
|
||||
bucket: SegmentRangeBucketEntry {
|
||||
key: range_to_key(range, &field_type),
|
||||
doc_count: 0,
|
||||
sub_aggregation,
|
||||
key,
|
||||
from,
|
||||
to,
|
||||
},
|
||||
@@ -137,6 +242,9 @@ impl SegmentRangeCollector {
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
bucket_count.add_count(buckets.len() as u32);
|
||||
bucket_count.validate_bucket_count()?;
|
||||
|
||||
Ok(SegmentRangeCollector {
|
||||
buckets,
|
||||
field_type,
|
||||
@@ -149,36 +257,41 @@ impl SegmentRangeCollector {
|
||||
doc: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
let accessor = bucket_with_accessor
|
||||
.accessor
|
||||
.as_single()
|
||||
.expect("unexpected fast field cardinatility");
|
||||
for docs in iter.by_ref() {
|
||||
let val1 = bucket_with_accessor.accessor.get(docs[0]);
|
||||
let val2 = bucket_with_accessor.accessor.get(docs[1]);
|
||||
let val3 = bucket_with_accessor.accessor.get(docs[2]);
|
||||
let val4 = bucket_with_accessor.accessor.get(docs[3]);
|
||||
let val1 = accessor.get_val(docs[0] as u64);
|
||||
let val2 = accessor.get_val(docs[1] as u64);
|
||||
let val3 = accessor.get_val(docs[2] as u64);
|
||||
let val4 = accessor.get_val(docs[3] as u64);
|
||||
let bucket_pos1 = self.get_bucket_pos(val1);
|
||||
let bucket_pos2 = self.get_bucket_pos(val2);
|
||||
let bucket_pos3 = self.get_bucket_pos(val3);
|
||||
let bucket_pos4 = self.get_bucket_pos(val4);
|
||||
|
||||
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
|
||||
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
for doc in iter.remainder() {
|
||||
let val = bucket_with_accessor.accessor.get(*doc);
|
||||
for &doc in iter.remainder() {
|
||||
let val = accessor.get_val(doc as u64);
|
||||
let bucket_pos = self.get_bucket_pos(val);
|
||||
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
|
||||
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
|
||||
}
|
||||
if force_flush {
|
||||
for bucket in &mut self.buckets {
|
||||
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
|
||||
sub_aggregation
|
||||
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush);
|
||||
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -187,13 +300,14 @@ impl SegmentRangeCollector {
|
||||
bucket_pos: usize,
|
||||
doc: DocId,
|
||||
bucket_with_accessor: &AggregationsWithAccessor,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
let bucket = &mut self.buckets[bucket_pos];
|
||||
|
||||
bucket.bucket.doc_count += 1;
|
||||
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
|
||||
sub_aggregation.collect(doc, bucket_with_accessor);
|
||||
sub_aggregation.collect(doc, bucket_with_accessor)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -219,47 +333,61 @@ impl SegmentRangeCollector {
|
||||
/// fast field.
|
||||
/// The alternative would be to convert every value read back to the f64 range, but that is
|
||||
/// more computationally expensive when many documents are hit.
|
||||
fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> Range<u64> {
|
||||
range
|
||||
.from
|
||||
.map(|from| f64_to_fastfield_u64(from, field_type))
|
||||
.unwrap_or(u64::MIN)
|
||||
..range
|
||||
.to
|
||||
.map(|to| f64_to_fastfield_u64(to, field_type))
|
||||
.unwrap_or(u64::MAX)
|
||||
fn to_u64_range(
|
||||
range: &RangeAggregationRange,
|
||||
field_type: &Type,
|
||||
) -> crate::Result<InternalRangeAggregationRange> {
|
||||
let start = if let Some(from) = range.from {
|
||||
f64_to_fastfield_u64(from, field_type)
|
||||
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
|
||||
} else {
|
||||
u64::MIN
|
||||
};
|
||||
|
||||
let end = if let Some(to) = range.to {
|
||||
f64_to_fastfield_u64(to, field_type)
|
||||
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
|
||||
} else {
|
||||
u64::MAX
|
||||
};
|
||||
|
||||
Ok(InternalRangeAggregationRange {
|
||||
key: range.key.clone(),
|
||||
range: start..end,
|
||||
})
|
||||
}
|
||||
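The comparison can stay in u64 space because the f64-to-u64 conversion used for fast fields is order preserving. A sketch of the standard transform to illustrate why ordering survives (this is only an illustration, not necessarily tantivy's exact implementation of `f64_to_fastfield_u64`):

```rust
/// Maps f64 to u64 such that `a < b` (as f64) implies `map(a) < map(b)` (as u64).
fn monotonic_f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_negative() {
        // Negative floats: flipping all bits makes more negative values map lower.
        !bits
    } else {
        // Non-negative floats: setting the sign bit puts them above all negatives.
        bits ^ (1u64 << 63)
    }
}

#[test]
fn monotonic_mapping_preserves_order() {
    assert!(monotonic_f64_to_u64(-1.5) < monotonic_f64_to_u64(0.0));
    assert!(monotonic_f64_to_u64(0.0) < monotonic_f64_to_u64(3.0));
    assert!(monotonic_f64_to_u64(3.0) < monotonic_f64_to_u64(20.0));
}
```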
|
||||
/// Extends the provided buckets to contain the whole value range, by inserting buckets at the
|
||||
/// beginning and end.
|
||||
/// beginning and end and filling gaps.
|
||||
fn extend_validate_ranges(
|
||||
buckets: &[RangeAggregationRange],
|
||||
field_type: &Type,
|
||||
) -> crate::Result<Vec<Range<u64>>> {
|
||||
) -> crate::Result<Vec<InternalRangeAggregationRange>> {
|
||||
let mut converted_buckets = buckets
|
||||
.iter()
|
||||
.map(|range| to_u64_range(range, field_type))
|
||||
.collect_vec();
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
converted_buckets.sort_by_key(|bucket| bucket.start);
|
||||
if converted_buckets[0].start != u64::MIN {
|
||||
converted_buckets.insert(0, u64::MIN..converted_buckets[0].start);
|
||||
converted_buckets.sort_by_key(|bucket| bucket.range.start);
|
||||
if converted_buckets[0].range.start != u64::MIN {
|
||||
converted_buckets.insert(0, (u64::MIN..converted_buckets[0].range.start).into());
|
||||
}
|
||||
|
||||
if converted_buckets[converted_buckets.len() - 1].end != u64::MAX {
|
||||
converted_buckets.push(converted_buckets[converted_buckets.len() - 1].end..u64::MAX);
|
||||
if converted_buckets[converted_buckets.len() - 1].range.end != u64::MAX {
|
||||
converted_buckets
|
||||
.push((converted_buckets[converted_buckets.len() - 1].range.end..u64::MAX).into());
|
||||
}
|
||||
|
||||
// fill up holes in the ranges
|
||||
let find_hole = |converted_buckets: &[Range<u64>]| {
|
||||
let find_hole = |converted_buckets: &[InternalRangeAggregationRange]| {
|
||||
for (pos, ranges) in converted_buckets.windows(2).enumerate() {
|
||||
if ranges[0].end > ranges[1].start {
|
||||
if ranges[0].range.end > ranges[1].range.start {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Overlapping ranges not supported range {:?}, range+1 {:?}",
|
||||
ranges[0], ranges[1]
|
||||
)));
|
||||
}
|
||||
if ranges[0].end != ranges[1].start {
|
||||
if ranges[0].range.end != ranges[1].range.start {
|
||||
return Ok(Some(pos));
|
||||
}
|
||||
}
|
||||
@@ -267,16 +395,17 @@ fn extend_validate_ranges(
|
||||
};
|
||||
|
||||
while let Some(hole_pos) = find_hole(&converted_buckets)? {
|
||||
let new_range = converted_buckets[hole_pos].end..converted_buckets[hole_pos + 1].start;
|
||||
converted_buckets.insert(hole_pos + 1, new_range);
|
||||
let new_range =
|
||||
converted_buckets[hole_pos].range.end..converted_buckets[hole_pos + 1].range.start;
|
||||
converted_buckets.insert(hole_pos + 1, new_range.into());
|
||||
}
|
||||
|
||||
Ok(converted_buckets)
|
||||
}
|
||||
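To make the extension behaviour concrete, a hedged example of what the doc comment describes, mirroring the input of the `bucket_test_extend_range_hole` test below rather than calling the crate-private function directly:

```rust
// Requested ranges, in f64 space:
let _requested: Vec<RangeAggregationRange> =
    vec![(10f64..20f64).into(), (30f64..40f64).into()];
// After extension and gap filling, the collector covers (in f64 terms):
//   *..10, 10..20, 20..30 (gap filled), 30..40, 40..*
// where "*" stands for u64::MIN / u64::MAX after the conversion to u64 space.
```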
|
||||
pub fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
|
||||
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
|
||||
// is_start is there for malformed requests, e.g. if the user passes the range u64::MIN..0.0,
|
||||
// it should be rendererd as "*-0" and not "*-*"
|
||||
// it should be rendered as "*-0" and not "*-*"
|
||||
let to_str = |val: u64, is_start: bool| {
|
||||
if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
|
||||
"*".to_string()
|
||||
@@ -288,23 +417,19 @@ pub fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
|
||||
format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
|
||||
}
|
||||
|
||||
pub fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
|
||||
pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
|
||||
Key::Str(range_to_string(range, field_type))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use serde_json::Value;
|
||||
|
||||
use super::*;
|
||||
use crate::aggregation::agg_req::{
|
||||
Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
|
||||
};
|
||||
use crate::aggregation::tests::get_test_index_with_num_docs;
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};
|
||||
use crate::fastfield::FastValue;
|
||||
use crate::query::AllQuery;
|
||||
|
||||
pub fn get_collector_from_ranges(
|
||||
ranges: Vec<RangeAggregationRange>,
|
||||
@@ -313,9 +438,16 @@ mod tests {
|
||||
let req = RangeAggregation {
|
||||
field: "dummy".to_string(),
|
||||
ranges,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
SegmentRangeCollector::from_req(&req, &Default::default(), field_type).unwrap()
|
||||
SegmentRangeCollector::from_req_and_validate(
|
||||
&req,
|
||||
&Default::default(),
|
||||
&Default::default(),
|
||||
field_type,
|
||||
)
|
||||
.expect("unexpected error")
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -328,6 +460,7 @@ mod tests {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
@@ -335,13 +468,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req);
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let agg_res = searcher.search(&AllQuery, &collector).unwrap();
|
||||
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(res["range"]["buckets"][0]["key"], "*-0");
|
||||
assert_eq!(res["range"]["buckets"][0]["doc_count"], 0);
|
||||
@@ -355,6 +482,131 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn range_keyed_buckets_test() -> crate::Result<()> {
|
||||
let index = get_test_index_with_num_docs(false, 100)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
|
||||
keyed: true,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"range": {
|
||||
"buckets": {
|
||||
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
|
||||
"0-0.1": {"key": "0-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
|
||||
"0.1-0.2": {"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
|
||||
"0.2-*": {"key": "0.2-*", "doc_count": 80, "from": 0.2},
|
||||
}
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn range_custom_key_test() -> crate::Result<()> {
|
||||
let index = get_test_index_with_num_docs(false, 100)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![
|
||||
RangeAggregationRange {
|
||||
key: Some("custom-key-0-to-0.1".to_string()),
|
||||
from: Some(0f64),
|
||||
to: Some(0.1f64),
|
||||
},
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
from: Some(0.1f64),
|
||||
to: Some(0.2f64),
|
||||
},
|
||||
],
|
||||
keyed: false,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"range": {
|
||||
"buckets": [
|
||||
{"key": "*-0", "doc_count": 0, "to": 0.0},
|
||||
{"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
|
||||
{"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
|
||||
{"key": "0.2-*", "doc_count": 80, "from": 0.2}
|
||||
]
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
|
||||
let index = get_test_index_with_num_docs(false, 100)?;
|
||||
|
||||
let agg_req: Aggregations = vec![(
|
||||
"range".to_string(),
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "fraction_f64".to_string(),
|
||||
ranges: vec![RangeAggregationRange {
|
||||
key: Some("custom-key-0-to-0.1".to_string()),
|
||||
from: Some(0f64),
|
||||
to: Some(0.1f64),
|
||||
}],
|
||||
keyed: true,
|
||||
}),
|
||||
sub_aggregation: Default::default(),
|
||||
}),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let res = exec_request_with_query(agg_req, &index, None)?;
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
json!({
|
||||
"range": {
|
||||
"buckets": {
|
||||
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
|
||||
"custom-key-0-to-0.1": {"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
|
||||
"0.1-*": {"key": "0.1-*", "doc_count": 90, "from": 0.1},
|
||||
}
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_test_extend_range_hole() {
|
||||
let buckets = vec![(10f64..20f64).into(), (30f64..40f64).into()];
|
||||
@@ -433,6 +685,7 @@ mod tests {
|
||||
|
||||
let ranges = vec![
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
to: Some(10.0),
|
||||
from: None,
|
||||
},
|
||||
@@ -442,11 +695,13 @@ mod tests {
|
||||
|
||||
let ranges = vec![
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
to: Some(10.0),
|
||||
from: None,
|
||||
},
|
||||
(10.0..100.0).into(),
|
||||
RangeAggregationRange {
|
||||
key: None,
|
||||
to: None,
|
||||
from: Some(100.0),
|
||||
},
|
||||
@@ -456,11 +711,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn range_binary_search_test_f64() {
|
||||
let ranges = vec![
|
||||
//(f64::MIN..10.0).into(),
|
||||
(10.0..100.0).into(),
|
||||
//(100.0..f64::MAX).into(),
|
||||
];
|
||||
let ranges = vec![(10.0..100.0).into()];
|
||||
|
||||
let collector = get_collector_from_ranges(ranges, Type::F64);
|
||||
let search = |val: u64| collector.get_bucket_pos(val);
|
||||
@@ -479,6 +730,7 @@ mod tests {
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
|
||||
use itertools::Itertools;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::thread_rng;
|
||||
|
||||
|
||||
1364  src/aggregation/bucket/term_agg.rs  Normal file
@@ -1,23 +1,35 @@
|
||||
use std::rc::Rc;
|
||||
|
||||
use super::agg_req::Aggregations;
|
||||
use super::agg_req_with_accessor::AggregationsWithAccessor;
|
||||
use super::agg_result::AggregationResults;
|
||||
use super::intermediate_agg_result::IntermediateAggregationResults;
|
||||
use super::segment_agg_result::SegmentAggregationResultsCollector;
|
||||
use crate::aggregation::agg_req_with_accessor::get_aggregations_with_accessor;
|
||||
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::TantivyError;
|
||||
use crate::{SegmentReader, TantivyError};
|
||||
|
||||
/// The default max bucket count, before the aggregation fails.
|
||||
pub const MAX_BUCKET_COUNT: u32 = 65000;
|
||||
|
||||
/// Collector for aggregations.
|
||||
///
|
||||
/// The collector computes all aggregations defined by the underlying aggregation request.
|
||||
pub struct AggregationCollector {
|
||||
agg: Aggregations,
|
||||
max_bucket_count: u32,
|
||||
}
|
||||
|
||||
impl AggregationCollector {
|
||||
/// Create collector from aggregation request.
|
||||
pub fn from_aggs(agg: Aggregations) -> Self {
|
||||
Self { agg }
|
||||
///
|
||||
/// Aggregation fails when the total bucket count is higher than max_bucket_count.
|
||||
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
|
||||
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
|
||||
Self {
|
||||
agg,
|
||||
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
|
||||
}
|
||||
}
|
||||
}
|
||||
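A sketch of end-to-end usage with the new signature, based on the test code in this changeset (the index, request and query are assumed to exist; the snippet runs inside a function returning `tantivy::Result`):

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;

// `agg_req: Aggregations` is a request as built in agg_req.rs;
// `index` is a tantivy Index whose schema marks the aggregated fields as fast fields.
let collector = AggregationCollector::from_aggs(agg_req, None); // None => MAX_BUCKET_COUNT (65000)
let reader = index.reader()?;
let searcher = reader.searcher();
let agg_res = searcher.search(&AllQuery, &collector)?;
let json = serde_json::to_string_pretty(&agg_res)?;
println!("{}", json);
```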
|
||||
@@ -27,16 +39,22 @@ impl AggregationCollector {
|
||||
///
|
||||
/// # Purpose
|
||||
/// AggregationCollector returns `IntermediateAggregationResults` and not the final
|
||||
/// `AggregationResults`, so that results from differenct indices can be merged and then converted
|
||||
/// into the final `AggregationResults` via the `into()` method.
|
||||
/// `AggregationResults`, so that results from different indices can be merged and then converted
|
||||
/// into the final `AggregationResults` via the `into_final_result()` method.
|
||||
pub struct DistributedAggregationCollector {
|
||||
agg: Aggregations,
|
||||
max_bucket_count: u32,
|
||||
}
|
||||
|
||||
impl DistributedAggregationCollector {
|
||||
/// Create collector from aggregation request.
|
||||
pub fn from_aggs(agg: Aggregations) -> Self {
|
||||
Self { agg }
|
||||
///
|
||||
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
|
||||
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
|
||||
Self {
|
||||
agg,
|
||||
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,12 +68,11 @@ impl Collector for DistributedAggregationCollector {
|
||||
_segment_local_id: crate::SegmentOrdinal,
|
||||
reader: &crate::SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let aggs_with_accessor = get_aggregations_with_accessor(&self.agg, reader)?;
|
||||
let result = SegmentAggregationResultsCollector::from_req(&aggs_with_accessor)?;
|
||||
Ok(AggregationSegmentCollector {
|
||||
aggs: aggs_with_accessor,
|
||||
result,
|
||||
})
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(
|
||||
&self.agg,
|
||||
reader,
|
||||
self.max_bucket_count,
|
||||
)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
@@ -80,12 +97,11 @@ impl Collector for AggregationCollector {
|
||||
_segment_local_id: crate::SegmentOrdinal,
|
||||
reader: &crate::SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let aggs_with_accessor = get_aggregations_with_accessor(&self.agg, reader)?;
|
||||
let result = SegmentAggregationResultsCollector::from_req(&aggs_with_accessor)?;
|
||||
Ok(AggregationSegmentCollector {
|
||||
aggs: aggs_with_accessor,
|
||||
result,
|
||||
})
|
||||
AggregationSegmentCollector::from_agg_req_and_reader(
|
||||
&self.agg,
|
||||
reader,
|
||||
self.max_bucket_count,
|
||||
)
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
@@ -96,40 +112,72 @@ impl Collector for AggregationCollector {
|
||||
&self,
|
||||
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
|
||||
) -> crate::Result<Self::Fruit> {
|
||||
merge_fruits(segment_fruits).map(|res| res.into())
|
||||
let res = merge_fruits(segment_fruits)?;
|
||||
res.into_final_bucket_result(self.agg.clone())
|
||||
}
|
||||
}
|
||||
|
||||
fn merge_fruits(
|
||||
mut segment_fruits: Vec<IntermediateAggregationResults>,
|
||||
mut segment_fruits: Vec<crate::Result<IntermediateAggregationResults>>,
|
||||
) -> crate::Result<IntermediateAggregationResults> {
|
||||
if let Some(mut fruit) = segment_fruits.pop() {
|
||||
if let Some(fruit) = segment_fruits.pop() {
|
||||
let mut fruit = fruit?;
|
||||
for next_fruit in segment_fruits {
|
||||
fruit.merge_fruits(&next_fruit);
|
||||
fruit.merge_fruits(next_fruit?);
|
||||
}
|
||||
Ok(fruit)
|
||||
} else {
|
||||
Err(TantivyError::InvalidArgument(
|
||||
"no fruits provided in merge_fruits".to_string(),
|
||||
))
|
||||
Ok(IntermediateAggregationResults::default())
|
||||
}
|
||||
}
|
||||
|
||||
/// AggregationSegmentCollector does the aggregation collection on a segment.
|
||||
pub struct AggregationSegmentCollector {
|
||||
aggs: AggregationsWithAccessor,
|
||||
aggs_with_accessor: AggregationsWithAccessor,
|
||||
result: SegmentAggregationResultsCollector,
|
||||
error: Option<TantivyError>,
|
||||
}
|
||||
|
||||
impl AggregationSegmentCollector {
|
||||
/// Creates an AggregationSegmentCollector from an [Aggregations] request and a segment reader.
|
||||
/// Also includes validation, e.g. checking field types and existence.
|
||||
pub fn from_agg_req_and_reader(
|
||||
agg: &Aggregations,
|
||||
reader: &SegmentReader,
|
||||
max_bucket_count: u32,
|
||||
) -> crate::Result<Self> {
|
||||
let aggs_with_accessor =
|
||||
get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
|
||||
let result =
|
||||
SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
|
||||
Ok(AggregationSegmentCollector {
|
||||
aggs_with_accessor,
|
||||
result,
|
||||
error: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentCollector for AggregationSegmentCollector {
|
||||
type Fruit = IntermediateAggregationResults;
|
||||
type Fruit = crate::Result<IntermediateAggregationResults>;
|
||||
|
||||
#[inline]
|
||||
fn collect(&mut self, doc: crate::DocId, _score: crate::Score) {
|
||||
self.result.collect(doc, &self.aggs);
|
||||
if self.error.is_some() {
|
||||
return;
|
||||
}
|
||||
if let Err(err) = self.result.collect(doc, &self.aggs_with_accessor) {
|
||||
self.error = Some(err);
|
||||
}
|
||||
}
|
||||
|
||||
fn harvest(mut self) -> Self::Fruit {
|
||||
self.result.flush_staged_docs(&self.aggs, true);
|
||||
self.result.into()
|
||||
if let Some(err) = self.error {
|
||||
return Err(err);
|
||||
}
|
||||
self.result
|
||||
.flush_staged_docs(&self.aggs_with_accessor, true)?;
|
||||
self.result
|
||||
.into_intermediate_aggregations_result(&self.aggs_with_accessor)
|
||||
}
|
||||
}
|
||||
|
||||
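Because the segment fruit type is now `crate::Result<IntermediateAggregationResults>`, any custom merge has to start from the first fruit rather than from an empty default: an empty side would silently drop the other side's entries, since `merge_fruits` only merges entries present on both sides. A sketch mirroring the `merge_fruits` helper above (not part of the diff):

```rust
use tantivy::aggregation::intermediate_agg_result::IntermediateAggregationResults;

// Surface the first segment error, otherwise fold all fruits into the first one.
fn merge_segment_fruits(
    mut fruits: Vec<tantivy::Result<IntermediateAggregationResults>>,
) -> tantivy::Result<IntermediateAggregationResults> {
    let mut merged = match fruits.pop() {
        Some(first) => first?,
        None => return Ok(IntermediateAggregationResults::default()),
    };
    for fruit in fruits {
        merged.merge_fruits(fruit?);
    }
    Ok(merged)
}
```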
@@ -2,47 +2,187 @@
|
||||
//! Intermediate aggregation results can be used to merge results between segments or between
|
||||
//! indices.
|
||||
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use itertools::Itertools;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::metric::{IntermediateAverage, IntermediateStats};
|
||||
use super::segment_agg_result::{
|
||||
SegmentAggregationResultsCollector, SegmentBucketResultCollector, SegmentMetricResultCollector,
|
||||
SegmentRangeBucketEntry,
|
||||
use super::agg_req::{
|
||||
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
|
||||
MetricAggregation,
|
||||
};
|
||||
use super::{Key, VecWithNames};
|
||||
use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
|
||||
use super::bucket::{
|
||||
cut_off_buckets, get_agg_name_and_property, intermediate_histogram_buckets_to_final_buckets,
|
||||
GetDocCount, Order, OrderTarget, SegmentHistogramBucketEntry, TermsAggregation,
|
||||
};
|
||||
use super::metric::{IntermediateAverage, IntermediateStats};
|
||||
use super::segment_agg_result::SegmentMetricResultCollector;
|
||||
use super::{Key, SerializedKey, VecWithNames};
|
||||
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
|
||||
use crate::aggregation::bucket::TermsAggregationInternal;
|
||||
|
||||
/// Contains the intermediate aggregation result, which is optimized to be merged with other
|
||||
/// intermediate results.
|
||||
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct IntermediateAggregationResults(pub(crate) VecWithNames<IntermediateAggregationResult>);
|
||||
|
||||
impl From<SegmentAggregationResultsCollector> for IntermediateAggregationResults {
|
||||
fn from(tree: SegmentAggregationResultsCollector) -> Self {
|
||||
let mut data = vec![];
|
||||
for (key, bucket) in tree.buckets.into_iter() {
|
||||
data.push((key, IntermediateAggregationResult::Bucket(bucket.into())));
|
||||
}
|
||||
for (key, metric) in tree.metrics.into_iter() {
|
||||
data.push((key, IntermediateAggregationResult::Metric(metric.into())));
|
||||
}
|
||||
Self(VecWithNames::from_entries(data))
|
||||
}
|
||||
pub struct IntermediateAggregationResults {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) metrics: Option<VecWithNames<IntermediateMetricResult>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(crate) buckets: Option<VecWithNames<IntermediateBucketResult>>,
|
||||
}
|
||||
|
||||
impl IntermediateAggregationResults {
|
||||
/// Convert intermediate result and its aggregation request to the final result.
|
||||
pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
|
||||
self.into_final_bucket_result_internal(&(req.into()))
|
||||
}
|
||||
|
||||
/// Convert intermediate result and its aggregation request to the final result.
|
||||
///
|
||||
/// Internal function. AggregationsInternal is used instead of Aggregations; it is optimized
/// for internal processing by splitting metrics and buckets into separate groups.
|
||||
pub(crate) fn into_final_bucket_result_internal(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<AggregationResults> {
|
||||
// Important assumption:
|
||||
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
|
||||
// request
|
||||
let mut results: HashMap<String, AggregationResult> = HashMap::new();
|
||||
|
||||
if let Some(buckets) = self.buckets {
|
||||
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
|
||||
} else {
|
||||
// When there are no buckets, we create empty buckets, so that the serialized json
|
||||
// format is constant
|
||||
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
|
||||
};
|
||||
|
||||
if let Some(metrics) = self.metrics {
|
||||
convert_and_add_final_metrics_to_result(&mut results, metrics);
|
||||
} else {
|
||||
// When there are no metrics, we create empty metric results, so that the serialized
|
||||
// json format is constant
|
||||
add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
|
||||
}
|
||||
|
||||
Ok(AggregationResults(results))
|
||||
}
|
||||
|
||||
pub(crate) fn empty_from_req(req: &AggregationsInternal) -> Self {
|
||||
let metrics = if req.metrics.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let metrics = req
|
||||
.metrics
|
||||
.iter()
|
||||
.map(|(key, req)| {
|
||||
(
|
||||
key.to_string(),
|
||||
IntermediateMetricResult::empty_from_req(req),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
Some(VecWithNames::from_entries(metrics))
|
||||
};
|
||||
|
||||
let buckets = if req.buckets.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let buckets = req
|
||||
.buckets
|
||||
.iter()
|
||||
.map(|(key, req)| {
|
||||
(
|
||||
key.to_string(),
|
||||
IntermediateBucketResult::empty_from_req(&req.bucket_agg),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
Some(VecWithNames::from_entries(buckets))
|
||||
};
|
||||
|
||||
Self { metrics, buckets }
|
||||
}
|
||||
|
||||
/// Merge another intermediate aggregation result into this result.
///
/// The order of the values needs to be the same on both results. This is ensured when the same
/// keys are present on the underlying VecWithNames struct.
|
||||
pub fn merge_fruits(&mut self, other: &IntermediateAggregationResults) {
|
||||
for (tree_left, tree_right) in self.0.values_mut().zip(other.0.values()) {
|
||||
tree_left.merge_fruits(tree_right);
|
||||
pub fn merge_fruits(&mut self, other: IntermediateAggregationResults) {
|
||||
if let (Some(buckets_left), Some(buckets_right)) = (&mut self.buckets, other.buckets) {
|
||||
for (bucket_left, bucket_right) in
|
||||
buckets_left.values_mut().zip(buckets_right.into_values())
|
||||
{
|
||||
bucket_left.merge_fruits(bucket_right);
|
||||
}
|
||||
}
|
||||
|
||||
if let (Some(metrics_left), Some(metrics_right)) = (&mut self.metrics, other.metrics) {
|
||||
for (metric_left, metric_right) in
|
||||
metrics_left.values_mut().zip(metrics_right.into_values())
|
||||
{
|
||||
metric_left.merge_fruits(metric_right);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn convert_and_add_final_metrics_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
metrics: VecWithNames<IntermediateMetricResult>,
|
||||
) {
|
||||
results.extend(
|
||||
metrics
|
||||
.into_iter()
|
||||
.map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
|
||||
);
|
||||
}
|
||||
|
||||
fn add_empty_final_metrics_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
req_metrics: &VecWithNames<MetricAggregation>,
|
||||
) -> crate::Result<()> {
|
||||
results.extend(req_metrics.iter().map(|(key, req)| {
|
||||
let empty_bucket = IntermediateMetricResult::empty_from_req(req);
|
||||
(
|
||||
key.to_string(),
|
||||
AggregationResult::MetricResult(empty_bucket.into()),
|
||||
)
|
||||
}));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_empty_final_buckets_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
req_buckets: &VecWithNames<BucketAggregationInternal>,
|
||||
) -> crate::Result<()> {
|
||||
let requested_buckets = req_buckets.iter();
|
||||
for (key, req) in requested_buckets {
|
||||
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
|
||||
results.insert(key.to_string(), empty_bucket);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn convert_and_add_final_buckets_to_result(
|
||||
results: &mut HashMap<String, AggregationResult>,
|
||||
buckets: VecWithNames<IntermediateBucketResult>,
|
||||
req_buckets: &VecWithNames<BucketAggregationInternal>,
|
||||
) -> crate::Result<()> {
|
||||
assert_eq!(buckets.len(), req_buckets.len());
|
||||
|
||||
let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
|
||||
for ((key, bucket), req) in buckets_with_request {
|
||||
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
|
||||
results.insert(key, result);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// An aggregation is either a bucket or a metric.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub enum IntermediateAggregationResult {
|
||||
@@ -52,28 +192,6 @@ pub enum IntermediateAggregationResult {
|
||||
Metric(IntermediateMetricResult),
|
||||
}
|
||||
|
||||
impl IntermediateAggregationResult {
|
||||
fn merge_fruits(&mut self, other: &IntermediateAggregationResult) {
|
||||
match (self, other) {
|
||||
(
|
||||
IntermediateAggregationResult::Bucket(res_left),
|
||||
IntermediateAggregationResult::Bucket(res_right),
|
||||
) => {
|
||||
res_left.merge_fruits(res_right);
|
||||
}
|
||||
(
|
||||
IntermediateAggregationResult::Metric(res_left),
|
||||
IntermediateAggregationResult::Metric(res_right),
|
||||
) => {
|
||||
res_left.merge_fruits(res_right);
|
||||
}
|
||||
_ => {
|
||||
panic!("incompatible types in aggregation tree on merge fruits");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Holds the intermediate data for metric results
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub enum IntermediateMetricResult {
|
||||
@@ -97,7 +215,17 @@ impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
|
||||
}
|
||||
|
||||
impl IntermediateMetricResult {
|
||||
fn merge_fruits(&mut self, other: &IntermediateMetricResult) {
|
||||
pub(crate) fn empty_from_req(req: &MetricAggregation) -> Self {
|
||||
match req {
|
||||
MetricAggregation::Average(_) => {
|
||||
IntermediateMetricResult::Average(IntermediateAverage::default())
|
||||
}
|
||||
MetricAggregation::Stats(_) => {
|
||||
IntermediateMetricResult::Stats(IntermediateStats::default())
|
||||
}
|
||||
}
|
||||
}
|
||||
fn merge_fruits(&mut self, other: IntermediateMetricResult) {
|
||||
match (self, other) {
|
||||
(
|
||||
IntermediateMetricResult::Average(avg_data_left),
|
||||
@@ -112,7 +240,7 @@ impl IntermediateMetricResult {
|
||||
stats_left.merge_fruits(stats_right);
|
||||
}
|
||||
_ => {
|
||||
panic!("incompatible fruit types in tree {:?}", other);
|
||||
panic!("incompatible fruit types in tree");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -124,36 +252,300 @@ impl IntermediateMetricResult {
|
||||
pub enum IntermediateBucketResult {
|
||||
/// This is the range entry for a bucket, which contains a key, count, from, to, and optionally
|
||||
/// sub_aggregations.
|
||||
Range(HashMap<Key, IntermediateRangeBucketEntry>),
|
||||
Range(IntermediateRangeBucketResult),
|
||||
/// This is the histogram entry for a bucket, which contains a key, count, and optionally
|
||||
/// sub_aggregations.
|
||||
Histogram {
|
||||
/// The buckets
|
||||
buckets: Vec<IntermediateHistogramBucketEntry>,
|
||||
},
|
||||
/// Term aggregation
|
||||
Terms(IntermediateTermBucketResult),
|
||||
}
|
||||
|
||||
impl From<SegmentBucketResultCollector> for IntermediateBucketResult {
|
||||
fn from(collector: SegmentBucketResultCollector) -> Self {
|
||||
match collector {
|
||||
SegmentBucketResultCollector::Range(range) => range.into_intermediate_bucket_result(),
|
||||
impl IntermediateBucketResult {
|
||||
pub(crate) fn into_final_bucket_result(
|
||||
self,
|
||||
req: &BucketAggregationInternal,
|
||||
) -> crate::Result<BucketResult> {
|
||||
match self {
|
||||
IntermediateBucketResult::Range(range_res) => {
|
||||
let mut buckets: Vec<RangeBucketEntry> = range_res
|
||||
.buckets
|
||||
.into_iter()
|
||||
.map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
buckets.sort_by(|left, right| {
|
||||
left.from
|
||||
.unwrap_or(f64::MIN)
|
||||
.total_cmp(&right.from.unwrap_or(f64::MIN))
|
||||
});
|
||||
|
||||
let is_keyed = req
|
||||
.as_range()
|
||||
.expect("unexpected aggregation, expected range aggregation")
|
||||
.keyed;
|
||||
let buckets = if is_keyed {
|
||||
let mut bucket_map =
|
||||
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
|
||||
for bucket in buckets {
|
||||
bucket_map.insert(bucket.key.to_string(), bucket);
|
||||
}
|
||||
BucketEntries::HashMap(bucket_map)
|
||||
} else {
|
||||
BucketEntries::Vec(buckets)
|
||||
};
|
||||
Ok(BucketResult::Range { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Histogram { buckets } => {
|
||||
let buckets = intermediate_histogram_buckets_to_final_buckets(
|
||||
buckets,
|
||||
req.as_histogram()
|
||||
.expect("unexpected aggregation, expected histogram aggregation"),
|
||||
&req.sub_aggregation,
|
||||
)?;
|
||||
|
||||
let buckets = if req.as_histogram().unwrap().keyed {
|
||||
let mut bucket_map =
|
||||
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
|
||||
for bucket in buckets {
|
||||
bucket_map.insert(bucket.key.to_string(), bucket);
|
||||
}
|
||||
BucketEntries::HashMap(bucket_map)
|
||||
} else {
|
||||
BucketEntries::Vec(buckets)
|
||||
};
|
||||
Ok(BucketResult::Histogram { buckets })
|
||||
}
|
||||
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
|
||||
req.as_term()
|
||||
.expect("unexpected aggregation, expected term aggregation"),
|
||||
&req.sub_aggregation,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn empty_from_req(req: &BucketAggregationType) -> Self {
|
||||
match req {
|
||||
BucketAggregationType::Terms(_) => IntermediateBucketResult::Terms(Default::default()),
|
||||
BucketAggregationType::Range(_) => IntermediateBucketResult::Range(Default::default()),
|
||||
BucketAggregationType::Histogram(_) => {
|
||||
IntermediateBucketResult::Histogram { buckets: vec![] }
|
||||
}
|
||||
}
|
||||
}
|
||||
fn merge_fruits(&mut self, other: IntermediateBucketResult) {
|
||||
match (self, other) {
|
||||
(
|
||||
IntermediateBucketResult::Terms(term_res_left),
|
||||
IntermediateBucketResult::Terms(term_res_right),
|
||||
) => {
|
||||
merge_maps(&mut term_res_left.entries, term_res_right.entries);
|
||||
term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
|
||||
term_res_left.doc_count_error_upper_bound +=
|
||||
term_res_right.doc_count_error_upper_bound;
|
||||
}
|
||||
|
||||
(
|
||||
IntermediateBucketResult::Range(range_res_left),
|
||||
IntermediateBucketResult::Range(range_res_right),
|
||||
) => {
|
||||
merge_maps(&mut range_res_left.buckets, range_res_right.buckets);
|
||||
}
|
||||
(
|
||||
IntermediateBucketResult::Histogram {
|
||||
buckets: buckets_left,
|
||||
..
|
||||
},
|
||||
IntermediateBucketResult::Histogram {
|
||||
buckets: buckets_right,
|
||||
..
|
||||
},
|
||||
) => {
|
||||
let buckets = buckets_left
|
||||
.drain(..)
|
||||
.merge_join_by(buckets_right.into_iter(), |left, right| {
|
||||
left.key.partial_cmp(&right.key).unwrap_or(Ordering::Equal)
|
||||
})
|
||||
.map(|either| match either {
|
||||
itertools::EitherOrBoth::Both(mut left, right) => {
|
||||
left.merge_fruits(right);
|
||||
left
|
||||
}
|
||||
itertools::EitherOrBoth::Left(left) => left,
|
||||
itertools::EitherOrBoth::Right(right) => right,
|
||||
})
|
||||
.collect();
|
||||
|
||||
*buckets_left = buckets;
|
||||
}
|
||||
(IntermediateBucketResult::Range(_), _) => {
|
||||
panic!("try merge on different types")
|
||||
}
|
||||
(IntermediateBucketResult::Histogram { .. }, _) => {
|
||||
panic!("try merge on different types")
|
||||
}
|
||||
(IntermediateBucketResult::Terms { .. }, _) => {
|
||||
panic!("try merge on different types")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IntermediateBucketResult {
|
||||
fn merge_fruits(&mut self, other: &IntermediateBucketResult) {
|
||||
match (self, other) {
|
||||
(
|
||||
IntermediateBucketResult::Range(entries_left),
|
||||
IntermediateBucketResult::Range(entries_right),
|
||||
) => {
|
||||
for (name, entry_left) in entries_left.iter_mut() {
|
||||
if let Some(entry_right) = entries_right.get(name) {
|
||||
entry_left.merge_fruits(entry_right);
|
||||
}
|
||||
}
|
||||
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
/// Range aggregation including error counts
|
||||
pub struct IntermediateRangeBucketResult {
|
||||
pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
|
||||
}
|
||||
|
||||
for (key, res) in entries_right.iter() {
|
||||
if !entries_left.contains_key(key) {
|
||||
entries_left.insert(key.clone(), res.clone());
|
||||
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
/// Term aggregation including error counts
|
||||
pub struct IntermediateTermBucketResult {
|
||||
pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
|
||||
pub(crate) sum_other_doc_count: u64,
|
||||
pub(crate) doc_count_error_upper_bound: u64,
|
||||
}
|
||||
|
||||
impl IntermediateTermBucketResult {
|
||||
pub(crate) fn into_final_result(
|
||||
self,
|
||||
req: &TermsAggregation,
|
||||
sub_aggregation_req: &AggregationsInternal,
|
||||
) -> crate::Result<BucketResult> {
|
||||
let req = TermsAggregationInternal::from_req(req);
|
||||
let mut buckets: Vec<BucketEntry> = self
|
||||
.entries
|
||||
.into_iter()
|
||||
.filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
|
||||
.map(|(key, entry)| {
|
||||
Ok(BucketEntry {
|
||||
key: Key::Str(key),
|
||||
doc_count: entry.doc_count,
|
||||
sub_aggregation: entry
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(sub_aggregation_req)?,
|
||||
})
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
|
||||
let order = req.order.order;
|
||||
match req.order.target {
|
||||
OrderTarget::Key => {
|
||||
buckets.sort_by(|left, right| {
|
||||
if req.order.order == Order::Desc {
|
||||
left.key.partial_cmp(&right.key)
|
||||
} else {
|
||||
right.key.partial_cmp(&left.key)
|
||||
}
|
||||
.expect("expected type string, which is always sortable")
|
||||
});
|
||||
}
|
||||
OrderTarget::Count => {
|
||||
if req.order.order == Order::Desc {
|
||||
buckets.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.doc_count()));
|
||||
} else {
|
||||
buckets.sort_unstable_by_key(|bucket| bucket.doc_count());
|
||||
}
|
||||
}
|
||||
OrderTarget::SubAggregation(name) => {
|
||||
let (agg_name, agg_property) = get_agg_name_and_property(&name);
|
||||
let mut buckets_with_val = buckets
|
||||
.into_iter()
|
||||
.map(|bucket| {
|
||||
let val = bucket
|
||||
.sub_aggregation
|
||||
.get_value_from_aggregation(agg_name, agg_property)?
|
||||
.unwrap_or(f64::NAN);
|
||||
Ok((bucket, val))
|
||||
})
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
|
||||
buckets_with_val.sort_by(|(_, val1), (_, val2)| match &order {
|
||||
Order::Desc => val2.total_cmp(val1),
|
||||
Order::Asc => val1.total_cmp(val2),
|
||||
});
|
||||
buckets = buckets_with_val
|
||||
.into_iter()
|
||||
.map(|(bucket, _val)| bucket)
|
||||
.collect_vec();
|
||||
}
|
||||
}
|
||||
|
||||
// We ignore _term_doc_count_before_cutoff here, because it increases the upperbound error
|
||||
// only for terms that didn't make it into the top N.
|
||||
//
|
||||
// This can be interesting as a measure of the quality of the results, but it is not suitable
// for checking the actual error count of the returned terms.
|
||||
let (_term_doc_count_before_cutoff, sum_other_doc_count) =
|
||||
cut_off_buckets(&mut buckets, req.size as usize);
|
||||
|
||||
let doc_count_error_upper_bound = if req.show_term_doc_count_error {
|
||||
Some(self.doc_count_error_upper_bound)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(BucketResult::Terms {
|
||||
buckets,
|
||||
sum_other_doc_count: self.sum_other_doc_count + sum_other_doc_count,
|
||||
doc_count_error_upper_bound,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
trait MergeFruits {
|
||||
fn merge_fruits(&mut self, other: Self);
|
||||
}
|
||||
|
||||
fn merge_maps<V: MergeFruits + Clone>(
|
||||
entries_left: &mut FnvHashMap<SerializedKey, V>,
|
||||
mut entries_right: FnvHashMap<SerializedKey, V>,
|
||||
) {
|
||||
for (name, entry_left) in entries_left.iter_mut() {
|
||||
if let Some(entry_right) = entries_right.remove(name) {
|
||||
entry_left.merge_fruits(entry_right);
|
||||
}
|
||||
}
|
||||
|
||||
for (key, res) in entries_right.into_iter() {
|
||||
entries_left.entry(key).or_insert(res);
|
||||
}
|
||||
}
|
||||
|
||||
/// This is the histogram entry for a bucket, which contains a key, count, and optionally
|
||||
/// sub_aggregations.
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct IntermediateHistogramBucketEntry {
|
||||
/// The unique key by which the bucket is identified.
|
||||
pub key: f64,
|
||||
/// The number of documents in the bucket.
|
||||
pub doc_count: u64,
|
||||
/// The sub_aggregation in this bucket.
|
||||
pub sub_aggregation: IntermediateAggregationResults,
|
||||
}
|
||||
|
||||
impl IntermediateHistogramBucketEntry {
|
||||
pub(crate) fn into_final_bucket_entry(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<BucketEntry> {
|
||||
Ok(BucketEntry {
|
||||
key: Key::F64(self.key),
|
||||
doc_count: self.doc_count,
|
||||
sub_aggregation: self
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(req)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry {
|
||||
fn from(entry: SegmentHistogramBucketEntry) -> Self {
|
||||
IntermediateHistogramBucketEntry {
|
||||
key: entry.key,
|
||||
doc_count: entry.doc_count,
|
||||
sub_aggregation: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -166,7 +558,6 @@ pub struct IntermediateRangeBucketEntry {
|
||||
pub key: Key,
|
||||
/// The number of documents in the bucket.
|
||||
pub doc_count: u64,
|
||||
pub(crate) values: Option<Vec<u64>>,
|
||||
/// The sub_aggregation in this bucket.
|
||||
pub sub_aggregation: IntermediateAggregationResults,
|
||||
/// The from range of the bucket. Equals f64::MIN when None.
|
||||
@@ -177,49 +568,71 @@ pub struct IntermediateRangeBucketEntry {
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
impl From<SegmentRangeBucketEntry> for IntermediateRangeBucketEntry {
|
||||
fn from(entry: SegmentRangeBucketEntry) -> Self {
|
||||
let sub_aggregation = if let Some(sub_aggregation) = entry.sub_aggregation {
|
||||
sub_aggregation.into()
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
// let sub_aggregation = entry.sub_aggregation.into();
|
||||
|
||||
IntermediateRangeBucketEntry {
|
||||
key: entry.key,
|
||||
doc_count: entry.doc_count,
|
||||
values: None,
|
||||
sub_aggregation,
|
||||
to: entry.to,
|
||||
from: entry.from,
|
||||
}
|
||||
impl IntermediateRangeBucketEntry {
|
||||
pub(crate) fn into_final_bucket_entry(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<RangeBucketEntry> {
|
||||
Ok(RangeBucketEntry {
|
||||
key: self.key,
|
||||
doc_count: self.doc_count,
|
||||
sub_aggregation: self
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(req)?,
|
||||
to: self.to,
|
||||
from: self.from,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl IntermediateRangeBucketEntry {
|
||||
fn merge_fruits(&mut self, other: &IntermediateRangeBucketEntry) {
|
||||
/// This is the term entry for a bucket, which contains a count, and optionally
|
||||
/// sub_aggregations.
|
||||
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct IntermediateTermBucketEntry {
|
||||
/// The number of documents in the bucket.
|
||||
pub doc_count: u64,
|
||||
/// The sub_aggregation in this bucket.
|
||||
pub sub_aggregation: IntermediateAggregationResults,
|
||||
}
|
||||
|
||||
impl MergeFruits for IntermediateTermBucketEntry {
|
||||
fn merge_fruits(&mut self, other: IntermediateTermBucketEntry) {
|
||||
self.doc_count += other.doc_count;
|
||||
self.sub_aggregation.merge_fruits(&other.sub_aggregation);
|
||||
self.sub_aggregation.merge_fruits(other.sub_aggregation);
|
||||
}
|
||||
}
|
||||
|
||||
impl MergeFruits for IntermediateRangeBucketEntry {
|
||||
fn merge_fruits(&mut self, other: IntermediateRangeBucketEntry) {
|
||||
self.doc_count += other.doc_count;
|
||||
self.sub_aggregation.merge_fruits(other.sub_aggregation);
|
||||
}
|
||||
}
|
||||
|
||||
impl MergeFruits for IntermediateHistogramBucketEntry {
|
||||
fn merge_fruits(&mut self, other: IntermediateHistogramBucketEntry) {
|
||||
self.doc_count += other.doc_count;
|
||||
self.sub_aggregation.merge_fruits(other.sub_aggregation);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::HashMap;
|
||||
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
use super::*;
|
||||
|
||||
fn get_sub_test_tree(data: &[(String, u64)]) -> IntermediateAggregationResults {
|
||||
let mut map = HashMap::new();
|
||||
let mut buckets = HashMap::new();
|
||||
let mut buckets = FnvHashMap::default();
|
||||
for (key, doc_count) in data {
|
||||
buckets.insert(
|
||||
Key::Str(key.to_string()),
|
||||
key.to_string(),
|
||||
IntermediateRangeBucketEntry {
|
||||
key: Key::Str(key.to_string()),
|
||||
doc_count: *doc_count,
|
||||
values: None,
|
||||
sub_aggregation: Default::default(),
|
||||
from: None,
|
||||
to: None,
|
||||
@@ -228,21 +641,25 @@ mod tests {
|
||||
}
|
||||
map.insert(
|
||||
"my_agg_level2".to_string(),
|
||||
IntermediateAggregationResult::Bucket(IntermediateBucketResult::Range(buckets)),
|
||||
IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
|
||||
);
|
||||
IntermediateAggregationResults(VecWithNames::from_entries(map.into_iter().collect()))
|
||||
IntermediateAggregationResults {
|
||||
buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
|
||||
metrics: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_test_tree(data: &[(String, u64, String, u64)]) -> IntermediateAggregationResults {
|
||||
fn get_intermediat_tree_with_ranges(
|
||||
data: &[(String, u64, String, u64)],
|
||||
) -> IntermediateAggregationResults {
|
||||
let mut map = HashMap::new();
|
||||
let mut buckets = HashMap::new();
|
||||
let mut buckets: FnvHashMap<_, _> = Default::default();
|
||||
for (key, doc_count, sub_aggregation_key, sub_aggregation_count) in data {
|
||||
buckets.insert(
|
||||
Key::Str(key.to_string()),
|
||||
key.to_string(),
|
||||
IntermediateRangeBucketEntry {
|
||||
key: Key::Str(key.to_string()),
|
||||
doc_count: *doc_count,
|
||||
values: None,
|
||||
from: None,
|
||||
to: None,
|
||||
sub_aggregation: get_sub_test_tree(&[(
|
||||
@@ -254,25 +671,28 @@ mod tests {
|
||||
}
|
||||
map.insert(
|
||||
"my_agg_level1".to_string(),
|
||||
IntermediateAggregationResult::Bucket(IntermediateBucketResult::Range(buckets)),
|
||||
IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
|
||||
);
|
||||
IntermediateAggregationResults(VecWithNames::from_entries(map.into_iter().collect()))
|
||||
IntermediateAggregationResults {
|
||||
buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
|
||||
metrics: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_fruits_tree_1() {
|
||||
let mut tree_left = get_test_tree(&[
|
||||
let mut tree_left = get_intermediat_tree_with_ranges(&[
|
||||
("red".to_string(), 50, "1900".to_string(), 25),
|
||||
("blue".to_string(), 30, "1900".to_string(), 30),
|
||||
]);
|
||||
let tree_right = get_test_tree(&[
|
||||
let tree_right = get_intermediat_tree_with_ranges(&[
|
||||
("red".to_string(), 60, "1900".to_string(), 30),
|
||||
("blue".to_string(), 25, "1900".to_string(), 50),
|
||||
]);
|
||||
|
||||
tree_left.merge_fruits(&tree_right);
|
||||
tree_left.merge_fruits(tree_right);
|
||||
|
||||
let tree_expected = get_test_tree(&[
|
||||
let tree_expected = get_intermediat_tree_with_ranges(&[
|
||||
("red".to_string(), 110, "1900".to_string(), 55),
|
||||
("blue".to_string(), 55, "1900".to_string(), 80),
|
||||
]);
|
||||
@@ -282,18 +702,18 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_merge_fruits_tree_2() {
|
||||
let mut tree_left = get_test_tree(&[
|
||||
let mut tree_left = get_intermediat_tree_with_ranges(&[
|
||||
("red".to_string(), 50, "1900".to_string(), 25),
|
||||
("blue".to_string(), 30, "1900".to_string(), 30),
|
||||
]);
|
||||
let tree_right = get_test_tree(&[
|
||||
let tree_right = get_intermediat_tree_with_ranges(&[
|
||||
("red".to_string(), 60, "1900".to_string(), 30),
|
||||
("green".to_string(), 25, "1900".to_string(), 50),
|
||||
]);
|
||||
|
||||
tree_left.merge_fruits(&tree_right);
|
||||
tree_left.merge_fruits(tree_right);
|
||||
|
||||
let tree_expected = get_test_tree(&[
|
||||
let tree_expected = get_intermediat_tree_with_ranges(&[
|
||||
("red".to_string(), 110, "1900".to_string(), 55),
|
||||
("blue".to_string(), 30, "1900".to_string(), 30),
|
||||
("green".to_string(), 25, "1900".to_string(), 50),
|
||||
@@ -301,4 +721,18 @@ mod tests {
|
||||
|
||||
assert_eq!(tree_left, tree_expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merge_fruits_tree_empty() {
|
||||
let mut tree_left = get_intermediat_tree_with_ranges(&[
|
||||
("red".to_string(), 50, "1900".to_string(), 25),
|
||||
("blue".to_string(), 30, "1900".to_string(), 30),
|
||||
]);
|
||||
|
||||
let orig = tree_left.clone();
|
||||
|
||||
tree_left.merge_fruits(IntermediateAggregationResults::default());
|
||||
|
||||
assert_eq!(tree_left, orig);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
use std::fmt::Debug;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::aggregation::f64_from_fastfield_u64;
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
|
||||
use crate::fastfield::DynamicFastFieldReader;
|
||||
use crate::schema::Type;
|
||||
use crate::DocId;
|
||||
|
||||
@@ -12,6 +13,15 @@ use crate::DocId;
|
||||
/// extracted from the aggregated documents.
|
||||
/// Supported field types are u64, i64, and f64.
|
||||
/// See [super::SingleMetricResult] for return value.
|
||||
///
|
||||
/// # JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
/// "avg": {
|
||||
/// "field": "score",
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
pub struct AverageAggregation {
|
||||
/// The field name to compute the stats on.
|
||||
pub field: String,
|
||||
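For reference, the same request can also be expressed as data. A hedged sketch, assuming `Aggregations` deserializes from the elasticsearch-like JSON shape used throughout these docs; `"avg_score"` and `"score"` are illustrative names.

```rust
use tantivy::aggregation::agg_req::Aggregations;

// Parse an average aggregation request from JSON (the outer key names the aggregation).
fn avg_request_from_json() -> serde_json::Result<Aggregations> {
    serde_json::from_str(r#"{ "avg_score": { "avg": { "field": "score" } } }"#)
}
```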
@@ -51,10 +61,10 @@ impl SegmentAverageCollector {
|
||||
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
for docs in iter.by_ref() {
|
||||
let val1 = field.get(docs[0]);
|
||||
let val2 = field.get(docs[1]);
|
||||
let val3 = field.get(docs[2]);
|
||||
let val4 = field.get(docs[3]);
|
||||
let val1 = field.get_val(docs[0] as u64);
|
||||
let val2 = field.get_val(docs[1] as u64);
|
||||
let val3 = field.get_val(docs[2] as u64);
|
||||
let val4 = field.get_val(docs[3] as u64);
|
||||
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
|
||||
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
|
||||
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
|
||||
@@ -64,8 +74,8 @@ impl SegmentAverageCollector {
|
||||
self.data.collect(val3);
|
||||
self.data.collect(val4);
|
||||
}
|
||||
for doc in iter.remainder() {
|
||||
let val = field.get(*doc);
|
||||
for &doc in iter.remainder() {
|
||||
let val = field.get_val(doc as u64);
|
||||
let val = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.data.collect(val);
|
||||
}
|
||||
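The calls above move from `FastFieldReader::get(doc)` to the `fastfield_codecs::Column` trait, whose `get_val` takes a `u64` index. A crate-internal sketch of the new access pattern (reader construction omitted):

```rust
use fastfield_codecs::Column;

use crate::fastfield::DynamicFastFieldReader;
use crate::DocId;

// Sum raw fast field values through the Column trait; the doc id is widened to u64 for get_val.
fn sum_column(docs: &[DocId], reader: &DynamicFastFieldReader<u64>) -> u64 {
    docs.iter().map(|&doc| reader.get_val(doc as u64)).sum()
}
```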
@@ -85,13 +95,17 @@ impl IntermediateAverage {
|
||||
}
|
||||
|
||||
/// Merge average data into this instance.
|
||||
pub fn merge_fruits(&mut self, other: &IntermediateAverage) {
|
||||
pub fn merge_fruits(&mut self, other: IntermediateAverage) {
|
||||
self.sum += other.sum;
|
||||
self.doc_count += other.doc_count;
|
||||
}
|
||||
/// compute final result
|
||||
pub fn finalize(&self) -> f64 {
|
||||
self.sum / self.doc_count as f64
|
||||
pub fn finalize(&self) -> Option<f64> {
|
||||
if self.doc_count == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(self.sum / self.doc_count as f64)
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
fn collect(&mut self, val: f64) {
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
//! Module for all metric aggregations.
|
||||
|
||||
//!
|
||||
//! The aggregations in this family compute metrics, see [super::agg_req::MetricAggregation] for
|
||||
//! details.
|
||||
mod average;
|
||||
mod stats;
|
||||
pub use average::*;
|
||||
@@ -12,11 +14,17 @@ pub use stats::*;
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct SingleMetricResult {
|
||||
/// The value of the single value metric.
|
||||
pub value: f64,
|
||||
pub value: Option<f64>,
|
||||
}
|
||||
|
||||
impl From<f64> for SingleMetricResult {
|
||||
fn from(value: f64) -> Self {
|
||||
Self { value: Some(value) }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Option<f64>> for SingleMetricResult {
|
||||
fn from(value: Option<f64>) -> Self {
|
||||
Self { value }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +1,31 @@
|
||||
use fastfield_codecs::Column;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::aggregation::f64_from_fastfield_u64;
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
|
||||
use crate::fastfield::DynamicFastFieldReader;
|
||||
use crate::schema::Type;
|
||||
use crate::DocId;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
/// A multi-value metric aggregation that computes stats of numeric values that are
|
||||
/// extracted from the aggregated documents.
|
||||
/// Supported field types are u64, i64, and f64.
|
||||
/// See [Stats] for returned statistics.
|
||||
///
|
||||
/// # JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
/// "stats": {
|
||||
/// "field": "score",
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct StatsAggregation {
|
||||
/// The field name to compute the stats on.
|
||||
pub field: String,
|
||||
}
|
||||
|
||||
impl StatsAggregation {
|
||||
/// Create new StatsAggregation from a field.
|
||||
pub fn from_field_name(field_name: String) -> Self {
|
||||
@@ -32,14 +44,31 @@ pub struct Stats {
|
||||
pub count: usize,
|
||||
/// The sum of the fast field values.
|
||||
pub sum: f64,
|
||||
/// The standard deviation of the fast field values.
|
||||
pub standard_deviation: f64,
|
||||
/// The standard deviation of the fast field values. None for count == 0.
|
||||
pub standard_deviation: Option<f64>,
|
||||
/// The min value of the fast field values.
|
||||
pub min: f64,
|
||||
pub min: Option<f64>,
|
||||
/// The max value of the fast field values.
|
||||
pub max: f64,
|
||||
/// The average of the values.
|
||||
pub avg: f64,
|
||||
pub max: Option<f64>,
|
||||
/// The average of the values. None for count == 0.
|
||||
pub avg: Option<f64>,
|
||||
}
|
||||
|
||||
impl Stats {
|
||||
pub(crate) fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
|
||||
match agg_property {
|
||||
"count" => Ok(Some(self.count as f64)),
|
||||
"sum" => Ok(Some(self.sum)),
|
||||
"standard_deviation" => Ok(self.standard_deviation),
|
||||
"min" => Ok(self.min),
|
||||
"max" => Ok(self.max),
|
||||
"avg" => Ok(self.avg),
|
||||
_ => Err(TantivyError::InvalidArgument(format!(
|
||||
"unknown property {} on stats metric aggregation",
|
||||
agg_property
|
||||
))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// IntermediateStats contains the mergeable version for stats.
|
||||
@@ -51,9 +80,8 @@ pub struct IntermediateStats {
|
||||
min: f64,
|
||||
max: f64,
|
||||
}
|
||||
|
||||
impl IntermediateStats {
|
||||
fn new() -> Self {
|
||||
impl Default for IntermediateStats {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
count: 0,
|
||||
sum: 0.0,
|
||||
@@ -62,22 +90,28 @@ impl IntermediateStats {
|
||||
max: f64::MIN,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn avg(&self) -> f64 {
|
||||
self.sum / (self.count as f64)
|
||||
impl IntermediateStats {
|
||||
pub(crate) fn avg(&self) -> Option<f64> {
|
||||
if self.count == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(self.sum / (self.count as f64))
|
||||
}
|
||||
}
|
||||
|
||||
fn square_mean(&self) -> f64 {
|
||||
self.squared_sum / (self.count as f64)
|
||||
}
|
||||
|
||||
pub(crate) fn standard_deviation(&self) -> f64 {
|
||||
let average = self.avg();
|
||||
(self.square_mean() - average * average).sqrt()
|
||||
pub(crate) fn standard_deviation(&self) -> Option<f64> {
|
||||
self.avg()
|
||||
.map(|average| (self.square_mean() - average * average).sqrt())
|
||||
}
|
||||
|
||||
/// Merge data from other stats into this instance.
|
||||
pub fn merge_fruits(&mut self, other: &IntermediateStats) {
|
||||
pub fn merge_fruits(&mut self, other: IntermediateStats) {
|
||||
self.count += other.count;
|
||||
self.sum += other.sum;
|
||||
self.squared_sum += other.squared_sum;
|
||||
@@ -85,14 +119,24 @@ impl IntermediateStats {
|
||||
self.max = self.max.max(other.max);
|
||||
}
|
||||
|
||||
/// compute final result
|
||||
/// compute final result
|
||||
pub fn finalize(&self) -> Stats {
|
||||
let min = if self.count == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(self.min)
|
||||
};
|
||||
let max = if self.count == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(self.max)
|
||||
};
|
||||
Stats {
|
||||
count: self.count,
|
||||
sum: self.sum,
|
||||
standard_deviation: self.standard_deviation(),
|
||||
min: self.min,
|
||||
max: self.max,
|
||||
min,
|
||||
max,
|
||||
avg: self.avg(),
|
||||
}
|
||||
}
|
||||
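The standard deviation above is the population form `sqrt(E[x^2] - E[x]^2)`, guarded by the empty-count check. A small self-contained check of that formula (not part of the diff): for the values `[1.0, 2.0, 3.0]` the mean is 2.0, the mean of squares is 14/3, so the result is `sqrt(14/3 - 4) = sqrt(2/3) ≈ 0.8165`.

```rust
// Population standard deviation via sqrt(mean of squares - square of mean).
fn population_std_dev(values: &[f64]) -> Option<f64> {
    if values.is_empty() {
        return None;
    }
    let n = values.len() as f64;
    let mean = values.iter().sum::<f64>() / n;
    let mean_sq = values.iter().map(|v| v * v).sum::<f64>() / n;
    Some((mean_sq - mean * mean).sqrt())
}
```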
@@ -117,16 +161,16 @@ impl SegmentStatsCollector {
|
||||
pub fn from_req(field_type: Type) -> Self {
|
||||
Self {
|
||||
field_type,
|
||||
stats: IntermediateStats::new(),
|
||||
stats: IntermediateStats::default(),
|
||||
}
|
||||
}
|
||||
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
|
||||
let mut iter = doc.chunks_exact(4);
|
||||
for docs in iter.by_ref() {
|
||||
let val1 = field.get(docs[0]);
|
||||
let val2 = field.get(docs[1]);
|
||||
let val3 = field.get(docs[2]);
|
||||
let val4 = field.get(docs[3]);
|
||||
let val1 = field.get_val(docs[0] as u64);
|
||||
let val2 = field.get_val(docs[1] as u64);
|
||||
let val3 = field.get_val(docs[2] as u64);
|
||||
let val4 = field.get_val(docs[3] as u64);
|
||||
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
|
||||
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
|
||||
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
|
||||
@@ -136,8 +180,8 @@ impl SegmentStatsCollector {
|
||||
self.stats.collect(val3);
|
||||
self.stats.collect(val4);
|
||||
}
|
||||
for doc in iter.remainder() {
|
||||
let val = field.get(*doc);
|
||||
for &doc in iter.remainder() {
|
||||
let val = field.get_val(doc as u64);
|
||||
let val = f64_from_fastfield_u64(val, &self.field_type);
|
||||
self.stats.collect(val);
|
||||
}
|
||||
@@ -157,12 +201,50 @@ mod tests {
|
||||
};
|
||||
use crate::aggregation::agg_result::AggregationResults;
|
||||
use crate::aggregation::metric::StatsAggregation;
|
||||
use crate::aggregation::tests::get_test_index_2_segments;
|
||||
use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values};
|
||||
use crate::aggregation::AggregationCollector;
|
||||
use crate::query::TermQuery;
|
||||
use crate::query::{AllQuery, TermQuery};
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::Term;
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_stats_empty_index() -> crate::Result<()> {
|
||||
// test index without segments
|
||||
let values = vec![];
|
||||
|
||||
let index = get_test_index_from_values(false, &values)?;
|
||||
|
||||
let agg_req_1: Aggregations = vec![(
|
||||
"stats".to_string(),
|
||||
Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
|
||||
"score".to_string(),
|
||||
))),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
|
||||
|
||||
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
|
||||
assert_eq!(
|
||||
res["stats"],
|
||||
json!({
|
||||
"avg": Value::Null,
|
||||
"count": 0,
|
||||
"max": Value::Null,
|
||||
"min": Value::Null,
|
||||
"standard_deviation": Value::Null,
|
||||
"sum": 0.0
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregation_stats() -> crate::Result<()> {
|
||||
let index = get_test_index_2_segments(false)?;
|
||||
@@ -199,7 +281,12 @@ mod tests {
|
||||
Aggregation::Bucket(BucketAggregation {
|
||||
bucket_agg: BucketAggregationType::Range(RangeAggregation {
|
||||
field: "score".to_string(),
|
||||
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
|
||||
ranges: vec![
|
||||
(3f64..7f64).into(),
|
||||
(7f64..19f64).into(),
|
||||
(19f64..20f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: iter::once((
|
||||
"stats".to_string(),
|
||||
@@ -214,7 +301,7 @@ mod tests {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
|
||||
@@ -268,6 +355,18 @@ mod tests {
|
||||
})
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
res["range"]["buckets"][3]["stats"],
|
||||
json!({
|
||||
"avg": serde_json::Value::Null,
|
||||
"count": 0,
|
||||
"max": serde_json::Value::Null,
|
||||
"min": serde_json::Value::Null,
|
||||
"standard_deviation": serde_json::Value::Null,
|
||||
"sum": 0.0,
|
||||
})
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,32 +4,45 @@
|
||||
//! merging.
|
||||
|
||||
use std::fmt::Debug;
|
||||
|
||||
use itertools::Itertools;
|
||||
use std::rc::Rc;
|
||||
use std::sync::atomic::AtomicU32;
|
||||
|
||||
use super::agg_req::MetricAggregation;
|
||||
use super::agg_req_with_accessor::{
|
||||
AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor,
|
||||
};
|
||||
use super::bucket::SegmentRangeCollector;
|
||||
use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
|
||||
use super::collector::MAX_BUCKET_COUNT;
|
||||
use super::intermediate_agg_result::{IntermediateAggregationResults, IntermediateBucketResult};
|
||||
use super::metric::{
|
||||
AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation,
|
||||
};
|
||||
use super::{Key, VecWithNames};
|
||||
use super::VecWithNames;
|
||||
use crate::aggregation::agg_req::BucketAggregationType;
|
||||
use crate::DocId;
|
||||
use crate::{DocId, TantivyError};
|
||||
|
||||
pub(crate) const DOC_BLOCK_SIZE: usize = 256;
|
||||
pub(crate) const DOC_BLOCK_SIZE: usize = 64;
|
||||
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
|
||||
|
||||
#[derive(Clone, PartialEq)]
|
||||
pub(crate) struct SegmentAggregationResultsCollector {
|
||||
pub(crate) metrics: VecWithNames<SegmentMetricResultCollector>,
|
||||
pub(crate) buckets: VecWithNames<SegmentBucketResultCollector>,
|
||||
pub(crate) metrics: Option<VecWithNames<SegmentMetricResultCollector>>,
|
||||
pub(crate) buckets: Option<VecWithNames<SegmentBucketResultCollector>>,
|
||||
staged_docs: DocBlock,
|
||||
num_staged_docs: usize,
|
||||
}
|
||||
|
||||
impl Default for SegmentAggregationResultsCollector {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
metrics: Default::default(),
|
||||
buckets: Default::default(),
|
||||
staged_docs: [0; DOC_BLOCK_SIZE],
|
||||
num_staged_docs: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for SegmentAggregationResultsCollector {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("SegmentAggregationResultsCollector")
|
||||
@@ -42,25 +55,59 @@ impl Debug for SegmentAggregationResultsCollector {
|
||||
}
|
||||
|
||||
impl SegmentAggregationResultsCollector {
|
||||
pub(crate) fn from_req(req: &AggregationsWithAccessor) -> crate::Result<Self> {
|
||||
pub fn into_intermediate_aggregations_result(
|
||||
self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) -> crate::Result<IntermediateAggregationResults> {
|
||||
let buckets = if let Some(buckets) = self.buckets {
|
||||
let entries = buckets
|
||||
.into_iter()
|
||||
.zip(agg_with_accessor.buckets.values())
|
||||
.map(|((key, bucket), acc)| Ok((key, bucket.into_intermediate_bucket_result(acc)?)))
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
Some(VecWithNames::from_entries(entries))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let metrics = self.metrics.map(VecWithNames::from_other);
|
||||
|
||||
Ok(IntermediateAggregationResults { metrics, buckets })
|
||||
}
|
||||
|
||||
pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
|
||||
let buckets = req
|
||||
.buckets
|
||||
.entries()
|
||||
.map(|(key, req)| {
|
||||
Ok((
|
||||
key.to_string(),
|
||||
SegmentBucketResultCollector::from_req(req)?,
|
||||
SegmentBucketResultCollector::from_req_and_validate(req)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
let metrics = req
|
||||
.metrics
|
||||
.entries()
|
||||
.map(|(key, req)| (key.to_string(), SegmentMetricResultCollector::from_req(req)))
|
||||
.collect_vec();
|
||||
.map(|(key, req)| {
|
||||
Ok((
|
||||
key.to_string(),
|
||||
SegmentMetricResultCollector::from_req_and_validate(req)?,
|
||||
))
|
||||
})
|
||||
.collect::<crate::Result<Vec<(String, _)>>>()?;
|
||||
let metrics = if metrics.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(VecWithNames::from_entries(metrics))
|
||||
};
|
||||
let buckets = if buckets.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(VecWithNames::from_entries(buckets))
|
||||
};
|
||||
Ok(SegmentAggregationResultsCollector {
|
||||
metrics: VecWithNames::from_entries(metrics),
|
||||
buckets: VecWithNames::from_entries(buckets),
|
||||
metrics,
|
||||
buckets,
|
||||
staged_docs: [0; DOC_BLOCK_SIZE],
|
||||
num_staged_docs: 0,
|
||||
})
|
||||
@@ -71,40 +118,46 @@ impl SegmentAggregationResultsCollector {
|
||||
&mut self,
|
||||
doc: crate::DocId,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
self.staged_docs[self.num_staged_docs] = doc;
|
||||
self.num_staged_docs += 1;
|
||||
if self.num_staged_docs == self.staged_docs.len() {
|
||||
self.flush_staged_docs(agg_with_accessor, false);
|
||||
self.flush_staged_docs(agg_with_accessor, false)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
pub(crate) fn flush_staged_docs(
|
||||
&mut self,
|
||||
agg_with_accessor: &AggregationsWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
for (agg_with_accessor, collector) in agg_with_accessor
|
||||
.metrics
|
||||
.values()
|
||||
.zip(self.metrics.values_mut())
|
||||
{
|
||||
collector.collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor);
|
||||
) -> crate::Result<()> {
|
||||
if self.num_staged_docs == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
for (agg_with_accessor, collector) in agg_with_accessor
|
||||
.buckets
|
||||
.values()
|
||||
.zip(self.buckets.values_mut())
|
||||
{
|
||||
collector.collect_block(
|
||||
&self.staged_docs[..self.num_staged_docs],
|
||||
agg_with_accessor,
|
||||
force_flush,
|
||||
);
|
||||
if let Some(metrics) = &mut self.metrics {
|
||||
for (collector, agg_with_accessor) in
|
||||
metrics.values_mut().zip(agg_with_accessor.metrics.values())
|
||||
{
|
||||
collector
|
||||
.collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(buckets) = &mut self.buckets {
|
||||
for (collector, agg_with_accessor) in
|
||||
buckets.values_mut().zip(agg_with_accessor.buckets.values())
|
||||
{
|
||||
collector.collect_block(
|
||||
&self.staged_docs[..self.num_staged_docs],
|
||||
agg_with_accessor,
|
||||
force_flush,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
self.num_staged_docs = 0;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
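A crate-internal sketch of the staging contract used by `AggregationSegmentCollector`: `collect` buffers doc ids and flushes automatically once `DOC_BLOCK_SIZE` (now 64) docs are staged, and the caller must force-flush the remainder before harvesting. Type paths are as used in this diff.

```rust
use crate::aggregation::agg_req_with_accessor::AggregationsWithAccessor;
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::DocId;

// Stage all docs, then drain whatever is still buffered with a forced flush.
fn collect_all(
    docs: &[DocId],
    collector: &mut SegmentAggregationResultsCollector,
    aggs: &AggregationsWithAccessor,
) -> crate::Result<()> {
    for &doc in docs {
        collector.collect(doc, aggs)?;
    }
    collector.flush_staged_docs(aggs, true)
}
```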
@@ -115,15 +168,17 @@ pub(crate) enum SegmentMetricResultCollector {
|
||||
}
|
||||
|
||||
impl SegmentMetricResultCollector {
|
||||
pub fn from_req(req: &MetricAggregationWithAccessor) -> Self {
|
||||
pub fn from_req_and_validate(req: &MetricAggregationWithAccessor) -> crate::Result<Self> {
|
||||
match &req.metric {
|
||||
MetricAggregation::Average(AverageAggregation { field: _ }) => {
|
||||
SegmentMetricResultCollector::Average(SegmentAverageCollector::from_req(
|
||||
req.field_type,
|
||||
Ok(SegmentMetricResultCollector::Average(
|
||||
SegmentAverageCollector::from_req(req.field_type),
|
||||
))
|
||||
}
|
||||
MetricAggregation::Stats(StatsAggregation { field: _ }) => {
|
||||
SegmentMetricResultCollector::Stats(SegmentStatsCollector::from_req(req.field_type))
|
||||
Ok(SegmentMetricResultCollector::Stats(
|
||||
SegmentStatsCollector::from_req(req.field_type),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -146,14 +201,58 @@ impl SegmentMetricResultCollector {
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub(crate) enum SegmentBucketResultCollector {
|
||||
Range(SegmentRangeCollector),
|
||||
Histogram(Box<SegmentHistogramCollector>),
|
||||
Terms(Box<SegmentTermCollector>),
|
||||
}
|
||||
|
||||
impl SegmentBucketResultCollector {
|
||||
pub fn from_req(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
|
||||
pub fn into_intermediate_bucket_result(
|
||||
self,
|
||||
agg_with_accessor: &BucketAggregationWithAccessor,
|
||||
) -> crate::Result<IntermediateBucketResult> {
|
||||
match self {
|
||||
SegmentBucketResultCollector::Terms(terms) => {
|
||||
terms.into_intermediate_bucket_result(agg_with_accessor)
|
||||
}
|
||||
SegmentBucketResultCollector::Range(range) => {
|
||||
range.into_intermediate_bucket_result(agg_with_accessor)
|
||||
}
|
||||
SegmentBucketResultCollector::Histogram(histogram) => {
|
||||
histogram.into_intermediate_bucket_result(agg_with_accessor)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
|
||||
match &req.bucket_agg {
|
||||
BucketAggregationType::Range(range_req) => Ok(Self::Range(
|
||||
SegmentRangeCollector::from_req(range_req, &req.sub_aggregation, req.field_type)?,
|
||||
)),
|
||||
BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new(
|
||||
SegmentTermCollector::from_req_and_validate(
|
||||
terms_req,
|
||||
&req.sub_aggregation,
|
||||
req.field_type,
|
||||
req.accessor
|
||||
.as_multi()
|
||||
.expect("unexpected fast field cardinality"),
|
||||
)?,
|
||||
))),
|
||||
BucketAggregationType::Range(range_req) => {
|
||||
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
|
||||
range_req,
|
||||
&req.sub_aggregation,
|
||||
&req.bucket_count,
|
||||
req.field_type,
|
||||
)?))
|
||||
}
|
||||
BucketAggregationType::Histogram(histogram) => Ok(Self::Histogram(Box::new(
|
||||
SegmentHistogramCollector::from_req_and_validate(
|
||||
histogram,
|
||||
&req.sub_aggregation,
|
||||
req.field_type,
|
||||
req.accessor
|
||||
.as_single()
|
||||
.expect("unexpected fast field cardinality"),
|
||||
)?,
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -163,33 +262,52 @@ impl SegmentBucketResultCollector {
|
||||
doc: &[DocId],
|
||||
bucket_with_accessor: &BucketAggregationWithAccessor,
|
||||
force_flush: bool,
|
||||
) {
|
||||
) -> crate::Result<()> {
|
||||
match self {
|
||||
SegmentBucketResultCollector::Range(range) => {
|
||||
range.collect_block(doc, bucket_with_accessor, force_flush);
|
||||
range.collect_block(doc, bucket_with_accessor, force_flush)?;
|
||||
}
|
||||
SegmentBucketResultCollector::Histogram(histogram) => {
|
||||
histogram.collect_block(doc, bucket_with_accessor, force_flush)?;
|
||||
}
|
||||
SegmentBucketResultCollector::Terms(terms) => {
|
||||
terms.collect_block(doc, bucket_with_accessor, force_flush)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct BucketCount {
|
||||
/// The counter which is shared between the aggregations for one request.
|
||||
pub(crate) bucket_count: Rc<AtomicU32>,
|
||||
pub(crate) max_bucket_count: u32,
|
||||
}
|
||||
|
||||
impl Default for BucketCount {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
bucket_count: Default::default(),
|
||||
max_bucket_count: MAX_BUCKET_COUNT,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq)]
|
||||
pub(crate) struct SegmentRangeBucketEntry {
|
||||
pub key: Key,
|
||||
pub doc_count: u64,
|
||||
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
|
||||
/// The from range of the bucket. Equals f64::MIN when None.
|
||||
pub from: Option<f64>,
|
||||
/// The to range of the bucket. Equals f64::MAX when None.
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
impl Debug for SegmentRangeBucketEntry {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("SegmentRangeBucketEntry")
|
||||
.field("key", &self.key)
|
||||
.field("doc_count", &self.doc_count)
|
||||
.field("from", &self.from)
|
||||
.field("to", &self.to)
|
||||
.finish()
|
||||
impl BucketCount {
    pub(crate) fn validate_bucket_count(&self) -> crate::Result<()> {
        if self.get_count() > self.max_bucket_count {
            return Err(TantivyError::InvalidArgument(
                "Aborting aggregation because too many buckets were created".to_string(),
            ));
        }
        Ok(())
    }
    pub(crate) fn add_count(&self, count: u32) {
        self.bucket_count
            .fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
    }
    pub(crate) fn get_count(&self) -> u32 {
        self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
    }
}
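// Editor's sketch (not part of the diff): how the shared `BucketCount` guard above
// is intended to be used by a segment collector when it allocates new buckets.
// The helper name `add_buckets_checked` and its call site are illustrative assumptions.
fn add_buckets_checked(bucket_count: &BucketCount, new_buckets: u32) -> crate::Result<()> {
    // Record the buckets created by this collector in the request-wide counter...
    bucket_count.add_count(new_buckets);
    // ...and abort the aggregation early if the request exceeded `max_bucket_count`.
    bucket_count.validate_bucket_count()
}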
@@ -271,8 +271,8 @@ impl Collector for FacetCollector {
|
||||
let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
|
||||
if facet_streamer.advance() {
|
||||
'outer: loop {
|
||||
// at the begining of this loop, facet_streamer
|
||||
// is positionned on a term that has not been processed yet.
|
||||
// at the beginning of this loop, facet_streamer
|
||||
// is positioned on a term that has not been processed yet.
|
||||
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
|
||||
match skip_result {
|
||||
SkipResult::Found => {
|
||||
|
||||
@@ -11,8 +11,10 @@
|
||||
// Importing tantivy...
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastValue};
|
||||
use crate::schema::Field;
|
||||
use crate::{Score, SegmentReader, TantivyError};
|
||||
|
||||
@@ -174,7 +176,7 @@ where
|
||||
type Fruit = TSegmentCollector::Fruit;
|
||||
|
||||
fn collect(&mut self, doc: u32, score: Score) {
|
||||
let value = self.fast_field_reader.get(doc);
|
||||
let value = self.fast_field_reader.get_val(doc as u64);
|
||||
if (self.predicate)(value) {
|
||||
self.segment_collector.collect(doc, score)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use fastdivide::DividerU64;
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastValue};
|
||||
use crate::schema::{Field, Type};
|
||||
use crate::{DocId, Score};
|
||||
|
||||
@@ -72,8 +73,7 @@ impl HistogramComputer {
            return;
        }
        let delta = value - self.min_value;
        let delta_u64 = delta.to_u64();
        let bucket_id: usize = self.divider.divide(delta_u64) as usize;
        let bucket_id: usize = self.divider.divide(delta) as usize;
        if bucket_id < self.counts.len() {
            self.counts[bucket_id] += 1;
        }
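// Editor's sketch (not part of the diff) of the `fastdivide` pattern used by
// `HistogramComputer` above: the bucket width is "compiled" once into a
// `DividerU64` (imported from `fastdivide` at the top of this file), and each
// `divide` call then replaces a hardware division. Names here are assumptions.
fn bucket_id_for(value: u64, min_value: u64, bucket_width: u64) -> usize {
    let divider = DividerU64::divide_by(bucket_width);
    // Equivalent to ((value - min_value) / bucket_width) as usize.
    divider.divide(value - min_value) as usize
}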
@@ -92,7 +92,7 @@ impl SegmentCollector for SegmentHistogramCollector {
|
||||
type Fruit = Vec<u64>;
|
||||
|
||||
fn collect(&mut self, doc: DocId, _score: Score) {
|
||||
let value = self.ff_reader.get(doc);
|
||||
let value = self.ff_reader.get_val(doc as u64);
|
||||
self.histogram_computer.add_value(value);
|
||||
}
|
||||
|
||||
@@ -152,9 +152,9 @@ mod tests {
|
||||
use query::AllQuery;
|
||||
|
||||
use super::{add_vecs, HistogramCollector, HistogramComputer};
|
||||
use crate::chrono::{TimeZone, Utc};
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::{doc, query, Index};
|
||||
use crate::time::{Date, Month};
|
||||
use crate::{doc, query, DateTime, Index};
|
||||
|
||||
#[test]
|
||||
fn test_add_histograms_simple() {
|
||||
@@ -273,17 +273,21 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
|
||||
writer.add_document(doc!(date_field=>Utc.ymd(1982, 9, 17).and_hms(0, 0,0)))?;
|
||||
writer.add_document(doc!(date_field=>Utc.ymd(1986, 3, 9).and_hms(0, 0, 0)))?;
|
||||
writer.add_document(doc!(date_field=>Utc.ymd(1983, 9, 27).and_hms(0, 0, 0)))?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.add_document(
|
||||
doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1986, Month::March, 9)?.with_hms(0, 0, 0)?)),
|
||||
)?;
|
||||
writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1983, Month::September, 27)?.with_hms(0, 0, 0)?)))?;
|
||||
writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let all_query = AllQuery;
|
||||
let week_histogram_collector = HistogramCollector::new(
|
||||
date_field,
|
||||
Utc.ymd(1980, 1, 1).and_hms(0, 0, 0),
|
||||
3600 * 24 * 365, // it is just for a unit test... sorry leap years.
|
||||
DateTime::from_primitive(
|
||||
Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?,
|
||||
),
|
||||
3_600_000_000 * 24 * 365, // it is just for a unit test... sorry leap years.
|
||||
10,
|
||||
);
|
||||
let week_histogram = searcher.search(&all_query, &week_histogram_collector)?;
|
||||
|
||||
@@ -92,7 +92,7 @@ mod histogram_collector;
|
||||
pub use histogram_collector::HistogramCollector;
|
||||
|
||||
mod multi_collector;
|
||||
pub use self::multi_collector::MultiCollector;
|
||||
pub use self::multi_collector::{FruitHandle, MultiCollector, MultiFruit};
|
||||
|
||||
mod top_collector;
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ use super::{Collector, SegmentCollector};
|
||||
use crate::collector::Fruit;
|
||||
use crate::{DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
|
||||
|
||||
/// MultiFruit keeps Fruits from every nested Collector
|
||||
pub struct MultiFruit {
|
||||
sub_fruits: Vec<Option<Box<dyn Fruit>>>,
|
||||
}
|
||||
@@ -79,12 +80,17 @@ impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
|
||||
}
|
||||
}
|
||||
|
||||
/// FruitHandle stores reference to the corresponding collector inside MultiCollector
|
||||
pub struct FruitHandle<TFruit: Fruit> {
|
||||
pos: usize,
|
||||
_phantom: PhantomData<TFruit>,
|
||||
}
|
||||
|
||||
impl<TFruit: Fruit> FruitHandle<TFruit> {
|
||||
/// Extract a typed fruit off a multifruit.
|
||||
///
|
||||
/// This function involves downcasting and can panic if the multifruit was
|
||||
/// created using faulty code.
|
||||
pub fn extract(self, fruits: &mut MultiFruit) -> TFruit {
|
||||
let boxed_fruit = fruits.sub_fruits[self.pos].take().expect("");
|
||||
*boxed_fruit
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
use std::str::FromStr;
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use super::*;
|
||||
use crate::collector::{Count, FilterCollector, TopDocs};
|
||||
use crate::core::SegmentReader;
|
||||
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader};
|
||||
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader};
|
||||
use crate::query::{AllQuery, QueryParser};
|
||||
use crate::schema::{Field, Schema, FAST, TEXT};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::{doc, DateTime, DocAddress, DocId, Document, Index, Score, Searcher, SegmentOrdinal};
|
||||
|
||||
pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
|
||||
@@ -26,11 +28,11 @@ pub fn test_filter_collector() -> crate::Result<()> {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
|
||||
index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_str("1898-04-09T00:00:00+00:00").unwrap()))?;
|
||||
index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_str("2020-04-09T00:00:00+00:00").unwrap()))?;
|
||||
index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_str("2019-04-20T00:00:00+00:00").unwrap()))?;
|
||||
index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()))?;
|
||||
index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_str("2018-04-09T00:00:00+00:00").unwrap()))?;
|
||||
index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_utc(OffsetDateTime::parse("1898-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
|
||||
index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2020-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
|
||||
index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2019-04-20T00:00:00+00:00", &Rfc3339).unwrap())))?;
|
||||
index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
|
||||
index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_utc(OffsetDateTime::parse("2018-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
|
||||
index_writer.commit()?;
|
||||
|
||||
let reader = index.reader()?;
|
||||
@@ -55,7 +57,9 @@ pub fn test_filter_collector() -> crate::Result<()> {
|
||||
assert_eq!(filtered_top_docs.len(), 0);
|
||||
|
||||
fn date_filter(value: DateTime) -> bool {
|
||||
(value - DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()).num_weeks() > 0
|
||||
(value.into_utc() - OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())
|
||||
.whole_weeks()
|
||||
> 0
|
||||
}
|
||||
|
||||
let filter_dates_collector = FilterCollector::new(date, &date_filter, TopDocs::with_limit(5));
|
||||
@@ -67,10 +71,8 @@ pub fn test_filter_collector() -> crate::Result<()> {
|
||||
|
||||
/// Stores all of the doc ids.
|
||||
/// This collector is only used for tests.
|
||||
/// It is unusable in pr
|
||||
///
|
||||
/// actise, as it does not store
|
||||
/// the segment ordinals
|
||||
/// It is unusable in practise, as it does
|
||||
/// not store the segment ordinals
|
||||
pub struct TestCollector {
|
||||
pub compute_score: bool,
|
||||
}
|
||||
@@ -197,7 +199,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
|
||||
type Fruit = Vec<u64>;
|
||||
|
||||
fn collect(&mut self, doc: DocId, _score: Score) {
|
||||
let val = self.reader.get(doc);
|
||||
let val = self.reader.get_val(doc as u64);
|
||||
self.vals.push(val);
|
||||
}
|
||||
|
||||
@@ -263,7 +265,7 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
|
||||
}
|
||||
}
|
||||
|
||||
fn make_test_searcher() -> crate::Result<crate::LeasedItem<Searcher>> {
|
||||
fn make_test_searcher() -> crate::Result<Searcher> {
|
||||
let schema = Schema::builder().build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
|
||||
@@ -137,7 +137,7 @@ where T: PartialOrd + Clone
|
||||
/// sorted by type `T`.
|
||||
///
|
||||
/// The implementation is based on a `BinaryHeap`.
|
||||
/// The theorical complexity for collecting the top `K` out of `n` documents
|
||||
/// The theoretical complexity for collecting the top `K` out of `n` documents
|
||||
/// is `O(n log K)`.
|
||||
pub(crate) struct TopSegmentCollector<T> {
|
||||
limit: usize,
|
||||
@@ -173,8 +173,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Return true iff at least K documents have gone through
|
||||
/// the collector.
|
||||
/// Return true if more documents have been collected than the limit.
|
||||
#[inline]
|
||||
pub(crate) fn at_capacity(&self) -> bool {
|
||||
self.heap.len() >= self.limit
|
||||
|
||||
@@ -2,6 +2,8 @@ use std::collections::BinaryHeap;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use super::Collector;
|
||||
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
|
||||
use crate::collector::top_collector::{ComparableDoc, TopCollector, TopSegmentCollector};
|
||||
@@ -9,7 +11,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
|
||||
use crate::collector::{
|
||||
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
|
||||
};
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastValue};
|
||||
use crate::query::Weight;
|
||||
use crate::schema::Field;
|
||||
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
|
||||
@@ -79,7 +81,7 @@ where
|
||||
/// sorted by their score.
|
||||
///
|
||||
/// The implementation is based on a `BinaryHeap`.
|
||||
/// The theorical complexity for collecting the top `K` out of `n` documents
|
||||
/// The theoretical complexity for collecting the top `K` out of `n` documents
|
||||
/// is `O(n log K)`.
|
||||
///
|
||||
/// This collector guarantees a stable sorting in case of a tie on the
|
||||
@@ -134,7 +136,7 @@ struct ScorerByFastFieldReader {
|
||||
|
||||
impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
|
||||
fn score(&mut self, doc: DocId) -> u64 {
|
||||
self.ff_reader.get(doc)
|
||||
self.ff_reader.get_val(doc as u64)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -283,7 +285,7 @@ impl TopDocs {
|
||||
///
|
||||
/// # See also
|
||||
///
|
||||
/// To confortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
|
||||
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
|
||||
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
|
||||
pub fn order_by_u64_field(
|
||||
self,
|
||||
@@ -499,7 +501,7 @@ impl TopDocs {
|
||||
///
|
||||
/// This method only makes it possible to compute the score from a given
|
||||
/// `DocId`, fastfield values for the doc and any information you could
|
||||
/// have precomputed beforehands. It does not make it possible for instance
|
||||
/// have precomputed beforehand. It does not make it possible for instance
|
||||
/// to compute something like TfIdf as it does not have access to the list of query
|
||||
/// terms present in the document, nor the term frequencies for the different terms.
|
||||
///
|
||||
@@ -714,7 +716,9 @@ mod tests {
|
||||
use crate::collector::Collector;
|
||||
use crate::query::{AllQuery, Query, QueryParser};
|
||||
use crate::schema::{Field, Schema, FAST, STORED, TEXT};
|
||||
use crate::{DocAddress, DocId, Index, IndexWriter, Score, SegmentReader};
|
||||
use crate::time::format_description::well_known::Rfc3339;
|
||||
use crate::time::OffsetDateTime;
|
||||
use crate::{DateTime, DocAddress, DocId, Index, IndexWriter, Score, SegmentReader};
|
||||
|
||||
fn make_index() -> crate::Result<Index> {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -890,28 +894,32 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_top_field_collector_datetime() -> crate::Result<()> {
|
||||
use std::str::FromStr;
|
||||
let mut schema_builder = Schema::builder();
|
||||
let name = schema_builder.add_text_field("name", TEXT);
|
||||
let birthday = schema_builder.add_date_field("birthday", FAST);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
|
||||
let pr_birthday = DateTime::from_utc(OffsetDateTime::parse(
|
||||
"1898-04-09T00:00:00+00:00",
|
||||
&Rfc3339,
|
||||
)?);
|
||||
index_writer.add_document(doc!(
|
||||
name => "Paul Robeson",
|
||||
birthday => pr_birthday
|
||||
birthday => pr_birthday,
|
||||
))?;
|
||||
let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
|
||||
let mr_birthday = DateTime::from_utc(OffsetDateTime::parse(
|
||||
"1947-11-08T00:00:00+00:00",
|
||||
&Rfc3339,
|
||||
)?);
|
||||
index_writer.add_document(doc!(
|
||||
name => "Minnie Riperton",
|
||||
birthday => mr_birthday
|
||||
birthday => mr_birthday,
|
||||
))?;
|
||||
index_writer.commit()?;
|
||||
let searcher = index.reader()?.searcher();
|
||||
let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
|
||||
let top_docs: Vec<(crate::DateTime, DocAddress)> =
|
||||
searcher.search(&AllQuery, &top_collector)?;
|
||||
let top_docs: Vec<(DateTime, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
|
||||
assert_eq!(
|
||||
&top_docs[..],
|
||||
&[
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use crossbeam::channel;
|
||||
use rayon::{ThreadPool, ThreadPoolBuilder};
|
||||
|
||||
use crate::TantivyError;
|
||||
|
||||
/// Search executor deciding whether search requests are run single-threaded or multithreaded.
|
||||
///
|
||||
/// We don't expose Rayon thread pool directly here for several reasons.
|
||||
@@ -47,16 +48,19 @@ impl Executor {
|
||||
match self {
|
||||
Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(),
|
||||
Executor::ThreadPool(pool) => {
|
||||
let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
|
||||
let num_fruits = args_with_indices.len();
|
||||
let args: Vec<A> = args.collect();
|
||||
let num_fruits = args.len();
|
||||
let fruit_receiver = {
|
||||
let (fruit_sender, fruit_receiver) = channel::unbounded();
|
||||
let (fruit_sender, fruit_receiver) = crossbeam_channel::unbounded();
|
||||
pool.scope(|scope| {
|
||||
for arg_with_idx in args_with_indices {
|
||||
scope.spawn(|_| {
|
||||
let (idx, arg) = arg_with_idx;
|
||||
let fruit = f(arg);
|
||||
if let Err(err) = fruit_sender.send((idx, fruit)) {
|
||||
for (idx, arg) in args.into_iter().enumerate() {
|
||||
// We name references for f and fruit_sender_ref because we do not
|
||||
// want these two to be moved into the closure.
|
||||
let f_ref = &f;
|
||||
let fruit_sender_ref = &fruit_sender;
|
||||
scope.spawn(move |_| {
|
||||
let fruit = f_ref(arg);
|
||||
if let Err(err) = fruit_sender_ref.send((idx, fruit)) {
|
||||
error!(
|
||||
"Failed to send search task. It probably means all search \
|
||||
threads have panicked. {:?}",
|
||||
@@ -71,18 +75,19 @@ impl Executor {
|
||||
// This is important as it makes it possible for the fruit_receiver iteration to
|
||||
// terminate.
|
||||
};
|
||||
// This is lame, but safe.
|
||||
let mut results_with_position = Vec::with_capacity(num_fruits);
|
||||
let mut result_placeholders: Vec<Option<R>> =
|
||||
std::iter::repeat_with(|| None).take(num_fruits).collect();
|
||||
for (pos, fruit_res) in fruit_receiver {
|
||||
let fruit = fruit_res?;
|
||||
results_with_position.push((pos, fruit));
|
||||
result_placeholders[pos] = Some(fruit);
|
||||
}
|
||||
results_with_position.sort_by_key(|(pos, _)| *pos);
|
||||
assert_eq!(results_with_position.len(), num_fruits);
|
||||
Ok(results_with_position
|
||||
.into_iter()
|
||||
.map(|(_, fruit)| fruit)
|
||||
.collect::<Vec<_>>())
|
||||
let results: Vec<R> = result_placeholders.into_iter().flatten().collect();
|
||||
if results.len() != num_fruits {
|
||||
return Err(TantivyError::InternalError(
|
||||
"One of the mapped execution failed.".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(results)
|
||||
}
|
||||
}
|
||||
}
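// Editor's sketch (not part of the diff) of the order-preserving collection pattern
// adopted above: results arrive over the channel tagged with their original index and
// are written into a placeholder slot, so the output keeps the input order without a
// final sort. The function name is illustrative; indices are assumed to be in range.
fn reorder_results<R>(num_fruits: usize, received: Vec<(usize, R)>) -> Option<Vec<R>> {
    let mut placeholders: Vec<Option<R>> =
        std::iter::repeat_with(|| None).take(num_fruits).collect();
    for (pos, fruit) in received {
        placeholders[pos] = Some(fruit);
    }
    let results: Vec<R> = placeholders.into_iter().flatten().collect();
    // A missing slot means one of the mapped executions never reported back.
    if results.len() == num_fruits {
        Some(results)
    } else {
        None
    }
}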
@@ -64,7 +64,7 @@ fn load_metas(
|
||||
/// let body_field = schema_builder.add_text_field("body", TEXT);
|
||||
/// let number_field = schema_builder.add_u64_field(
|
||||
/// "number",
|
||||
/// IntOptions::default().set_fast(Cardinality::SingleValue),
|
||||
/// NumericOptions::default().set_fast(Cardinality::SingleValue),
|
||||
/// );
|
||||
///
|
||||
/// let schema = schema_builder.build();
|
||||
@@ -74,6 +74,7 @@ fn load_metas(
|
||||
pub struct IndexBuilder {
|
||||
schema: Option<Schema>,
|
||||
index_settings: IndexSettings,
|
||||
tokenizer_manager: TokenizerManager,
|
||||
}
|
||||
impl Default for IndexBuilder {
|
||||
fn default() -> Self {
|
||||
@@ -86,6 +87,7 @@ impl IndexBuilder {
|
||||
Self {
|
||||
schema: None,
|
||||
index_settings: IndexSettings::default(),
|
||||
tokenizer_manager: TokenizerManager::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,6 +105,12 @@ impl IndexBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
    /// Set the tokenizers.
    pub fn tokenizers(mut self, tokenizers: TokenizerManager) -> Self {
        self.tokenizer_manager = tokenizers;
        self
    }
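    // Editor's sketch (not part of the diff): wiring a custom `TokenizerManager`
    // through the new setter above. The function name and the in-RAM index are
    // assumptions for the example only; see `test_set_tokenizer_manager` further
    // down for the in-tree test.
    fn create_index_with_custom_tokenizers(schema: Schema) -> crate::Result<Index> {
        IndexBuilder::new()
            .tokenizers(TokenizerManager::default())
            .schema(schema)
            .create_in_ram()
    }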
/// Creates a new index using the `RAMDirectory`.
|
||||
///
|
||||
/// The index will be allocated in anonymous memory.
|
||||
@@ -154,7 +162,8 @@ impl IndexBuilder {
|
||||
if !Index::exists(&*dir)? {
|
||||
return self.create(dir);
|
||||
}
|
||||
let index = Index::open(dir)?;
|
||||
let mut index = Index::open(dir)?;
|
||||
index.set_tokenizers(self.tokenizer_manager.clone());
|
||||
if index.schema() == self.get_expect_schema()? {
|
||||
Ok(index)
|
||||
} else {
|
||||
@@ -176,7 +185,8 @@ impl IndexBuilder {
|
||||
)?;
|
||||
let mut metas = IndexMeta::with_schema(self.get_expect_schema()?);
|
||||
metas.index_settings = self.index_settings;
|
||||
let index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default());
|
||||
let mut index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default());
|
||||
index.set_tokenizers(self.tokenizer_manager);
|
||||
Ok(index)
|
||||
}
|
||||
}
|
||||
@@ -222,7 +232,7 @@ impl Index {
|
||||
}
|
||||
|
||||
/// Replace the default single thread search executor pool
|
||||
/// by a thread pool with a given number of threads.
|
||||
/// by a thread pool with as many threads as there are CPUs on the system.
|
||||
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
|
||||
let default_num_threads = num_cpus::get();
|
||||
self.set_multithread_executor(default_num_threads)
|
||||
@@ -304,6 +314,11 @@ impl Index {
|
||||
}
|
||||
}
|
||||
|
||||
/// Setter for the tokenizer manager.
|
||||
pub fn set_tokenizers(&mut self, tokenizers: TokenizerManager) {
|
||||
self.tokenizers = tokenizers;
|
||||
}
|
||||
|
||||
/// Accessor for the tokenizer manager.
|
||||
pub fn tokenizers(&self) -> &TokenizerManager {
|
||||
&self.tokenizers
|
||||
@@ -314,20 +329,31 @@ impl Index {
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
let field_type = field_entry.field_type();
|
||||
let tokenizer_manager: &TokenizerManager = self.tokenizers();
|
||||
let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
|
||||
FieldType::Str(text_options) => text_options
|
||||
.get_indexing_options()
|
||||
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
|
||||
.and_then(|tokenizer_name| tokenizer_manager.get(&tokenizer_name)),
|
||||
_ => None,
|
||||
let indexing_options_opt = match field_type {
|
||||
FieldType::JsonObject(options) => options.get_text_indexing_options(),
|
||||
FieldType::Str(options) => options.get_indexing_options(),
|
||||
_ => {
|
||||
return Err(TantivyError::SchemaError(format!(
|
||||
"{:?} is not a text field.",
|
||||
field_entry.name()
|
||||
)))
|
||||
}
|
||||
};
|
||||
match tokenizer_name_opt {
|
||||
Some(tokenizer) => Ok(tokenizer),
|
||||
None => Err(TantivyError::SchemaError(format!(
|
||||
"{:?} is not a text field.",
|
||||
field_entry.name()
|
||||
))),
|
||||
}
|
||||
let indexing_options = indexing_options_opt.ok_or_else(|| {
|
||||
TantivyError::InvalidArgument(format!(
|
||||
"No indexing options set for field {:?}",
|
||||
field_entry
|
||||
))
|
||||
})?;
|
||||
|
||||
tokenizer_manager
|
||||
.get(indexing_options.tokenizer())
|
||||
.ok_or_else(|| {
|
||||
TantivyError::InvalidArgument(format!(
|
||||
"No Tokenizer found for field {:?}",
|
||||
field_entry
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a default `IndexReader` for the given index.
|
||||
@@ -340,8 +366,7 @@ impl Index {
|
||||
/// Create a `IndexReader` for the given index.
|
||||
///
|
||||
/// Most project should create at most one reader for a given index.
|
||||
/// This method is typically called only once per `Index` instance,
|
||||
/// over the lifetime of most problem.
|
||||
/// This method is typically called only once per `Index` instance.
|
||||
pub fn reader_builder(&self) -> IndexReaderBuilder {
|
||||
IndexReaderBuilder::new(self.clone())
|
||||
}
|
||||
@@ -557,7 +582,8 @@ impl fmt::Debug for Index {
|
||||
mod tests {
|
||||
use crate::directory::{RamDirectory, WatchCallback};
|
||||
use crate::schema::{Field, Schema, INDEXED, TEXT};
|
||||
use crate::{Directory, Index, IndexReader, IndexSettings, ReloadPolicy};
|
||||
use crate::tokenizer::TokenizerManager;
|
||||
use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy};
|
||||
|
||||
#[test]
|
||||
fn test_indexer_for_field() {
|
||||
@@ -573,6 +599,21 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_tokenizer_manager() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_u64_field("num_likes", INDEXED);
|
||||
schema_builder.add_text_field("body", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = IndexBuilder::new()
|
||||
// set empty tokenizer manager
|
||||
.tokenizers(TokenizerManager::new())
|
||||
.schema(schema)
|
||||
.create_in_ram()
|
||||
.unwrap();
|
||||
assert!(index.tokenizers().get("raw").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_exists() {
|
||||
let directory: Box<dyn Directory> = Box::new(RamDirectory::create());
|
||||
@@ -702,7 +743,7 @@ mod tests {
|
||||
.try_into()?;
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
writer.add_document(doc!(field=>1u64))?;
|
||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||
let (sender, receiver) = crossbeam_channel::unbounded();
|
||||
let _handle = index.directory_mut().watch(WatchCallback::new(move || {
|
||||
let _ = sender.send(());
|
||||
}));
|
||||
@@ -737,7 +778,7 @@ mod tests {
|
||||
reader: &IndexReader,
|
||||
) -> crate::Result<()> {
|
||||
let mut reader_index = reader.index();
|
||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||
let (sender, receiver) = crossbeam_channel::unbounded();
|
||||
let _watch_handle = reader_index
|
||||
.directory_mut()
|
||||
.watch(WatchCallback::new(move || {
|
||||
@@ -781,24 +822,24 @@ mod tests {
|
||||
for i in 0u64..8_000u64 {
|
||||
writer.add_document(doc!(field => i))?;
|
||||
}
|
||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||
let _handle = directory.watch(WatchCallback::new(move || {
|
||||
let _ = sender.send(());
|
||||
}));
|
||||
|
||||
writer.commit()?;
|
||||
let mem_right_after_commit = directory.total_mem_usage();
|
||||
assert!(receiver.recv().is_ok());
|
||||
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::Manual)
|
||||
.try_into()?;
|
||||
|
||||
assert_eq!(reader.searcher().num_docs(), 8_000);
|
||||
assert_eq!(reader.searcher().segment_readers().len(), 8);
|
||||
|
||||
writer.wait_merging_threads()?;
|
||||
|
||||
let mem_right_after_merge_finished = directory.total_mem_usage();
|
||||
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.segment_readers().len(), 1);
|
||||
assert_eq!(searcher.num_docs(), 8_000);
|
||||
assert!(
|
||||
mem_right_after_merge_finished < mem_right_after_commit,
|
||||
|
||||
@@ -239,7 +239,7 @@ impl InnerSegmentMeta {
|
||||
///
|
||||
/// Contains settings which are applied on the whole
|
||||
/// index, like presort documents.
|
||||
#[derive(Clone, Default, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub struct IndexSettings {
|
||||
/// Sorts the documents by information
|
||||
/// provided in `IndexSortByField`
|
||||
@@ -248,13 +248,32 @@ pub struct IndexSettings {
|
||||
/// The `Compressor` used to compress the doc store.
|
||||
#[serde(default)]
|
||||
pub docstore_compression: Compressor,
|
||||
#[serde(default = "default_docstore_blocksize")]
|
||||
/// The size of each block that will be compressed and written to disk
|
||||
pub docstore_blocksize: usize,
|
||||
}
|
||||
|
||||
/// Must be a function to be compatible with serde defaults
fn default_docstore_blocksize() -> usize {
    16_384
}

impl Default for IndexSettings {
    fn default() -> Self {
        Self {
            sort_by_field: None,
            docstore_compression: Compressor::default(),
            docstore_blocksize: default_docstore_blocksize(),
        }
    }
}
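// Editor's sketch (not part of the diff): because `docstore_blocksize` uses
// `#[serde(default = "default_docstore_blocksize")]`, an older `meta.json` that
// predates the field still deserializes and picks up the 16_384 default. It
// assumes `serde_json` is available, as in the metas tests below; the JSON
// literal and function name are illustrative.
fn parse_legacy_settings() -> serde_json::Result<IndexSettings> {
    let legacy_json = r#"{"docstore_compression":"lz4"}"#;
    let settings: IndexSettings = serde_json::from_str(legacy_json)?;
    // Missing field falls back to the serde default declared above.
    assert_eq!(settings.docstore_blocksize, 16_384);
    Ok(settings)
}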
/// Settings to presort the documents in an index
|
||||
///
|
||||
/// Presorting documents can greatly performance
|
||||
/// Presorting documents can greatly improve performance
|
||||
/// in some scenarios, by applying top n
|
||||
/// optimizations.
|
||||
#[derive(Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub struct IndexSortByField {
|
||||
/// The field to sort the documents by
|
||||
pub field: String,
|
||||
@@ -262,7 +281,7 @@ pub struct IndexSortByField {
|
||||
pub order: Order,
|
||||
}
|
||||
/// The order to sort by
|
||||
#[derive(Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub enum Order {
|
||||
/// Ascending Order
|
||||
Asc,
|
||||
@@ -292,22 +311,22 @@ pub struct IndexMeta {
|
||||
/// `IndexSettings` to configure index options.
|
||||
#[serde(default)]
|
||||
pub index_settings: IndexSettings,
|
||||
/// List of `SegmentMeta` informations associated to each finalized segment of the index.
|
||||
/// List of `SegmentMeta` information associated to each finalized segment of the index.
|
||||
pub segments: Vec<SegmentMeta>,
|
||||
/// Index `Schema`
|
||||
pub schema: Schema,
|
||||
/// Opstamp associated to the last `commit` operation.
|
||||
pub opstamp: Opstamp,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// Payload associated to the last commit.
|
||||
///
|
||||
/// Upon commit, clients can optionally add a small `String` payload to their commit
|
||||
/// to help identify this commit.
|
||||
/// This payload is entirely unused by tantivy.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub payload: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
struct UntrackedIndexMeta {
|
||||
pub segments: Vec<InnerSegmentMeta>,
|
||||
#[serde(default)]
|
||||
@@ -374,7 +393,9 @@ impl fmt::Debug for IndexMeta {
|
||||
mod tests {
|
||||
|
||||
use super::IndexMeta;
|
||||
use crate::core::index_meta::UntrackedIndexMeta;
|
||||
use crate::schema::{Schema, TEXT};
|
||||
use crate::store::ZstdCompressor;
|
||||
use crate::{IndexSettings, IndexSortByField, Order};
|
||||
|
||||
#[test]
|
||||
@@ -400,7 +421,68 @@ mod tests {
|
||||
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"lz4"},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false}}],"opstamp":0}"#
|
||||
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"lz4","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
|
||||
);
|
||||
|
||||
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
|
||||
assert_eq!(index_metas.index_settings, deser_meta.index_settings);
|
||||
assert_eq!(index_metas.schema, deser_meta.schema);
|
||||
assert_eq!(index_metas.opstamp, deser_meta.opstamp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_metas_zstd_compressor() {
|
||||
let schema = {
|
||||
let mut schema_builder = Schema::builder();
|
||||
schema_builder.add_text_field("text", TEXT);
|
||||
schema_builder.build()
|
||||
};
|
||||
let index_metas = IndexMeta {
|
||||
index_settings: IndexSettings {
|
||||
sort_by_field: Some(IndexSortByField {
|
||||
field: "text".to_string(),
|
||||
order: Order::Asc,
|
||||
}),
|
||||
docstore_compression: crate::store::Compressor::Zstd(ZstdCompressor {
|
||||
compression_level: Some(4),
|
||||
}),
|
||||
docstore_blocksize: 1_000_000,
|
||||
},
|
||||
segments: Vec::new(),
|
||||
schema,
|
||||
opstamp: 0u64,
|
||||
payload: None,
|
||||
};
|
||||
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(compression_level=4)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
|
||||
);
|
||||
|
||||
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
|
||||
assert_eq!(index_metas.index_settings, deser_meta.index_settings);
|
||||
assert_eq!(index_metas.schema, deser_meta.schema);
|
||||
assert_eq!(index_metas.opstamp, deser_meta.opstamp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_metas_invalid_comp() {
|
||||
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zsstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
|
||||
|
||||
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
|
||||
assert_eq!(
|
||||
err.to_string(),
|
||||
"unknown variant `zsstd`, expected one of `none`, `lz4`, `brotli`, `snappy`, `zstd`, \
|
||||
`zstd(compression_level=5)` at line 1 column 96"
|
||||
.to_string()
|
||||
);
|
||||
|
||||
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(bla=10)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
|
||||
|
||||
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
|
||||
assert_eq!(
|
||||
err.to_string(),
|
||||
"unknown zstd option \"bla\" at line 1 column 103".to_string()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,7 +88,8 @@ impl InvertedIndexReader {
|
||||
let postings_slice = self
|
||||
.postings_file_slice
|
||||
.slice(term_info.postings_range.clone());
|
||||
block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?);
|
||||
let postings_bytes = postings_slice.read_bytes()?;
|
||||
block_postings.reset(term_info.doc_freq, postings_bytes)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -197,3 +198,45 @@ impl InvertedIndexReader {
|
||||
.unwrap_or(0u32))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "quickwit")]
|
||||
impl InvertedIndexReader {
|
||||
pub(crate) async fn get_term_info_async(
|
||||
&self,
|
||||
term: &Term,
|
||||
) -> crate::AsyncIoResult<Option<TermInfo>> {
|
||||
self.termdict.get_async(term.value_bytes()).await
|
||||
}
|
||||
|
||||
/// Returns a block postings given a `Term`.
|
||||
/// This method is for an advanced usage only.
|
||||
///
|
||||
/// Most users should prefer using `read_postings` instead.
|
||||
pub async fn warm_postings(
|
||||
&self,
|
||||
term: &Term,
|
||||
with_positions: bool,
|
||||
) -> crate::AsyncIoResult<()> {
|
||||
let term_info_opt = self.get_term_info_async(term).await?;
|
||||
if let Some(term_info) = term_info_opt {
|
||||
self.postings_file_slice
|
||||
.read_bytes_slice_async(term_info.postings_range.clone())
|
||||
.await?;
|
||||
if with_positions {
|
||||
self.positions_file_slice
|
||||
.read_bytes_slice_async(term_info.positions_range.clone())
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the number of documents containing the term asynchronously.
|
||||
pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
|
||||
Ok(self
|
||||
.get_term_info_async(term)
|
||||
.await?
|
||||
.map(|term_info| term_info.doc_freq)
|
||||
.unwrap_or(0u32))
|
||||
}
|
||||
}
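// Editor's sketch (not part of the diff): a hypothetical warm-up pass over a set of
// query terms before searching, using the `quickwit`-only async API added above.
// The function name and the term slice are assumptions.
#[cfg(feature = "quickwit")]
async fn warm_up_terms(
    inverted_index: &InvertedIndexReader,
    terms: &[Term],
) -> crate::AsyncIoResult<()> {
    for term in terms {
        // Pull the postings (without positions) ahead of time.
        inverted_index.warm_postings(term, false).await?;
    }
    Ok(())
}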
@@ -1,4 +1,5 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, io};
|
||||
|
||||
use crate::collector::Collector;
|
||||
@@ -6,7 +7,7 @@ use crate::core::{Executor, SegmentReader};
|
||||
use crate::query::Query;
|
||||
use crate::schema::{Document, Schema, Term};
|
||||
use crate::space_usage::SearcherSpaceUsage;
|
||||
use crate::store::StoreReader;
|
||||
use crate::store::{CacheStats, StoreReader};
|
||||
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};
|
||||
|
||||
/// Identifies the searcher generation accessed by a [Searcher].
|
||||
@@ -62,43 +63,20 @@ impl SearcherGeneration {
|
||||
///
|
||||
/// It guarantees that the `Segment` will not be removed before
|
||||
/// the destruction of the `Searcher`.
|
||||
#[derive(Clone)]
|
||||
pub struct Searcher {
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
store_readers: Vec<StoreReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
inner: Arc<SearcherInner>,
|
||||
}
|
||||
|
||||
impl Searcher {
|
||||
/// Creates a new `Searcher`
|
||||
pub(crate) fn new(
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
) -> io::Result<Searcher> {
|
||||
let store_readers: Vec<StoreReader> = segment_readers
|
||||
.iter()
|
||||
.map(SegmentReader::get_store_reader)
|
||||
.collect::<io::Result<Vec<_>>>()?;
|
||||
Ok(Searcher {
|
||||
schema,
|
||||
index,
|
||||
segment_readers,
|
||||
store_readers,
|
||||
generation,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the `Index` associated to the `Searcher`
|
||||
pub fn index(&self) -> &Index {
|
||||
&self.index
|
||||
&self.inner.index
|
||||
}
|
||||
|
||||
/// [SearcherGeneration] which identifies the version of the snapshot held by this `Searcher`.
|
||||
pub fn generation(&self) -> &SearcherGeneration {
|
||||
self.generation.as_ref()
|
||||
self.inner.generation.as_ref()
|
||||
}
|
||||
|
||||
/// Fetches a document from tantivy's store given a `DocAddress`.
|
||||
@@ -106,18 +84,39 @@ impl Searcher {
|
||||
/// The searcher uses the segment ordinal to route the
|
||||
/// the request to the right `Segment`.
|
||||
pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
|
||||
let store_reader = &self.store_readers[doc_address.segment_ord as usize];
|
||||
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
|
||||
store_reader.get(doc_address.doc_id)
|
||||
}
|
||||
|
||||
    /// The cache stats for the underlying store reader.
    ///
    /// Aggregates the sum for each segment store reader.
    pub fn doc_store_cache_stats(&self) -> CacheStats {
        let cache_stats: CacheStats = self
            .inner
            .store_readers
            .iter()
            .map(|reader| reader.cache_stats())
            .sum();
        cache_stats
    }
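    // Editor's sketch (not part of the diff): a minimal caller of the new aggregated
    // cache statistics. Only the `doc_store_cache_stats()` call is taken from the
    // diff; the helper name and the assumption that `CacheStats` implements `Debug`
    // are illustrative.
    fn log_doc_store_cache_stats(searcher: &Searcher) {
        let cache_stats = searcher.doc_store_cache_stats();
        println!("doc store cache stats: {:?}", cache_stats);
    }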
/// Fetches a document in an asynchronous manner.
|
||||
#[cfg(feature = "quickwit")]
|
||||
pub async fn doc_async(&self, doc_address: DocAddress) -> crate::Result<Document> {
|
||||
let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
|
||||
store_reader.get_async(doc_address.doc_id).await
|
||||
}
|
||||
|
||||
/// Access the schema associated to the index of this searcher.
|
||||
pub fn schema(&self) -> &Schema {
|
||||
&self.schema
|
||||
&self.inner.schema
|
||||
}
|
||||
|
||||
/// Returns the overall number of documents in the index.
|
||||
pub fn num_docs(&self) -> u64 {
|
||||
self.segment_readers
|
||||
self.inner
|
||||
.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| u64::from(segment_reader.num_docs()))
|
||||
.sum::<u64>()
|
||||
@@ -127,7 +126,7 @@ impl Searcher {
|
||||
/// the given term.
|
||||
pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
|
||||
let mut total_doc_freq = 0;
|
||||
for segment_reader in &self.segment_readers {
|
||||
for segment_reader in &self.inner.segment_readers {
|
||||
let inverted_index = segment_reader.inverted_index(term.field())?;
|
||||
let doc_freq = inverted_index.doc_freq(term)?;
|
||||
total_doc_freq += u64::from(doc_freq);
|
||||
@@ -135,14 +134,27 @@ impl Searcher {
|
||||
Ok(total_doc_freq)
|
||||
}
|
||||
|
||||
/// Return the overall number of documents containing
|
||||
/// the given term in an asynchronous manner.
|
||||
#[cfg(feature = "quickwit")]
|
||||
pub async fn doc_freq_async(&self, term: &Term) -> crate::Result<u64> {
|
||||
let mut total_doc_freq = 0;
|
||||
for segment_reader in &self.inner.segment_readers {
|
||||
let inverted_index = segment_reader.inverted_index(term.field())?;
|
||||
let doc_freq = inverted_index.doc_freq_async(term).await?;
|
||||
total_doc_freq += u64::from(doc_freq);
|
||||
}
|
||||
Ok(total_doc_freq)
|
||||
}
|
||||
|
||||
/// Return the list of segment readers
|
||||
pub fn segment_readers(&self) -> &[SegmentReader] {
|
||||
&self.segment_readers
|
||||
&self.inner.segment_readers
|
||||
}
|
||||
|
||||
/// Returns the segment_reader associated with the given segment_ord
|
||||
pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader {
|
||||
&self.segment_readers[segment_ord as usize]
|
||||
&self.inner.segment_readers[segment_ord as usize]
|
||||
}
|
||||
|
||||
/// Runs a query on the segment readers wrapped by the searcher.
|
||||
@@ -164,7 +176,7 @@ impl Searcher {
|
||||
query: &dyn Query,
|
||||
collector: &C,
|
||||
) -> crate::Result<C::Fruit> {
|
||||
let executor = self.index.search_executor();
|
||||
let executor = self.inner.index.search_executor();
|
||||
self.search_with_executor(query, collector, executor)
|
||||
}
|
||||
|
||||
@@ -201,17 +213,67 @@ impl Searcher {
|
||||
/// Summarize total space usage of this searcher.
|
||||
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
|
||||
let mut space_usage = SearcherSpaceUsage::new();
|
||||
for segment_reader in &self.segment_readers {
|
||||
for segment_reader in self.segment_readers() {
|
||||
space_usage.add_segment(segment_reader.space_usage()?);
|
||||
}
|
||||
Ok(space_usage)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Arc<SearcherInner>> for Searcher {
    fn from(inner: Arc<SearcherInner>) -> Self {
        Searcher { inner }
    }
}
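// Editor's sketch (not part of the diff): with the searcher state behind
// `Arc<SearcherInner>` and `#[derive(Clone)]` on `Searcher`, a clone is just a
// reference-count bump, so handing searchers to worker threads is cheap. The helper
// name and worker count are illustrative.
fn searcher_per_worker(searcher: &Searcher, num_workers: usize) -> Vec<Searcher> {
    (0..num_workers).map(|_| searcher.clone()).collect()
}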
/// Holds a list of `SegmentReader`s ready for search.
|
||||
///
|
||||
/// It guarantees that the `Segment` will not be removed before
|
||||
/// the destruction of the `Searcher`.
|
||||
pub(crate) struct SearcherInner {
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
store_readers: Vec<StoreReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
}
|
||||
|
||||
impl SearcherInner {
|
||||
/// Creates a new `Searcher`
|
||||
pub(crate) fn new(
|
||||
schema: Schema,
|
||||
index: Index,
|
||||
segment_readers: Vec<SegmentReader>,
|
||||
generation: TrackedObject<SearcherGeneration>,
|
||||
doc_store_cache_size: usize,
|
||||
) -> io::Result<SearcherInner> {
|
||||
assert_eq!(
|
||||
&segment_readers
|
||||
.iter()
|
||||
.map(|reader| (reader.segment_id(), reader.delete_opstamp()))
|
||||
.collect::<BTreeMap<_, _>>(),
|
||||
generation.segments(),
|
||||
"Set of segments referenced by this Searcher and its SearcherGeneration must match"
|
||||
);
|
||||
let store_readers: Vec<StoreReader> = segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
|
||||
.collect::<io::Result<Vec<_>>>()?;
|
||||
|
||||
Ok(SearcherInner {
|
||||
schema,
|
||||
index,
|
||||
segment_readers,
|
||||
store_readers,
|
||||
generation,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Searcher {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let segment_ids = self
|
||||
.segment_readers
|
||||
.segment_readers()
|
||||
.iter()
|
||||
.map(SegmentReader::segment_id)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
@@ -24,7 +24,8 @@ pub enum SegmentComponent {
|
||||
Store,
|
||||
/// Temporary storage of the documents, before streamed to `Store`.
|
||||
TempStore,
|
||||
/// Bitset describing which document of the segment is deleted.
|
||||
/// Bitset describing which document of the segment is alive.
|
||||
/// (It was representing deleted docs but changed to represent alive docs from v0.17)
|
||||
Delete,
|
||||
}
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ use uuid::Uuid;
|
||||
/// by a UUID which is used to prefix the filenames
|
||||
/// of all of the file associated with the segment.
|
||||
///
|
||||
/// In unit test, for reproducability, the `SegmentId` are
|
||||
/// In unit test, for reproducibility, the `SegmentId` are
|
||||
/// simply generated in an autoincrement fashion.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct SegmentId(Uuid);
|
||||
@@ -35,7 +35,7 @@ const ZERO_ARRAY: [u8; 8] = [0u8; 8];
|
||||
#[cfg(test)]
|
||||
fn create_uuid() -> Uuid {
|
||||
let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
|
||||
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY).unwrap()
|
||||
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY)
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
@@ -57,7 +57,7 @@ impl SegmentId {
|
||||
/// Picking the first 8 chars is ok to identify
|
||||
/// segments in a display message (e.g. a5c4dfcb).
|
||||
pub fn short_uuid_string(&self) -> String {
|
||||
(&self.0.to_simple_ref().to_string()[..8]).to_string()
|
||||
(&self.0.as_simple().to_string()[..8]).to_string()
|
||||
}
|
||||
|
||||
/// Returns a segment uuid string.
|
||||
@@ -65,7 +65,7 @@ impl SegmentId {
|
||||
/// It consists in 32 lowercase hexadecimal chars
|
||||
/// (e.g. a5c4dfcbdfe645089129e308e26d5523)
|
||||
pub fn uuid_string(&self) -> String {
|
||||
self.0.to_simple_ref().to_string()
|
||||
self.0.as_simple().to_string()
|
||||
}
|
||||
|
||||
/// Build a `SegmentId` string from the full uuid string.
|
||||
|
||||
@@ -70,7 +70,7 @@ impl SegmentReader {
|
||||
self.max_doc - self.num_docs
|
||||
}
|
||||
|
||||
/// Returns true iff some of the documents of the segment have been deleted.
|
||||
/// Returns true if some of the documents of the segment have been deleted.
|
||||
pub fn has_deletes(&self) -> bool {
|
||||
self.num_deleted_docs() > 0
|
||||
}
|
||||
@@ -121,21 +121,21 @@ impl SegmentReader {
|
||||
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
|
||||
let field_name = self.schema.get_field_name(field);
|
||||
let err_msg = format!(
|
||||
"Field norm not found for field {:?}. Was the field set to record norm during \
|
||||
indexing?",
|
||||
field_name
|
||||
"Field norm not found for field {field_name:?}. Was the field set to record norm \
|
||||
during indexing?"
|
||||
);
|
||||
crate::TantivyError::SchemaError(err_msg)
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn fieldnorms_readers(&self) -> &FieldNormReaders {
|
||||
#[doc(hidden)]
|
||||
pub fn fieldnorms_readers(&self) -> &FieldNormReaders {
|
||||
&self.fieldnorm_readers
|
||||
}
|
||||
|
||||
/// Accessor to the segment's `StoreReader`.
|
||||
pub fn get_store_reader(&self) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone())
|
||||
pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
|
||||
StoreReader::open(self.store_file.clone(), cache_size)
|
||||
}
|
||||
|
||||
/// Open a new segment for reading.
|
||||
@@ -170,15 +170,15 @@ impl SegmentReader {
|
||||
|
||||
let fast_fields_data = segment.open_read(SegmentComponent::FastFields)?;
|
||||
let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
|
||||
let fast_field_readers =
|
||||
let fast_fields_readers =
|
||||
Arc::new(FastFieldReaders::new(schema.clone(), fast_fields_composite));
|
||||
let fieldnorm_data = segment.open_read(SegmentComponent::FieldNorms)?;
|
||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
||||
|
||||
let original_bitset = if segment.meta().has_deletes() {
|
||||
let delete_file_slice = segment.open_read(SegmentComponent::Delete)?;
|
||||
let delete_data = delete_file_slice.read_bytes()?;
|
||||
Some(AliveBitSet::open(delete_data))
|
||||
let alive_doc_file_slice = segment.open_read(SegmentComponent::Delete)?;
|
||||
let alive_doc_data = alive_doc_file_slice.read_bytes()?;
|
||||
Some(AliveBitSet::open(alive_doc_data))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
@@ -197,7 +197,7 @@ impl SegmentReader {
|
||||
max_doc,
|
||||
termdict_composite,
|
||||
postings_composite,
|
||||
fast_fields_readers: fast_field_readers,
|
||||
fast_fields_readers,
|
||||
fieldnorm_readers,
|
||||
segment_id: segment.id(),
|
||||
delete_opstamp: segment.meta().delete_opstamp(),
|
||||
@@ -216,7 +216,7 @@ impl SegmentReader {
|
||||
/// term dictionary associated to a specific field,
|
||||
/// and opening the posting list associated to any term.
|
||||
///
|
||||
/// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
|
||||
/// If the field is not marked as index, a warn is logged and an empty `InvertedIndexReader`
|
||||
/// is returned.
|
||||
/// Similarly if the field is marked as indexed but no term has been indexed for the given
|
||||
/// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
|
||||
@@ -296,17 +296,16 @@ impl SegmentReader {
|
||||
self.delete_opstamp
|
||||
}
|
||||
|
||||
/// Returns the bitset representing
|
||||
/// the documents that have been deleted.
|
||||
/// Returns the bitset representing the alive `DocId`s.
|
||||
pub fn alive_bitset(&self) -> Option<&AliveBitSet> {
|
||||
self.alive_bitset_opt.as_ref()
|
||||
}
|
||||
|
||||
/// Returns true iff the `doc` is marked
|
||||
/// Returns true if the `doc` is marked
|
||||
/// as deleted.
|
||||
pub fn is_deleted(&self, doc: DocId) -> bool {
|
||||
self.alive_bitset()
|
||||
.map(|delete_set| delete_set.is_deleted(doc))
|
||||
.map(|alive_bitset| alive_bitset.is_deleted(doc))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
@@ -328,7 +327,7 @@ impl SegmentReader {
|
||||
self.positions_composite.space_usage(),
|
||||
self.fast_fields_readers.space_usage(),
|
||||
self.fieldnorm_readers.space_usage(),
|
||||
self.get_store_reader()?.space_usage(),
|
||||
self.get_store_reader(0)?.space_usage(),
|
||||
self.alive_bitset_opt
|
||||
.as_ref()
|
||||
.map(AliveBitSet::space_usage)
|
||||
|
||||
@@ -38,7 +38,7 @@ impl BinarySerializable for FileAddr {
|
||||
/// A `CompositeWrite` is used to write a `CompositeFile`.
|
||||
pub struct CompositeWrite<W = WritePtr> {
|
||||
write: CountingWriter<W>,
|
||||
offsets: HashMap<FileAddr, u64>,
|
||||
offsets: Vec<(FileAddr, u64)>,
|
||||
}
|
||||
|
||||
impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
@@ -47,7 +47,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
pub fn wrap(w: W) -> CompositeWrite<W> {
|
||||
CompositeWrite {
|
||||
write: CountingWriter::wrap(w),
|
||||
offsets: HashMap::new(),
|
||||
offsets: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,8 +60,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
|
||||
let offset = self.write.written_bytes();
|
||||
let file_addr = FileAddr::new(field, idx);
|
||||
assert!(!self.offsets.contains_key(&file_addr));
|
||||
self.offsets.insert(file_addr, offset);
|
||||
assert!(!self.offsets.iter().any(|el| el.0 == file_addr));
|
||||
self.offsets.push((file_addr, offset));
|
||||
&mut self.write
|
||||
}
|
||||
|
||||
@@ -73,16 +73,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
|
||||
let footer_offset = self.write.written_bytes();
|
||||
VInt(self.offsets.len() as u64).serialize(&mut self.write)?;
|
||||
|
||||
let mut offset_fields: Vec<_> = self
|
||||
.offsets
|
||||
.iter()
|
||||
.map(|(file_addr, offset)| (*offset, *file_addr))
|
||||
.collect();
|
||||
|
||||
offset_fields.sort();
|
||||
|
||||
let mut prev_offset = 0;
|
||||
for (offset, file_addr) in offset_fields {
|
||||
for (file_addr, offset) in self.offsets {
|
||||
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
|
||||
file_addr.serialize(&mut self.write)?;
|
||||
prev_offset = offset;
|
||||
@@ -106,6 +98,14 @@ pub struct CompositeFile {
|
||||
offsets_index: HashMap<FileAddr, Range<usize>>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for CompositeFile {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("CompositeFile")
|
||||
.field("offsets_index", &self.offsets_index)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl CompositeFile {
|
||||
/// Opens a composite file stored in a given
|
||||
/// `FileSlice`.
|
||||
@@ -233,4 +233,56 @@ mod test {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_composite_file_bug() -> crate::Result<()> {
|
||||
let path = Path::new("test_path");
|
||||
let directory = RamDirectory::create();
|
||||
{
|
||||
let w = directory.open_write(path).unwrap();
|
||||
let mut composite_write = CompositeWrite::wrap(w);
|
||||
let mut write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 0);
|
||||
VInt(32431123u64).serialize(&mut write)?;
|
||||
write.flush()?;
|
||||
let write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 1);
|
||||
write.flush()?;
|
||||
|
||||
let mut write = composite_write.for_field_with_idx(Field::from_field_id(0u32), 0);
|
||||
VInt(1_000_000).serialize(&mut write)?;
|
||||
write.flush()?;
|
||||
|
||||
composite_write.close()?;
|
||||
}
|
||||
{
|
||||
let r = directory.open_read(path)?;
|
||||
let composite_file = CompositeFile::open(&r)?;
|
||||
{
|
||||
let file = composite_file
|
||||
.open_read_with_idx(Field::from_field_id(1u32), 0)
|
||||
.unwrap()
|
||||
.read_bytes()?;
|
||||
let mut file0_buf = file.as_slice();
|
||||
let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
|
||||
assert_eq!(file0_buf.len(), 0);
|
||||
assert_eq!(payload_0, 32431123u64);
|
||||
}
|
||||
{
|
||||
let file = composite_file
|
||||
.open_read_with_idx(Field::from_field_id(1u32), 1)
|
||||
.unwrap()
|
||||
.read_bytes()?;
|
||||
let file = file.as_slice();
|
||||
assert_eq!(file.len(), 0);
|
||||
}
|
||||
{
|
||||
let file = composite_file
|
||||
.open_read_with_idx(Field::from_field_id(0u32), 0)
|
||||
.unwrap()
|
||||
.read_bytes()?;
|
||||
let file = file.as_slice();
|
||||
assert_eq!(file.len(), 3);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
use std::io::Write;
use std::marker::{Send, Sync};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use std::{fmt, io, thread};

@@ -62,7 +63,12 @@ impl Drop for DirectoryLockGuard {

enum TryAcquireLockError {
FileExists,
IoError(io::Error),
IoError(Arc<io::Error>),
}
impl From<io::Error> for TryAcquireLockError {
fn from(io_error: io::Error) -> Self {
Self::IoError(Arc::new(io_error))
}
}

fn try_acquire_lock(
@@ -73,7 +79,7 @@ fn try_acquire_lock(
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
OpenWriteError::IoError { io_error, .. } => TryAcquireLockError::IoError(io_error),
})?;
write.flush().map_err(TryAcquireLockError::IoError)?;
write.flush().map_err(TryAcquireLockError::from)?;
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
directory: directory.box_clone(),
path: filepath.to_owned(),
@@ -96,16 +102,16 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
///
/// There are currently two implementations of `Directory`
///
/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
/// - The [`MMapDirectory`][crate::directory::MmapDirectory], this
/// should be your default choice.
/// - The [`RamDirectory`](struct.RamDirectory.html), which
/// - The [`RamDirectory`][crate::directory::RamDirectory], which
/// should be used mostly for tests.
pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// Opens a file and returns a boxed `FileHandle`.
///
/// Users of `Directory` should typically call `Directory::open_read(...)`,
/// while `Directory` implementor should implement `get_file_handle()`.
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError>;

/// Once a virtual file is open, its data may not
/// change.
@@ -128,7 +134,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// `DeleteError::DoesNotExist`.
fn delete(&self, path: &Path) -> Result<(), DeleteError>;

/// Returns true iff the file exists
/// Returns true if and only if the file exists
fn exists(&self, path: &Path) -> Result<bool, OpenReadError>;

/// Opens a writer for the *virtual file* associated with

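Side note (illustration, not part of the diff): the reason `io::Error` is wrapped in `Arc` across these hunks is that `std::io::Error` does not implement `Clone`, while the error enums in this branch now derive `Clone`. A minimal sketch of the pattern, using a hypothetical error type:

    use std::io;
    use std::sync::Arc;

    // `io::Error` is not `Clone`, so it is stored behind an `Arc`;
    // every clone of the enum then shares the same underlying error.
    #[derive(Debug, Clone)]
    enum ExampleError {
        IoError(Arc<io::Error>),
    }

    impl From<io::Error> for ExampleError {
        fn from(io_error: io::Error) -> Self {
            ExampleError::IoError(Arc::new(io_error))
        }
    }
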
@@ -45,7 +45,7 @@ pub static INDEX_WRITER_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
/// The meta lock file is here to protect the segment files being opened by
/// `IndexReader::reload()` from being garbage collected.
/// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have prefered `RWLock` semantics
/// our index in-writing. Ideally, we may have preferred `RWLock` semantics
/// here, but it is difficult to achieve on Windows.
///
/// Opening segment readers is a very fast process.

@@ -1,10 +1,11 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::{fmt, io};

use crate::Version;

/// Error while trying to acquire a directory lock.
#[derive(Debug, Error)]
#[derive(Debug, Clone, Error)]
pub enum LockError {
/// Failed to acquired a lock as it is already held by another
/// client.
@@ -16,11 +17,18 @@ pub enum LockError {
LockBusy,
/// Trying to acquire a lock failed with an `IoError`
#[error("Failed to acquire the lock due to an io:Error.")]
IoError(io::Error),
IoError(Arc<io::Error>),
}

impl LockError {
/// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error) -> Self {
Self::IoError(Arc::new(io_error))
}
}

/// Error that may occur when opening a directory
#[derive(Debug, Error)]
#[derive(Debug, Clone, Error)]
pub enum OpenDirectoryError {
/// The underlying directory does not exists.
#[error("Directory does not exist: '{0}'.")]
@@ -30,12 +38,12 @@ pub enum OpenDirectoryError {
NotADirectory(PathBuf),
/// Failed to create a temp directory.
#[error("Failed to create a temporary directory: '{0}'.")]
FailedToCreateTempDir(io::Error),
FailedToCreateTempDir(Arc<io::Error>),
/// IoError
#[error("IoError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
IoError {
/// underlying io Error.
io_error: io::Error,
io_error: Arc<io::Error>,
/// directory we tried to open.
directory_path: PathBuf,
},
@@ -45,14 +53,14 @@ impl OpenDirectoryError {
/// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error, directory_path: PathBuf) -> Self {
Self::IoError {
io_error,
io_error: Arc::new(io_error),
directory_path,
}
}
}

/// Error that may occur when starting to write in a file
#[derive(Debug, Error)]
#[derive(Debug, Clone, Error)]
pub enum OpenWriteError {
/// Our directory is WORM, writing an existing file is forbidden.
/// Checkout the `Directory` documentation.
@@ -63,7 +71,7 @@ pub enum OpenWriteError {
#[error("IoError '{io_error:?}' while opening file for write: '{filepath}'.")]
IoError {
/// The underlying `io::Error`.
io_error: io::Error,
io_error: Arc<io::Error>,
/// File path of the file that tantivy failed to open for write.
filepath: PathBuf,
},
@@ -72,11 +80,15 @@ pub enum OpenWriteError {
impl OpenWriteError {
/// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
Self::IoError { io_error, filepath }
Self::IoError {
io_error: Arc::new(io_error),
filepath,
}
}
}
/// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue
#[derive(Clone)]
pub enum Incompatibility {
/// This library cannot decompress the index found on disk
CompressionMismatch {
@@ -135,7 +147,7 @@ impl fmt::Debug for Incompatibility {
}

/// Error that may occur when accessing a file read
#[derive(Debug, Error)]
#[derive(Debug, Clone, Error)]
pub enum OpenReadError {
/// The file does not exists.
#[error("Files does not exists: {0:?}")]
@@ -146,7 +158,7 @@ pub enum OpenReadError {
)]
IoError {
/// The underlying `io::Error`.
io_error: io::Error,
io_error: Arc<io::Error>,
/// File path of the file that tantivy failed to open for read.
filepath: PathBuf,
},
@@ -158,11 +170,14 @@ pub enum OpenReadError {
impl OpenReadError {
/// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
Self::IoError { io_error, filepath }
Self::IoError {
io_error: Arc::new(io_error),
filepath,
}
}
}
/// Error that may occur when trying to delete a file
#[derive(Debug, Error)]
#[derive(Debug, Clone, Error)]
pub enum DeleteError {
/// The file does not exists.
#[error("File does not exists: '{0}'.")]
@@ -172,7 +187,7 @@ pub enum DeleteError {
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
IoError {
/// The underlying `io::Error`.
io_error: io::Error,
io_error: Arc<io::Error>,
/// File path of the file that tantivy failed to delete.
filepath: PathBuf,
},

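Side note (illustration, not part of the diff): each error type above now exposes a `wrap_io_error` constructor that performs the `Arc::new` wrapping at the call site. A hypothetical helper, assuming the usual `tantivy::directory::error::OpenReadError` path:

    use std::path::Path;

    use tantivy::directory::error::OpenReadError;

    // Reads a file and converts the io::Error into an OpenReadError,
    // recording which path failed.
    fn read_file(path: &Path) -> Result<Vec<u8>, OpenReadError> {
        std::fs::read(path)
            .map_err(|io_err| OpenReadError::wrap_io_error(io_err, path.to_path_buf()))
    }
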
@@ -2,6 +2,7 @@ use std::ops::{Deref, Range};
use std::sync::{Arc, Weak};
use std::{fmt, io};

use async_trait::async_trait;
use common::HasLen;
use stable_deref_trait::StableDeref;

@@ -18,25 +19,42 @@ pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// The underlying behavior is therefore specific to the `Directory` that created it.
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
/// on the filesystem.

#[async_trait]
pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
/// Reads a slice of bytes.
///
/// This method may panic if the range requested is invalid.
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes>;

#[cfg(feature = "quickwit")]
#[doc(hidden)]
async fn read_bytes_async(
&self,
_byte_range: Range<usize>,
) -> crate::AsyncIoResult<OwnedBytes> {
Err(crate::error::AsyncIoError::AsyncUnsupported)
}
}

#[async_trait]
impl FileHandle for &'static [u8] {
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
let bytes = &self[range];
Ok(OwnedBytes::new(bytes))
}

#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
Ok(self.read_bytes(byte_range)?)
}
}

impl<B> From<B> for FileSlice
where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync
{
fn from(bytes: B) -> FileSlice {
FileSlice::new(Box::new(OwnedBytes::new(bytes)))
FileSlice::new(Arc::new(OwnedBytes::new(bytes)))
}
}

@@ -57,7 +75,7 @@ impl fmt::Debug for FileSlice {

impl FileSlice {
/// Wraps a FileHandle.
pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {
let num_bytes = file_handle.len();
FileSlice::new_with_num_bytes(file_handle, num_bytes)
}
@@ -65,9 +83,9 @@ impl FileSlice {
/// Wraps a FileHandle.
#[doc(hidden)]
#[must_use]
pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
pub fn new_with_num_bytes(file_handle: Arc<dyn FileHandle>, num_bytes: usize) -> Self {
FileSlice {
data: Arc::from(file_handle),
data: file_handle,
range: 0..num_bytes,
}
}
@@ -94,7 +112,7 @@ impl FileSlice {

/// Returns a `OwnedBytes` with all of the data in the `FileSlice`.
///
/// The behavior is strongly dependant on the implementation of the underlying
/// The behavior is strongly dependent on the implementation of the underlying
/// `Directory` and the `FileSliceTrait` it creates.
/// In particular, it is up to the `Directory` implementation
/// to handle caching if needed.
@@ -102,6 +120,12 @@ impl FileSlice {
self.data.read_bytes(self.range.clone())
}

#[cfg(feature = "quickwit")]
#[doc(hidden)]
pub async fn read_bytes_async(&self) -> crate::AsyncIoResult<OwnedBytes> {
self.data.read_bytes_async(self.range.clone()).await
}

/// Reads a specific slice of data.
///
/// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
@@ -116,6 +140,23 @@ impl FileSlice {
.read_bytes(self.range.start + range.start..self.range.start + range.end)
}

#[cfg(feature = "quickwit")]
#[doc(hidden)]
pub async fn read_bytes_slice_async(
&self,
byte_range: Range<usize>,
) -> crate::AsyncIoResult<OwnedBytes> {
assert!(
self.range.start + byte_range.end <= self.range.end,
"`to` exceeds the fileslice length"
);
self.data
.read_bytes_async(
self.range.start + byte_range.start..self.range.start + byte_range.end,
)
.await
}

/// Splits the FileSlice at the given offset and return two file slices.
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
///
@@ -160,10 +201,16 @@ impl FileSlice {
}
}

#[async_trait]
impl FileHandle for FileSlice {
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
self.read_bytes_slice(range)
}

#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
self.read_bytes_slice_async(byte_range).await
}
}

impl HasLen for FileSlice {
@@ -172,9 +219,23 @@ impl HasLen for FileSlice {
}
}

#[async_trait]
impl FileHandle for OwnedBytes {
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
Ok(self.slice(range))
}

#[cfg(feature = "quickwit")]
async fn read_bytes_async(&self, range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
let bytes = self.read_bytes(range)?;
Ok(bytes)
}
}

#[cfg(test)]
mod tests {
use std::io;
use std::sync::Arc;

use common::HasLen;

@@ -182,7 +243,7 @@ mod tests {

#[test]
fn test_file_slice() -> io::Result<()> {
let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
let file_slice = FileSlice::new(Arc::new(b"abcdef".as_ref()));
assert_eq!(file_slice.len(), 6);
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
@@ -226,7 +287,7 @@ mod tests {

#[test]
fn test_slice_simple_read() -> io::Result<()> {
let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
let slice = FileSlice::new(Arc::new(&b"abcdef"[..]));
assert_eq!(slice.len(), 6);
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd");
@@ -235,7 +296,7 @@ mod tests {

#[test]
fn test_slice_read_slice() -> io::Result<()> {
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
assert_eq!(slice_deref.read_bytes_slice(1..4)?.as_ref(), b"bcd");
Ok(())
}
@@ -243,7 +304,7 @@ mod tests {
#[test]
#[should_panic(expected = "end of requested range exceeds the fileslice length (10 > 6)")]
fn test_slice_read_slice_invalid_range_exceeds() {
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
assert_eq!(
slice_deref.read_bytes_slice(0..10).unwrap().as_ref(),
b"bcd"

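Side note (illustration, not part of the diff): after this change a `FileSlice` is built from an `Arc<dyn FileHandle>` instead of a `Box`, so a handle can be shared cheaply between slices. A minimal sketch based on the in-memory handles used in the tests above:

    use std::sync::Arc;

    use tantivy::directory::FileSlice;

    // A static byte slice implements FileHandle, so it can back a FileSlice directly.
    fn demo() -> std::io::Result<()> {
        let slice = FileSlice::new(Arc::new(&b"abcdef"[..]));
        assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd");
        Ok(())
    }
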
@@ -53,7 +53,9 @@ impl FileWatcher {
if metafile_has_changed {
info!("Meta file {:?} was modified", path);
current_checksum_opt = Some(checksum);
futures::executor::block_on(callbacks.broadcast());
// We actually ignore callbacks failing here.
// We just wait for the end of their execution.
let _ = callbacks.broadcast().wait();
}
}

@@ -108,7 +110,7 @@ mod tests {
let tmp_file = tmp_dir.path().join("watched.txt");

let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam::channel::unbounded();
let (tx, rx) = crossbeam_channel::unbounded();
let timeout = Duration::from_millis(100);

let watcher = FileWatcher::new(&tmp_file);
@@ -151,7 +153,7 @@ mod tests {
let tmp_file = tmp_dir.path().join("watched.txt");

let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam::channel::unbounded();
let (tx, rx) = crossbeam_channel::unbounded();
let timeout = Duration::from_millis(100);

let watcher = FileWatcher::new(&tmp_file);

@@ -156,6 +156,7 @@ impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
mod tests {

use std::io;
use std::sync::Arc;

use common::BinarySerializable;

@@ -168,7 +169,7 @@ mod tests {
let footer = Footer::new(123);
footer.append_footer(&mut buf).unwrap();
let owned_bytes = OwnedBytes::new(buf);
let fileslice = FileSlice::new(Box::new(owned_bytes));
let fileslice = FileSlice::new(Arc::new(owned_bytes));
let (footer_deser, _body) = Footer::extract_footer(fileslice).unwrap();
assert_eq!(footer_deser.crc(), footer.crc());
}
@@ -181,7 +182,7 @@ mod tests {

let owned_bytes = OwnedBytes::new(buf);

let fileslice = FileSlice::new(Box::new(owned_bytes));
let fileslice = FileSlice::new(Arc::new(owned_bytes));
let err = Footer::extract_footer(fileslice).unwrap_err();
assert_eq!(
err.to_string(),
@@ -198,7 +199,7 @@ mod tests {

let owned_bytes = OwnedBytes::new(buf);

let fileslice = FileSlice::new(Box::new(owned_bytes));
let fileslice = FileSlice::new(Arc::new(owned_bytes));
let err = Footer::extract_footer(fileslice).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
assert_eq!(
@@ -217,7 +218,7 @@ mod tests {

let owned_bytes = OwnedBytes::new(buf);

let fileslice = FileSlice::new(Box::new(owned_bytes));
let fileslice = FileSlice::new(Arc::new(owned_bytes));
let err = Footer::extract_footer(fileslice).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::InvalidData);
assert_eq!(

@@ -16,7 +16,7 @@ use crate::directory::{
use crate::error::DataCorruption;
use crate::Directory;

/// Returns true iff the file is "managed".
/// Returns true if the file is "managed".
/// Non-managed file are not subject to garbage collection.
///
/// Filenames that starts by a "." -typically locks-
@@ -114,7 +114,7 @@ impl ManagedDirectory {
let mut files_to_delete = vec![];

// It is crucial to get the living files after acquiring the
// read lock of meta informations. That way, we
// read lock of meta information. That way, we
// avoid the following scenario.
//
// 1) we get the list of living files.
@@ -242,16 +242,13 @@ impl ManagedDirectory {
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) =
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IoError {
io_error,
filepath: path.to_path_buf(),
})?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
let bytes = data
.read_bytes()
.map_err(|io_error| OpenReadError::IoError {
io_error: Arc::new(io_error),
filepath: path.to_path_buf(),
io_error,
})?;
let mut hasher = Hasher::new();
hasher.update(bytes.as_slice());
@@ -272,9 +269,9 @@ impl ManagedDirectory {
}

impl Directory for ManagedDirectory {
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> {
let file_slice = self.open_read(path)?;
Ok(Box::new(file_slice))
Ok(Arc::new(file_slice))
}

fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {

@@ -1,7 +1,6 @@
use std::collections::HashMap;
use std::convert::From;
use std::fs::{self, File, OpenOptions};
use std::io::{self, BufWriter, Read, Seek, SeekFrom, Write};
use std::io::{self, BufWriter, Read, Seek, Write};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
@@ -175,7 +174,8 @@ impl MmapDirectory {
/// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RamDirectory.
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
let tempdir = TempDir::new()
.map_err(|io_err| OpenDirectoryError::FailedToCreateTempDir(Arc::new(io_err)))?;
Ok(MmapDirectory::new(
tempdir.path().to_path_buf(),
Some(tempdir),
@@ -265,7 +265,7 @@ impl Write for SafeFileWriter {
}

impl Seek for SafeFileWriter {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
self.0.seek(pos)
}
}
@@ -310,7 +310,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
}

impl Directory for MmapDirectory {
fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> {
fn get_file_handle(&self, path: &Path) -> result::Result<Arc<dyn FileHandle>, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);

@@ -331,7 +331,7 @@ impl Directory for MmapDirectory {
})
.unwrap_or_else(OwnedBytes::empty);

Ok(Box::new(owned_bytes))
Ok(Arc::new(owned_bytes))
}

/// Any entry associated to the path in the mmap will be
@@ -343,7 +343,7 @@ impl Directory for MmapDirectory {
DeleteError::FileDoesNotExist(path.to_owned())
} else {
DeleteError::IoError {
io_error: e,
io_error: Arc::new(e),
filepath: path.to_path_buf(),
}
}
@@ -423,9 +423,9 @@ impl Directory for MmapDirectory {
.write(true)
.create(true) //< if the file does not exist yet, create it.
.open(&full_path)
.map_err(LockError::IoError)?;
.map_err(LockError::wrap_io_error)?;
if lock.is_blocking {
file.lock_exclusive().map_err(LockError::IoError)?;
file.lock_exclusive().map_err(LockError::wrap_io_error)?;
} else {
file.try_lock_exclusive().map_err(|_| LockError::LockBusy)?
}

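Side note (illustration, not part of the diff): `LockError::wrap_io_error` slots directly into `map_err`, as the mmap lock code above does. A hypothetical call site:

    use std::fs::{File, OpenOptions};
    use std::path::Path;

    use tantivy::directory::error::LockError;

    // Opens (or creates) a lock file, converting any io::Error into a LockError.
    fn open_lock_file(path: &Path) -> Result<File, LockError> {
        OpenOptions::new()
            .write(true)
            .create(true)
            .open(path)
            .map_err(LockError::wrap_io_error)
    }
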
@@ -9,7 +9,6 @@ mod file_slice;
mod file_watcher;
mod footer;
mod managed_directory;
mod owned_bytes;
mod ram_directory;
mod watch_event_router;

@@ -22,13 +21,13 @@ use std::io::BufWriter;
use std::path::PathBuf;

pub use common::{AntiCallToken, TerminatingWrite};
pub use ownedbytes::OwnedBytes;

pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
pub use self::file_slice::{FileHandle, FileSlice};
pub use self::owned_bytes::OwnedBytes;
pub use self::ram_directory::RamDirectory;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};

@@ -1,12 +0,0 @@
use std::io;
use std::ops::Range;

pub use ownedbytes::OwnedBytes;

use crate::directory::FileHandle;

impl FileHandle for OwnedBytes {
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
Ok(self.slice(range))
}
}
@@ -40,7 +40,7 @@ impl Drop for VecWriter {
fn drop(&mut self) {
if !self.is_flushed {
warn!(
"You forgot to flush {:?} before its writter got Drop. Do not rely on drop. This \
"You forgot to flush {:?} before its writer got Drop. Do not rely on drop. This \
also occurs when the indexer crashed, so you may want to check the logs for the \
root cause.",
self.path
@@ -160,9 +160,9 @@ impl RamDirectory {
}

impl Directory for RamDirectory {
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> {
let file_slice = self.open_read(path)?;
Ok(Box::new(file_slice))
Ok(Arc::new(file_slice))
}

fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
@@ -172,7 +172,7 @@ impl Directory for RamDirectory {
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RamDirectory::delete", |_| {
Err(DeleteError::IoError {
io_error: io::Error::from(io::ErrorKind::Other),
io_error: Arc::new(io::Error::from(io::ErrorKind::Other)),
filepath: path.to_path_buf(),
})
});
@@ -184,7 +184,7 @@ impl Directory for RamDirectory {
.fs
.read()
.map_err(|e| OpenReadError::IoError {
io_error: io::Error::new(io::ErrorKind::Other, e.to_string()),
io_error: Arc::new(io::Error::new(io::ErrorKind::Other, e.to_string())),
filepath: path.to_path_buf(),
})?
.exists(path))
@@ -208,7 +208,7 @@ impl Directory for RamDirectory {
self.open_read(path)?
.read_bytes()
.map_err(|io_error| OpenReadError::IoError {
io_error,
io_error: Arc::new(io_error),
filepath: path.to_path_buf(),
})?;
Ok(bytes.as_slice().to_owned())

@@ -6,9 +6,6 @@ use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;
use std::time::Duration;

use futures::channel::oneshot;
use futures::executor::block_on;

use super::*;

#[cfg(feature = "mmap")]
@@ -184,7 +181,7 @@ fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {

fn test_watch(directory: &dyn Directory) {
let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam::channel::unbounded();
let (tx, rx) = crossbeam_channel::unbounded();
let timeout = Duration::from_millis(500);

let handle = directory
@@ -249,8 +246,8 @@ fn test_lock_blocking(directory: &dyn Directory) {
std::thread::spawn(move || {
//< lock_a_res is sent to the thread.
in_thread_clone.store(true, SeqCst);
let _just_sync = block_on(receiver);
// explicitely droping lock_a_res. It would have been sufficient to just force it
let _just_sync = receiver.recv();
// explicitly dropping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way.
drop(lock_a_res);
});
@@ -273,7 +270,7 @@ fn test_lock_blocking(directory: &dyn Directory) {
assert!(in_thread.load(SeqCst));
assert!(lock_a_res.is_ok());
});
assert!(block_on(receiver2).is_ok());
assert!(receiver2.recv().is_ok());
assert!(sender.send(()).is_ok());
assert!(join_handle.join().is_ok());
}

@@ -1,7 +1,6 @@
use std::sync::{Arc, RwLock, Weak};

use futures::channel::oneshot;
use futures::{Future, TryFutureExt};
use crate::FutureResult;

/// Cloneable wrapper for callbacks registered when watching files of a `Directory`.
#[derive(Clone)]
@@ -74,12 +73,11 @@ impl WatchCallbackList {
}

/// Triggers all callbacks
pub fn broadcast(&self) -> impl Future<Output = ()> {
pub fn broadcast(&self) -> FutureResult<()> {
let callbacks = self.list_callback();
let (sender, receiver) = oneshot::channel();
let result = receiver.unwrap_or_else(|_| ());
let (result, sender) = FutureResult::create("One of the callback panicked.");
if callbacks.is_empty() {
let _ = sender.send(());
let _ = sender.send(Ok(()));
return result;
}
let spawn_res = std::thread::Builder::new()
@@ -88,7 +86,7 @@ impl WatchCallbackList {
for callback in callbacks {
callback.call();
}
let _ = sender.send(());
let _ = sender.send(Ok(()));
});
if let Err(err) = spawn_res {
error!(
@@ -106,8 +104,6 @@ mod tests {
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

use futures::executor::block_on;

use crate::directory::{WatchCallback, WatchCallbackList};

#[test]
@@ -118,22 +114,18 @@ mod tests {
let inc_callback = WatchCallback::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
});
block_on(watch_event_router.broadcast());
watch_event_router.broadcast().wait().unwrap();
assert_eq!(0, counter.load(Ordering::SeqCst));
let handle_a = watch_event_router.subscribe(inc_callback);
assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(watch_event_router.broadcast());
watch_event_router.broadcast().wait().unwrap();
assert_eq!(1, counter.load(Ordering::SeqCst));
block_on(async {
(
watch_event_router.broadcast().await,
watch_event_router.broadcast().await,
watch_event_router.broadcast().await,
)
});
watch_event_router.broadcast().wait().unwrap();
watch_event_router.broadcast().wait().unwrap();
watch_event_router.broadcast().wait().unwrap();
assert_eq!(4, counter.load(Ordering::SeqCst));
mem::drop(handle_a);
block_on(watch_event_router.broadcast());
watch_event_router.broadcast().wait().unwrap();
assert_eq!(4, counter.load(Ordering::SeqCst));
}

@@ -150,19 +142,15 @@ mod tests {
let handle_a = watch_event_router.subscribe(inc_callback(1));
let handle_a2 = watch_event_router.subscribe(inc_callback(10));
assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async {
futures::join!(
watch_event_router.broadcast(),
watch_event_router.broadcast()
)
});
watch_event_router.broadcast().wait().unwrap();
watch_event_router.broadcast().wait().unwrap();
assert_eq!(22, counter.load(Ordering::SeqCst));
mem::drop(handle_a);
block_on(watch_event_router.broadcast());
watch_event_router.broadcast().wait().unwrap();
assert_eq!(32, counter.load(Ordering::SeqCst));
mem::drop(handle_a2);
block_on(watch_event_router.broadcast());
block_on(watch_event_router.broadcast());
watch_event_router.broadcast().wait().unwrap();
watch_event_router.broadcast().wait().unwrap();
assert_eq!(32, counter.load(Ordering::SeqCst));
}

@@ -176,15 +164,12 @@ mod tests {
});
let handle_a = watch_event_router.subscribe(inc_callback);
assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async {
let future1 = watch_event_router.broadcast();
let future2 = watch_event_router.broadcast();
futures::join!(future1, future2)
});
watch_event_router.broadcast().wait().unwrap();
watch_event_router.broadcast().wait().unwrap();
assert_eq!(2, counter.load(Ordering::SeqCst));
mem::drop(handle_a);
let _ = watch_event_router.broadcast();
block_on(watch_event_router.broadcast());
watch_event_router.broadcast().wait().unwrap();
assert_eq!(2, counter.load(Ordering::SeqCst));
}
}

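Side note (illustration, not part of the diff): the `broadcast()` change above replaces `futures::executor::block_on` with a `FutureResult` that callers can `.wait()` on. A sketch of the new flow; the construction via `Default` is an assumption, the other calls mirror the tests above:

    use tantivy::directory::{WatchCallback, WatchCallbackList};

    fn demo() {
        // Assumption: WatchCallbackList implements Default.
        let watch_event_router = WatchCallbackList::default();
        let handle = watch_event_router.subscribe(WatchCallback::new(|| {
            println!("meta file changed");
        }));
        // broadcast() now returns a FutureResult<()>; wait() blocks until
        // every registered callback has run (or reports a failure).
        watch_event_router.broadcast().wait().unwrap();
        drop(handle);
    }
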
@@ -7,7 +7,7 @@ use crate::DocId;
///
/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
/// to compare [u32; 4].
pub const TERMINATED: DocId = std::i32::MAX as u32;
pub const TERMINATED: DocId = i32::MAX as u32;

/// Represents an iterable set of sorted doc ids.
pub trait DocSet: Send {
@@ -24,7 +24,6 @@ pub trait DocSet: Send {
///
/// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
/// be returned.
/// TODO Test existing docsets.
fn advance(&mut self) -> DocId;

/// Advances the DocSet forward until reaching the target, or going to the

src/error.rs
@@ -1,9 +1,11 @@
//! Definition of Tantivy's error and result.
//! Definition of Tantivy's errors and results.

use std::path::PathBuf;
use std::sync::PoisonError;
use std::sync::{Arc, PoisonError};
use std::{fmt, io};

use thiserror::Error;

use crate::directory::error::{
Incompatibility, LockError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
@@ -12,7 +14,8 @@ use crate::{query, schema};

/// Represents a `DataCorruption` error.
///
/// When facing data corruption, tantivy actually panic or return this error.
/// When facing data corruption, tantivy actually panics or returns this error.
#[derive(Clone)]
pub struct DataCorruption {
filepath: Option<PathBuf>,
comment: String,
@@ -38,9 +41,9 @@ impl DataCorruption {

impl fmt::Debug for DataCorruption {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "Data corruption: ")?;
write!(f, "Data corruption")?;
if let Some(ref filepath) = &self.filepath {
write!(f, "(in file `{:?}`)", filepath)?;
write!(f, " (in file `{:?}`)", filepath)?;
}
write!(f, ": {}.", self.comment)?;
Ok(())
@@ -48,7 +51,7 @@ impl fmt::Debug for DataCorruption {
}

/// The library's error enum
#[derive(Debug, Error)]
#[derive(Debug, Clone, Error)]
pub enum TantivyError {
/// Failed to open the directory.
#[error("Failed to open the directory: '{0:?}'")]
@@ -59,15 +62,15 @@ pub enum TantivyError {
/// Failed to open a file for write.
#[error("Failed to open file for write: '{0:?}'")]
OpenWriteError(#[from] OpenWriteError),
/// Index already exists in this directory
/// Index already exists in this directory.
#[error("Index already exists")]
IndexAlreadyExists,
/// Failed to acquire file lock
/// Failed to acquire file lock.
#[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
LockFailure(LockError, Option<String>),
/// IO Error.
#[error("An IO error occurred: '{0}'")]
IoError(#[from] io::Error),
IoError(Arc<io::Error>),
/// Data corruption.
#[error("Data corrupted: '{0:?}'")]
DataCorruption(DataCorruption),
@@ -80,23 +83,54 @@ pub enum TantivyError {
/// Invalid argument was passed by the user.
#[error("An invalid argument was passed: '{0}'")]
InvalidArgument(String),
/// An Error happened in one of the thread.
/// An Error occurred in one of the threads.
#[error("An error occurred in a thread: '{0}'")]
ErrorInThread(String),
/// An Error appeared related to opening or creating a index.
/// An Error occurred related to opening or creating a index.
#[error("Missing required index builder argument when open/create index: '{0}'")]
IndexBuilderMissingArgument(&'static str),
/// An Error appeared related to the schema.
/// An Error occurred related to the schema.
#[error("Schema error: '{0}'")]
SchemaError(String),
/// System error. (e.g.: We failed spawning a new thread)
/// System error. (e.g.: We failed spawning a new thread).
#[error("System error.'{0}'")]
SystemError(String),
/// Index incompatible with current version of tantivy
/// Index incompatible with current version of Tantivy.
#[error("{0:?}")]
IncompatibleIndex(Incompatibility),
/// An internal error occurred. This is are internal states that should not be reached.
/// e.g. a datastructure is incorrectly inititalized.
#[error("Internal error: '{0}'")]
InternalError(String),
}

#[cfg(feature = "quickwit")]
#[derive(Error, Debug)]
#[doc(hidden)]
pub enum AsyncIoError {
#[error("io::Error `{0}`")]
Io(#[from] io::Error),
#[error("Asynchronous API is unsupported by this directory")]
AsyncUnsupported,
}

#[cfg(feature = "quickwit")]
impl From<AsyncIoError> for TantivyError {
fn from(async_io_err: AsyncIoError) -> Self {
match async_io_err {
AsyncIoError::Io(io_err) => TantivyError::from(io_err),
AsyncIoError::AsyncUnsupported => {
TantivyError::SystemError(format!("{:?}", async_io_err))
}
}
}
}

impl From<io::Error> for TantivyError {
fn from(io_err: io::Error) -> TantivyError {
TantivyError::IoError(Arc::new(io_err))
}
}
impl From<DataCorruption> for TantivyError {
fn from(data_corruption: DataCorruption) -> TantivyError {
TantivyError::DataCorruption(data_corruption)
@@ -125,9 +159,21 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
}
}

impl From<chrono::ParseError> for TantivyError {
fn from(err: chrono::ParseError) -> TantivyError {
TantivyError::InvalidArgument(err.to_string())
impl From<time::error::Format> for TantivyError {
fn from(err: time::error::Format) -> TantivyError {
TantivyError::InvalidArgument(format!("Date formatting error: {err}"))
}
}

impl From<time::error::Parse> for TantivyError {
fn from(err: time::error::Parse) -> TantivyError {
TantivyError::InvalidArgument(format!("Date parsing error: {err}"))
}
}

impl From<time::error::ComponentRange> for TantivyError {
fn from(err: time::error::ComponentRange) -> TantivyError {
TantivyError::InvalidArgument(format!("Date range error: {err}"))
}
}

@@ -139,7 +185,7 @@ impl From<schema::DocParsingError> for TantivyError {

impl From<serde_json::Error> for TantivyError {
fn from(error: serde_json::Error) -> TantivyError {
TantivyError::IoError(error.into())
TantivyError::IoError(Arc::new(error.into()))
}
}

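Side note (illustration, not part of the diff): because `TantivyError::IoError` no longer uses `#[from]`, the conversion goes through the hand-written `From<io::Error>` impl shown above, so `?` on io operations keeps working for callers. A hypothetical example:

    use std::path::Path;

    use tantivy::TantivyError;

    // The `?` operator converts the io::Error into TantivyError::IoError(Arc<io::Error>).
    fn read_meta_json(path: &Path) -> Result<String, TantivyError> {
        let json = std::fs::read_to_string(path)?;
        Ok(json)
    }
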
@@ -7,7 +7,7 @@ use ownedbytes::OwnedBytes;
use crate::space_usage::ByteCount;
use crate::DocId;

/// Write a alive `BitSet`
/// Write an alive `BitSet`
///
/// where `alive_bitset` is the set of alive `DocId`.
/// Warning: this function does not call terminate. The caller is in charge of
@@ -55,19 +55,19 @@ impl AliveBitSet {
AliveBitSet::from(readonly_bitset)
}

/// Opens a delete bitset given its file.
/// Opens an alive bitset given its file.
pub fn open(bytes: OwnedBytes) -> AliveBitSet {
let bitset = ReadOnlyBitSet::open(bytes);
AliveBitSet::from(bitset)
}

/// Returns true iff the document is still "alive". In other words, if it has not been deleted.
/// Returns true if the document is still "alive". In other words, if it has not been deleted.
#[inline]
pub fn is_alive(&self, doc: DocId) -> bool {
self.bitset.contains(doc)
}

/// Returns true iff the document has been marked as deleted.
/// Returns true if the document has been marked as deleted.
#[inline]
pub fn is_deleted(&self, doc: DocId) -> bool {
!self.is_alive(doc)
@@ -79,13 +79,13 @@ impl AliveBitSet {
self.bitset.iter()
}

/// Get underlying bitset
/// Get underlying bitset.
#[inline]
pub fn bitset(&self) -> &ReadOnlyBitSet {
&self.bitset
}

/// The number of deleted docs
/// The number of alive documents.
pub fn num_alive_docs(&self) -> usize {
self.num_alive_docs
}
@@ -188,14 +188,14 @@ mod bench {
}

#[bench]
fn bench_deletebitset_iter_deser_on_fly(bench: &mut Bencher) {
fn bench_alive_bitset_iter_deser_on_fly(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000);

bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>());
}

#[bench]
fn bench_deletebitset_access(bench: &mut Bencher) {
fn bench_alive_bitset_access(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000);

bench.iter(|| {
@@ -206,14 +206,14 @@ mod bench {
}

#[bench]
fn bench_deletebitset_iter_deser_on_fly_1_8_alive(bench: &mut Bencher) {
fn bench_alive_bitset_iter_deser_on_fly_1_8_alive(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000);

bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>());
}

#[bench]
fn bench_deletebitset_access_1_8_alive(bench: &mut Bencher) {
fn bench_alive_bitset_access_1_8_alive(bench: &mut Bencher) {
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000);

bench.iter(|| {
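Side note (illustration, not part of the diff): a small sketch of the renamed `AliveBitSet` accessors, using the test helper that appears in the benchmarks above; the `tantivy::fastfield` path is an assumption.

    use tantivy::fastfield::AliveBitSet;

    fn demo() {
        // Documents 0 and 2 are deleted out of 5 documents total.
        let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 2], 5);
        assert!(alive_bitset.is_deleted(0));
        assert!(alive_bitset.is_alive(1));
        assert_eq!(alive_bitset.num_alive_docs(), 3);
    }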