Mirror of <https://github.com/quickwit-oss/tantivy.git>, synced 2025-12-27 20:42:54 +00:00.

Compare commits: `0.18...bugfix-pos` (384 commits).
**.gitattributes** (vendored, 1 changed line)

````diff
@@ -1 +0,0 @@
-cpp/* linguist-vendored
````
**.github/workflows/coverage.yml** (vendored, 4 changed lines)

````diff
@@ -12,12 +12,14 @@ jobs:
     steps:
     - uses: actions/checkout@v3
     - name: Install Rust
-      run: rustup toolchain install nightly --component llvm-tools-preview
+      run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
+    - uses: Swatinem/rust-cache@v2
     - uses: taiki-e/install-action@cargo-llvm-cov
     - name: Generate code coverage
       run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
     - name: Upload coverage to Codecov
       uses: codecov/codecov-action@v3
+      continue-on-error: true
       with:
         token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
         files: lcov.info
````
**.github/workflows/long_running.yml** (vendored, 16 changed lines)

````diff
@@ -9,16 +9,20 @@ env:
   NUM_FUNCTIONAL_TEST_ITERATIONS: 20000
 
 jobs:
-  functional_test_unsorted:
+  test:
 
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v3
+    - name: Install stable
+      uses: actions-rs/toolchain@v1
+      with:
+        toolchain: stable
+        profile: minimal
+        override: true
+
     - name: Run indexing_unsorted
       run: cargo test indexing_unsorted -- --ignored
-  functional_test_sorted:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v3
+
     - name: Run indexing_sorted
       run: cargo test indexing_sorted -- --ignored
````
**.github/workflows/test.yml** (vendored, 53 changed lines)

````diff
@@ -10,33 +10,27 @@ env:
   CARGO_TERM_COLOR: always
 
 jobs:
-  test:
+  check:
 
     runs-on: ubuntu-latest
 
     steps:
     - uses: actions/checkout@v3
-    - name: Build
-      run: cargo build --verbose --workspace
-    - name: Install latest nightly to test also against unstable feature flag
+
+    - name: Install nightly
       uses: actions-rs/toolchain@v1
       with:
         toolchain: nightly
         override: true
+        profile: minimal
         components: rustfmt
 
-    - name: Install latest nightly to test also against unstable feature flag
+    - name: Install stable
       uses: actions-rs/toolchain@v1
       with:
         toolchain: stable
         override: true
-        components: rustfmt, clippy
+        profile: minimal
+        components: clippy
 
-    - name: Run tests
-      run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints --verbose --workspace
-
-    - name: Run tests quickwit feature
-      run: cargo +stable test --features mmap,quickwit,failpoints --verbose --workspace
+    - uses: Swatinem/rust-cache@v2
 
     - name: Check Formatting
       run: cargo +nightly fmt --all -- --check
@@ -47,3 +41,34 @@ jobs:
         token: ${{ secrets.GITHUB_TOKEN }}
         args: --tests
 
+  test:
+
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        features: [
+          { label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
+          { label: "quickwit", flags: "mmap,quickwit,failpoints" }
+        ]
+
+    name: test-${{ matrix.features.label}}
+
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Install stable
+      uses: actions-rs/toolchain@v1
+      with:
+        toolchain: stable
+        profile: minimal
+        override: true
+
+    - uses: taiki-e/install-action@nextest
+    - uses: Swatinem/rust-cache@v2
+
+    - name: Run tests
+      run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace
+
+    - name: Run doctests
+      run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace
````
**.gitignore** (vendored, 1 changed line)

````diff
@@ -9,7 +9,6 @@ target/release
 Cargo.lock
 benchmark
 .DS_Store
-cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
````
**ARCHITECTURE.md**

````diff
@@ -10,6 +10,7 @@ Tantivy's bread and butter is to address the problem of full-text search :
 Given a large set of textual documents, and a text query, return the K-most relevant documents in a very efficient way. To execute these queries rapidly, the tantivy needs to build an index beforehand. The relevance score implemented in the tantivy is not configurable. Tantivy uses the same score as the default similarity used in Lucene / Elasticsearch, called [BM25](https://en.wikipedia.org/wiki/Okapi_BM25).
 
 But tantivy's scope does not stop there. Numerous features are required to power rich-search applications. For instance, one may want to:
+
 - compute the count of documents matching a query in the different section of an e-commerce website,
 - display an average price per meter square for a real estate search engine,
 - take into account historical user data to rank documents in a specific way,
@@ -22,27 +23,28 @@ rapidly select all documents matching a given predicate (also known as a query)
 collect some information about them ([See collector](#collector-define-what-to-do-with-matched-documents)).
 
 Roughly speaking the design is following these guiding principles:
+
 - Search should be O(1) in memory.
 - Indexing should be O(1) in memory. (In practice it is just sublinear)
 - Search should be as fast as possible
 
 This comes at the cost of the dynamicity of the index: while it is possible to add, and delete documents from our corpus, the tantivy is designed to handle these updates in large batches.
 
-## [core/](src/core): Index, segments, searchers.
+## [core/](src/core): Index, segments, searchers
 
 Core contains all of the high-level code to make it possible to create an index, add documents, delete documents and commit.
 
 This is both the most high-level part of tantivy, the least performance-sensitive one, the seemingly most mundane code... And paradoxically the most complicated part.
 
-### Index and Segments...
+### Index and Segments
 
-A tantivy index is a collection of smaller independent immutable segments. 
+A tantivy index is a collection of smaller independent immutable segments.
 Each segment contains its own independent set of data structures.
 
 A segment is identified by a segment id that is in fact a UUID.
 The file of a segment has the format
 
-```segment-id . ext ```
+```segment-id . ext```
 
 The extension signals which data structure (or [`SegmentComponent`](src/core/segment_component.rs)) is stored in the file.
 
@@ -52,17 +54,15 @@ On commit, one segment per indexing thread is written to disk, and the `meta.jso
 
 For a better idea of how indexing works, you may read the [following blog post](https://fulmicoton.com/posts/behold-tantivy-part2/).
 
-
 ### Deletes
 
 Deletes happen by deleting a "term". Tantivy does not offer any notion of primary id, so it is up to the user to use a field in their schema as if it was a primary id, and delete the associated term if they want to delete only one specific document.
 
-On commit, tantivy will find all of the segments with documents matching this existing term and create a [tombstone file](src/fastfield/delete.rs) that represents the bitset of the document that are deleted.
-Like all segment files, this file is immutable. Because it is possible to have more than one tombstone file at a given instant, the tombstone filename has the format ``` segment_id . commit_opstamp . del```.
+On commit, tantivy will find all of the segments with documents matching this existing term and remove from [alive bitset file](src/fastfield/alive_bitset.rs) that represents the bitset of the alive document ids.
+Like all segment files, this file is immutable. Because it is possible to have more than one alive bitset file at a given instant, the alive bitset filename has the format ```segment_id . commit_opstamp . del```.
 
 An opstamp is simply an incremental id that identifies any operation applied to the index. For instance, performing a commit or adding a document.
 
-
 ### DocId
 
 Within a segment, all documents are identified by a DocId that ranges within `[0, max_doc)`.
````
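In user code, that whole delete flow is driven through the `IndexWriter`. A minimal sketch (the field name, heap budget, and document values are illustrative; exact signatures drift slightly between tantivy versions):

```rust
use tantivy::schema::{Schema, STORED, STRING};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // A STRING field is indexed untokenized, so it can play the role of a
    // primary id even though tantivy itself has no such notion.
    let id = schema_builder.add_text_field("id", STRING | STORED);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(id => "doc-1"))?;
    writer.commit()?;

    // "Deleting" doc-1 records a delete for the term id:"doc-1". Matching
    // doc ids are only masked out via the alive bitset until a later merge
    // physically drops them.
    writer.delete_term(Term::from_field_text(id, "doc-1"));
    writer.commit()?;
    Ok(())
}
```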
````diff
@@ -74,6 +74,7 @@ The DocIds are simply allocated in the order documents are added to the index.
 
 In separate threads, tantivy's index writer search for opportunities to merge segments.
 The point of segment merge is to:
+
 - eventually get rid of tombstoned documents
 - reduce the otherwise ever-growing number of segments.
 
@@ -94,7 +95,7 @@ called [`Directory`](src/directory/directory.rs).
 Contrary to Lucene however, "files" are quite different from some kind of `io::Read` object.
 Check out [`src/directory/directory.rs`](src/directory/directory.rs) trait for more details.
 
-Tantivy ships two main directory implementation: the `MMapDirectory` and the `RAMDirectory`,
+Tantivy ships two main directory implementation: the `MmapDirectory` and the `RamDirectory`,
 but users can extend tantivy with their own implementation.
 
 ## [schema/](src/schema): What are documents?
````
````diff
@@ -104,6 +105,7 @@ Tantivy's document follows a very strict schema, decided before building any ind
 The schema defines all of the fields that the indexes [`Document`](src/schema/document.rs) may and should contain, their types (`text`, `i64`, `u64`, `Date`, ...) as well as how it should be indexed / represented in tantivy.
 
 Depending on the type of the field, you can decide to
+
 - put it in the docstore
 - store it as a fast field
 - index it
````
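In code, those per-field decisions are exactly the flags passed to the schema builder. A minimal sketch (field names are made up for illustration):

```rust
use tantivy::schema::{Schema, FAST, STORED, TEXT};

fn main() {
    let mut builder = Schema::builder();
    // Indexed and tokenized, and also stored in the docstore.
    builder.add_text_field("title", TEXT | STORED);
    // A u64 column: FAST puts it in the columnar fast-field storage,
    // STORED additionally keeps it in the docstore.
    builder.add_u64_field("price", FAST | STORED);
    // Indexed only: searchable, but not retrievable from the docstore.
    builder.add_text_field("body", TEXT);
    let _schema = builder.build(); // serializable into meta.json
}
```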
````diff
@@ -117,9 +119,10 @@ As of today, tantivy's schema imposes a 1:1 relationship between a field that is
 
 This is not something tantivy supports, and it is up to the user to duplicate field / concatenate fields before feeding them to tantivy.
 
-## General information about these data structures.
+## General information about these data structures
 
 All data structures in tantivy, have:
+
 - a writer
 - a serializer
 - a reader
````
````diff
@@ -132,7 +135,7 @@ This conversion is done by the serializer.
 Finally, the reader is in charge of offering an API to read on this on-disk read-only representation.
 In tantivy, readers are designed to require very little anonymous memory. The data is read straight from an mmapped file, and loading an index is as fast as mmapping its files.
 
-## [store/](src/store): Here is my DocId, Gimme my document!
+## [store/](src/store): Here is my DocId, Gimme my document
 
 The docstore is a row-oriented storage that, for each document, stores a subset of the fields
 that are marked as stored in the schema. The docstore is compressed using a general-purpose algorithm
@@ -146,6 +149,7 @@ Once the top 10 documents have been identified, we fetch them from the store, an
 **Not useful for**
 
 Fetching a document from the store is typically a "slow" operation. It usually consists in
+
 - searching into a compact tree-like data structure to find the position of the right block.
 - decompressing a small block
 - returning the document from this block.
````
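The intended usage pattern, sketched against the public API (the field name and query are illustrative): let the collector work over DocIds and fast fields, and touch the docstore only for the final handful of hits.

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut builder = Schema::builder();
    let title = builder.add_text_field("title", TEXT | STORED);
    let index = Index::create_in_ram(builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "Of Mice and Men"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("mice")?;
    // Scoring never touches the docstore; only the top 10 addresses do.
    for (_score, addr) in searcher.search(&query, &TopDocs::with_limit(10))? {
        let retrieved = searcher.doc(addr)?;
        println!("{}", index.schema().to_json(&retrieved));
    }
    Ok(())
}
```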
````diff
@@ -154,8 +158,7 @@ It is NOT meant to be called for every document matching a query.
 
 As a rule of thumb, if you hit the docstore more than 100 times per search query, you are probably misusing tantivy.
 
-
-## [fastfield/](src/fastfield): Here is my DocId, Gimme my value!
+## [fastfield/](src/fastfield): Here is my DocId, Gimme my value
 
 Fast fields are stored in a column-oriented storage that allows for random access.
 The only compression applied is bitpacking. The column comes with two meta data.
@@ -163,7 +166,7 @@ The minimum value in the column and the number of bits per doc.
 
 Fetching a value for a `DocId` is then as simple as computing
 
-```
+```rust
 min_value + fetch_bits(num_bits * doc_id..num_bits * (doc_id+1))
 ```
````
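Concretely, `fetch_bits` boils down to a shift-and-mask over an unaligned little-endian load. Below is a self-contained sketch of that arithmetic, not the actual tantivy implementation (which lives in the `tantivy-bitpacker` subcrate); the `num_bits <= 56` bound and the 8-byte tail padding are simplifying assumptions:

```rust
/// Reads the `doc_id`-th `num_bits`-wide value out of a bitpacked byte slice.
/// Assumes num_bits <= 56 (value always fits one unaligned u64 load) and
/// that `data` has at least 8 bytes of padding past the last value.
fn fetch_bits(data: &[u8], doc_id: u64, num_bits: u64) -> u64 {
    let mask = (1u64 << num_bits) - 1;
    let addr_in_bits = doc_id * num_bits;
    let addr = (addr_in_bits >> 3) as usize; // byte holding the first bit
    let bit_shift = addr_in_bits & 7;        // bit offset inside that byte
    let bytes: [u8; 8] = data[addr..addr + 8].try_into().unwrap();
    (u64::from_le_bytes(bytes) >> bit_shift) & mask
}

fn main() {
    // Three 5-bit values (7, 1, 19) packed least-significant-bit first.
    let mut packed = 0u64;
    for (i, v) in [7u64, 1, 19].iter().enumerate() {
        packed |= v << (5 * i);
    }
    let mut data = packed.to_le_bytes().to_vec();
    data.extend_from_slice(&[0u8; 8]); // padding for the unaligned load
    let min_value = 100u64;
    for doc_id in 0..3 {
        println!("doc {}: {}", doc_id, min_value + fetch_bits(&data, doc_id, 5));
    }
}
```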
````diff
@@ -190,7 +193,7 @@ For advanced search engine, it is possible to store all of the features required
 
 Finally facets are a specific kind of fast field, and the associated source code is in [`fastfield/facet_reader.rs`](src/fastfield/facet_reader.rs).
 
-# The inverted search index.
+# The inverted search index
 
 The inverted index is the core part of full-text search.
 When presented a new document with the text field "Hello, happy tax payer!", tantivy breaks it into a list of so-called tokens. In addition to just splitting these strings into tokens, it might also do different kinds of operations like dropping the punctuation, converting the character to lowercase, apply stemming, etc. Tantivy makes it possible to configure the operations to be applied in the schema (tokenizer/ is the place where these operations are implemented).
````
````diff
@@ -215,19 +218,18 @@ The inverted index actually consists of two data structures chained together.
 
 Where [TermInfo](src/postings/term_info.rs) is an object containing some meta data about a term.
 
-
-## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)!
+## [termdict/](src/termdict): Here is a term, give me the [TermInfo](src/postings/term_info.rs)
 
 Tantivy's term dictionary is mainly in charge of supplying the function
 
 [Term](src/schema/term.rs) ⟶ [TermInfo](src/postings/term_info.rs)
 
 It is itself broken into two parts.
 
 - [Term](src/schema/term.rs) ⟶ [TermOrdinal](src/termdict/mod.rs) is addressed by a finite state transducer, implemented by the fst crate.
 - [TermOrdinal](src/termdict/mod.rs) ⟶ [TermInfo](src/postings/term_info.rs) is addressed by the term info store.
 
-## [postings/](src/postings): Iterate over documents... very fast!
+## [postings/](src/postings): Iterate over documents... very fast
 
 A posting list makes it possible to store a sorted list of doc ids and for each doc store
 a term frequency as well.
````
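Sketched against the public API (signatures vary a little between versions), iterating a posting list follows tantivy's DocSet convention: a freshly created DocSet is already positioned on its first document, and the sentinel `TERMINATED` marks the end.

```rust
use tantivy::postings::Postings;
use tantivy::schema::{IndexRecordOption, Schema, TEXT};
use tantivy::{doc, DocSet, Index, Term, TERMINATED};

fn main() -> tantivy::Result<()> {
    let mut builder = Schema::builder();
    let body = builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(builder.build());
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(body => "hello happy tax payer"))?;
    writer.add_document(doc!(body => "hello hello"))?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let inverted_index = searcher.segment_reader(0).inverted_index(body)?;
    let term = Term::from_field_text(body, "hello");
    if let Some(mut postings) =
        inverted_index.read_postings(&term, IndexRecordOption::WithFreqs)?
    {
        // Doc ids come out sorted; term_freq() reads the frequency stored
        // alongside each doc.
        while postings.doc() != TERMINATED {
            println!("doc={} tf={}", postings.doc(), postings.term_freq());
            postings.advance();
        }
    }
    Ok(())
}
```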
````diff
@@ -249,7 +251,7 @@ For instance, when the phrase query "the art of war" does not match "the war of
 To make it possible, it is possible to specify in the schema that a field should store positions in addition to being indexed.
 
 The token positions of all of the terms are then stored in a separate file with the extension `.pos`.
-The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate throught the docset,
+The [TermInfo](src/postings/term_info.rs) gives an offset (expressed in position this time) in this file. As we iterate through the docset,
 we advance the position reader by the number of term frequencies of the current document.
 
 ## [fieldnorms/](src/fieldnorms): Here is my doc, how many tokens in this field?
````
````diff
@@ -257,7 +259,6 @@ we advance the position reader by the number of term frequencies of the current
 
 The [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) formula also requires to know the number of tokens stored in a specific field for a given document. We store this information on one byte per document in the fieldnorm.
 The fieldnorm is therefore compressed. Values up to 40 are encoded unchanged.
 
-
 ## [tokenizer/](src/tokenizer): How should we process text?
 
 Text processing is key to a good search experience.
````
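To give a feel for the one-byte encoding (this is an illustrative codec, not tantivy's actual table in `src/fieldnorm/code.rs`): exact values up to 40, then one id per doubling, decoding back to the lower bound of the bucket.

```rust
/// Illustrative fieldnorm codec: lengths up to 40 are exact, longer lengths
/// are quantized into geometrically growing buckets so any length fits a u8.
fn fieldnorm_to_id(len: u32) -> u8 {
    if len <= 40 {
        len as u8
    } else {
        let bucket = 31 - (len / 41).leading_zeros(); // floor(log2(len / 41))
        (41 + bucket) as u8 // for u32 lengths this never exceeds 67
    }
}

/// Decodes an id back to the lower bound of its bucket. Only ids the encoder
/// above can emit are meaningful.
fn id_to_fieldnorm(id: u8) -> u32 {
    if id <= 40 {
        id as u32
    } else {
        41u32 << (id - 41)
    }
}

fn main() {
    for len in [0u32, 10, 40, 41, 100, 5_000] {
        let id = fieldnorm_to_id(len);
        println!("len={len} -> id={id} -> ~{}", id_to_fieldnorm(id));
    }
}
```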
````diff
@@ -268,7 +269,6 @@ Text processing can be configured by selecting an off-the-shelf [`Tokenizer`](./
 
 Tantivy's comes with few tokenizers, but external crates are offering advanced tokenizers, such as [Lindera](https://crates.io/crates/lindera) for Japanese.
 
-
 ## [query/](src/query): Define and compose queries
 
 The [Query](src/query/query.rs) trait defines what a query is.
````
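A pipeline of the kind the tokenizer section describes is assembled from a base tokenizer plus filters. A sketch using the built-in components (roughly what the stock English stemming pipeline does; constructor details may differ between versions):

```rust
use tantivy::tokenizer::{LowerCaser, SimpleTokenizer, Stemmer, TextAnalyzer};

fn main() {
    // Split on non-alphanumeric characters, lowercase, then stem.
    let analyzer = TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(Stemmer::default());
    let mut stream = analyzer.token_stream("Hello, happy tax payers!");
    while let Some(token) = stream.next() {
        // Each token keeps its byte offsets into the original text.
        println!("{:?} @ [{}..{}]", token.text, token.offset_from, token.offset_to);
    }
}
```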
**CHANGELOG.md** (142 changed lines)

````diff
@@ -1,5 +1,36 @@
+Tantivy 0.19
+================================
+
+- Major bugfix: Fix missing fieldnorms for u64, i64, f64, bool, bytes and date [#1620](https://github.com/quickwit-oss/tantivy/pull/1620) (@PSeitz)
+- Updated [Date Field Type](https://github.com/quickwit-oss/tantivy/pull/1396)
+  The `DateTime` type has been updated to hold timestamps with microseconds precision.
+  `DateOptions` and `DatePrecision` have been added to configure Date fields. The precision is used to hint on fast values compression. Otherwise, seconds precision is used everywhere else (i.e terms, indexing). (@evanxg852000)
+- Add IP address field type [#1553](https://github.com/quickwit-oss/tantivy/pull/1553) (@PSeitz)
+- Add boolean field type [#1382](https://github.com/quickwit-oss/tantivy/pull/1382) (@boraarslan)
+- Remove Searcher pool and make `Searcher` cloneable. (@PSeitz)
+- Validate settings on create [#1570](https://github.com/quickwit-oss/tantivy/pull/1570 (@PSeitz)
+- Fix interpolation overflow in linear interpolation fastfield codec [#1480](https://github.com/quickwit-oss/tantivy/pull/1480 (@PSeitz @fulmicoton)
+- Detect and apply gcd on fastfield codecs [#1418](https://github.com/quickwit-oss/tantivy/pull/1418) (@PSeitz)
+- Doc store
+  - use separate thread to compress block store [#1389](https://github.com/quickwit-oss/tantivy/pull/1389) [#1510](https://github.com/quickwit-oss/tantivy/pull/1510 (@PSeitz @fulmicoton)
+  - Expose doc store cache size [#1403](https://github.com/quickwit-oss/tantivy/pull/1403) (@PSeitz)
+  - Enable compression levels for doc store [#1378](https://github.com/quickwit-oss/tantivy/pull/1378) (@PSeitz)
+  - Make block size configurable [#1374](https://github.com/quickwit-oss/tantivy/pull/1374) (@kryesh)
+- Make `tantivy::TantivyError` cloneable [#1402](https://github.com/quickwit-oss/tantivy/pull/1402) (@PSeitz)
+- Add support for phrase slop in query language [#1393](https://github.com/quickwit-oss/tantivy/pull/1393) (@saroh)
+- Aggregation
+  - Add support for keyed parameter in range and histgram aggregations [#1424](https://github.com/quickwit-oss/tantivy/pull/1424) (@k-yomo)
+  - Add aggregation bucket limit [#1363](https://github.com/quickwit-oss/tantivy/pull/1363) (@PSeitz)
+- Faster indexing
+  - [#1610](https://github.com/quickwit-oss/tantivy/pull/1610 (@PSeitz)
+  - [#1594](https://github.com/quickwit-oss/tantivy/pull/1594 (@PSeitz)
+  - [#1582](https://github.com/quickwit-oss/tantivy/pull/1582 (@PSeitz)
+  - [#1611](https://github.com/quickwit-oss/tantivy/pull/1611 (@PSeitz)
+
+
 Tantivy 0.18
 ================================
 
 - For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
   - The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
@@ -15,6 +46,7 @@ Tantivy 0.18
 
 Tantivy 0.17
 ================================
+
 - LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115)
 - Adds a searcher Warmer API (@shikhar @fulmicoton)
 - Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211
@@ -29,33 +61,39 @@ Tantivy 0.17
 
 Tantivy 0.16.2
 ================================
-- Bugfix in FuzzyTermQuery. (tranposition_cost_one was not doing anything)
+
+- Bugfix in FuzzyTermQuery. (transposition_cost_one was not doing anything)
 
 Tantivy 0.16.1
 ========================
+
 - Major Bugfix on multivalued fastfield. #1151
 - Demux operation (@PSeitz)
 
 Tantivy 0.16.0
 =========================
 
 - Bugfix in the filesum check. (@evanxg852000) #1127
 - Bugfix in positions when the index is sorted by a field. (@appaquet) #1125
 
 Tantivy 0.15.3
 =========================
-- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
+
+- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
 
 Tantivy 0.15.2
 ========================
+
 - Major bugfix. DocStore still panics when a deleted doc is at the beginning of a block. (@appaquet) #1088
 
 Tantivy 0.15.1
 =========================
+
 - Major bugfix. DocStore panics when first block is deleted. (@appaquet) #1077
 
 Tantivy 0.15.0
 =========================
+
 - API Changes. Using Range instead of (start, end) in the API and internals (`FileSlice`, `OwnedBytes`, `Snippets`, ...)
 This change is breaking but migration is trivial.
 - Added an Histogram collector. (@fulmicoton) #994
@@ -77,9 +115,9 @@ Tantivy 0.15.0
 - Updated TermMerger implementation to rely on the union feature of the FST (@scampi) #469
 - Add boolean marking whether position is required in the query_terms API call (@fulmicoton). #1070
 
-
 Tantivy 0.14.0
 =========================
+
 - Remove dependency to atomicwrites #833 .Implemented by @fulmicoton upon suggestion and research from @asafigan).
 - Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
 - API Change. Accessing the typed value off a `Schema::Value` now returns an Option instead of panicking if the type does not match.
@@ -98,16 +136,19 @@ This version breaks compatibility and requires users to reindex everything.
 
 Tantivy 0.13.2
 ===================
+
 Bugfix. Acquiring a facet reader on a segment that does not contain any
 doc with this facet returns `None`. (#896)
 
 Tantivy 0.13.1
 ===================
+
 Made `Query` and `Collector` `Send + Sync`.
 Updated misc dependency versions.
 
 Tantivy 0.13.0
 ======================
+
 Tantivy 0.13 introduce a change in the index format that will require
 you to reindex your index (BlockWAND information are added in the skiplist).
 The index size increase is minor as this information is only added for
@@ -122,6 +163,7 @@ so that we can discuss possible solutions.
 A freshly created DocSet point directly to their first doc. A sentinel value called TERMINATED marks the end of a DocSet.
 `.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)` and returns the resulting DocId.
 As a result, iterating through DocSet now looks as follows
+
 ```rust
 let mut doc = docset.doc();
 while doc != TERMINATED {
@@ -129,7 +171,9 @@ while doc != TERMINATED {
     doc = docset.advance();
 }
 ```
+
 The change made it possible to greatly simplify a lot of the docset's code.
+
 - Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
 - Added an offset option to the Top(.*)Collectors. (@robyoung)
 - Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
@@ -137,6 +181,7 @@ to the PISA team for answering all my questions!)
 
 Tantivy 0.12.0
 ======================
+
 - Removing static dispatch in tokenizers for simplicity. (#762)
 - Added backward iteration for `TermDictionary` stream. (@halvorboe)
 - Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
@@ -147,30 +192,32 @@ Tantivy 0.12.0
 ## How to update?
 
 Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
-minor changes. Check https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs
+minor changes. Check <https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs>
 to check for some code sample.
 
 Tantivy 0.11.3
 =======================
+
 - Fixed DateTime as a fast field (#735)
 
 Tantivy 0.11.2
 =======================
+
 - The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
 - Exposing a constructor for `WatchHandle` (#731)
 
 Tantivy 0.11.1
 =====================
-- Bug fix #729
+
+- Bug fix #729
 
 Tantivy 0.11.0
 =====================
 
 - Added f64 field. Internally reuse u64 code the same way i64 does (@fdb-hiroshima)
 - Various bugfixes in the query parser.
-- Better handling of hyphens in query parser. (#609)
-- Better handling of whitespaces.
+  - Better handling of hyphens in query parser. (#609)
+  - Better handling of whitespaces.
 - Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
@@ -201,7 +248,6 @@ Tantivy 0.10.1
 Avoid watching the mmap directory until someone effectively creates a reader that uses
 this functionality.
 
-
 Tantivy 0.10.0
 =====================
 
@@ -217,6 +263,7 @@ Tantivy 0.10.0
 
 Minor
 ---------
+
 - Switched to Rust 2018 (@uvd)
 - Small simplification of the code.
 Calling .freq() or .doc() when .advance() has never been called
@@ -224,8 +271,7 @@ on segment postings should panic from now on.
 - Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
 - Fast fields are now preloaded when the `SegmentReader` is created.
 - `IndexMeta` is now public. (@hntd187)
-- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `
-Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
+- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with a `Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` can
 only require a read lock. (@fulmicoton)
 - Introducing `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
 - Stamper now relies on `AtomicU64` on all platforms (@petr-tik)
@@ -241,16 +287,17 @@ Your program should be usable as is.
 Fast fields used to be accessed directly from the `SegmentReader`.
 The API changed, you are now required to acquire your fast field reader via the
 `segment_reader.fast_fields()`, and use one of the typed method:
 
 - `.u64()`, `.i64()` if your field is single-valued ;
 - `.u64s()`, `.i64s()` if your field is multi-valued ;
 - `.bytes()` if your field is bytes fast field.
 
 
+
 Tantivy 0.9.0
 =====================
 
 *0.9.0 index format is not compatible with the
 previous index format.*
 
 - MAJOR BUGFIX :
   Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
 - Removed most unsafe (@fulmicoton)
@@ -294,37 +341,40 @@ To update from tantivy 0.8, you will need to go through the following steps.
 
 ```
 
 
 Tantivy 0.8.2
 =====================
+
 Fixing build for x86_64 platforms. (#496)
 No need to update from 0.8.1 if tantivy
 is building on your platform.
 
 
 Tantivy 0.8.1
 =====================
+
 Hotfix of #476.
 
 Merge was reflecting deletes before commit was passed.
 Thanks @barrotsteindev for reporting the bug.
 
 
 Tantivy 0.8.0
 =====================
+
 *No change in the index format*
 
 - API Breaking change in the collector API. (@jwolfe, @fulmicoton)
 - Multithreaded search (@jwolfe, @fulmicoton)
 
 
 Tantivy 0.7.1
 =====================
 
 *No change in the index format*
 
 - Bugfix: NGramTokenizer panics on non ascii chars
 - Added a space usage API
 
 Tantivy 0.7
 =====================
 
 - Skip data for doc ids and positions (@fulmicoton),
 greatly improving performance
 - Tantivy error now rely on the failure crate (@drusellers)
@@ -334,15 +384,15 @@ Tantivy 0.7
 
 Tantivy 0.6.1
 =========================
 
 - Bugfix #324. GC removing was removing file that were still in useful
 - Added support for parsing AllQuery and RangeQuery via QueryParser
-  - AllQuery: `*`
-  - RangeQuery:
-      - Inclusive `field:[startIncl to endIncl]`
-      - Exclusive `field:{startExcl to endExcl}`
-      - Mixed `field:[startIncl to endExcl}` and vice versa
-      - Unbounded `field:[start to *]`, `field:[* to end]`
-
+  - AllQuery: `*`
+  - RangeQuery:
+    - Inclusive `field:[startIncl to endIncl]`
+    - Exclusive `field:{startExcl to endExcl}`
+    - Mixed `field:[startIncl to endExcl}` and vice versa
+    - Unbounded `field:[start to *]`, `field:[* to end]`
+
 Tantivy 0.6
 ==========================
@@ -355,58 +405,53 @@ to this release!
 - Approximate field norms encoded over 1 byte. (@fulmicoton)
 - Compiles on stable rust (@fulmicoton)
 - Add &[u8] fastfield for associating arbitrary bytes to each document (@jason-wolfe) (#270)
-    - Completely uncompressed
-    - Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
+  - Completely uncompressed
+  - Internally: One u64 fast field for indexes, one fast field for the bytes themselves.
 - Add NGram token support (@drusellers)
 - Add Stopword Filter support (@drusellers)
 - Add a FuzzyTermQuery (@drusellers)
 - Add a RegexQuery (@drusellers)
 - Various performance improvements (@fulmicoton)_
 
 
 Tantivy 0.5.2
 ===========================
 
 - bugfix #274
 - bugfix #280
 - bugfix #289
 
 
 Tantivy 0.5.1
 ==========================
-- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
 
+- bugfix #254 : tantivy failed if no documents in a segment contained a specific field.
 
 Tantivy 0.5
 ==========================
 
 - Faceting
 - RangeQuery
 - Configurable tokenization pipeline
 - Bugfix in PhraseQuery
 - Various query optimisation
 - Allowing very large indexes
-    - 64 bits file address
-    - Smarter encoding of the `TermInfo` objects
-
-
+  - 64 bits file address
+  - Smarter encoding of the `TermInfo` objects
 
 Tantivy 0.4.3
 ==========================
 
 - Bugfix race condition when deleting files. (#198)
 
 
 Tantivy 0.4.2
 ==========================
 
 - Prevent usage of AVX2 instructions (#201)
 
 
 Tantivy 0.4.1
 ==========================
 
 - Bugfix for non-indexed fields. (#199)
 
 
 Tantivy 0.4.0
 ==========================
 
@@ -421,37 +466,31 @@ Tantivy 0.4.0
 - Searching for a non-indexed field returns an explicit Error
 - Phrase query for non-tokenized field are not tokenized by the query parser.
 - Faster/Better indexing (@fulmicoton)
-    - using murmurhash2
-    - faster merging
-    - more memory efficient fast field writer (@lnicola )
-    - better handling of collisions
-    - lesser memory usage
+  - using murmurhash2
+  - faster merging
+  - more memory efficient fast field writer (@lnicola )
+  - better handling of collisions
+  - lesser memory usage
 - Added API, most notably to iterate over ranges of terms (@fulmicoton)
 - Bugfix that was preventing to unmap segment files, on index drop (@fulmicoton)
 - Made the doc! macro public (@fulmicoton)
 - Added an alternative implementation of the streaming dictionary (@fulmicoton)
 
-
-
 Tantivy 0.3.1
 ==========================
 
 - Expose a method to trigger files garbage collection
 
-
-
 Tantivy 0.3
 ==========================
 
 
 Special thanks to @Kodraus @lnicola @Ameobea @manuel-woelker @celaus
 for their contribution to this release.
 
 Thanks also to everyone in tantivy gitter chat
 for their advise and company :)
 
-https://gitter.im/tantivy-search/tantivy
+<https://gitter.im/tantivy-search/tantivy>
 
 Warning:
 
@@ -460,19 +499,16 @@ code and index format.
 You should not expect backward compatibility before
 tantivy 1.0.
 
-
-
 New Features
 ------------
 
 - Delete. You can now delete documents from an index.
 - Support for windows (Thanks to @lnicola)
 
-
 Various Bugfixes & small improvements
 ----------------------------------------
 
-- Added CI for Windows (https://ci.appveyor.com/project/fulmicoton/tantivy)
+- Added CI for Windows (<https://ci.appveyor.com/project/fulmicoton/tantivy>)
 Thanks to @KodrAus ! (#108)
 - Various dependy version update (Thanks to @Ameobea) #76
 - Fixed several race conditions in `Index.wait_merge_threads`
@@ -484,7 +520,3 @@ Thanks to @KodrAus ! (#108)
 - Building binary targets for tantivy-cli (Thanks to @KodrAus)
 - Misc invisible bug fixes, and code cleanup.
-- Use
-
-
-
 
````
**Cargo.toml** (18 changed lines)

````diff
@@ -11,6 +11,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2021"
+rust-version = "1.62"
 
 [dependencies]
 oneshot = "0.1.3"
@@ -19,18 +20,18 @@ byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
 regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
-tantivy-fst = "0.3.0"
+tantivy-fst = "0.4.0"
 memmap2 = { version = "0.5.3", optional = true }
 lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
 brotli = { version = "3.3.4", optional = true }
-zstd = { version = "0.11", optional = true }
+zstd = { version = "0.11", optional = true, default-features = false }
 snap = { version = "1.0.5", optional = true }
 tempfile = { version = "3.3.0", optional = true }
 log = "0.4.16"
 serde = { version = "1.0.136", features = ["derive"] }
 serde_json = "1.0.79"
 num_cpus = "1.13.1"
-fs2={ version = "0.4.3", optional = true }
+fs2 = { version = "0.4.3", optional = true }
 levenshtein_automata = "0.2.1"
 uuid = { version = "1.0.0", features = ["v4", "serde"] }
 crossbeam-channel = "0.5.4"
@@ -49,16 +50,16 @@ thiserror = "1.0.30"
 htmlescape = "0.3.1"
 fail = "0.5.0"
 murmurhash32 = "0.2.0"
-time = { version = "0.3.9", features = ["serde-well-known"] }
+time = { version = "0.3.10", features = ["serde-well-known"] }
 smallvec = "1.8.0"
 rayon = "1.5.2"
 lru = "0.7.5"
 fastdivide = "0.4.0"
 itertools = "0.10.3"
 measure_time = "0.8.2"
 pretty_assertions = "1.2.1"
-serde_cbor = { version = "0.11.2", optional = true }
+ciborium = { version = "0.2", optional = true}
 async-trait = "0.1.53"
 arc-swap = "1.5.0"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"
@@ -67,11 +68,12 @@ winapi = "0.3.9"
 rand = "0.8.5"
 maplit = "1.0.2"
 matches = "0.1.9"
+pretty_assertions = "1.2.1"
 proptest = "1.0.0"
 criterion = "0.3.5"
 test-log = "0.2.10"
 env_logger = "0.9.0"
-pprof = { version = "0.9.0", features = ["flamegraph", "criterion"] }
+pprof = { version = "0.10.0", features = ["flamegraph", "criterion"] }
 futures = "0.3.21"
 
 [dev-dependencies.fail]
@@ -99,7 +101,7 @@ zstd-compression = ["zstd"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
 
-quickwit = ["serde_cbor"]
+quickwit = ["ciborium"]
 
 [workspace]
 members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
````
**README.md** (36 changed lines)

````diff
@@ -5,7 +5,6 @@
 [](https://opensource.org/licenses/MIT)
 [](https://crates.io/crates/tantivy)
 
-
 
 **Tantivy** is a **full-text search engine library** written in Rust.
@@ -16,7 +15,7 @@ to build such a search engine.
 
 Tantivy is, in fact, strongly inspired by Lucene's design.
 
-If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy. 
+If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
 
 # Benchmark
 
@@ -57,10 +56,9 @@ Your mileage WILL vary depending on the nature of queries and their load.
 
 Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).
 
-
 # Getting started
 
-Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
+Tantivy works on stable Rust and supports Linux, macOS, and Windows.
 
 - [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
 - [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
@@ -83,9 +81,13 @@ There are many ways to support this project.
 
 We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
 
+## Minimum supported Rust version
+
+Tantivy currently requires at least Rust 1.62 or later to compile.
+
 ## Clone and build locally
 
-Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
+Tantivy compiles on stable Rust.
 To check out and run tests, you can simply run:
 
 ```bash
@@ -125,20 +127,23 @@ By default, `rustc` compiles everything in the `examples/` directory in debug mo
 rust-gdb target/debug/examples/$EXAMPLE_NAME
 $ gdb run
 ```
-# Companies Using Tantivy
 
+# Companies Using Tantivy
 
 <p align="left">
 <img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />
 <img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" />
 <img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
 <img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />
 <img align="center" src="doc/assets/images/nuclia-dark-theme.png#gh-dark-mode-only" alt="Nuclia" height="35" width="auto" />
 <img align="center" src="doc/assets/images/humanfirst.ai-dark-theme.png#gh-dark-mode-only" alt="Humanfirst.ai" height="25" width="auto" />
-</p>
 
+</p>
 
 # FAQ
 
 ### Can I use Tantivy in other languages?
 
 - Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py)
 - Ruby → [tantiny](https://github.com/baygeldin/tantiny)
@@ -152,4 +157,17 @@ You can also find other bindings on [GitHub](https://github.com/search?q=tantivy
 - and [more](https://github.com/search?q=tantivy)!
 
 ### On average, how much faster is Tantivy compared to Lucene?
-- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
+
+- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
+
+### Does tantivy support incremental indexing?
+
+- Yes.
+
+### How can I edit documents?
+
+- Data in tantivy is immutable. To edit a document, the document needs to be deleted and reindexed.
+
+### When will my documents be searchable during indexing?
+
+- Documents will be searchable after a `commit` is called on an `IndexWriter`. Existing `IndexReader`s will also need to be reloaded in order to reflect the changes. Finally, changes are only visible to newly acquired `Searcher`.
````
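The last FAQ entry added above is easy to demonstrate end to end. A minimal sketch (in-memory index, illustrative field name; signatures drift slightly between versions) showing that matches only become visible after `commit` plus a reader reload:

```rust
use tantivy::collector::Count;
use tantivy::query::TermQuery;
use tantivy::schema::{IndexRecordOption, Schema, STRING};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut builder = Schema::builder();
    let id = builder.add_text_field("id", STRING);
    let index = Index::create_in_ram(builder.build());
    let reader = index.reader()?;

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(id => "a"))?;

    let query = TermQuery::new(Term::from_field_text(id, "a"), IndexRecordOption::Basic);
    // Not committed yet: invisible to searches.
    assert_eq!(reader.searcher().search(&query, &Count)?, 0);

    writer.commit()?;
    // After a commit, the reader must be reloaded, and only a newly
    // acquired Searcher sees the new document.
    reader.reload()?;
    assert_eq!(reader.searcher().search(&query, &Count)?, 1);
    Ok(())
}
```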
**bitpacker/Cargo.toml**

````diff
@@ -1,7 +1,7 @@
 [package]
 name = "tantivy-bitpacker"
 version = "0.2.0"
-edition = "2018"
+edition = "2021"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = []
````
**bitpacker/src/bitpacker.rs**

````diff
@@ -82,14 +82,16 @@ impl BitUnpacker {
         }
     }
 
+    pub fn bit_width(&self) -> u8 {
+        self.num_bits as u8
+    }
+
     #[inline]
     pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
         if self.num_bits == 0 {
             return 0u64;
         }
-        let num_bits = self.num_bits;
-        let mask = self.mask;
-        let addr_in_bits = idx * num_bits;
+        let addr_in_bits = idx * self.num_bits;
         let addr = addr_in_bits >> 3;
         let bit_shift = addr_in_bits & 7;
         debug_assert!(
@@ -101,7 +103,7 @@ impl BitUnpacker {
             .unwrap();
         let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
         let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
-        val_shifted & mask
+        val_shifted & self.mask
     }
 }
````
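For reference, here is how the packer/unpacker pair fits together, sketched against the `tantivy-bitpacker` API at this revision. The trailing padding is an assumption about the caller's responsibility, since `get` issues unaligned 8-byte loads past the last packed value:

```rust
use tantivy_bitpacker::{BitPacker, BitUnpacker};

fn main() -> std::io::Result<()> {
    let vals = [7u64, 1, 19, 3];
    let num_bits = 5;

    // Pack four 5-bit values into a byte buffer.
    let mut data = Vec::new();
    let mut packer = BitPacker::new();
    for &v in &vals {
        packer.write(v, num_bits, &mut data)?;
    }
    packer.close(&mut data)?;
    // Assumption: keep padding past the last value for the unaligned loads.
    data.extend_from_slice(&[0u8; 8]);

    // Random access back out of the packed buffer.
    let unpacker = BitUnpacker::new(num_bits);
    for (i, &v) in vals.iter().enumerate() {
        assert_eq!(unpacker.get(i as u64, &data), v);
    }
    Ok(())
}
```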
**bitpacker/src/blocked_bitpacker.rs**

````diff
@@ -58,6 +58,10 @@ fn metadata_test() {
     assert_eq!(meta.num_bits(), 6);
 }
 
+fn mem_usage<T>(items: &Vec<T>) -> usize {
+    items.capacity() * std::mem::size_of::<T>()
+}
+
 impl BlockedBitpacker {
     pub fn new() -> Self {
         let mut compressed_blocks = vec![];
@@ -73,10 +77,8 @@ impl BlockedBitpacker {
     pub fn mem_usage(&self) -> usize {
         std::mem::size_of::<BlockedBitpacker>()
             + self.compressed_blocks.capacity()
-            + self.offset_and_bits.capacity()
-                * std::mem::size_of_val(&self.offset_and_bits.get(0).cloned().unwrap_or_default())
-            + self.buffer.capacity()
-                * std::mem::size_of_val(&self.buffer.get(0).cloned().unwrap_or_default())
+            + mem_usage(&self.offset_and_bits)
+            + mem_usage(&self.buffer)
     }
 
     #[inline]
````
**common/Cargo.toml**

````diff
@@ -3,7 +3,7 @@ name = "tantivy-common"
 version = "0.3.0"
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 license = "MIT"
-edition = "2018"
+edition = "2021"
 description = "common traits and utility functions used by multiple tantivy subcrates"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
````
**common/src/bitset.rs**

````diff
@@ -259,11 +259,7 @@ impl BitSet {
         // we do not check saturated els.
         let higher = el / 64u32;
         let lower = el % 64u32;
-        self.len += if self.tinysets[higher as usize].insert_mut(lower) {
-            1
-        } else {
-            0
-        };
+        self.len += u64::from(self.tinysets[higher as usize].insert_mut(lower));
     }
 
     /// Inserts an element in the `BitSet`
@@ -272,11 +268,7 @@ impl BitSet {
         // we do not check saturated els.
         let higher = el / 64u32;
         let lower = el % 64u32;
-        self.len -= if self.tinysets[higher as usize].remove_mut(lower) {
-            1
-        } else {
-            0
-        };
+        self.len -= u64::from(self.tinysets[higher as usize].remove_mut(lower));
     }
 
     /// Returns true iff the elements is in the `BitSet`.
@@ -285,7 +277,7 @@ impl BitSet {
         self.tinyset(el / 64u32).contains(el % 64)
     }
 
-    /// Returns the first non-empty `TinySet` associated to a bucket lower
+    /// Returns the first non-empty `TinySet` associated with a bucket lower
     /// or greater than bucket.
     ///
     /// Reminder: the tiny set with the bucket `bucket`, represents the
````
**common/src/lib.rs**

````diff
@@ -11,7 +11,10 @@ mod writer;
 
 pub use bitset::*;
 pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
-pub use vint::{read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt};
+pub use vint::{
+    deserialize_vint_u128, read_u32_vint, read_u32_vint_no_advance, serialize_vint_u128,
+    serialize_vint_u32, write_u32_vint, VInt, VIntU128,
+};
 pub use writer::{AntiCallToken, CountingWriter, TerminatingWrite};
 
 /// Has length trait
@@ -52,13 +55,13 @@ const HIGHEST_BIT: u64 = 1 << 63;
 /// to values over 2^63, and all values end up requiring 64 bits.
 ///
 /// # See also
-/// The [reverse mapping is `u64_to_i64`](./fn.u64_to_i64.html).
+/// The reverse mapping is [`u64_to_i64()`].
 #[inline]
 pub fn i64_to_u64(val: i64) -> u64 {
     (val as u64) ^ HIGHEST_BIT
 }
 
-/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
+/// Reverse the mapping given by [`i64_to_u64()`].
 #[inline]
 pub fn u64_to_i64(val: u64) -> i64 {
     (val ^ HIGHEST_BIT) as i64
@@ -80,7 +83,7 @@ pub fn u64_to_i64(val: u64) -> i64 {
 /// explains the mapping in a clear manner.
 ///
 /// # See also
-/// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
+/// The reverse mapping is [`u64_to_f64()`].
 #[inline]
 pub fn f64_to_u64(val: f64) -> u64 {
     let bits = val.to_bits();
@@ -91,7 +94,7 @@ pub fn f64_to_u64(val: f64) -> u64 {
     }
 }
 
-/// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html).
+/// Reverse the mapping given by [`f64_to_u64()`].
 #[inline]
 pub fn u64_to_f64(val: u64) -> f64 {
     f64::from_bits(if val & HIGHEST_BIT != 0 {
@@ -104,8 +107,6 @@ pub fn u64_to_f64(val: u64) -> f64 {
 #[cfg(test)]
 pub mod test {
 
-    use std::f64;
-
     use proptest::prelude::*;
 
     use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
@@ -135,11 +136,11 @@ pub mod test {
 
     #[test]
     fn test_i64_converter() {
-        assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
-        assert_eq!(i64_to_u64(i64::max_value()), u64::max_value());
+        assert_eq!(i64_to_u64(i64::MIN), u64::MIN);
+        assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
         test_i64_converter_helper(0i64);
-        test_i64_converter_helper(i64::min_value());
-        test_i64_converter_helper(i64::max_value());
+        test_i64_converter_helper(i64::MIN);
+        test_i64_converter_helper(i64::MAX);
         for i in -1000i64..1000i64 {
             test_i64_converter_helper(i);
         }
````
@@ -19,7 +19,7 @@ pub trait DeserializeFrom<T: BinarySerializable> {
|
||||
|
||||
/// Implement deserialize from &[u8] for all types which implement BinarySerializable.
|
||||
///
|
||||
/// TryFrom would actually be preferrable, but not possible because of the orphan
|
||||
/// TryFrom would actually be preferable, but not possible because of the orphan
|
||||
/// rules (not completely sure if this could be resolved)
|
||||
impl<T: BinarySerializable> DeserializeFrom<T> for &[u8] {
|
||||
fn deserialize(&mut self) -> io::Result<T> {
|
||||
@@ -107,6 +107,19 @@ impl FixedSize for u64 {
|
||||
const SIZE_IN_BYTES: usize = 8;
|
||||
}
|
||||
|
||||
impl BinarySerializable for u128 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u128::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
reader.read_u128::<Endianness>()
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for u128 {
|
||||
const SIZE_IN_BYTES: usize = 16;
|
||||
}
|
||||
|
||||
impl BinarySerializable for f32 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_f32::<Endianness>(*self)
|
||||
@@ -161,8 +174,7 @@ impl FixedSize for u8 {
|
||||
|
||||
impl BinarySerializable for bool {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let val = if *self { 1 } else { 0 };
|
||||
writer.write_u8(val)
|
||||
writer.write_u8(u8::from(*self))
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
|
||||
let val = reader.read_u8()?;
|
||||
@@ -229,7 +241,7 @@ pub mod test {
|
||||
fixed_size_test::<u32>();
|
||||
assert_eq!(4, serialize_test(3u32));
|
||||
assert_eq!(4, serialize_test(5u32));
|
||||
assert_eq!(4, serialize_test(u32::max_value()));
|
||||
assert_eq!(4, serialize_test(u32::MAX));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -247,6 +259,11 @@ pub mod test {
|
||||
fixed_size_test::<u64>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_bool() {
|
||||
fixed_size_test::<bool>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_string() {
|
||||
assert_eq!(serialize_test(String::from("")), 1);
|
||||
@@ -272,6 +289,6 @@ pub mod test {
|
||||
assert_eq!(serialize_test(VInt(1234u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(16_383u64)), 2);
|
||||
assert_eq!(serialize_test(VInt(16_384u64)), 3);
|
||||
assert_eq!(serialize_test(VInt(u64::max_value())), 10);
|
||||
assert_eq!(serialize_test(VInt(u64::MAX)), 10);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,75 @@ use byteorder::{ByteOrder, LittleEndian};
|
||||
|
||||
use super::BinarySerializable;
|
||||
|
||||
/// Serializes a u128 number as a variable-length int.
|
||||
pub fn serialize_vint_u128(mut val: u128, output: &mut Vec<u8>) {
|
||||
loop {
|
||||
let next_byte: u8 = (val % 128u128) as u8;
|
||||
val /= 128u128;
|
||||
if val == 0 {
|
||||
output.push(next_byte | STOP_BIT);
|
||||
return;
|
||||
} else {
|
||||
output.push(next_byte);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Deserializes a u128 number
|
||||
///
|
||||
/// Returns the number and the slice after the vint
|
||||
pub fn deserialize_vint_u128(data: &[u8]) -> io::Result<(u128, &[u8])> {
|
||||
let mut result = 0u128;
|
||||
let mut shift = 0u64;
|
||||
for i in 0..19 {
|
||||
let b = data[i];
|
||||
result |= u128::from(b % 128u8) << shift;
|
||||
if b >= STOP_BIT {
|
||||
return Ok((result, &data[i + 1..]));
|
||||
}
|
||||
shift += 7;
|
||||
}
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"Failed to deserialize u128 vint",
|
||||
))
|
||||
}
|
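A hedged round-trip sketch of the encoding implemented above, assuming `STOP_BIT` is `0x80` (which the termination check `b >= STOP_BIT` suggests); 19 bytes suffice since `ceil(128 / 7) = 19`:

```rust
// Each byte carries 7 payload bits, least significant group first;
// the stop bit marks the final byte.
let mut buf = Vec::new();
serialize_vint_u128(300u128, &mut buf);
assert_eq!(buf, vec![44u8, 130u8]); // 300 = 44 + 2 * 128; 130 = 2 | 0x80
let (val, rest) = deserialize_vint_u128(&buf).unwrap();
assert_eq!(val, 300);
assert!(rest.is_empty());
```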
||||
|
||||
/// Wrapper over a `u128` that serializes as a variable int.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub struct VIntU128(pub u128);
|
||||
|
||||
impl BinarySerializable for VIntU128 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let mut buffer = vec![];
|
||||
serialize_vint_u128(self.0, &mut buffer);
|
||||
writer.write_all(&buffer)
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let mut bytes = reader.bytes();
|
||||
let mut result = 0u128;
|
||||
let mut shift = 0u64;
|
||||
loop {
|
||||
match bytes.next() {
|
||||
Some(Ok(b)) => {
|
||||
result |= u128::from(b % 128u8) << shift;
|
||||
if b >= STOP_BIT {
|
||||
return Ok(VIntU128(result));
|
||||
}
|
||||
shift += 7;
|
||||
}
|
||||
_ => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"Reach end of buffer while reading VInt",
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper over a `u64` that serializes as a variable int.
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub struct VInt(pub u64);
|
||||
@@ -176,6 +245,7 @@ impl BinarySerializable for VInt {
|
||||
mod tests {
|
||||
|
||||
use super::{serialize_vint_u32, BinarySerializable, VInt};
|
||||
use crate::vint::{deserialize_vint_u128, serialize_vint_u128, VIntU128};
|
||||
|
||||
fn aux_test_vint(val: u64) {
|
||||
let mut v = [14u8; 10];
|
||||
@@ -199,7 +269,7 @@ mod tests {
|
||||
aux_test_vint(0);
|
||||
aux_test_vint(1);
|
||||
aux_test_vint(5);
|
||||
aux_test_vint(u64::max_value());
|
||||
aux_test_vint(u64::MAX);
|
||||
for i in 1..9 {
|
||||
let power_of_128 = 1u64 << (7 * i);
|
||||
aux_test_vint(power_of_128 - 1u64);
|
||||
@@ -217,6 +287,26 @@ mod tests {
|
||||
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
|
||||
}
|
||||
|
||||
fn aux_test_vint_u128(val: u128) {
|
||||
let mut data = vec![];
|
||||
serialize_vint_u128(val, &mut data);
|
||||
let (deser_val, _data) = deserialize_vint_u128(&data).unwrap();
|
||||
assert_eq!(val, deser_val);
|
||||
|
||||
let mut out = vec![];
|
||||
VIntU128(val).serialize(&mut out).unwrap();
|
||||
let deser_val = VIntU128::deserialize(&mut &out[..]).unwrap();
|
||||
assert_eq!(val, deser_val.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vint_u128() {
|
||||
aux_test_vint_u128(0);
|
||||
aux_test_vint_u128(1);
|
||||
aux_test_vint_u128(u128::MAX / 3);
|
||||
aux_test_vint_u128(u128::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vint_u32() {
|
||||
aux_test_serialize_vint_u32(0);
|
||||
@@ -228,6 +318,6 @@ mod tests {
|
||||
aux_test_serialize_vint_u32(power_of_128);
|
||||
aux_test_serialize_vint_u32(power_of_128 + 1u32);
|
||||
}
|
||||
aux_test_serialize_vint_u32(u32::max_value());
|
||||
aux_test_serialize_vint_u32(u32::MAX);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,14 +55,14 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
|
||||
}
|
||||
|
||||
/// Struct used to prevent from calling
|
||||
/// [`terminate_ref`](trait.TerminatingWrite.html#tymethod.terminate_ref) directly
|
||||
/// [`terminate_ref`](TerminatingWrite::terminate_ref) directly
|
||||
///
|
||||
/// The point is that while the type is public, it cannot be built by anyone
|
||||
/// outside of this module.
|
||||
pub struct AntiCallToken(());
|
||||
|
||||
/// Trait used to indicate when no more writes need to be done on a writer
|
||||
pub trait TerminatingWrite: Write {
|
||||
pub trait TerminatingWrite: Write + Send + Sync {
|
||||
/// Indicates that the writer will no longer be used. Internally calls `terminate_ref`.
|
||||
fn terminate(mut self) -> io::Result<()>
|
||||
where Self: Sized {
|
||||
|
||||
BIN doc/assets/images/etsy.png (new file, 85 KiB; binary file not shown)
@@ -1,7 +1,5 @@
|
||||
# Summary
|
||||
|
||||
|
||||
|
||||
[Avant Propos](./avant-propos.md)
|
||||
|
||||
- [Segments](./basis.md)
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
> Tantivy is a **search** engine **library** for Rust.
|
||||
|
||||
If you are familiar with Lucene, it's an excellent approximation to consider tantivy as Lucene for rust. tantivy is heavily inspired by Lucene's design and
|
||||
they both have the same scope and targetted use cases.
|
||||
they both have the same scope and targeted use cases.
|
||||
|
||||
If you are not familiar with Lucene, let's break down our little tagline.
|
||||
|
||||
@@ -31,4 +31,4 @@ relevancy, collapsing, highlighting, spatial search.
|
||||
index from a different format.
|
||||
|
||||
Tantivy exposes a lot of low-level APIs to do all of these things.
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ directory shipped with tantivy is the `MmapDirectory`.
|
||||
While this design has some downsides, this greatly simplifies the source code of
|
||||
tantivy. Caching is also entirely delegated to the OS.
|
||||
|
||||
`tantivy` works entirely (or almost) by directly reading the datastructures as they are layed on disk. As a result, the act of opening an indexing does not involve loading different datastructures from the disk into random access memory : starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
|
||||
`tantivy` works entirely (or almost) by directly reading the datastructures as they are laid out on disk. As a result, opening an index does not involve loading different datastructures from disk into random access memory: starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
|
||||
|
||||
This is an interesting property for a command line search engine, or for some multi-tenant log search engine: spawning a new process for each new query can be a perfectly sensible solution in some use cases.
|
||||
|
||||
@@ -22,7 +22,6 @@ Of course this is crucial to reduce IO, and ensure that as much of our index can
|
||||
Also, whenever possible its data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access the data from your spinning hard disk, but this is also
|
||||
critical for performance if your data is read from an `SSD` or is even already in your pagecache.
|
||||
|
||||
|
||||
## Segments, and the log method
|
||||
|
||||
That kind of compact layout comes at one cost: it prevents our datastructures from being dynamic.
|
||||
@@ -51,13 +50,9 @@ to get tantivy to fit your use case:
|
||||
|
||||
*Example 1* You could for instance use hadoop to build a very large search index in a timely manner, copy all of the resulting segment files in the same directory and edit the `meta.json` to get a functional index.[^2]
|
||||
|
||||
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
|
||||
*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated with segment `D-7`.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Merging
|
||||
## Merging
|
||||
|
||||
As you index more and more data, your index will accumulate more and more segments.
|
||||
Having a lot of small segments is not really optimal. There is a bit of redundancy in having
|
||||
@@ -66,11 +61,7 @@ all these term dictionary. Also when searching, we will need to do term lookups
|
||||
That's where merging or compacting comes into play. Tantivy will continuously consider merge
|
||||
opportunities and start merging segments in the background.
|
||||
|
||||
|
||||
# Indexing throughput, number of indexing threads
|
||||
|
||||
|
||||
|
||||
## Indexing throughput, number of indexing threads
|
||||
|
||||
[^1]: This may eventually change.
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
# Examples
|
||||
|
||||
- [Basic search](/examples/basic_search.html)
|
||||
- [Basic search](/examples/basic_search.html)
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
|
||||
- [Index Sorting](#index-sorting)
|
||||
+ [Why Sorting](#why-sorting)
|
||||
* [Compression](#compression)
|
||||
* [Top-N Optimization](#top-n-optimization)
|
||||
* [Pruning](#pruning)
|
||||
* [Other](#other)
|
||||
+ [Usage](#usage)
|
||||
- [Why Sorting](#why-sorting)
|
||||
- [Compression](#compression)
|
||||
- [Top-N Optimization](#top-n-optimization)
|
||||
- [Pruning](#pruning)
|
||||
- [Other](#other)
|
||||
- [Usage](#usage)
|
||||
|
||||
# Index Sorting
|
||||
|
||||
@@ -15,32 +15,34 @@ Tantivy allows you to sort the index according to a property.
|
||||
|
||||
Presorting an index has several advantages:
|
||||
|
||||
###### Compression
|
||||
### Compression
|
||||
|
||||
When data is sorted it is easier to compress the data. E.g. the numbers sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5].
|
||||
When data is sorted it is easier to compress the data. E.g. the numbers sequence [5, 2, 3, 1, 4] would be sorted to [1, 2, 3, 4, 5].
|
||||
If we apply delta encoding, the unsorted list becomes [5, -3, 1, -2, 3], while the sorted one becomes [1, 1, 1, 1, 1].
|
||||
Compression ratio is mainly affected on the fast field of the sorted property, every thing else is likely unaffected.
|
||||
###### Top-N Optimization
|
||||
Compression ratio is mainly affected for the fast field of the sorted property; everything else is likely unaffected.
|
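To make the delta-encoding point concrete, here is a hedged sketch in plain Rust (illustrative only, not tantivy's actual codec):

```rust
// Delta encoding keeps the first value and stores successive differences.
fn delta_encode(vals: &[i64]) -> Vec<i64> {
    let mut out = Vec::with_capacity(vals.len());
    let mut prev = 0;
    for &v in vals {
        out.push(v - prev);
        prev = v;
    }
    out
}

fn main() {
    assert_eq!(delta_encode(&[5, 2, 3, 1, 4]), vec![5, -3, 1, -2, 3]);
    // The sorted input collapses to a constant, highly compressible sequence.
    assert_eq!(delta_encode(&[1, 2, 3, 4, 5]), vec![1, 1, 1, 1, 1]);
}
```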
||||
|
||||
When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
|
||||
### Top-N Optimization
|
||||
|
||||
When data is presorted by a field and search queries request sorting by the same field, we can leverage the natural order of the documents.
|
||||
E.g. if the data is sorted by timestamp and we want the top n newest docs containing a term, we can simply leverage the order of the docids.
|
||||
|
||||
Note: Tantivy 0.16 does not do this optimization yet.
|
||||
|
||||
###### Pruning
|
||||
### Pruning
|
||||
|
||||
Let's say we want all documents and want to apply the filter `>= 2010-08-11`. When the data is sorted, we could make a lookup in the fast field to find the docid range and use this as the filter.
|
||||
|
||||
Note: Tantivy 0.16 does not do this optimization yet.
|
||||
|
||||
###### Other?
|
||||
### Other?
|
||||
|
||||
In principle there are many possible algorithms that exploit the monotonically increasing nature (aggregations maybe?).
|
||||
|
||||
## Usage
|
||||
The index sorting can be configured setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of tantvy 0.16 only fast fields are allowed to be used.
|
||||
|
||||
```
|
||||
The index sorting can be configured setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of Tantivy 0.16 only fast fields are allowed to be used.
|
||||
|
||||
```rust
|
||||
let settings = IndexSettings {
|
||||
sort_by_field: Some(IndexSortByField {
|
||||
field: "intval".to_string(),
|
||||
@@ -58,4 +60,3 @@ let index = index_builder.create_in_ram().unwrap();
|
||||
Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073).
|
||||
|
||||
In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets).
|
||||
|
||||
|
||||
@@ -21,16 +21,17 @@ For instance, if user is a json field, the following document:
|
||||
```
|
||||
|
||||
emits the following tokens:
|
||||
- ("name", Text, "Paul")
|
||||
- ("name", Text, "Masurel")
|
||||
- ("address.city", Text, "Tokyo")
|
||||
- ("address.country", Text, "Japan")
|
||||
- ("created_at", Date, 15420648505)
|
||||
|
||||
- ("name", Text, "Paul")
|
||||
- ("name", Text, "Masurel")
|
||||
- ("address.city", Text, "Tokyo")
|
||||
- ("address.country", Text, "Japan")
|
||||
- ("created_at", Date, 15420648505)
|
||||
|
||||
# Bytes-encoding and lexicographical sort.
|
||||
## Bytes-encoding and lexicographical sort
|
||||
|
||||
Like any other terms, these triplets are encoded into a binary format as follows.
|
||||
|
||||
- `json_path`: the json path is a sequence of "segments". In the example above, `address.city`
|
||||
is just a debug representation of the json path `["address", "city"]`.
|
||||
Its representation is done by separating segments by a unicode char `\x01`, and ending the path by `\x00`.
|
||||
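A hedged sketch of the path encoding just described (illustrative only; the separator and terminator bytes are as stated above):

```rust
// Segments are joined by \x01 and the path is terminated by \x00.
fn encode_json_path(segments: &[&str]) -> Vec<u8> {
    let mut out = Vec::new();
    for (i, segment) in segments.iter().enumerate() {
        if i > 0 {
            out.push(0x01);
        }
        out.extend_from_slice(segment.as_bytes());
    }
    out.push(0x00);
    out
}

fn main() {
    assert_eq!(encode_json_path(&["address", "city"]), b"address\x01city\x00");
}
```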
@@ -41,16 +42,16 @@ This representation is designed to align the natural sort of Terms with the lexi
|
||||
of their binary representation (Tantivy's dictionary (whether fst or sstable) is sorted and does prefix encoding).
|
||||
|
||||
In the example above, the terms will be sorted as
|
||||
- ("address.city", Text, "Tokyo")
|
||||
- ("address.country", Text, "Japan")
|
||||
- ("name", Text, "Masurel")
|
||||
- ("name", Text, "Paul")
|
||||
- ("created_at", Date, 15420648505)
|
||||
|
||||
- ("address.city", Text, "Tokyo")
|
||||
- ("address.country", Text, "Japan")
|
||||
- ("name", Text, "Masurel")
|
||||
- ("name", Text, "Paul")
|
||||
- ("created_at", Date, 15420648505)
|
||||
|
||||
As seen in "pitfalls", we may end up having to search for a value for a same path in several different fields. Putting the field code after the path makes it maximizes compression opportunities but also increases the chances for the two terms to end up in the actual same term dictionary block.
|
||||
|
||||
|
||||
# Pitfalls, limitation and corner cases.
|
||||
## Pitfalls, limitation and corner cases
|
||||
|
||||
Json gives very little information about the type of the literals it stores.
|
||||
All numeric types end up mapped as a "Number" and there are no types for dates.
|
||||
@@ -70,23 +71,25 @@ For instance, we do not even know if the type is a number or string based.
|
||||
|
||||
So the query
|
||||
|
||||
```
|
||||
```rust
|
||||
my_path.my_segment:233
|
||||
```
|
||||
|
||||
will be interpreted as
|
||||
`(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)`
|
||||
|
||||
```rust
|
||||
(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)
|
||||
```
|
||||
|
||||
Likewise, we need to emit two tokens if the query contains an rfc3339 date.
|
||||
Indeed the date could have been actually a single token inside the text of a document at ingestion time. Generally speaking, we will always at least emit a string token in query parsing, and sometimes more.
|
||||
|
||||
If one more json field is defined, things get even more complicated.
|
||||
|
||||
|
||||
## Default json field
|
||||
|
||||
If the schema contains a text field called "text" and a json field that is set as a default field:
|
||||
`text:hello` could be reasonably interpreted as targetting the text field or as targetting the json field called `json_dynamic` with the json_path "text".
|
||||
`text:hello` could be reasonably interpreted as targeting the text field or as targeting the json field called `json_dynamic` with the json_path "text".
|
||||
|
||||
If there is such an ambiguity, we decide to only search in the "text" field: `text:hello`.
|
||||
|
||||
@@ -96,11 +99,11 @@ This is a product decision.
|
||||
The user can still target the JSON field by specifying its name explicitly:
|
||||
`json_dynamic.text:hello`.
|
||||
|
||||
## Range queries are not supported.
|
||||
## Range queries are not supported
|
||||
|
||||
Json fields do not support range queries.
|
||||
|
||||
## Arrays do not work like nested object.
|
||||
## Arrays do not work like nested object
|
||||
|
||||
If a json object contains an array, a search query might return more documents
|
||||
than what might be expected.
|
||||
@@ -120,9 +123,8 @@ Let's take an example.
|
||||
Despite the array structure, a document in tantivy is a bag of terms.
|
||||
The query:
|
||||
|
||||
```
|
||||
```rust
|
||||
cart.product_type:sneakers AND cart.attributes.color:red
|
||||
```
|
||||
|
||||
Actually matches the document above.
|
||||
|
||||
|
||||
@@ -110,6 +110,7 @@ fn main() -> tantivy::Result<()> {
|
||||
(9f64..14f64).into(),
|
||||
(14f64..20f64).into(),
|
||||
],
|
||||
..Default::default()
|
||||
}),
|
||||
sub_aggregation: sub_agg_req_1.clone(),
|
||||
}),
|
||||
@@ -117,7 +118,7 @@ fn main() -> tantivy::Result<()> {
|
||||
.into_iter()
|
||||
.collect();
|
||||
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1);
|
||||
let collector = AggregationCollector::from_aggs(agg_req_1, None);
|
||||
|
||||
let searcher = reader.searcher();
|
||||
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
|
||||
|
||||
@@ -7,10 +7,12 @@
|
||||
// Of course, you can have a look at the tantivy's built-in collectors
|
||||
// such as the `CountCollector` for more examples.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
// ---
|
||||
// Importing tantivy...
|
||||
use tantivy::collector::{Collector, SegmentCollector};
|
||||
use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index, Score, SegmentReader};
|
||||
@@ -95,7 +97,7 @@ impl Collector for StatsCollector {
|
||||
}
|
||||
|
||||
struct StatsSegmentCollector {
|
||||
fast_field_reader: DynamicFastFieldReader<u64>,
|
||||
fast_field_reader: Arc<dyn Column<u64>>,
|
||||
stats: Stats,
|
||||
}
|
||||
|
||||
@@ -103,7 +105,7 @@ impl SegmentCollector for StatsSegmentCollector {
|
||||
type Fruit = Option<Stats>;
|
||||
|
||||
fn collect(&mut self, doc: u32, _score: Score) {
|
||||
let value = self.fast_field_reader.get(doc) as f64;
|
||||
let value = self.fast_field_reader.get_val(doc as u64) as f64;
|
||||
self.stats.count += 1;
|
||||
self.stats.sum += value;
|
||||
self.stats.squared_sum += value * value;
|
||||
|
||||
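The running sums accumulated above are enough to derive mean and variance at fruit-merge time; a hedged sketch of that finalization step (names hypothetical, not part of the example):

```rust
// Population variance from the three accumulators: E[x^2] - E[x]^2.
fn finalize(count: u64, sum: f64, squared_sum: f64) -> Option<(f64, f64)> {
    if count == 0 {
        return None;
    }
    let n = count as f64;
    let mean = sum / n;
    Some((mean, squared_sum / n - mean * mean))
}

fn main() {
    // Values [1, 2, 3, 4]: sum = 10, squared_sum = 30.
    assert_eq!(finalize(4, 10.0, 30.0), Some((2.5, 1.25)));
}
```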
@@ -36,8 +36,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// need to be able to retrieve it
|
||||
// for our application.
|
||||
//
|
||||
// We can make our index lighter and
|
||||
// by omitting `STORED` flag.
|
||||
// We can make our index lighter by omitting the `STORED` flag.
|
||||
let body = schema_builder.add_text_field("body", TEXT);
|
||||
|
||||
let schema = schema_builder.build();
|
||||
@@ -50,7 +49,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// for your unit tests... Or this example.
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
// here we are registering our custome tokenizer
|
||||
// here we are registering our custom tokenizer
|
||||
// this will store tokens of 3 characters each
|
||||
index
|
||||
.tokenizers()
|
||||
|
||||
examples/date_time_field.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
// # DateTime field example
|
||||
//
|
||||
// This example shows how the DateTime field can be used
|
||||
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Cardinality, DateOptions, Schema, Value, INDEXED, STORED, STRING};
|
||||
use tantivy::Index;
|
||||
|
||||
fn main() -> tantivy::Result<()> {
|
||||
// # Defining the schema
|
||||
let mut schema_builder = Schema::builder();
|
||||
let opts = DateOptions::from(INDEXED)
|
||||
.set_stored()
|
||||
.set_fast(Cardinality::SingleValue)
|
||||
.set_precision(tantivy::DatePrecision::Seconds);
|
||||
let occurred_at = schema_builder.add_date_field("occurred_at", opts);
|
||||
let event_type = schema_builder.add_text_field("event", STRING | STORED);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
// # Indexing documents
|
||||
let index = Index::create_in_ram(schema.clone());
|
||||
|
||||
let mut index_writer = index.writer(50_000_000)?;
|
||||
let doc = schema.parse_document(
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T12:53:50.53Z",
|
||||
"event": "pull-request"
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
let doc = schema.parse_document(
|
||||
r#"{
|
||||
"occurred_at": "2022-06-22T13:00:00.22Z",
|
||||
"event": "comment"
|
||||
}"#,
|
||||
)?;
|
||||
index_writer.add_document(doc)?;
|
||||
index_writer.commit()?;
|
||||
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
|
||||
// # Default fields: event_type
|
||||
let query_parser = QueryParser::for_index(&index, vec![event_type]);
|
||||
{
|
||||
let query = query_parser.parse_query("event:comment")?;
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(5))?;
|
||||
assert_eq!(count_docs.len(), 1);
|
||||
}
|
||||
{
|
||||
let query = query_parser
|
||||
.parse_query(r#"occurred_at:[2022-06-22T12:58:00Z TO 2022-06-23T00:00:00Z}"#)?;
|
||||
let count_docs = searcher.search(&*query, &TopDocs::with_limit(4))?;
|
||||
assert_eq!(count_docs.len(), 1);
|
||||
for (_score, doc_address) in count_docs {
|
||||
let retrieved_doc = searcher.doc(doc_address)?;
|
||||
assert!(matches!(
|
||||
retrieved_doc.get_first(occurred_at),
|
||||
Some(Value::Date(_))
|
||||
));
|
||||
assert_eq!(
|
||||
schema.to_json(&retrieved_doc),
|
||||
r#"{"event":["comment"],"occurred_at":["2022-06-22T13:00:00.22Z"]}"#
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -113,7 +113,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// on its id.
|
||||
//
|
||||
// Note that `tantivy` does nothing to enforce the idea that
|
||||
// there is only one document associated to this id.
|
||||
// there is only one document associated with this id.
|
||||
//
|
||||
// Also you might have noticed that we apply the delete before
|
||||
// having committed. This does not matter really...
|
||||
|
||||
@@ -44,7 +44,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// A segment contains different data structures.
|
||||
// Inverted index stands for the combination of
|
||||
// - the term dictionary
|
||||
// - the inverted lists associated to each terms and their positions
|
||||
// - the inverted lists associated with each term and their positions
|
||||
let inverted_index = segment_reader.inverted_index(title)?;
|
||||
|
||||
// A `Term` is a text token associated with a field.
|
||||
@@ -105,7 +105,7 @@ fn main() -> tantivy::Result<()> {
|
||||
// A segment contains different data structures.
|
||||
// Inverted index stands for the combination of
|
||||
// - the term dictionary
|
||||
// - the inverted lists associated to each terms and their positions
|
||||
// - the inverted lists associated with each term and their positions
|
||||
let inverted_index = segment_reader.inverted_index(title)?;
|
||||
|
||||
// This segment posting object is like a cursor over the documents matching the term.
|
||||
|
||||
@@ -3,7 +3,6 @@ use std::collections::{HashMap, HashSet};
|
||||
use std::sync::{Arc, RwLock, Weak};
|
||||
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::fastfield::FastFieldReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Field, Schema, FAST, TEXT};
|
||||
use tantivy::{
|
||||
@@ -52,7 +51,7 @@ impl Warmer for DynamicPriceColumn {
|
||||
let product_id_reader = segment.fast_fields().u64(self.field)?;
|
||||
let product_ids: Vec<ProductId> = segment
|
||||
.doc_ids_alive()
|
||||
.map(|doc| product_id_reader.get(doc))
|
||||
.map(|doc| product_id_reader.get_val(doc as u64))
|
||||
.collect();
|
||||
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
|
||||
let mut price_vals: Vec<Price> = Vec::new();
|
||||
@@ -145,11 +144,7 @@ fn main() -> tantivy::Result<()> {
|
||||
let warmers: Vec<Weak<dyn Warmer>> = vec![Arc::downgrade(
|
||||
&(price_dynamic_column.clone() as Arc<dyn Warmer>),
|
||||
)];
|
||||
let reader: IndexReader = index
|
||||
.reader_builder()
|
||||
.warmers(warmers)
|
||||
.num_searchers(1)
|
||||
.try_into()?;
|
||||
let reader: IndexReader = index.reader_builder().warmers(warmers).try_into()?;
|
||||
reader.reload()?;
|
||||
|
||||
let query_parser = QueryParser::for_index(&index, vec![text]);
|
||||
|
||||
@@ -3,7 +3,7 @@ name = "fastfield_codecs"
|
||||
version = "0.2.0"
|
||||
authors = ["Pascal Seitz <pascal@quickwit.io>"]
|
||||
license = "MIT"
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
description = "Fast field codecs used by tantivy"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
@@ -11,14 +11,21 @@ description = "Fast field codecs used by tantivy"
|
||||
[dependencies]
|
||||
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
|
||||
prettytable-rs = {version="0.8.0", optional= true}
|
||||
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
|
||||
prettytable-rs = {version="0.9.0", optional= true}
|
||||
rand = {version="0.8.3", optional= true}
|
||||
fastdivide = "0.4"
|
||||
log = "0.4"
|
||||
itertools = { version = "0.10.3" }
|
||||
measure_time = { version="0.8.2", optional=true}
|
||||
|
||||
[dev-dependencies]
|
||||
more-asserts = "0.2.1"
|
||||
more-asserts = "0.3.0"
|
||||
proptest = "1.0.0"
|
||||
rand = "0.8.3"
|
||||
|
||||
[features]
|
||||
bin = ["prettytable-rs", "rand"]
|
||||
bin = ["prettytable-rs", "rand", "measure_time"]
|
||||
default = ["bin"]
|
||||
unstable = []
|
||||
|
||||
|
||||
@@ -4,105 +4,223 @@ extern crate test;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use fastfield_codecs::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
|
||||
use fastfield_codecs::linearinterpol::{
|
||||
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::multilinearinterpol::{
|
||||
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::*;
|
||||
use std::iter;
|
||||
use std::sync::Arc;
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut data: Vec<_> = (100..55000_u64)
|
||||
.map(|num| num + rand::random::<u8>() as u64)
|
||||
use fastfield_codecs::*;
|
||||
use ownedbytes::OwnedBytes;
|
||||
use rand::prelude::*;
|
||||
use test::Bencher;
|
||||
|
||||
use super::*;
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
fn generate_random() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (0u64..100_000u64)
|
||||
.map(|el| el + random::<u16>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
// Warning: this generates the same permutation at each call
|
||||
fn generate_permutation_gcd() -> Vec<u64> {
|
||||
let mut permutation: Vec<u64> = (1u64..100_000u64).map(|el| el * 1000).collect();
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
permutation
|
||||
}
|
||||
|
||||
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
|
||||
column: &[T],
|
||||
) -> Arc<dyn Column<T>> {
|
||||
let mut buffer = Vec::new();
|
||||
serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
|
||||
open(OwnedBytes::new(buffer)).unwrap()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_jumpy_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u64);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
fn get_exp_data() -> Vec<u64> {
|
||||
let mut data = vec![];
|
||||
for i in 0..100 {
|
||||
let num = i * i;
|
||||
data.extend(iter::repeat(i as u64).take(num));
|
||||
}
|
||||
data.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
|
||||
// length = 328350
|
||||
data
|
||||
}
|
||||
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
fn get_data_50percent_item() -> (u128, u128, Vec<u128>) {
|
||||
let mut permutation = get_exp_data();
|
||||
let major_item = 20;
|
||||
let minor_item = 10;
|
||||
permutation.extend(iter::repeat(major_item).take(permutation.len()));
|
||||
permutation.shuffle(&mut StdRng::from_seed([1u8; 32]));
|
||||
let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
(major_item as u128, minor_item as u128, permutation)
|
||||
}
|
||||
fn bench_get<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
|
||||
b: &mut Bencher,
|
||||
data: &[u64],
|
||||
) {
|
||||
let mut bytes = vec![];
|
||||
S::serialize(
|
||||
&mut bytes,
|
||||
&data,
|
||||
stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
let reader = R::open_from_bytes(&bytes).unwrap();
|
||||
fn get_u128_column_random() -> Arc<dyn Column<u128>> {
|
||||
let permutation = generate_random();
|
||||
let permutation = permutation.iter().map(|el| *el as u128).collect::<Vec<_>>();
|
||||
get_u128_column_from_data(&permutation)
|
||||
}
|
||||
|
||||
fn get_u128_column_from_data(data: &[u128]) -> Arc<dyn Column<u128>> {
|
||||
let mut out = vec![];
|
||||
let iter_gen = || data.iter().cloned();
|
||||
serialize_u128(iter_gen, data.len() as u64, &mut out).unwrap();
|
||||
let out = OwnedBytes::new(out);
|
||||
open_u128::<u128>(out).unwrap()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_50percent_hit(b: &mut Bencher) {
|
||||
let (major_item, _minor_item, data) = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| column.get_between_vals(major_item..=major_item));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_single_hit(b: &mut Bencher) {
|
||||
let (_major_item, minor_item, data) = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| column.get_between_vals(minor_item..=minor_item));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_getrange_u128_hit_all(b: &mut Bencher) {
|
||||
let (_major_item, _minor_item, data) = get_data_50percent_item();
|
||||
let column = get_u128_column_from_data(&data);
|
||||
|
||||
b.iter(|| column.get_between_vals(0..=u128::MAX));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
for pos in value_iter() {
|
||||
reader.get_u64(pos as u64, &bytes);
|
||||
let mut a = 0u128;
|
||||
for i in 0u64..column.num_vals() as u64 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
});
|
||||
}
|
||||
fn bench_create<S: FastFieldCodecSerializer>(b: &mut Bencher, data: &[u64]) {
|
||||
let mut bytes = vec![];
|
||||
b.iter(|| {
|
||||
S::serialize(
|
||||
&mut bytes,
|
||||
&data,
|
||||
stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
use test::Bencher;
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BitpackedFastFieldSerializer>(b, &data);
|
||||
fn bench_intfastfield_jumpy_stride5_u128(b: &mut Bencher) {
|
||||
let column = get_u128_column_random();
|
||||
|
||||
b.iter(|| {
|
||||
let n = column.num_vals();
|
||||
let mut a = 0u128;
|
||||
for i in (0..n / 5).map(|val| val * 5) {
|
||||
a += column.get_val(i as u64);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<LinearInterpolFastFieldSerializer>(b, &data);
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += permutation[i as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<MultiLinearInterpolFastFieldSerializer>(b, &data);
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += column.get_val(i as u64);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(b, &data);
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u64..n as u64 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>(b, &data);
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let n = permutation.len();
|
||||
let column: Arc<dyn Column<u64>> = serialize_and_load(&permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..n as u64 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>(
|
||||
b, &data,
|
||||
);
|
||||
}
|
||||
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
|
||||
let min_value = data.iter().cloned().min().unwrap_or(0);
|
||||
let max_value = data.iter().cloned().max().unwrap_or(0);
|
||||
FastFieldStats {
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals: data.len() as u64,
|
||||
}
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,155 +1,99 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::BinarySerializable;
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use crate::serialize::NormalizedHeader;
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct BitpackedFastFieldReader {
|
||||
pub struct BitpackedReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
pub min_value_u64: u64,
|
||||
pub max_value_u64: u64,
|
||||
normalized_header: NormalizedHeader,
|
||||
}
|
||||
|
||||
impl<'data> FastFieldCodecReader for BitpackedFastFieldReader {
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
|
||||
let (_data, mut footer) = bytes.split_at(bytes.len() - 16);
|
||||
let min_value = u64::deserialize(&mut footer)?;
|
||||
let amplitude = u64::deserialize(&mut footer)?;
|
||||
let max_value = min_value + amplitude;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedFastFieldReader {
|
||||
min_value_u64: min_value,
|
||||
max_value_u64: max_value,
|
||||
bit_unpacker,
|
||||
})
|
||||
}
|
||||
impl Column for BitpackedReader {
|
||||
#[inline]
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
|
||||
self.min_value_u64 + self.bit_unpacker.get(doc, data)
|
||||
fn get_val(&self, doc: u64) -> u64 {
|
||||
self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
self.min_value_u64
|
||||
// The BitpackedReader assumes a normalized vector.
|
||||
0
|
||||
}
|
||||
#[inline]
|
||||
fn max_value(&self) -> u64 {
|
||||
self.max_value_u64
|
||||
self.normalized_header.max_value
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.normalized_header.num_vals
|
||||
}
|
||||
}
|
||||
pub struct BitpackedFastFieldSerializerLegacy<'a, W: 'a + Write> {
|
||||
bit_packer: BitPacker,
|
||||
write: &'a mut W,
|
||||
min_value: u64,
|
||||
amplitude: u64,
|
||||
num_bits: u8,
|
||||
}
|
||||
|
||||
impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
|
||||
/// Creates a new fast field serializer.
|
||||
///
|
||||
/// The serializer in fact encode the values by bitpacking
|
||||
/// `(val - min_value)`.
|
||||
///
|
||||
/// It requires a `min_value` and a `max_value` to compute
|
||||
/// compute the minimum number of bits required to encode
|
||||
/// values.
|
||||
pub fn open(
|
||||
write: &'a mut W,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
) -> io::Result<BitpackedFastFieldSerializerLegacy<'a, W>> {
|
||||
assert!(min_value <= max_value);
|
||||
let amplitude = max_value - min_value;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_packer = BitPacker::new();
|
||||
Ok(BitpackedFastFieldSerializerLegacy {
|
||||
bit_packer,
|
||||
write,
|
||||
min_value,
|
||||
amplitude,
|
||||
num_bits,
|
||||
pub struct BitpackedCodec;
|
||||
|
||||
impl FastFieldCodec for BitpackedCodec {
|
||||
/// The CODEC_TYPE is an enum value used for serialization.
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
|
||||
|
||||
type Reader = BitpackedReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(
|
||||
data: OwnedBytes,
|
||||
normalized_header: NormalizedHeader,
|
||||
) -> io::Result<Self::Reader> {
|
||||
let num_bits = compute_num_bits(normalized_header.max_value);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedReader {
|
||||
data,
|
||||
bit_unpacker,
|
||||
normalized_header,
|
||||
})
|
||||
}
|
||||
/// Pushes a new value to the currently open u64 fast field.
|
||||
#[inline]
|
||||
pub fn add_val(&mut self, val: u64) -> io::Result<()> {
|
||||
let val_to_write: u64 = val - self.min_value;
|
||||
self.bit_packer
|
||||
.write(val_to_write, self.num_bits, &mut self.write)?;
|
||||
Ok(())
|
||||
}
|
||||
pub fn close_field(mut self) -> io::Result<()> {
|
||||
self.bit_packer.close(&mut self.write)?;
|
||||
self.min_value.serialize(&mut self.write)?;
|
||||
self.amplitude.serialize(&mut self.write)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitpackedFastFieldSerializer {}
|
||||
|
||||
impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
|
||||
const NAME: &'static str = "Bitpacked";
|
||||
const ID: u8 = 1;
|
||||
/// Serializes data with the BitpackedFastFieldSerializer.
|
||||
///
|
||||
/// The serializer in fact encode the values by bitpacking
|
||||
/// `(val - min_value)`.
|
||||
/// The bitpacker assumes that the column has been normalized.
|
||||
/// i.e. It has already been shifted by its minimum value, so that its
|
||||
/// current minimum value is 0.
|
||||
///
|
||||
/// It requires a `min_value` and a `max_value` to compute
|
||||
/// compute the minimum number of bits required to encode
|
||||
/// values.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
_data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
let mut serializer =
|
||||
BitpackedFastFieldSerializerLegacy::open(write, stats.min_value, stats.max_value)?;
|
||||
|
||||
for val in data_iter {
|
||||
serializer.add_val(val)?;
|
||||
/// Ideally, a shift has been applied upstream on the column so that `col.min_value() == 0`.
|
||||
fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()> {
|
||||
assert_eq!(column.min_value(), 0u64);
|
||||
let num_bits = compute_num_bits(column.max_value());
|
||||
let mut bit_packer = BitPacker::new();
|
||||
for val in column.iter() {
|
||||
bit_packer.write(val, num_bits, write)?;
|
||||
}
|
||||
serializer.close_field()?;
|
||||
|
||||
bit_packer.close(write)?;
|
||||
Ok(())
|
||||
}
|
||||
fn is_applicable(
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
_stats: FastFieldStats,
|
||||
) -> bool {
|
||||
true
|
||||
}
|
||||
fn estimate(_fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
|
||||
let amplitude = stats.max_value - stats.min_value;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
|
||||
fn estimate(column: &dyn Column) -> Option<f32> {
|
||||
let num_bits = compute_num_bits(column.max_value());
|
||||
let num_bits_uncompressed = 64;
|
||||
num_bits as f32 / num_bits_uncompressed as f32
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
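The cost model behind `estimate` above is just bits-per-value over 64; a hedged sketch with a local re-implementation of `compute_num_bits` (assumed to return the bit width needed to represent the amplitude):

```rust
// A normalized column with max_value 1000 needs 10 bits per value,
// so the estimated ratio versus raw u64 storage is 10 / 64 = 0.15625.
fn compute_num_bits(amplitude: u64) -> u8 {
    (64 - amplitude.leading_zeros()) as u8
}

fn main() {
    assert_eq!(compute_num_bits(1000), 10);
    let estimate = compute_num_bits(1000) as f32 / 64.0;
    assert!((estimate - 0.15625).abs() < 1e-6);
}
```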
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tests::get_codec_test_data_sets;
|
||||
use crate::tests::get_codec_test_datasets;
|
||||
|
||||
fn create_and_validate(data: &[u64], name: &str) {
|
||||
crate::tests::create_and_validate::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(
|
||||
data, name,
|
||||
);
|
||||
crate::tests::create_and_validate::<BitpackedCodec>(data, name);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = get_codec_test_data_sets();
|
||||
let data_sets = get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate(&data, name);
|
||||
data.reverse();
|
||||
|
||||
fastfield_codecs/src/blockwise_linear.rs (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||
use std::sync::Arc;
|
||||
use std::{io, iter};
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, DeserializeFrom};
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::line::Line;
|
||||
use crate::serialize::NormalizedHeader;
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType, VecColumn};
|
||||
|
||||
const CHUNK_SIZE: usize = 512;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct Block {
|
||||
line: Line,
|
||||
bit_unpacker: BitUnpacker,
|
||||
data_start_offset: usize,
|
||||
}
|
||||
|
||||
impl BinarySerializable for Block {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
self.line.serialize(writer)?;
|
||||
self.bit_unpacker.bit_width().serialize(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let line = Line::deserialize(reader)?;
|
||||
let bit_width = u8::deserialize(reader)?;
|
||||
Ok(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_num_blocks(num_vals: u64) -> usize {
|
||||
(num_vals as usize + CHUNK_SIZE - 1) / CHUNK_SIZE
|
||||
}
|
||||
|
||||
pub struct BlockwiseLinearCodec;
|
||||
|
||||
impl FastFieldCodec for BlockwiseLinearCodec {
|
||||
const CODEC_TYPE: crate::FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
|
||||
type Reader = BlockwiseLinearReader;
|
||||
|
||||
fn open_from_bytes(
|
||||
bytes: ownedbytes::OwnedBytes,
|
||||
normalized_header: NormalizedHeader,
|
||||
) -> io::Result<Self::Reader> {
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
let footer_offset = bytes.len() - 4 - footer_len as usize;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let num_blocks = compute_num_blocks(normalized_header.num_vals);
|
||||
let mut blocks: Vec<Block> = iter::repeat_with(|| Block::deserialize(&mut footer))
|
||||
.take(num_blocks)
|
||||
.collect::<io::Result<_>>()?;
|
||||
|
||||
let mut start_offset = 0;
|
||||
for block in &mut blocks {
|
||||
block.data_start_offset = start_offset;
|
||||
start_offset += (block.bit_unpacker.bit_width() as usize) * CHUNK_SIZE / 8;
|
||||
}
|
||||
Ok(BlockwiseLinearReader {
|
||||
blocks: Arc::new(blocks),
|
||||
data,
|
||||
normalized_header,
|
||||
})
|
||||
}
|
||||
|
||||
// Estimate first_chunk and extrapolate
|
||||
fn estimate(column: &dyn crate::Column) -> Option<f32> {
|
||||
if column.num_vals() < 10 * CHUNK_SIZE as u64 {
|
||||
return None;
|
||||
}
|
||||
let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE as usize).collect();
|
||||
let line = Line::train(&VecColumn::from(&first_chunk));
|
||||
for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
|
||||
let interpolated_val = line.eval(i as u64);
|
||||
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
|
||||
}
|
||||
let estimated_bit_width = first_chunk
|
||||
.iter()
|
||||
.map(|el| ((el + 1) as f32 * 3.0) as u64)
|
||||
.map(compute_num_bits)
|
||||
.max()
|
||||
.unwrap();
|
||||
|
||||
let metadata_per_block = {
|
||||
let mut out = vec![];
|
||||
Block::default().serialize(&mut out).unwrap();
|
||||
out.len()
|
||||
};
|
||||
let num_bits = estimated_bit_width as u64 * column.num_vals() as u64
|
||||
// function metadata per block
|
||||
+ metadata_per_block as u64 * (column.num_vals() / CHUNK_SIZE as u64);
|
||||
let num_bits_uncompressed = 64 * column.num_vals();
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
|
||||
fn serialize(column: &dyn Column, wrt: &mut impl io::Write) -> io::Result<()> {
|
||||
// The BitpackedReader assumes a normalized vector.
|
||||
assert_eq!(column.min_value(), 0);
|
||||
let mut buffer = Vec::with_capacity(CHUNK_SIZE);
|
||||
let num_vals = column.num_vals();
|
||||
|
||||
let num_blocks = compute_num_blocks(num_vals);
|
||||
let mut blocks = Vec::with_capacity(num_blocks);
|
||||
|
||||
let mut vals = column.iter();
|
||||
|
||||
let mut bit_packer = BitPacker::new();
|
||||
|
||||
for _ in 0..num_blocks {
|
||||
buffer.clear();
|
||||
buffer.extend((&mut vals).take(CHUNK_SIZE));
|
||||
let line = Line::train(&VecColumn::from(&buffer));
|
||||
|
||||
assert!(!buffer.is_empty());
|
||||
|
||||
for (i, buffer_val) in buffer.iter_mut().enumerate() {
|
||||
let interpolated_val = line.eval(i as u64);
|
||||
*buffer_val = buffer_val.wrapping_sub(interpolated_val);
|
||||
}
|
||||
let bit_width = buffer.iter().copied().map(compute_num_bits).max().unwrap();
|
||||
|
||||
for &buffer_val in &buffer {
|
||||
bit_packer.write(buffer_val, bit_width, wrt)?;
|
||||
}
|
||||
|
||||
blocks.push(Block {
|
||||
line,
|
||||
bit_unpacker: BitUnpacker::new(bit_width),
|
||||
data_start_offset: 0,
|
||||
});
|
||||
}
|
||||
|
||||
bit_packer.close(wrt)?;
|
||||
|
||||
assert_eq!(blocks.len(), compute_num_blocks(num_vals));
|
||||
|
||||
let mut counting_wrt = CountingWriter::wrap(wrt);
|
||||
for block in &blocks {
|
||||
block.serialize(&mut counting_wrt)?;
|
||||
}
|
||||
let footer_len = counting_wrt.written_bytes();
|
||||
(footer_len as u32).serialize(&mut counting_wrt)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
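A hedged sketch of the per-block scheme serialized above: fit a line per block, store only the wrapping residuals, and reconstruct each value as `line(i) + residual[i]` (the `line` closure here stands in for what `Line::train` would recover on perfectly linear data):

```rust
fn main() {
    // A perfectly linear block: 10, 13, 16, ...
    let block: Vec<u64> = (0..8).map(|i| 10 + 3 * i).collect();
    let line = |i: u64| 10 + 3 * i;
    // Residuals are computed with wrapping arithmetic, as in `serialize`.
    let residuals: Vec<u64> = block
        .iter()
        .enumerate()
        .map(|(i, &v)| v.wrapping_sub(line(i as u64)))
        .collect();
    assert!(residuals.iter().all(|&r| r == 0)); // zero bits per value needed
    // Reconstruction mirrors `get_val`: line value plus bitpacked residual.
    assert_eq!(line(5).wrapping_add(residuals[5]), block[5]);
}
```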
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockwiseLinearReader {
|
||||
blocks: Arc<Vec<Block>>,
|
||||
normalized_header: NormalizedHeader,
|
||||
data: OwnedBytes,
|
||||
}
|
||||
|
||||
impl Column for BlockwiseLinearReader {
|
||||
#[inline(always)]
|
||||
fn get_val(&self, idx: u64) -> u64 {
|
||||
let block_id = (idx / CHUNK_SIZE as u64) as usize;
|
||||
let idx_within_block = idx % (CHUNK_SIZE as u64);
|
||||
let block = &self.blocks[block_id];
|
||||
let interpolated_val: u64 = block.line.eval(idx_within_block);
|
||||
let block_bytes = &self.data[block.data_start_offset..];
|
||||
let bitpacked_diff = block.bit_unpacker.get(idx_within_block, block_bytes);
|
||||
interpolated_val.wrapping_add(bitpacked_diff)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u64 {
|
||||
// The BlockwiseLinearReader assumes a normalized vector.
|
||||
0u64
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.normalized_header.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.normalized_header.num_vals
|
||||
}
|
||||
}
|
||||
fastfield_codecs/src/column.rs (new file, 333 lines)
@@ -0,0 +1,333 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::RangeInclusive;
|
||||
|
||||
use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::monotonic_mapping::StrictlyMonotonicFn;
|
||||
|
||||
/// `Column` provides columnar access on a field.
|
||||
pub trait Column<T: PartialOrd = u64>: Send + Sync {
|
||||
/// Return the value associated with the given idx.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `idx` is greater than the column length.
|
||||
fn get_val(&self, idx: u64) -> T;
|
||||
|
||||
/// Fills an output buffer with the fast field values
|
||||
/// associated with the `DocId` going from
|
||||
/// `start` to `start + output.len()`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Must panic if `start + output.len()` is greater than
|
||||
/// the segment's `maxdoc`.
|
||||
#[inline]
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
for (out, idx) in output.iter_mut().zip(start..) {
|
||||
*out = self.get_val(idx);
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the positions of values which are in the provided range.
|
||||
#[inline]
|
||||
fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64> {
|
||||
let mut vals = Vec::new();
|
||||
for idx in 0..self.num_vals() {
|
||||
let val = self.get_val(idx);
|
||||
if range.contains(&val) {
|
||||
vals.push(idx);
|
||||
}
|
||||
}
|
||||
vals
|
||||
}
|
||||
|
||||
/// Returns the minimum value for this fast field.
|
||||
///
|
||||
/// This min_value may not be exact.
|
||||
/// For instance, the min value does not take into account possibly
|
||||
/// deleted documents. All values are however guaranteed to be greater than or equal to
|
||||
/// `.min_value()`.
|
||||
fn min_value(&self) -> T;
|
||||
|
||||
/// Returns the maximum value for this fast field.
|
||||
///
|
||||
/// This max_value may not be exact.
|
||||
/// For instance, the max value does not take into account possibly
|
||||
/// deleted documents. All values are however guaranteed to be less than or equal to
|
||||
/// `.max_value()`.
|
||||
fn max_value(&self) -> T;
|
||||
|
||||
/// The number of values in the column.
|
||||
fn num_vals(&self) -> u64;
|
||||
|
||||
/// Returns an iterator over the data.
|
||||
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
|
||||
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
|
||||
}
|
||||
}
|
||||
|
||||
/// VecColumn provides `Column` over a slice.
|
||||
pub struct VecColumn<'a, T = u64> {
|
||||
values: &'a [T],
|
||||
min_value: T,
|
||||
max_value: T,
|
||||
}
|
||||
|
||||
impl<'a, C: Column<T>, T: Copy + PartialOrd> Column<T> for &'a C {
|
||||
fn get_val(&self, idx: u64) -> T {
|
||||
(*self).get_val(idx)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> T {
|
||||
(*self).min_value()
|
||||
}
|
||||
|
||||
fn max_value(&self) -> T {
|
||||
(*self).max_value()
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
(*self).num_vals()
|
||||
}
|
||||
|
||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = T> + 'b> {
|
||||
(*self).iter()
|
||||
}
|
||||
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
(*self).get_range(start, output)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + PartialOrd + Send + Sync> Column<T> for VecColumn<'a, T> {
|
||||
fn get_val(&self, position: u64) -> T {
|
||||
self.values[position as usize]
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = T> + '_> {
|
||||
Box::new(self.values.iter().copied())
|
||||
}
|
||||
|
||||
fn min_value(&self) -> T {
|
||||
self.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> T {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.values.len() as u64
|
||||
}
|
||||
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
output.copy_from_slice(&self.values[start as usize..][..output.len()])
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: Copy + Ord + Default, V> From<&'a V> for VecColumn<'a, T>
|
||||
where V: AsRef<[T]> + ?Sized
|
||||
{
|
||||
fn from(values: &'a V) -> Self {
|
||||
let values = values.as_ref();
|
||||
let (min_value, max_value) = minmax(values.iter().copied()).unwrap_or_default();
|
||||
Self {
|
||||
values,
|
||||
min_value,
|
||||
max_value,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct MonotonicMappingColumn<C, T, Input> {
|
||||
from_column: C,
|
||||
monotonic_mapping: T,
|
||||
_phantom: PhantomData<Input>,
|
||||
}
|
||||
|
||||
/// Creates a view of a column transformed by a strictly monotonic mapping. See
|
||||
/// [`StrictlyMonotonicFn`].
|
||||
///
|
||||
/// E.g. apply a gcd monotonic_mapping([100, 200, 300]) == [1, 2, 3]
|
||||
/// monotonic_mapping.mapping() is expected to be injective, and we should always have
|
||||
/// monotonic_mapping.inverse(monotonic_mapping.mapping(el)) == el
|
||||
///
|
||||
/// The inverse of the mapping is required for:
|
||||
/// `fn get_between_vals(&self, range: RangeInclusive<T>) -> Vec<u64> `
|
||||
/// The user provides the original value range and we need to monotonic map them in the same way the
|
||||
/// serialization does before calling the underlying column.
|
||||
///
|
||||
/// Note that when opening a codec, the monotonic_mapping should be the inverse of the mapping
|
||||
/// during serialization. And therefore the monotonic_mapping_inv when opening is the same as
|
||||
/// monotonic_mapping during serialization.
|
||||
pub fn monotonic_map_column<C, T, Input, Output>(
|
||||
from_column: C,
|
||||
monotonic_mapping: T,
|
||||
) -> impl Column<Output>
|
||||
where
|
||||
C: Column<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Send + Sync + Clone,
|
||||
Output: PartialOrd + Send + Sync + Clone,
|
||||
{
|
||||
MonotonicMappingColumn {
|
||||
from_column,
|
||||
monotonic_mapping,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
impl<C, T, Input, Output> Column<Output> for MonotonicMappingColumn<C, T, Input>
|
||||
where
|
||||
C: Column<Input>,
|
||||
T: StrictlyMonotonicFn<Input, Output> + Send + Sync,
|
||||
Input: PartialOrd + Send + Sync + Clone,
|
||||
Output: PartialOrd + Send + Sync + Clone,
|
||||
{
|
||||
#[inline]
|
||||
fn get_val(&self, idx: u64) -> Output {
|
||||
let from_val = self.from_column.get_val(idx);
|
||||
self.monotonic_mapping.mapping(from_val)
|
||||
}
|
||||
|
||||
fn min_value(&self) -> Output {
|
||||
let from_min_value = self.from_column.min_value();
|
||||
self.monotonic_mapping.mapping(from_min_value)
|
||||
}
|
||||
|
||||
fn max_value(&self) -> Output {
|
||||
let from_max_value = self.from_column.max_value();
|
||||
self.monotonic_mapping.mapping(from_max_value)
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.from_column.num_vals()
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = Output> + '_> {
|
||||
Box::new(
|
||||
self.from_column
|
||||
.iter()
|
||||
.map(|el| self.monotonic_mapping.mapping(el)),
|
||||
)
|
||||
}
|
||||
|
||||
fn get_between_vals(&self, range: RangeInclusive<Output>) -> Vec<u64> {
|
||||
self.from_column.get_between_vals(
|
||||
self.monotonic_mapping.inverse(range.start().clone())
|
||||
..=self.monotonic_mapping.inverse(range.end().clone()),
|
||||
)
|
||||
}
|
||||
|
||||
// We voluntarily do not implement get_range as it yields a regression,
|
||||
// and we do not have any specialized implementation anyway.
|
||||
}
|
||||
|
||||
pub struct IterColumn<T>(T);
|
||||
|
||||
impl<T> From<T> for IterColumn<T>
|
||||
where T: Iterator + Clone + ExactSizeIterator
|
||||
{
|
||||
fn from(iter: T) -> Self {
|
||||
IterColumn(iter)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Column<T::Item> for IterColumn<T>
|
||||
where
|
||||
T: Iterator + Clone + ExactSizeIterator + Send + Sync,
|
||||
T::Item: PartialOrd,
|
||||
{
|
||||
fn get_val(&self, idx: u64) -> T::Item {
|
||||
self.0.clone().nth(idx as usize).unwrap()
|
||||
}
|
||||
|
||||
fn min_value(&self) -> T::Item {
|
||||
self.0.clone().next().unwrap()
|
||||
}
|
||||
|
||||
fn max_value(&self) -> T::Item {
|
||||
self.0.clone().last().unwrap()
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.0.len() as u64
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = T::Item> + '_> {
|
||||
Box::new(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::monotonic_mapping::{
|
||||
StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternalBaseval,
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_monotonic_mapping() {
|
||||
let vals = &[3u64, 5u64][..];
|
||||
let col = VecColumn::from(vals);
|
||||
let mapped = monotonic_map_column(col, StrictlyMonotonicMappingToInternalBaseval::new(2));
|
||||
assert_eq!(mapped.min_value(), 1u64);
|
||||
assert_eq!(mapped.max_value(), 3u64);
|
||||
assert_eq!(mapped.num_vals(), 2);
|
||||
assert_eq!(mapped.num_vals(), 2);
|
||||
assert_eq!(mapped.get_val(0), 1);
|
||||
assert_eq!(mapped.get_val(1), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_as_col() {
|
||||
let col = IterColumn::from(10..100);
|
||||
assert_eq!(col.num_vals(), 90);
|
||||
assert_eq!(col.max_value(), 99);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_monotonic_mapping_iter() {
|
||||
let vals: Vec<u64> = (10..110u64).map(|el| el * 10).collect();
|
||||
let col = VecColumn::from(&vals);
|
||||
let mapped = monotonic_map_column(
|
||||
col,
|
||||
StrictlyMonotonicMappingInverter::from(
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100),
|
||||
),
|
||||
);
|
||||
let val_i64s: Vec<u64> = mapped.iter().collect();
|
||||
for i in 0..100 {
|
||||
assert_eq!(val_i64s[i as usize], mapped.get_val(i));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_monotonic_mapping_get_range() {
|
||||
let vals: Vec<u64> = (0..100u64).map(|el| el * 10).collect();
|
||||
let col = VecColumn::from(&vals);
|
||||
let mapped = monotonic_map_column(
|
||||
col,
|
||||
StrictlyMonotonicMappingInverter::from(
|
||||
StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 0),
|
||||
),
|
||||
);
|
||||
|
||||
assert_eq!(mapped.min_value(), 0u64);
|
||||
assert_eq!(mapped.max_value(), 9900u64);
|
||||
assert_eq!(mapped.num_vals(), 100);
|
||||
let val_u64s: Vec<u64> = mapped.iter().collect();
|
||||
assert_eq!(val_u64s.len(), 100);
|
||||
for i in 0..100 {
|
||||
assert_eq!(val_u64s[i as usize], mapped.get_val(i));
|
||||
assert_eq!(val_u64s[i as usize], vals[i as usize] * 10);
|
||||
}
|
||||
let mut buf = [0u64; 20];
|
||||
mapped.get_range(7, &mut buf[..]);
|
||||
assert_eq!(&val_u64s[7..][..20], &buf);
|
||||
}
|
||||
}
|
||||
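To see how these pieces compose, here is a minimal sketch assuming a toy `StrictlyMonotonicFn` implementation; the `DoubleMapping` type is hypothetical and only illustrates the `mapping`/`inverse` contract that `MonotonicMappingColumn` relies on:

// Hypothetical strictly monotonic mapping: doubles every value.
struct DoubleMapping;

impl StrictlyMonotonicFn<u64, u64> for DoubleMapping {
    fn mapping(&self, inp: u64) -> u64 {
        inp * 2
    }
    fn inverse(&self, out: u64) -> u64 {
        out / 2
    }
}

fn monotonic_map_demo() {
    let vals: Vec<u64> = vec![1, 2, 3];
    let col = VecColumn::from(&vals);
    let mapped = monotonic_map_column(col, DoubleMapping);
    assert_eq!(mapped.get_val(0), 2);
    assert_eq!(mapped.min_value(), 2);
    assert_eq!(mapped.max_value(), 6);
    // Range queries are translated back through `inverse` before they
    // hit the underlying column: [2, 4] becomes [1, 2].
    assert_eq!(mapped.get_between_vals(2..=4), vec![0, 1]);
}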
43  fastfield_codecs/src/compact_space/blank_range.rs  Normal file
@@ -0,0 +1,43 @@
use std::ops::RangeInclusive;

/// The range of a blank in value space.
///
/// A blank is an unoccupied space in the data.
/// Use try_into() to construct.
/// A range must have a length of at least 3. Invalid ranges are rejected.
///
/// Ordered by range length.
#[derive(Debug, Eq, PartialEq, Clone)]
pub(crate) struct BlankRange {
    blank_range: RangeInclusive<u128>,
}
impl TryFrom<RangeInclusive<u128>> for BlankRange {
    type Error = &'static str;
    fn try_from(range: RangeInclusive<u128>) -> Result<Self, Self::Error> {
        let blank_size = range.end().saturating_sub(*range.start());
        if blank_size < 2 {
            Err("invalid range")
        } else {
            Ok(BlankRange { blank_range: range })
        }
    }
}
impl BlankRange {
    pub(crate) fn blank_size(&self) -> u128 {
        self.blank_range.end() - self.blank_range.start() + 1
    }
    pub(crate) fn blank_range(&self) -> RangeInclusive<u128> {
        self.blank_range.clone()
    }
}

impl Ord for BlankRange {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.blank_size().cmp(&other.blank_size())
    }
}
impl PartialOrd for BlankRange {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.blank_size().cmp(&other.blank_size()))
    }
}
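A quick sketch of the `TryFrom` contract above: only ranges spanning at least three values construct successfully, and the `Ord` impl orders blanks by size, which is what the binary heap in build_compact_space.rs relies on:

fn blank_range_demo() {
    // 10..=20 spans 11 values: accepted.
    let blank: BlankRange = (10u128..=20).try_into().unwrap();
    assert_eq!(blank.blank_size(), 11);

    // 10..=11 spans only 2 values (end - start < 2): rejected.
    let too_small: Result<BlankRange, _> = (10u128..=11).try_into();
    assert!(too_small.is_err());
}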
231  fastfield_codecs/src/compact_space/build_compact_space.rs  Normal file
@@ -0,0 +1,231 @@
use std::collections::{BTreeSet, BinaryHeap};
use std::iter;
use std::ops::RangeInclusive;

use itertools::Itertools;

use super::blank_range::BlankRange;
use super::{CompactSpace, RangeMapping};

/// Put the blanks for the sorted values into a binary heap
fn get_blanks(values_sorted: &BTreeSet<u128>) -> BinaryHeap<BlankRange> {
    let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
    for (first, second) in values_sorted.iter().tuple_windows() {
        // Correctness of the overflow: the values are deduped and sorted (BTreeSet property),
        // which means there is always space between two values.
        let blank_range = first + 1..=second - 1;
        let blank_range: Result<BlankRange, _> = blank_range.try_into();
        if let Ok(blank_range) = blank_range {
            blanks.push(blank_range);
        }
    }

    blanks
}

struct BlankCollector {
    blanks: Vec<BlankRange>,
    staged_blanks_sum: u128,
}
impl BlankCollector {
    fn new() -> Self {
        Self {
            blanks: vec![],
            staged_blanks_sum: 0,
        }
    }
    fn stage_blank(&mut self, blank: BlankRange) {
        self.staged_blanks_sum += blank.blank_size();
        self.blanks.push(blank);
    }
    fn drain(&mut self) -> impl Iterator<Item = BlankRange> + '_ {
        self.staged_blanks_sum = 0;
        self.blanks.drain(..)
    }
    fn staged_blanks_sum(&self) -> u128 {
        self.staged_blanks_sum
    }
    fn num_staged_blanks(&self) -> usize {
        self.blanks.len()
    }
}
fn num_bits(val: u128) -> u8 {
    (128u32 - val.leading_zeros()) as u8
}

/// Collects blanks and adds them to the compact space if they save more bits than their
/// metadata costs.
pub fn get_compact_space(
    values_deduped_sorted: &BTreeSet<u128>,
    total_num_values: u64,
    cost_per_blank: usize,
) -> CompactSpace {
    let mut compact_space_builder = CompactSpaceBuilder::new();
    if values_deduped_sorted.is_empty() {
        return compact_space_builder.finish();
    }

    let mut blanks: BinaryHeap<BlankRange> = get_blanks(values_deduped_sorted);
    // Replace after stabilization of https://github.com/rust-lang/rust/issues/62924

    // We start with a space that's limited to min_value..=max_value.
    let min_value = *values_deduped_sorted.iter().next().unwrap_or(&0);
    let max_value = *values_deduped_sorted.iter().last().unwrap_or(&0);

    // +1 for null; in case min and max cover the whole space, we are off by one.
    let mut amplitude_compact_space = (max_value - min_value).saturating_add(1);
    if min_value != 0 {
        compact_space_builder.add_blanks(iter::once(0..=min_value - 1));
    }
    if max_value != u128::MAX {
        compact_space_builder.add_blanks(iter::once(max_value + 1..=u128::MAX));
    }

    let mut amplitude_bits: u8 = num_bits(amplitude_compact_space);

    let mut blank_collector = BlankCollector::new();
    // We stage blanks until they reduce the compact space by at least 1 bit, and then flush
    // them if the metadata cost is lower than the total number of saved bits.
    // The binary heap processes the gaps by their size.
    while let Some(blank_range) = blanks.pop() {
        blank_collector.stage_blank(blank_range);

        let staged_spaces_sum: u128 = blank_collector.staged_blanks_sum();
        let amplitude_new_compact_space = amplitude_compact_space - staged_spaces_sum;
        let amplitude_new_bits = num_bits(amplitude_new_compact_space);
        if amplitude_bits == amplitude_new_bits {
            continue;
        }
        let saved_bits = (amplitude_bits - amplitude_new_bits) as usize * total_num_values as usize;
        // TODO: Maybe calculate the exact cost of the blanks, and run this more expensive
        // computation only when amplitude_new_bits changes.
        let cost = blank_collector.num_staged_blanks() * cost_per_blank;
        if cost >= saved_bits {
            // Continue here: although we walk over the blanks by decreasing size,
            // we can potentially save a lot on the last bits, which correspond to
            // smaller blanks.
            //
            // E.g. if the first range reduces the compact space by 1000 from 2000 to 1000,
            // which saves 11-10=1 bit, the next range may reduce the compact space by 950 to
            // 50, which saves 10-6=4 bits.
            continue;
        }

        amplitude_compact_space = amplitude_new_compact_space;
        amplitude_bits = amplitude_new_bits;
        compact_space_builder.add_blanks(blank_collector.drain().map(|blank| blank.blank_range()));
    }

    // Special case: no blanks were collected because:
    // * the data is empty (early exit)
    // * the algorithm decided it's not worth the cost, which can be the case for single values
    //
    // We drain one collected blank unconditionally, so the empty case is reserved for empty
    // data. An empty compact_space therefore means the data is empty, no data is covered
    // (conversely to all data), and we can assign null to it.
    if compact_space_builder.is_empty() {
        compact_space_builder.add_blanks(
            blank_collector
                .drain()
                .map(|blank| blank.blank_range())
                .take(1),
        );
    }

    let compact_space = compact_space_builder.finish();
    if max_value - min_value != u128::MAX {
        debug_assert_eq!(
            compact_space.amplitude_compact_space(),
            amplitude_compact_space
        );
    }
    compact_space
}

#[derive(Debug, Clone, Eq, PartialEq)]
struct CompactSpaceBuilder {
    blanks: Vec<RangeInclusive<u128>>,
}

impl CompactSpaceBuilder {
    /// Creates a new compact space builder which will initially cover the whole space.
    fn new() -> Self {
        Self { blanks: Vec::new() }
    }

    /// Assumes that repeated add_blanks calls don't overlap and are not adjacent,
    /// e.g. [3..=5, 5..=10] is not allowed.
    ///
    /// Both of those assumptions hold when blanks are produced from sorted values.
    fn add_blanks(&mut self, blank: impl Iterator<Item = RangeInclusive<u128>>) {
        self.blanks.extend(blank);
    }

    fn is_empty(&self) -> bool {
        self.blanks.is_empty()
    }

    /// Convert blanks to covered space and assign the null value.
    fn finish(mut self) -> CompactSpace {
        // Sort by start. Ranges are not allowed to overlap.
        self.blanks.sort_unstable_by_key(|blank| *blank.start());

        let mut covered_space = Vec::with_capacity(self.blanks.len());

        // Beginning of the blanks
        if let Some(first_blank_start) = self.blanks.first().map(RangeInclusive::start) {
            if *first_blank_start != 0 {
                covered_space.push(0..=first_blank_start - 1);
            }
        }

        // Between the blanks
        let between_blanks = self.blanks.iter().tuple_windows().map(|(left, right)| {
            assert!(
                left.end() < right.start(),
                "overlapping or adjacent ranges detected"
            );
            *left.end() + 1..=*right.start() - 1
        });
        covered_space.extend(between_blanks);

        // End of the blanks
        if let Some(last_blank_end) = self.blanks.last().map(RangeInclusive::end) {
            if *last_blank_end != u128::MAX {
                covered_space.push(last_blank_end + 1..=u128::MAX);
            }
        }

        if covered_space.is_empty() {
            covered_space.push(0..=0); // empty data case
        };

        let mut compact_start: u64 = 1; // 0 is reserved for `null`
        let mut ranges_mapping: Vec<RangeMapping> = Vec::with_capacity(covered_space.len());
        for cov in covered_space {
            let range_mapping = super::RangeMapping {
                value_range: cov,
                compact_start,
            };
            let covered_range_len = range_mapping.range_length();
            ranges_mapping.push(range_mapping);
            compact_start += covered_range_len;
        }
        CompactSpace { ranges_mapping }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_binary_heap_pop_order() {
        let mut blanks: BinaryHeap<BlankRange> = BinaryHeap::new();
        blanks.push((0..=10).try_into().unwrap());
        blanks.push((100..=200).try_into().unwrap());
        blanks.push((100..=110).try_into().unwrap());
        assert_eq!(blanks.pop().unwrap().blank_size(), 101);
        assert_eq!(blanks.pop().unwrap().blank_size(), 11);
    }
}
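The flush condition in `get_compact_space` is easiest to see with concrete numbers. A worked sketch of the arithmetic, assuming 1000 values and the 36-bits-per-blank cost from mod.rs:

fn cost_model_demo() {
    let total_num_values = 1000usize;
    let cost_per_blank = 36usize; // COST_PER_BLANK_IN_BITS

    // A staged blank shrinks the amplitude from 2000 to 1000,
    // i.e. from 11 bits to 10 bits per value.
    let amplitude_bits = num_bits(2000) as usize; // 11
    let amplitude_new_bits = num_bits(1000) as usize; // 10

    let saved_bits = (amplitude_bits - amplitude_new_bits) * total_num_values; // 1000
    let cost = 1 * cost_per_blank; // one staged blank

    // 36 < 1000: the blank pays for its metadata cost and gets flushed.
    assert!(cost < saved_bits);
}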
671  fastfield_codecs/src/compact_space/mod.rs  Normal file
@@ -0,0 +1,671 @@
/// This codec takes a large number space (u128) and reduces it to a compact number space.
///
/// It will find spaces in the number range. For example:
///
/// 100, 101, 102, 103, 104, 50000, 50001
/// could be mapped to
/// 100..104 -> 0..4
/// 50000..50001 -> 5..6
///
/// The compact space 0..=6 requires far fewer bits than 100..=50001.
///
/// The codec was created to compress IP addresses, but may be employed in other use cases.
use std::{
    cmp::Ordering,
    collections::BTreeSet,
    io::{self, Write},
    ops::RangeInclusive,
};

use common::{BinarySerializable, CountingWriter, VInt, VIntU128};
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{self, BitPacker, BitUnpacker};

use crate::compact_space::build_compact_space::get_compact_space;
use crate::Column;

mod blank_range;
mod build_compact_space;

/// The exact cost per blank is hard to pin down: since blanks are delta encoded, the actual
/// cost of a blank depends on the number of blanks.
///
/// The number was chosen by looking at a real dataset. It is optimized for larger datasets.
const COST_PER_BLANK_IN_BITS: usize = 36;

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CompactSpace {
    ranges_mapping: Vec<RangeMapping>,
}

/// Maps a range from the original space to compact_start..compact_start + range.len()
#[derive(Debug, Clone, Eq, PartialEq)]
struct RangeMapping {
    value_range: RangeInclusive<u128>,
    compact_start: u64,
}
impl RangeMapping {
    fn range_length(&self) -> u64 {
        (self.value_range.end() - self.value_range.start()) as u64 + 1
    }

    // The last value of the compact space in this range
    fn compact_end(&self) -> u64 {
        self.compact_start + self.range_length() - 1
    }
}

impl BinarySerializable for CompactSpace {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.ranges_mapping.len() as u64).serialize(writer)?;

        let mut prev_value = 0;
        for value_range in self
            .ranges_mapping
            .iter()
            .map(|range_mapping| &range_mapping.value_range)
        {
            let blank_delta_start = value_range.start() - prev_value;
            VIntU128(blank_delta_start).serialize(writer)?;
            prev_value = *value_range.start();

            let blank_delta_end = value_range.end() - prev_value;
            VIntU128(blank_delta_end).serialize(writer)?;
            prev_value = *value_range.end();
        }

        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let num_ranges = VInt::deserialize(reader)?.0;
        let mut ranges_mapping: Vec<RangeMapping> = vec![];
        let mut value = 0u128;
        let mut compact_start = 1u64; // 0 is reserved for `null`
        for _ in 0..num_ranges {
            let blank_delta_start = VIntU128::deserialize(reader)?.0;
            value += blank_delta_start;
            let blank_start = value;

            let blank_delta_end = VIntU128::deserialize(reader)?.0;
            value += blank_delta_end;
            let blank_end = value;

            let range_mapping = RangeMapping {
                value_range: blank_start..=blank_end,
                compact_start,
            };
            let range_length = range_mapping.range_length();
            ranges_mapping.push(range_mapping);
            compact_start += range_length;
        }

        Ok(Self { ranges_mapping })
    }
}

impl CompactSpace {
    /// The amplitude is the value range of the compact space, including the sentinel value
    /// used to identify null values. The compact space is 0..=amplitude.
    ///
    /// It's only used to verify we don't exceed the u64 number space, which would indicate a bug.
    fn amplitude_compact_space(&self) -> u128 {
        self.ranges_mapping
            .last()
            .map(|last_range| last_range.compact_end() as u128)
            .unwrap_or(1) // compact space starts at 1, 0 == null
    }

    fn get_range_mapping(&self, pos: usize) -> &RangeMapping {
        &self.ranges_mapping[pos]
    }

    /// Returns Ok(the value in the compact space), or, if the value is outside the compact
    /// space, Err(the position where it would be inserted).
    fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
        self.ranges_mapping
            .binary_search_by(|probe| {
                let value_range = &probe.value_range;
                if value < *value_range.start() {
                    Ordering::Greater
                } else if value > *value_range.end() {
                    Ordering::Less
                } else {
                    Ordering::Equal
                }
            })
            .map(|pos| {
                let range_mapping = &self.ranges_mapping[pos];
                let pos_in_range = (value - range_mapping.value_range.start()) as u64;
                range_mapping.compact_start + pos_in_range
            })
    }

    /// Unpacks a value from the compact space (u64) back into the u128 space.
    fn compact_to_u128(&self, compact: u64) -> u128 {
        let pos = self
            .ranges_mapping
            .binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
            // Correctness of the overflow: the first range starts at compact start 1, so the
            // error position returned by the binary search can never be 0.
            .map_or_else(|e| e - 1, |v| v);

        let range_mapping = &self.ranges_mapping[pos];
        let diff = compact - range_mapping.compact_start;
        range_mapping.value_range.start() + diff as u128
    }
}

pub struct CompactSpaceCompressor {
    params: IPCodecParams,
}
#[derive(Debug, Clone)]
pub struct IPCodecParams {
    compact_space: CompactSpace,
    bit_unpacker: BitUnpacker,
    min_value: u128,
    max_value: u128,
    num_vals: u64,
    num_bits: u8,
}

impl CompactSpaceCompressor {
    /// Buffering the values in a `BTreeSet` may cost a lot of memory. It is used to sort
    /// and dedup the values.
    pub fn train_from(iter: impl Iterator<Item = u128>, num_vals: u64) -> Self {
        let mut values_sorted = BTreeSet::new();
        values_sorted.extend(iter);
        let total_num_values = num_vals;

        let compact_space =
            get_compact_space(&values_sorted, total_num_values, COST_PER_BLANK_IN_BITS);
        let amplitude_compact_space = compact_space.amplitude_compact_space();

        assert!(
            amplitude_compact_space <= u64::MAX as u128,
            "case unsupported."
        );

        let num_bits = tantivy_bitpacker::compute_num_bits(amplitude_compact_space as u64);
        let min_value = *values_sorted.iter().next().unwrap_or(&0);
        let max_value = *values_sorted.iter().last().unwrap_or(&0);
        assert_eq!(
            compact_space
                .u128_to_compact(max_value)
                .expect("could not convert max value to compact space"),
            amplitude_compact_space as u64
        );
        CompactSpaceCompressor {
            params: IPCodecParams {
                compact_space,
                bit_unpacker: BitUnpacker::new(num_bits),
                min_value,
                max_value,
                num_vals: total_num_values,
                num_bits,
            },
        }
    }

    fn write_footer(self, writer: &mut impl Write) -> io::Result<()> {
        let writer = &mut CountingWriter::wrap(writer);
        self.params.serialize(writer)?;

        let footer_len = writer.written_bytes() as u32;
        footer_len.serialize(writer)?;

        Ok(())
    }

    pub fn compress_into(
        self,
        vals: impl Iterator<Item = u128>,
        write: &mut impl Write,
    ) -> io::Result<()> {
        let mut bitpacker = BitPacker::default();
        for val in vals {
            let compact = self
                .params
                .compact_space
                .u128_to_compact(val)
                .map_err(|_| {
                    io::Error::new(
                        io::ErrorKind::InvalidData,
                        "Could not convert value to compact_space. This is a bug.",
                    )
                })?;
            bitpacker.write(compact, self.params.num_bits, write)?;
        }
        bitpacker.close(write)?;
        self.write_footer(write)?;
        Ok(())
    }
}

#[derive(Debug, Clone)]
pub struct CompactSpaceDecompressor {
    data: OwnedBytes,
    params: IPCodecParams,
}

impl BinarySerializable for IPCodecParams {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        // Header flags reserved for a future optional dictionary encoding.
        let footer_flags = 0u64;
        footer_flags.serialize(writer)?;

        VIntU128(self.min_value).serialize(writer)?;
        VIntU128(self.max_value).serialize(writer)?;
        VIntU128(self.num_vals as u128).serialize(writer)?;
        self.num_bits.serialize(writer)?;

        self.compact_space.serialize(writer)?;

        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let _header_flags = u64::deserialize(reader)?;
        let min_value = VIntU128::deserialize(reader)?.0;
        let max_value = VIntU128::deserialize(reader)?.0;
        let num_vals = VIntU128::deserialize(reader)?.0 as u64;
        let num_bits = u8::deserialize(reader)?;
        let compact_space = CompactSpace::deserialize(reader)?;

        Ok(Self {
            compact_space,
            bit_unpacker: BitUnpacker::new(num_bits),
            min_value,
            max_value,
            num_vals,
            num_bits,
        })
    }
}

impl Column<u128> for CompactSpaceDecompressor {
    #[inline]
    fn get_val(&self, doc: u64) -> u128 {
        self.get(doc)
    }

    fn min_value(&self) -> u128 {
        self.min_value()
    }

    fn max_value(&self) -> u128 {
        self.max_value()
    }

    fn num_vals(&self) -> u64 {
        self.params.num_vals
    }

    #[inline]
    fn iter(&self) -> Box<dyn Iterator<Item = u128> + '_> {
        Box::new(self.iter())
    }
    fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
        self.get_between_vals(range)
    }
}

impl CompactSpaceDecompressor {
    pub fn open(data: OwnedBytes) -> io::Result<CompactSpaceDecompressor> {
        let (data_slice, footer_len_bytes) = data.split_at(data.len() - 4);
        let footer_len = u32::deserialize(&mut &footer_len_bytes[..])?;

        let data_footer = &data_slice[data_slice.len() - footer_len as usize..];
        let params = IPCodecParams::deserialize(&mut &data_footer[..])?;
        let decompressor = CompactSpaceDecompressor { data, params };

        Ok(decompressor)
    }

    /// Converting to compact space for the decompressor is more complex, since we may get
    /// values which are outside the compact space. E.g. if we map
    /// 1000 => 5
    /// 2000 => 6
    ///
    /// and we want a mapping for 1005, there is no equivalent compact space. We instead
    /// return an error with the index of the next range.
    fn u128_to_compact(&self, value: u128) -> Result<u64, usize> {
        self.params.compact_space.u128_to_compact(value)
    }

    fn compact_to_u128(&self, compact: u64) -> u128 {
        self.params.compact_space.compact_to_u128(compact)
    }

    /// Comparing on compact space: Random dataset 0,24 (50% random hit) - 1.05 GElements/s
    /// Comparing on compact space: Real dataset 1.08 GElements/s
    ///
    /// Comparing on original space: Real dataset .06 GElements/s (not completely optimized)
    pub fn get_between_vals(&self, range: RangeInclusive<u128>) -> Vec<u64> {
        if range.start() > range.end() {
            return Vec::new();
        }
        let from_value = *range.start();
        let to_value = *range.end();
        assert!(to_value >= from_value);
        let compact_from = self.u128_to_compact(from_value);
        let compact_to = self.u128_to_compact(to_value);

        // Quick return: if both bounds fall into the same non-mapped space, the range can't
        // cover any values, so we can exit early.
        match (compact_to, compact_from) {
            (Err(pos1), Err(pos2)) if pos1 == pos2 => return Vec::new(),
            _ => {}
        }

        let compact_from = compact_from.unwrap_or_else(|pos| {
            // Correctness of the out-of-bounds access: if this value were
            // Err(last_index + 1), we would have exited early, since the to_value
            // would map into the same non-mapped space.
            let range_mapping = self.params.compact_space.get_range_mapping(pos);
            range_mapping.compact_start
        });
        // If there is no compact space, we go to the closest upper-bound compact space.
        let compact_to = compact_to.unwrap_or_else(|pos| {
            // Correctness of the overflow: if this value were Err(0), we would have exited
            // early, since the from_value would map into the same non-mapped space.

            // Get the end of the previous range.
            let pos = pos - 1;
            let range_mapping = self.params.compact_space.get_range_mapping(pos);
            range_mapping.compact_end()
        });

        let range = compact_from..=compact_to;
        let mut positions = Vec::new();

        let step_size = 4;
        let cutoff = self.params.num_vals - self.params.num_vals % step_size;

        let mut push_if_in_range = |idx, val| {
            if range.contains(&val) {
                positions.push(idx);
            }
        };
        let get_val = |idx| self.params.bit_unpacker.get(idx, &self.data);
        // Unrolled loop
        for idx in (0..cutoff).step_by(step_size as usize) {
            let idx1 = idx;
            let idx2 = idx + 1;
            let idx3 = idx + 2;
            let idx4 = idx + 3;
            let val1 = get_val(idx1);
            let val2 = get_val(idx2);
            let val3 = get_val(idx3);
            let val4 = get_val(idx4);
            push_if_in_range(idx1, val1);
            push_if_in_range(idx2, val2);
            push_if_in_range(idx3, val3);
            push_if_in_range(idx4, val4);
        }

        // Handle the rest
        for idx in cutoff..self.params.num_vals {
            push_if_in_range(idx, get_val(idx));
        }

        positions
    }

    #[inline]
    fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
        (0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
    }

    #[inline]
    fn iter(&self) -> impl Iterator<Item = u128> + '_ {
        // TODO: Performance. It would be better to iterate on the ranges and check
        // existence via the bit_unpacker.
        self.iter_compact()
            .map(|compact| self.compact_to_u128(compact))
    }

    #[inline]
    pub fn get(&self, idx: u64) -> u128 {
        let compact = self.params.bit_unpacker.get(idx, &self.data);
        self.compact_to_u128(compact)
    }

    pub fn min_value(&self) -> u128 {
        self.params.min_value
    }

    pub fn max_value(&self) -> u128 {
        self.params.max_value
    }
}

#[cfg(test)]
mod tests {

    use super::*;
    use crate::{open_u128, serialize_u128};

    #[test]
    fn compact_space_test() {
        let ips = &[
            2u128, 4u128, 1000, 1001, 1002, 1003, 1004, 1005, 1008, 1010, 1012, 1260,
        ]
        .into_iter()
        .collect();
        let compact_space = get_compact_space(ips, ips.len() as u64, 11);
        let amplitude = compact_space.amplitude_compact_space();
        assert_eq!(amplitude, 17);
        assert_eq!(1, compact_space.u128_to_compact(2).unwrap());
        assert_eq!(2, compact_space.u128_to_compact(3).unwrap());
        assert_eq!(compact_space.u128_to_compact(100).unwrap_err(), 1);

        for (num1, num2) in (0..3).tuple_windows() {
            assert_eq!(
                compact_space.get_range_mapping(num1).compact_end() + 1,
                compact_space.get_range_mapping(num2).compact_start
            );
        }

        let mut output: Vec<u8> = Vec::new();
        compact_space.serialize(&mut output).unwrap();

        assert_eq!(
            compact_space,
            CompactSpace::deserialize(&mut &output[..]).unwrap()
        );

        for ip in ips {
            let compact = compact_space.u128_to_compact(*ip).unwrap();
            assert_eq!(compact_space.compact_to_u128(compact), *ip);
        }
    }

    #[test]
    fn compact_space_amplitude_test() {
        let ips = &[100000u128, 1000000].into_iter().collect();
        let compact_space = get_compact_space(ips, ips.len() as u64, 1);
        let amplitude = compact_space.amplitude_compact_space();
        assert_eq!(amplitude, 2);
    }

    fn test_all(data: OwnedBytes, expected: &[u128]) {
        let decompressor = CompactSpaceDecompressor::open(data).unwrap();
        for (idx, expected_val) in expected.iter().cloned().enumerate() {
            let val = decompressor.get(idx as u64);
            assert_eq!(val, expected_val);

            let test_range = |range: RangeInclusive<u128>| {
                let expected_positions = expected
                    .iter()
                    .positions(|val| range.contains(val))
                    .map(|pos| pos as u64)
                    .collect::<Vec<_>>();
                let positions = decompressor.get_between_vals(range);
                assert_eq!(positions, expected_positions);
            };

            test_range(expected_val.saturating_sub(1)..=expected_val);
            test_range(expected_val..=expected_val);
            test_range(expected_val..=expected_val.saturating_add(1));
            test_range(expected_val.saturating_sub(1)..=expected_val.saturating_add(1));
        }
    }

    fn test_aux_vals(u128_vals: &[u128]) -> OwnedBytes {
        let mut out = Vec::new();
        serialize_u128(
            || u128_vals.iter().cloned(),
            u128_vals.len() as u64,
            &mut out,
        )
        .unwrap();

        let data = OwnedBytes::new(out);
        test_all(data.clone(), u128_vals);
        data
    }

    #[test]
    fn test_range_1() {
        let vals = &[
            1u128,
            100u128,
            3u128,
            99999u128,
            100000u128,
            100001u128,
            4_000_211_221u128,
            4_000_211_222u128,
            333u128,
        ];
        let data = test_aux_vals(vals);
        let decomp = CompactSpaceDecompressor::open(data).unwrap();
        let positions = decomp.get_between_vals(0..=1);
        assert_eq!(positions, vec![0]);
        let positions = decomp.get_between_vals(0..=2);
        assert_eq!(positions, vec![0]);
        let positions = decomp.get_between_vals(0..=3);
        assert_eq!(positions, vec![0, 2]);
        assert_eq!(decomp.get_between_vals(99999u128..=99999u128), vec![3]);
        assert_eq!(decomp.get_between_vals(99999u128..=100000u128), vec![3, 4]);
        assert_eq!(decomp.get_between_vals(99998u128..=100000u128), vec![3, 4]);
        assert_eq!(decomp.get_between_vals(99998u128..=99999u128), vec![3]);
        assert_eq!(decomp.get_between_vals(99998u128..=99998u128), vec![]);
        assert_eq!(decomp.get_between_vals(333u128..=333u128), vec![8]);
        assert_eq!(decomp.get_between_vals(332u128..=333u128), vec![8]);
        assert_eq!(decomp.get_between_vals(332u128..=334u128), vec![8]);
        assert_eq!(decomp.get_between_vals(333u128..=334u128), vec![8]);

        assert_eq!(
            decomp.get_between_vals(4_000_211_221u128..=5_000_000_000u128),
            vec![6, 7]
        );
    }

    #[test]
    fn test_empty() {
        let vals = &[];
        let data = test_aux_vals(vals);
        let _decomp = CompactSpaceDecompressor::open(data).unwrap();
    }

    #[test]
    fn test_range_2() {
        let vals = &[
            100u128,
            99999u128,
            100000u128,
            100001u128,
            4_000_211_221u128,
            4_000_211_222u128,
            333u128,
        ];
        let data = test_aux_vals(vals);
        let decomp = CompactSpaceDecompressor::open(data).unwrap();
        let positions = decomp.get_between_vals(0..=5);
        assert_eq!(positions, vec![]);
        let positions = decomp.get_between_vals(0..=100);
        assert_eq!(positions, vec![0]);
        let positions = decomp.get_between_vals(0..=105);
        assert_eq!(positions, vec![0]);
    }

    #[test]
    fn test_range_3() {
        let vals = &[
            200u128,
            201,
            202,
            203,
            204,
            204,
            206,
            207,
            208,
            209,
            210,
            1_000_000,
            5_000_000_000,
        ];
        let mut out = Vec::new();
        serialize_u128(|| vals.iter().cloned(), vals.len() as u64, &mut out).unwrap();
        let decomp = open_u128::<u128>(OwnedBytes::new(out)).unwrap();

        assert_eq!(decomp.get_between_vals(199..=200), vec![0]);
        assert_eq!(decomp.get_between_vals(199..=201), vec![0, 1]);
        assert_eq!(decomp.get_between_vals(200..=200), vec![0]);
        assert_eq!(decomp.get_between_vals(1_000_000..=1_000_000), vec![11]);
    }

    #[test]
    fn test_bug1() {
        let vals = &[9223372036854775806];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_bug2() {
        let vals = &[340282366920938463463374607431768211455u128];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_bug3() {
        let vals = &[340282366920938463463374607431768211454];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_bug4() {
        let vals = &[340282366920938463463374607431768211455, 0];
        let _data = test_aux_vals(vals);
    }

    #[test]
    fn test_first_large_gaps() {
        let vals = &[1_000_000_000u128; 100];
        let _data = test_aux_vals(vals);
    }
    use itertools::Itertools;
    use proptest::prelude::*;

    fn num_strategy() -> impl Strategy<Value = u128> {
        prop_oneof![
            1 => prop::num::u128::ANY.prop_map(|num| u128::MAX - (num % 10) ),
            1 => prop::num::u128::ANY.prop_map(|num| i64::MAX as u128 + 5 - (num % 10) ),
            1 => prop::num::u128::ANY.prop_map(|num| i128::MAX as u128 + 5 - (num % 10) ),
            1 => prop::num::u128::ANY.prop_map(|num| num % 10 ),
            20 => prop::num::u128::ANY,
        ]
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(10))]

        #[test]
        fn compress_decompress_random(vals in proptest::collection::vec(num_strategy(), 1..1000)) {
            let _data = test_aux_vals(&vals);
        }
    }
}
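Tying the module docs back to the data structures: a sketch, using `get_compact_space` and the mapping functions directly. These are crate-private, so a snippet like this would have to live inside the module (for example as a test); the exact compact values follow from the walkthrough above, with 0 reserved for null:

fn compact_space_demo() {
    use std::collections::BTreeSet;

    let values: BTreeSet<u128> = [100u128, 101, 102, 103, 104, 50000, 50001]
        .into_iter()
        .collect();
    let compact_space = get_compact_space(&values, values.len() as u64, 11);

    // 0 is reserved for null, so 100..=104 maps to 1..=5 and
    // 50000..=50001 maps to 6..=7.
    assert_eq!(compact_space.u128_to_compact(100).unwrap(), 1);
    assert_eq!(compact_space.u128_to_compact(50000).unwrap(), 6);

    // And back again.
    assert_eq!(compact_space.compact_to_u128(6), 50000);
}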
170  fastfield_codecs/src/gcd.rs  Normal file
@@ -0,0 +1,170 @@
use std::num::NonZeroU64;

use fastdivide::DividerU64;

/// Compute the gcd of two non-null numbers.
///
/// It is recommended, but not required, to feed values such that `large >= small`.
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
    loop {
        let rem: u64 = large.get() % small;
        if let Some(new_small) = NonZeroU64::new(rem) {
            (large, small) = (small, new_small);
        } else {
            return small;
        }
    }
}

// Find the GCD for an iterator of numbers.
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
    let mut numbers = numbers.flat_map(NonZeroU64::new);
    let mut gcd: NonZeroU64 = numbers.next()?;
    if gcd.get() == 1 {
        return Some(gcd);
    }

    let mut gcd_divider = DividerU64::divide_by(gcd.get());
    for val in numbers {
        let remainder = val.get() - (gcd_divider.divide(val.get())) * gcd.get();
        if remainder == 0 {
            continue;
        }
        gcd = compute_gcd(val, gcd);
        if gcd.get() == 1 {
            return Some(gcd);
        }

        gcd_divider = DividerU64::divide_by(gcd.get());
    }
    Some(gcd)
}

#[cfg(test)]
mod tests {
    use std::io;
    use std::num::NonZeroU64;

    use ownedbytes::OwnedBytes;

    use crate::gcd::{compute_gcd, find_gcd};
    use crate::{FastFieldCodecType, VecColumn};

    fn test_fastfield_gcd_i64_with_codec(
        codec_type: FastFieldCodecType,
        num_vals: usize,
    ) -> io::Result<()> {
        let mut vals: Vec<i64> = (-4..=(num_vals as i64) - 5).map(|val| val * 1000).collect();
        let mut buffer: Vec<u8> = Vec::new();
        crate::serialize(VecColumn::from(&vals), &mut buffer, &[codec_type])?;
        let buffer = OwnedBytes::new(buffer);
        let column = crate::open::<i64>(buffer.clone())?;
        assert_eq!(column.get_val(0), -4000i64);
        assert_eq!(column.get_val(1), -3000i64);
        assert_eq!(column.get_val(2), -2000i64);
        assert_eq!(column.max_value(), (num_vals as i64 - 5) * 1000);
        assert_eq!(column.min_value(), -4000i64);

        // The gcd can't be applied anymore.
        let mut buffer_without_gcd = Vec::new();
        vals.pop();
        vals.push(1001i64);
        crate::serialize(
            VecColumn::from(&vals),
            &mut buffer_without_gcd,
            &[codec_type],
        )?;
        let buffer_without_gcd = OwnedBytes::new(buffer_without_gcd);
        assert!(buffer_without_gcd.len() > buffer.len());

        Ok(())
    }

    #[test]
    fn test_fastfield_gcd_i64() -> io::Result<()> {
        for &codec_type in &[
            FastFieldCodecType::Bitpacked,
            FastFieldCodecType::BlockwiseLinear,
            FastFieldCodecType::Linear,
        ] {
            test_fastfield_gcd_i64_with_codec(codec_type, 5500)?;
        }
        Ok(())
    }

    fn test_fastfield_gcd_u64_with_codec(
        codec_type: FastFieldCodecType,
        num_vals: usize,
    ) -> io::Result<()> {
        let mut vals: Vec<u64> = (1..=num_vals).map(|i| i as u64 * 1000u64).collect();
        let mut buffer: Vec<u8> = Vec::new();
        crate::serialize(VecColumn::from(&vals), &mut buffer, &[codec_type])?;
        let buffer = OwnedBytes::new(buffer);
        let column = crate::open::<u64>(buffer.clone())?;
        assert_eq!(column.get_val(0), 1000u64);
        assert_eq!(column.get_val(1), 2000u64);
        assert_eq!(column.get_val(2), 3000u64);
        assert_eq!(column.max_value(), num_vals as u64 * 1000);
        assert_eq!(column.min_value(), 1000u64);

        // The gcd can't be applied anymore.
        let mut buffer_without_gcd = Vec::new();
        vals.pop();
        vals.push(1001u64);
        crate::serialize(
            VecColumn::from(&vals),
            &mut buffer_without_gcd,
            &[codec_type],
        )?;
        let buffer_without_gcd = OwnedBytes::new(buffer_without_gcd);
        assert!(buffer_without_gcd.len() > buffer.len());
        Ok(())
    }

    #[test]
    fn test_fastfield_gcd_u64() -> io::Result<()> {
        for &codec_type in &[
            FastFieldCodecType::Bitpacked,
            FastFieldCodecType::BlockwiseLinear,
            FastFieldCodecType::Linear,
        ] {
            test_fastfield_gcd_u64_with_codec(codec_type, 5500)?;
        }
        Ok(())
    }

    #[test]
    pub fn test_fastfield2() {
        let test_fastfield = crate::serialize_and_load(&[100u64, 200u64, 300u64]);
        assert_eq!(test_fastfield.get_val(0), 100);
        assert_eq!(test_fastfield.get_val(1), 200);
        assert_eq!(test_fastfield.get_val(2), 300);
    }

    #[test]
    fn test_compute_gcd() {
        let test_compute_gcd_aux = |large, small, expected| {
            let large = NonZeroU64::new(large).unwrap();
            let small = NonZeroU64::new(small).unwrap();
            let expected = NonZeroU64::new(expected).unwrap();
            assert_eq!(compute_gcd(small, large), expected);
            assert_eq!(compute_gcd(large, small), expected);
        };
        test_compute_gcd_aux(1, 4, 1);
        test_compute_gcd_aux(2, 4, 2);
        test_compute_gcd_aux(10, 25, 5);
        test_compute_gcd_aux(25, 25, 25);
    }

    #[test]
    fn find_gcd_test() {
        assert_eq!(find_gcd([0].into_iter()), None);
        assert_eq!(find_gcd([0, 10].into_iter()), NonZeroU64::new(10));
        assert_eq!(find_gcd([10, 0].into_iter()), NonZeroU64::new(10));
        assert_eq!(find_gcd([].into_iter()), None);
        assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), NonZeroU64::new(5));
        assert_eq!(find_gcd([15, 16, 10].into_iter()), NonZeroU64::new(1));
        assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), NonZeroU64::new(5));
        assert_eq!(find_gcd([0, 0].into_iter()), None);
    }
}
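A small usage sketch for `find_gcd`, mirroring the tests above: zeros are skipped, and the result is the gcd of the remaining values:

fn find_gcd_demo() {
    use std::num::NonZeroU64;

    // 1000, 2000, 3000 share a gcd of 1000, so the gcd layer can store
    // 1, 2, 3 instead and multiply back on read.
    assert_eq!(
        find_gcd([1000u64, 2000, 3000].into_iter()),
        NonZeroU64::new(1000)
    );
}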
fastfield_codecs/src/lib.rs
@@ -1,130 +1,278 @@
 #![warn(missing_docs)]
 #![cfg_attr(all(feature = "unstable", test), feature(test))]
 
+//! # `fastfield_codecs`
+//!
+//! - Columnar storage of data for tantivy [`Column`].
+//! - Encode data in different codecs.
+//! - Monotonically map values to u64/u128
+
 #[cfg(test)]
 #[macro_use]
 extern crate more_asserts;
 
 #[cfg(all(test, feature = "unstable"))]
 extern crate test;
 
 use std::io;
 use std::io::Write;
+use std::sync::Arc;
 
-pub mod bitpacked;
-pub mod linearinterpol;
-pub mod multilinearinterpol;
+use common::BinarySerializable;
+use compact_space::CompactSpaceDecompressor;
+use monotonic_mapping::{
+    StrictlyMonotonicMappingInverter, StrictlyMonotonicMappingToInternal,
+    StrictlyMonotonicMappingToInternalBaseval, StrictlyMonotonicMappingToInternalGCDBaseval,
+};
+use ownedbytes::OwnedBytes;
+use serialize::Header;
 
-pub trait FastFieldCodecReader: Sized {
-    /// reads the metadata and returns the CodecReader
-    fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;
+mod bitpacked;
+mod blockwise_linear;
+mod compact_space;
+mod line;
+mod linear;
+mod monotonic_mapping;
+mod monotonic_mapping_u128;
 
-    fn get_u64(&self, doc: u64, data: &[u8]) -> u64;
+mod column;
+mod gcd;
+mod serialize;
 
-    fn min_value(&self) -> u64;
-    fn max_value(&self) -> u64;
+use self::bitpacked::BitpackedCodec;
+use self::blockwise_linear::BlockwiseLinearCodec;
+pub use self::column::{monotonic_map_column, Column, VecColumn};
+use self::linear::LinearCodec;
+pub use self::monotonic_mapping::{MonotonicallyMappableToU64, StrictlyMonotonicFn};
+pub use self::monotonic_mapping_u128::MonotonicallyMappableToU128;
+pub use self::serialize::{
+    estimate, serialize, serialize_and_load, serialize_u128, NormalizedHeader,
+};
+
+#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
+#[repr(u8)]
+/// Available codecs for encoding data converted to u64 (via [`MonotonicallyMappableToU64`]).
+pub enum FastFieldCodecType {
+    /// Bitpack all values in the value range. The number of bits is defined by the amplitude
+    /// `column.max_value() - column.min_value()`.
+    Bitpacked = 1,
+    /// Linear interpolation puts a line between the first and last value and then bitpacks
+    /// the values by the offset from the line. The number of bits is defined by the max
+    /// deviation from the line.
+    Linear = 2,
+    /// Same as [`FastFieldCodecType::Linear`], but encodes in blocks of 512 elements.
+    BlockwiseLinear = 3,
+}
+
+impl BinarySerializable for FastFieldCodecType {
+    fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
+        self.to_code().serialize(wrt)
+    }
+
+    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+        let code = u8::deserialize(reader)?;
+        let codec_type: Self = Self::from_code(code).ok_or_else(|| {
+            io::Error::new(io::ErrorKind::InvalidData, format!("Unknown code `{code}`."))
+        })?;
+        Ok(codec_type)
+    }
+}
+
+impl FastFieldCodecType {
+    pub(crate) fn to_code(self) -> u8 {
+        self as u8
+    }
+
+    pub(crate) fn from_code(code: u8) -> Option<Self> {
+        match code {
+            1 => Some(Self::Bitpacked),
+            2 => Some(Self::Linear),
+            3 => Some(Self::BlockwiseLinear),
+            _ => None,
+        }
+    }
+}
+
+/// Returns the correct codec reader, wrapped in an `Arc`, for the data.
+pub fn open_u128<Item: MonotonicallyMappableToU128>(
+    bytes: OwnedBytes,
+) -> io::Result<Arc<dyn Column<Item>>> {
+    let reader = CompactSpaceDecompressor::open(bytes)?;
+    let inverted: StrictlyMonotonicMappingInverter<StrictlyMonotonicMappingToInternal<Item>> =
+        StrictlyMonotonicMappingToInternal::<Item>::new().into();
+    Ok(Arc::new(monotonic_map_column(reader, inverted)))
+}
+
+/// Returns the correct codec reader, wrapped in an `Arc`, for the data.
+pub fn open<T: MonotonicallyMappableToU64>(
+    mut bytes: OwnedBytes,
+) -> io::Result<Arc<dyn Column<T>>> {
+    let header = Header::deserialize(&mut bytes)?;
+    match header.codec_type {
+        FastFieldCodecType::Bitpacked => open_specific_codec::<BitpackedCodec, _>(bytes, &header),
+        FastFieldCodecType::Linear => open_specific_codec::<LinearCodec, _>(bytes, &header),
+        FastFieldCodecType::BlockwiseLinear => {
+            open_specific_codec::<BlockwiseLinearCodec, _>(bytes, &header)
+        }
+    }
+}
+
+fn open_specific_codec<C: FastFieldCodec, Item: MonotonicallyMappableToU64>(
+    bytes: OwnedBytes,
+    header: &Header,
+) -> io::Result<Arc<dyn Column<Item>>> {
+    let normalized_header = header.normalized();
+    let reader = C::open_from_bytes(bytes, normalized_header)?;
+    let min_value = header.min_value;
+    if let Some(gcd) = header.gcd {
+        let mapping = StrictlyMonotonicMappingInverter::from(
+            StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd.get(), min_value),
+        );
+        Ok(Arc::new(monotonic_map_column(reader, mapping)))
+    } else {
+        let mapping = StrictlyMonotonicMappingInverter::from(
+            StrictlyMonotonicMappingToInternalBaseval::new(min_value),
+        );
+        Ok(Arc::new(monotonic_map_column(reader, mapping)))
+    }
+}
 
-/// The FastFieldSerializerEstimate trait is required on all variants
-/// of fast field compressions, to decide which one to choose.
-pub trait FastFieldCodecSerializer {
+trait FastFieldCodec: 'static {
     /// A codec needs to provide a unique name and id, which is
     /// used for debugging and de/serialization.
-    const NAME: &'static str;
-    const ID: u8;
+    const CODEC_TYPE: FastFieldCodecType;
 
-    /// Check if the Codec is able to compress the data
-    fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> bool;
+    type Reader: Column<u64> + 'static;
 
+    /// Reads the metadata and returns the CodecReader.
+    fn open_from_bytes(bytes: OwnedBytes, header: NormalizedHeader) -> io::Result<Self::Reader>;
+
+    /// Serializes the data using the serializer into write.
+    ///
+    /// The column iterator should be preferred over the column's `get_val` method for
+    /// performance reasons.
+    fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()>;
+
     /// Returns an estimate of the compression ratio.
+    /// If the codec is not applicable, returns `None`.
     ///
     /// The baseline is uncompressed 64bit data.
     ///
     /// It could make sense to also return a value representing
     /// computational complexity.
-    fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32;
-
-    /// Serializes the data using the serializer into write.
-    /// There are multiple iterators, in case the codec needs to read the data multiple times.
-    /// The iterators should be preferred over using fastfield_accessor for performance reasons.
-    fn serialize(
-        write: &mut impl Write,
-        fastfield_accessor: &impl FastFieldDataAccess,
-        stats: FastFieldStats,
-        data_iter: impl Iterator<Item = u64>,
-        data_iter1: impl Iterator<Item = u64>,
-    ) -> io::Result<()>;
+    fn estimate(column: &dyn Column) -> Option<f32>;
 }
 
-/// FastFieldDataAccess is the trait to access fast field data during serialization and estimation.
-pub trait FastFieldDataAccess {
-    /// Return the value associated to the given position.
-    ///
-    /// Whenever possible use the Iterator passed to the fastfield creation instead, for
-    /// performance reasons.
-    ///
-    /// # Panics
-    ///
-    /// May panic if `position` is greater than the index.
-    fn get_val(&self, position: u64) -> u64;
-}
-
-#[derive(Debug, Clone)]
-/// Statistics are used in codec detection and stored in the fast field footer.
-pub struct FastFieldStats {
-    pub min_value: u64,
-    pub max_value: u64,
-    pub num_vals: u64,
-}
-
-impl<'a> FastFieldDataAccess for &'a [u64] {
-    fn get_val(&self, position: u64) -> u64 {
-        self[position as usize]
-    }
-}
-
-impl FastFieldDataAccess for Vec<u64> {
-    fn get_val(&self, position: u64) -> u64 {
-        self[position as usize]
-    }
-}
+/// The list of all available codecs for u64 convertible data.
+pub const ALL_CODEC_TYPES: [FastFieldCodecType; 3] = [
+    FastFieldCodecType::Bitpacked,
+    FastFieldCodecType::BlockwiseLinear,
+    FastFieldCodecType::Linear,
+];
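For context, an end-to-end sketch of the refactored public API (mirroring the gcd.rs tests above): serialize a column, letting the codec be picked among `ALL_CODEC_TYPES`, then reopen it as a `Column<u64>`:

fn roundtrip_demo() -> std::io::Result<()> {
    use ownedbytes::OwnedBytes;

    let vals: Vec<u64> = vec![100, 200, 300];
    let mut buffer: Vec<u8> = Vec::new();
    serialize(VecColumn::from(&vals), &mut buffer, &ALL_CODEC_TYPES)?;

    // The header records which codec was chosen; `open` dispatches on it.
    let column = open::<u64>(OwnedBytes::new(buffer))?;
    assert_eq!(column.num_vals(), 3);
    assert_eq!(column.get_val(1), 200);
    Ok(())
}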
|
||||
#[cfg(test)]
mod tests {
    use crate::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
    use crate::linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer};
    use crate::multilinearinterpol::{
        MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
    };

    pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
    use proptest::prelude::*;
    use proptest::strategy::Strategy;
    use proptest::{prop_oneof, proptest};

    use crate::bitpacked::BitpackedCodec;
    use crate::blockwise_linear::BlockwiseLinearCodec;
    use crate::linear::LinearCodec;
    use crate::serialize::Header;

    pub(crate) fn create_and_validate<Codec: FastFieldCodec>(
        data: &[u64],
        name: &str,
    ) -> (f32, f32) {
        if !S::is_applicable(&data, crate::tests::stats_from_vec(data)) {
            return (f32::MAX, 0.0);
        }
        let estimation = S::estimate(&data, crate::tests::stats_from_vec(data));
        let mut out = vec![];
        S::serialize(
            &mut out,
            &data,
            crate::tests::stats_from_vec(data),
            data.iter().cloned(),
            data.iter().cloned(),
        )
        .unwrap();
    ) -> Option<(f32, f32)> {
        let col = &VecColumn::from(data);
        let header = Header::compute_header(col, &[Codec::CODEC_TYPE])?;
        let normalized_col = header.normalize_column(col);
        let estimation = Codec::estimate(&normalized_col)?;

        let mut out = Vec::new();
        let col = VecColumn::from(data);
        serialize(col, &mut out, &[Codec::CODEC_TYPE]).unwrap();

        let reader = R::open_from_bytes(&out).unwrap();
        for (doc, orig_val) in data.iter().enumerate() {
            let val = reader.get_u64(doc as u64, &out);
            if val != *orig_val {
                panic!(
                    "val {:?} does not match orig_val {:?}, in data set {}, data {:?}",
                    val, orig_val, name, data
                );
            }
        }
        let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
        (estimation, actual_compression)

        let reader = crate::open::<u64>(OwnedBytes::new(out)).unwrap();
        assert_eq!(reader.num_vals(), data.len() as u64);
        for (doc, orig_val) in data.iter().copied().enumerate() {
            let val = reader.get_val(doc as u64);
            assert_eq!(
                val, orig_val,
                "val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
                 `{data:?}`",
            );
        }

        if !data.is_empty() {
            let test_rand_idx = rand::thread_rng().gen_range(0..=data.len() - 1);
            let expected_positions: Vec<u64> = data
                .iter()
                .enumerate()
                .filter(|(_, el)| **el == data[test_rand_idx])
                .map(|(pos, _)| pos as u64)
                .collect();
            let positions = reader.get_between_vals(data[test_rand_idx]..=data[test_rand_idx]);
            assert_eq!(expected_positions, positions);
        }
        Some((estimation, actual_compression))
    }
    pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]

        #[test]
        fn test_proptest_small_bitpacked(data in proptest::collection::vec(num_strategy(), 1..10)) {
            create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
        }

        #[test]
        fn test_proptest_small_linear(data in proptest::collection::vec(num_strategy(), 1..10)) {
            create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
        }

        #[test]
        fn test_proptest_small_blockwise_linear(data in proptest::collection::vec(num_strategy(), 1..10)) {
            create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
        }
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(10))]

        #[test]
        fn test_proptest_large_bitpacked(data in proptest::collection::vec(num_strategy(), 1..6000)) {
            create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
        }

        #[test]
        fn test_proptest_large_linear(data in proptest::collection::vec(num_strategy(), 1..6000)) {
            create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
        }

        #[test]
        fn test_proptest_large_blockwise_linear(data in proptest::collection::vec(num_strategy(), 1..6000)) {
            create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
        }
    }

    fn num_strategy() -> impl Strategy<Value = u64> {
        prop_oneof![
            1 => prop::num::u64::ANY.prop_map(|num| u64::MAX - (num % 10) ),
            1 => prop::num::u64::ANY.prop_map(|num| num % 10 ),
            20 => prop::num::u64::ANY,
        ]
    }

    pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
        let mut data_and_names = vec![];

        let data = (10..=20_u64).collect::<Vec<_>>();
        let data = (10..=10_000_u64).collect::<Vec<_>>();
        data_and_names.push((data, "simple monotonically increasing"));

        data_and_names.push((
@@ -134,92 +282,230 @@ mod tests {
        data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
        data_and_names.push((vec![10], "single value"));

        data_and_names.push((
            vec![1572656989877777, 1170935903116329, 720575940379279, 0],
            "overflow error",
        ));

        data_and_names
    }

    fn test_codec<S: FastFieldCodecSerializer, R: FastFieldCodecReader>() {
        let codec_name = S::NAME;
        for (data, data_set_name) in get_codec_test_data_sets() {
            let (estimate, actual) =
                crate::tests::create_and_validate::<S, R>(&data, data_set_name);
            let result = if estimate == f32::MAX {
                "Disabled".to_string()
    fn test_codec<C: FastFieldCodec>() {
        let codec_name = format!("{:?}", C::CODEC_TYPE);
        for (data, dataset_name) in get_codec_test_datasets() {
            let estimate_actual_opt: Option<(f32, f32)> =
                crate::tests::create_and_validate::<C>(&data, dataset_name);
            let result = if let Some((estimate, actual)) = estimate_actual_opt {
                format!("Estimate `{estimate}` Actual `{actual}`")
            } else {
                format!("Estimate {:?} Actual {:?} ", estimate, actual)
                "Disabled".to_string()
            };
            println!(
                "Codec {}, DataSet {}, {}",
                codec_name, data_set_name, result
            );
            println!("Codec {codec_name}, DataSet {dataset_name}, {result}");
        }
    }
    #[test]
    fn test_codec_bitpacking() {
        test_codec::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>();
        test_codec::<BitpackedCodec>();
    }
    #[test]
    fn test_codec_interpolation() {
        test_codec::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>();
        test_codec::<LinearCodec>();
    }
    #[test]
    fn test_codec_multi_interpolation() {
        test_codec::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>();
        test_codec::<BlockwiseLinearCodec>();
    }

    use super::*;
    pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
        let min_value = data.iter().cloned().min().unwrap_or(0);
        let max_value = data.iter().cloned().max().unwrap_or(0);
        FastFieldStats {
            min_value,
            max_value,
            num_vals: data.len() as u64,
        }
    }

    #[test]
    fn estimation_good_interpolation_case() {
        let data = (10..=20000_u64).collect::<Vec<_>>();
        let data: VecColumn = data.as_slice().into();

        let linear_interpol_estimation =
            LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
        assert_le!(linear_interpol_estimation, 0.01);

        let multi_linear_interpol_estimation =
            MultiLinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
        let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data).unwrap();
        assert_le!(multi_linear_interpol_estimation, 0.2);
        assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation);
        assert_lt!(linear_interpol_estimation, multi_linear_interpol_estimation);

        let bitpacked_estimation =
            BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
        assert_le!(linear_interpol_estimation, bitpacked_estimation);
        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
        assert_lt!(linear_interpol_estimation, bitpacked_estimation);
    }
    #[test]
    fn estimation_test_bad_interpolation_case() {
        let data = vec![200, 10, 10, 10, 10, 1000, 20];
        let data: &[u64] = &[200, 10, 10, 10, 10, 1000, 20];

        let linear_interpol_estimation =
            LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
        assert_le!(linear_interpol_estimation, 0.32);
        let data: VecColumn = data.into();
        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
        assert_le!(linear_interpol_estimation, 0.34);

        let bitpacked_estimation =
            BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
        assert_le!(bitpacked_estimation, linear_interpol_estimation);
        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
        assert_lt!(bitpacked_estimation, linear_interpol_estimation);
    }

    #[test]
    fn estimation_prefer_bitpacked() {
        let data = VecColumn::from(&[10, 10, 10, 10]);
        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
        assert_lt!(bitpacked_estimation, linear_interpol_estimation);
    }

    #[test]
    fn estimation_test_bad_interpolation_case_monotonically_increasing() {
        let mut data = (200..=20000_u64).collect::<Vec<_>>();
        let mut data: Vec<u64> = (201..=20000_u64).collect();
        data.push(1_000_000);
        let data: VecColumn = data.as_slice().into();

        // in this case the linear interpolation can in fact not be worse than bitpacking,
        // but the estimator adds some threshold, which leads to a worse estimate
        let linear_interpol_estimation =
            LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
        let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
        assert_le!(linear_interpol_estimation, 0.35);

        let bitpacked_estimation =
            BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
        let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
        assert_le!(bitpacked_estimation, 0.32);
        assert_le!(bitpacked_estimation, linear_interpol_estimation);
    }

    #[test]
    fn test_fast_field_codec_type_to_code() {
        let mut count_codec = 0;
        for code in 0..=255 {
            if let Some(codec_type) = FastFieldCodecType::from_code(code) {
                assert_eq!(codec_type.to_code(), code);
                count_codec += 1;
            }
        }
        assert_eq!(count_codec, 3);
    }
}

#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
use std::sync::Arc;
|
||||
|
||||
use ownedbytes::OwnedBytes;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use test::{self, Bencher};
|
||||
|
||||
use super::*;
|
||||
use crate::Column;
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
let mut rng = StdRng::seed_from_u64(2u64);
|
||||
let mut data: Vec<_> = (100..55000_u64)
|
||||
.map(|num| num + rng.gen::<u8>() as u64)
|
||||
.collect();
|
||||
data.push(99_000);
|
||||
data.insert(1000, 2000);
|
||||
data.insert(2000, 100);
|
||||
data.insert(3000, 4100);
|
||||
data.insert(4000, 100);
|
||||
data.insert(5000, 800);
|
||||
data
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
fn get_reader_for_bench<Codec: FastFieldCodec>(data: &[u64]) -> Codec::Reader {
|
||||
let mut bytes = Vec::new();
|
||||
let min_value = *data.iter().min().unwrap();
|
||||
let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
|
||||
let col = VecColumn::from(&data);
|
||||
let normalized_header = crate::NormalizedHeader {
|
||||
num_vals: col.num_vals(),
|
||||
max_value: col.max_value(),
|
||||
};
|
||||
Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
|
||||
Codec::open_from_bytes(OwnedBytes::new(bytes), normalized_header).unwrap()
|
||||
}
|
||||
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = get_reader_for_bench::<Codec>(data);
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u64);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn bench_get_dynamic_helper(b: &mut Bencher, col: Arc<dyn Column>) {
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
let val = col.get_val(pos as u64);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_get_dynamic<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let col = Arc::new(get_reader_for_bench::<Codec>(data));
|
||||
bench_get_dynamic_helper(b, col);
|
||||
}
|
||||
fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let min_value = *data.iter().min().unwrap();
|
||||
let data = data.iter().map(|el| *el - min_value).collect::<Vec<_>>();
|
||||
|
||||
let mut bytes = Vec::new();
|
||||
b.iter(|| {
|
||||
bytes.clear();
|
||||
Codec::serialize(&VecColumn::from(&data), &mut bytes).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get_dynamic(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get_dynamic::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
}
|
||||
|
||||
fastfield_codecs/src/line.rs (new file, 222 lines)
@@ -0,0 +1,222 @@
use std::io;
use std::num::NonZeroU64;

use common::{BinarySerializable, VInt};

use crate::Column;

const MID_POINT: u64 = (1u64 << 32) - 1u64;

/// `Line` describes a line function `y: ax + b` using integer
/// arithmetics.
///
/// The slope is in fact a decimal split into a 32 bit integer value,
/// and a 32-bit decimal value.
///
/// The multiplication then becomes
/// `y = m * x >> 32 + b`.
#[derive(Debug, Clone, Copy, Default)]
pub struct Line {
    slope: u64,
    intercept: u64,
}

/// Compute the line slope.
///
/// This function has the nice property of being
/// invariant by translation:
/// `
/// compute_slope(y0, y1)
/// = compute_slope(y0 + X % 2^64, y1 + X % 2^64)
/// `
fn compute_slope(y0: u64, y1: u64, num_vals: NonZeroU64) -> u64 {
    let dy = y1.wrapping_sub(y0);
    let sign = dy <= (1 << 63);
    let abs_dy = if sign {
        y1.wrapping_sub(y0)
    } else {
        y0.wrapping_sub(y1)
    };
    if abs_dy >= 1 << 32 {
        // This is outside of the realm we handle.
        // Let's just bail.
        return 0u64;
    }

    let abs_slope = (abs_dy << 32) / num_vals.get();
    if sign {
        abs_slope
    } else {
        // The complement does indeed create the
        // opposite, decreasing slope...
        //
        // Intuitively (without the bitshifts and % u64::MAX)
        // ```
        // (x + shift) * (u64::MAX - abs_slope)
        // - (x * (u64::MAX - abs_slope))
        // = - shift * abs_slope
        // ```
        u64::MAX - abs_slope
    }
}

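The translation invariance stated in the doc comment above can be checked directly. The following test-style sketch is illustrative only and is not part of the file; it relies solely on `compute_slope` and wrapping arithmetic.

    // Illustrative sketch: the invariance of `compute_slope` under wrapping
    // translation, as stated in its doc comment. Not part of the original diff.
    #[cfg(test)]
    mod slope_invariance_sketch {
        use std::num::NonZeroU64;
        use super::compute_slope;

        #[test]
        fn slope_is_translation_invariant() {
            let n = NonZeroU64::new(10).unwrap();
            let (y0, y1) = (100u64, 1_000u64);
            let base = compute_slope(y0, y1, n);
            for shift in [0u64, 1, u64::MAX / 2, u64::MAX] {
                // Translating both endpoints by the same wrapped amount
                // leaves the computed slope unchanged.
                assert_eq!(
                    base,
                    compute_slope(y0.wrapping_add(shift), y1.wrapping_add(shift), n)
                );
            }
        }
    }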
impl Line {
    #[inline(always)]
    pub fn eval(&self, x: u64) -> u64 {
        let linear_part = (x.wrapping_mul(self.slope) >> 32) as i32 as u64;
        self.intercept.wrapping_add(linear_part)
    }

    // Same as train, but the intercept is only estimated from provided sample positions.
    pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
        let first_val = sample_positions_and_values[0].1;
        let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
        let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
        Self::train_from(
            first_val,
            last_val,
            num_vals,
            sample_positions_and_values.iter().cloned(),
        )
    }

    // Intercept is only computed from provided positions.
    fn train_from(
        first_val: u64,
        last_val: u64,
        num_vals: u64,
        positions_and_values: impl Iterator<Item = (u64, u64)>,
    ) -> Self {
        // TODO replace with let else
        let idx_last_val = if let Some(idx_last_val) = NonZeroU64::new(num_vals - 1) {
            idx_last_val
        } else {
            return Line::default();
        };

        let y0 = first_val;
        let y1 = last_val;

        // We first independently pick our slope.
        let slope = compute_slope(y0, y1, idx_last_val);

        // We picked our slope. Note that it does not have to be perfect.
        // Now we need to compute the best intercept.
        //
        // Intuitively, the best intercept is such that the line passes through one of the
        // `(i, ys[i])`.
        //
        // The best intercept therefore has the form
        // `ys[i] - line.eval(i)` (using wrapping arithmetics).
        // In other words, the best intercept is one of the `ys[i] - line.eval(i)`
        // and our task is just to pick the one that minimizes our error.
        //
        // Without sorting our values, this is a difficult problem.
        // We however rely on the following trick...
        //
        // We only focus on the case where the interpolation is half decent.
        // If the line interpolation is doing its job on a dataset suited for it,
        // we can hope that the maximum error won't be larger than `u64::MAX / 2`.
        //
        // In other words, even without the intercept, the values `ys[i] - line.eval(i)` will all
        // be within an interval that takes less than half of the modulo space of `u64`.
        //
        // Our task is therefore to identify this interval.
        // Here we simply translate all of our values by `y0 - 2^63` and pick the min.
        let mut line = Line {
            slope,
            intercept: 0,
        };
        let heuristic_shift = y0.wrapping_sub(MID_POINT);
        line.intercept = positions_and_values
            .map(|(pos, y)| y.wrapping_sub(line.eval(pos)))
            .min_by_key(|&val| val.wrapping_sub(heuristic_shift))
            .unwrap_or(0u64); //< Never happens.
        line
    }

    /// Returns a line that attempts to approximate a function
    /// f: i in 0..[ys.num_vals()) -> ys[i].
    ///
    /// - The approximation is always lower than the actual value.
    ///   Or more rigorously, formally `f(i).wrapping_sub(ys[i])` is small
    ///   for any i in [0..ys.len()).
    /// - It computes without panicking for any input.
    ///
    /// This function is only invariant by translation if all of the
    /// `ys` are packed into half of the space. (See heuristic below.)
    pub fn train(ys: &dyn Column) -> Self {
        let first_val = ys.iter().next().unwrap();
        let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
        Self::train_from(
            first_val,
            last_val,
            ys.num_vals(),
            ys.iter().enumerate().map(|(pos, val)| (pos as u64, val)),
        )
    }
}

impl BinarySerializable for Line {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.slope).serialize(writer)?;
        VInt(self.intercept).serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let slope = VInt::deserialize(reader)?.0;
        let intercept = VInt::deserialize(reader)?.0;
        Ok(Line { slope, intercept })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::VecColumn;

    /// Test training a line and ensuring that the maximum difference between
    /// the data points and the line is `expected`.
    ///
    /// This function applies several translations to the data for better coverage.
    #[track_caller]
    fn test_line_interpol_with_translation(ys: &[u64], expected: Option<u64>) {
        let mut translations = vec![0, 100, u64::MAX / 2, u64::MAX, u64::MAX - 1];
        translations.extend_from_slice(ys);
        for translation in translations {
            let translated_ys: Vec<u64> = ys
                .iter()
                .copied()
                .map(|y| y.wrapping_add(translation))
                .collect();
            let largest_err = test_eval_max_err(&translated_ys);
            assert_eq!(largest_err, expected);
        }
    }

    fn test_eval_max_err(ys: &[u64]) -> Option<u64> {
        let line = Line::train(&VecColumn::from(&ys));
        ys.iter()
            .enumerate()
            .map(|(x, y)| y.wrapping_sub(line.eval(x as u64)))
            .max()
    }

    #[test]
    fn test_train() {
        test_line_interpol_with_translation(&[11, 11, 11, 12, 12, 13], Some(1));
        test_line_interpol_with_translation(&[13, 12, 12, 11, 11, 11], Some(1));
        test_line_interpol_with_translation(&[13, 13, 12, 11, 11, 11], Some(1));
        test_line_interpol_with_translation(&[13, 13, 12, 11, 11, 11], Some(1));
        test_line_interpol_with_translation(&[u64::MAX - 1, 0, 0, 1], Some(1));
        test_line_interpol_with_translation(&[u64::MAX - 1, u64::MAX, 0, 1], Some(0));
        test_line_interpol_with_translation(&[0, 1, 2, 3, 5], Some(0));
        test_line_interpol_with_translation(&[1, 2, 3, 4], Some(0));

        let data: Vec<u64> = (0..255).collect();
        test_line_interpol_with_translation(&data, Some(0));
        let data: Vec<u64> = (0..255).map(|el| el * 2).collect();
        test_line_interpol_with_translation(&data, Some(0));
    }
}
fastfield_codecs/src/linear.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
use std::io::{self, Write};

use common::BinarySerializable;
use ownedbytes::OwnedBytes;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::line::Line;
use crate::serialize::NormalizedHeader;
use crate::{Column, FastFieldCodec, FastFieldCodecType};

/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearReader {
    data: OwnedBytes,
    linear_params: LinearParams,
    header: NormalizedHeader,
}

impl Column for LinearReader {
    #[inline]
    fn get_val(&self, doc: u64) -> u64 {
        let interpolated_val: u64 = self.linear_params.line.eval(doc);
        let bitpacked_diff = self.linear_params.bit_unpacker.get(doc, &self.data);
        interpolated_val.wrapping_add(bitpacked_diff)
    }

    #[inline]
    fn min_value(&self) -> u64 {
        // The LinearReader assumes a normalized vector.
        0u64
    }

    #[inline]
    fn max_value(&self) -> u64 {
        self.header.max_value
    }

    #[inline]
    fn num_vals(&self) -> u64 {
        self.header.num_vals
    }
}

/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearCodec;

#[derive(Debug, Clone)]
struct LinearParams {
    line: Line,
    bit_unpacker: BitUnpacker,
}

impl BinarySerializable for LinearParams {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        self.line.serialize(writer)?;
        self.bit_unpacker.bit_width().serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let line = Line::deserialize(reader)?;
        let bit_width = u8::deserialize(reader)?;
        Ok(Self {
            line,
            bit_unpacker: BitUnpacker::new(bit_width),
        })
    }
}

impl FastFieldCodec for LinearCodec {
    const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear;

    type Reader = LinearReader;

    /// Opens a fast field given a file.
    fn open_from_bytes(mut data: OwnedBytes, header: NormalizedHeader) -> io::Result<Self::Reader> {
        let linear_params = LinearParams::deserialize(&mut data)?;
        Ok(LinearReader {
            data,
            linear_params,
            header,
        })
    }

    /// Creates a new fast field serializer.
    fn serialize(column: &dyn Column, write: &mut impl Write) -> io::Result<()> {
        assert_eq!(column.min_value(), 0);
        let line = Line::train(column);

        let max_offset_from_line = column
            .iter()
            .enumerate()
            .map(|(pos, actual_value)| {
                let calculated_value = line.eval(pos as u64);
                actual_value.wrapping_sub(calculated_value)
            })
            .max()
            .unwrap();

        let num_bits = compute_num_bits(max_offset_from_line);
        let linear_params = LinearParams {
            line,
            bit_unpacker: BitUnpacker::new(num_bits),
        };
        linear_params.serialize(write)?;

        let mut bit_packer = BitPacker::new();
        for (pos, actual_value) in column.iter().enumerate() {
            let calculated_value = line.eval(pos as u64);
            let offset = actual_value.wrapping_sub(calculated_value);
            bit_packer.write(offset, num_bits, write)?;
        }
        bit_packer.close(write)?;

        Ok(())
    }

    /// Estimation for linear interpolation is hard because you don't know
    /// where the local maxima of the deviation from the calculated value are, and
    /// the offset to shift all values to >= 0 is also unknown.
    #[allow(clippy::question_mark)]
    fn estimate(column: &dyn Column) -> Option<f32> {
        if column.num_vals() < 3 {
            return None; // disable compressor for this case
        }

        let limit_num_vals = column.num_vals().min(100_000);

        let num_samples = 100;
        let step_size = (limit_num_vals / num_samples).max(1); // at most 100 samples
        let mut sample_positions_and_values: Vec<_> = Vec::new();
        for (pos, val) in column.iter().enumerate().step_by(step_size as usize) {
            sample_positions_and_values.push((pos as u64, val));
        }

        let line = Line::estimate(&sample_positions_and_values);

        let estimated_bit_width = sample_positions_and_values
            .into_iter()
            .map(|(pos, actual_value)| {
                let interpolated_val = line.eval(pos as u64);
                actual_value.wrapping_sub(interpolated_val)
            })
            .map(|diff| ((diff as f32 * 1.5) * 2.0) as u64)
            .map(compute_num_bits)
            .max()
            .unwrap_or(0);

        // Extrapolate to the whole column.
        let num_bits = (estimated_bit_width as u64 * column.num_vals() as u64) + 64;
        let num_bits_uncompressed = 64 * column.num_vals();
        Some(num_bits as f32 / num_bits_uncompressed as f32)
    }
}

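Reading the estimate above as arithmetic: with an estimated bit width `b` over `n` values, the predicted ratio is `(b * n + 64) / (64 * n)`. The numbers in the sketch below are assumed for illustration and are not taken from the diff.

    // Worked example with assumed numbers: n = 100_000 values whose sampled
    // deviations, after the 1.5 * 2.0 safety margin, fit in 4 bits.
    fn estimate_ratio_example() -> f32 {
        let (estimated_bit_width, num_vals) = (4u64, 100_000u64);
        let num_bits = estimated_bit_width * num_vals + 64; // + 64 bits of metadata
        num_bits as f32 / (64 * num_vals) as f32 // ~0.0625, i.e. ~16x smaller
    }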
#[cfg(test)]
mod tests {
    use rand::RngCore;

    use super::*;
    use crate::tests::get_codec_test_datasets;

    fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
        crate::tests::create_and_validate::<LinearCodec>(data, name)
    }

    #[test]
    fn test_compression() {
        let data = (10..=6_000_u64).collect::<Vec<_>>();
        let (estimate, actual_compression) =
            create_and_validate(&data, "simple monotonically large").unwrap();

        assert_le!(actual_compression, 0.001);
        assert_le!(estimate, 0.02);
    }

    #[test]
    fn test_with_codec_datasets() {
        let data_sets = get_codec_test_datasets();
        for (mut data, name) in data_sets {
            create_and_validate(&data, name);
            data.reverse();
            create_and_validate(&data, name);
        }
    }
    #[test]
    fn linear_interpol_fast_field_test_large_amplitude() {
        let data = vec![
            i64::MAX as u64 / 2,
            i64::MAX as u64 / 3,
            i64::MAX as u64 / 2,
        ];

        create_and_validate(&data, "large amplitude");
    }

    #[test]
    fn overflow_error_test() {
        let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
        create_and_validate(&data, "overflow test");
    }

    #[test]
    fn linear_interpol_fast_concave_data() {
        let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
        create_and_validate(&data, "concave data");
    }
    #[test]
    fn linear_interpol_fast_convex_data() {
        let data = vec![0, 40, 60, 70, 75, 77];
        create_and_validate(&data, "convex data");
    }
    #[test]
    fn linear_interpol_fast_field_test_simple() {
        let data = (10..=20_u64).collect::<Vec<_>>();
        create_and_validate(&data, "simple monotonically");
    }

    #[test]
    fn linear_interpol_fast_field_rand() {
        let mut rng = rand::thread_rng();
        for _ in 0..50 {
            let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
            create_and_validate(&data, "random");
            data.reverse();
            create_and_validate(&data, "random");
        }
    }
}
@@ -1,300 +0,0 @@
use std::io::{self, Read, Write};
use std::ops::Sub;

use common::{BinarySerializable, FixedSize};
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};

/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearInterpolFastFieldReader {
    bit_unpacker: BitUnpacker,
    pub footer: LinearInterpolFooter,
    pub slope: f32,
}

#[derive(Clone, Debug)]
pub struct LinearInterpolFooter {
    pub relative_max_value: u64,
    pub offset: u64,
    pub first_val: u64,
    pub last_val: u64,
    pub num_vals: u64,
    pub min_value: u64,
    pub max_value: u64,
}

impl BinarySerializable for LinearInterpolFooter {
    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
        self.relative_max_value.serialize(write)?;
        self.offset.serialize(write)?;
        self.first_val.serialize(write)?;
        self.last_val.serialize(write)?;
        self.num_vals.serialize(write)?;
        self.min_value.serialize(write)?;
        self.max_value.serialize(write)?;
        Ok(())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
        Ok(LinearInterpolFooter {
            relative_max_value: u64::deserialize(reader)?,
            offset: u64::deserialize(reader)?,
            first_val: u64::deserialize(reader)?,
            last_val: u64::deserialize(reader)?,
            num_vals: u64::deserialize(reader)?,
            min_value: u64::deserialize(reader)?,
            max_value: u64::deserialize(reader)?,
        })
    }
}

impl FixedSize for LinearInterpolFooter {
    const SIZE_IN_BYTES: usize = 56;
}

impl FastFieldCodecReader for LinearInterpolFastFieldReader {
    /// Opens a fast field given a file.
    fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
        let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
        let footer = LinearInterpolFooter::deserialize(&mut footer)?;
        let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);

        let num_bits = compute_num_bits(footer.relative_max_value);
        let bit_unpacker = BitUnpacker::new(num_bits);
        Ok(LinearInterpolFastFieldReader {
            bit_unpacker,
            footer,
            slope,
        })
    }
    #[inline]
    fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
        let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
        (calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
    }

    #[inline]
    fn min_value(&self) -> u64 {
        self.footer.min_value
    }
    #[inline]
    fn max_value(&self) -> u64 {
        self.footer.max_value
    }
}

/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearInterpolFastFieldSerializer {}

#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
    if num_vals <= 1 {
        return 0.0;
    }
    // We calculate the slope with f64 high precision and use the result in lower precision f32.
    // This is done in order to handle estimations for very large values like i64::MAX.
    ((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}

#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
    first_val + (pos as f32 * slope) as u64
}

impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
    const NAME: &'static str = "LinearInterpol";
    const ID: u8 = 2;
    /// Creates a new fast field serializer.
    fn serialize(
        write: &mut impl Write,
        fastfield_accessor: &impl FastFieldDataAccess,
        stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        data_iter1: impl Iterator<Item = u64>,
    ) -> io::Result<()> {
        assert!(stats.min_value <= stats.max_value);

        let first_val = fastfield_accessor.get_val(0);
        let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
        let slope = get_slope(first_val, last_val, stats.num_vals);
        // calculate offset to ensure all values are positive
        let mut offset = 0;
        let mut rel_positive_max = 0;
        for (pos, actual_value) in data_iter1.enumerate() {
            let calculated_value = get_calculated_value(first_val, pos as u64, slope);
            if calculated_value > actual_value {
                // negative value, we need to apply an offset
                // we ignore negative values in the max value calculation, because negative values
                // will be offset to 0
                offset = offset.max(calculated_value - actual_value);
            } else {
                // positive value, no offset required
                rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
            }
        }

        // rel_positive_max will be adjusted by offset
        let relative_max_value = rel_positive_max + offset;

        let num_bits = compute_num_bits(relative_max_value);
        let mut bit_packer = BitPacker::new();
        for (pos, val) in data_iter.enumerate() {
            let calculated_value = get_calculated_value(first_val, pos as u64, slope);
            let diff = (val + offset) - calculated_value;
            bit_packer.write(diff, num_bits, write)?;
        }
        bit_packer.close(write)?;

        let footer = LinearInterpolFooter {
            relative_max_value,
            offset,
            first_val,
            last_val,
            num_vals: stats.num_vals,
            min_value: stats.min_value,
            max_value: stats.max_value,
        };
        footer.serialize(write)?;
        Ok(())
    }
    fn is_applicable(
        _fastfield_accessor: &impl FastFieldDataAccess,
        stats: FastFieldStats,
    ) -> bool {
        if stats.num_vals < 3 {
            return false; // disable compressor for this case
        }
        // On serialisation the offset is added to the actual value.
        // We need to make sure this won't run into overflow calculation issues.
        // For this we take the maximum theoretical offset and add this to the max value.
        // If this doesn't overflow, the algorithm should be fine.
        let theoretical_maximum_offset = stats.max_value - stats.min_value;
        if stats
            .max_value
            .checked_add(theoretical_maximum_offset)
            .is_none()
        {
            return false;
        }
        true
    }
    /// Estimation for linear interpolation is hard because you don't know
    /// where the local maxima for the deviation of the calculated value are, and
    /// the offset to shift all values to >= 0 is also unknown.
    fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
        let first_val = fastfield_accessor.get_val(0);
        let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
        let slope = get_slope(first_val, last_val, stats.num_vals);

        // let's sample at 0%, 5%, 10% .. 95%, 100%
        let num_vals = stats.num_vals as f32 / 100.0;
        let sample_positions = (0..20)
            .map(|pos| (num_vals * pos as f32 * 5.0) as usize)
            .collect::<Vec<_>>();

        let max_distance = sample_positions
            .iter()
            .map(|pos| {
                let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
                let actual_value = fastfield_accessor.get_val(*pos as u64);
                distance(calculated_value, actual_value)
            })
            .max()
            .unwrap_or(0);

        // The theory would be that we don't have the actual max_distance, but we are close within
        // a 50% threshold.
        // It is multiplied by 2 because in a worst case scenario the line would be as much above
        // as below. So the offset would equal max_distance.
        let relative_max_value = (max_distance as f32 * 1.5) * 2.0;

        let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
            + LinearInterpolFooter::SIZE_IN_BYTES as u64;
        let num_bits_uncompressed = 64 * stats.num_vals;
        num_bits as f32 / num_bits_uncompressed as f32
    }
}

#[inline]
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
    if x < y {
        y - x
    } else {
        x - y
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tests::get_codec_test_data_sets;

    fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
        crate::tests::create_and_validate::<
            LinearInterpolFastFieldSerializer,
            LinearInterpolFastFieldReader,
        >(data, name)
    }

    #[test]
    fn test_compression() {
        let data = (10..=6_000_u64).collect::<Vec<_>>();
        let (estimate, actual_compression) =
            create_and_validate(&data, "simple monotonically large");

        assert!(actual_compression < 0.01);
        assert!(estimate < 0.01);
    }

    #[test]
    fn test_with_codec_data_sets() {
        let data_sets = get_codec_test_data_sets();
        for (mut data, name) in data_sets {
            create_and_validate(&data, name);
            data.reverse();
            create_and_validate(&data, name);
        }
    }
    #[test]
    fn linear_interpol_fast_field_test_large_amplitude() {
        let data = vec![
            i64::MAX as u64 / 2,
            i64::MAX as u64 / 3,
            i64::MAX as u64 / 2,
        ];

        create_and_validate(&data, "large amplitude");
    }
    #[test]
    fn linear_interpol_fast_concave_data() {
        let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
        create_and_validate(&data, "concave data");
    }
    #[test]
    fn linear_interpol_fast_convex_data() {
        let data = vec![0, 40, 60, 70, 75, 77];
        create_and_validate(&data, "convex data");
    }
    #[test]
    fn linear_interpol_fast_field_test_simple() {
        let data = (10..=20_u64).collect::<Vec<_>>();

        create_and_validate(&data, "simple monotonically");
    }

    #[test]
    fn linear_interpol_fast_field_rand() {
        for _ in 0..5000 {
            let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
            create_and_validate(&data, "random");

            data.reverse();
            create_and_validate(&data, "random");
        }
    }
}
@@ -1,52 +1,164 @@
#[macro_use]
extern crate prettytable;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
use fastfield_codecs::{FastFieldCodecSerializer, FastFieldStats};
use std::collections::HashSet;
use std::env;
use std::io::BufRead;
use std::net::{IpAddr, Ipv6Addr};
use std::str::FromStr;

use fastfield_codecs::{open_u128, serialize_u128, Column, FastFieldCodecType, VecColumn};
use itertools::Itertools;
use measure_time::print_time;
use ownedbytes::OwnedBytes;
use prettytable::{Cell, Row, Table};

fn print_set_stats(ip_addrs: &[u128]) {
    println!("NumIps\t{}", ip_addrs.len());
    let ip_addr_set: HashSet<u128> = ip_addrs.iter().cloned().collect();
    println!("NumUniqueIps\t{}", ip_addr_set.len());
    let ratio_unique = ip_addr_set.len() as f64 / ip_addrs.len() as f64;
    println!("RatioUniqueOverTotal\t{ratio_unique:.4}");

    // histogram
    let mut ip_addrs = ip_addrs.to_vec();
    ip_addrs.sort();
    let mut cnts: Vec<usize> = ip_addrs
        .into_iter()
        .dedup_with_count()
        .map(|(cnt, _)| cnt)
        .collect();
    cnts.sort();

    let top_256_cnt: usize = cnts.iter().rev().take(256).sum();
    let top_128_cnt: usize = cnts.iter().rev().take(128).sum();
    let top_64_cnt: usize = cnts.iter().rev().take(64).sum();
    let top_8_cnt: usize = cnts.iter().rev().take(8).sum();
    let total: usize = cnts.iter().sum();

    println!("{}", total);
    println!("{}", top_256_cnt);
    println!("{}", top_128_cnt);
    println!("Percentage Top8 {:02}", top_8_cnt as f32 / total as f32);
    println!("Percentage Top64 {:02}", top_64_cnt as f32 / total as f32);
    println!("Percentage Top128 {:02}", top_128_cnt as f32 / total as f32);
    println!("Percentage Top256 {:02}", top_256_cnt as f32 / total as f32);

    let mut cnts: Vec<(usize, usize)> = cnts.into_iter().dedup_with_count().collect();
    cnts.sort_by(|a, b| {
        if a.1 == b.1 {
            a.0.cmp(&b.0)
        } else {
            b.1.cmp(&a.1)
        }
    });
}

fn ip_dataset() -> Vec<u128> {
    let mut ip_addr_v4 = 0;

    let stdin = std::io::stdin();
    let ip_addrs: Vec<u128> = stdin
        .lock()
        .lines()
        .flat_map(|line| {
            let line = line.unwrap();
            let line = line.trim();
            let ip_addr = IpAddr::from_str(line.trim()).ok()?;
            if ip_addr.is_ipv4() {
                ip_addr_v4 += 1;
            }
            let ip_addr_v6: Ipv6Addr = match ip_addr {
                IpAddr::V4(v4) => v4.to_ipv6_mapped(),
                IpAddr::V6(v6) => v6,
            };
            Some(ip_addr_v6)
        })
        .map(|ip_v6| u128::from_be_bytes(ip_v6.octets()))
        .collect();

    println!("IpAddrsAny\t{}", ip_addrs.len());
    println!("IpAddrsV4\t{}", ip_addr_v4);

    ip_addrs
}

fn bench_ip() {
    let dataset = ip_dataset();
    print_set_stats(&dataset);

    // Chunks
    {
        let mut data = vec![];
        for dataset in dataset.chunks(500_000) {
            serialize_u128(|| dataset.iter().cloned(), dataset.len() as u64, &mut data).unwrap();
        }
        let compression = data.len() as f64 / (dataset.len() * 16) as f64;
        println!("Compression 500_000 chunks {:.4}", compression);
        println!(
            "Num Bits per elem {:.2}",
            (data.len() * 8) as f32 / dataset.len() as f32
        );
    }

    let mut data = vec![];
    {
        print_time!("creation");
        serialize_u128(|| dataset.iter().cloned(), dataset.len() as u64, &mut data).unwrap();
    }

    let compression = data.len() as f64 / (dataset.len() * 16) as f64;
    println!("Compression {:.2}", compression);
    println!(
        "Num Bits per elem {:.2}",
        (data.len() * 8) as f32 / dataset.len() as f32
    );

    let decompressor = open_u128::<u128>(OwnedBytes::new(data)).unwrap();
    // Sample some ranges
    for value in dataset.iter().take(1110).skip(1100).cloned() {
        print_time!("get range");
        let doc_values = decompressor.get_between_vals(value..=value);
        println!("{:?}", doc_values.len());
    }
}

fn main() {
    if env::args().nth(1).unwrap() == "bench_ip" {
        bench_ip();
        return;
    }

    let mut table = Table::new();

    // Add a row per time
    table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);

    for (data, data_set_name) in get_codec_test_data_sets() {
        let mut results = vec![];
        let res = serialize_with_codec::<LinearInterpolFastFieldSerializer>(&data);
        results.push(res);
        let res = serialize_with_codec::<MultiLinearInterpolFastFieldSerializer>(&data);
        results.push(res);
        let res = serialize_with_codec::<fastfield_codecs::bitpacked::BitpackedFastFieldSerializer>(
            &data,
        );
        results.push(res);

        // let best_estimation_codec = results
        //     .iter()
        //     .min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
        //     .unwrap();
        let results: Vec<(f32, f32, FastFieldCodecType)> = [
            serialize_with_codec(&data, FastFieldCodecType::Bitpacked),
            serialize_with_codec(&data, FastFieldCodecType::Linear),
            serialize_with_codec(&data, FastFieldCodecType::BlockwiseLinear),
        ]
        .into_iter()
        .flatten()
        .collect();
        let best_compression_ratio_codec = results
            .iter()
            .min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
            .min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
            .cloned()
            .unwrap();

        table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
        for (is_applicable, est, comp, name) in results {
            let (est_cell, ratio_cell) = if !is_applicable {
                ("Codec Disabled".to_string(), "".to_string())
            } else {
                (est.to_string(), comp.to_string())
            };
        for (est, comp, codec_type) in results {
            let est_cell = est.to_string();
            let ratio_cell = comp.to_string();
            let style = if comp == best_compression_ratio_codec.1 {
                "Fb"
            } else {
                ""
            };

            table.add_row(Row::new(vec![
                Cell::new(name).style_spec("bFg"),
                Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
                Cell::new(&ratio_cell).style_spec(style),
                Cell::new(&est_cell).style_spec(""),
            ]));
@@ -91,34 +203,14 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
    data_and_names
}

pub fn serialize_with_codec<S: FastFieldCodecSerializer>(
pub fn serialize_with_codec(
    data: &[u64],
) -> (bool, f32, f32, &'static str) {
    let is_applicable = S::is_applicable(&data, stats_from_vec(data));
    if !is_applicable {
        return (false, 0.0, 0.0, S::NAME);
    }
    let estimation = S::estimate(&data, stats_from_vec(data));
    let mut out = vec![];
    S::serialize(
        &mut out,
        &data,
        stats_from_vec(data),
        data.iter().cloned(),
        data.iter().cloned(),
    )
    .unwrap();

    let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
    (true, estimation, actual_compression, S::NAME)
}

pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
    let min_value = data.iter().cloned().min().unwrap_or(0);
    let max_value = data.iter().cloned().max().unwrap_or(0);
    FastFieldStats {
        min_value,
        max_value,
        num_vals: data.len() as u64,
    }
    codec_type: FastFieldCodecType,
) -> Option<(f32, f32, FastFieldCodecType)> {
    let col = VecColumn::from(data);
    let estimation = fastfield_codecs::estimate(&col, codec_type)?;
    let mut out = Vec::new();
    fastfield_codecs::serialize(&col, &mut out, &[codec_type]).ok()?;
    let actual_compression = out.len() as f32 / (col.num_vals() * 8) as f32;
    Some((estimation, actual_compression, codec_type))
}

fastfield_codecs/src/monotonic_mapping.rs (new file, 233 lines)
@@ -0,0 +1,233 @@
use std::marker::PhantomData;

use fastdivide::DividerU64;

use crate::MonotonicallyMappableToU128;

/// Monotonically maps a value to the u64 value space.
/// Monotonic mapping enables `PartialOrd` on u64 space without conversion to the original space.
pub trait MonotonicallyMappableToU64: 'static + PartialOrd + Copy + Send + Sync {
    /// Converts a value to u64.
    ///
    /// Internally all fast field values are encoded as u64.
    fn to_u64(self) -> u64;

    /// Converts a value from u64.
    ///
    /// Internally all fast field values are encoded as u64.
    /// **Note: To be used for converting encoded Term, Posting values.**
    fn from_u64(val: u64) -> Self;
}

/// Values need to be strictly monotonically mapped to an `Internal` value (u64 or u128) that can be
/// used in fast field codecs.
///
/// The monotonic mapping is required so that `PartialOrd` can be used on `Internal` without
/// converting to `External`.
///
/// All strictly monotonic functions are invertible because they are guaranteed to have a one-to-one
/// mapping from their range to their domain. The `inverse` method is required when opening a codec,
/// so a value can be converted back to its original domain (e.g. ip address or f64) from its
/// internal representation.
pub trait StrictlyMonotonicFn<External, Internal> {
    /// Strictly monotonically maps the value from External to Internal.
    fn mapping(&self, inp: External) -> Internal;
    /// Inverse of `mapping`. Maps the value from Internal to External.
    fn inverse(&self, out: Internal) -> External;
}

/// Inverts a strictly monotonic mapping from `StrictlyMonotonicFn<A, B>` to
/// `StrictlyMonotonicFn<B, A>`.
///
/// # Warning
///
/// This type comes with a footgun. A type being strictly monotonic does not impose that the inverse
/// mapping is strictly monotonic over the entire space External. e.g. a -> a * 2. Use at your own
/// risk.
pub(crate) struct StrictlyMonotonicMappingInverter<T> {
    orig_mapping: T,
}
impl<T> From<T> for StrictlyMonotonicMappingInverter<T> {
    fn from(orig_mapping: T) -> Self {
        Self { orig_mapping }
    }
}

impl<From, To, T> StrictlyMonotonicFn<To, From> for StrictlyMonotonicMappingInverter<T>
where T: StrictlyMonotonicFn<From, To>
{
    fn mapping(&self, val: To) -> From {
        self.orig_mapping.inverse(val)
    }

    fn inverse(&self, val: From) -> To {
        self.orig_mapping.mapping(val)
    }
}

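The `a -> a * 2` footgun called out in the warning above can be made concrete. `Double` below is a hypothetical mapping invented for illustration; doubling is strictly monotonic, but the inverted direction collapses pairs of inputs.

    // Hypothetical example of the footgun, not part of the file: doubling is
    // strictly monotonic, but its inverse (halving) is not strictly monotonic
    // over all of u64, since two inputs can map back to the same output.
    struct Double;

    impl StrictlyMonotonicFn<u64, u64> for Double {
        fn mapping(&self, inp: u64) -> u64 {
            inp.wrapping_mul(2)
        }
        fn inverse(&self, out: u64) -> u64 {
            out / 2
        }
    }
    // inverse(2) == inverse(3) == 1: the inverted direction is not strictly
    // monotonic over the full u64 space, which is exactly the warning above.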
/// Applies the strictly monotonic mapping from `T` without any additional changes.
pub(crate) struct StrictlyMonotonicMappingToInternal<T> {
    _phantom: PhantomData<T>,
}

impl<T> StrictlyMonotonicMappingToInternal<T> {
    pub(crate) fn new() -> StrictlyMonotonicMappingToInternal<T> {
        Self {
            _phantom: PhantomData,
        }
    }
}

impl<External: MonotonicallyMappableToU128, T: MonotonicallyMappableToU128>
    StrictlyMonotonicFn<External, u128> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU128
{
    fn mapping(&self, inp: External) -> u128 {
        External::to_u128(inp)
    }

    fn inverse(&self, out: u128) -> External {
        External::from_u128(out)
    }
}

impl<External: MonotonicallyMappableToU64, T: MonotonicallyMappableToU64>
    StrictlyMonotonicFn<External, u64> for StrictlyMonotonicMappingToInternal<T>
where T: MonotonicallyMappableToU64
{
    fn mapping(&self, inp: External) -> u64 {
        External::to_u64(inp)
    }

    fn inverse(&self, out: u64) -> External {
        External::from_u64(out)
    }
}

/// Mapping dividing by gcd and a base value.
///
/// The mapping is assumed to be called only on values whose offset from the base value is
/// divisible by the passed gcd value. (This is necessary for the function to be monotonic.)
pub(crate) struct StrictlyMonotonicMappingToInternalGCDBaseval {
    gcd_divider: DividerU64,
    gcd: u64,
    min_value: u64,
}
impl StrictlyMonotonicMappingToInternalGCDBaseval {
    pub(crate) fn new(gcd: u64, min_value: u64) -> Self {
        let gcd_divider = DividerU64::divide_by(gcd);
        Self {
            gcd_divider,
            gcd,
            min_value,
        }
    }
}
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
    for StrictlyMonotonicMappingToInternalGCDBaseval
{
    fn mapping(&self, inp: External) -> u64 {
        self.gcd_divider
            .divide(External::to_u64(inp) - self.min_value)
    }

    fn inverse(&self, out: u64) -> External {
        External::from_u64(self.min_value + out * self.gcd)
    }
}

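With the same parameters as the round-trip test at the bottom of this file (gcd = 10, base value = 100), the mapping and its inverse behave as follows. This is an illustrative check, not part of the file.

    // Illustrative check of the gcd + base-value mapping with gcd = 10 and
    // min_value = 100, matching the values used in the test below.
    let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100);
    // (130 - 100) / 10 == 3
    assert_eq!(StrictlyMonotonicFn::<u64, u64>::mapping(&mapping, 130u64), 3u64);
    // 100 + 3 * 10 == 130
    assert_eq!(StrictlyMonotonicFn::<u64, u64>::inverse(&mapping, 3u64), 130u64);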
/// Strictly monotonic mapping with a base value.
|
||||
pub(crate) struct StrictlyMonotonicMappingToInternalBaseval {
|
||||
min_value: u64,
|
||||
}
|
||||
impl StrictlyMonotonicMappingToInternalBaseval {
|
||||
pub(crate) fn new(min_value: u64) -> Self {
|
||||
Self { min_value }
|
||||
}
|
||||
}
|
||||
|
||||
impl<External: MonotonicallyMappableToU64> StrictlyMonotonicFn<External, u64>
|
||||
for StrictlyMonotonicMappingToInternalBaseval
|
||||
{
|
||||
fn mapping(&self, val: External) -> u64 {
|
||||
External::to_u64(val) - self.min_value
|
||||
}
|
||||
|
||||
fn inverse(&self, val: u64) -> External {
|
||||
External::from_u64(self.min_value + val)
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for u64 {
|
||||
fn to_u64(self) -> u64 {
|
||||
self
|
||||
}
|
||||
|
||||
fn from_u64(val: u64) -> Self {
|
||||
val
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for i64 {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
common::i64_to_u64(self)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_i64(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for bool {
|
||||
#[inline(always)]
|
||||
fn to_u64(self) -> u64 {
|
||||
u64::from(self)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_u64(val: u64) -> Self {
|
||||
val > 0
|
||||
}
|
||||
}
|
||||
|
||||
impl MonotonicallyMappableToU64 for f64 {
|
||||
fn to_u64(self) -> u64 {
|
||||
common::f64_to_u64(self)
|
||||
}
|
||||
|
||||
fn from_u64(val: u64) -> Self {
|
||||
common::u64_to_f64(val)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn strictly_monotonic_test() {
|
||||
// identity mapping
|
||||
test_round_trip(&StrictlyMonotonicMappingToInternal::<u64>::new(), 100u64);
|
||||
// round trip to i64
|
||||
test_round_trip(&StrictlyMonotonicMappingToInternal::<i64>::new(), 100u64);
|
||||
// identity mapping
|
||||
test_round_trip(&StrictlyMonotonicMappingToInternal::<u128>::new(), 100u128);
|
||||
|
||||
// base value to i64 round trip
|
||||
let mapping = StrictlyMonotonicMappingToInternalBaseval::new(100);
|
||||
test_round_trip::<_, _, u64>(&mapping, 100i64);
|
||||
// base value and gcd to u64 round trip
|
||||
let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(10, 100);
|
||||
test_round_trip::<_, _, u64>(&mapping, 100u64);
|
||||
}
|
||||
|
||||
fn test_round_trip<T: StrictlyMonotonicFn<K, L>, K: std::fmt::Debug + Eq + Copy, L>(
|
||||
mapping: &T,
|
||||
test_val: K,
|
||||
) {
|
||||
assert_eq!(mapping.inverse(mapping.mapping(test_val)), test_val);
|
||||
}
|
||||
}
|
||||
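As a quick illustration of the trait above: the i64 round trip relies on `common::i64_to_u64` being strictly monotonic. Below is a minimal standalone sketch of such a mapping, assuming the usual sign-bit flip; the actual `common` implementation may differ in detail.

// Standalone sketch of an order-preserving i64 <-> u64 mapping
// (assumed sign-bit flip; not necessarily the crate's exact code).
fn i64_to_u64(val: i64) -> u64 {
    // Flipping the sign bit shifts the i64 range onto the u64 range while
    // preserving order: i64::MIN -> 0, -1 -> (1 << 63) - 1, 0 -> 1 << 63, ...
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    let vals = [i64::MIN, -7, -1, 0, 1, 42, i64::MAX];
    // The mapping is strictly monotonic: order survives the round trip.
    for w in vals.windows(2) {
        assert!(i64_to_u64(w[0]) < i64_to_u64(w[1]));
    }
    for &v in &vals {
        assert_eq!(u64_to_i64(i64_to_u64(v)), v);
    }
    println!("sign-bit flip preserves order and round-trips");
}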
40 fastfield_codecs/src/monotonic_mapping_u128.rs Normal file
@@ -0,0 +1,40 @@
use std::net::Ipv6Addr;

/// Monotonically maps a value to the u128 value space.
/// Monotonic mapping enables `PartialOrd` on u128 space without conversion to original space.
pub trait MonotonicallyMappableToU128: 'static + PartialOrd + Copy + Send + Sync {
    /// Converts a value to u128.
    ///
    /// Internally all u128 fast field values are encoded as u128.
    fn to_u128(self) -> u128;

    /// Converts a value from u128.
    ///
    /// Internally all u128 fast field values are encoded as u128.
    /// **Note: To be used for converting encoded Term, Posting values.**
    fn from_u128(val: u128) -> Self;
}

impl MonotonicallyMappableToU128 for u128 {
    fn to_u128(self) -> u128 {
        self
    }

    fn from_u128(val: u128) -> Self {
        val
    }
}

impl MonotonicallyMappableToU128 for Ipv6Addr {
    fn to_u128(self) -> u128 {
        ip_to_u128(self)
    }

    fn from_u128(val: u128) -> Self {
        Ipv6Addr::from(val.to_be_bytes())
    }
}

fn ip_to_u128(ip_addr: Ipv6Addr) -> u128 {
    u128::from_be_bytes(ip_addr.octets())
}
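A small self-contained check of the property this new file relies on: interpreting an `Ipv6Addr`'s octets as a big-endian `u128` preserves address ordering, so `PartialOrd` can run directly on the mapped space.

use std::net::Ipv6Addr;

// Mirrors the new-file logic above: big-endian byte interpretation keeps
// lexicographic address order identical to integer order.
fn ip_to_u128(ip: Ipv6Addr) -> u128 {
    u128::from_be_bytes(ip.octets())
}

fn main() {
    let low: Ipv6Addr = "::1".parse().unwrap();
    let high: Ipv6Addr = "2001:db8::1".parse().unwrap();
    assert!(ip_to_u128(low) < ip_to_u128(high));
    // Round trip back through the same byte order.
    assert_eq!(Ipv6Addr::from(ip_to_u128(high).to_be_bytes()), high);
}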
@@ -1,427 +0,0 @@
//! MultiLinearInterpol compressor uses linear interpolation to guess values and stores the
//! offset, but in blocks of 512.
//!
//! With a CHUNK_SIZE of 512 and 29 bytes of metadata per block, we get a metadata overhead of
//! 232 / 512 = 0.45 bits per element. The additional space required per element in a block is the
//! maximum deviation of the linear interpolation estimation function.
//!
//! E.g. if the maximum deviation of an element is 12, all elements cost 4 bits.
//!
//! Size per block:
//! Num Elements * Maximum Deviation from Interpolation + 29 Byte Metadata

use std::io::{self, Read, Write};
use std::ops::Sub;

use common::{BinarySerializable, CountingWriter, DeserializeFrom};
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};

use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};

const CHUNK_SIZE: u64 = 512;

/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct MultiLinearInterpolFastFieldReader {
    pub footer: MultiLinearInterpolFooter,
}

#[derive(Clone, Debug, Default)]
struct Function {
    // The offset in the data is required, because we have different bit_widths per block
    data_start_offset: u64,
    // start_pos in the block will be CHUNK_SIZE * BLOCK_NUM
    start_pos: u64,
    // only used during serialization, 0 after deserialization
    end_pos: u64,
    // only used during serialization, 0 after deserialization
    value_start_pos: u64,
    // only used during serialization, 0 after deserialization
    value_end_pos: u64,
    slope: f32,
    // The offset so that all values are positive when writing them
    positive_val_offset: u64,
    num_bits: u8,
    bit_unpacker: BitUnpacker,
}

impl Function {
    fn calc_slope(&mut self) {
        let num_vals = self.end_pos - self.start_pos;
        self.slope = get_slope(self.value_start_pos, self.value_end_pos, num_vals);
    }
    // split the interpolation into two functions, change self and return the second split
    fn split(&mut self, split_pos: u64, split_pos_value: u64) -> Function {
        let mut new_function = Function {
            start_pos: split_pos,
            end_pos: self.end_pos,
            value_start_pos: split_pos_value,
            value_end_pos: self.value_end_pos,
            ..Default::default()
        };
        new_function.calc_slope();
        self.end_pos = split_pos;
        self.value_end_pos = split_pos_value;
        self.calc_slope();
        new_function
    }
}

impl BinarySerializable for Function {
    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
        self.data_start_offset.serialize(write)?;
        self.value_start_pos.serialize(write)?;
        self.positive_val_offset.serialize(write)?;
        self.slope.serialize(write)?;
        self.num_bits.serialize(write)?;
        Ok(())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Function> {
        let data_start_offset = u64::deserialize(reader)?;
        let value_start_pos = u64::deserialize(reader)?;
        let offset = u64::deserialize(reader)?;
        let slope = f32::deserialize(reader)?;
        let num_bits = u8::deserialize(reader)?;
        let interpolation = Function {
            data_start_offset,
            value_start_pos,
            positive_val_offset: offset,
            num_bits,
            bit_unpacker: BitUnpacker::new(num_bits),
            slope,
            ..Default::default()
        };

        Ok(interpolation)
    }
}

#[derive(Clone, Debug)]
pub struct MultiLinearInterpolFooter {
    pub num_vals: u64,
    pub min_value: u64,
    pub max_value: u64,
    interpolations: Vec<Function>,
}

impl BinarySerializable for MultiLinearInterpolFooter {
    fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
        let mut out = vec![];
        self.num_vals.serialize(&mut out)?;
        self.min_value.serialize(&mut out)?;
        self.max_value.serialize(&mut out)?;
        self.interpolations.serialize(&mut out)?;
        write.write_all(&out)?;
        (out.len() as u32).serialize(write)?;
        Ok(())
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<MultiLinearInterpolFooter> {
        let mut footer = MultiLinearInterpolFooter {
            num_vals: u64::deserialize(reader)?,
            min_value: u64::deserialize(reader)?,
            max_value: u64::deserialize(reader)?,
            interpolations: Vec::<Function>::deserialize(reader)?,
        };
        for (num, interpol) in footer.interpolations.iter_mut().enumerate() {
            interpol.start_pos = CHUNK_SIZE * num as u64;
        }
        Ok(footer)
    }
}

#[inline]
fn get_interpolation_position(doc: u64) -> usize {
    let index = doc / CHUNK_SIZE;
    index as usize
}

#[inline]
fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Function {
    &interpolations[get_interpolation_position(doc)]
}

impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
    /// Opens a fast field given a file.
    fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
        let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;

        let (_data, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
        let footer = MultiLinearInterpolFooter::deserialize(&mut footer)?;

        Ok(MultiLinearInterpolFastFieldReader { footer })
    }

    #[inline]
    fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
        let interpolation = get_interpolation_function(doc, &self.footer.interpolations);
        let doc = doc - interpolation.start_pos;
        let calculated_value =
            get_calculated_value(interpolation.value_start_pos, doc, interpolation.slope);
        let diff = interpolation
            .bit_unpacker
            .get(doc, &data[interpolation.data_start_offset as usize..]);
        (calculated_value + diff) - interpolation.positive_val_offset
    }

    #[inline]
    fn min_value(&self) -> u64 {
        self.footer.min_value
    }
    #[inline]
    fn max_value(&self) -> u64 {
        self.footer.max_value
    }
}

#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
    ((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}

#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
    (first_val as i64 + (pos as f32 * slope) as i64) as u64
}

/// Same as LinearInterpolFastFieldSerializer, but working on chunks of CHUNK_SIZE elements.
pub struct MultiLinearInterpolFastFieldSerializer {}

impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
    const NAME: &'static str = "MultiLinearInterpol";
    const ID: u8 = 3;
    /// Creates a new fast field serializer.
    fn serialize(
        write: &mut impl Write,
        fastfield_accessor: &impl FastFieldDataAccess,
        stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        _data_iter1: impl Iterator<Item = u64>,
    ) -> io::Result<()> {
        assert!(stats.min_value <= stats.max_value);

        let first_val = fastfield_accessor.get_val(0);
        let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);

        let mut first_function = Function {
            end_pos: stats.num_vals,
            value_start_pos: first_val,
            value_end_pos: last_val,
            ..Default::default()
        };
        first_function.calc_slope();
        let mut interpolations = vec![first_function];

        // Since we potentially apply multiple passes over the data, the data is cached.
        // Multiple iterations can be expensive (merge with index sorting can add a lot of
        // overhead per iteration)
        let data = data_iter.collect::<Vec<_>>();

        // let's split this into chunks of CHUNK_SIZE
        for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
            let new_fun = {
                let current_interpolation = interpolations.last_mut().unwrap();
                current_interpolation.split(data_pos, data[data_pos as usize])
            };
            interpolations.push(new_fun);
        }
        // calculate offset and max (-> num_bits) for each function
        for interpolation in &mut interpolations {
            let mut offset = 0;
            let mut rel_positive_max = 0;
            for (pos, actual_value) in data
                [interpolation.start_pos as usize..interpolation.end_pos as usize]
                .iter()
                .cloned()
                .enumerate()
            {
                let calculated_value = get_calculated_value(
                    interpolation.value_start_pos,
                    pos as u64,
                    interpolation.slope,
                );
                if calculated_value > actual_value {
                    // negative value, we need to apply an offset
                    // we ignore negative values in the max value calculation, because negative
                    // values will be offset to 0
                    offset = offset.max(calculated_value - actual_value);
                } else {
                    // positive value, no offset required
                    rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
                }
            }

            interpolation.positive_val_offset = offset;
            interpolation.num_bits = compute_num_bits(rel_positive_max + offset);
        }
        let mut bit_packer = BitPacker::new();

        let write = &mut CountingWriter::wrap(write);
        for interpolation in &mut interpolations {
            interpolation.data_start_offset = write.written_bytes();
            let num_bits = interpolation.num_bits;
            for (pos, actual_value) in data
                [interpolation.start_pos as usize..interpolation.end_pos as usize]
                .iter()
                .cloned()
                .enumerate()
            {
                let calculated_value = get_calculated_value(
                    interpolation.value_start_pos,
                    pos as u64,
                    interpolation.slope,
                );
                let diff = (actual_value + interpolation.positive_val_offset) - calculated_value;
                bit_packer.write(diff, num_bits, write)?;
            }
            bit_packer.flush(write)?;
        }
        bit_packer.close(write)?;

        let footer = MultiLinearInterpolFooter {
            num_vals: stats.num_vals,
            min_value: stats.min_value,
            max_value: stats.max_value,
            interpolations,
        };
        footer.serialize(write)?;
        Ok(())
    }

    fn is_applicable(
        _fastfield_accessor: &impl FastFieldDataAccess,
        stats: FastFieldStats,
    ) -> bool {
        if stats.num_vals < 5_000 {
            return false;
        }
        // On serialization the offset is added to the actual value.
        // We need to make sure this won't run into overflow calculation issues.
        // For this we take the maximum theoretical offset and add it to the max value.
        // If this doesn't overflow, the algorithm should be fine.
        let theoretical_maximum_offset = stats.max_value - stats.min_value;
        if stats
            .max_value
            .checked_add(theoretical_maximum_offset)
            .is_none()
        {
            return false;
        }
        true
    }
    /// Estimation for linear interpolation is hard because you don't know
    /// where the local maxima for the deviation of the calculated value are,
    /// and the offset is also unknown.
    fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
        let first_val_in_first_block = fastfield_accessor.get_val(0);
        let last_elem_in_first_chunk = CHUNK_SIZE.min(stats.num_vals);
        let last_val_in_first_block =
            fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
        let slope = get_slope(
            first_val_in_first_block,
            last_val_in_first_block,
            stats.num_vals,
        );

        // let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
        let sample_positions = (0..20)
            .map(|pos| (last_elem_in_first_chunk as f32 / 100.0 * pos as f32 * 5.0) as usize)
            .collect::<Vec<_>>();

        let max_distance = sample_positions
            .iter()
            .map(|pos| {
                let calculated_value =
                    get_calculated_value(first_val_in_first_block, *pos as u64, slope);
                let actual_value = fastfield_accessor.get_val(*pos as u64);
                distance(calculated_value, actual_value)
            })
            .max()
            .unwrap();

        // Estimate one block and extrapolate the cost to all blocks.
        // The theory is that we don't have the actual max_distance, but we are within a
        // 50% threshold of it.
        // It is multiplied by 2 because, in a typical case, the line would be as much above as
        // below, so the offset would equal max_distance.
        //
        let relative_max_value = (max_distance as f32 * 1.5) * 2.0;

        let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
            // function metadata per block
            + 29 * (stats.num_vals / CHUNK_SIZE);
        let num_bits_uncompressed = 64 * stats.num_vals;
        num_bits as f32 / num_bits_uncompressed as f32
    }
}

fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
    if x < y {
        y - x
    } else {
        x - y
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tests::get_codec_test_data_sets;

    fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
        crate::tests::create_and_validate::<
            MultiLinearInterpolFastFieldSerializer,
            MultiLinearInterpolFastFieldReader,
        >(data, name)
    }

    #[test]
    fn test_compression() {
        let data = (10..=6_000_u64).collect::<Vec<_>>();
        let (estimate, actual_compression) =
            create_and_validate(&data, "simple monotonically large");
        assert!(actual_compression < 0.2);
        assert!(estimate < 0.20);
        assert!(estimate > 0.15);
        assert!(actual_compression > 0.01);
    }

    #[test]
    fn test_with_codec_data_sets() {
        let data_sets = get_codec_test_data_sets();
        for (mut data, name) in data_sets {
            create_and_validate(&data, name);
            data.reverse();
            create_and_validate(&data, name);
        }
    }
    #[test]
    fn test_simple() {
        let data = (10..=20_u64).collect::<Vec<_>>();
        create_and_validate(&data, "simple monotonically");
    }

    #[test]
    fn border_cases_1() {
        let data = (0..1024).collect::<Vec<_>>();
        create_and_validate(&data, "border case");
    }
    #[test]
    fn border_case_2() {
        let data = (0..1025).collect::<Vec<_>>();
        create_and_validate(&data, "border case");
    }
    #[test]
    fn rand() {
        for _ in 0..10 {
            let mut data = (5_000..20_000)
                .map(|_| rand::random::<u32>() as u64)
                .collect::<Vec<_>>();
            let _ = create_and_validate(&data, "random");
            data.reverse();
            create_and_validate(&data, "random");
        }
    }
}
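To make the deleted codec's per-block math concrete, here is a toy sketch of the residual-encoding idea under the same scheme: predict each value with a line through the first and last element, shift residuals to be non-negative, and bit-pack them. The `num_bits` helper is a hypothetical stand-in for `tantivy_bitpacker::compute_num_bits`.

// Toy version of the per-block math above, not the deleted implementation.
fn num_bits(val: u64) -> u8 {
    (64 - val.leading_zeros()) as u8
}

fn main() {
    let data: Vec<u64> = vec![100, 210, 290, 405, 500];
    let slope = (data[data.len() - 1] - data[0]) as f32 / (data.len() - 1) as f32;
    let predict = |pos: usize| data[0] + (pos as f32 * slope) as u64;

    // Offset so that all residuals are non-negative, as in the serializer.
    let offset = data
        .iter()
        .enumerate()
        .map(|(pos, &v)| predict(pos).saturating_sub(v))
        .max()
        .unwrap();
    // The largest shifted residual determines the bit width of the block.
    let max_residual = data
        .iter()
        .enumerate()
        .map(|(pos, &v)| (v + offset) - predict(pos))
        .max()
        .unwrap();
    println!(
        "each element costs {} bits instead of 64",
        num_bits(max_residual)
    );
}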
295 fastfield_codecs/src/serialize.rs Normal file
@@ -0,0 +1,295 @@
// Copyright (C) 2022 Quickwit, Inc.
//
// Quickwit is offered under the AGPL v3.0 and as commercial software.
// For commercial licensing, contact us at hello@quickwit.io.
//
// AGPL:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use std::io;
use std::num::NonZeroU64;
use std::sync::Arc;

use common::{BinarySerializable, VInt};
use log::warn;
use ownedbytes::OwnedBytes;

use crate::bitpacked::BitpackedCodec;
use crate::blockwise_linear::BlockwiseLinearCodec;
use crate::compact_space::CompactSpaceCompressor;
use crate::linear::LinearCodec;
use crate::monotonic_mapping::{
    StrictlyMonotonicFn, StrictlyMonotonicMappingToInternal,
    StrictlyMonotonicMappingToInternalGCDBaseval,
};
use crate::{
    monotonic_map_column, Column, FastFieldCodec, FastFieldCodecType, MonotonicallyMappableToU64,
    VecColumn, ALL_CODEC_TYPES,
};

/// The normalized header gives some parameters after applying the following
/// normalization of the vector:
/// `val -> (val - min_value) / gcd`
///
/// By design, after normalization, `min_value = 0` and `gcd = 1`.
#[derive(Debug, Copy, Clone)]
pub struct NormalizedHeader {
    /// The number of values in the underlying column.
    pub num_vals: u64,
    /// The max value of the underlying column.
    pub max_value: u64,
}

#[derive(Debug, Copy, Clone)]
pub(crate) struct Header {
    pub num_vals: u64,
    pub min_value: u64,
    pub max_value: u64,
    pub gcd: Option<NonZeroU64>,
    pub codec_type: FastFieldCodecType,
}

impl Header {
    pub fn normalized(self) -> NormalizedHeader {
        let gcd = self.gcd.map(|gcd| gcd.get()).unwrap_or(1);
        let gcd_min_val_mapping =
            StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, self.min_value);

        let max_value = gcd_min_val_mapping.mapping(self.max_value);
        NormalizedHeader {
            num_vals: self.num_vals,
            max_value,
        }
    }

    pub fn normalize_column<C: Column>(&self, from_column: C) -> impl Column {
        normalize_column(from_column, self.min_value, self.gcd)
    }

    pub fn compute_header(
        column: impl Column<u64>,
        codecs: &[FastFieldCodecType],
    ) -> Option<Header> {
        let num_vals = column.num_vals();
        let min_value = column.min_value();
        let max_value = column.max_value();
        let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
            .filter(|gcd| gcd.get() > 1u64);
        let normalized_column = normalize_column(column, min_value, gcd);
        let codec_type = detect_codec(normalized_column, codecs)?;
        Some(Header {
            num_vals,
            min_value,
            max_value,
            gcd,
            codec_type,
        })
    }
}

pub fn normalize_column<C: Column>(
    from_column: C,
    min_value: u64,
    gcd: Option<NonZeroU64>,
) -> impl Column {
    let gcd = gcd.map(|gcd| gcd.get()).unwrap_or(1);
    let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(gcd, min_value);
    monotonic_map_column(from_column, mapping)
}

impl BinarySerializable for Header {
    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        VInt(self.num_vals).serialize(writer)?;
        VInt(self.min_value).serialize(writer)?;
        VInt(self.max_value - self.min_value).serialize(writer)?;
        if let Some(gcd) = self.gcd {
            VInt(gcd.get()).serialize(writer)?;
        } else {
            VInt(0u64).serialize(writer)?;
        }
        self.codec_type.serialize(writer)?;
        Ok(())
    }

    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
        let num_vals = VInt::deserialize(reader)?.0;
        let min_value = VInt::deserialize(reader)?.0;
        let amplitude = VInt::deserialize(reader)?.0;
        let max_value = min_value + amplitude;
        let gcd_u64 = VInt::deserialize(reader)?.0;
        let codec_type = FastFieldCodecType::deserialize(reader)?;
        Ok(Header {
            num_vals,
            min_value,
            max_value,
            gcd: NonZeroU64::new(gcd_u64),
            codec_type,
        })
    }
}
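A side note on why the `Header` above writes `max_value - min_value` (the amplitude) instead of the absolute `max_value`: with a varint encoding the delta is usually much shorter. A standalone sketch, with a hypothetical `vint_len` helper standing in for the crate's `VInt`:

// LEB128-style length calculation, a stand-in for `VInt`'s encoded size.
fn vint_len(mut val: u64) -> usize {
    let mut len = 1;
    while val >= 128 {
        val >>= 7;
        len += 1;
    }
    len
}

fn main() {
    let (min_value, max_value) = (1_000_000_007u64, 1_000_000_107u64);
    let amplitude = max_value - min_value;
    // 5 bytes for the absolute value vs. 1 byte for the delta.
    assert!(vint_len(amplitude) < vint_len(max_value));
    println!(
        "max_value: {} bytes, amplitude: {} bytes",
        vint_len(max_value),
        vint_len(amplitude)
    );
}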
/// Returns the estimated compression for the given codec in the value range [0.0..1.0], where 1.0
/// means no compression.
pub fn estimate<T: MonotonicallyMappableToU64>(
    typed_column: impl Column<T>,
    codec_type: FastFieldCodecType,
) -> Option<f32> {
    let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
    let min_value = column.min_value();
    let gcd = crate::gcd::find_gcd(column.iter().map(|val| val - min_value))
        .filter(|gcd| gcd.get() > 1u64);
    let mapping = StrictlyMonotonicMappingToInternalGCDBaseval::new(
        gcd.map(|gcd| gcd.get()).unwrap_or(1u64),
        min_value,
    );
    let normalized_column = monotonic_map_column(&column, mapping);
    match codec_type {
        FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&normalized_column),
        FastFieldCodecType::Linear => LinearCodec::estimate(&normalized_column),
        FastFieldCodecType::BlockwiseLinear => BlockwiseLinearCodec::estimate(&normalized_column),
    }
}

/// Serializes u128 values with the compact space codec.
pub fn serialize_u128<F: Fn() -> I, I: Iterator<Item = u128>>(
    iter_gen: F,
    num_vals: u64,
    output: &mut impl io::Write,
) -> io::Result<()> {
    // TODO write header, to later support more codecs
    let compressor = CompactSpaceCompressor::train_from(iter_gen(), num_vals);
    compressor.compress_into(iter_gen(), output).unwrap();

    Ok(())
}

/// Serializes the column with the codec with the best estimate on the data.
pub fn serialize<T: MonotonicallyMappableToU64>(
    typed_column: impl Column<T>,
    output: &mut impl io::Write,
    codecs: &[FastFieldCodecType],
) -> io::Result<()> {
    let column = monotonic_map_column(typed_column, StrictlyMonotonicMappingToInternal::<T>::new());
    let header = Header::compute_header(&column, codecs).ok_or_else(|| {
        io::Error::new(
            io::ErrorKind::InvalidInput,
            format!(
                "Data cannot be serialized with this list of codecs. {:?}",
                codecs
            ),
        )
    })?;
    header.serialize(output)?;
    let normalized_column = header.normalize_column(column);
    assert_eq!(normalized_column.min_value(), 0u64);
    serialize_given_codec(normalized_column, header.codec_type, output)?;
    Ok(())
}

fn detect_codec(
    column: impl Column<u64>,
    codecs: &[FastFieldCodecType],
) -> Option<FastFieldCodecType> {
    let mut estimations = Vec::new();
    for &codec in codecs {
        let estimation_opt = match codec {
            FastFieldCodecType::Bitpacked => BitpackedCodec::estimate(&column),
            FastFieldCodecType::Linear => LinearCodec::estimate(&column),
            FastFieldCodecType::BlockwiseLinear => BlockwiseLinearCodec::estimate(&column),
        };
        if let Some(estimation) = estimation_opt {
            estimations.push((estimation, codec));
        }
    }
    if let Some(broken_estimation) = estimations.iter().find(|estimation| estimation.0.is_nan()) {
        warn!(
            "broken estimation for fast field codec {:?}",
            broken_estimation.1
        );
    }
    // removing NaN values for codecs with broken calculations, and max values which disable
    // codecs
    estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
    estimations.sort_by(|(score_left, _), (score_right, _)| score_left.total_cmp(score_right));
    Some(estimations.first()?.1)
}

fn serialize_given_codec(
    column: impl Column<u64>,
    codec_type: FastFieldCodecType,
    output: &mut impl io::Write,
) -> io::Result<()> {
    match codec_type {
        FastFieldCodecType::Bitpacked => {
            BitpackedCodec::serialize(&column, output)?;
        }
        FastFieldCodecType::Linear => {
            LinearCodec::serialize(&column, output)?;
        }
        FastFieldCodecType::BlockwiseLinear => {
            BlockwiseLinearCodec::serialize(&column, output)?;
        }
    }
    output.flush()?;
    Ok(())
}

/// Helper function to serialize a column (autodetect from all codecs) and then open it
pub fn serialize_and_load<T: MonotonicallyMappableToU64 + Ord + Default>(
    column: &[T],
) -> Arc<dyn Column<T>> {
    let mut buffer = Vec::new();
    super::serialize(VecColumn::from(&column), &mut buffer, &ALL_CODEC_TYPES).unwrap();
    super::open(OwnedBytes::new(buffer)).unwrap()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_serialize_deserialize() {
        let original = [1u64, 5u64, 10u64];
        let restored: Vec<u64> = serialize_and_load(&original[..]).iter().collect();
        assert_eq!(&restored, &original[..]);
    }

    #[test]
    fn test_fastfield_bool_size_bitwidth_1() {
        let mut buffer = Vec::new();
        let col = VecColumn::from(&[false, true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 1 byte of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 5 + 8);
    }

    #[test]
    fn test_fastfield_bool_bit_size_bitwidth_0() {
        let mut buffer = Vec::new();
        let col = VecColumn::from(&[true][..]);
        serialize(col, &mut buffer, &ALL_CODEC_TYPES).unwrap();
        // 5 bytes of header, 0 bytes of value, 7 bytes of padding.
        assert_eq!(buffer.len(), 5 + 7);
    }

    #[test]
    fn test_fastfield_gcd() {
        let mut buffer = Vec::new();
        let vals: Vec<u64> = (0..80).map(|val| (val % 7) * 1_000u64).collect();
        let col = VecColumn::from(&vals[..]);
        serialize(col, &mut buffer, &[FastFieldCodecType::Bitpacked]).unwrap();
        // Values are stored over 3 bits.
        assert_eq!(buffer.len(), 7 + (3 * 80 / 8) + 7);
    }
}
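A standalone sketch of the normalization this file performs, `val -> (val - min_value) / gcd`, and its exact inverse. The `gcd` helper below stands in for `crate::gcd::find_gcd`.

// Euclid's algorithm; gcd(0, x) == x, so folding from 0 works.
fn gcd(mut a: u64, mut b: u64) -> u64 {
    while b != 0 {
        let t = a % b;
        a = b;
        b = t;
    }
    a
}

fn main() {
    let vals = [1_000u64, 3_000, 7_000, 13_000];
    let min_value = *vals.iter().min().unwrap();
    let g = vals.iter().map(|&v| v - min_value).fold(0, gcd);
    // After normalization the values are 0, 1, 3, 6: bitpacking needs 3 bits
    // per value instead of the 14 bits required by the raw values.
    let normalized: Vec<u64> = vals.iter().map(|&v| (v - min_value) / g).collect();
    assert_eq!(normalized, vec![0, 1, 3, 6]);
    // The inverse mapping restores the original column exactly.
    let restored: Vec<u64> = normalized.iter().map(|&v| min_value + v * g).collect();
    assert_eq!(restored, vals.to_vec());
}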
@@ -2,7 +2,7 @@
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
version = "0.3.0"
edition = "2018"
edition = "2021"
description = "Expose data as static slice"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@@ -6,7 +6,7 @@ use std::{fmt, io, mem};
use stable_deref_trait::StableDeref;

/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
/// this data as a slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]

@@ -21,7 +21,7 @@ impl OwnedBytes {
        OwnedBytes::new(&[][..])
    }

    /// Creates an `OwnedBytes` intance given a `StableDeref` object.
    /// Creates an `OwnedBytes` instance given a `StableDeref` object.
    pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
        data_holder: T,
    ) -> OwnedBytes {

@@ -9,9 +9,9 @@ homepage = "https://github.com/quickwit-oss/tantivy"
repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
edition = "2021"

[dependencies]
combine = {version="4", default-features=false, features=[] }
once_cell = "1.7.2"
regex ={ version = "1.5.4", default-features = false, features = ["std"] }
regex ={ version = "1.5.4", default-features = false, features = ["std", "unicode"] }

@@ -1,3 +1,5 @@
#![allow(clippy::derive_partial_eq_without_eq)]

mod occur;
mod query_grammar;
mod user_input_ast;
@@ -2,11 +2,11 @@ use std::fmt;
use std::fmt::Write;

/// Defines whether a term in a query must be present,
/// should be present or must be not present.
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur {
    /// For a given document to be considered for scoring,
    /// at least one of the document with the Should or the Must
    /// at least one of the terms with the Should or the Must
    /// Occur constraint must be within the document.
    Should,
    /// Document without the term are excluded from the search.

@@ -16,14 +16,14 @@ use crate::Occur;
// Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to
// special characters.
const SPECIAL_CHARS: &[char] = &[
    '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ',
    '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '!', '\\', '*', ' ',
];
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|\~|!|\\|\*|\s)"#;
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|!|\\|\*|\s)"#;

/// Parses a field_name
/// A field name must have at least one character and be followed by a colon.
/// All characters are allowed including special characters `SPECIAL_CHARS`, but these
/// need to be escaped with a backslack character '\'.
/// need to be escaped with a backslash character '\'.
fn field_name<'a>() -> impl Parser<&'a str, Output = String> {
    static ESCAPED_SPECIAL_CHARS_RE: Lazy<Regex> =
        Lazy::new(|| Regex::new(ESCAPED_SPECIAL_CHARS_PATTERN).unwrap());

@@ -67,8 +67,8 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
/// 2021-04-13T19:46:26.266051969+00:00
///
/// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99
/// We delegate rejecting such invalid dates to the logical AST compuation code
/// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse
/// We delegate rejecting such invalid dates to the logical AST computation code
/// which invokes `time::OffsetDateTime::parse(..., &Rfc3339)` on the value to actually parse
/// it (instead of merely extracting the datetime value as string as done here).
fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
    let two_digits = || recognize::<String, _, _>((digit(), digit()));

@@ -120,22 +120,36 @@ fn date_time<'a>() -> impl Parser<&'a str, Output = String> {

fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
    let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
    phrase.or(word())
    negative_number().or(phrase.or(word()))
}

fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
    let term_val_with_field = negative_number().or(term_val());
    (field_name(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
    (field_name(), term_val(), slop_val()).map(|(field_name, phrase, slop)| UserInputLiteral {
        field_name: Some(field_name),
        phrase,
        slop,
    })
}

fn slop_val<'a>() -> impl Parser<&'a str, Output = u32> {
    let slop =
        (char('~'), many1(digit())).and_then(|(_, slop): (_, String)| match slop.parse::<u32>() {
            Ok(d) => Ok(d),
            _ => Err(StringStreamError::UnexpectedParse),
        });
    optional(slop).map(|slop| match slop {
        Some(d) => d,
        _ => 0,
    })
}

fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let term_default_field = term_val().map(|phrase| UserInputLiteral {
    let term_default_field = (term_val(), slop_val()).map(|(phrase, slop)| UserInputLiteral {
        field_name: None,
        phrase,
        slop,
    });

    attempt(term_query())
        .or(term_default_field)
        .map(UserInputLeaf::from)
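The `slop_val` parser above accepts an optional `~<u32>` suffix and defaults to 0. For illustration, a hand-rolled, combine-free equivalent of that suffix rule (the names here are hypothetical, not part of the crate):

// Strip an optional `~<u32>` suffix from a phrase query, defaulting to slop 0.
fn split_slop(query: &str) -> Option<(&str, u32)> {
    match query.rsplit_once('~') {
        Some((phrase, digits)) if !digits.is_empty() => {
            // A non-numeric or overflowing suffix is a parse error,
            // mirroring `StringStreamError::UnexpectedParse`.
            digits.parse::<u32>().ok().map(|slop| (phrase, slop))
        }
        Some(_) => None, // dangling `~` with no digits
        None => Some((query, 0)),
    }
}

fn main() {
    assert_eq!(split_slop("\"a b\"~3"), Some(("\"a b\"", 3)));
    assert_eq!(split_slop("\"a b\""), Some(("\"a b\"", 0)));
    assert_eq!(split_slop("\"a b\"~"), None);
    assert_eq!(split_slop("\"a b\"~100000000000000000"), None);
}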
@@ -285,7 +299,7 @@ fn boost<'a>() -> impl Parser<&'a str, Output = f64> {

fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
    (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
        Some(boost) if (boost - 1.0).abs() > f64::EPSILON => {
            UserInputAst::Boost(Box::new(leaf), boost)
        }
        _ => leaf,

@@ -522,18 +536,10 @@ mod test {
            super::field_name().parse(".my.field.name:a"),
            Ok((".my.field.name".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse(r#"my\ field:a"#),
            Ok(("my field".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse(r#"にんじん:a"#),
            Ok(("にんじん".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse("my\\ field\\ name:a"),
            Ok(("my field name".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse(r#"my\field:a"#),
            Ok((r#"my\field"#.to_string(), "a"))

@@ -562,6 +568,17 @@ mod test {
            super::field_name().parse("_my_field:a"),
            Ok(("_my_field".to_string(), "a"))
        );
        assert_eq!(
            super::field_name().parse("~my~field:a"),
            Ok(("~my~field".to_string(), "a"))
        );
        for special_char in SPECIAL_CHARS.iter() {
            let query = &format!("\\{special_char}my\\{special_char}field:a");
            assert_eq!(
                super::field_name().parse(query),
                Ok((format!("{special_char}my{special_char}field"), "a"))
            );
        }
    }

    #[test]

@@ -714,4 +731,22 @@ mod test {
        );
        test_is_parse_err("abc + ");
    }

    #[test]
    fn test_slop() {
        assert!(parse_to_ast().parse("\"a b\"~").is_err());
        assert!(parse_to_ast().parse("foo:\"a b\"~").is_err());
        assert!(parse_to_ast().parse("\"a b\"~a").is_err());
        assert!(parse_to_ast().parse("\"a b\"~100000000000000000").is_err());

        test_parse_query_to_ast_helper("\"a b\"^2~4", "(*(\"a b\")^2 *\"~4\")");
        test_parse_query_to_ast_helper("\"~Document\"", "\"~Document\"");
        test_parse_query_to_ast_helper("~Document", "\"~Document\"");
        test_parse_query_to_ast_helper("a~2", "\"a~2\"");
        test_parse_query_to_ast_helper("\"a b\"~0", "\"a b\"");
        test_parse_query_to_ast_helper("\"a b\"~1", "\"a b\"~1");
        test_parse_query_to_ast_helper("\"a b\"~3", "\"a b\"~3");
        test_parse_query_to_ast_helper("foo:\"a b\"~300", "\"foo\":\"a b\"~300");
        test_parse_query_to_ast_helper("\"a b\"~300^2", "(\"a b\"~300)^2");
    }
}

@@ -40,14 +40,19 @@ impl Debug for UserInputLeaf {
pub struct UserInputLiteral {
    pub field_name: Option<String>,
    pub phrase: String,
    pub slop: u32,
}

impl fmt::Debug for UserInputLiteral {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match self.field_name {
            Some(ref field_name) => write!(formatter, "\"{}\":\"{}\"", field_name, self.phrase),
            None => write!(formatter, "\"{}\"", self.phrase),
        if let Some(ref field) = self.field_name {
            write!(formatter, "\"{}\":", field)?;
        }
        write!(formatter, "\"{}\"", self.phrase)?;
        if self.slop > 0 {
            write!(formatter, "~{}", self.slop)?;
        }
        Ok(())
    }
}

@@ -12,7 +12,7 @@ Tantivy's aggregations have been designed to mimic the
The code is organized in submodules:

## bucket
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggegations.
Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggregations.

## metric
Contains all metric aggregations, like average aggregation. Metric aggregations do not have sub aggregations.

@@ -1,7 +1,7 @@
//! Contains the aggregation request tree. Used to build an
//! [AggregationCollector](super::AggregationCollector).
//! [`AggregationCollector`](super::AggregationCollector).
//!
//! [Aggregations] is the top level entry point to create a request, which is a `HashMap<String,
//! [`Aggregations`] is the top level entry point to create a request, which is a `HashMap<String,
//! Aggregation>`.
//!
//! Requests are compatible with the json format of elasticsearch.

@@ -20,6 +20,7 @@
//!         bucket_agg: BucketAggregationType::Range(RangeAggregation{
//!             field: "score".to_string(),
//!             ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
//!             keyed: false,
//!         }),
//!         sub_aggregation: Default::default(),
//!     }),

@@ -53,8 +54,8 @@ use super::bucket::{HistogramAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::VecWithNames;

/// The top-level aggregation request structure, which contains [Aggregation] and their user defined
/// names. It is also used in [buckets](BucketAggregation) to define sub-aggregations.
/// The top-level aggregation request structure, which contains [`Aggregation`] and their user
/// defined names. It is also used in [buckets](BucketAggregation) to define sub-aggregations.
///
/// The key is the user defined name of the aggregation.
pub type Aggregations = HashMap<String, Aggregation>;

@@ -100,6 +101,12 @@ pub(crate) struct BucketAggregationInternal {
}

impl BucketAggregationInternal {
    pub(crate) fn as_range(&self) -> Option<&RangeAggregation> {
        match &self.bucket_agg {
            BucketAggregationType::Range(range) => Some(range),
            _ => None,
        }
    }
    pub(crate) fn as_histogram(&self) -> Option<&HistogramAggregation> {
        match &self.bucket_agg {
            BucketAggregationType::Histogram(histogram) => Some(histogram),

@@ -132,15 +139,15 @@ pub fn get_fast_field_names(aggs: &Aggregations) -> HashSet<String> {
    fast_field_names
}

/// Aggregation request of [BucketAggregation] or [MetricAggregation].
/// Aggregation request of [`BucketAggregation`] or [`MetricAggregation`].
///
/// An aggregation is either a bucket or a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Aggregation {
    /// Bucket aggregation, see [BucketAggregation] for details.
    /// Bucket aggregation, see [`BucketAggregation`] for details.
    Bucket(BucketAggregation),
    /// Metric aggregation, see [MetricAggregation] for details.
    /// Metric aggregation, see [`MetricAggregation`] for details.
    Metric(MetricAggregation),
}

@@ -264,6 +271,7 @@ mod tests {
                    (7f64..20f64).into(),
                    (20f64..f64::MAX).into(),
                ],
                keyed: true,
            }),
            sub_aggregation: Default::default(),
        }),

@@ -290,7 +298,8 @@ mod tests {
                {
                    "from": 20.0
                }
            ]
            ],
            "keyed": true
        }
    }
}"#;

@@ -312,6 +321,7 @@ mod tests {
                    (7f64..20f64).into(),
                    (20f64..f64::MAX).into(),
                ],
                ..Default::default()
            }),
            sub_aggregation: Default::default(),
        }),

@@ -337,6 +347,7 @@ mod tests {
                    (7f64..20f64).into(),
                    (20f64..f64::MAX).into(),
                ],
                ..Default::default()
            }),
            sub_aggregation: agg_req2,
        }),
@@ -1,14 +1,17 @@
//! This will enhance the request tree with access to the fastfield and metadata.

use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::Arc;

use fastfield_codecs::Column;

use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
use super::metric::{AverageAggregation, StatsAggregation};
use super::segment_agg_result::BucketCount;
use super::VecWithNames;
use crate::fastfield::{
    type_and_cardinality, DynamicFastFieldReader, FastType, MultiValuedFastFieldReader,
};
use crate::fastfield::{type_and_cardinality, FastType, MultiValuedFastFieldReader};
use crate::schema::{Cardinality, Type};
use crate::{InvertedIndexReader, SegmentReader, TantivyError};

@@ -34,10 +37,16 @@ impl AggregationsWithAccessor {
#[derive(Clone)]
pub(crate) enum FastFieldAccessor {
    Multi(MultiValuedFastFieldReader<u64>),
    Single(DynamicFastFieldReader<u64>),
    Single(Arc<dyn Column<u64>>),
}
impl FastFieldAccessor {
    pub fn as_single(&self) -> Option<&DynamicFastFieldReader<u64>> {
    pub fn as_single(&self) -> Option<&dyn Column<u64>> {
        match self {
            FastFieldAccessor::Multi(_) => None,
            FastFieldAccessor::Single(reader) => Some(&**reader),
        }
    }
    pub fn into_single(self) -> Option<Arc<dyn Column<u64>>> {
        match self {
            FastFieldAccessor::Multi(_) => None,
            FastFieldAccessor::Single(reader) => Some(reader),

@@ -60,6 +69,7 @@ pub struct BucketAggregationWithAccessor {
    pub(crate) field_type: Type,
    pub(crate) bucket_agg: BucketAggregationType,
    pub(crate) sub_aggregation: AggregationsWithAccessor,
    pub(crate) bucket_count: BucketCount,
}

impl BucketAggregationWithAccessor {

@@ -67,12 +77,13 @@ impl BucketAggregationWithAccessor {
        bucket: &BucketAggregationType,
        sub_aggregation: &Aggregations,
        reader: &SegmentReader,
        bucket_count: Rc<AtomicU32>,
        max_bucket_count: u32,
    ) -> crate::Result<BucketAggregationWithAccessor> {
        let mut inverted_index = None;
        let (accessor, field_type) = match &bucket {
            BucketAggregationType::Range(RangeAggregation {
                field: field_name,
                ranges: _,
                field: field_name, ..
            }) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
            BucketAggregationType::Histogram(HistogramAggregation {
                field: field_name, ..

@@ -92,9 +103,18 @@ impl BucketAggregationWithAccessor {
        Ok(BucketAggregationWithAccessor {
            accessor,
            field_type,
            sub_aggregation: get_aggs_with_accessor_and_validate(&sub_aggregation, reader)?,
            sub_aggregation: get_aggs_with_accessor_and_validate(
                &sub_aggregation,
                reader,
                bucket_count.clone(),
                max_bucket_count,
            )?,
            bucket_agg: bucket.clone(),
            inverted_index,
            bucket_count: BucketCount {
                bucket_count,
                max_bucket_count,
            },
        })
    }
}

@@ -104,7 +124,7 @@ impl BucketAggregationWithAccessor {
pub struct MetricAggregationWithAccessor {
    pub metric: MetricAggregation,
    pub field_type: Type,
    pub accessor: DynamicFastFieldReader<u64>,
    pub accessor: Arc<dyn Column>,
}

impl MetricAggregationWithAccessor {

@@ -120,9 +140,8 @@ impl MetricAggregationWithAccessor {

                Ok(MetricAggregationWithAccessor {
                    accessor: accessor
                        .as_single()
                        .expect("unexpected fast field cardinality")
                        .clone(),
                        .into_single()
                        .expect("unexpected fast field cardinality"),
                    field_type,
                    metric: metric.clone(),
                })

@@ -134,6 +153,8 @@ impl MetricAggregationWithAccessor {
pub(crate) fn get_aggs_with_accessor_and_validate(
    aggs: &Aggregations,
    reader: &SegmentReader,
    bucket_count: Rc<AtomicU32>,
    max_bucket_count: u32,
) -> crate::Result<AggregationsWithAccessor> {
    let mut metrics = vec![];
    let mut buckets = vec![];

@@ -145,6 +166,8 @@ pub(crate) fn get_aggs_with_accessor_and_validate(
                    &bucket.bucket_agg,
                    &bucket.sub_aggregation,
                    reader,
                    Rc::clone(&bucket_count),
                    max_bucket_count,
                )?,
            )),
            Aggregation::Metric(metric) => metrics.push((
@@ -4,21 +4,16 @@
//! intermediate average results, which is the sum and the number of values. The actual average is
//! calculated on the step from intermediate to final aggregation result tree.

use std::cmp::Ordering;
use std::collections::HashMap;

use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};

use super::agg_req::{
    Aggregations, AggregationsInternal, BucketAggregationInternal, MetricAggregation,
};
use super::bucket::{intermediate_buckets_to_final_buckets, GetDocCount};
use super::intermediate_agg_result::{
    IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
    IntermediateMetricResult, IntermediateRangeBucketEntry,
};
use super::agg_req::BucketAggregationInternal;
use super::bucket::GetDocCount;
use super::intermediate_agg_result::{IntermediateBucketResult, IntermediateMetricResult};
use super::metric::{SingleMetricResult, Stats};
use super::{Key, VecWithNames};
use super::Key;
use crate::TantivyError;

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]

@@ -41,98 +36,6 @@ impl AggregationResults {
        )))
    }
}

    /// Convert an intermediate result and its aggregation request to the final result
    pub fn from_intermediate_and_req(
        results: IntermediateAggregationResults,
        agg: Aggregations,
    ) -> crate::Result<Self> {
        AggregationResults::from_intermediate_and_req_internal(results, &(agg.into()))
    }

    /// Convert an intermediate result and its aggregation request to the final result
    ///
    /// Internal function, AggregationsInternal is used instead of Aggregations, which is optimized
    /// for internal processing, by splitting metrics and buckets into separate groups.
    pub(crate) fn from_intermediate_and_req_internal(
        intermediate_results: IntermediateAggregationResults,
        req: &AggregationsInternal,
    ) -> crate::Result<Self> {
        // Important assumption:
        // When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
        // request
        let mut results: HashMap<String, AggregationResult> = HashMap::new();

        if let Some(buckets) = intermediate_results.buckets {
            add_converted_final_buckets_to_result(&mut results, buckets, &req.buckets)?
        } else {
            // When there are no buckets, we create empty buckets, so that the serialized json
            // format is constant
            add_empty_final_buckets_to_result(&mut results, &req.buckets)?
        };

        if let Some(metrics) = intermediate_results.metrics {
            add_converted_final_metrics_to_result(&mut results, metrics);
        } else {
            // When there are no metrics, we create empty metric results, so that the serialized
            // json format is constant
            add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
        }
        Ok(Self(results))
    }
}

fn add_converted_final_metrics_to_result(
    results: &mut HashMap<String, AggregationResult>,
    metrics: VecWithNames<IntermediateMetricResult>,
) {
    results.extend(
        metrics
            .into_iter()
            .map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
    );
}

fn add_empty_final_metrics_to_result(
    results: &mut HashMap<String, AggregationResult>,
    req_metrics: &VecWithNames<MetricAggregation>,
) -> crate::Result<()> {
    results.extend(req_metrics.iter().map(|(key, req)| {
        let empty_bucket = IntermediateMetricResult::empty_from_req(req);
        (
            key.to_string(),
            AggregationResult::MetricResult(empty_bucket.into()),
        )
    }));
    Ok(())
}

fn add_empty_final_buckets_to_result(
    results: &mut HashMap<String, AggregationResult>,
    req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
    let requested_buckets = req_buckets.iter();
    for (key, req) in requested_buckets {
        let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
        results.insert(key.to_string(), empty_bucket);
    }
    Ok(())
}

fn add_converted_final_buckets_to_result(
    results: &mut HashMap<String, AggregationResult>,
    buckets: VecWithNames<IntermediateBucketResult>,
    req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
    assert_eq!(buckets.len(), req_buckets.len());

    let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
    for ((key, bucket), req) in buckets_with_request {
        let result =
            AggregationResult::BucketResult(BucketResult::from_intermediate_and_req(bucket, req)?);
        results.insert(key, result);
    }
    Ok(())
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]

@@ -154,7 +57,7 @@ impl AggregationResult {
        match self {
            AggregationResult::BucketResult(_bucket) => Err(TantivyError::InternalError(
                "Tried to retrieve value from bucket aggregation. This is not supported and \
                 should not happen during collection, but should be catched during validation"
                 should not happen during collection phase, but should be caught during validation"
                    .to_string(),
            )),
            AggregationResult::MetricResult(metric) => metric.get_value(agg_property),

@@ -201,7 +104,7 @@ pub enum BucketResult {
    /// sub_aggregations.
    Range {
        /// The range buckets sorted by range.
        buckets: Vec<RangeBucketEntry>,
        buckets: BucketEntries<RangeBucketEntry>,
    },
    /// This is the histogram entry for a bucket, which contains a key, count, and optionally
    /// sub_aggregations.

@@ -210,14 +113,14 @@ pub enum BucketResult {
        ///
        /// Whether there are holes depends on the request; if min_doc_count is 0, then there are
        /// no holes between the first and last bucket.
        /// See [HistogramAggregation](super::bucket::HistogramAggregation)
        buckets: Vec<BucketEntry>,
        /// See [`HistogramAggregation`](super::bucket::HistogramAggregation)
        buckets: BucketEntries<BucketEntry>,
    },
    /// This is the term result
    Terms {
        /// The buckets.
        ///
        /// See [TermsAggregation](super::bucket::TermsAggregation)
        /// See [`TermsAggregation`](super::bucket::TermsAggregation)
        buckets: Vec<BucketEntry>,
        /// The number of documents that didn't make it into the top N due to shard_size or size
        sum_other_doc_count: u64,

@@ -230,49 +133,19 @@ pub enum BucketResult {
impl BucketResult {
    pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
        let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
        BucketResult::from_intermediate_and_req(empty_bucket, req)
        empty_bucket.into_final_bucket_result(req)
    }
}

    fn from_intermediate_and_req(
        bucket_result: IntermediateBucketResult,
        req: &BucketAggregationInternal,
    ) -> crate::Result<Self> {
        match bucket_result {
            IntermediateBucketResult::Range(range_res) => {
                let mut buckets: Vec<RangeBucketEntry> = range_res
                    .buckets
                    .into_iter()
                    .map(|(_, bucket)| {
                        RangeBucketEntry::from_intermediate_and_req(bucket, &req.sub_aggregation)
                    })
                    .collect::<crate::Result<Vec<_>>>()?;

                buckets.sort_by(|left, right| {
                    // TODO use total_cmp next stable rust release
                    left.from
                        .unwrap_or(f64::MIN)
                        .partial_cmp(&right.from.unwrap_or(f64::MIN))
                        .unwrap_or(Ordering::Equal)
                });
                Ok(BucketResult::Range { buckets })
            }
            IntermediateBucketResult::Histogram { buckets } => {
                let buckets = intermediate_buckets_to_final_buckets(
                    buckets,
                    req.as_histogram()
                        .expect("unexpected aggregation, expected histogram aggregation"),
                    &req.sub_aggregation,
                )?;

                Ok(BucketResult::Histogram { buckets })
            }
            IntermediateBucketResult::Terms(terms) => terms.into_final_result(
                req.as_term()
                    .expect("unexpected aggregation, expected term aggregation"),
                &req.sub_aggregation,
            ),
        }
    }
/// This is the wrapper for bucket entries, which can be a vector or a hashmap
/// depending on whether the request is keyed or not.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum BucketEntries<T> {
    /// Vector format bucket entries
    Vec(Vec<T>),
    /// HashMap format bucket entries
    HashMap(FnvHashMap<String, T>),
}
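To see what the `#[serde(untagged)]` on `BucketEntries` buys: the same buckets serialize as a JSON array or a JSON object depending on the variant, with no extra tag byte in the output. A minimal sketch using `std::collections::HashMap` in place of `FnvHashMap` (requires `serde` with the derive feature and `serde_json`):

use std::collections::HashMap;

use serde::Serialize;

// Same shape as the enum above; untagged means the variant content is
// serialized directly, so the JSON shape alone distinguishes keyed output.
#[derive(Serialize)]
#[serde(untagged)]
enum BucketEntries<T> {
    Vec(Vec<T>),
    HashMap(HashMap<String, T>),
}

#[derive(Serialize)]
struct Bucket {
    doc_count: u64,
}

fn main() {
    let flat = BucketEntries::Vec(vec![Bucket { doc_count: 3 }]);
    assert_eq!(serde_json::to_string(&flat).unwrap(), r#"[{"doc_count":3}]"#);

    let mut by_key = HashMap::new();
    by_key.insert("0-10".to_string(), Bucket { doc_count: 3 });
    let keyed = BucketEntries::HashMap(by_key);
    assert_eq!(
        serde_json::to_string(&keyed).unwrap(),
        r#"{"0-10":{"doc_count":3}}"#
    );
}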
/// This is the default entry for a bucket, which contains a key, count, and optionally
/// sub_aggregations.

@@ -311,22 +184,6 @@ pub struct BucketEntry {
    /// Sub-aggregations in this bucket.
    pub sub_aggregation: AggregationResults,
}

impl BucketEntry {
    pub(crate) fn from_intermediate_and_req(
        entry: IntermediateHistogramBucketEntry,
        req: &AggregationsInternal,
    ) -> crate::Result<Self> {
        Ok(BucketEntry {
            key: Key::F64(entry.key),
            doc_count: entry.doc_count,
            sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
                entry.sub_aggregation,
                req,
            )?,
        })
    }
}
impl GetDocCount for &BucketEntry {
    fn doc_count(&self) -> u64 {
        self.doc_count

@@ -377,28 +234,10 @@ pub struct RangeBucketEntry {
    #[serde(flatten)]
    /// sub-aggregations in this bucket.
    pub sub_aggregation: AggregationResults,
    /// The from range of the bucket. Equals f64::MIN when None.
    /// The from range of the bucket. Equals `f64::MIN` when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from: Option<f64>,
    /// The to range of the bucket. Equals f64::MAX when None.
    /// The to range of the bucket. Equals `f64::MAX` when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<f64>,
}

impl RangeBucketEntry {
    fn from_intermediate_and_req(
        entry: IntermediateRangeBucketEntry,
        req: &AggregationsInternal,
    ) -> crate::Result<Self> {
        Ok(RangeBucketEntry {
            key: entry.key,
            doc_count: entry.doc_count,
            sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
                entry.sub_aggregation,
                req,
            )?,
            to: entry.to,
            from: entry.from,
        })
    }
}

@@ -1,6 +1,7 @@
use std::cmp::Ordering;
use std::fmt::Display;

use fastfield_codecs::Column;
use itertools::Itertools;
use serde::{Deserialize, Serialize};

@@ -14,7 +15,6 @@ use crate::aggregation::intermediate_agg_result::{
    IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::{DocId, TantivyError};

@@ -37,19 +37,17 @@ use crate::{DocId, TantivyError};
/// [hard_bounds](HistogramAggregation::hard_bounds).
///
/// # Result
/// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
/// [BucketEntry](crate::aggregation::agg_result::BucketEntry) on the
/// AggregationCollector.
/// Result type is [`BucketResult`](crate::aggregation::agg_result::BucketResult) with
/// [`BucketEntry`](crate::aggregation::agg_result::BucketEntry) on the
/// `AggregationCollector`.
///
/// Result type is
/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
/// [crate::aggregation::intermediate_agg_result::IntermediateHistogramBucketEntry] on the
/// DistributedAggregationCollector.
/// [`IntermediateBucketResult`](crate::aggregation::intermediate_agg_result::IntermediateBucketResult) with
/// [`IntermediateHistogramBucketEntry`](crate::aggregation::intermediate_agg_result::IntermediateHistogramBucketEntry) on the
/// `DistributedAggregationCollector`.
///
/// # Limitations/Compatibility
///
/// The keyed parameter (elasticsearch) is not yet supported.
///
/// # JSON Format
|
||||
/// ```json
|
||||
/// {
|
||||
@@ -63,7 +61,7 @@ use crate::{DocId, TantivyError};
|
||||
/// ```
|
||||
///
|
||||
/// Response
|
||||
/// See [BucketEntry](crate::aggregation::agg_result::BucketEntry)
|
||||
/// See [`BucketEntry`](crate::aggregation::agg_result::BucketEntry)
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub struct HistogramAggregation {
|
||||
@@ -72,7 +70,7 @@ pub struct HistogramAggregation {
|
||||
/// The interval to chunk your data range. Each bucket spans a value range of [0..interval).
|
||||
/// Must be a positive value.
|
||||
pub interval: f64,
|
||||
/// Intervals implicitely defines an absolute grid of buckets `[interval * k, interval * (k +
|
||||
/// Intervals implicitly defines an absolute grid of buckets `[interval * k, interval * (k +
|
||||
/// 1))`.
|
||||
///
|
||||
/// Offset makes it possible to shift this grid into
|
||||
@@ -117,6 +115,9 @@ pub struct HistogramAggregation {
|
||||
/// Cannot be set in conjunction with min_doc_count > 0, since the empty buckets from extended
|
||||
/// bounds would not be returned.
|
||||
pub extended_bounds: Option<HistogramBounds>,
|
||||
/// Whether to return the buckets as a hash map
|
||||
#[serde(default)]
|
||||
pub keyed: bool,
|
||||
}
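For reference, a request that opts into the new hash-map output could be built like this — a sketch assuming the public `tantivy::aggregation::bucket` path; the `histogram_keyed_buckets_test` further down exercises the same thing end to end:

```rust
use tantivy::aggregation::bucket::HistogramAggregation;

// 50.0-wide buckets, returned as a map keyed by the bucket key instead
// of an array -- mirrors Elasticsearch's `keyed` parameter.
fn keyed_histogram_request() -> HistogramAggregation {
    HistogramAggregation {
        field: "score_f64".to_string(),
        interval: 50.0,
        keyed: true,
        ..Default::default()
    }
}
```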
impl HistogramAggregation {
@@ -250,6 +251,11 @@ impl SegmentHistogramCollector {
);
};

agg_with_accessor
.bucket_count
.add_count(buckets.len() as u32);
agg_with_accessor.bucket_count.validate_bucket_count()?;

Ok(IntermediateBucketResult::Histogram { buckets })
}

@@ -257,7 +263,7 @@ impl SegmentHistogramCollector {
req: &HistogramAggregation,
sub_aggregation: &AggregationsWithAccessor,
field_type: Type,
accessor: &DynamicFastFieldReader<u64>,
accessor: &dyn Column<u64>,
) -> crate::Result<Self> {
req.validate()?;
let min = f64_from_fastfield_u64(accessor.min_value(), &field_type);
@@ -311,7 +317,7 @@ impl SegmentHistogramCollector {
doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) {
) -> crate::Result<()> {
let bounds = self.bounds;
let interval = self.interval;
let offset = self.offset;
@@ -325,10 +331,10 @@ impl SegmentHistogramCollector {
.expect("unexpected fast field cardinatility");
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val0 = self.f64_from_fastfield_u64(accessor.get(docs[0]));
let val1 = self.f64_from_fastfield_u64(accessor.get(docs[1]));
let val2 = self.f64_from_fastfield_u64(accessor.get(docs[2]));
let val3 = self.f64_from_fastfield_u64(accessor.get(docs[3]));
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0] as u64));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1] as u64));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2] as u64));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3] as u64));

let bucket_pos0 = get_bucket_num(val0);
let bucket_pos1 = get_bucket_num(val1);
@@ -341,31 +347,31 @@ impl SegmentHistogramCollector {
bucket_pos0,
docs[0],
&bucket_with_accessor.sub_aggregation,
);
)?;
self.increment_bucket_if_in_bounds(
val1,
&bounds,
bucket_pos1,
docs[1],
&bucket_with_accessor.sub_aggregation,
);
)?;
self.increment_bucket_if_in_bounds(
val2,
&bounds,
bucket_pos2,
docs[2],
&bucket_with_accessor.sub_aggregation,
);
)?;
self.increment_bucket_if_in_bounds(
val3,
&bounds,
bucket_pos3,
docs[3],
&bucket_with_accessor.sub_aggregation,
);
)?;
}
for doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get(*doc), &self.field_type);
for &doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get_val(doc as u64), &self.field_type);
if !bounds.contains(val) {
continue;
}
@@ -376,16 +382,17 @@ impl SegmentHistogramCollector {
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset) as f64
);
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {
for sub_aggregation in sub_aggregations {
sub_aggregation
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush);
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
}
}
}
Ok(())
}

#[inline]
@@ -396,15 +403,16 @@ impl SegmentHistogramCollector {
bucket_pos: usize,
doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor,
) {
) -> crate::Result<()> {
if bounds.contains(val) {
debug_assert_eq!(
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset) as f64
);

self.increment_bucket(bucket_pos, doc, bucket_with_accessor);
self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;
}
Ok(())
}

#[inline]
@@ -413,12 +421,13 @@ impl SegmentHistogramCollector {
bucket_pos: usize,
doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor,
) {
) -> crate::Result<()> {
let bucket = &mut self.buckets[bucket_pos];
bucket.doc_count += 1;
if let Some(sub_aggregation) = self.sub_aggregations.as_mut() {
(&mut sub_aggregation[bucket_pos]).collect(doc, bucket_with_accessor);
sub_aggregation[bucket_pos].collect(doc, bucket_with_accessor)?;
}
Ok(())
}

fn f64_from_fastfield_u64(&self, val: u64) -> f64 {
@@ -443,7 +452,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal,
) -> crate::Result<Vec<BucketEntry>> {
// Generate the the full list of buckets without gaps.
// Generate the full list of buckets without gaps.
//
// The bounds are the min max from the current buckets, optionally extended by
// extended_bounds from the request
@@ -482,14 +491,12 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
sub_aggregation: empty_sub_aggregation.clone(),
},
})
.map(|intermediate_bucket| {
BucketEntry::from_intermediate_and_req(intermediate_bucket, sub_aggregation)
})
.map(|intermediate_bucket| intermediate_bucket.into_final_bucket_entry(sub_aggregation))
.collect::<crate::Result<Vec<_>>>()
}

// Convert to BucketEntry
pub(crate) fn intermediate_buckets_to_final_buckets(
pub(crate) fn intermediate_histogram_buckets_to_final_buckets(
buckets: Vec<IntermediateHistogramBucketEntry>,
histogram_req: &HistogramAggregation,
sub_aggregation: &AggregationsInternal,
@@ -503,15 +510,15 @@ pub(crate) fn intermediate_buckets_to_final_buckets(
} else {
buckets
.into_iter()
.filter(|bucket| bucket.doc_count >= histogram_req.min_doc_count())
.map(|bucket| BucketEntry::from_intermediate_and_req(bucket, sub_aggregation))
.filter(|histogram_bucket| histogram_bucket.doc_count >= histogram_req.min_doc_count())
.map(|histogram_bucket| histogram_bucket.into_final_bucket_entry(sub_aggregation))
.collect::<crate::Result<Vec<_>>>()
}
}

/// Applies req extended_bounds/hard_bounds on the min_max value
///
/// May return (f64::MAX, f64::MIN), if there is no range.
/// May return `(f64::MAX, f64::MIN)`, if there is no range.
fn get_req_min_max(req: &HistogramAggregation, min_max: Option<(f64, f64)>) -> (f64, f64) {
let (mut min, mut max) = min_max.unwrap_or((f64::MAX, f64::MIN));

@@ -546,7 +553,7 @@ pub(crate) fn generate_buckets_with_opt_minmax(
let offset = req.offset.unwrap_or(0.0);
let first_bucket_num = get_bucket_num_f64(min, req.interval, offset) as i64;
let last_bucket_num = get_bucket_num_f64(max, req.interval, offset) as i64;
let mut buckets = vec![];
let mut buckets = Vec::with_capacity((first_bucket_num..=last_bucket_num).count());
for bucket_pos in first_bucket_num..=last_bucket_num {
let bucket_key = bucket_pos as f64 * req.interval + offset;
buckets.push(bucket_key);
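The grid arithmetic in this hunk is easy to check by hand: a value falls into bucket number `floor((val - offset) / interval)`, and the bucket key goes the other way, `bucket_num * interval + offset`. A hedged sketch with assumed names:

```rust
// Sketch of the bucket grid used above (names assumed from the
// surrounding hunk, not copied from the source).
fn bucket_num(val: f64, interval: f64, offset: f64) -> i64 {
    ((val - offset) / interval).floor() as i64
}

fn main() {
    let (interval, offset) = (10.0, 0.0);
    // A min..=max of 3.0..=27.0 spans bucket numbers 0..=2, i.e. the
    // keys 0.0, 10.0 and 20.0 -- exactly the grid the loop materializes.
    let first = bucket_num(3.0, interval, offset);
    let last = bucket_num(27.0, interval, offset);
    let keys: Vec<f64> = (first..=last)
        .map(|pos| pos as f64 * interval + offset)
        .collect();
    assert_eq!(keys, vec![0.0, 10.0, 20.0]);
}
```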
@@ -1389,4 +1396,46 @@ mod tests {

Ok(())
}

#[test]
fn histogram_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;

let agg_req: Aggregations = vec![(
"histogram".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Histogram(HistogramAggregation {
field: "score_f64".to_string(),
interval: 50.0,
keyed: true,
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

let res = exec_request(agg_req, &index)?;

assert_eq!(
res,
json!({
"histogram": {
"buckets": {
"0": {
"key": 0.0,
"doc_count": 50
},
"50": {
"key": 50.0,
"doc_count": 50
}
}
}
})
);

Ok(())
}
}

@@ -1,11 +1,11 @@
//! Module for all bucket aggregations.
//!
//! BucketAggregations create buckets of documents
//! [BucketAggregation](super::agg_req::BucketAggregation).
//! [`BucketAggregation`](super::agg_req::BucketAggregation).
//!
//! Results of final buckets are [BucketResult](super::agg_result::BucketResult).
//! Results of final buckets are [`BucketResult`](super::agg_result::BucketResult).
//! Results of intermediate buckets are
//! [IntermediateBucketResult](super::intermediate_agg_result::IntermediateBucketResult)
//! [`IntermediateBucketResult`](super::intermediate_agg_result::IntermediateBucketResult)

mod histogram;
mod range;

@@ -1,6 +1,7 @@
use std::fmt::Debug;
use std::ops::Range;

use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};

use crate::aggregation::agg_req_with_accessor::{
@@ -9,33 +10,30 @@ use crate::aggregation::agg_req_with_accessor::{
use crate::aggregation::intermediate_agg_result::{
IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key};
use crate::fastfield::FastFieldReader;
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
use crate::schema::Type;
use crate::{DocId, TantivyError};

/// Provide user-defined buckets to aggregate on.
/// Two special buckets will automatically be created to cover the whole range of values.
/// The provided buckets have to be continous.
/// The provided buckets have to be continuous.
/// During the aggregation, the values extracted from the fast_field `field` will be checked
/// against each bucket range. Note that this aggregation includes the from value and excludes the
/// to value for each range.
///
/// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
/// [RangeBucketEntry](crate::aggregation::agg_result::RangeBucketEntry) on the
/// AggregationCollector.
/// Result type is [`BucketResult`](crate::aggregation::agg_result::BucketResult) with
/// [`RangeBucketEntry`](crate::aggregation::agg_result::RangeBucketEntry) on the
/// `AggregationCollector`.
///
/// Result type is
/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
/// [crate::aggregation::intermediate_agg_result::IntermediateRangeBucketEntry] on the
/// DistributedAggregationCollector.
/// [`IntermediateBucketResult`](crate::aggregation::intermediate_agg_result::IntermediateBucketResult) with
/// [`IntermediateRangeBucketEntry`](crate::aggregation::intermediate_agg_result::IntermediateRangeBucketEntry) on the
/// `DistributedAggregationCollector`.
///
/// # Limitations/Compatibility
/// Overlapping ranges are not yet supported.
///
/// The keyed parameter (elasticsearch) is not yet supported.
///
/// # Request JSON Format
/// ```json
/// {
@@ -50,24 +48,30 @@ use crate::{DocId, TantivyError};
/// }
/// }
/// ```
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct RangeAggregation {
/// The field to aggregate on.
pub field: String,
/// Note that this aggregation includes the from value and excludes the to value for each
/// range. Extra buckets will be created until the first to, and last from, if necessary.
pub ranges: Vec<RangeAggregationRange>,
/// Whether to return the buckets as a hash map
#[serde(default)]
pub keyed: bool,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// The range for one range bucket.
pub struct RangeAggregationRange {
/// Custom key for the range bucket
#[serde(skip_serializing_if = "Option::is_none", default)]
pub key: Option<String>,
/// The from range value, which is inclusive in the range.
/// None equals to an open ended interval.
/// `None` equals to an open ended interval.
#[serde(skip_serializing_if = "Option::is_none", default)]
pub from: Option<f64>,
/// The to range value, which is not inclusive in the range.
/// None equals to an open ended interval.
/// `None` equals to an open ended interval.
#[serde(skip_serializing_if = "Option::is_none", default)]
pub to: Option<f64>,
}
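With the new `key` field, a caller can label one bucket explicitly and let the others fall back to the generated `from-to` keys, as the `range_custom_key_test` below demonstrates. A sketch of such a request (module path assumed):

```rust
use tantivy::aggregation::bucket::{RangeAggregation, RangeAggregationRange};

// One half-open bucket [0.0, 0.1) with a custom key; the collector adds
// the implicit `*-0` and `0.1-*` buckets around it automatically.
fn labeled_range_request() -> RangeAggregation {
    RangeAggregation {
        field: "fraction_f64".to_string(),
        ranges: vec![RangeAggregationRange {
            key: Some("cheap".to_string()),
            from: Some(0.0),
            to: Some(0.1),
        }],
        keyed: true,
    }
}
```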
@@ -84,7 +88,26 @@ impl From<Range<f64>> for RangeAggregationRange {
} else {
Some(range.end)
};
RangeAggregationRange { from, to }
RangeAggregationRange {
key: None,
from,
to,
}
}
}

#[derive(Clone, Debug, PartialEq)]
/// Internally used u64 range for one range bucket.
pub(crate) struct InternalRangeAggregationRange {
/// Custom key for the range bucket
key: Option<String>,
/// `u64` range value
range: Range<u64>,
}

impl From<Range<u64>> for InternalRangeAggregationRange {
fn from(range: Range<u64>) -> Self {
InternalRangeAggregationRange { key: None, range }
}
}

@@ -108,9 +131,9 @@ pub(crate) struct SegmentRangeBucketEntry {
pub key: Key,
pub doc_count: u64,
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
/// The from range of the bucket. Equals f64::MIN when None.
/// The from range of the bucket. Equals `f64::MIN` when `None`.
pub from: Option<f64>,
/// The to range of the bucket. Equals f64::MAX when None. Open interval, `to` is not
/// The to range of the bucket. Equals `f64::MAX` when `None`. Open interval, `to` is not
/// inclusive.
pub to: Option<f64>,
}
@@ -153,7 +176,7 @@ impl SegmentRangeCollector {
) -> crate::Result<IntermediateBucketResult> {
let field_type = self.field_type;

let buckets = self
let buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry> = self
.buckets
.into_iter()
.map(move |range_bucket| {
@@ -174,23 +197,29 @@ impl SegmentRangeCollector {
pub(crate) fn from_req_and_validate(
req: &RangeAggregation,
sub_aggregation: &AggregationsWithAccessor,
bucket_count: &BucketCount,
field_type: Type,
) -> crate::Result<Self> {
// The range input on the request is f64.
// We need to convert to u64 ranges, because we read the values as u64.
// The mapping from the conversion is monotonic so ordering is preserved.
let buckets = extend_validate_ranges(&req.ranges, &field_type)?
let buckets: Vec<_> = extend_validate_ranges(&req.ranges, &field_type)?
.iter()
.map(|range| {
let to = if range.end == u64::MAX {
let key = range
.key
.clone()
.map(Key::Str)
.unwrap_or_else(|| range_to_key(&range.range, &field_type));
let to = if range.range.end == u64::MAX {
None
} else {
Some(f64_from_fastfield_u64(range.end, &field_type))
Some(f64_from_fastfield_u64(range.range.end, &field_type))
};
let from = if range.start == u64::MIN {
let from = if range.range.start == u64::MIN {
None
} else {
Some(f64_from_fastfield_u64(range.start, &field_type))
Some(f64_from_fastfield_u64(range.range.start, &field_type))
};
let sub_aggregation = if sub_aggregation.is_empty() {
None
@@ -200,11 +229,11 @@ impl SegmentRangeCollector {
)?)
};
Ok(SegmentRangeAndBucketEntry {
range: range.clone(),
range: range.range.clone(),
bucket: SegmentRangeBucketEntry {
key: range_to_key(range, &field_type),
doc_count: 0,
sub_aggregation,
key,
from,
to,
},
@@ -212,6 +241,9 @@ impl SegmentRangeCollector {
})
.collect::<crate::Result<_>>()?;

bucket_count.add_count(buckets.len() as u32);
bucket_count.validate_bucket_count()?;

Ok(SegmentRangeCollector {
buckets,
field_type,
@@ -224,40 +256,41 @@ impl SegmentRangeCollector {
doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) {
) -> crate::Result<()> {
let mut iter = doc.chunks_exact(4);
let accessor = bucket_with_accessor
.accessor
.as_single()
.expect("unexpected fast field cardinatility");
.expect("unexpected fast field cardinality");
for docs in iter.by_ref() {
let val1 = accessor.get(docs[0]);
let val2 = accessor.get(docs[1]);
let val3 = accessor.get(docs[2]);
let val4 = accessor.get(docs[3]);
let val1 = accessor.get_val(docs[0] as u64);
let val2 = accessor.get_val(docs[1] as u64);
let val3 = accessor.get_val(docs[2] as u64);
let val4 = accessor.get_val(docs[3] as u64);
let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3);
let bucket_pos4 = self.get_bucket_pos(val4);

self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos1, docs[0], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos2, docs[1], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
}
for doc in iter.remainder() {
let val = accessor.get(*doc);
for &doc in iter.remainder() {
let val = accessor.get_val(doc as u64);
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
for bucket in &mut self.buckets {
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush);
.flush_staged_docs(&bucket_with_accessor.sub_aggregation, force_flush)?;
}
}
}
Ok(())
}

#[inline]
@@ -266,13 +299,14 @@ impl SegmentRangeCollector {
bucket_pos: usize,
doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor,
) {
) -> crate::Result<()> {
let bucket = &mut self.buckets[bucket_pos];

bucket.bucket.doc_count += 1;
if let Some(sub_aggregation) = &mut bucket.bucket.sub_aggregation {
sub_aggregation.collect(doc, bucket_with_accessor);
sub_aggregation.collect(doc, bucket_with_accessor)?;
}
Ok(())
}

#[inline]
@@ -289,8 +323,8 @@ impl SegmentRangeCollector {
/// Converts the user provided f64 range value to fast field value space.
///
/// Internally fast field values are always stored as u64.
/// If the fast field has u64 [1,2,5], these values are stored as is in the fast field.
/// A fast field with f64 [1.0, 2.0, 5.0] is converted to u64 space, using a
/// If the fast field has u64 `[1, 2, 5]`, these values are stored as is in the fast field.
/// A fast field with f64 `[1.0, 2.0, 5.0]` is converted to u64 space, using a
/// monotonic mapping function, so the order is preserved.
///
/// Consequently, a f64 user range 1.0..3.0 needs to be converted to fast field value space using
@@ -298,7 +332,10 @@ impl SegmentRangeCollector {
/// fast field.
/// The alternative would be that every value read would be converted to the f64 range, but that is
/// more computational expensive when many documents are hit.
fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Result<Range<u64>> {
fn to_u64_range(
range: &RangeAggregationRange,
field_type: &Type,
) -> crate::Result<InternalRangeAggregationRange> {
let start = if let Some(from) = range.from {
f64_to_fastfield_u64(from, field_type)
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
@@ -313,39 +350,43 @@ fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Resu
u64::MAX
};

Ok(start..end)
Ok(InternalRangeAggregationRange {
key: range.key.clone(),
range: start..end,
})
}
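The doc comment above hinges on the monotonic f64-to-u64 mapping. One standard construction of such a transform is sketched below; this is illustrative only, and the exact codec behind `f64_to_fastfield_u64` may differ in detail:

```rust
// Order-preserving f64 -> u64 (a sketch, not tantivy's actual codec):
// positive floats get the sign bit set, negative floats are bitwise
// inverted, so unsigned integer order matches the original float order.
fn f64_to_u64_monotonic(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_negative() {
        !bits
    } else {
        bits | (1 << 63)
    }
}

fn main() {
    let floats = [-2.5, -0.5, 0.0, 1.0, 3.0];
    let mapped: Vec<u64> = floats.iter().map(|&f| f64_to_u64_monotonic(f)).collect();
    // Order survives the mapping, which is why a user range like 1.0..3.0
    // can be converted once at request time and then compared directly
    // against raw u64 fast field values.
    assert!(mapped.windows(2).all(|w| w[0] < w[1]));
}
```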
/// Extends the provided buckets to contain the whole value range, by inserting buckets at the
/// beginning and end.
/// beginning and end and filling gaps.
fn extend_validate_ranges(
buckets: &[RangeAggregationRange],
field_type: &Type,
) -> crate::Result<Vec<Range<u64>>> {
) -> crate::Result<Vec<InternalRangeAggregationRange>> {
let mut converted_buckets = buckets
.iter()
.map(|range| to_u64_range(range, field_type))
.collect::<crate::Result<Vec<_>>>()?;

converted_buckets.sort_by_key(|bucket| bucket.start);
if converted_buckets[0].start != u64::MIN {
converted_buckets.insert(0, u64::MIN..converted_buckets[0].start);
converted_buckets.sort_by_key(|bucket| bucket.range.start);
if converted_buckets[0].range.start != u64::MIN {
converted_buckets.insert(0, (u64::MIN..converted_buckets[0].range.start).into());
}

if converted_buckets[converted_buckets.len() - 1].end != u64::MAX {
converted_buckets.push(converted_buckets[converted_buckets.len() - 1].end..u64::MAX);
if converted_buckets[converted_buckets.len() - 1].range.end != u64::MAX {
converted_buckets
.push((converted_buckets[converted_buckets.len() - 1].range.end..u64::MAX).into());
}

// fill up holes in the ranges
let find_hole = |converted_buckets: &[Range<u64>]| {
let find_hole = |converted_buckets: &[InternalRangeAggregationRange]| {
for (pos, ranges) in converted_buckets.windows(2).enumerate() {
if ranges[0].end > ranges[1].start {
if ranges[0].range.end > ranges[1].range.start {
return Err(TantivyError::InvalidArgument(format!(
"Overlapping ranges not supported range {:?}, range+1 {:?}",
ranges[0], ranges[1]
)));
}
if ranges[0].end != ranges[1].start {
if ranges[0].range.end != ranges[1].range.start {
return Ok(Some(pos));
}
}
@@ -353,8 +394,9 @@ fn extend_validate_ranges(
};

while let Some(hole_pos) = find_hole(&converted_buckets)? {
let new_range = converted_buckets[hole_pos].end..converted_buckets[hole_pos + 1].start;
converted_buckets.insert(hole_pos + 1, new_range);
let new_range =
converted_buckets[hole_pos].range.end..converted_buckets[hole_pos + 1].range.start;
converted_buckets.insert(hole_pos + 1, new_range.into());
}

Ok(converted_buckets)
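The net effect of `extend_validate_ranges` is easiest to see on plain `Range<u64>` values; a toy re-implementation of the same algorithm, without the key-carrying wrapper:

```rust
use std::ops::Range;

// Toy version of the gap-filling above, on bare `Range<u64>` values
// (the real code wraps them in `InternalRangeAggregationRange`).
fn extend_and_fill(mut ranges: Vec<Range<u64>>) -> Vec<Range<u64>> {
    ranges.sort_by_key(|r| r.start);
    if ranges[0].start != u64::MIN {
        ranges.insert(0, u64::MIN..ranges[0].start);
    }
    if ranges.last().unwrap().end != u64::MAX {
        ranges.push(ranges.last().unwrap().end..u64::MAX);
    }
    // Insert a bucket wherever two neighbors don't touch.
    let mut i = 0;
    while i + 1 < ranges.len() {
        if ranges[i].end != ranges[i + 1].start {
            let hole = ranges[i].end..ranges[i + 1].start;
            ranges.insert(i + 1, hole);
        }
        i += 1;
    }
    ranges
}

fn main() {
    // Matches the `bucket_test_extend_range_hole` fixture below:
    // 10..20 and 30..40 grow to cover the whole u64 domain.
    let filled = extend_and_fill(vec![10..20, 30..40]);
    assert_eq!(
        filled,
        vec![u64::MIN..10, 10..20, 20..30, 30..40, 40..u64::MAX]
    );
}
```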
@@ -362,7 +404,7 @@ fn extend_validate_ranges(

pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
// is_start is there for malformed requests, e.g. ig the user passes the range u64::MIN..0.0,
// it should be rendererd as "*-0" and not "*-*"
// it should be rendered as "*-0" and not "*-*"
let to_str = |val: u64, is_start: bool| {
if (is_start && val == u64::MIN) || (!is_start && val == u64::MAX) {
"*".to_string()
@@ -381,16 +423,13 @@ pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
#[cfg(test)]
mod tests {

use serde_json::Value;
use fastfield_codecs::MonotonicallyMappableToU64;

use super::*;
use crate::aggregation::agg_req::{
Aggregation, Aggregations, BucketAggregation, BucketAggregationType,
};
use crate::aggregation::tests::get_test_index_with_num_docs;
use crate::aggregation::AggregationCollector;
use crate::fastfield::FastValue;
use crate::query::AllQuery;
use crate::aggregation::tests::{exec_request_with_query, get_test_index_with_num_docs};

pub fn get_collector_from_ranges(
ranges: Vec<RangeAggregationRange>,
@@ -399,10 +438,16 @@ mod tests {
let req = RangeAggregation {
field: "dummy".to_string(),
ranges,
..Default::default()
};

SegmentRangeCollector::from_req_and_validate(&req, &Default::default(), field_type)
.expect("unexpected error")
SegmentRangeCollector::from_req_and_validate(
&req,
&Default::default(),
&Default::default(),
field_type,
)
.expect("unexpected error")
}

#[test]
@@ -415,6 +460,7 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
@@ -422,13 +468,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req);

let reader = index.reader()?;
let searcher = reader.searcher();
let agg_res = searcher.search(&AllQuery, &collector).unwrap();

let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
let res = exec_request_with_query(agg_req, &index, None)?;

assert_eq!(res["range"]["buckets"][0]["key"], "*-0");
assert_eq!(res["range"]["buckets"][0]["doc_count"], 0);
@@ -442,6 +482,131 @@ mod tests {
Ok(())
}

#[test]
fn range_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![(0f64..0.1f64).into(), (0.1f64..0.2f64).into()],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

let res = exec_request_with_query(agg_req, &index, None)?;

assert_eq!(
res,
json!({
"range": {
"buckets": {
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
"0-0.1": {"key": "0-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
"0.1-0.2": {"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
"0.2-*": {"key": "0.2-*", "doc_count": 80, "from": 0.2},
}
}
})
);

Ok(())
}

#[test]
fn range_custom_key_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![
RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
},
RangeAggregationRange {
key: None,
from: Some(0.1f64),
to: Some(0.2f64),
},
],
keyed: false,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

let res = exec_request_with_query(agg_req, &index, None)?;

assert_eq!(
res,
json!({
"range": {
"buckets": [
{"key": "*-0", "doc_count": 0, "to": 0.0},
{"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
{"key": "0.1-0.2", "doc_count": 10, "from": 0.1, "to": 0.2},
{"key": "0.2-*", "doc_count": 80, "from": 0.2}
]
}
})
);

Ok(())
}

#[test]
fn range_custom_key_keyed_buckets_test() -> crate::Result<()> {
let index = get_test_index_with_num_docs(false, 100)?;

let agg_req: Aggregations = vec![(
"range".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "fraction_f64".to_string(),
ranges: vec![RangeAggregationRange {
key: Some("custom-key-0-to-0.1".to_string()),
from: Some(0f64),
to: Some(0.1f64),
}],
keyed: true,
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

let res = exec_request_with_query(agg_req, &index, None)?;

assert_eq!(
res,
json!({
"range": {
"buckets": {
"*-0": { "key": "*-0", "doc_count": 0, "to": 0.0},
"custom-key-0-to-0.1": {"key": "custom-key-0-to-0.1", "doc_count": 10, "from": 0.0, "to": 0.1},
"0.1-*": {"key": "0.1-*", "doc_count": 90, "from": 0.1},
}
}
})
);

Ok(())
}

#[test]
fn bucket_test_extend_range_hole() {
let buckets = vec![(10f64..20f64).into(), (30f64..40f64).into()];
@@ -520,6 +685,7 @@ mod tests {

let ranges = vec with
/// [TermBucketEntry](crate::aggregation::agg_result::BucketEntry) on the
/// AggregationCollector.
/// Result type is [`BucketResult`](crate::aggregation::agg_result::BucketResult) with
/// [`TermBucketEntry`](crate::aggregation::agg_result::BucketEntry) on the
/// `AggregationCollector`.
///
/// Result type is
/// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
/// [crate::aggregation::intermediate_agg_result::IntermediateTermBucketEntry] on the
/// DistributedAggregationCollector.
/// [`IntermediateBucketResult`](crate::aggregation::intermediate_agg_result::IntermediateBucketResult) with
/// [`IntermediateTermBucketEntry`](crate::aggregation::intermediate_agg_result::IntermediateTermBucketEntry) on the
/// `DistributedAggregationCollector`.
///
/// # Limitations/Compatibility
///
@@ -64,6 +68,25 @@ use crate::{DocId, TantivyError};
/// }
/// }
/// ```
///
/// /// # Response JSON Format
/// ```json
/// {
/// ...
/// "aggregations": {
/// "genres": {
/// "doc_count_error_upper_bound": 0,
/// "sum_other_doc_count": 0,
/// "buckets": [
/// { "key": "drumnbass", "doc_count": 6 },
/// { "key": "raggae", "doc_count": 4 },
/// { "key": "jazz", "doc_count": 2 }
/// ]
/// }
/// }
/// }
/// ```

#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct TermsAggregation {
/// The field to aggregate on.
@@ -110,8 +133,8 @@ pub struct TermsAggregation {
/// Set the order. `String` is here a target, which is either "_count", "_key", or the name of
/// a metric sub_aggregation.
///
/// Single value metrics like average can be adressed by its name.
/// Multi value metrics like stats are required to adress their field by name e.g.
/// Single value metrics like average can be addressed by its name.
/// Multi value metrics like stats are required to address their field by name e.g.
/// "stats.avg"
///
/// Examples in JSON format:
@@ -244,28 +267,33 @@ impl TermBuckets {
&mut self,
term_ids: &[u64],
doc: DocId,
bucket_with_accessor: &AggregationsWithAccessor,
sub_aggregation: &AggregationsWithAccessor,
bucket_count: &BucketCount,
blueprint: &Option<SegmentAggregationResultsCollector>,
) {
// self.ensure_vec_exists(term_ids);
) -> crate::Result<()> {
for &term_id in term_ids {
let entry = self
.entries
.entry(term_id as u32)
.or_insert_with(|| TermBucketEntry::from_blueprint(blueprint));
let entry = self.entries.entry(term_id as u32).or_insert_with(|| {
bucket_count.add_count(1);

TermBucketEntry::from_blueprint(blueprint)
});
entry.doc_count += 1;
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
sub_aggregations.collect(doc, bucket_with_accessor);
sub_aggregations.collect(doc, sub_aggregation)?;
}
}
bucket_count.validate_bucket_count()?;

Ok(())
}

fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) {
fn force_flush(&mut self, agg_with_accessor: &AggregationsWithAccessor) -> crate::Result<()> {
for entry in &mut self.entries.values_mut() {
if let Some(sub_aggregations) = entry.sub_aggregations.as_mut() {
sub_aggregations.flush_staged_docs(agg_with_accessor, false);
sub_aggregations.flush_staged_docs(agg_with_accessor, false)?;
}
}
Ok(())
}
}
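The `or_insert_with` closure above only bumps the shared counter when a genuinely new term bucket is created, and `validate_bucket_count` turns an overflow into an error instead of letting a high-cardinality field allocate unbounded buckets. A sketch of that guard in isolation (field and method names assumed, not tantivy's exact `BucketCount`):

```rust
use std::cell::Cell;
use std::rc::Rc;

// Counter shared across collectors via `Rc`; `validate` fails once the
// configured ceiling is crossed.
struct BucketCountGuard {
    count: Rc<Cell<u32>>,
    max: u32,
}

impl BucketCountGuard {
    fn add_count(&self, count: u32) {
        self.count.set(self.count.get() + count);
    }
    fn validate(&self) -> Result<(), String> {
        if self.count.get() > self.max {
            Err(format!("too many buckets: {}", self.count.get()))
        } else {
            Ok(())
        }
    }
}

fn main() {
    let shared = Rc::new(Cell::new(0));
    let guard = BucketCountGuard { count: Rc::clone(&shared), max: 2 };
    guard.add_count(1);
    assert!(guard.validate().is_ok());
    guard.add_count(2); // the third bucket crosses the ceiling
    assert!(guard.validate().is_err());
}
```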
@@ -421,7 +449,7 @@ impl SegmentTermCollector {
doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) {
) -> crate::Result<()> {
let accessor = bucket_with_accessor
.accessor
.as_multi()
@@ -441,26 +469,30 @@ impl SegmentTermCollector {
&vals1,
docs[0],
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
);
)?;
self.term_buckets.increment_bucket(
&vals2,
docs[1],
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
);
)?;
self.term_buckets.increment_bucket(
&vals3,
docs[2],
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
);
)?;
self.term_buckets.increment_bucket(
&vals4,
docs[3],
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
);
)?;
}
for &doc in iter.remainder() {
accessor.get_vals(doc, &mut vals1);
@@ -469,13 +501,15 @@ impl SegmentTermCollector {
&vals1,
doc,
&bucket_with_accessor.sub_aggregation,
&bucket_with_accessor.bucket_count,
&self.blueprint,
);
)?;
}
if force_flush {
self.term_buckets
.force_flush(&bucket_with_accessor.sub_aggregation);
.force_flush(&bucket_with_accessor.sub_aggregation)?;
}
Ok(())
}
}

@@ -1173,6 +1207,65 @@ mod tests {
Ok(())
}

#[test]
fn terms_aggregation_term_bucket_limit() -> crate::Result<()> {
let terms: Vec<String> = (0..100_000).map(|el| el.to_string()).collect();
let terms_per_segment = vec![terms.iter().map(|el| el.as_str()).collect()];

let index = get_test_index_from_terms(true, &terms_per_segment)?;

let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "string_id".to_string(),
min_doc_count: Some(0),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

let res = exec_request_with_query(agg_req, &index, None);

assert!(res.is_err());

Ok(())
}

#[test]
fn terms_aggregation_multi_token_per_doc() -> crate::Result<()> {
let terms = vec!["Hello Hello", "Hallo Hallo"];

let index = get_test_index_from_terms(true, &[terms])?;

let agg_req: Aggregations = vec![(
"my_texts".to_string(),
Aggregation::Bucket(BucketAggregation {
bucket_agg: BucketAggregationType::Terms(TermsAggregation {
field: "text_id".to_string(),
min_doc_count: Some(0),
..Default::default()
}),
sub_aggregation: Default::default(),
}),
)]
.into_iter()
.collect();

let res = exec_request_with_query(agg_req, &index, None).unwrap();

assert_eq!(res["my_texts"]["buckets"][0]["key"], "hello");
assert_eq!(res["my_texts"]["buckets"][0]["doc_count"], 2);

assert_eq!(res["my_texts"]["buckets"][1]["key"], "hallo");
assert_eq!(res["my_texts"]["buckets"][1]["doc_count"], 2);

Ok(())
}

#[test]
fn test_json_format() -> crate::Result<()> {
let agg_req: Aggregations = vec![(
@@ -1291,9 +1384,15 @@ mod bench {
let mut collector = get_collector_with_buckets(total_terms);
let vals = get_rand_terms(total_terms, num_terms);
let aggregations_with_accessor: AggregationsWithAccessor = Default::default();
let bucket_count: BucketCount = BucketCount {
bucket_count: Default::default(),
max_bucket_count: 1_000_001u32,
};
b.iter(|| {
for &val in &vals {
collector.increment_bucket(&[val], 0, &aggregations_with_accessor, &None);
collector
.increment_bucket(&[val], 0, &aggregations_with_accessor, &bucket_count, &None)
.unwrap();
}
})
}

@@ -1,3 +1,5 @@
use std::rc::Rc;

use super::agg_req::Aggregations;
use super::agg_req_with_accessor::AggregationsWithAccessor;
use super::agg_result::AggregationResults;
@@ -5,19 +7,29 @@ use super::intermediate_agg_result::IntermediateAggregationResults;
use super::segment_agg_result::SegmentAggregationResultsCollector;
use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
use crate::collector::{Collector, SegmentCollector};
use crate::SegmentReader;
use crate::{SegmentReader, TantivyError};

/// The default max bucket count, before the aggregation fails.
pub const MAX_BUCKET_COUNT: u32 = 65000;

/// Collector for aggregations.
///
/// The collector collects all aggregations by the underlying aggregation request.
pub struct AggregationCollector {
agg: Aggregations,
max_bucket_count: u32,
}

impl AggregationCollector {
/// Create collector from aggregation request.
pub fn from_aggs(agg: Aggregations) -> Self {
Self { agg }
///
/// Aggregation fails when the total bucket count is higher than max_bucket_count.
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
}
}
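A hypothetical call site for the new signature, with `None` keeping the 65,000-bucket default and `Some(..)` overriding it:

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::AggregationCollector;

// Raise the bucket ceiling for this one query; pass `None` to keep
// the `MAX_BUCKET_COUNT` default.
fn make_collector(agg_req: Aggregations) -> AggregationCollector {
    AggregationCollector::from_aggs(agg_req, Some(100_000))
}
```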
@@ -27,16 +39,22 @@ impl AggregationCollector {
///
/// # Purpose
/// AggregationCollector returns `IntermediateAggregationResults` and not the final
/// `AggregationResults`, so that results from differenct indices can be merged and then converted
/// into the final `AggregationResults` via the `into()` method.
/// `AggregationResults`, so that results from different indices can be merged and then converted
/// into the final `AggregationResults` via the `into_final_result()` method.
pub struct DistributedAggregationCollector {
agg: Aggregations,
max_bucket_count: u32,
}

impl DistributedAggregationCollector {
/// Create collector from aggregation request.
pub fn from_aggs(agg: Aggregations) -> Self {
Self { agg }
///
/// max_bucket_count will default to `MAX_BUCKET_COUNT` (65000) when unset
pub fn from_aggs(agg: Aggregations, max_bucket_count: Option<u32>) -> Self {
Self {
agg,
max_bucket_count: max_bucket_count.unwrap_or(MAX_BUCKET_COUNT),
}
}
}

@@ -50,7 +68,11 @@ impl Collector for DistributedAggregationCollector {
_segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
AggregationSegmentCollector::from_agg_req_and_reader(
&self.agg,
reader,
self.max_bucket_count,
)
}

fn requires_scoring(&self) -> bool {
@@ -75,7 +97,11 @@ impl Collector for AggregationCollector {
_segment_local_id: crate::SegmentOrdinal,
reader: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
AggregationSegmentCollector::from_agg_req_and_reader(
&self.agg,
reader,
self.max_bucket_count,
)
}

fn requires_scoring(&self) -> bool {
@@ -87,7 +113,7 @@ impl Collector for AggregationCollector {
segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
) -> crate::Result<Self::Fruit> {
let res = merge_fruits(segment_fruits)?;
AggregationResults::from_intermediate_and_req(res, self.agg.clone())
res.into_final_bucket_result(self.agg.clone())
}
}

@@ -105,25 +131,29 @@ fn merge_fruits(
}
}

/// AggregationSegmentCollector does the aggregation collection on a segment.
/// `AggregationSegmentCollector` does the aggregation collection on a segment.
pub struct AggregationSegmentCollector {
aggs_with_accessor: AggregationsWithAccessor,
result: SegmentAggregationResultsCollector,
error: Option<TantivyError>,
}

impl AggregationSegmentCollector {
/// Creates an AggregationSegmentCollector from an [Aggregations] request and a segment reader.
/// Also includes validation, e.g. checking field types and existence.
/// Creates an `AggregationSegmentCollector from` an [`Aggregations`] request and a segment
/// reader. Also includes validation, e.g. checking field types and existence.
pub fn from_agg_req_and_reader(
agg: &Aggregations,
reader: &SegmentReader,
max_bucket_count: u32,
) -> crate::Result<Self> {
let aggs_with_accessor = get_aggs_with_accessor_and_validate(agg, reader)?;
let aggs_with_accessor =
get_aggs_with_accessor_and_validate(agg, reader, Rc::default(), max_bucket_count)?;
let result =
SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
Ok(AggregationSegmentCollector {
aggs_with_accessor,
result,
error: None,
})
}
}
@@ -133,12 +163,20 @@ impl SegmentCollector for AggregationSegmentCollector {

#[inline]
fn collect(&mut self, doc: crate::DocId, _score: crate::Score) {
self.result.collect(doc, &self.aggs_with_accessor);
if self.error.is_some() {
return;
}
if let Err(err) = self.result.collect(doc, &self.aggs_with_accessor) {
self.error = Some(err);
}
}

fn harvest(mut self) -> Self::Fruit {
if let Some(err) = self.error {
return Err(err);
}
self.result
.flush_staged_docs(&self.aggs_with_accessor, true);
.flush_staged_docs(&self.aggs_with_accessor, true)?;
self.result
.into_intermediate_aggregations_result(&self.aggs_with_accessor)
}
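Because `SegmentCollector::collect` is infallible by signature, the new `error` field records the first failure and `harvest` surfaces it. The pattern in miniature, with illustrative types rather than tantivy's:

```rust
// An infallible `collect` callback stashes the first error; the fallible
// `harvest` step returns it. Further `collect` calls become no-ops once
// the sink is poisoned.
struct FallibleSink {
    error: Option<String>,
    sum: u64,
}

impl FallibleSink {
    fn collect(&mut self, doc: u64) {
        if self.error.is_some() {
            return; // already poisoned; skip further work
        }
        if let Err(err) = self.try_collect(doc) {
            self.error = Some(err);
        }
    }
    fn try_collect(&mut self, doc: u64) -> Result<(), String> {
        self.sum = self.sum.checked_add(doc).ok_or("overflow")?;
        Ok(())
    }
    fn harvest(self) -> Result<u64, String> {
        match self.error {
            Some(err) => Err(err),
            None => Ok(self.sum),
        }
    }
}

fn main() {
    let mut sink = FallibleSink { error: None, sum: 0 };
    sink.collect(1);
    sink.collect(u64::MAX); // fails and poisons the sink
    sink.collect(5); // ignored
    assert_eq!(sink.harvest(), Err("overflow".to_string()));
}
```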
@@ -3,21 +3,25 @@
//! indices.

use std::cmp::Ordering;
use std::collections::HashMap;

use fnv::FnvHashMap;
use itertools::Itertools;
use serde::{Deserialize, Serialize};

use super::agg_req::{AggregationsInternal, BucketAggregationType, MetricAggregation};
use super::agg_result::BucketResult;
use super::agg_req::{
Aggregations, AggregationsInternal, BucketAggregationInternal, BucketAggregationType,
MetricAggregation,
};
use super::agg_result::{AggregationResult, BucketResult, RangeBucketEntry};
use super::bucket::{
cut_off_buckets, get_agg_name_and_property, GetDocCount, Order, OrderTarget,
SegmentHistogramBucketEntry, TermsAggregation,
cut_off_buckets, get_agg_name_and_property, intermediate_histogram_buckets_to_final_buckets,
GetDocCount, Order, OrderTarget, SegmentHistogramBucketEntry, TermsAggregation,
};
use super::metric::{IntermediateAverage, IntermediateStats};
use super::segment_agg_result::SegmentMetricResultCollector;
use super::{Key, SerializedKey, VecWithNames};
use crate::aggregation::agg_result::{AggregationResults, BucketEntry};
use crate::aggregation::agg_result::{AggregationResults, BucketEntries, BucketEntry};
use crate::aggregation::bucket::TermsAggregationInternal;

/// Contains the intermediate aggregation result, which is optimized to be merged with other
@@ -31,6 +35,43 @@ pub struct IntermediateAggregationResults {
}

impl IntermediateAggregationResults {
/// Convert intermediate result and its aggregation request to the final result.
pub fn into_final_bucket_result(self, req: Aggregations) -> crate::Result<AggregationResults> {
self.into_final_bucket_result_internal(&(req.into()))
}

/// Convert intermediate result and its aggregation request to the final result.
///
/// Internal function, AggregationsInternal is used instead Aggregations, which is optimized
/// for internal processing, by splitting metric and buckets into separate groups.
pub(crate) fn into_final_bucket_result_internal(
self,
req: &AggregationsInternal,
) -> crate::Result<AggregationResults> {
// Important assumption:
// When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
// request
let mut results: HashMap<String, AggregationResult> = HashMap::new();

if let Some(buckets) = self.buckets {
convert_and_add_final_buckets_to_result(&mut results, buckets, &req.buckets)?
} else {
// When there are no buckets, we create empty buckets, so that the serialized json
// format is constant
add_empty_final_buckets_to_result(&mut results, &req.buckets)?
};

if let Some(metrics) = self.metrics {
convert_and_add_final_metrics_to_result(&mut results, metrics);
} else {
// When there are no metrics, we create empty metric results, so that the serialized
// json format is constant
add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
}

Ok(AggregationResults(results))
}
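Putting the distributed pieces together: intermediate results from several indices can be folded with `merge_fruits` and resolved once at the end with the new `into_final_bucket_result`. A hypothetical call site, with paths assumed from the hunks above:

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::intermediate_agg_result::IntermediateAggregationResults;

// Fold per-index intermediate results together, then resolve them
// against the original request exactly once.
fn merge_and_finalize(
    mut fruits: Vec<IntermediateAggregationResults>,
    agg_req: Aggregations,
) -> tantivy::Result<AggregationResults> {
    let mut merged = fruits.pop().expect("at least one result");
    for fruit in fruits {
        merged.merge_fruits(fruit);
    }
    merged.into_final_bucket_result(agg_req)
}
```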
pub(crate) fn empty_from_req(req: &AggregationsInternal) -> Self {
let metrics = if req.metrics.is_empty() {
None
@@ -67,10 +108,10 @@ impl IntermediateAggregationResults {
Self { metrics, buckets }
}

/// Merge an other intermediate aggregation result into this result.
/// Merge another intermediate aggregation result into this result.
///
/// The order of the values need to be the same on both results. This is ensured when the same
/// (key values) are present on the underlying VecWithNames struct.
/// (key values) are present on the underlying `VecWithNames` struct.
pub fn merge_fruits(&mut self, other: IntermediateAggregationResults) {
if let (Some(buckets_left), Some(buckets_right)) = (&mut self.buckets, other.buckets) {
for (bucket_left, bucket_right) in
@@ -90,6 +131,58 @@ impl IntermediateAggregationResults {
}
}

fn convert_and_add_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
metrics: VecWithNames<IntermediateMetricResult>,
) {
results.extend(
metrics
.into_iter()
.map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
);
}

fn add_empty_final_metrics_to_result(
results: &mut HashMap<String, AggregationResult>,
req_metrics: &VecWithNames<MetricAggregation>,
) -> crate::Result<()> {
results.extend(req_metrics.iter().map(|(key, req)| {
let empty_bucket = IntermediateMetricResult::empty_from_req(req);
(
key.to_string(),
AggregationResult::MetricResult(empty_bucket.into()),
)
}));
Ok(())
}

fn add_empty_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
let requested_buckets = req_buckets.iter();
for (key, req) in requested_buckets {
let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
results.insert(key.to_string(), empty_bucket);
}
Ok(())
}

fn convert_and_add_final_buckets_to_result(
results: &mut HashMap<String, AggregationResult>,
buckets: VecWithNames<IntermediateBucketResult>,
req_buckets: &VecWithNames<BucketAggregationInternal>,
) -> crate::Result<()> {
assert_eq!(buckets.len(), req_buckets.len());

let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
for ((key, bucket), req) in buckets_with_request {
let result = AggregationResult::BucketResult(bucket.into_final_bucket_result(req)?);
results.insert(key, result);
}
Ok(())
}

/// An aggregation is either a bucket or a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntermediateAggregationResult {
@@ -171,6 +264,68 @@ pub enum IntermediateBucketResult {
}

impl IntermediateBucketResult {
pub(crate) fn into_final_bucket_result(
self,
req: &BucketAggregationInternal,
) -> crate::Result<BucketResult> {
match self {
IntermediateBucketResult::Range(range_res) => {
let mut buckets: Vec<RangeBucketEntry> = range_res
.buckets
.into_iter()
.map(|(_, bucket)| bucket.into_final_bucket_entry(&req.sub_aggregation))
.collect::<crate::Result<Vec<_>>>()?;

buckets.sort_by(|left, right| {
left.from
.unwrap_or(f64::MIN)
.total_cmp(&right.from.unwrap_or(f64::MIN))
});

let is_keyed = req
.as_range()
.expect("unexpected aggregation, expected range aggregation")
.keyed;
let buckets = if is_keyed {
let mut bucket_map =
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
BucketEntries::HashMap(bucket_map)
} else {
BucketEntries::Vec(buckets)
};
Ok(BucketResult::Range { buckets })
}
IntermediateBucketResult::Histogram { buckets } => {
let buckets = intermediate_histogram_buckets_to_final_buckets(
buckets,
req.as_histogram()
.expect("unexpected aggregation, expected histogram aggregation"),
&req.sub_aggregation,
)?;

let buckets = if req.as_histogram().unwrap().keyed {
let mut bucket_map =
FnvHashMap::with_capacity_and_hasher(buckets.len(), Default::default());
for bucket in buckets {
bucket_map.insert(bucket.key.to_string(), bucket);
}
BucketEntries::HashMap(bucket_map)
} else {
BucketEntries::Vec(buckets)
};
Ok(BucketResult::Histogram { buckets })
}
IntermediateBucketResult::Terms(terms) => terms.into_final_result(
req.as_term()
.expect("unexpected aggregation, expected term aggregation"),
&req.sub_aggregation,
),
}
}

pub(crate) fn empty_from_req(req: &BucketAggregationType) -> Self {
match req {
BucketAggregationType::Terms(_) => IntermediateBucketResult::Terms(Default::default()),
@@ -267,10 +422,9 @@ impl IntermediateTermBucketResult {
Ok(BucketEntry {
key: Key::Str(key),
doc_count: entry.doc_count,
sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
entry.sub_aggregation,
sub_aggregation_req,
)?,
sub_aggregation: entry
.sub_aggregation
.into_final_bucket_result_internal(sub_aggregation_req)?,
})
})
.collect::<crate::Result<_>>()?;
@@ -307,12 +461,9 @@ impl IntermediateTermBucketResult {
})
.collect::<crate::Result<Vec<_>>>()?;

buckets_with_val.sort_by(|(_, val1), (_, val2)| {
// TODO use total_cmp in next rust stable release
match &order {
Order::Desc => val2.partial_cmp(val1).unwrap_or(std::cmp::Ordering::Equal),
Order::Asc => val1.partial_cmp(val2).unwrap_or(std::cmp::Ordering::Equal),
}
buckets_with_val.sort_by(|(_, val1), (_, val2)| match &order {
|
||||
Order::Desc => val2.total_cmp(val1),
|
||||
Order::Asc => val1.total_cmp(val2),
|
||||
});
|
||||
buckets = buckets_with_val
|
||||
.into_iter()
|
||||
@@ -374,6 +525,21 @@ pub struct IntermediateHistogramBucketEntry {
|
||||
pub sub_aggregation: IntermediateAggregationResults,
|
||||
}
|
||||
|
||||
impl IntermediateHistogramBucketEntry {
|
||||
pub(crate) fn into_final_bucket_entry(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<BucketEntry> {
|
||||
Ok(BucketEntry {
|
||||
key: Key::F64(self.key),
|
||||
doc_count: self.doc_count,
|
||||
sub_aggregation: self
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(req)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry {
|
||||
fn from(entry: SegmentHistogramBucketEntry) -> Self {
|
||||
IntermediateHistogramBucketEntry {
|
||||
@@ -394,14 +560,31 @@ pub struct IntermediateRangeBucketEntry {
|
||||
pub doc_count: u64,
|
||||
/// The sub_aggregation in this bucket.
|
||||
pub sub_aggregation: IntermediateAggregationResults,
|
||||
/// The from range of the bucket. Equals f64::MIN when None.
|
||||
/// The from range of the bucket. Equals `f64::MIN` when `None`.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub from: Option<f64>,
|
||||
/// The to range of the bucket. Equals f64::MAX when None.
|
||||
/// The to range of the bucket. Equals `f64::MAX` when `None`.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub to: Option<f64>,
|
||||
}
|
||||
|
||||
impl IntermediateRangeBucketEntry {
|
||||
pub(crate) fn into_final_bucket_entry(
|
||||
self,
|
||||
req: &AggregationsInternal,
|
||||
) -> crate::Result<RangeBucketEntry> {
|
||||
Ok(RangeBucketEntry {
|
||||
key: self.key,
|
||||
doc_count: self.doc_count,
|
||||
sub_aggregation: self
|
||||
.sub_aggregation
|
||||
.into_final_bucket_result_internal(req)?,
|
||||
to: self.to,
|
||||
from: self.from,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// This is the term entry for a bucket, which contains a count, and optionally
|
||||
/// sub_aggregations.
|
||||
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
|
||||
|
||||
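The keyed / non-keyed conversion above appears twice (ranges and histograms), so it is worth seeing in isolation: `keyed: true` turns the bucket list into a JSON object indexed by the bucket key, while `keyed: false` keeps a plain array; the bucket data itself is identical. A minimal, self-contained sketch with simplified stand-in types (not tantivy's actual `BucketEntries`/`RangeBucketEntry`):

```rust
use std::collections::HashMap;

// Simplified stand-ins for tantivy's BucketEntries / RangeBucketEntry.
#[derive(Debug)]
enum BucketEntries<T> {
    Vec(Vec<T>),
    HashMap(HashMap<String, T>),
}

#[derive(Debug)]
struct Bucket {
    key: String,
    doc_count: u64,
}

// `keyed: true` serializes buckets as a JSON object keyed by bucket key,
// `keyed: false` as a plain JSON array; the data itself is identical.
fn into_entries(buckets: Vec<Bucket>, keyed: bool) -> BucketEntries<Bucket> {
    if keyed {
        let mut map = HashMap::with_capacity(buckets.len());
        for bucket in buckets {
            map.insert(bucket.key.clone(), bucket);
        }
        BucketEntries::HashMap(map)
    } else {
        BucketEntries::Vec(buckets)
    }
}

fn main() {
    let buckets = vec![
        Bucket { key: "0-10".to_string(), doc_count: 3 },
        Bucket { key: "10-20".to_string(), doc_count: 7 },
    ];
    println!("{:?}", into_entries(buckets, true));
}
```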
@@ -1,9 +1,9 @@
use std::fmt::Debug;

use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};

use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::DocId;

@@ -57,13 +57,13 @@ impl SegmentAverageCollector {
data: Default::default(),
}
}
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -73,8 +73,8 @@ impl SegmentAverageCollector {
self.data.collect(val3);
self.data.collect(val4);
}
for doc in iter.remainder() {
let val = field.get(*doc);
for &doc in iter.remainder() {
let val = field.get_val(doc as u64);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.data.collect(val);
}

@@ -1,14 +1,14 @@
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};

use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::schema::Type;
use crate::{DocId, TantivyError};

/// A multi-value metric aggregation that computes stats of numeric values that are
/// extracted from the aggregated documents.
/// Supported field types are u64, i64, and f64.
/// See [Stats] for returned statistics.
/// Supported field types are `u64`, `i64`, and `f64`.
/// See [`Stats`] for returned statistics.
///
/// # JSON Format
/// ```json
@@ -43,13 +43,13 @@ pub struct Stats {
pub count: usize,
/// The sum of the fast field values.
pub sum: f64,
/// The standard deviation of the fast field values. None for count == 0.
/// The standard deviation of the fast field values. `None` for count == 0.
pub standard_deviation: Option<f64>,
/// The min value of the fast field values.
pub min: Option<f64>,
/// The max value of the fast field values.
pub max: Option<f64>,
/// The average of the values. None for count == 0.
/// The average of the values. `None` for count == 0.
pub avg: Option<f64>,
}

@@ -70,7 +70,7 @@ impl Stats {
}
}

/// IntermediateStats contains the mergeable version for stats.
/// `IntermediateStats` contains the mergeable version for stats.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntermediateStats {
count: usize,
@@ -163,13 +163,13 @@ impl SegmentStatsCollector {
stats: IntermediateStats::default(),
}
}
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &dyn Column<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -179,8 +179,8 @@ impl SegmentStatsCollector {
self.stats.collect(val3);
self.stats.collect(val4);
}
for doc in iter.remainder() {
let val = field.get(*doc);
for &doc in iter.remainder() {
let val = field.get_val(doc as u64);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val);
}
@@ -222,7 +222,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let reader = index.reader()?;
let searcher = reader.searcher();
@@ -285,6 +285,7 @@ mod tests {
(7f64..19f64).into(),
(19f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: iter::once((
"stats".to_string(),
@@ -299,7 +300,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
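Both `collect_block` implementations above use the same manual four-way unroll over `chunks_exact(4)` plus a remainder loop. Stripped of the fast-field specifics, the pattern looks like this sketch, where `load` stands in for the per-document fast field lookup:

```rust
// Four-way unrolled block processing, as used by the segment collectors above.
// `load` stands in for the per-document fast field lookup.
fn process_block(docs: &[u32], load: impl Fn(u32) -> u64, mut collect: impl FnMut(u64)) {
    let mut iter = docs.chunks_exact(4);
    for chunk in iter.by_ref() {
        // Issuing four independent loads per iteration gives the CPU
        // more instruction-level parallelism than one load per iteration.
        let v1 = load(chunk[0]);
        let v2 = load(chunk[1]);
        let v3 = load(chunk[2]);
        let v4 = load(chunk[3]);
        collect(v1);
        collect(v2);
        collect(v3);
        collect(v4);
    }
    // The last 0..=3 documents are handled one by one.
    for &doc in iter.remainder() {
        collect(load(doc));
    }
}

fn main() {
    let mut sum = 0u64;
    process_block(&[1, 2, 3, 4, 5], |doc| doc as u64 * 10, |v| sum += v);
    assert_eq!(sum, 150);
}
```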
@@ -10,31 +10,30 @@
//!
//! There are two categories: [Metrics](metric) and [Buckets](bucket).
//!
//! # Usage
//!
//! ## Prerequisite
//! Currently aggregations work only on [fast fields](`crate::fastfield`). Single value fast fields
//! of type `u64`, `f64`, `i64` and fast fields on text fields.
//!
//! ## Usage
//! To use aggregations, build an aggregation request by constructing
//! [Aggregations](agg_req::Aggregations).
//! Create an [AggregationCollector] from this request. AggregationCollector implements the
//! `Collector` trait and can be passed as collector into `searcher.search()`.
//! [`Aggregations`](agg_req::Aggregations).
//! Create an [`AggregationCollector`] from this request. `AggregationCollector` implements the
//! [`Collector`](crate::collector::Collector) trait and can be passed as collector into
//! [`Searcher::search()`](crate::Searcher::search).
//!
//! #### Limitations
//!
//! Currently aggregations work only on single value fast fields of type u64, f64, i64 and
//! fast fields on text fields.
//!
//! # JSON Format
//! ## JSON Format
//! Aggregations request and result structures de/serialize into elasticsearch compatible JSON.
//!
//! ```verbatim
//! let agg_req: Aggregations = serde_json::from_str(json_request_string).unwrap();
//! let collector = AggregationCollector::from_aggs(agg_req);
//! let collector = AggregationCollector::from_aggs(agg_req, None);
//! let searcher = reader.searcher();
//! let agg_res = searcher.search(&term_query, &collector).unwrap_err();
//! let json_response_string: String = &serde_json::to_string(&agg_res)?;
//! ```
//!
//! # Supported Aggregations
//! ## Supported Aggregations
//! - [Bucket](bucket)
//! - [Histogram](bucket::HistogramAggregation)
//! - [Range](bucket::RangeAggregation)
@@ -44,8 +43,8 @@
//! - [Stats](metric::StatsAggregation)
//!
//! # Example
//! Compute the average metric, by building [agg_req::Aggregations], which is built from an (String,
//! [agg_req::Aggregation]) iterator.
//! Compute the average metric, by building [`agg_req::Aggregations`], which is built from an
//! `(String, agg_req::Aggregation)` iterator.
//!
//! ```
//! use tantivy::aggregation::agg_req::{Aggregations, Aggregation, MetricAggregation};
@@ -68,7 +67,7 @@
//! .into_iter()
//! .collect();
//!
//! let collector = AggregationCollector::from_aggs(agg_req);
//! let collector = AggregationCollector::from_aggs(agg_req, None);
//!
//! let searcher = reader.searcher();
//! let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
@@ -132,6 +131,7 @@
//! bucket_agg: BucketAggregationType::Range(RangeAggregation{
//! field: "score".to_string(),
//! ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
//! keyed: false,
//! }),
//! sub_aggregation: sub_agg_req_1.clone(),
//! }),
@@ -142,15 +142,15 @@
//! ```
//!
//! # Distributed Aggregation
//! When the data is distributed on different [crate::Index] instances, the
//! [DistributedAggregationCollector] provides functionality to merge data between independent
//! When the data is distributed on different [`Index`](crate::Index) instances, the
//! [`DistributedAggregationCollector`] provides functionality to merge data between independent
//! search calls by returning
//! [IntermediateAggregationResults](intermediate_agg_result::IntermediateAggregationResults).
//! IntermediateAggregationResults provides the
//! [merge_fruits](intermediate_agg_result::IntermediateAggregationResults::merge_fruits) method to
//! merge multiple results. The merged result can then be converted into
//! [agg_result::AggregationResults] via the
//! [agg_result::AggregationResults::from_intermediate_and_req] method.
//! [`IntermediateAggregationResults`](intermediate_agg_result::IntermediateAggregationResults).
//! `IntermediateAggregationResults` provides the
//! [`merge_fruits`](intermediate_agg_result::IntermediateAggregationResults::merge_fruits) method
//! to merge multiple results. The merged result can then be converted into
//! [`AggregationResults`](agg_result::AggregationResults) via the
//! [`into_final_bucket_result`](intermediate_agg_result::IntermediateAggregationResults::into_final_bucket_result) method.
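To make the distributed-aggregation paragraph above concrete, here is the merge flow in the same `verbatim` style as the module docs. The two searchers (e.g. one per shard), the query, and `agg_req` are assumed to exist; the method names are the ones introduced in this changeset:

```verbatim
// Two independent searches (e.g. on two shards) produce intermediate results.
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None);
let intermediate_a = searcher_a.search(&query, &collector)?;
let intermediate_b = searcher_b.search(&query, &collector)?;

// Merge them positionally, then convert into the final, serializable form.
let mut merged = intermediate_a;
merged.merge_fruits(intermediate_b);
let agg_res = merged.into_final_bucket_result(agg_req)?;
```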
pub mod agg_req;
mod agg_req_with_accessor;
@@ -160,17 +160,17 @@ mod collector;
pub mod intermediate_agg_result;
pub mod metric;
mod segment_agg_result;

use std::collections::HashMap;
use std::fmt::Display;

pub use collector::{
AggregationCollector, AggregationSegmentCollector, DistributedAggregationCollector,
MAX_BUCKET_COUNT,
};
use fastfield_codecs::MonotonicallyMappableToU64;
use itertools::Itertools;
use serde::{Deserialize, Serialize};

use crate::fastfield::FastValue;
use crate::schema::Type;

/// Represents an associative array `(key => values)` in a very efficient manner.
@@ -258,7 +258,7 @@ impl<T: Clone> VecWithNames<T> {
}
}

/// The serialized key is used in a HashMap.
/// The serialized key is used in a `HashMap`.
pub type SerializedKey = String;

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, PartialOrd)]
@@ -267,7 +267,7 @@ pub type SerializedKey = String;
pub enum Key {
/// String key
Str(String),
/// f64 key
/// `f64` key
F64(f64),
}

@@ -280,10 +280,10 @@ impl Display for Key {
}
}

/// Invert of to_fastfield_u64. Used to convert to f64 for metrics.
/// Inverse of `to_fastfield_u64`. Used to convert to `f64` for metrics.
///
/// # Panics
/// Only u64, f64, i64 is supported
/// Only `u64`, `f64`, and `i64` are supported.
pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
match field_type {
Type::U64 => val as f64,
@@ -295,15 +295,15 @@ pub(crate) fn f64_from_fastfield_u64(val: u64, field_type: &Type) -> f64 {
}
}

/// Converts the f64 value to fast field value space.
/// Converts the `f64` value to fast field value space.
///
/// If the fast field has u64, values are stored as u64 in the fast field.
/// A f64 value of e.g. 2.0 therefore needs to be converted to 1u64
/// If the fast field has `u64`, values are stored as `u64` in the fast field.
/// A `f64` value of e.g. `2.0` therefore needs to be converted to `1u64`.
///
/// If the fast field has f64 values are converted and stored to u64 using a
/// If the fast field has `f64` values are converted and stored to `u64` using a
/// monotonic mapping.
/// A f64 value of e.g. 2.0 needs to be converted using the same monotonic
/// conversion function, so that the value matches the u64 value stored in the fast
/// A `f64` value of e.g. `2.0` needs to be converted using the same monotonic
/// conversion function, so that the value matches the `u64` value stored in the fast
/// field.
pub(crate) fn f64_to_fastfield_u64(val: f64, field_type: &Type) -> Option<u64> {
match field_type {
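The monotonic-mapping requirement described in the doc comments above is easiest to check with concrete values. Below is a minimal sketch of an order-preserving `f64` to `u64` mapping and its inverse; tantivy's actual implementation lives in `fastfield_codecs` and may differ in detail, this only mirrors the idea:

```rust
// A sketch of an order-preserving f64 <-> u64 mapping for fast fields.
fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        // Positive floats already compare correctly as raw bits;
        // setting the sign bit moves them above all negative values.
        bits ^ (1u64 << 63)
    } else {
        // Negative floats compare in reverse bit order, so flip every bit.
        !bits
    }
}

fn u64_to_f64(val: u64) -> f64 {
    f64::from_bits(if val & (1u64 << 63) != 0 {
        val ^ (1u64 << 63)
    } else {
        !val
    })
}

fn main() {
    for v in [-2.5f64, -0.0, 0.0, 1.0, 2.0] {
        assert_eq!(u64_to_f64(f64_to_u64(v)), v);
    }
    // The mapping is monotonic, so comparisons keep working in u64 space.
    assert!(f64_to_u64(-1.0) < f64_to_u64(0.5));
}
```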
@@ -358,7 +358,7 @@ mod tests {
index: &Index,
query: Option<(&str, &str)>,
) -> crate::Result<Value> {
let collector = AggregationCollector::from_aggs(agg_req);
let collector = AggregationCollector::from_aggs(agg_req, None);

let reader = index.reader()?;
let searcher = reader.searcher();
@@ -375,7 +375,7 @@ mod tests {
searcher.search(&AllQuery, &collector)?
};

// Test serialization/deserialization rountrip
// Test serialization/deserialization roundtrip
let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
Ok(res)
}
@@ -417,7 +417,9 @@ mod tests {
let mut schema_builder = Schema::builder();
let text_fieldtype = crate::schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
TextFieldIndexing::default()
.set_index_option(IndexRecordOption::Basic)
.set_fieldnorms(false),
)
.set_fast()
.set_stored();
@@ -435,7 +437,8 @@ mod tests {
);
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_for_tests()?;
// let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
for values in segment_and_values {
for (i, term) in values {
let i = *i;
@@ -457,9 +460,11 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
if segment_ids.len() > 1 {
let mut index_writer = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
}

Ok(index)
@@ -511,7 +516,7 @@ mod tests {
"histogram": {
"field": "score",
"interval": 70.0,
"offset": 3.0,
"offset": 3.0
},
"aggs": {
"bucketsL2": {
@@ -542,16 +547,15 @@ mod tests {
.unwrap();

let agg_res: AggregationResults = if use_distributed_collector {
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone());
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None);

let searcher = reader.searcher();
AggregationResults::from_intermediate_and_req(
searcher.search(&AllQuery, &collector).unwrap(),
agg_req,
)
.unwrap()
let intermediate_agg_result = searcher.search(&AllQuery, &collector).unwrap();
intermediate_agg_result
.into_final_bucket_result(agg_req)
.unwrap()
} else {
let collector = AggregationCollector::from_aggs(agg_req);
let collector = AggregationCollector::from_aggs(agg_req, None);

let searcher = reader.searcher();
searcher.search(&AllQuery, &collector).unwrap()
@@ -760,6 +764,7 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score".to_string(),
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
@@ -770,6 +775,7 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score_f64".to_string(),
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
@@ -780,6 +786,7 @@ mod tests {
bucket_agg: BucketAggregationType::Range(RangeAggregation {
field: "score_i64".to_string(),
ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
@@ -788,7 +795,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
@@ -936,6 +943,7 @@ mod tests {
(7f64..19f64).into(),
(19f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: sub_agg_req.clone(),
}),
@@ -950,6 +958,7 @@ mod tests {
(7f64..19f64).into(),
(19f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: sub_agg_req.clone(),
}),
@@ -964,6 +973,7 @@ mod tests {
(7f64..19f64).into(),
(19f64..20f64).into(),
],
..Default::default()
}),
sub_aggregation: sub_agg_req,
}),
@@ -978,16 +988,16 @@ mod tests {
assert_eq!(field_names, vec!["text".to_string()].into_iter().collect());

let agg_res: AggregationResults = if use_distributed_collector {
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone());
let collector = DistributedAggregationCollector::from_aggs(agg_req.clone(), None);

let searcher = reader.searcher();
let res = searcher.search(&term_query, &collector).unwrap();
// Test de/serialization roundtrip on intermediate_agg_result
let res: IntermediateAggregationResults =
serde_json::from_str(&serde_json::to_string(&res).unwrap()).unwrap();
AggregationResults::from_intermediate_and_req(res, agg_req.clone()).unwrap()
res.into_final_bucket_result(agg_req.clone()).unwrap()
} else {
let collector = AggregationCollector::from_aggs(agg_req.clone());
let collector = AggregationCollector::from_aggs(agg_req.clone(), None);

let searcher = reader.searcher();
searcher.search(&term_query, &collector).unwrap()
@@ -1045,7 +1055,7 @@ mod tests {
);

// Test empty result set
let collector = AggregationCollector::from_aggs(agg_req);
let collector = AggregationCollector::from_aggs(agg_req, None);
let searcher = reader.searcher();
searcher.search(&query_with_no_hits, &collector).unwrap();

@@ -1110,7 +1120,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();

@@ -1223,7 +1233,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1254,7 +1264,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1285,7 +1295,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1324,7 +1334,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1353,7 +1363,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req);
let collector = AggregationCollector::from_aggs(agg_req, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1382,7 +1392,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req);
let collector = AggregationCollector::from_aggs(agg_req, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1411,6 +1421,7 @@ mod tests {
(40000f64..50000f64).into(),
(50000f64..60000f64).into(),
],
..Default::default()
}),
sub_aggregation: Default::default(),
}),
@@ -1418,7 +1429,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1453,7 +1464,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1492,7 +1503,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1522,7 +1533,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
@@ -1570,6 +1581,7 @@ mod tests {
(7000f64..20000f64).into(),
(20000f64..60000f64).into(),
],
..Default::default()
}),
sub_aggregation: sub_agg_req_1.clone(),
}),
@@ -1578,7 +1590,7 @@ mod tests {
.into_iter()
.collect();

let collector = AggregationCollector::from_aggs(agg_req_1);
let collector = AggregationCollector::from_aggs(agg_req_1, None);

let searcher = reader.searcher();
let agg_res: AggregationResults =
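Throughout these tests the new second argument of `from_aggs` is `None`. Judging from the `BucketCount`/`MAX_BUCKET_COUNT` plumbing elsewhere in this changeset, it appears to be an optional cap on the number of buckets a request may create, with `None` falling back to the built-in default. A hedged sketch of a caller tightening the cap (the signature is assumed from this diff, not confirmed):

```verbatim
// `None` uses the built-in MAX_BUCKET_COUNT; a Some(..) value lowers the cap.
let collector = AggregationCollector::from_aggs(agg_req, Some(10_000));
let agg_res = searcher.search(&AllQuery, &collector)?;
```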
@@ -4,19 +4,22 @@
//! merging.

use std::fmt::Debug;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;

use super::agg_req::MetricAggregation;
use super::agg_req_with_accessor::{
AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor,
};
use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
use super::collector::MAX_BUCKET_COUNT;
use super::intermediate_agg_result::{IntermediateAggregationResults, IntermediateBucketResult};
use super::metric::{
AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation,
};
use super::VecWithNames;
use crate::aggregation::agg_req::BucketAggregationType;
use crate::DocId;
use crate::{DocId, TantivyError};

pub(crate) const DOC_BLOCK_SIZE: usize = 64;
pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
@@ -115,21 +118,22 @@ impl SegmentAggregationResultsCollector {
&mut self,
doc: crate::DocId,
agg_with_accessor: &AggregationsWithAccessor,
) {
) -> crate::Result<()> {
self.staged_docs[self.num_staged_docs] = doc;
self.num_staged_docs += 1;
if self.num_staged_docs == self.staged_docs.len() {
self.flush_staged_docs(agg_with_accessor, false);
self.flush_staged_docs(agg_with_accessor, false)?;
}
Ok(())
}

pub(crate) fn flush_staged_docs(
&mut self,
agg_with_accessor: &AggregationsWithAccessor,
force_flush: bool,
) {
) -> crate::Result<()> {
if self.num_staged_docs == 0 {
return;
return Ok(());
}
if let Some(metrics) = &mut self.metrics {
for (collector, agg_with_accessor) in
@@ -148,11 +152,12 @@ impl SegmentAggregationResultsCollector {
&self.staged_docs[..self.num_staged_docs],
agg_with_accessor,
force_flush,
);
)?;
}
}

self.num_staged_docs = 0;
Ok(())
}
}
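The `collect`/`flush_staged_docs` pair above is a fixed-size staging buffer: document ids accumulate into a `DOC_BLOCK_SIZE` array and are handed downstream one block at a time, so per-block costs such as fast field lookups can be amortized. A self-contained sketch of that pattern, with a generic sink in place of the metric and bucket collectors:

```rust
const DOC_BLOCK_SIZE: usize = 64;

// Fixed-size staging buffer: docs are batched so per-block work
// can be amortized, as in the segment collector above.
struct StagedDocs {
    staged: [u32; DOC_BLOCK_SIZE],
    len: usize,
}

impl StagedDocs {
    fn new() -> Self {
        Self { staged: [0; DOC_BLOCK_SIZE], len: 0 }
    }

    fn collect(&mut self, doc: u32, sink: &mut impl FnMut(&[u32])) {
        self.staged[self.len] = doc;
        self.len += 1;
        if self.len == self.staged.len() {
            self.flush(sink);
        }
    }

    fn flush(&mut self, sink: &mut impl FnMut(&[u32])) {
        if self.len == 0 {
            return;
        }
        sink(&self.staged[..self.len]);
        self.len = 0;
    }
}

fn main() {
    let mut total = 0usize;
    let mut sink = |block: &[u32]| total += block.len();
    let mut staging = StagedDocs::new();
    for doc in 0..150u32 {
        staging.collect(doc, &mut sink);
    }
    staging.flush(&mut sink); // the force_flush equivalent at end of segment
    assert_eq!(total, 150);
}
```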
@@ -180,10 +185,10 @@ impl SegmentMetricResultCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], metric: &MetricAggregationWithAccessor) {
match self {
SegmentMetricResultCollector::Average(avg_collector) => {
avg_collector.collect_block(doc, &metric.accessor);
avg_collector.collect_block(doc, &*metric.accessor);
}
SegmentMetricResultCollector::Stats(stats_collector) => {
stats_collector.collect_block(doc, &metric.accessor);
stats_collector.collect_block(doc, &*metric.accessor);
}
}
}
@@ -234,6 +239,7 @@ impl SegmentBucketResultCollector {
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
range_req,
&req.sub_aggregation,
&req.bucket_count,
req.field_type,
)?))
}
@@ -256,17 +262,52 @@ impl SegmentBucketResultCollector {
doc: &[DocId],
bucket_with_accessor: &BucketAggregationWithAccessor,
force_flush: bool,
) {
) -> crate::Result<()> {
match self {
SegmentBucketResultCollector::Range(range) => {
range.collect_block(doc, bucket_with_accessor, force_flush);
range.collect_block(doc, bucket_with_accessor, force_flush)?;
}
SegmentBucketResultCollector::Histogram(histogram) => {
histogram.collect_block(doc, bucket_with_accessor, force_flush)
histogram.collect_block(doc, bucket_with_accessor, force_flush)?;
}
SegmentBucketResultCollector::Terms(terms) => {
terms.collect_block(doc, bucket_with_accessor, force_flush)
terms.collect_block(doc, bucket_with_accessor, force_flush)?;
}
}
Ok(())
}
}

#[derive(Clone)]
pub(crate) struct BucketCount {
/// The counter which is shared between the aggregations for one request.
pub(crate) bucket_count: Rc<AtomicU32>,
pub(crate) max_bucket_count: u32,
}

impl Default for BucketCount {
fn default() -> Self {
Self {
bucket_count: Default::default(),
max_bucket_count: MAX_BUCKET_COUNT,
}
}
}

impl BucketCount {
pub(crate) fn validate_bucket_count(&self) -> crate::Result<()> {
if self.get_count() > self.max_bucket_count {
return Err(TantivyError::InvalidArgument(
"Aborting aggregation because too many buckets were created".to_string(),
));
}
Ok(())
}
pub(crate) fn add_count(&self, count: u32) {
self.bucket_count
.fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
}
pub(crate) fn get_count(&self) -> u32 {
self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
}
}
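A compact illustration of how `BucketCount` guards a request: every aggregation of the request shares one `Rc<AtomicU32>` (per-request and single-threaded, hence `Rc` rather than `Arc`), bumps it as buckets are created, and aborts once the combined total crosses the cap. A sketch with plain std types and a simplified error type:

```rust
use std::rc::Rc;
use std::sync::atomic::{AtomicU32, Ordering};

// Shared between all aggregations of one request.
struct BucketCount {
    bucket_count: Rc<AtomicU32>,
    max_bucket_count: u32,
}

impl BucketCount {
    fn add_count(&self, count: u32) {
        self.bucket_count.fetch_add(count, Ordering::Relaxed);
    }
    fn validate(&self) -> Result<(), String> {
        if self.bucket_count.load(Ordering::Relaxed) > self.max_bucket_count {
            return Err("too many buckets".to_string());
        }
        Ok(())
    }
}

fn main() {
    let shared = Rc::new(AtomicU32::new(0));
    // Two aggregations in the same request share one counter.
    let range = BucketCount { bucket_count: Rc::clone(&shared), max_bucket_count: 100 };
    let histogram = BucketCount { bucket_count: Rc::clone(&shared), max_bucket_count: 100 };
    range.add_count(60);
    histogram.add_count(60);
    // Either aggregation now sees the combined total and aborts.
    assert!(range.validate().is_err());
}
```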
@@ -24,7 +24,7 @@ where TScore: Clone + PartialOrd
/// A custom segment scorer makes it possible to define any kind of score
/// for a given document belonging to a specific segment.
///
/// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
/// It is the segment local version of the [`CustomScorer`].
pub trait CustomSegmentScorer<TScore>: 'static {
/// Computes the score of a specific `doc`.
fn score(&mut self, doc: DocId) -> TScore;
@@ -36,9 +36,9 @@ pub trait CustomSegmentScorer<TScore>: 'static {
/// Instead, it helps constructing `Self::Child` instances that will compute
/// the score at a segment scale.
pub trait CustomScorer<TScore>: Sync {
/// Type of the associated [`CustomSegmentScorer`](./trait.CustomSegmentScorer.html).
/// Type of the associated [`CustomSegmentScorer`].
type Child: CustomSegmentScorer<TScore>;
/// Builds a child scorer for a specific segment. The child scorer is associated to
/// Builds a child scorer for a specific segment. The child scorer is associated with
/// a specific segment.
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
}

@@ -67,10 +67,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// (e.g. `/category/fiction`, `/category/biography`, `/category/personal_development`).
///
/// Once collection is finished, you can harvest its results in the form
/// of a `FacetCounts` object, and extract your face t counts from it.
/// of a [`FacetCounts`] object, and extract your facet counts from it.
///
/// This implementation assumes you are working with a number of facets that
/// is much hundreds of time lower than your number of documents.
/// is many hundreds of times smaller than your number of documents.
///
///
/// ```rust
@@ -91,7 +91,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// // a document can be associated to any number of facets
/// // a document can be associated with any number of facets
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// facet => Facet::from("/lang/en"),
@@ -231,7 +231,7 @@ impl FacetCollector {
///
/// Adding two facets within which one is the prefix of the other is forbidden.
/// If you need the correct number of unique documents for two such facets,
/// just add them in separate `FacetCollector`.
/// just add them in a separate `FacetCollector`.
pub fn add_facet<T>(&mut self, facet_from: T)
where Facet: From<T> {
let facet = Facet::from(facet_from);
@@ -271,8 +271,8 @@ impl Collector for FacetCollector {
let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
if facet_streamer.advance() {
'outer: loop {
// at the begining of this loop, facet_streamer
// is positionned on a term that has not been processed yet.
// at the beginning of this loop, facet_streamer
// is positioned on a term that has not been processed yet.
let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
match skip_result {
SkipResult::Found => {
@@ -338,11 +338,7 @@ impl SegmentCollector for FacetSegmentCollector {
let mut previous_collapsed_ord: usize = usize::MAX;
for &facet_ord in &self.facet_ords_buf {
let collapsed_ord = self.collapse_mapping[facet_ord as usize];
self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
0
} else {
1
};
self.counts[collapsed_ord] += u64::from(collapsed_ord != previous_collapsed_ord);
previous_collapsed_ord = collapsed_ord;
}
}
@@ -391,7 +387,7 @@ impl<'a> Iterator for FacetChildIterator<'a> {

impl FacetCounts {
/// Returns an iterator over all of the facet count pairs inside this result.
/// See the documentation for [FacetCollector] for a usage example.
/// See the documentation for [`FacetCollector`] for a usage example.
pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
where Facet: From<T> {
let facet = Facet::from(facet_from);
@@ -410,7 +406,7 @@ impl FacetCounts {
}
}

/// Returns a vector of top `k` facets with their counts, sorted highest-to-lowest by counts.
/// See the documentation for [FacetCollector] for a usage example.
/// See the documentation for [`FacetCollector`] for a usage example.
pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
where Facet: From<T> {
let mut heap = BinaryHeap::with_capacity(k);
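The branchless counting line above (`u64::from(collapsed_ord != previous_collapsed_ord)`) counts a document at most once per collapsed facet, relying on the ordinals of one document arriving in sorted order. The idea in isolation:

```rust
// Count unique values in a sorted run by comparing against the previous one,
// mirroring the collapsed-ordinal counting in the facet segment collector.
fn count_unique_sorted(ords: &[usize], counts: &mut [u64]) {
    let mut previous = usize::MAX;
    for &ord in ords {
        // Adds 1 only when we move to a new ordinal; branchless via u64::from.
        counts[ord] += u64::from(ord != previous);
        previous = ord;
    }
}

fn main() {
    let mut counts = vec![0u64; 3];
    // One document with facet ordinals [0, 0, 2]: ordinal 0 is counted once.
    count_unique_sorted(&[0, 0, 2], &mut counts);
    assert_eq!(counts, vec![1, 0, 1]);
}
```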
@@ -10,9 +10,12 @@
// ---
// Importing tantivy...
use std::marker::PhantomData;
use std::sync::Arc;

use fastfield_codecs::Column;

use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::fastfield::FastValue;
use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError};

@@ -158,7 +161,7 @@ where
TPredicate: 'static,
TPredicateValue: FastValue,
{
fast_field_reader: DynamicFastFieldReader<TPredicateValue>,
fast_field_reader: Arc<dyn Column<TPredicateValue>>,
segment_collector: TSegmentCollector,
predicate: TPredicate,
t_predicate_value: PhantomData<TPredicateValue>,
@@ -174,7 +177,7 @@ where
type Fruit = TSegmentCollector::Fruit;

fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get(doc);
let value = self.fast_field_reader.get_val(doc as u64);
if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
}
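`FilterSegmentCollector::collect` above reads one fast field value per matching document and forwards the hit downstream only when the predicate accepts the value. The same shape, reduced to plain types (a sketch, not the actual tantivy API):

```rust
// A filtering wrapper in the spirit of FilterSegmentCollector: read a value
// for the doc, apply the predicate, and forward matching docs downstream.
struct FilterCollector<P: Fn(u64) -> bool> {
    values: Vec<u64>, // stand-in for the fast field column
    predicate: P,
    collected: Vec<u32>,
}

impl<P: Fn(u64) -> bool> FilterCollector<P> {
    fn collect(&mut self, doc: u32) {
        // Corresponds to fast_field_reader.get_val(doc as u64) above.
        let value = self.values[doc as usize];
        if (self.predicate)(value) {
            self.collected.push(doc);
        }
    }
}

fn main() {
    let mut collector = FilterCollector {
        values: vec![5, 50, 500],
        predicate: |v| v >= 50,
        collected: Vec::new(),
    };
    for doc in 0..3 {
        collector.collect(doc);
    }
    assert_eq!(collector.collected, vec![1, 2]);
}
```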
@@ -1,7 +1,10 @@
use std::sync::Arc;

use fastdivide::DividerU64;
use fastfield_codecs::Column;

use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::fastfield::FastValue;
use crate::schema::{Field, Type};
use crate::{DocId, Score};

@@ -34,7 +37,7 @@ impl HistogramCollector {
/// The scale/range of the histogram is not dynamic. It is required to
/// define it by supplying following parameter:
/// - `min_value`: the minimum value that can be recorded in the histogram.
/// - `bucket_width`: the length of the interval that is associated to each buckets.
/// - `bucket_width`: the length of the interval that is associated with each buckets.
/// - `num_buckets`: The overall number of buckets.
///
/// Together, this parameters define a partition of `[min_value, min_value + num_buckets *
@@ -72,8 +75,7 @@ impl HistogramComputer {
return;
}
let delta = value - self.min_value;
let delta_u64 = delta.to_u64();
let bucket_id: usize = self.divider.divide(delta_u64) as usize;
let bucket_id: usize = self.divider.divide(delta) as usize;
if bucket_id < self.counts.len() {
self.counts[bucket_id] += 1;
}
@@ -85,14 +87,14 @@ impl HistogramComputer {
}
pub struct SegmentHistogramCollector {
histogram_computer: HistogramComputer,
ff_reader: DynamicFastFieldReader<u64>,
ff_reader: Arc<dyn Column<u64>>,
}

impl SegmentCollector for SegmentHistogramCollector {
type Fruit = Vec<u64>;

fn collect(&mut self, doc: DocId, _score: Score) {
let value = self.ff_reader.get(doc);
let value = self.ff_reader.get_val(doc as u64);
self.histogram_computer.add_value(value);
}

@@ -287,7 +289,7 @@ mod tests {
DateTime::from_primitive(
Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?,
),
3600 * 24 * 365, // it is just for a unit test... sorry leap years.
3_600_000_000 * 24 * 365, // it is just for a unit test... sorry leap years.
10,
);
let week_histogram = searcher.search(&all_query, &week_histogram_collector)?;
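The `HistogramComputer` above maps a value to its bucket with one subtraction and one precomputed division (`fastdivide`'s `DividerU64`). With a plain division in place of the precomputed divider, the arithmetic reduces to this sketch:

```rust
// Histogram bucketing as in HistogramComputer, with a plain division in place
// of fastdivide's precomputed DividerU64.
struct HistogramComputer {
    min_value: u64,
    bucket_width: u64,
    counts: Vec<u64>,
}

impl HistogramComputer {
    fn add_value(&mut self, value: u64) {
        if value < self.min_value {
            return; // below the histogram range
        }
        let delta = value - self.min_value;
        let bucket_id = (delta / self.bucket_width) as usize;
        // Values beyond the last bucket are silently dropped.
        if bucket_id < self.counts.len() {
            self.counts[bucket_id] += 1;
        }
    }
}

fn main() {
    let mut hist = HistogramComputer { min_value: 10, bucket_width: 5, counts: vec![0; 4] };
    for v in [10, 11, 14, 15, 29, 31] {
        hist.add_value(v); // 31 falls outside [10, 30) and is ignored
    }
    assert_eq!(hist.counts, vec![3, 1, 0, 1]);
}
```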
@@ -4,13 +4,13 @@
//! In tantivy jargon, we call this information your search "fruit".
//!
//! Your fruit could for instance be :
//! - [the count of matching documents](./struct.Count.html)
//! - [the top 10 documents, by relevancy or by a fast field](./struct.TopDocs.html)
//! - [facet counts](./struct.FacetCollector.html)
//! - [the count of matching documents](crate::collector::Count)
//! - [the top 10 documents, by relevancy or by a fast field](crate::collector::TopDocs)
//! - [facet counts](FacetCollector)
//!
//! At one point in your code, you will trigger the actual search operation by calling
//! [the `search(...)` method of your `Searcher` object](../struct.Searcher.html#method.search).
//! This call will look like this.
//! At some point in your code, you will trigger the actual search operation by calling
//! [`Searcher::search()`](crate::Searcher::search).
//! This call will look like this:
//!
//! ```verbatim
//! let fruit = searcher.search(&query, &collector)?;
@@ -64,7 +64,7 @@
//!
//! The `Collector` trait is implemented for up to 4 collectors.
//! If you have more than 4 collectors, you can either group them into
//! tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`](./struct.MultiCollector.html).
//! tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`].
//!
//! # Combining several collectors dynamically
//!
@@ -74,7 +74,7 @@
//!
//! Unfortunately it requires you to know at compile time your collector types.
//! If on the other hand, the collectors depend on some query parameter,
//! you can rely on `MultiCollector`'s.
//! you can rely on [`MultiCollector`]'s.
//!
//!
//! # Implementing your own collectors.
@@ -142,7 +142,7 @@ pub trait Collector: Sync + Send {
/// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;

/// Type of the `SegmentCollector` associated to this collector.
/// Type of the `SegmentCollector` associated with this collector.
type Child: SegmentCollector;

/// `set_segment` is called before beginning to enumerate
@@ -156,7 +156,7 @@ pub trait Collector: Sync + Send {
/// Returns true iff the collector requires to compute scores for documents.
fn requires_scoring(&self) -> bool;

/// Combines the fruit associated to the collection of each segments
/// Combines the fruit associated with the collection of each segments
/// into one fruit.
fn merge_fruits(
&self,
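As the module docs above mention, several collectors can run in one pass by grouping them into a tuple, whose fruit is the tuple of the individual fruits. In the same `verbatim` style as those docs (`searcher` and `query` assumed to exist):

```verbatim
// Count and TopDocs run in a single pass over the matching documents.
let (count, top_docs) = searcher.search(&query, &(Count, TopDocs::with_limit(10)))?;
```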
@@ -1,7 +1,11 @@
use std::sync::Arc;

use fastfield_codecs::Column;

use super::*;
use crate::collector::{Count, FilterCollector, TopDocs};
use crate::core::SegmentReader;
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader};
use crate::fastfield::BytesFastFieldReader;
use crate::query::{AllQuery, QueryParser};
use crate::schema::{Field, Schema, FAST, TEXT};
use crate::time::format_description::well_known::Rfc3339;
@@ -69,10 +73,8 @@ pub fn test_filter_collector() -> crate::Result<()> {

/// Stores all of the doc ids.
/// This collector is only used for tests.
/// It is unusable in pr
///
/// actise, as it does not store
/// the segment ordinals
/// It is unusable in practise, as it does
/// not store the segment ordinals
pub struct TestCollector {
pub compute_score: bool,
}
@@ -158,7 +160,7 @@ pub struct FastFieldTestCollector {

pub struct FastFieldSegmentCollector {
vals: Vec<u64>,
reader: DynamicFastFieldReader<u64>,
reader: Arc<dyn Column<u64>>,
}

impl FastFieldTestCollector {
@@ -199,7 +201,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
type Fruit = Vec<u64>;

fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.reader.get(doc);
let val = self.reader.get_val(doc as u64);
self.vals.push(val);
}

@@ -265,7 +267,7 @@ impl SegmentCollector for BytesFastFieldSegmentCollector {
}
}

fn make_test_searcher() -> crate::Result<crate::LeasedItem<Searcher>> {
fn make_test_searcher() -> crate::Result<Searcher> {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;

@@ -137,7 +137,7 @@ where T: PartialOrd + Clone
/// sorted by type `T`.
///
/// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
pub(crate) struct TopSegmentCollector<T> {
limit: usize,
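The `O(n log K)` complexity quoted above comes from maintaining a bounded min-heap: each of the `n` candidates costs at most one `log K` heap operation, and the heap root is always the weakest of the current top `K`. A self-contained sketch using `std::collections::BinaryHeap` with `Reverse`:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Keep the top `k` largest scores out of a stream: O(n log k).
fn top_k(scores: impl Iterator<Item = u64>, k: usize) -> Vec<u64> {
    // Reverse turns the max-heap into a min-heap, so the root is the
    // weakest of the current top-k and can be evicted cheaply.
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(k);
    for score in scores {
        if heap.len() < k {
            heap.push(Reverse(score));
        } else if score > heap.peek().unwrap().0 {
            heap.pop();
            heap.push(Reverse(score));
        }
    }
    let mut result: Vec<u64> = heap.into_iter().map(|Reverse(s)| s).collect();
    result.sort_unstable_by(|a, b| b.cmp(a));
    result
}

fn main() {
    assert_eq!(top_k([5, 1, 9, 3, 7, 8].into_iter(), 3), vec![9, 8, 7]);
}
```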
@@ -1,6 +1,9 @@
use std::collections::BinaryHeap;
use std::fmt;
use std::marker::PhantomData;
use std::sync::Arc;

use fastfield_codecs::Column;

use super::Collector;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
@@ -9,7 +12,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::fastfield::FastValue;
use crate::query::Weight;
use crate::schema::Field;
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
@@ -79,7 +82,7 @@ where
/// sorted by their score.
///
/// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// This collector guarantees a stable sorting in case of a tie on the
@@ -129,12 +132,12 @@ impl fmt::Debug for TopDocs {
}

struct ScorerByFastFieldReader {
ff_reader: DynamicFastFieldReader<u64>,
ff_reader: Arc<dyn Column<u64>>,
}

impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 {
self.ff_reader.get(doc)
self.ff_reader.get_val(doc as u64)
}
}

@@ -283,8 +286,8 @@ impl TopDocs {
///
/// # See also
///
/// To confortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method.
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to
/// the [.order_by_fast_field(...)](TopDocs::order_by_fast_field) method.
pub fn order_by_u64_field(
self,
field: Field,
@@ -381,7 +384,7 @@ impl TopDocs {
///
/// This method offers a convenient way to tweak or replace
/// the documents score. As suggested by the prototype you can
/// manually define your own [`ScoreTweaker`](./trait.ScoreTweaker.html)
/// manually define your own [`ScoreTweaker`]
/// and pass it as an argument, but there is a much simpler way to
/// tweak your score: you can use a closure as in the following
/// example.
@@ -398,7 +401,7 @@ impl TopDocs {
/// In the following example will will tweak our ranking a bit by
/// boosting popular products a notch.
///
/// In more serious application, this tweaking could involved running a
/// In more serious application, this tweaking could involve running a
/// learning-to-rank model over various features
///
/// ```rust
@@ -407,7 +410,6 @@ impl TopDocs {
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::fastfield::FastFieldReader;
/// use tantivy::schema::Field;
///
/// fn create_schema() -> Schema {
@@ -456,7 +458,7 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| {
/// let popularity: u64 = popularity_reader.get(doc);
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
@@ -472,7 +474,7 @@ impl TopDocs {
/// ```
///
/// # See also
/// [custom_score(...)](#method.custom_score).
/// - [custom_score(...)](TopDocs::custom_score)
pub fn tweak_score<TScore, TScoreSegmentTweaker, TScoreTweaker>(
self,
score_tweaker: TScoreTweaker,
@@ -489,8 +491,7 @@ impl TopDocs {
///
/// This method offers a convenient way to use a different score.
///
/// As suggested by the prototype you can manually define your
/// own [`CustomScorer`](./trait.CustomScorer.html)
/// As suggested by the prototype you can manually define your own [`CustomScorer`]
/// and pass it as an argument, but there is a much simpler way to
/// tweak your score: you can use a closure as in the following
/// example.
@@ -499,7 +500,7 @@ impl TopDocs {
///
/// This method only makes it possible to compute the score from a given
/// `DocId`, fastfield values for the doc and any information you could
/// have precomputed beforehands. It does not make it possible for instance
/// have precomputed beforehand. It does not make it possible for instance
/// to compute something like TfIdf as it does not have access to the list of query
/// terms present in the document, nor the term frequencies for the different terms.
///
@@ -515,7 +516,6 @@ impl TopDocs {
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
/// use tantivy::fastfield::FastFieldReader;
///
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
@@ -567,8 +567,8 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId| {
/// let popularity: u64 = popularity_reader.get(doc);
/// let boosted: u64 = boosted_reader.get(doc);
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// let boosted: u64 = boosted_reader.get_val(doc as u64);
/// // Score do not have to be `f64` in tantivy.
/// // Here we return a couple to get lexicographical order
/// // for free.
@@ -587,7 +587,7 @@ impl TopDocs {
/// ```
///
/// # See also
/// [tweak_score(...)](#method.tweak_score).
/// - [tweak_score(...)](TopDocs::tweak_score)
pub fn custom_score<TScore, TCustomSegmentScorer, TCustomScorer>(
self,
custom_score: TCustomScorer,
@@ -693,7 +693,7 @@ impl Collector for TopDocs {
}
}

/// Segment Collector associated to `TopDocs`.
/// Segment Collector associated with `TopDocs`.
pub struct TopScoreSegmentCollector(TopSegmentCollector<Score>);

impl SegmentCollector for TopScoreSegmentCollector {
@@ -24,7 +24,7 @@ where TScore: Clone + PartialOrd
/// A `ScoreSegmentTweaker` makes it possible to modify the default score
/// for a given document belonging to a specific segment.
///
/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
/// It is the segment local version of the [`ScoreTweaker`].
pub trait ScoreSegmentTweaker<TScore>: 'static {
    /// Tweak the given `score` for the document `doc`.
    fn score(&mut self, doc: DocId, score: Score) -> TScore;
@@ -37,10 +37,10 @@ pub trait ScoreSegmentTweaker<TScore>: 'static {
/// Instead, it helps construct `Self::Child` instances that will compute
/// the score at a segment scale.
pub trait ScoreTweaker<TScore>: Sync {
    /// Type of the associated [`ScoreSegmentTweaker`](./trait.ScoreSegmentTweaker.html).
    /// Type of the associated [`ScoreSegmentTweaker`].
    type Child: ScoreSegmentTweaker<TScore>;

    /// Builds a child tweaker for a specific segment. The child scorer is associated to
    /// Builds a child tweaker for a specific segment. The child scorer is associated with
    /// a specific segment.
    fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
}

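Closures implement both traits, so the usual entry point is `TopDocs::tweak_score`. A minimal sketch, assuming a `boost_field` declared as an f64 fast field and a `searcher`/`query` already in scope (the `get_val` accessor follows the fast-field API used elsewhere in this changeset):

```rust
use tantivy::collector::TopDocs;
use tantivy::{DocId, Score, SegmentReader};

// The outer closure plays the role of the ScoreTweaker; it is called once
// per segment and returns the per-segment ScoreSegmentTweaker closure.
let collector = TopDocs::with_limit(10).tweak_score(move |segment_reader: &SegmentReader| {
    let boost_reader = segment_reader
        .fast_fields()
        .f64(boost_field)
        .expect("boost must be an f64 fast field");
    move |doc: DocId, original_score: Score| {
        original_score * boost_reader.get_val(doc as u64) as f32
    }
});
let _top_docs = searcher.search(&query, &collector)?;
```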
@@ -7,6 +7,7 @@ use std::sync::Arc;

use super::segment::Segment;
use super::IndexSettings;
use crate::core::single_segment_index_writer::SingleSegmentIndexWriter;
use crate::core::{
    Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH,
};
@@ -16,9 +17,9 @@ use crate::directory::MmapDirectory;
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
use crate::error::{DataCorruption, TantivyError};
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
use crate::indexer::segment_updater::save_new_metas;
use crate::indexer::segment_updater::save_metas;
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::{Field, FieldType, Schema};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter;

@@ -47,10 +48,38 @@ fn load_metas(
        .map_err(From::from)
}

/// Save the index meta file.
/// This operation is atomic:
/// Either
/// - it fails, in which case an error is returned
///   and the `meta.json` remains untouched,
/// - it succeeds, and `meta.json` is written
///   and flushed.
///
/// This method is not part of tantivy's public API.
fn save_new_metas(
    schema: Schema,
    index_settings: IndexSettings,
    directory: &dyn Directory,
) -> crate::Result<()> {
    save_metas(
        &IndexMeta {
            index_settings,
            segments: Vec::new(),
            schema,
            opstamp: 0u64,
            payload: None,
        },
        directory,
    )?;
    directory.sync_directory()?;
    Ok(())
}

/// IndexBuilder can be used to create an index.
///
/// Use in conjunction with `SchemaBuilder`. Global index settings
/// can be configured with `IndexSettings`
/// Use in conjunction with [`SchemaBuilder`][crate::schema::SchemaBuilder].
/// Global index settings can be configured with [`IndexSettings`].
///
/// # Examples
///
@@ -68,7 +97,13 @@ fn load_metas(
/// );
///
/// let schema = schema_builder.build();
/// let settings = IndexSettings{sort_by_field: Some(IndexSortByField{field:"number".to_string(), order:Order::Asc}), ..Default::default()};
/// let settings = IndexSettings{
///     sort_by_field: Some(IndexSortByField{
///         field: "number".to_string(),
///         order: Order::Asc
///     }),
///     ..Default::default()
/// };
/// let index = Index::builder().schema(schema).settings(settings).create_in_ram();
/// ```
pub struct IndexBuilder {
@@ -111,21 +146,20 @@ impl IndexBuilder {
        self
    }

    /// Creates a new index using the `RAMDirectory`.
    /// Creates a new index using the [`RamDirectory`].
    ///
    /// The index will be allocated in anonymous memory.
    /// This should only be used for unit tests.
    pub fn create_in_ram(self) -> Result<Index, TantivyError> {
        let ram_directory = RamDirectory::create();
        Ok(self
            .create(ram_directory)
            .expect("Creating a RAMDirectory should never fail"))
        self.create(ram_directory)
    }

    /// Creates a new index in a given filepath.
    /// The index will use the `MMapDirectory`.
    /// The index will use the [`MmapDirectory`].
    ///
    /// If a previous index was in this directory, it returns an `IndexAlreadyExists` error.
    /// If a previous index was in this directory, it returns an
    /// [`TantivyError::IndexAlreadyExists`] error.
    #[cfg(feature = "mmap")]
    pub fn create_in_dir<P: AsRef<Path>>(self, directory_path: P) -> crate::Result<Index> {
        let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::open(directory_path)?);
@@ -135,14 +169,34 @@ impl IndexBuilder {
        self.create(mmap_directory)
    }

    /// Dragons ahead!!!
    ///
    /// The point of this API is to let users create a simple index with a single segment
    /// and without starting any thread.
    ///
    /// Do not use this method if you are not sure what you are doing.
    ///
    /// It expects an originally empty directory, and will not run any GC operation.
    #[doc(hidden)]
    pub fn single_segment_index_writer(
        self,
        dir: impl Into<Box<dyn Directory>>,
        mem_budget: usize,
    ) -> crate::Result<SingleSegmentIndexWriter> {
        let index = self.create(dir)?;
        let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
        Ok(index_simple_writer)
    }

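A minimal sketch of the intended call pattern, mirroring the unit test added later in this changeset (the schema and the 15 MB budget are illustrative assumptions):

```rust
use tantivy::directory::RamDirectory;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

// Inside a function returning tantivy::Result<()>.
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();

// No merge or GC threads are spawned; everything lands in one segment.
let mut writer = Index::builder()
    .schema(schema)
    .single_segment_index_writer(RamDirectory::create(), 15_000_000)?;
writer.add_document(doc!(text_field => "hello"))?;
let _index = writer.finalize()?;
```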
    /// Creates a new index in a temp directory.
    ///
    /// The index will use the `MMapDirectory` in a newly created directory.
    /// The temp directory will be destroyed automatically when the `Index` object
    /// The index will use the [`MmapDirectory`] in a newly created directory.
    /// The temp directory will be destroyed automatically when the [`Index`] object
    /// is destroyed.
    ///
    /// The temp directory is only used for testing the `MmapDirectory`.
    /// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`.
    /// The temp directory is only used for testing the [`MmapDirectory`].
    /// For other unit tests, prefer the [`RamDirectory`], see:
    /// [`IndexBuilder::create_in_ram()`].
    #[cfg(feature = "mmap")]
    pub fn create_from_tempdir(self) -> crate::Result<Index> {
        let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::create_from_tempdir()?);
@@ -172,10 +226,44 @@ impl IndexBuilder {
            ))
        }
    }

    fn validate(&self) -> crate::Result<()> {
        if let Some(schema) = self.schema.as_ref() {
            if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
                let schema_field = schema.get_field(&sort_by_field.field).ok_or_else(|| {
                    TantivyError::InvalidArgument(format!(
                        "Field to sort index {} not found in schema",
                        sort_by_field.field
                    ))
                })?;
                let entry = schema.get_field_entry(schema_field);
                if !entry.is_fast() {
                    return Err(TantivyError::InvalidArgument(format!(
                        "Field {} is no fast field. Field needs to be a single value fast field \
                         to be used to sort an index",
                        sort_by_field.field
                    )));
                }
                if entry.field_type().fastfield_cardinality() != Some(Cardinality::SingleValue) {
                    return Err(TantivyError::InvalidArgument(format!(
                        "Only single value fast field Cardinality supported for sorting index {}",
                        sort_by_field.field
                    )));
                }
            }
            Ok(())
        } else {
            Err(TantivyError::InvalidArgument(
                "no schema passed".to_string(),
            ))
        }
    }

    /// Creates a new index given an implementation of the trait `Directory`.
    ///
    /// If a directory previously existed, it will be erased.
    fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
        self.validate()?;
        let dir = dir.into();
        let directory = ManagedDirectory::wrap(dir)?;
        save_new_metas(
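Concretely, `validate()` only accepts a sort field that exists in the schema and is declared as a single-valued fast field. A sketch of a schema/settings pair that passes validation (assuming it runs inside a function returning `tantivy::Result<()>`):

```rust
use tantivy::schema::{Schema, FAST};
use tantivy::{Index, IndexSettings, IndexSortByField, Order};

let mut schema_builder = Schema::builder();
schema_builder.add_u64_field("number", FAST); // single-valued fast field
let schema = schema_builder.build();

let settings = IndexSettings {
    sort_by_field: Some(IndexSortByField {
        field: "number".to_string(),
        order: Order::Asc,
    }),
    ..Default::default()
};
let _index = Index::builder()
    .schema(schema)
    .settings(settings)
    .create_in_ram()?;
```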
@@ -232,13 +320,13 @@ impl Index {
    }

    /// Replace the default single thread search executor pool
    /// by a thread pool with a given number of threads.
    /// by a thread pool with as many threads as there are CPUs on the system.
    pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
        let default_num_threads = num_cpus::get();
        self.set_multithread_executor(default_num_threads)
    }

    /// Creates a new index using the `RamDirectory`.
    /// Creates a new index using the [`RamDirectory`].
    ///
    /// The index will be allocated in anonymous memory.
    /// This is useful for indexing a small set of documents
@@ -248,9 +336,10 @@ impl Index {
    }

    /// Creates a new index in a given filepath.
    /// The index will use the `MMapDirectory`.
    /// The index will use the [`MmapDirectory`].
    ///
    /// If a previous index was in this directory, then it returns an `IndexAlreadyExists` error.
    /// If a previous index was in this directory, then it returns
    /// a [`TantivyError::IndexAlreadyExists`] error.
    #[cfg(feature = "mmap")]
    pub fn create_in_dir<P: AsRef<Path>>(
        directory_path: P,
@@ -272,12 +361,13 @@ impl Index {

    /// Creates a new index in a temp directory.
    ///
    /// The index will use the `MMapDirectory` in a newly created directory.
    /// The temp directory will be destroyed automatically when the `Index` object
    /// The index will use the [`MmapDirectory`] in a newly created directory.
    /// The temp directory will be destroyed automatically when the [`Index`] object
    /// is destroyed.
    ///
    /// The temp directory is only used for testing the `MmapDirectory`.
    /// For other unit tests, prefer the `RamDirectory`, see: `create_in_ram`.
    /// The temp directory is only used for testing the [`MmapDirectory`].
    /// For other unit tests, prefer the [`RamDirectory`],
    /// see: [`IndexBuilder::create_in_ram()`].
    #[cfg(feature = "mmap")]
    pub fn create_from_tempdir(schema: Schema) -> crate::Result<Index> {
        IndexBuilder::new().schema(schema).create_from_tempdir()
@@ -297,7 +387,7 @@ impl Index {
        builder.create(dir)
    }

    /// Creates a new index given a directory and an `IndexMeta`.
    /// Creates a new index given a directory and an [`IndexMeta`].
    fn open_from_metas(
        directory: ManagedDirectory,
        metas: &IndexMeta,
@@ -324,7 +414,7 @@ impl Index {
        &self.tokenizers
    }

    /// Helper to access the tokenizer associated to a specific field.
    /// Get the tokenizer associated with a specific field.
    pub fn tokenizer_for_field(&self, field: Field) -> crate::Result<TextAnalyzer> {
        let field_entry = self.schema.get_field_entry(field);
        let field_type = field_entry.field_type();
@@ -356,18 +446,17 @@ impl Index {
        })
    }

    /// Create a default `IndexReader` for the given index.
    /// Create a default [`IndexReader`] for the given index.
    ///
    /// See [`Index.reader_builder()`](#method.reader_builder).
    /// See [`Index.reader_builder()`].
    pub fn reader(&self) -> crate::Result<IndexReader> {
        self.reader_builder().try_into()
    }

    /// Create a `IndexReader` for the given index.
    /// Create a [`IndexReader`] for the given index.
    ///
    /// Most project should create at most one reader for a given index.
    /// This method is typically called only once per `Index` instance,
    /// over the lifetime of most problem.
    /// This method is typically called only once per `Index` instance.
    pub fn reader_builder(&self) -> IndexReaderBuilder {
        IndexReaderBuilder::new(self.clone())
    }
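A usage sketch for the reader API discussed above, assuming an `index` in scope:

```rust
use tantivy::{IndexReader, ReloadPolicy};

// Most programs build exactly one reader and keep it for the lifetime
// of the index; searchers cloned from it are cheap snapshots.
let reader: IndexReader = index
    .reader_builder()
    .reload_policy(ReloadPolicy::OnCommit)
    .try_into()?;
let searcher = reader.searcher();
```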
@@ -581,10 +670,12 @@ impl fmt::Debug for Index {

#[cfg(test)]
mod tests {
    use crate::collector::Count;
    use crate::directory::{RamDirectory, WatchCallback};
    use crate::schema::{Field, Schema, INDEXED, TEXT};
    use crate::query::TermQuery;
    use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, TEXT};
    use crate::tokenizer::TokenizerManager;
    use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy};
    use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy, Term};

    #[test]
    fn test_indexer_for_field() {
@@ -850,4 +941,28 @@ mod tests {
        );
        Ok(())
    }

    #[test]
    fn test_single_segment_index_writer() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let directory = RamDirectory::default();
        let mut single_segment_index_writer = Index::builder()
            .schema(schema)
            .single_segment_index_writer(directory, 10_000_000)?;
        for _ in 0..10 {
            let doc = doc!(text_field=>"hello");
            single_segment_index_writer.add_document(doc)?;
        }
        let index = single_segment_index_writer.finalize()?;
        let searcher = index.reader()?.searcher();
        let term_query = TermQuery::new(
            Term::from_field_text(text_field, "hello"),
            IndexRecordOption::Basic,
        );
        let count = searcher.search(&term_query, &Count)?;
        assert_eq!(count, 10);
        Ok(())
    }
}

@@ -130,7 +130,7 @@ impl SegmentMeta {
    /// Returns the relative path of a component of our segment.
    ///
    /// It just joins the segment id with the extension
    /// associated to a segment component.
    /// associated with a segment component.
    pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
        let mut path = self.id().uuid_string();
        path.push_str(&*match component {
@@ -235,6 +235,14 @@ impl InnerSegmentMeta {
    }
}

fn return_true() -> bool {
    true
}

fn is_true(val: &bool) -> bool {
    *val
}

/// Search Index Settings.
///
/// Contains settings which are applied on the whole
@@ -248,6 +256,12 @@ pub struct IndexSettings {
    /// The `Compressor` used to compress the doc store.
    #[serde(default)]
    pub docstore_compression: Compressor,
    /// If set to true, docstore compression will happen on a dedicated thread.
    /// (default: true)
    #[doc(hidden)]
    #[serde(default = "return_true")]
    #[serde(skip_serializing_if = "is_true")]
    pub docstore_compress_dedicated_thread: bool,
    #[serde(default = "default_docstore_blocksize")]
    /// The size of each block that will be compressed and written to disk
    pub docstore_blocksize: usize,
@@ -264,13 +278,14 @@ impl Default for IndexSettings {
            sort_by_field: None,
            docstore_compression: Compressor::default(),
            docstore_blocksize: default_docstore_blocksize(),
            docstore_compress_dedicated_thread: true,
        }
    }
}

/// Settings to presort the documents in an index
///
/// Presorting documents can greatly performance
/// Presorting documents can greatly improve performance
/// in some scenarios, by applying top n
/// optimizations.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
@@ -311,13 +326,13 @@ pub struct IndexMeta {
    /// `IndexSettings` to configure index options.
    #[serde(default)]
    pub index_settings: IndexSettings,
    /// List of `SegmentMeta` informations associated to each finalized segment of the index.
    /// List of `SegmentMeta` information associated with each finalized segment of the index.
    pub segments: Vec<SegmentMeta>,
    /// Index `Schema`
    pub schema: Schema,
    /// Opstamp associated to the last `commit` operation.
    /// Opstamp associated with the last `commit` operation.
    pub opstamp: Opstamp,
    /// Payload associated to the last commit.
    /// Payload associated with the last commit.
    ///
    /// Upon commit, clients can optionally add a small `String` payload to their commit
    /// to help identify this commit.
@@ -326,7 +341,7 @@ pub struct IndexMeta {
    pub payload: Option<String>,
}

#[derive(Deserialize)]
#[derive(Deserialize, Debug)]
struct UntrackedIndexMeta {
    pub segments: Vec<InnerSegmentMeta>,
    #[serde(default)]
@@ -395,6 +410,7 @@ mod tests {
    use super::IndexMeta;
    use crate::core::index_meta::UntrackedIndexMeta;
    use crate::schema::{Schema, TEXT};
    use crate::store::{Compressor, ZstdCompressor};
    use crate::{IndexSettings, IndexSortByField, Order};

    #[test]
@@ -428,4 +444,104 @@ mod tests {
        assert_eq!(index_metas.schema, deser_meta.schema);
        assert_eq!(index_metas.opstamp, deser_meta.opstamp);
    }

    #[test]
    fn test_serialize_metas_zstd_compressor() {
        let schema = {
            let mut schema_builder = Schema::builder();
            schema_builder.add_text_field("text", TEXT);
            schema_builder.build()
        };
        let index_metas = IndexMeta {
            index_settings: IndexSettings {
                sort_by_field: Some(IndexSortByField {
                    field: "text".to_string(),
                    order: Order::Asc,
                }),
                docstore_compression: crate::store::Compressor::Zstd(ZstdCompressor {
                    compression_level: Some(4),
                }),
                docstore_blocksize: 1_000_000,
                docstore_compress_dedicated_thread: true,
            },
            segments: Vec::new(),
            schema,
            opstamp: 0u64,
            payload: None,
        };
        let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
        assert_eq!(
            json,
            r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(compression_level=4)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
        );

        let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
        assert_eq!(index_metas.index_settings, deser_meta.index_settings);
        assert_eq!(index_metas.schema, deser_meta.schema);
        assert_eq!(index_metas.opstamp, deser_meta.opstamp);
    }

    #[test]
    fn test_serialize_metas_invalid_comp() {
        let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zsstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;

        let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
        assert_eq!(
            err.to_string(),
            "unknown variant `zsstd`, expected one of `none`, `lz4`, `brotli`, `snappy`, `zstd`, \
             `zstd(compression_level=5)` at line 1 column 96"
                .to_string()
        );

        let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(bla=10)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;

        let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
        assert_eq!(
            err.to_string(),
            "unknown zstd option \"bla\" at line 1 column 103".to_string()
        );
    }

    #[test]
    #[cfg(feature = "lz4-compression")]
    fn test_index_settings_default() {
        let mut index_settings = IndexSettings::default();
        assert_eq!(
            index_settings,
            IndexSettings {
                sort_by_field: None,
                docstore_compression: Compressor::default(),
                docstore_compress_dedicated_thread: true,
                docstore_blocksize: 16_384
            }
        );
        {
            let index_settings_json = serde_json::to_value(&index_settings).unwrap();
            assert_eq!(
                index_settings_json,
                serde_json::json!({
                    "docstore_compression": "lz4",
                    "docstore_blocksize": 16384
                })
            );
            let index_settings_deser: IndexSettings =
                serde_json::from_value(index_settings_json).unwrap();
            assert_eq!(index_settings_deser, index_settings);
        }
        {
            index_settings.docstore_compress_dedicated_thread = false;
            let index_settings_json = serde_json::to_value(&index_settings).unwrap();
            assert_eq!(
                index_settings_json,
                serde_json::json!({
                    "docstore_compression": "lz4",
                    "docstore_blocksize": 16384,
                    "docstore_compress_dedicated_thread": false,
                })
            );
            let index_settings_deser: IndexSettings =
                serde_json::from_value(index_settings_json).unwrap();
            assert_eq!(index_settings_deser, index_settings);
        }
    }
}

@@ -9,18 +9,17 @@ use crate::schema::{IndexRecordOption, Term};
use crate::termdict::TermDictionary;

/// The inverted index reader is in charge of accessing
/// the inverted index associated to a specific field.
/// the inverted index associated with a specific field.
///
/// # Note
///
/// It is safe to delete the segment associated to
/// It is safe to delete the segment associated with
/// an `InvertedIndexReader`. As long as it is open,
/// the `FileSlice` it is relying on should
/// the [`FileSlice`] it is relying on should
/// stay available.
///
///
/// `InvertedIndexReader` are created by calling
/// the `SegmentReader`'s [`.inverted_index(...)`] method
/// [`SegmentReader::inverted_index()`](crate::SegmentReader::inverted_index).
pub struct InvertedIndexReader {
    termdict: TermDictionary,
    postings_file_slice: FileSlice,
@@ -30,7 +29,7 @@ pub struct InvertedIndexReader {
}

impl InvertedIndexReader {
    #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
    #[allow(clippy::needless_pass_by_value)] // for symmetry
    pub(crate) fn new(
        termdict: TermDictionary,
        postings_file_slice: FileSlice,
@@ -75,7 +74,7 @@ impl InvertedIndexReader {
    ///
    /// This is useful for enumerating through a list of terms,
    /// and consuming the associated posting lists while avoiding
    /// reallocating a `BlockSegmentPostings`.
    /// reallocating a [`BlockSegmentPostings`].
    ///
    /// # Warning
    ///
@@ -96,7 +95,7 @@ impl InvertedIndexReader {
    /// Returns a block postings given a `Term`.
    /// This method is for an advanced usage only.
    ///
    /// Most user should prefer using `read_postings` instead.
    /// Most users should prefer using [`Self::read_postings()`] instead.
    pub fn read_block_postings(
        &self,
        term: &Term,
@@ -110,7 +109,7 @@ impl InvertedIndexReader {
    /// Returns a block postings given a `term_info`.
    /// This method is for an advanced usage only.
    ///
    /// Most user should prefer using `read_postings` instead.
    /// Most users should prefer using [`Self::read_postings()`] instead.
    pub fn read_block_postings_from_terminfo(
        &self,
        term_info: &TermInfo,
@@ -130,7 +129,7 @@ impl InvertedIndexReader {
    /// Returns a posting object given a `term_info`.
    /// This method is for an advanced usage only.
    ///
    /// Most user should prefer using `read_postings` instead.
    /// Most users should prefer using [`Self::read_postings()`] instead.
    pub fn read_postings_from_terminfo(
        &self,
        term_info: &TermInfo,
@@ -164,12 +163,12 @@ impl InvertedIndexReader {
    /// or `None` if the term has never been encountered and indexed.
    ///
    /// If the field was not indexed with the indexing options that cover
    /// the requested options, the returned `SegmentPostings` the method does not fail
    /// the requested options, the method does not fail
    /// and returns a `SegmentPostings` with as much information as possible.
    ///
    /// For instance, requesting `IndexRecordOption::Freq` for a
    /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
    /// with `DocId`s and frequencies.
    /// For instance, requesting [`IndexRecordOption::WithFreqs`] for a
    /// [`TextOptions`](crate::schema::TextOptions) that does not index position
    /// will return a [`SegmentPostings`] with `DocId`s and frequencies.
    pub fn read_postings(
        &self,
        term: &Term,
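A sketch of the typical `read_postings` call that the three advanced methods above defer to (the `segment_reader` and `body_field` bindings are assumptions for illustration):

```rust
use tantivy::schema::IndexRecordOption;
use tantivy::{DocSet, Postings, Term, TERMINATED};

let inverted_index = segment_reader.inverted_index(body_field)?;
let term = Term::from_field_text(body_field, "hello");
if let Some(mut postings) =
    inverted_index.read_postings(&term, IndexRecordOption::WithFreqs)?
{
    // The DocSet starts positioned on the first document (or TERMINATED).
    let mut doc = postings.doc();
    while doc != TERMINATED {
        println!("doc={} term_freq={}", doc, postings.term_freq());
        doc = postings.advance();
    }
}
```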
@@ -211,7 +210,7 @@ impl InvertedIndexReader {
    /// Returns a block postings given a `Term`.
    /// This method is for an advanced usage only.
    ///
    /// Most user should prefer using `read_postings` instead.
    /// Most users should prefer using [`Self::read_postings()`] instead.
    pub async fn warm_postings(
        &self,
        term: &Term,
@@ -230,4 +229,13 @@ impl InvertedIndexReader {
        }
        Ok(())
    }

    /// Returns the number of documents containing the term asynchronously.
    pub async fn doc_freq_async(&self, term: &Term) -> crate::AsyncIoResult<u32> {
        Ok(self
            .get_term_info_async(term)
            .await?
            .map(|term_info| term_info.doc_freq)
            .unwrap_or(0u32))
    }
}

@@ -7,6 +7,7 @@ mod segment;
mod segment_component;
mod segment_id;
mod segment_reader;
mod single_segment_index_writer;

use std::path::Path;

@@ -23,6 +24,7 @@ pub use self::segment::Segment;
pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId;
pub use self::segment_reader::SegmentReader;
pub use self::single_segment_index_writer::SingleSegmentIndexWriter;

/// The meta file contains all the information about the list of segments and the schema
/// of the index.

@@ -1,4 +1,5 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{fmt, io};

use crate::collector::Collector;
@@ -6,15 +7,15 @@ use crate::core::{Executor, SegmentReader};
use crate::query::Query;
use crate::schema::{Document, Schema, Term};
use crate::space_usage::SearcherSpaceUsage;
use crate::store::StoreReader;
use crate::store::{CacheStats, StoreReader};
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject};

/// Identifies the searcher generation accessed by a [Searcher].
/// Identifies the searcher generation accessed by a [`Searcher`].
///
/// While this might seem redundant, a [SearcherGeneration] contains
/// While this might seem redundant, a [`SearcherGeneration`] contains
/// both a `generation_id` AND a list of `(SegmentId, DeleteOpstamp)`.
///
/// This is on purpose. This object is used by the `Warmer` API.
/// This is on purpose. This object is used by the [`Warmer`](crate::reader::Warmer) API.
/// Having both information makes it possible to identify which
/// artifact should be refreshed or garbage collected.
///
@@ -62,69 +63,60 @@ impl SearcherGeneration {
///
/// It guarantees that the `Segment` will not be removed before
/// the destruction of the `Searcher`.
#[derive(Clone)]
pub struct Searcher {
    schema: Schema,
    index: Index,
    segment_readers: Vec<SegmentReader>,
    store_readers: Vec<StoreReader>,
    generation: TrackedObject<SearcherGeneration>,
    inner: Arc<SearcherInner>,
}

impl Searcher {
    /// Creates a new `Searcher`
    pub(crate) fn new(
        schema: Schema,
        index: Index,
        segment_readers: Vec<SegmentReader>,
        generation: TrackedObject<SearcherGeneration>,
    ) -> io::Result<Searcher> {
        let store_readers: Vec<StoreReader> = segment_readers
            .iter()
            .map(SegmentReader::get_store_reader)
            .collect::<io::Result<Vec<_>>>()?;
        Ok(Searcher {
            schema,
            index,
            segment_readers,
            store_readers,
            generation,
        })
    }

    /// Returns the `Index` associated to the `Searcher`
    /// Returns the `Index` associated with the `Searcher`
    pub fn index(&self) -> &Index {
        &self.index
        &self.inner.index
    }

    /// [SearcherGeneration] which identifies the version of the snapshot held by this `Searcher`.
    /// [`SearcherGeneration`] which identifies the version of the snapshot held by this `Searcher`.
    pub fn generation(&self) -> &SearcherGeneration {
        self.generation.as_ref()
        self.inner.generation.as_ref()
    }

    /// Fetches a document from tantivy's store given a `DocAddress`.
    /// Fetches a document from tantivy's store given a [`DocAddress`].
    ///
    /// The searcher uses the segment ordinal to route the
    /// the request to the right `Segment`.
    /// request to the right `Segment`.
    pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
        let store_reader = &self.store_readers[doc_address.segment_ord as usize];
        let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
        store_reader.get(doc_address.doc_id)
    }

    /// The cache stats for the underlying store reader.
    ///
    /// Aggregates the sum for each segment store reader.
    pub fn doc_store_cache_stats(&self) -> CacheStats {
        let cache_stats: CacheStats = self
            .inner
            .store_readers
            .iter()
            .map(|reader| reader.cache_stats())
            .sum();
        cache_stats
    }

    /// Fetches a document in an asynchronous manner.
    #[cfg(feature = "quickwit")]
    pub async fn doc_async(&self, doc_address: DocAddress) -> crate::Result<Document> {
        let store_reader = &self.store_readers[doc_address.segment_ord as usize];
        let store_reader = &self.inner.store_readers[doc_address.segment_ord as usize];
        store_reader.get_async(doc_address.doc_id).await
    }

    /// Access the schema associated to the index of this searcher.
    /// Access the schema associated with the index of this searcher.
    pub fn schema(&self) -> &Schema {
        &self.schema
        &self.inner.schema
    }

    /// Returns the overall number of documents in the index.
    pub fn num_docs(&self) -> u64 {
        self.segment_readers
        self.inner
            .segment_readers
            .iter()
            .map(|segment_reader| u64::from(segment_reader.num_docs()))
            .sum::<u64>()
@@ -134,7 +126,7 @@ impl Searcher {
    /// the given term.
    pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
        let mut total_doc_freq = 0;
        for segment_reader in &self.segment_readers {
        for segment_reader in &self.inner.segment_readers {
            let inverted_index = segment_reader.inverted_index(term.field())?;
            let doc_freq = inverted_index.doc_freq(term)?;
            total_doc_freq += u64::from(doc_freq);
@@ -142,25 +134,38 @@ impl Searcher {
        Ok(total_doc_freq)
    }

    /// Return the overall number of documents containing
    /// the given term in an asynchronous manner.
    #[cfg(feature = "quickwit")]
    pub async fn doc_freq_async(&self, term: &Term) -> crate::Result<u64> {
        let mut total_doc_freq = 0;
        for segment_reader in &self.inner.segment_readers {
            let inverted_index = segment_reader.inverted_index(term.field())?;
            let doc_freq = inverted_index.doc_freq_async(term).await?;
            total_doc_freq += u64::from(doc_freq);
        }
        Ok(total_doc_freq)
    }

    /// Return the list of segment readers
    pub fn segment_readers(&self) -> &[SegmentReader] {
        &self.segment_readers
        &self.inner.segment_readers
    }

    /// Returns the segment_reader associated with the given segment_ord
    pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader {
        &self.segment_readers[segment_ord as usize]
        &self.inner.segment_readers[segment_ord as usize]
    }

    /// Runs a query on the segment readers wrapped by the searcher.
    ///
    /// Search works as follows:
    ///
    /// First the weight object associated to the query is created.
    /// First the weight object associated with the query is created.
    ///
    /// Then, the query loops over the segments and for each segment:
    /// - sets up the collector and informs it that the segment being processed has changed.
    /// - creates a SegmentCollector for collecting documents associated to the segment
    /// - creates a SegmentCollector for collecting documents associated with the segment
    /// - creates a `Scorer` object associated with this segment
    /// - iterates through the matched documents and pushes them to the segment collector.
    ///
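Putting the flow above together, a minimal end-to-end search sketch (the `index` and `body_field` bindings are assumptions for illustration):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;

let reader = index.reader()?;
let searcher = reader.searcher();
let query = QueryParser::for_index(&index, vec![body_field]).parse_query("hello world")?;
// `search` builds the Weight, then drives a SegmentCollector per segment.
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
for (score, doc_address) in top_docs {
    let retrieved = searcher.doc(doc_address)?;
    println!("{}: {}", score, searcher.schema().to_json(&retrieved));
}
```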
@@ -171,11 +176,11 @@ impl Searcher {
        query: &dyn Query,
        collector: &C,
    ) -> crate::Result<C::Fruit> {
        let executor = self.index.search_executor();
        let executor = self.inner.index.search_executor();
        self.search_with_executor(query, collector, executor)
    }

    /// Same as [`search(...)`](#method.search) but multithreaded.
    /// Same as [`search(...)`](Searcher::search) but multithreaded.
    ///
    /// The current implementation is rather naive:
    /// multithreading is by splitting search into as many tasks
@@ -208,17 +213,67 @@ impl Searcher {
    /// Summarize total space usage of this searcher.
    pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> {
        let mut space_usage = SearcherSpaceUsage::new();
        for segment_reader in &self.segment_readers {
        for segment_reader in self.segment_readers() {
            space_usage.add_segment(segment_reader.space_usage()?);
        }
        Ok(space_usage)
    }
}

impl From<Arc<SearcherInner>> for Searcher {
    fn from(inner: Arc<SearcherInner>) -> Self {
        Searcher { inner }
    }
}

/// Holds a list of `SegmentReader`s ready for search.
///
/// It guarantees that the `Segment` will not be removed before
/// the destruction of the `Searcher`.
pub(crate) struct SearcherInner {
    schema: Schema,
    index: Index,
    segment_readers: Vec<SegmentReader>,
    store_readers: Vec<StoreReader>,
    generation: TrackedObject<SearcherGeneration>,
}

impl SearcherInner {
    /// Creates a new `Searcher`
    pub(crate) fn new(
        schema: Schema,
        index: Index,
        segment_readers: Vec<SegmentReader>,
        generation: TrackedObject<SearcherGeneration>,
        doc_store_cache_size: usize,
    ) -> io::Result<SearcherInner> {
        assert_eq!(
            &segment_readers
                .iter()
                .map(|reader| (reader.segment_id(), reader.delete_opstamp()))
                .collect::<BTreeMap<_, _>>(),
            generation.segments(),
            "Set of segments referenced by this Searcher and its SearcherGeneration must match"
        );
        let store_readers: Vec<StoreReader> = segment_readers
            .iter()
            .map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
            .collect::<io::Result<Vec<_>>>()?;

        Ok(SearcherInner {
            schema,
            index,
            segment_readers,
            store_readers,
            generation,
        })
    }
}

impl fmt::Debug for Searcher {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let segment_ids = self
            .segment_readers
            .segment_readers()
            .iter()
            .map(SegmentReader::segment_id)
            .collect::<Vec<_>>();

@@ -70,7 +70,7 @@ impl Segment {
    /// Returns the relative path of a component of our segment.
    ///
    /// It just joins the segment id with the extension
    /// associated to a segment component.
    /// associated with a segment component.
    pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
        self.meta.relative_path(component)
    }

@@ -6,7 +6,7 @@ use std::slice;
/// except the delete component that takes a `segment_uuid`.`delete_opstamp`.`component_extension`
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum SegmentComponent {
    /// Postings (or inverted list). Sorted lists of document ids, associated to terms
    /// Postings (or inverted list). Sorted lists of document ids, associated with terms
    Postings,
    /// Positions of terms in each document.
    Positions,
@@ -24,7 +24,8 @@ pub enum SegmentComponent {
    Store,
    /// Temporary storage of the documents, before being streamed to `Store`.
    TempStore,
    /// Bitset describing which document of the segment is deleted.
    /// Bitset describing which document of the segment is alive.
    /// (It was representing deleted docs but changed to represent alive docs from v0.17)
    Delete,
}

@@ -16,7 +16,7 @@ use uuid::Uuid;
/// by a UUID which is used to prefix the filenames
/// of all of the files associated with the segment.
///
/// In unit test, for reproducability, the `SegmentId` are
/// In unit test, for reproducibility, the `SegmentId` are
/// simply generated in an autoincrement fashion.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SegmentId(Uuid);
@@ -57,7 +57,7 @@ impl SegmentId {
    /// Picking the first 8 chars is ok to identify
    /// segments in a display message (e.g. a5c4dfcb).
    pub fn short_uuid_string(&self) -> String {
        (&self.0.as_simple().to_string()[..8]).to_string()
        self.0.as_simple().to_string()[..8].to_string()
    }

    /// Returns a segment uuid string.

@@ -89,7 +89,7 @@ impl SegmentReader {
        &self.fast_fields_readers
    }

    /// Accessor to the `FacetReader` associated to a given `Field`.
    /// Accessor to the `FacetReader` associated with a given `Field`.
    pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
        let field_entry = self.schema.get_field_entry(field);

@@ -128,13 +128,14 @@ impl SegmentReader {
        })
    }

    pub(crate) fn fieldnorms_readers(&self) -> &FieldNormReaders {
    #[doc(hidden)]
    pub fn fieldnorms_readers(&self) -> &FieldNormReaders {
        &self.fieldnorm_readers
    }

    /// Accessor to the segment's `StoreReader`.
    pub fn get_store_reader(&self) -> io::Result<StoreReader> {
        StoreReader::open(self.store_file.clone())
    pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
        StoreReader::open(self.store_file.clone(), cache_size)
    }

    /// Open a new segment for reading.
@@ -175,9 +176,9 @@ impl SegmentReader {
        let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;

        let original_bitset = if segment.meta().has_deletes() {
            let delete_file_slice = segment.open_read(SegmentComponent::Delete)?;
            let delete_data = delete_file_slice.read_bytes()?;
            Some(AliveBitSet::open(delete_data))
            let alive_doc_file_slice = segment.open_read(SegmentComponent::Delete)?;
            let alive_doc_data = alive_doc_file_slice.read_bytes()?;
            Some(AliveBitSet::open(alive_doc_data))
        } else {
            None
        };
@@ -207,18 +208,18 @@ impl SegmentReader {
        })
    }

    /// Returns a field reader associated to the field given in argument.
    /// Returns a field reader associated with the field given in argument.
    /// If the field was not present in the index during indexing time,
    /// the InvertedIndexReader is empty.
    ///
    /// The field reader is in charge of iterating through the
    /// term dictionary associated to a specific field,
    /// and opening the posting list associated to any term.
    /// term dictionary associated with a specific field,
    /// and opening the posting list associated with any term.
    ///
    /// If the field is marked as index, a warn is logged and an empty `InvertedIndexReader`
    /// If the field is not marked as index, a warning is logged and an empty `InvertedIndexReader`
    /// is returned.
    /// Similarly if the field is marked as indexed but no term has been indexed for the given
    /// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
    /// Similarly, if the field is marked as indexed but no term has been indexed for the given
    /// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
    pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
        if let Some(inv_idx_reader) = self
            .inv_idx_reader_cache
@@ -240,7 +241,7 @@ impl SegmentReader {

        if postings_file_opt.is_none() || record_option_opt.is_none() {
            // no documents in the segment contained this field.
            // As a result, no data is associated to the inverted index.
            // As a result, no data is associated with the inverted index.
            //
            // Returns an empty inverted index.
            let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
@@ -295,8 +296,7 @@ impl SegmentReader {
        self.delete_opstamp
    }

    /// Returns the bitset representing
    /// the documents that have been deleted.
    /// Returns the bitset representing the alive `DocId`s.
    pub fn alive_bitset(&self) -> Option<&AliveBitSet> {
        self.alive_bitset_opt.as_ref()
    }
@@ -305,7 +305,7 @@ impl SegmentReader {
    /// as deleted.
    pub fn is_deleted(&self, doc: DocId) -> bool {
        self.alive_bitset()
            .map(|delete_set| delete_set.is_deleted(doc))
            .map(|alive_bitset| alive_bitset.is_deleted(doc))
            .unwrap_or(false)
    }

@@ -327,7 +327,7 @@ impl SegmentReader {
            self.positions_composite.space_usage(),
            self.fast_fields_readers.space_usage(),
            self.fieldnorm_readers.space_usage(),
            self.get_store_reader()?.space_usage(),
            self.get_store_reader(0)?.space_usage(),
            self.alive_bitset_opt
                .as_ref()
                .map(AliveBitSet::space_usage)

src/core/single_segment_index_writer.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
use crate::indexer::operation::AddOperation;
use crate::indexer::segment_updater::save_metas;
use crate::indexer::SegmentWriter;
use crate::{Directory, Document, Index, IndexMeta, Opstamp, Segment};

#[doc(hidden)]
pub struct SingleSegmentIndexWriter {
    segment_writer: SegmentWriter,
    segment: Segment,
    opstamp: Opstamp,
}

impl SingleSegmentIndexWriter {
    pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
        let segment = index.new_segment();
        let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
        Ok(Self {
            segment_writer,
            segment,
            opstamp: 0,
        })
    }

    pub fn mem_usage(&self) -> usize {
        self.segment_writer.mem_usage()
    }

    pub fn add_document(&mut self, document: Document) -> crate::Result<()> {
        let opstamp = self.opstamp;
        self.opstamp += 1;
        self.segment_writer
            .add_document(AddOperation { opstamp, document })
    }

    pub fn finalize(self) -> crate::Result<Index> {
        let max_doc = self.segment_writer.max_doc();
        self.segment_writer.finalize()?;
        let segment: Segment = self.segment.with_max_doc(max_doc);
        let index = segment.index();
        let index_meta = IndexMeta {
            index_settings: index.settings().clone(),
            segments: vec![segment.meta().clone()],
            schema: index.schema(),
            opstamp: 0,
            payload: None,
        };
        save_metas(&index_meta, index.directory())?;
        index.directory().sync_directory()?;
        Ok(segment.index().clone())
    }
}
@@ -38,7 +38,7 @@ impl BinarySerializable for FileAddr {
/// A `CompositeWrite` is used to write a `CompositeFile`.
pub struct CompositeWrite<W = WritePtr> {
    write: CountingWriter<W>,
    offsets: HashMap<FileAddr, u64>,
    offsets: Vec<(FileAddr, u64)>,
}

impl<W: TerminatingWrite + Write> CompositeWrite<W> {
@@ -47,7 +47,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
    pub fn wrap(w: W) -> CompositeWrite<W> {
        CompositeWrite {
            write: CountingWriter::wrap(w),
            offsets: HashMap::new(),
            offsets: Vec::new(),
        }
    }

@@ -60,8 +60,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
    pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
        let offset = self.write.written_bytes();
        let file_addr = FileAddr::new(field, idx);
        assert!(!self.offsets.contains_key(&file_addr));
        self.offsets.insert(file_addr, offset);
        assert!(!self.offsets.iter().any(|el| el.0 == file_addr));
        self.offsets.push((file_addr, offset));
        &mut self.write
    }

@@ -73,16 +73,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
        let footer_offset = self.write.written_bytes();
        VInt(self.offsets.len() as u64).serialize(&mut self.write)?;

        let mut offset_fields: Vec<_> = self
            .offsets
            .iter()
            .map(|(file_addr, offset)| (*offset, *file_addr))
            .collect();

        offset_fields.sort();

        let mut prev_offset = 0;
        for (offset, file_addr) in offset_fields {
        for (file_addr, offset) in self.offsets {
            VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
            file_addr.serialize(&mut self.write)?;
            prev_offset = offset;
@@ -106,6 +98,14 @@ pub struct CompositeFile {
    offsets_index: HashMap<FileAddr, Range<usize>>,
}

impl std::fmt::Debug for CompositeFile {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CompositeFile")
            .field("offsets_index", &self.offsets_index)
            .finish()
    }
}

impl CompositeFile {
    /// Opens a composite file stored in a given
    /// `FileSlice`.
@@ -154,14 +154,14 @@ impl CompositeFile {
        }
    }

    /// Returns the `FileSlice` associated
    /// to a given `Field` and stored in a `CompositeFile`.
    /// Returns the `FileSlice` associated with
    /// a given `Field` and stored in a `CompositeFile`.
    pub fn open_read(&self, field: Field) -> Option<FileSlice> {
        self.open_read_with_idx(field, 0)
    }

    /// Returns the `FileSlice` associated
    /// to a given `Field` and stored in a `CompositeFile`.
    /// Returns the `FileSlice` associated with
    /// a given `Field` and stored in a `CompositeFile`.
    pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
        self.offsets_index
            .get(&FileAddr { field, idx })
@@ -233,4 +233,56 @@ mod test {
        }
        Ok(())
    }

    #[test]
    fn test_composite_file_bug() -> crate::Result<()> {
        let path = Path::new("test_path");
        let directory = RamDirectory::create();
        {
            let w = directory.open_write(path).unwrap();
            let mut composite_write = CompositeWrite::wrap(w);
            let mut write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 0);
            VInt(32431123u64).serialize(&mut write)?;
            write.flush()?;
            let write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 1);
            write.flush()?;

            let mut write = composite_write.for_field_with_idx(Field::from_field_id(0u32), 0);
            VInt(1_000_000).serialize(&mut write)?;
            write.flush()?;

            composite_write.close()?;
        }
        {
            let r = directory.open_read(path)?;
            let composite_file = CompositeFile::open(&r)?;
            {
                let file = composite_file
                    .open_read_with_idx(Field::from_field_id(1u32), 0)
                    .unwrap()
                    .read_bytes()?;
                let mut file0_buf = file.as_slice();
                let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
                assert_eq!(file0_buf.len(), 0);
                assert_eq!(payload_0, 32431123u64);
            }
            {
                let file = composite_file
                    .open_read_with_idx(Field::from_field_id(1u32), 1)
                    .unwrap()
                    .read_bytes()?;
                let file = file.as_slice();
                assert_eq!(file.len(), 0);
            }
            {
                let file = composite_file
                    .open_read_with_idx(Field::from_field_id(0u32), 0)
                    .unwrap()
                    .read_bytes()?;
                let file = file.as_slice();
                assert_eq!(file.len(), 3);
            }
        }
        Ok(())
    }
}

@@ -1,6 +1,7 @@
use std::io::Write;
use std::marker::{Send, Sync};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use std::{fmt, io, thread};

@@ -38,7 +39,7 @@ impl RetryPolicy {

/// The `DirectoryLock` is an object that represents a file lock.
///
/// It is associated to a lock file that gets deleted on `Drop`.
/// It is associated with a lock file that gets deleted on `Drop`.
pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);

struct DirectoryLockGuard {
@@ -62,7 +63,12 @@ impl Drop for DirectoryLockGuard {

enum TryAcquireLockError {
    FileExists,
    IoError(io::Error),
    IoError(Arc<io::Error>),
}
impl From<io::Error> for TryAcquireLockError {
    fn from(io_error: io::Error) -> Self {
        Self::IoError(Arc::new(io_error))
    }
}

fn try_acquire_lock(
@@ -73,7 +79,7 @@ fn try_acquire_lock(
        OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
        OpenWriteError::IoError { io_error, .. } => TryAcquireLockError::IoError(io_error),
    })?;
    write.flush().map_err(TryAcquireLockError::IoError)?;
    write.flush().map_err(TryAcquireLockError::from)?;
    Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
        directory: directory.box_clone(),
        path: filepath.to_owned(),
@@ -105,15 +111,15 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    ///
    /// Users of `Directory` should typically call `Directory::open_read(...)`,
    /// while `Directory` implementors should implement `get_file_handle()`.
    fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;
    fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError>;

    /// Once a virtual file is open, its data may not
    /// change.
    ///
    /// Specifically, subsequent writes or flushes should
    /// have no effect on the returned `FileSlice` object.
    /// have no effect on the returned [`FileSlice`] object.
    ///
    /// You should only use this to read files created with [Directory::open_write].
    /// You should only use this to read files created with [`Directory::open_write()`].
    fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> {
        let file_handle = self.get_file_handle(path)?;
        Ok(FileSlice::new(file_handle))
@@ -122,27 +128,28 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    /// Removes a file
    ///
    /// Removing a file will not affect an eventual
    /// existing FileSlice pointing to it.
    /// existing [`FileSlice`] pointing to it.
    ///
    /// Removing a nonexistent file, yields a
    /// `DeleteError::DoesNotExist`.
    /// Removing a nonexistent file, returns a
    /// [`DeleteError::FileDoesNotExist`].
    fn delete(&self, path: &Path) -> Result<(), DeleteError>;

    /// Returns true if and only if the file exists
    fn exists(&self, path: &Path) -> Result<bool, OpenReadError>;

    /// Opens a writer for the *virtual file* associated with
    /// a Path.
    /// a [`Path`].
    ///
    /// Right after this call, for the span of the execution of the program
    /// the file should be created and any subsequent call to `open_read` for the
    /// same path should return a `FileSlice`.
    /// the file should be created and any subsequent call to
    /// [`Directory::open_read()`] for the same path should return
    /// a [`FileSlice`].
    ///
    /// However, depending on the directory implementation,
    /// it might be required to call `sync_directory` to ensure
    /// it might be required to call [`Directory::sync_directory()`] to ensure
    /// that the file is durably created.
    /// (The semantics here are the same when dealing with
    /// a posix filesystem.)
    /// a POSIX filesystem.)
    ///
    /// Write operations may be aggressively buffered.
    /// The client of this trait is responsible for calling flush
@@ -151,19 +158,19 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    ///
    /// Flush operation should also be persistent.
    ///
    /// The user shall not rely on `Drop` triggering `flush`.
    /// Note that `RamDirectory` will panic! if `flush`
    /// was not called.
    /// The user shall not rely on [`Drop`] triggering `flush`.
    /// Note that [`RamDirectory`][crate::directory::RamDirectory] will
    /// panic! if `flush` was not called.
    ///
    /// The file may not previously exist.
    fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>;

    /// Reads the full content of a file that has been written using
    /// atomic_write.
    /// [`Directory::atomic_write()`].
    ///
    /// This should only be used for small files.
    ///
    /// You should only use this to read files created with [Directory::atomic_write].
    /// You should only use this to read files created with [`Directory::atomic_write()`].
    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;

    /// Atomically replace the content of a file with data.
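A small round-trip sketch of the atomic API described above, using the in-memory directory (the file name is an illustrative assumption):

```rust
use std::path::Path;
use tantivy::directory::{Directory, RamDirectory};

let directory = RamDirectory::create();
let path = Path::new("some_meta.json");
// atomic_write replaces the whole file content in one durable step.
directory.atomic_write(path, br#"{"hello": "world"}"#)?;
let payload = directory.atomic_read(path)?;
assert_eq!(payload, br#"{"hello": "world"}"#.to_vec());
```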
@@ -180,9 +187,9 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
|
||||
/// effectively stored durably.
|
||||
fn sync_directory(&self) -> io::Result<()>;
|
||||
|
||||
/// Acquire a lock in the given directory.
|
||||
/// Acquire a lock in the directory given in the [`Lock`].
|
||||
///
|
||||
/// The method is blocking or not depending on the `Lock` object.
|
||||
/// The method is blocking or not depending on the [`Lock`] object.
|
||||
fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
|
||||
let mut box_directory = self.box_clone();
|
||||
let mut retry_policy = retry_policy(lock.is_blocking);
|
||||
@@ -204,15 +211,15 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     }

     /// Registers a callback that will be called whenever a change on the `meta.json`
-    /// using the `atomic_write` API is detected.
+    /// using the [`Directory::atomic_write()`] API is detected.
     ///
-    /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
-    /// hand, undefined.
+    /// The behavior when using `.watch()` on a file using [`Directory::open_write()`] is, on the
+    /// other hand, undefined.
     ///
     /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
     /// required to keep it.
     /// It does not override previous callbacks. When the file is modified, all callbacks that are
-    /// registered (and whose `WatchHandle` is still alive) are triggered.
+    /// registered (and whose [`WatchHandle`] is still alive) are triggered.
     ///
     /// Internally, tantivy only uses this API to detect new commits to implement the
     /// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the

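A usage sketch for the watch API described above (hedged: `WatchCallback::new` wrapping a plain closure is assumed from tantivy's public API):

    use tantivy::directory::{Directory, RamDirectory, WatchCallback, WatchHandle};

    fn watch_meta(directory: &RamDirectory) -> tantivy::Result<WatchHandle> {
        // Fires on every atomic_write to meta.json. The handle must be kept
        // alive by the caller: dropping it silently unregisters the callback.
        directory.watch(WatchCallback::new(|| {
            println!("meta.json changed");
        }))
    }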
@@ -4,12 +4,14 @@ use once_cell::sync::Lazy;

 /// A directory lock.
 ///
-/// A lock is associated to a specific path and some
-/// [`LockParams`](./enum.LockParams.html).
+/// A lock is associated with a specific path.
+///
+/// The lock will be passed to [`Directory::acquire_lock`](crate::Directory::acquire_lock).
 ///
 /// Tantivy itself uses only two locks, but client applications
 /// can use the directory facility to define their own locks.
-/// - [INDEX_WRITER_LOCK]
-/// - [META_LOCK]
+/// - [`INDEX_WRITER_LOCK`]
+/// - [`META_LOCK`]
 ///
 /// Check out these locks' documentation for more information.
 #[derive(Debug)]
@@ -18,19 +20,21 @@ pub struct Lock {
     /// Depending on the platform, the lock might rely on the creation
     /// and deletion of this filepath.
     pub filepath: PathBuf,
-    /// `lock_params` describes whether acquiring the lock is meant
+    /// `is_blocking` describes whether acquiring the lock is meant
     /// to be a blocking operation or a non-blocking one.
     ///
     /// Acquiring a blocking lock blocks until the lock is
     /// available.
-    /// Acquiring a blocking lock returns rapidly, either successfully
+    ///
+    /// Acquiring a non-blocking lock returns rapidly, either successfully
     /// or with an error signifying that someone is already holding
     /// the lock.
     pub is_blocking: bool,
 }

 /// Only one process should be able to write tantivy's index at a time.
-/// This lock file, when present, is in charge of preventing other processes to open an IndexWriter.
+/// This lock file, when present, is in charge of preventing other processes from opening an
+/// `IndexWriter`.
 ///
 /// If the process is killed and this file remains, it is safe to remove it manually.
 ///
@@ -45,7 +49,7 @@ pub static INDEX_WRITER_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
 /// The meta lock file is here to protect the segment files being opened by
 /// `IndexReader::reload()` from being garbage collected.
 /// It makes it possible for another process to safely consume
-/// our index in-writing. Ideally, we may have prefered `RWLock` semantics
+/// our index in-writing. Ideally, we may have preferred `RWLock` semantics
 /// here, but it is difficult to achieve on Windows.
 ///
 /// Opening segment readers is a very fast process.

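Since `filepath` and `is_blocking` are public, a client application can define its own lock alongside the two built-ins. A sketch (the path `custom.lock` is made up for the example):

    use std::path::PathBuf;

    use tantivy::directory::error::LockError;
    use tantivy::directory::{Directory, DirectoryLock, Lock, RamDirectory};

    fn acquire_custom_lock(directory: &RamDirectory) -> Result<DirectoryLock, LockError> {
        // Any (filepath, is_blocking) pair defines a lock.
        let custom_lock = Lock {
            filepath: PathBuf::from("custom.lock"),
            is_blocking: false, // fail fast if someone already holds it
        };
        directory.acquire_lock(&custom_lock)
    }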
@@ -1,10 +1,13 @@
 use std::path::PathBuf;
+use std::sync::Arc;
 use std::{fmt, io};

 use crate::Version;

-/// Error while trying to acquire a directory lock.
-#[derive(Debug, Error)]
+/// Error while trying to acquire a directory [lock](crate::directory::Lock).
+///
+/// This is returned from [`Directory::acquire_lock`](crate::Directory::acquire_lock).
+#[derive(Debug, Clone, Error)]
 pub enum LockError {
     /// Failed to acquire a lock as it is already held by another
     /// client.
@@ -16,11 +19,18 @@ pub enum LockError {
     LockBusy,
     /// Trying to acquire a lock failed with an `IoError`
     #[error("Failed to acquire the lock due to an io::Error.")]
-    IoError(io::Error),
+    IoError(Arc<io::Error>),
 }

+impl LockError {
+    /// Wraps an io error.
+    pub fn wrap_io_error(io_error: io::Error) -> Self {
+        Self::IoError(Arc::new(io_error))
+    }
+}
+
 /// Error that may occur when opening a directory
-#[derive(Debug, Error)]
+#[derive(Debug, Clone, Error)]
 pub enum OpenDirectoryError {
     /// The underlying directory does not exist.
     #[error("Directory does not exist: '{0}'.")]
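The `Arc<io::Error>` wrapping (and the new `Clone` derives) exist because `std::io::Error` itself does not implement `Clone`; sharing it behind an `Arc` is the standard workaround. A self-contained illustration outside tantivy:

    use std::io;
    use std::sync::Arc;

    #[derive(Debug, Clone)]
    enum DemoLockError {
        LockBusy,
        // io::Error is not Clone, so the Arc is what makes
        // #[derive(Clone)] on the enum possible.
        IoError(Arc<io::Error>),
    }

    fn demo() {
        let err = DemoLockError::IoError(Arc::new(io::Error::new(
            io::ErrorKind::Other,
            "simulated failure",
        )));
        let _copy = err.clone(); // fine: both values share one io::Error
        let _busy = DemoLockError::LockBusy;
    }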
@@ -30,12 +40,12 @@ pub enum OpenDirectoryError {
     NotADirectory(PathBuf),
     /// Failed to create a temp directory.
     #[error("Failed to create a temporary directory: '{0}'.")]
-    FailedToCreateTempDir(io::Error),
+    FailedToCreateTempDir(Arc<io::Error>),
     /// IoError
     #[error("IoError '{io_error:?}' while creating directory in: '{directory_path:?}'.")]
     IoError {
         /// underlying io Error.
-        io_error: io::Error,
+        io_error: Arc<io::Error>,
         /// directory we tried to open.
         directory_path: PathBuf,
     },
@@ -45,14 +55,14 @@ impl OpenDirectoryError {
     /// Wraps an io error.
     pub fn wrap_io_error(io_error: io::Error, directory_path: PathBuf) -> Self {
         Self::IoError {
-            io_error,
+            io_error: Arc::new(io_error),
             directory_path,
         }
     }
 }

 /// Error that may occur when starting to write in a file
-#[derive(Debug, Error)]
+#[derive(Debug, Clone, Error)]
 pub enum OpenWriteError {
     /// Our directory is WORM, writing an existing file is forbidden.
     /// Check out the `Directory` documentation.
@@ -63,7 +73,7 @@ pub enum OpenWriteError {
     #[error("IoError '{io_error:?}' while opening file for write: '{filepath}'.")]
     IoError {
         /// The underlying `io::Error`.
-        io_error: io::Error,
+        io_error: Arc<io::Error>,
         /// File path of the file that tantivy failed to open for write.
         filepath: PathBuf,
     },
@@ -72,11 +82,15 @@
 impl OpenWriteError {
     /// Wraps an io error.
     pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
-        Self::IoError { io_error, filepath }
+        Self::IoError {
+            io_error: Arc::new(io_error),
+            filepath,
+        }
     }
 }
 /// Type of index incompatibility between the library and the index found on disk
 /// Used to catch and provide a hint to solve this incompatibility issue
+#[derive(Clone)]
 pub enum Incompatibility {
     /// This library cannot decompress the index found on disk
     CompressionMismatch {
@@ -135,7 +149,7 @@ impl fmt::Debug for Incompatibility {
 }

 /// Error that may occur when accessing a file read
-#[derive(Debug, Error)]
+#[derive(Debug, Clone, Error)]
 pub enum OpenReadError {
     /// The file does not exist.
     #[error("File does not exist: {0:?}")]
@@ -146,7 +160,7 @@
     )]
     IoError {
         /// The underlying `io::Error`.
-        io_error: io::Error,
+        io_error: Arc<io::Error>,
         /// File path of the file that tantivy failed to open for read.
         filepath: PathBuf,
     },
@@ -158,11 +172,14 @@
 impl OpenReadError {
     /// Wraps an io error.
     pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
-        Self::IoError { io_error, filepath }
+        Self::IoError {
+            io_error: Arc::new(io_error),
+            filepath,
+        }
     }
 }
 /// Error that may occur when trying to delete a file
-#[derive(Debug, Error)]
+#[derive(Debug, Clone, Error)]
 pub enum DeleteError {
     /// The file does not exist.
     #[error("File does not exist: '{0}'.")]
@@ -172,7 +189,7 @@ pub enum DeleteError {
     #[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
     IoError {
         /// The underlying `io::Error`.
-        io_error: io::Error,
+        io_error: Arc<io::Error>,
         /// File path of the file that tantivy failed to delete.
         filepath: PathBuf,
     },

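With these error types now `Clone`, callers can store or fan out failures; matching stays the same. A sketch (hedged: assumes the `tantivy::directory::error` module path and the `FileDoesNotExist` variant shown above):

    use std::path::Path;

    use tantivy::directory::error::OpenReadError;
    use tantivy::directory::{Directory, RamDirectory};

    fn read_or_empty(directory: &RamDirectory, path: &Path) -> Result<Vec<u8>, OpenReadError> {
        match directory.atomic_read(path) {
            // A missing file is treated as an empty one.
            Err(OpenReadError::FileDoesNotExist(_)) => Ok(Vec::new()),
            // Any other outcome (success or a cloneable IoError) is passed on.
            other => other,
        }
    }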
@@ -1,5 +1,5 @@
 use std::ops::{Deref, Range};
-use std::sync::{Arc, Weak};
+use std::sync::Arc;
 use std::{fmt, io};

 use async_trait::async_trait;
@@ -8,16 +8,13 @@ use stable_deref_trait::StableDeref;

 use crate::directory::OwnedBytes;

-pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-
 /// Objects that represent file sections in tantivy.
 ///
 /// By contract, whatever happens to the directory file, as long as a FileHandle
 /// is alive, the data associated with it cannot be altered or destroyed.
 ///
-/// The underlying behavior is therefore specific to the `Directory` that created it.
-/// Despite its name, a `FileSlice` may or may not directly map to an actual file
+/// The underlying behavior is therefore specific to the [`Directory`](crate::Directory) that
+/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
 /// on the filesystem.

 #[async_trait]
@@ -54,7 +51,7 @@ impl<B> From<B> for FileSlice
 where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync
 {
     fn from(bytes: B) -> FileSlice {
-        FileSlice::new(Box::new(OwnedBytes::new(bytes)))
+        FileSlice::new(Arc::new(OwnedBytes::new(bytes)))
     }
 }

@@ -75,7 +72,7 @@ impl fmt::Debug for FileSlice {

 impl FileSlice {
     /// Wraps a FileHandle.
-    pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
+    pub fn new(file_handle: Arc<dyn FileHandle>) -> Self {
         let num_bytes = file_handle.len();
         FileSlice::new_with_num_bytes(file_handle, num_bytes)
     }
@@ -83,9 +80,9 @@ impl FileSlice {
     /// Wraps a FileHandle.
     #[doc(hidden)]
     #[must_use]
-    pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
+    pub fn new_with_num_bytes(file_handle: Arc<dyn FileHandle>, num_bytes: usize) -> Self {
         FileSlice {
-            data: Arc::from(file_handle),
+            data: file_handle,
             range: 0..num_bytes,
         }
     }
@@ -112,7 +109,7 @@ impl FileSlice {

     /// Returns an `OwnedBytes` with all of the data in the `FileSlice`.
     ///
-    /// The behavior is strongly dependant on the implementation of the underlying
+    /// The behavior is strongly dependent on the implementation of the underlying
     /// `Directory` and the `FileSliceTrait` it creates.
     /// In particular, it is up to the `Directory` implementation
     /// to handle caching if needed.
@@ -235,6 +232,7 @@ impl FileHandle for OwnedBytes {
 #[cfg(test)]
 mod tests {
     use std::io;
+    use std::sync::Arc;

     use common::HasLen;

@@ -242,7 +240,7 @@ mod tests {

     #[test]
     fn test_file_slice() -> io::Result<()> {
-        let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
+        let file_slice = FileSlice::new(Arc::new(b"abcdef".as_ref()));
         assert_eq!(file_slice.len(), 6);
         assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
         assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
@@ -286,7 +284,7 @@ mod tests {

     #[test]
     fn test_slice_simple_read() -> io::Result<()> {
-        let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
+        let slice = FileSlice::new(Arc::new(&b"abcdef"[..]));
         assert_eq!(slice.len(), 6);
         assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
         assert_eq!(slice.slice(1..4).read_bytes()?.as_ref(), b"bcd");
@@ -295,7 +293,7 @@ mod tests {

     #[test]
     fn test_slice_read_slice() -> io::Result<()> {
-        let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
+        let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
         assert_eq!(slice_deref.read_bytes_slice(1..4)?.as_ref(), b"bcd");
         Ok(())
     }
@@ -303,7 +301,7 @@ mod tests {
     #[test]
     #[should_panic(expected = "end of requested range exceeds the fileslice length (10 > 6)")]
     fn test_slice_read_slice_invalid_range_exceeds() {
-        let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
+        let slice_deref = FileSlice::new(Arc::new(&b"abcdef"[..]));
         assert_eq!(
             slice_deref.read_bytes_slice(0..10).unwrap().as_ref(),
             b"bcd"

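The visible theme of this file is the `Box<dyn FileHandle>` to `Arc<dyn FileHandle>` change, which makes handles cheaply shareable. Mirroring the tests above, a caller-side sketch:

    use std::sync::Arc;

    use tantivy::directory::FileSlice;

    fn fileslice_demo() -> std::io::Result<()> {
        // Arc instead of Box: the same handle can now back
        // several slices without copying the bytes.
        let file_slice = FileSlice::new(Arc::new(&b"abcdef"[..]));
        assert_eq!(file_slice.slice(1..4).read_bytes()?.as_slice(), b"bcd");
        assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
        Ok(())
    }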
@@ -9,7 +9,7 @@ use crc32fast::Hasher;

 use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};

-pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
+const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });

 // Watches a file and executes registered callbacks when the file is modified.
 pub struct FileWatcher {

@@ -156,6 +156,7 @@ impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
 mod tests {

     use std::io;
+    use std::sync::Arc;

     use common::BinarySerializable;

@@ -168,7 +169,7 @@ mod tests {
         let footer = Footer::new(123);
         footer.append_footer(&mut buf).unwrap();
         let owned_bytes = OwnedBytes::new(buf);
-        let fileslice = FileSlice::new(Box::new(owned_bytes));
+        let fileslice = FileSlice::new(Arc::new(owned_bytes));
         let (footer_deser, _body) = Footer::extract_footer(fileslice).unwrap();
         assert_eq!(footer_deser.crc(), footer.crc());
     }
@@ -181,7 +182,7 @@ mod tests {

         let owned_bytes = OwnedBytes::new(buf);

-        let fileslice = FileSlice::new(Box::new(owned_bytes));
+        let fileslice = FileSlice::new(Arc::new(owned_bytes));
         let err = Footer::extract_footer(fileslice).unwrap_err();
         assert_eq!(
             err.to_string(),
@@ -198,7 +199,7 @@ mod tests {

         let owned_bytes = OwnedBytes::new(buf);

-        let fileslice = FileSlice::new(Box::new(owned_bytes));
+        let fileslice = FileSlice::new(Arc::new(owned_bytes));
         let err = Footer::extract_footer(fileslice).unwrap_err();
         assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
         assert_eq!(
@@ -217,7 +218,7 @@ mod tests {

         let owned_bytes = OwnedBytes::new(buf);

-        let fileslice = FileSlice::new(Box::new(owned_bytes));
+        let fileslice = FileSlice::new(Arc::new(owned_bytes));
         let err = Footer::extract_footer(fileslice).unwrap_err();
         assert_eq!(err.kind(), io::ErrorKind::InvalidData);
         assert_eq!(

@@ -114,7 +114,7 @@ impl ManagedDirectory {
         let mut files_to_delete = vec![];

         // It is crucial to get the living files after acquiring the
-        // read lock of meta informations. That way, we
+        // read lock of meta information. That way, we
         // avoid the following scenario.
         //
         // 1) we get the list of living files.
@@ -242,16 +242,13 @@ impl ManagedDirectory {
     /// Verify checksum of a managed file
     pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
         let reader = self.directory.open_read(path)?;
-        let (footer, data) =
-            Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IoError {
-                io_error,
-                filepath: path.to_path_buf(),
-            })?;
+        let (footer, data) = Footer::extract_footer(reader)
+            .map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
         let bytes = data
             .read_bytes()
             .map_err(|io_error| OpenReadError::IoError {
-                io_error,
+                io_error: Arc::new(io_error),
                 filepath: path.to_path_buf(),
             })?;
         let mut hasher = Hasher::new();
         hasher.update(bytes.as_slice());
@@ -272,9 +269,9 @@ impl ManagedDirectory {
 }

 impl Directory for ManagedDirectory {
-    fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> {
+    fn get_file_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>, OpenReadError> {
         let file_slice = self.open_read(path)?;
-        Ok(Box::new(file_slice))
+        Ok(Arc::new(file_slice))
     }

     fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {

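The checksum logic above boils down to hashing the body and comparing with the CRC recorded in the footer. A stripped-down sketch of that core step (hypothetical helper, using the same `crc32fast` crate as the code above):

    use crc32fast::Hasher;

    // Hypothetical helper mirroring the core of validate_checksum:
    // hash the body bytes and compare against the footer's stored CRC.
    fn crc32_matches(body: &[u8], expected_crc: u32) -> bool {
        let mut hasher = Hasher::new();
        hasher.update(body);
        hasher.finalize() == expected_crc
    }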
@@ -3,7 +3,7 @@ use std::fs::{self, File, OpenOptions};
 use std::io::{self, BufWriter, Read, Seek, Write};
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, RwLock, Weak};
 use std::{fmt, result};

 use fs2::FileExt;
@@ -18,16 +18,19 @@ use crate::directory::error::{
 };
 use crate::directory::file_watcher::FileWatcher;
 use crate::directory::{
-    AntiCallToken, ArcBytes, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes,
-    TerminatingWrite, WatchCallback, WatchHandle, WeakArcBytes, WritePtr,
+    AntiCallToken, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes, TerminatingWrite,
+    WatchCallback, WatchHandle, WritePtr,
 };

+pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+
 /// Create a default io error given a string.
 pub(crate) fn make_io_err(msg: String) -> io::Error {
     io::Error::new(io::ErrorKind::Other, msg)
 }

-/// Returns None iff the file exists, can be read, but is empty (and hence
+/// Returns `None` iff the file exists, can be read, but is empty (and hence
 /// cannot be mmapped)
 fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
     let file = File::open(full_path).map_err(|io_err| {
@@ -56,10 +59,10 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {

 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct CacheCounters {
-    // Number of time the cache prevents to call `mmap`
+    /// Number of times the cache prevented a call to `mmap`.
     pub hit: usize,
-    // Number of time tantivy had to call `mmap`
-    // as no entry was in the cache.
+    /// Number of times tantivy had to call `mmap`
+    /// as no entry was in the cache.
    pub miss: usize,
 }

@@ -174,7 +177,8 @@ impl MmapDirectory {
     /// This is mostly useful to test the MmapDirectory itself.
     /// For your unit tests, prefer the RamDirectory.
     pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
-        let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
+        let tempdir = TempDir::new()
+            .map_err(|io_err| OpenDirectoryError::FailedToCreateTempDir(Arc::new(io_err)))?;
         Ok(MmapDirectory::new(
             tempdir.path().to_path_buf(),
             Some(tempdir),
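With the counter fields now documented as public, callers can compute a cache hit ratio. A sketch (hedged: assumes `get_cache_info()` exposes a `counters: CacheCounters` field next to the `mmapped` list used in the tests below):

    use tantivy::directory::MmapDirectory;

    fn print_mmap_cache_ratio(directory: &MmapDirectory) {
        // `hit` counts mmap calls avoided, `miss` counts actual mmap calls.
        let counters = directory.get_cache_info().counters;
        let total = counters.hit + counters.miss;
        if total > 0 {
            let ratio = 100.0 * counters.hit as f64 / total as f64;
            println!("mmap cache hit ratio: {ratio:.1}%");
        }
    }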
@@ -300,7 +304,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
             "Path {:?} does not have parent directory.",
         )
     })?;
-    let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
+    let mut tempfile = tempfile::Builder::new().tempfile_in(parent_path)?;
     tempfile.write_all(content)?;
     tempfile.flush()?;
     tempfile.as_file_mut().sync_data()?;
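The hunk above is the tail of the classic write-to-temp-then-rename recipe. A self-contained sketch of the whole pattern (assuming the `tempfile` crate, as used above; `persist` performs the atomic rename):

    use std::io::{self, Write};
    use std::path::Path;

    fn atomic_write_sketch(path: &Path, content: &[u8]) -> io::Result<()> {
        let parent = path
            .parent()
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "path has no parent directory"))?;
        // Write into a temp file living in the destination directory...
        let mut tmp = tempfile::Builder::new().tempfile_in(parent)?;
        tmp.write_all(content)?;
        tmp.flush()?;
        tmp.as_file_mut().sync_data()?;
        // ...then atomically move it into place (rename(2) on POSIX), so a
        // concurrent reader sees either the old file or the new one.
        tmp.persist(path)?;
        Ok(())
    }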
@@ -309,7 +313,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
 }

 impl Directory for MmapDirectory {
-    fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> {
+    fn get_file_handle(&self, path: &Path) -> result::Result<Arc<dyn FileHandle>, OpenReadError> {
         debug!("Open Read {:?}", path);
         let full_path = self.resolve_path(path);

@@ -330,10 +334,10 @@ impl Directory for MmapDirectory {
             })
             .unwrap_or_else(OwnedBytes::empty);

-        Ok(Box::new(owned_bytes))
+        Ok(Arc::new(owned_bytes))
     }

-    /// Any entry associated to the path in the mmap will be
+    /// Any entry associated with the path in the mmap will be
     /// removed before the file is deleted.
     fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
         let full_path = self.resolve_path(path);
@@ -342,7 +346,7 @@ impl Directory for MmapDirectory {
                 DeleteError::FileDoesNotExist(path.to_owned())
             } else {
                 DeleteError::IoError {
-                    io_error: e,
+                    io_error: Arc::new(e),
                     filepath: path.to_path_buf(),
                 }
             }
@@ -422,9 +426,9 @@ impl Directory for MmapDirectory {
             .write(true)
             .create(true) //< if the file does not exist yet, create it.
             .open(&full_path)
-            .map_err(LockError::IoError)?;
+            .map_err(LockError::wrap_io_error)?;
         if lock.is_blocking {
-            file.lock_exclusive().map_err(LockError::IoError)?;
+            file.lock_exclusive().map_err(LockError::wrap_io_error)?;
         } else {
             file.try_lock_exclusive().map_err(|_| LockError::LockBusy)?
         }
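The branch above is the whole blocking/non-blocking story: fs2 offers a waiting and a fail-fast exclusive lock on a plain `std::fs::File`. Isolated as a sketch:

    use std::fs::{File, OpenOptions};
    use std::io;
    use std::path::Path;

    use fs2::FileExt;

    fn lock_file(path: &Path, blocking: bool) -> io::Result<File> {
        let file = OpenOptions::new()
            .write(true)
            .create(true) // create the lock file if it does not exist yet
            .open(path)?;
        if blocking {
            file.lock_exclusive()?; // waits until the lock becomes free
        } else {
            file.try_lock_exclusive()?; // errors immediately if already held
        }
        // The OS lock is released when `file` is dropped.
        Ok(file)
    }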
@@ -471,6 +475,8 @@ mod tests {
     // There are more tests in directory/mod.rs
     // The following tests are specific to the MmapDirectory

+    use std::time::Duration;
+
     use common::HasLen;

     use super::*;
@@ -565,9 +571,21 @@ mod tests {
         assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
     }

+    fn assert_eventually<P: Fn() -> Option<String>>(predicate: P) {
+        for _ in 0..30 {
+            if predicate().is_none() {
+                break;
+            }
+            std::thread::sleep(Duration::from_millis(200));
+        }
+        if let Some(error_msg) = predicate() {
+            panic!("{}", error_msg);
+        }
+    }
+
     #[test]
-    fn test_mmap_released() -> crate::Result<()> {
-        let mmap_directory = MmapDirectory::create_from_tempdir()?;
+    fn test_mmap_released() {
+        let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
         let mut schema_builder: SchemaBuilder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
@@ -576,40 +594,56 @@ mod tests {
             let index =
                 Index::create(mmap_directory.clone(), schema, IndexSettings::default()).unwrap();

-            let mut index_writer = index.writer_for_tests()?;
+            let mut index_writer = index.writer_for_tests().unwrap();
             let mut log_merge_policy = LogMergePolicy::default();
             log_merge_policy.set_min_num_segments(3);
             index_writer.set_merge_policy(Box::new(log_merge_policy));
             for _num_commits in 0..10 {
                 for _ in 0..10 {
-                    index_writer.add_document(doc!(text_field=>"abc"))?;
+                    index_writer.add_document(doc!(text_field=>"abc")).unwrap();
                 }
-                index_writer.commit()?;
+                index_writer.commit().unwrap();
             }

             let reader = index
                 .reader_builder()
                 .reload_policy(ReloadPolicy::Manual)
-                .try_into()?;
+                .try_into()
+                .unwrap();

             for _ in 0..4 {
-                index_writer.add_document(doc!(text_field=>"abc"))?;
-                index_writer.commit()?;
-                reader.reload()?;
+                index_writer.add_document(doc!(text_field=>"abc")).unwrap();
+                index_writer.commit().unwrap();
+                reader.reload().unwrap();
             }
-            index_writer.wait_merging_threads()?;
+            index_writer.wait_merging_threads().unwrap();

-            reader.reload()?;
+            reader.reload().unwrap();
             let num_segments = reader.searcher().segment_readers().len();
             assert!(num_segments <= 4);
             let num_components_except_deletes_and_tempstore =
                 crate::core::SegmentComponent::iterator().len() - 2;
-            assert_eq!(
-                num_segments * num_components_except_deletes_and_tempstore,
-                mmap_directory.get_cache_info().mmapped.len()
-            );
+            let max_num_mmapped = num_components_except_deletes_and_tempstore * num_segments;
+            assert_eventually(|| {
+                let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
+                if num_mmapped > max_num_mmapped {
+                    Some(format!(
+                        "Expected at most {max_num_mmapped} mmapped files, got {num_mmapped}"
+                    ))
+                } else {
+                    None
+                }
+            });
         }
-        assert!(mmap_directory.get_cache_info().mmapped.is_empty());
-        Ok(())
+        // This test failed on CI. The last Mmap is dropped from the merging thread so there might
+        // be a race condition indeed.
+        assert_eventually(|| {
+            let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
+            if num_mmapped > 0 {
+                Some(format!("Expected no mmapped files, got {num_mmapped}"))
+            } else {
+                None
+            }
+        });
     }
 }

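The new `assert_eventually` helper encodes a poll-until-quiet pattern: the closure returns `Some(message)` while the condition is still violated and `None` once it holds. A usage sketch outside the mmap context, assuming the helper above is in scope (the `Cell` is only there because the helper takes a `Fn` closure):

    use std::cell::Cell;

    fn example() {
        let remaining = Cell::new(3);
        assert_eventually(|| {
            let left = remaining.get();
            if left > 0 {
                remaining.set(left - 1);
                Some(format!("still waiting, {left} attempts left"))
            } else {
                None // condition finally holds; the assertion passes
            }
        });
    }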
@@ -26,7 +26,6 @@ pub use ownedbytes::OwnedBytes;
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
-pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
 pub use self::file_slice::{FileHandle, FileSlice};
 pub use self::ram_directory::RamDirectory;
 pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
Some files were not shown because too many files have changed in this diff.