Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-28 04:52:55 +00:00)

Compare commits: issue/483 ... githubacti (288 commits)
(The compare view listed 288 commits by abbreviated SHA only, from 6eb7c7f419 through e99d1a2355; the author, date, and message columns were not captured in this mirror, so the table is omitted here.)
.github/FUNDING.yml (vendored, new file, +12 lines)

```yaml
# These are supported funding model platforms

github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
```
.github/workflows/ci.yml (vendored, new file, +28 lines)

```yaml
name: Tantivy CI

on: [push]

jobs:
  test:
    name: Test Suite
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - uses: actions-rs/cargo@v1
        with:
          command: test
      - uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check
      - run: rustup component add clippy
      - uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: -- -D warnings
```
.github/workflows/coveralls.yml (vendored, new file, +66 lines)

```yaml
on: [push]

name: Code coverage with grcov

jobs:
  grcov:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
          #- macOS-latest
          #- windows-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          override: true
          profile: minimal

      - name: Execute tests
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --all --lib
        env:
          CARGO_INCREMENTAL: 0
          RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Cpanic=abort -Zpanic_abort_tests"

      # Note that the `actions-rs/grcov` Action can install `grcov` too,
      # but can't use faster installation methods yet.
      # As a temporary experiment, the `actions-rs/install` Action is plugged in here.
      # Consider **NOT** copying that into your workflow;
      # use `actions-rs/grcov` only.
      - name: Pre-installing grcov
        uses: actions-rs/install@v0.1
        with:
          crate: grcov
          use-tool-cache: true

      - name: Gather coverage data
        id: coverage
        uses: actions-rs/grcov@v0.1
        with:
          coveralls-token: ${{ secrets.COVERALLS_TOKEN }}

      - name: Coveralls upload
        uses: coverallsapp/github-action@master
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          parallel: true
          path-to-lcov: ${{ steps.coverage.outputs.report }}

  grcov_finalize:
    runs-on: ubuntu-latest
    needs: grcov
    steps:
      - name: Coveralls finalization
        uses: coverallsapp/github-action@master
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          parallel-finished: true
```
.gitignore (vendored, 1 line added)

```diff
@@ -1,4 +1,5 @@
 tantivy.iml
+proptest-regressions
 *.swp
 target
 target/debug
```
.travis.yml (32 lines changed)

```diff
@@ -10,7 +10,7 @@ env:
   global:
     - CRATE_NAME=tantivy
     - TRAVIS_CARGO_NIGHTLY_FEATURE=""
-    - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=
+    # - secure: eC8HjTi1wgRVCsMAeXEXt8Ckr0YBSGOEnQkkW4/Nde/OZ9jJjz2nmP1ELQlDE7+czHub2QvYtDMG0parcHZDx/Kus0yvyn08y3g2rhGIiE7y8OCvQm1Mybu2D/p7enm6shXquQ6Z5KRfRq+18mHy80wy9ABMA/ukEZdvnfQ76/Een8/Lb0eHaDoXDXn3PqLVtByvSfQQ7OhS60dEScu8PWZ6/l1057P5NpdWbMExBE7Ro4zYXNhkJeGZx0nP/Bd4Jjdt1XfPzMEybV6NZ5xsTILUBFTmOOt603IsqKGov089NExqxYu5bD3K+S4MzF1Nd6VhomNPJqLDCfhlymJCUj5n5Ku4yidlhQbM4Ej9nGrBalJnhcjBjPua5tmMF2WCxP9muKn/2tIOu1/+wc0vMf9Yd3wKIkf5+FtUxCgs2O+NslWvmOMAMI/yD25m7hb4t1IwE/4Bk+GVcWJRWXbo0/m6ZUHzRzdjUY2a1qvw7C9udzdhg7gcnXwsKrSWi2NjMiIVw86l+Zim0nLpKIN41sxZHLaFRG63Ki8zQ/481LGn32awJ6i3sizKS0WD+N1DfR2qYMrwYHaMN0uR0OFXYTJkFvTFttAeUY3EKmRKAuMhmO2YRdSr4/j/G5E9HMc1gSGJj6PxgpQU7EpvxRsmoVAEJr0mszmOj9icGHep/FM=

 addons:
   apt:
@@ -38,20 +38,21 @@ matrix:
     # Linux
     #- env: TARGET=aarch64-unknown-linux-gnu
     #- env: TARGET=i686-unknown-linux-gnu
-    - env: TARGET=x86_64-unknown-linux-gnu CODECOV=1
+    - env: TARGET=x86_64-unknown-linux-gnu CODECOV=1 #UPLOAD_DOCS=1
     # - env: TARGET=x86_64-unknown-linux-musl CODECOV=1

     # OSX
-    - env: TARGET=x86_64-apple-darwin
-      os: osx
+    #- env: TARGET=x86_64-apple-darwin
+    #  os: osx

 before_install:
   - set -e
   - rustup self update
+  - rustup component add rustfmt

 install:
   - sh ci/install.sh
   - source ~/.cargo/env || true
   - env | grep "TRAVIS"

 before_script:
   - export PATH=$HOME/.cargo/bin:$PATH
@@ -60,14 +61,25 @@ before_script:

 script:
   - bash ci/script.sh
+  - cargo fmt --all -- --check

 before_deploy:
   - sh ci/before_deploy.sh

 cache: cargo
 before_cache:
   # Travis can't cache files that are not readable by "others"
   - chmod -R a+r $HOME/.cargo
+after_success:
+  # Needs GH_TOKEN env var to be set in travis settings
+  - if [[ -v GH_TOKEN ]]; then echo "GH TOKEN IS SET"; else echo "GH TOKEN NOT SET"; fi
+  - if [[ -v UPLOAD_DOCS ]]; then cargo doc; cargo doc-upload; else echo "doc upload disabled."; fi
+
+#cache: cargo
+#before_cache:
+#  # Travis can't cache files that are not readable by "others"
+#  - chmod -R a+r $HOME/.cargo
+#  - find ./target/debug -type f -maxdepth 1 -delete
+#  - rm -f ./target/.rustc_info.json
+#  - rm -fr ./target/debug/{deps,.fingerprint}/tantivy*
+#  - rm -r target/debug/examples/
+#  - ls -1 examples/ | sed -e 's/\.rs$//' | xargs -I "{}" find target/* -name "*{}*" -type f -delete

 #branches:
 #  only:
@@ -77,4 +89,4 @@ before_cache:

 notifications:
   email:
-    on_success: never
+    on_success: never
```
CHANGELOG.md (189 lines changed; the new releases are prepended)

Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (Block-WAND information is added in the skiplist).
The index size increase is minor, as this information is only added for
full blocks.
If you have a massive index for which reindexing is not an option, please contact me
so that we can discuss possible solutions.

- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mutable, and they are not required to be `Sync + Send`.
- `MMapDirectory::open` does not return a `Result` anymore.
- Change in the `DocSet` and `Scorer` API (@fulmicoton).
  A freshly created `DocSet` points directly at its first doc. A sentinel value called `TERMINATED` marks the end of a `DocSet`.
  `.advance()` returns the new `DocId`. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)`, which returns the resulting `DocId`.
  As a result, iterating through a `DocSet` now looks as follows:

```rust
let mut doc = docset.doc();
while doc != TERMINATED {
    // ...
    doc = docset.advance();
}
```
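The `seek` method makes leapfrog-style intersection straightforward under the same API. The sketch below is illustrative only (it is not from the changelog) and assumes `seek(target)` positions the `DocSet` on the first document greater than or equal to `target`, returning `TERMINATED` when the set is exhausted:

```rust
use tantivy::{DocSet, TERMINATED};

// Count documents present in both docsets by leapfrogging with `seek`.
// Hedged sketch written against the 0.13 DocSet API described above.
fn count_intersection<A: DocSet, B: DocSet>(left: &mut A, right: &mut B) -> usize {
    let mut count = 0;
    let mut doc = left.doc(); // a freshly created DocSet already points at its first doc
    while doc != TERMINATED {
        let right_doc = right.seek(doc);
        if right_doc == doc {
            count += 1;
            doc = left.advance();
        } else {
            // Jump `left` forward to the next candidate proposed by `right`;
            // seeking to TERMINATED simply ends the loop.
            doc = left.seek(right_doc);
        }
    }
    count
}
```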

The change made it possible to greatly simplify a lot of the docset code.
- Misc internal optimizations and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the `Top(.*)Collector`s (@robyoung); see the sketch after this list.
- Added Block-WAND. Performance of top-K queries over term unions should be greatly improved. (@fulmicoton, and special thanks
  to the PISA team for answering all my questions!)
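A hedged sketch of the offset option mentioned above, assuming `and_offset` is the builder method this entry refers to (check the reference doc for the released signature):

```rust
use tantivy::collector::TopDocs;

// Build a collector for the second page of results: skip the 10 best
// hits, then keep the next 10. `and_offset` is assumed to be the
// offset builder added in 0.13.
fn second_page_collector() -> TopDocs {
    TopDocs::with_limit(10).and_offset(10)
}
```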

Tantivy 0.12.0
======================
- Removed static dispatch in tokenizers for simplicity. (#762)
- Added backward iteration for the `TermDictionary` stream. (@halvorboe)
- Fixed a performance issue when searching for the posting lists of a missing term (@audunhalland)
- Added a configurable maximum number of docs (10M by default) for a segment to be considered for merge (@hntd187, landed by @halvorboe #713)
- Important bugfix #777, causing tantivy to retain memory mappings. (diagnosed by @poljar)
- Added support for field boosting. (#547, @fulmicoton)

## How to update?

Crates relying on a custom tokenizer, or registering a tokenizer in the manager, will require some
minor changes. See https://github.com/tantivy-search/tantivy/blob/master/examples/custom_tokenizer.rs
for a code sample.

Tantivy 0.11.3
=======================
- Fixed DateTime as a fast field (#735)

Tantivy 0.11.2
=======================
- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
- Exposed a constructor for `WatchHandle` (#731)

Tantivy 0.11.1
=====================
- Bug fix #729


Tantivy 0.11.0
=====================

- Added f64 field. Internally reuses the u64 code the same way i64 does (@fdb-hiroshima)
- Various bugfixes in the query parser.
- Better handling of hyphens in the query parser. (#609)
- Better handling of whitespace.
- Closes #498 - added support for Elastic-style unbounded range queries for alphanumeric types, e.g. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik); see the sketch after this list
- API change around `Box<BoxableTokenizer>`. See details in #629
- Avoid rebuilding the Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Added a footer with some metadata to index files. #605 (@fdb-hiroshima)
- Added a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
- Added handling of pre-tokenized text fields (#642), which enables users to
  load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
- Fixed a crash when committing multiple times with deleted documents. #681 (@brainlock)
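As a hedged illustration of the unbounded range syntax: the schema and field name below are hypothetical, but the query string is one of those quoted in the entry above, and `parse_query` is the same parser entry point used throughout the examples in this compare:

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::{Index, Result};

// Count documents whose (hypothetical) indexed "weight" field is >= 70.5.
fn count_heavy_docs(index: &Index) -> Result<usize> {
    let weight = index.schema().get_field("weight").unwrap();
    let query_parser = QueryParser::for_index(index, vec![weight]);
    // Elastic-style unbounded range, parsed by the regular query parser.
    let query = query_parser.parse_query("weight:>=70.5")?;
    let reader = index.reader()?;
    reader.searcher().search(&query, &Count)
}
```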

## How to update?

- The index format is changed. You are required to reindex your data to use tantivy 0.11.
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regexes are now compiled when the `RegexQuery` instance is built. As a result, building one can now return
  an error, and handling the `Result` is required (see the sketch after this list).
- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
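A minimal hedged sketch of the fallible construction; `from_pattern` is assumed here to be the constructor the entry refers to:

```rust
use tantivy::query::RegexQuery;
use tantivy::schema::Field;

// The regex is compiled here, so an invalid pattern surfaces as an
// error at construction time rather than at search time (assumption:
// `from_pattern` is the fallible constructor described above).
fn title_regex_query(title: Field) -> tantivy::Result<RegexQuery> {
    RegexQuery::from_pattern("franken.*", title)
}
```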

Tantivy 0.10.2
=====================

- Closes #656. Solved a memory leak.


Tantivy 0.10.1
=====================

- Closes #544. A few users experienced problems with the directory watching system.
  Avoid watching the mmap directory until someone effectively creates a reader that uses
  this functionality.


Tantivy 0.10.0
=====================

*The Tantivy 0.10.0 index format is compatible with the index format in 0.9.0.*

- Added an API to easily tweak or entirely replace the
  default score. See `TopDocs::tweak_score` and `TopScore::custom_score` (@pmasurel)
- Added an ASCII folding filter (@drusellers)
- Bugfix in `query.count` in the presence of deletes (@pmasurel)
- Added `.explain(...)` to `Query` and `Weight` (@pmasurel)
- Added an efficient way to `delete_all_documents` in `IndexWriter` (@petr-tik).
  All segments are simply removed.

Minor
---------
- Switched to Rust 2018 (@uvd)
- Small simplification of the code.
  Calling `.freq()` or `.doc()` when `.advance()` has never been called
  on segment postings should panic from now on.
- Tokens exceeding `u16::max_value() - 4` chars are discarded silently instead of panicking.
- Fast fields are now preloaded when the `SegmentReader` is created.
- `IndexMeta` is now public. (@hntd187)
- `IndexWriter` `add_document`, `delete_term`. `IndexWriter` is `Sync`, making it possible to use it with an
  `Arc<RwLock<IndexWriter>>`. `add_document` and `delete_term` only require a read lock. (@pmasurel)
- Introduced `Opstamp` as an expressive type alias for `u64`. (@petr-tik)
- The stamper now relies on `AtomicU64` on all platforms (@petr-tik)
- Bugfix - files get deleted slightly earlier
- Compilation resource usage improved (@fdb-hiroshima)

## How to update?

Your program should be usable as is.

### Fast fields

Fast fields used to be accessed directly from the `SegmentReader`.
The API changed: you are now required to acquire your fast field reader via
`segment_reader.fast_fields()`, and use one of the typed methods (see the sketch after this list):
- `.u64()`, `.i64()` if your field is single-valued;
- `.u64s()`, `.i64s()` if your field is multi-valued;
- `.bytes()` if your field is a bytes fast field.
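A short sketch of the new accessor, mirroring the `StatsCollector` change shown further down in this compare (the summing logic itself is illustrative):

```rust
use tantivy::schema::Field;
use tantivy::SegmentReader;

// Acquire a typed fast field reader through the 0.10 `fast_fields()` entry
// point; returns None if `field` is not a u64 fast field.
fn sum_u64_fast_field(segment_reader: &SegmentReader, field: Field) -> Option<u64> {
    let ff = segment_reader.fast_fields().u64(field)?;
    Some((0..segment_reader.max_doc()).map(|doc| ff.get(doc)).sum())
}
```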


Tantivy 0.9.0
=====================
*The 0.9.0 index format is not compatible with the
previous index format.*
- MAJOR BUGFIX:
  Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
- Removed most unsafe code (@fulmicoton)
- Indexer memory footprint improved (VInt compression, inlining the first block). (@fulmicoton)
- Stemming in other languages possible (@pentlander)
- Segments with no docs are deleted earlier (@barrotsteindev)
- Added grouped add and delete operations.
  They are guaranteed to happen together (i.e. they cannot be split by a commit).
  In addition, adds are guaranteed to happen on the same segment. (@elbow-jason)
- Removed `INT_STORED` and `INT_INDEXED`. It is now possible to use `STORED` and `INDEXED`
  for int fields. (@fulmicoton)
- Added DateTime field (@barrotsteindev)
- Added IndexReader. By default, the index is reloaded automatically upon new commits (@fulmicoton)
- SIMD linear search within blocks (@fulmicoton)

## How to update?

tantivy 0.9 brought some API breaking changes.
To update from tantivy 0.8, you will need to go through the following steps.

- `schema::INT_INDEXED` and `schema::INT_STORED` should be replaced by `schema::INDEXED` and `schema::STORED`.
- The index does not hold the pool of searchers anymore. You are required to create an intermediary object called
  an `IndexReader` for this.

```rust
// Create the reader. You typically need to create one reader for the entire
// lifetime of your program.
let reader = index.reader()?;

// Acquiring a searcher (previously `index.searcher()`) is now written:
let searcher = reader.searcher();

// With the default setting of the reader, you are not required to
// call `index.load_searchers()` anymore.
//
// The IndexReader will pick up the change automatically, regardless
// of whether the update was done in a different process or not.
// If this behavior is not wanted, you can create your reader with
// `ReloadPolicy::Manual`, and manually decide when to reload the index
// by calling `reader.reload()?`.
```


Tantivy 0.8.2
=====================
Fixed the build for x86_64 platforms. (#496)
No need to update from 0.8.1 if tantivy
is building on your platform.


Tantivy 0.8.1
=====================

Cargo.toml (85 lines changed)

```diff
@@ -1,63 +1,68 @@
 [package]
 name = "tantivy"
-version = "0.9.0-dev"
+version = "0.13.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
 description = """Search engine library"""
-documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
+documentation = "https://docs.rs/tantivy/"
 homepage = "https://github.com/tantivy-search/tantivy"
 repository = "https://github.com/tantivy-search/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2018"

 [dependencies]
-base64 = "0.10.0"
+base64 = "0.12.0"
 byteorder = "1.0"
-lazy_static = "1"
-regex = "1.0"
-fst = {version="0.3", default-features=false}
-fst-regex = { version="0.2" }
+crc32fast = "1.2.0"
+once_cell = "1.0"
+regex = {version = "1.3.0", default-features = false, features = ["std"]}
+tantivy-fst = "0.3"
 memmap = {version = "0.7", optional=true}
 lz4 = {version="1.20", optional=true}
-snap = {version="0.2"}
+snap = "1"
 atomicwrites = {version="0.2.2", optional=true}
 tempfile = "3.0"
 log = "0.4"
 combine = "3"
-tempdir = "0.3"
-serde = "1.0"
-serde_derive = "1.0"
+serde = {version="1.0", features=["derive"]}
 serde_json = "1.0"
 num_cpus = "1.2"
 fs2 = {version="0.4", optional=true}
 itertools = "0.8"
-levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 bit-set = "0.5"
-uuid = { version = "0.7", features = ["v4", "serde"] }
-crossbeam = "0.5"
-futures = "0.1"
-futures-cpupool = "0.1"
+levenshtein_automata = "0.2"
+notify = {version="4", optional=true}
+uuid = { version = "0.8", features = ["v4", "serde"] }
+crossbeam = "0.7"
+futures = {version = "0.3", features=["thread-pool"]}
 owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
-rust-stemmers = "1"
-downcast = { version="0.9" }
-matches = "0.1"
-bitpacking = "0.5"
-census = "0.2"
+rust-stemmers = "1.2"
+downcast-rs = { version="1.0" }
+tantivy-query-grammar = { version="0.13", path="./query-grammar" }
+bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
+census = "0.4"
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"
 htmlescape = "0.3.1"
-fail = "0.2"
-scoped-pool = "1.0"
+fail = "0.4"
 murmurhash32 = "0.2"
 chrono = "0.4"
+smallvec = "1.0"
+rayon = "1"

 [target.'cfg(windows)'.dependencies]
-winapi = "0.2"
+winapi = "0.3"

 [dev-dependencies]
-rand = "0.6"
+rand = "0.7"
 maplit = "1"
+matches = "0.1.8"
+proptest = "0.10"

+[dev-dependencies.fail]
+version = "0.4"
+features = ["failpoints"]

 [profile.release]
 opt-level = 3
@@ -69,14 +74,28 @@ debug-assertions = true
 overflow-checks = true

 [features]
-# by default no-fail is disabled. We manually enable it when running tests.
-default = ["mmap", "no_fail"]
-mmap = ["fst/mmap", "atomicwrites", "fs2"]
+default = ["mmap"]
+mmap = ["atomicwrites", "fs2", "memmap", "notify"]
 lz4-compression = ["lz4"]
-no_fail = ["fail/no_fail"]
+failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
+wasm-bindgen = ["uuid/wasm-bindgen"]
+scoref64 = [] # scores are f64 instead of f32. was introduced to debug blockwand.

+[workspace]
+members = ["query-grammar"]

 [badges]
 travis-ci = { repository = "tantivy-search/tantivy" }

+# Following the "fail" crate best practices, we isolate
+# tests that define specific behavior in fail check points
+# in a different binary.
+#
+# We do that because fail relies on a global definition of
+# failpoints behavior and hence, it is incompatible with
+# multithreading.
+[[test]]
+name = "failpoints"
+path = "tests/failpoints/mod.rs"
+required-features = ["fail/failpoints"]
```

Makefile (new file, +3 lines)

```make
test:
	echo "Run test only... No examples."
	cargo test --tests --lib
```

README.md (110 lines changed)

````diff
@@ -4,6 +4,7 @@
 [](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [](https://opensource.org/licenses/MIT)
 [](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
 [](https://crates.io/crates/tantivy)
 [](https://saythanks.io/to/fulmicoton)

@@ -17,74 +18,123 @@
 [](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/6)
 [](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/7)

 [](https://www.patreon.com/fulmicoton)

-**Tantivy** is a **full text search engine library** written in rust.
+**Tantivy** is a **full text search engine library** written in Rust.

-It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
+It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
 an off-the-shelf search engine server, but rather a crate that can be used
 to build such a search engine.

 Tantivy is, in fact, strongly inspired by Lucene's design.

+# Benchmark
+
+The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
+performance for different types of queries / collections.
+
+In general, Tantivy tends to be
+- slower than Lucene on union with a Top-K due to Block-WAND optimization.
+- faster than Lucene on intersection and phrase queries.
+
+Your mileage WILL vary depending on the nature of queries and their load.
+
 # Features

 - Full-text search
 - Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy) and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
 - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
 - Tiny startup time (<10ms), perfect for command line tools
-- BM25 scoring (the same as lucene)
-- Natural query language `(michael AND jackson) OR "king of pop"`
-- Phrase queries search (`"michael jackson"`)
+- BM25 scoring (the same as Lucene)
+- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
+- Phrase queries search (e.g. `"michael jackson"`)
 - Incremental indexing
 - Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
 - Mmap directory
-- SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
-- Single valued and multivalued u64 and i64 fast fields (equivalent of doc values in Lucene)
+- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
+- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
 - `&[u8]` fast fields
+- Text, i64, u64, f64, dates, and hierarchical facet fields
 - LZ4 compressed document store
 - Range queries
 - Faceted search
 - Configurable indexing (optional term frequency and position indexing)
 - Cheesy logo with a horse

-# Non-features
+## Non-features

-- Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
+- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
 library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
-are within the scope of tantivy.
+are within the scope of Tantivy.

 # Supported OS and compiler

-Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.

 # Getting started

-- [tantivy's simple search example](http://fulmicoton.com/tantivy-examples/simple_search.html)
-- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
-`tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
-index documents and search via the CLI or a small server with a REST API.
-It will walk you through getting a wikipedia search engine up and running in a few minutes.
-- [reference doc]
-  - [For the last released version](https://docs.rs/tantivy/)
-  - [For the last master branch](https://tantivy-search.github.io/tantivy/tantivy/index.html)
+Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.

-# Compiling
+- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
+- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
+index documents, and search via the CLI or a small server with a REST API.
+It walks you through getting a wikipedia search engine up and running in a few minutes.
+- [Reference doc for the last released version](https://docs.rs/tantivy/)

-## Development
+# How can I support this project?

-Tantivy compiles on stable rust but requires `Rust >= 1.27`.
-To check out and run tests, you can simply run:
+There are many ways to support this project.
+
+- Use Tantivy and tell us about your experience on [Gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
+- Report bugs
+- Write a blog post
+- Help with documentation by asking questions or submitting PRs
+- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
+- Talk about Tantivy around you
+- Drop a word on [](https://saythanks.io/to/fulmicoton) or even [](https://www.patreon.com/fulmicoton)
+
+# Contributing code
+
+We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
+
+## Clone and build locally
+
+Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
+To check out and run tests, you can simply run:

 ```bash
 git clone https://github.com/tantivy-search/tantivy.git
 cd tantivy
 cargo build
 ```

-## Running tests
+## Run tests

 Some tests will not run with just `cargo test` because of `fail-rs`.
-To run the tests exhaustively, run `./run-tests.sh`.
+To run the tests exhaustively, run `./run-tests.sh`.

-# Contribute
+## Debug

-Send me an email (paul.masurel at gmail.com) if you want to contribute to tantivy.
+You might find it useful to step through the programme with a debugger.
+
+### A failing test
+
+Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`:
+
+```bash
+find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
+```
+
+Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:
+
+```bash
+$gdb run --test-threads 1 --test $NAME_OF_TEST
+```
+
+### An example
+
+By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:
+
+```bash
+rust-gdb target/debug/examples/$EXAMPLE_NAME
+$ gdb run
+```
````

(file name not captured in the mirror)

```diff
@@ -18,5 +18,5 @@ install:
 build: false

 test_script:
-  - REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap -- --test-threads 1
+  - REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features mmap
   - REM SET RUST_BACKTRACE=1 & cargo build --examples
```

(file name not captured in the mirror)

```diff
@@ -7,7 +7,7 @@ set -ex
 main() {
     if [ ! -z $CODECOV ]; then
         echo "Codecov"
-        cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
+        cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
     else
         echo "Build"
         cross build --target $TARGET
@@ -15,7 +15,8 @@ main() {
         return
     fi
     echo "Test"
-    cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
+    cross test --target $TARGET --no-default-features --features mmap
+    cross test --target $TARGET --no-default-features --features mmap query-grammar
     fi
     for example in $(ls examples/*.rs)
     do
```

(example file, name not captured in the mirror)

```diff
@@ -5,27 +5,23 @@
 //
 // We will :
 //   - define our schema
-//   = create an index in a directory
-//   - index few documents in our index
-//   - search for the best document matchings "sea whale"
-//   - retrieve the best document original content.
-
-extern crate tempdir;
+//   - create an index in a directory
+//   - index a few documents into our index
+//   - search for the best document matching a basic query
+//   - retrieve the best document's original content.

 // ---
 // Importing tantivy...
-#[macro_use]
-extern crate tantivy;
 use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
 use tantivy::schema::*;
-use tantivy::Index;
-use tempdir::TempDir;
+use tantivy::{doc, Index, ReloadPolicy};
+use tempfile::TempDir;

 fn main() -> tantivy::Result<()> {
     // Let's create a temporary directory for the
     // sake of this example
-    let index_path = TempDir::new("tantivy_example_dir")?;
+    let index_path = TempDir::new()?;
@@ -34,7 +30,7 @@ fn main() -> tantivy::Result<()> {
     // and for each field, its type and "the way it should
     // be indexed".

-    // first we need to define a schema ...
+    // First we need to define a schema ...
     let mut schema_builder = Schema::builder();

     // Our first field is title.
@@ -49,7 +45,7 @@ fn main() -> tantivy::Result<()> {
     //
     // `STORED` means that the field will also be saved
     // in a compressed, row-oriented key-value store.
-    // This store is useful to reconstruct the
+    // This store is useful for reconstructing the
     // documents that were selected during the search phase.
     schema_builder.add_text_field("title", TEXT | STORED);
@@ -58,8 +54,7 @@ fn main() -> tantivy::Result<()> {
     // need to be able to be able to retrieve it
     // for our application.
     //
-    // We can make our index lighter and
-    // by omitting `STORED` flag.
+    // We can make our index lighter by omitting the `STORED` flag.
     schema_builder.add_text_field("body", TEXT);

     let schema = schema_builder.build();
@@ -72,7 +67,7 @@ fn main() -> tantivy::Result<()> {
     // with our schema in the directory.
     let index = Index::create_in_dir(&index_path, schema.clone())?;

-    // To insert document we need an index writer.
+    // To insert a document we will need an index writer.
     // There must be only one writer at a time.
     // This single `IndexWriter` is already
     // multithreaded.
@@ -150,8 +145,8 @@ fn main() -> tantivy::Result<()> {
     // At this point our documents are not searchable.
     //
     //
-    // We need to call .commit() explicitly to force the
-    // index_writer to finish processing the documents in the queue,
+    // We need to call `.commit()` explicitly to force the
+    // `index_writer` to finish processing the documents in the queue,
     // flush the current index to the disk, and advertise
     // the existence of new documents.
     //
@@ -163,31 +158,40 @@ fn main() -> tantivy::Result<()> {
     // persistently indexed.
     //
     // In the scenario of a crash or a power failure,
-    // tantivy behaves as if has rolled back to its last
+    // tantivy behaves as if it has rolled back to its last
     // commit.

     // # Searching
     //
+    // ### Searcher
+    //
-    // Let's search our index. Start by reloading
-    // searchers in the index. This should be done
-    // after every `commit()`.
-    index.load_searchers()?;
+    // A reader is required first in order to search an index.
+    // It acts as a `Searcher` pool that reloads itself,
+    // depending on a `ReloadPolicy`.
+    //
+    // For a search server you will typically create one reader for the entire lifetime of your
+    // program, and acquire a new searcher for every single request.
+    //
+    // In the code below, we rely on the 'ON_COMMIT' policy: the reader
+    // will reload the index automatically after each commit.
+    let reader = index
+        .reader_builder()
+        .reload_policy(ReloadPolicy::OnCommit)
+        .try_into()?;

     // We now need to acquire a searcher.
-    // Some search experience might require more than
-    // one query.
     //
-    // The searcher ensure that we get to work
-    // with a consistent version of the index.
+    // A searcher points to a snapshotted, immutable version of the index.
+    //
+    // Some search experience might require more than
+    // one query. Using the same searcher ensures that all of these queries will run on the
+    // same version of the index.
     //
     // Acquiring a `searcher` is very cheap.
     //
-    // You should acquire a searcher every time you
-    // start processing a request and
+    // You should acquire a searcher every time you start processing a request and
     // and release it right after your query is finished.
-    let searcher = index.searcher();
+    let searcher = reader.searcher();
@@ -197,7 +201,7 @@ fn main() -> tantivy::Result<()> {
     // in both title and body.
     let query_parser = QueryParser::for_index(&index, vec![title, body]);

-    // QueryParser may fail if the query is not in the right
+    // `QueryParser` may fail if the query is not in the right
     // format. For user facing applications, this can be a problem.
     // A ticket has been opened regarding this problem.
     let query = query_parser.parse_query("sea whale")?;
@@ -213,7 +217,7 @@ fn main() -> tantivy::Result<()> {
     //
     // We are not interested in all of the documents but
     // only in the top 10. Keeping track of our top 10 best documents
-    // is the role of the TopDocs.
+    // is the role of the `TopDocs` collector.

     // We can now perform our query.
     let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
@@ -224,7 +228,6 @@ fn main() -> tantivy::Result<()> {
     // Since the body field was not configured as stored,
     // the document returned will only contain
     // a title.
-
     for (_score, doc_address) in top_docs {
         let retrieved_doc = searcher.doc(doc_address)?;
         println!("{}", schema.to_json(&retrieved_doc));
```

(example file, name not captured in the mirror)

```diff
@@ -7,19 +7,14 @@
 // Of course, you can have a look at the tantivy's built-in collectors
 // such as the `CountCollector` for more examples.

-extern crate tempdir;
-
 // ---
 // Importing tantivy...
-#[macro_use]
-extern crate tantivy;
 use tantivy::collector::{Collector, SegmentCollector};
 use tantivy::fastfield::FastFieldReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::Field;
-use tantivy::schema::{Schema, FAST, INT_INDEXED, TEXT};
-use tantivy::Index;
-use tantivy::SegmentReader;
+use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
+use tantivy::{doc, Index, Score, SegmentReader, TantivyError};

 #[derive(Default)]
 struct Stats {
@@ -75,9 +70,18 @@ impl Collector for StatsCollector {
     fn for_segment(
         &self,
         _segment_local_id: u32,
-        segment: &SegmentReader,
+        segment_reader: &SegmentReader,
     ) -> tantivy::Result<StatsSegmentCollector> {
-        let fast_field_reader = segment.fast_field_reader(self.field)?;
+        let fast_field_reader = segment_reader
+            .fast_fields()
+            .u64(self.field)
+            .ok_or_else(|| {
+                let field_name = segment_reader.schema().get_field_name(self.field);
+                TantivyError::SchemaError(format!(
+                    "Field {:?} is not a u64 fast field.",
+                    field_name
+                ))
+            })?;
         Ok(StatsSegmentCollector {
             fast_field_reader,
             stats: Stats::default(),
@@ -110,7 +114,7 @@ struct StatsSegmentCollector {
 impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;

-    fn collect(&mut self, doc: u32, _score: f32) {
+    fn collect(&mut self, doc: u32, _score: Score) {
         let value = self.fast_field_reader.get(doc) as f64;
         self.stats.count += 1;
         self.stats.sum += value;
@@ -137,7 +141,7 @@ fn main() -> tantivy::Result<()> {
     // products, and with a name, a description, and a price.
     let product_name = schema_builder.add_text_field("name", TEXT);
     let product_description = schema_builder.add_text_field("description", TEXT);
-    let price = schema_builder.add_u64_field("price", INT_INDEXED | FAST);
+    let price = schema_builder.add_u64_field("price", INDEXED | FAST);
     let schema = schema_builder.build();

     // # Indexing documents
@@ -170,9 +174,9 @@ fn main() -> tantivy::Result<()> {
         price => 5_200u64
     ));
     index_writer.commit()?;
-    index.load_searchers()?;
-
-    let searcher = index.searcher();
+
+    let reader = index.reader()?;
+    let searcher = reader.searcher();
     let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]);

     // here we want to get a hit on the 'ken' in Frankenstein
```

(example file, name not captured in the mirror)

```diff
@@ -2,14 +2,11 @@
 //
 // In this example, we'll see how to define a tokenizer pipeline
 // by aligning a bunch of `TokenFilter`.
-
-#[macro_use]
-extern crate tantivy;
 use tantivy::collector::TopDocs;
 use tantivy::query::QueryParser;
 use tantivy::schema::*;
 use tantivy::tokenizer::NgramTokenizer;
-use tantivy::Index;
+use tantivy::{doc, Index};

 fn main() -> tantivy::Result<()> {
     // # Defining the schema
@@ -91,9 +88,9 @@ fn main() -> tantivy::Result<()> {
     increasing confidence in the success of my undertaking."#
     ));
     index_writer.commit()?;
-    index.load_searchers()?;
-
-    let searcher = index.searcher();
+
+    let reader = index.reader()?;
+    let searcher = reader.searcher();

     // The query parser can interpret human queries.
     // Here, if the user does not specify which
```

(example file, name not captured in the mirror)

```diff
@@ -8,18 +8,19 @@
 //
 // ---
 // Importing tantivy...
-#[macro_use]
-extern crate tantivy;
 use tantivy::collector::TopDocs;
 use tantivy::query::TermQuery;
 use tantivy::schema::*;
-use tantivy::Index;
+use tantivy::{doc, Index, IndexReader};

 // A simple helper function to fetch a single document
 // given its id from our index.
 // It will be helpful to check our work.
-fn extract_doc_given_isbn(index: &Index, isbn_term: &Term) -> tantivy::Result<Option<Document>> {
-    let searcher = index.searcher();
+fn extract_doc_given_isbn(
+    reader: &IndexReader,
+    isbn_term: &Term,
+) -> tantivy::Result<Option<Document>> {
+    let searcher = reader.searcher();

     // This is the simplest query you can think of.
     // It matches all of the documents containing a specific term.
@@ -85,12 +86,12 @@ fn main() -> tantivy::Result<()> {
         isbn => "978-9176370711",
     ));
     index_writer.commit()?;
-    index.load_searchers()?;
+    let reader = index.reader()?;

     let frankenstein_isbn = Term::from_field_text(isbn, "978-9176370711");

     // Oops our frankenstein doc seems mispelled
-    let frankenstein_doc_misspelled = extract_doc_given_isbn(&index, &frankenstein_isbn)?.unwrap();
+    let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
     assert_eq!(
         schema.to_json(&frankenstein_doc_misspelled),
         r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
@@ -129,10 +130,10 @@ fn main() -> tantivy::Result<()> {
     // Everything happened as if the document was updated.
     index_writer.commit()?;
     // We reload our searcher to make our change available to clients.
-    index.load_searchers()?;
+    reader.reload()?;

     // No more typo!
-    let frankenstein_new_doc = extract_doc_given_isbn(&index, &frankenstein_isbn)?.unwrap();
+    let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
     assert_eq!(
         schema.to_json(&frankenstein_new_doc),
         r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,
```

(example file, name not captured in the mirror)

```diff
@@ -10,71 +10,103 @@
 //   - search for the best document matchings "sea whale"
 //   - retrieve the best document original content.

-extern crate tempdir;
-
 // ---
 // Importing tantivy...
-#[macro_use]
-extern crate tantivy;
 use tantivy::collector::FacetCollector;
-use tantivy::query::AllQuery;
+use tantivy::query::{AllQuery, TermQuery};
 use tantivy::schema::*;
-use tantivy::Index;
+use tantivy::{doc, Index};

 fn main() -> tantivy::Result<()> {
-    // Let's create a temporary directory for the
-    // sake of this example
-    let index_path = TempDir::new("tantivy_facet_example_dir")?;
+    // Let's create a temporary directory for the sake of this example
     let mut schema_builder = Schema::builder();

-    schema_builder.add_text_field("name", TEXT | STORED);
-
-    // this is our faceted field
-    schema_builder.add_facet_field("tags");
+    let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
+    // this is our faceted field: its scientific classification
+    let classification = schema_builder.add_facet_field("classification");

     let schema = schema_builder.build();
+    let index = Index::create_in_ram(schema);

-    let index = Index::create_in_dir(&index_path, schema.clone())?;
-
-    let mut index_writer = index.writer(50_000_000)?;
-
-    let name = schema.get_field("name").unwrap();
-    let tags = schema.get_field("tags").unwrap();
+    let mut index_writer = index.writer(30_000_000)?;

-    // For convenience, tantivy also comes with a macro to
-    // reduce the boilerplate above.
     index_writer.add_document(doc!(
-        name => "the ditch",
-        tags => Facet::from("/pools/north")
+        name => "Cat",
+        classification => Facet::from("/Felidae/Felinae/Felis")
     ));

     index_writer.add_document(doc!(
-        name => "little stacey",
-        tags => Facet::from("/pools/south")
+        name => "Canada lynx",
+        classification => Facet::from("/Felidae/Felinae/Lynx")
     ));
+    index_writer.add_document(doc!(
+        name => "Cheetah",
+        classification => Facet::from("/Felidae/Felinae/Acinonyx")
+    ));
+    index_writer.add_document(doc!(
+        name => "Tiger",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Lion",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Jaguar",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Sunda clouded leopard",
+        classification => Facet::from("/Felidae/Pantherinae/Neofelis")
+    ));
+    index_writer.add_document(doc!(
+        name => "Fossa",
+        classification => Facet::from("/Eupleridae/Cryptoprocta")
+    ));

     index_writer.commit()?;

-    index.load_searchers()?;
+    let reader = index.reader()?;
+    let searcher = reader.searcher();
+    {
+        let mut facet_collector = FacetCollector::for_field(classification);
+        facet_collector.add_facet("/Felidae");
+        let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
+        // This lists all of the facet counts, right below "/Felidae".
+        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
+        assert_eq!(
+            facets,
+            vec![
+                (&Facet::from("/Felidae/Felinae"), 3),
+                (&Facet::from("/Felidae/Pantherinae"), 4),
+            ]
+        );
+    }

-    let searcher = index.searcher();
+    // Facets are also searchable.
+    //
+    // For instance a common UI pattern is to allow the user to click on a facet link
+    // (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
+    //
+    // The search would then look as follows.

-    let mut facet_collector = FacetCollector::for_field(tags);
-    facet_collector.add_facet("/pools");
-
-    let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
-
-    // This lists all of the facet counts
-    let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
-    assert_eq!(
-        facets,
-        vec![
-            (&Facet::from("/pools/north"), 1),
-            (&Facet::from("/pools/south"), 1),
-        ]
-    );
+    // Check the reference doc for different ways to create a `Facet` object.
+    {
+        let facet = Facet::from_text("/Felidae/Pantherinae");
+        let facet_term = Term::from_facet(classification, &facet);
+        let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
+        let mut facet_collector = FacetCollector::for_field(classification);
+        facet_collector.add_facet("/Felidae/Pantherinae");
+        let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
+        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
+        assert_eq!(
+            facets,
+            vec![
+                (&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
+                (&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
+            ]
+        );
+    }

     Ok(())
 }
-
-use tempdir::TempDir;
```
98
examples/faceted_search_with_tweaked_score.rs
Normal file
98
examples/faceted_search_with_tweaked_score.rs
Normal file
@@ -0,0 +1,98 @@
|
||||
use std::collections::HashSet;
use tantivy::collector::TopDocs;
use tantivy::doc;
use tantivy::query::BooleanQuery;
use tantivy::schema::*;
use tantivy::{DocId, Index, Score, SegmentReader};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();

    let title = schema_builder.add_text_field("title", STORED);
    let ingredient = schema_builder.add_facet_field("ingredient");

    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(30_000_000)?;

    index_writer.add_document(doc!(
        title => "Fried egg",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/oil"),
    ));
    index_writer.add_document(doc!(
        title => "Scrambled egg",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/butter"),
        ingredient => Facet::from("/ingredient/milk"),
        ingredient => Facet::from("/ingredient/salt"),
    ));
    index_writer.add_document(doc!(
        title => "Egg rolls",
        ingredient => Facet::from("/ingredient/egg"),
        ingredient => Facet::from("/ingredient/garlic"),
        ingredient => Facet::from("/ingredient/salt"),
        ingredient => Facet::from("/ingredient/oil"),
        ingredient => Facet::from("/ingredient/tortilla-wrap"),
        ingredient => Facet::from("/ingredient/mushroom"),
    ));
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    {
        let facets = vec![
            Facet::from("/ingredient/egg"),
            Facet::from("/ingredient/oil"),
            Facet::from("/ingredient/garlic"),
            Facet::from("/ingredient/mushroom"),
        ];
        let query = BooleanQuery::new_multiterms_query(
            facets
                .iter()
                .map(|key| Term::from_facet(ingredient, &key))
                .collect(),
        );
        let top_docs_by_custom_score =
            TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
                let mut ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
                let facet_dict = ingredient_reader.facet_dict();

                let query_ords: HashSet<u64> = facets
                    .iter()
                    .filter_map(|key| facet_dict.term_ord(key.encoded_str()))
                    .collect();

                let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);

                move |doc: DocId, original_score: Score| {
                    ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
                    let missing_ingredients = facet_ords_buffer
                        .iter()
                        .filter(|ord| !query_ords.contains(ord))
                        .count();
                    let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);

                    original_score * tweak
                }
            });
        let top_docs = searcher.search(&query, &top_docs_by_custom_score)?;

        let titles: Vec<String> = top_docs
            .iter()
            .map(|(_, doc_id)| {
                searcher
                    .doc(*doc_id)
                    .unwrap()
                    .get_first(title)
                    .unwrap()
                    .text()
                    .unwrap()
                    .to_owned()
            })
            .collect();
        assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
    }
    Ok(())
}
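The tweak above downranks recipes by the number of ingredients that were not part of the query. A tiny standalone check of that factor (plain Rust, nothing tantivy-specific):

// Each ingredient that is not part of the query divides the score by 4.
fn tweak(missing_ingredients: usize) -> f32 {
    1.0 / 4_f32.powi(missing_ingredients as i32)
}

fn main() {
    assert_eq!(tweak(0), 1.0); // every ingredient matched
    assert_eq!(tweak(1), 0.25); // one ingredient outside the query
    assert_eq!(tweak(2), 0.0625); // each extra ingredient costs another factor of 4
}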
examples/integer_range_search.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
// # Searching a range on an indexed int field.
//
// Below is an example of creating an indexed integer field in your schema.
// You can use RangeQuery to get a Count of all occurrences in a given range.
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INDEXED};
use tantivy::{doc, Index, Result};

fn run() -> Result<()> {
    // For the sake of simplicity, this schema will only have 1 field
    let mut schema_builder = Schema::builder();

    // `INDEXED` is a short-hand to indicate that our field should be "searchable".
    let year_field = schema_builder.add_u64_field("year", INDEXED);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let reader = index.reader()?;
    {
        let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
        for year in 1950u64..2019u64 {
            index_writer.add_document(doc!(year_field => year));
        }
        index_writer.commit()?;
        // The index will be a range of years
    }
    reader.reload()?;
    let searcher = reader.searcher();
    // The end is excluded, i.e. here we are searching up to 1969
    let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
    // Uses a Count collector to sum the total number of docs in the range
    let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
    assert_eq!(num_60s_books, 10);
    Ok(())
}

fn main() {
    run().unwrap()
}
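A quick plain-Rust illustration of why the half-open range above matches exactly 10 documents:

fn main() {
    // Same bounds as the RangeQuery above: 1970 itself is excluded.
    let years: Vec<u64> = (1950u64..2019u64).collect();
    let in_the_sixties = years.iter().filter(|&&y| (1960..1970).contains(&y)).count();
    assert_eq!(in_the_sixties, 10); // 1960, 1961, ..., 1969
}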
@@ -9,11 +9,8 @@

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::{DocId, DocSet, Postings};
use tantivy::{doc, DocSet, Index, Postings, TERMINATED};

fn main() -> tantivy::Result<()> {
    // We first create a schema for the sake of the
@@ -33,9 +30,9 @@ fn main() -> tantivy::Result<()> {
    index_writer.add_document(doc!(title => "The modern Promotheus"));
    index_writer.commit()?;

    index.load_searchers()?;
    let reader = index.reader()?;

    let searcher = index.searcher();
    let searcher = reader.searcher();

    // A tantivy index is actually a collection of segments.
    // Similarly, a searcher just wraps a list of `segment_reader`s.
@@ -65,12 +62,11 @@ fn main() -> tantivy::Result<()> {
    {
        // this buffer will be used to request positions
        let mut positions: Vec<u32> = Vec::with_capacity(100);
        while segment_postings.advance() {
            // the number of times the term appears in the document.
            let doc_id: DocId = segment_postings.doc(); //< do not try to access this before calling advance once.

        let mut doc_id = segment_postings.doc();
        while doc_id != TERMINATED {
            // This MAY contain deleted documents as well.
            if segment_reader.is_deleted(doc_id) {
                doc_id = segment_postings.advance();
                continue;
            }

@@ -89,6 +85,7 @@ fn main() -> tantivy::Result<()> {
            // Doc 2: TermFreq 1: [0]
            // ```
            println!("Doc {}: TermFreq {}: {:?}", doc_id, term_freq, positions);
            doc_id = segment_postings.advance();
        }
    }
}
@@ -120,11 +117,16 @@ fn main() -> tantivy::Result<()> {
    if let Some(mut block_segment_postings) =
        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
    {
        while block_segment_postings.advance() {
        loop {
            let docs = block_segment_postings.docs();
            if docs.is_empty() {
                break;
            }
            // Once again these docs MAY contain deleted documents as well.
            let docs = block_segment_postings.docs();
            // Prints `Docs [0, 2].`
            println!("Docs {:?}", docs);
            block_segment_postings.advance();
        }
    }
}
examples/multiple_producer.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
// # Indexing from different threads.
//
// It is fairly common to have to index from different threads.
// Tantivy forbids creating more than one `IndexWriter` at a time.
//
// This `IndexWriter` itself has its own multithreaded layer, so managing your own
// indexing threads will not help. However, it can still be useful for some applications.
//
// For instance, if preparing documents to send to tantivy before indexing is the bottleneck of
// your application, it is reasonable to have multiple threads.
//
// Another very common reason to want to index from multiple threads is implementing a webserver
// with CRUD capabilities. The server framework will most likely handle requests from
// different threads.
//
// The recommended way to address both of these use cases is to wrap your `IndexWriter` in an
// `Arc<RwLock<IndexWriter>>`.
//
// While this is counterintuitive, adding and deleting documents do not require mutability
// over the `IndexWriter`, so several threads will be able to do these operations concurrently.
//
// The example below does not represent an actual real-life use case (who would spawn a thread to
// index a single document?), but aims at demonstrating the mechanism that makes indexing
// from several threads possible.

// ---
// Importing tantivy...
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter, Opstamp};

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let index_writer: Arc<RwLock<IndexWriter>> = Arc::new(RwLock::new(index.writer(50_000_000)?));

    // # First indexing thread.
    let index_writer_clone_1 = index_writer.clone();
    thread::spawn(move || {
        // we index the document 100 times... for the sake of the example.
        for i in 0..100 {
            let opstamp = index_writer_clone_1
                .read().unwrap() //< A read lock is sufficient here.
                .add_document(
                    doc!(
                    title => "Of Mice and Men",
                    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                             bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                             over the yellow sands in the sunlight before reaching the narrow pool. On one \
                             side of the river the golden foothill slopes curve up to the strong and rocky \
                             Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                             fresh and green with every spring, carrying in their lower leaf junctures the \
                             debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                             limbs and branches that arch over the pool"
                ));
            println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(20));
        }
    });

    // # Second indexing thread.
    let index_writer_clone_2 = index_writer.clone();
    // For convenience, tantivy also comes with a macro to
    // reduce the boilerplate above.
    thread::spawn(move || {
        // we index the document 100 times... for the sake of the example.
        for i in 0..100 {
            // A read lock is sufficient here.
            let opstamp = {
                let index_writer_rlock = index_writer_clone_2.read().unwrap();
                index_writer_rlock.add_document(doc!(
                    title => "Manufacturing consent",
                    body => "Some great book description..."
                ))
            };
            println!("add doc {} from thread 2 - opstamp {}", i, opstamp);
            thread::sleep(Duration::from_millis(10));
        }
    });

    // # In the main thread, we commit 10 times, once every 500ms.
    for _ in 0..10 {
        let opstamp: Opstamp = {
            // Committing or rolling back, on the other hand, requires a write lock,
            // which will block other threads.
            let mut index_writer_wlock = index_writer.write().unwrap();
            index_writer_wlock.commit().unwrap()
        };
        println!("committed with opstamp {}", opstamp);
        thread::sleep(Duration::from_millis(500));
    }

    Ok(())
}
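A minimal plain-`std` sketch of the locking pattern used above: `add_document` only needs `&self`, so concurrent callers can share read locks, while `commit` takes the exclusive write lock.

use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let shared = Arc::new(RwLock::new(0u64));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || {
                // Read locks can be held by many threads at once,
                // like concurrent add_document calls.
                let _guard = shared.read().unwrap();
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // The write lock is exclusive, like commit().
    *shared.write().unwrap() += 1;
    assert_eq!(*shared.read().unwrap(), 1);
}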
examples/pre_tokenized_text.rs (new file, 139 lines)
@@ -0,0 +1,139 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes you might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use the tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text

use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;

fn pre_tokenize_text(text: &str) -> Vec<Token> {
    let mut token_stream = SimpleTokenizer.token_stream(text);
    let mut tokens = vec![];
    while token_stream.advance() {
        tokens.push(token_stream.token().clone());
    }
    tokens
}

fn main() -> tantivy::Result<()> {
    let index_path = TempDir::new()?;

    let mut schema_builder = Schema::builder();

    schema_builder.add_text_field("title", TEXT | STORED);
    schema_builder.add_text_field("body", TEXT);

    let schema = schema_builder.build();

    let index = Index::create_in_dir(&index_path, schema.clone())?;

    let mut index_writer = index.writer(50_000_000)?;

    // We can create a document manually, by setting the fields
    // one by one in a Document object.
    let title = schema.get_field("title").unwrap();
    let body = schema.get_field("body").unwrap();

    let title_text = "The Old Man and the Sea";
    let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";

    // Content of our first document.
    // We create a `PreTokenizedString` which contains the original text and a vector of tokens.
    let title_tok = PreTokenizedString {
        text: String::from(title_text),
        tokens: pre_tokenize_text(title_text),
    };

    println!(
        "Original text: \"{}\" and tokens: {:?}",
        title_tok.text, title_tok.tokens
    );

    let body_tok = PreTokenizedString {
        text: String::from(body_text),
        tokens: pre_tokenize_text(body_text),
    };

    // Now let's create a document and add our `PreTokenizedString`
    let old_man_doc = doc!(title => title_tok, body => body_tok);

    // ... now let's just add it to the IndexWriter
    index_writer.add_document(old_man_doc);

    // Pre-tokenized text can also be fed as JSON
    let short_man_json = r#"{
        "title":[{
            "text":"The Old Man",
            "tokens":[
                {"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
                {"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
                {"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
            ]
        }]
    }"#;

    let short_man_doc = schema.parse_document(&short_man_json)?;

    index_writer.add_document(short_man_doc);

    // Let's commit changes
    index_writer.commit()?;

    // ... and now is the time to query our index

    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    let searcher = reader.searcher();

    // We want to get documents with the token "Man"; we will use a TermQuery to do it.
    // Using PreTokenizedString means the tokens are stored as-is, avoiding stemming
    // and lowercasing, which preserves full words in their original form.
    let query = TermQuery::new(
        Term::from_field_text(title, "Man"),
        IndexRecordOption::Basic,
    );

    let (top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 2);

    // Now let's print out the results.
    // Note that the tokens are not stored along with the original text
    // in the document store.
    for (_score, doc_address) in top_docs {
        let retrieved_doc = searcher.doc(doc_address)?;
        println!("Document: {}", schema.to_json(&retrieved_doc));
    }

    // Contrary to the previous query, when we search for the "man" term we
    // should get no results, as it's not one of the indexed tokens. SimpleTokenizer
    // only splits text on whitespace / punctuation.

    let query = TermQuery::new(
        Term::from_field_text(title, "man"),
        IndexRecordOption::Basic,
    );

    let (_top_docs, count) = searcher
        .search(&query, &(TopDocs::with_limit(2), Count))
        .unwrap();

    assert_eq!(count, 0);

    Ok(())
}
@@ -4,23 +4,19 @@
// your hit result.
// Snippets are extracts of a target document, returned in HTML format.
// The keywords searched by the user are highlighted with a `<b>` tag.
extern crate tempdir;

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::{Snippet, SnippetGenerator};
use tempdir::TempDir;
use tantivy::{doc, Index, Snippet, SnippetGenerator};
use tempfile::TempDir;

fn main() -> tantivy::Result<()> {
    // Let's create a temporary directory for the
    // sake of this example
    let index_path = TempDir::new("tantivy_example_dir")?;
    let index_path = TempDir::new()?;

    // # Defining the schema
    let mut schema_builder = Schema::builder();
@@ -48,9 +44,8 @@ fn main() -> tantivy::Result<()> {
    // ...
    index_writer.commit()?;

    index.load_searchers()?;

    let searcher = index.searcher();
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title, body]);
    let query = query_parser.parse_query("sycamore spring")?;
@@ -9,17 +9,13 @@
// - add a few stop words
// - index a few documents in our index

extern crate tempdir;

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::Index;
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // this example assumes you understand the content in `basic_search`
@@ -54,7 +50,7 @@ fn main() -> tantivy::Result<()> {

    // This tokenizer lowercases all of the text (to help with stop word matching)
    // then removes all instances of `the` and `and` from the corpus
    let tokenizer = SimpleTokenizer
    let tokenizer = TextAnalyzer::from(SimpleTokenizer)
        .filter(LowerCaser)
        .filter(StopWordFilter::remove(vec![
            "the".to_string(),
@@ -96,9 +92,9 @@ fn main() -> tantivy::Result<()> {

    index_writer.commit()?;

    index.load_searchers()?;
    let reader = index.reader()?;

    let searcher = index.searcher();
    let searcher = reader.searcher();

    let query_parser = QueryParser::for_index(&index, vec![title, body]);
@@ -1,4 +1,4 @@
extern crate tantivy;
use tantivy;
use tantivy::schema::*;

// # Document from json
@@ -12,7 +12,7 @@ fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT | STORED);
    schema_builder.add_text_field("body", TEXT);
    schema_builder.add_u64_field("year", INT_INDEXED);
    schema_builder.add_u64_field("year", INDEXED);
    let schema = schema_builder.build();

    // Let's assume we have a json-serialized document.
query-grammar/Cargo.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
[package]
name = "tantivy-query-grammar"
version = "0.13.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"

[dependencies]
combine = {version="4", default-features=false, features=[] }
query-grammar/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Tantivy Query Grammar

This crate is used by tantivy to parse queries.
query-grammar/src/lib.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;

pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};

pub struct Error;

pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
    let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
    Ok(user_input_ast)
}
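A hypothetical caller of the entry point above (note that `Error` carries no payload and derives nothing, so the result is matched rather than unwrapped):

use tantivy_query_grammar::parse_query;

fn main() {
    match parse_query("title:diary AND year:[1990 TO 2000]") {
        Ok(ast) => println!("{:?}", ast), // UserInputAST implements Debug
        Err(_) => eprintln!("invalid query"),
    }
}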
query-grammar/src/occur.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
use std::fmt;
use std::fmt::Write;

/// Defines whether a term in a query must be present,
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur {
    /// For a given document to be considered for scoring,
    /// at least one of the terms with a Should or Must
    /// constraint must be present in the document.
    Should,
    /// Documents without the term are excluded from the search.
    Must,
    /// Documents that contain the term are excluded from the
    /// search.
    MustNot,
}

impl Occur {
    /// Returns the one-char prefix symbol for this `Occur`.
    /// - `Should` => '?'
    /// - `Must` => '+'
    /// - `MustNot` => '-'
    fn to_char(self) -> char {
        match self {
            Occur::Should => '?',
            Occur::Must => '+',
            Occur::MustNot => '-',
        }
    }

    /// Compose two occur values.
    pub fn compose(left: Occur, right: Occur) -> Occur {
        match (left, right) {
            (Occur::Should, _) => right,
            (Occur::Must, Occur::MustNot) => Occur::MustNot,
            (Occur::Must, _) => Occur::Must,
            (Occur::MustNot, Occur::MustNot) => Occur::Must,
            (Occur::MustNot, _) => Occur::MustNot,
        }
    }
}

impl fmt::Display for Occur {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_char(self.to_char())
    }
}

#[cfg(test)]
mod test {
    use crate::Occur;

    #[test]
    fn test_occur_compose() {
        assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
        assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
        assert_eq!(
            Occur::compose(Occur::Should, Occur::MustNot),
            Occur::MustNot
        );
        assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
        assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
        assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
        assert_eq!(
            Occur::compose(Occur::MustNot, Occur::Should),
            Occur::MustNot
        );
        assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
        assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
    }
}
query-grammar/src/query_grammar.rs (new file, 510 lines)
@@ -0,0 +1,510 @@
use super::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
use crate::Occur;
use combine::error::StringStreamError;
use combine::parser::char::{char, digit, letter, space, spaces, string};
use combine::parser::Parser;
use combine::{
    attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
};

fn field<'a>() -> impl Parser<&'a str, Output = String> {
    (
        letter(),
        many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
    )
        .skip(char(':'))
        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
}

fn word<'a>() -> impl Parser<&'a str, Output = String> {
    (
        satisfy(|c: char| {
            !c.is_whitespace()
                && !['-', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
        }),
        many(satisfy(|c: char| {
            !c.is_whitespace() && ![':', '^', '{', '}', '"', '[', ']', '(', ')'].contains(&c)
        })),
    )
        .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
        .and_then(|s: String| match s.as_str() {
            "OR" | "AND" | "NOT" => Err(StringStreamError::UnexpectedParse),
            _ => Ok(s),
        })
}

fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
    let phrase = char('"').with(many1(satisfy(|c| c != '"'))).skip(char('"'));
    phrase.or(word())
}

fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
    let term_val_with_field = negative_number().or(term_val());
    (field(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
        field_name: Some(field_name),
        phrase,
    })
}

fn literal<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let term_default_field = term_val().map(|phrase| UserInputLiteral {
        field_name: None,
        phrase,
    });
    attempt(term_query())
        .or(term_default_field)
        .map(UserInputLeaf::from)
}

fn negative_number<'a>() -> impl Parser<&'a str, Output = String> {
    (
        char('-'),
        many1(digit()),
        optional((char('.'), many1(digit()))),
    )
        .map(|(s1, s2, s3): (char, String, Option<(char, String)>)| {
            if let Some(('.', s3)) = s3 {
                format!("{}{}.{}", s1, s2, s3)
            } else {
                format!("{}{}", s1, s2)
            }
        })
}

fn spaces1<'a>() -> impl Parser<&'a str, Output = ()> {
    skip_many1(space())
}

/// Function that parses a range out of a Stream
/// Supports ranges like:
/// [5 TO 10], {5 TO 10}, [* TO 10], [10 TO *], {10 TO *], >5, <=10
/// [a TO *], [a TO c], [abc TO bcd}
fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
    let range_term_val = || {
        word()
            .or(negative_number())
            .or(char('*').with(value("*".to_string())))
    };

    // check for an unbounded range in the form of <5, <=10, >5, >=5
    let elastic_unbounded_range = (
        choice([
            attempt(string(">=")),
            attempt(string("<=")),
            attempt(string("<")),
            attempt(string(">")),
        ])
        .skip(spaces()),
        range_term_val(),
    )
        .map(
            |(comparison_sign, bound): (&str, String)| match comparison_sign {
                ">=" => (UserInputBound::Inclusive(bound), UserInputBound::Unbounded),
                "<=" => (UserInputBound::Unbounded, UserInputBound::Inclusive(bound)),
                "<" => (UserInputBound::Unbounded, UserInputBound::Exclusive(bound)),
                ">" => (UserInputBound::Exclusive(bound), UserInputBound::Unbounded),
                // default case
                _ => (UserInputBound::Unbounded, UserInputBound::Unbounded),
            },
        );
    let lower_bound = (one_of("{[".chars()), range_term_val()).map(
        |(boundary_char, lower_bound): (char, String)| {
            if lower_bound == "*" {
                UserInputBound::Unbounded
            } else if boundary_char == '{' {
                UserInputBound::Exclusive(lower_bound)
            } else {
                UserInputBound::Inclusive(lower_bound)
            }
        },
    );
    let upper_bound = (range_term_val(), one_of("}]".chars())).map(
        |(higher_bound, boundary_char): (String, char)| {
            if higher_bound == "*" {
                UserInputBound::Unbounded
            } else if boundary_char == '}' {
                UserInputBound::Exclusive(higher_bound)
            } else {
                UserInputBound::Inclusive(higher_bound)
            }
        },
    );
    // return only lower and upper
    let lower_to_upper = (
        lower_bound.skip((spaces(), string("TO"), spaces())),
        upper_bound,
    );

    (
        optional(field()).skip(spaces()),
        // try the elastic syntax first; if it matches, the range is half unbounded
        attempt(elastic_unbounded_range).or(lower_to_upper),
    )
        .map(|(field, (lower, upper))|
            // Construct the leaf from the extracted field (optional)
            // and bounds
            UserInputLeaf::Range {
                field,
                lower,
                upper
            })
}

fn negate(expr: UserInputAST) -> UserInputAST {
    expr.unary(Occur::MustNot)
}

fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    parser(|input| {
        char('(')
            .with(ast())
            .skip(char(')'))
            .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
            .or(attempt(
                string("NOT").skip(spaces1()).with(leaf()).map(negate),
            ))
            .or(attempt(range().map(UserInputAST::from)))
            .or(literal().map(UserInputAST::from))
            .parse_stream(input)
            .into_result()
    })
}

fn occur_symbol<'a>() -> impl Parser<&'a str, Output = Occur> {
    char('-')
        .map(|_| Occur::MustNot)
        .or(char('+').map(|_| Occur::Must))
}

fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAST)> {
    (optional(occur_symbol()), boosted_leaf())
}

fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
    (many1(digit()), optional((char('.'), many1(digit())))).map(
        |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
            let mut float_str = int_part;
            if let Some((chr, decimal_str)) = decimal_part_opt {
                float_str.push(chr);
                float_str.push_str(&decimal_str);
            }
            float_str.parse::<f64>().unwrap()
        },
    )
}

fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
    (char('^'), positive_float_number()).map(|(_, boost)| boost)
}

fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
            UserInputAST::Boost(Box::new(leaf), boost)
        }
        _ => leaf,
    })
}

#[derive(Clone, Copy)]
enum BinaryOperand {
    Or,
    And,
}

fn binary_operand<'a>() -> impl Parser<&'a str, Output = BinaryOperand> {
    string("AND")
        .with(value(BinaryOperand::And))
        .or(string("OR").with(value(BinaryOperand::Or)))
}

fn aggregate_binary_expressions(
    left: UserInputAST,
    others: Vec<(BinaryOperand, UserInputAST)>,
) -> UserInputAST {
    let mut dnf: Vec<Vec<UserInputAST>> = vec![vec![left]];
    for (operator, operand_ast) in others {
        match operator {
            BinaryOperand::And => {
                if let Some(last) = dnf.last_mut() {
                    last.push(operand_ast);
                }
            }
            BinaryOperand::Or => {
                dnf.push(vec![operand_ast]);
            }
        }
    }
    if dnf.len() == 1 {
        UserInputAST::and(dnf.into_iter().next().unwrap()) //< safe
    } else {
        let conjunctions = dnf.into_iter().map(UserInputAST::and).collect();
        UserInputAST::or(conjunctions)
    }
}
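// Worked example: for the input `a OR b AND c`, `left` is `a` and `others` is
// [(Or, b), (And, c)]. `dnf` evolves [[a]] -> [[a], [b]] -> [[a], [b, c]], so
// the result is the disjunction (?"a" ?(+"b" +"c")), matching
// `test_parse_query_to_ast_binary_op` below.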
fn operand_leaf<'a>() -> impl Parser<&'a str, Output = (BinaryOperand, UserInputAST)> {
    (
        binary_operand().skip(spaces()),
        boosted_leaf().skip(spaces()),
    )
}

pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    let boolean_expr = (boosted_leaf().skip(spaces()), many1(operand_leaf()))
        .map(|(left, right)| aggregate_binary_expressions(left, right));
    let whitespace_separated_leaves = many1(occur_leaf().skip(spaces().silent())).map(
        |subqueries: Vec<(Option<Occur>, UserInputAST)>| {
            if subqueries.len() == 1 {
                let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
                match occur_opt.unwrap_or(Occur::Should) {
                    Occur::Must | Occur::Should => ast,
                    Occur::MustNot => UserInputAST::Clause(vec![(Some(Occur::MustNot), ast)]),
                }
            } else {
                UserInputAST::Clause(subqueries.into_iter().collect())
            }
        },
    );
    let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
    spaces().with(expr).skip(spaces())
}

pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
    spaces()
        .with(optional(ast()).skip(eof()))
        .map(|opt_ast| opt_ast.unwrap_or_else(UserInputAST::empty_query))
}

#[cfg(test)]
mod test {

    use super::*;
    use combine::parser::Parser;

    pub fn nearly_equals(a: f64, b: f64) -> bool {
        (a - b).abs() < 0.0005 * (a + b).abs()
    }

    fn assert_nearly_equals(expected: f64, val: f64) {
        assert!(
            nearly_equals(val, expected),
            "Got {}, expected {}.",
            val,
            expected
        );
    }

    #[test]
    fn test_occur_symbol() {
        assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
        assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
    }

    #[test]
    fn test_positive_float_number() {
        fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
            let (val, remaining) = positive_float_number().parse(float_str).unwrap();
            assert_eq!(remaining, expected_remaining);
            assert_nearly_equals(val, expected_val);
        }
        fn error_parse(float_str: &str) {
            assert!(positive_float_number().parse(float_str).is_err());
        }
        valid_parse("1.0", 1.0, "");
        valid_parse("1", 1.0, "");
        valid_parse("0.234234 aaa", 0.234234f64, " aaa");
        error_parse(".3332");
        error_parse("1.");
        error_parse("-1.");
    }

    fn test_parse_query_to_ast_helper(query: &str, expected: &str) {
        let query = parse_to_ast().parse(query).unwrap().0;
        let query_str = format!("{:?}", query);
        assert_eq!(query_str, expected);
    }

    fn test_is_parse_err(query: &str) {
        assert!(parse_to_ast().parse(query).is_err());
    }

    #[test]
    fn test_parse_empty_to_ast() {
        test_parse_query_to_ast_helper("", "<emptyclause>");
    }

    #[test]
    fn test_parse_query_to_ast_hyphen() {
        test_parse_query_to_ast_helper("\"www-form-encoded\"", "\"www-form-encoded\"");
        test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
        test_parse_query_to_ast_helper("www-form-encoded", "\"www-form-encoded\"");
    }

    #[test]
    fn test_parse_query_to_ast_not_op() {
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("NOT")),
            "Err(UnexpectedParse)"
        );
        test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
        test_parse_query_to_ast_helper("NOT a", "(-\"a\")");
    }

    #[test]
    fn test_boosting() {
        assert!(parse_to_ast().parse("a^2^3").is_err());
        assert!(parse_to_ast().parse("a^2^").is_err());
        test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
        test_parse_query_to_ast_helper("a^3 b^2", "(*(\"a\")^3 *(\"b\")^2)");
        test_parse_query_to_ast_helper("a^1", "\"a\"");
    }

    #[test]
    fn test_parse_query_to_ast_binary_op() {
        test_parse_query_to_ast_helper("a AND b", "(+\"a\" +\"b\")");
        test_parse_query_to_ast_helper("a OR b", "(?\"a\" ?\"b\")");
        test_parse_query_to_ast_helper("a OR b AND c", "(?\"a\" ?(+\"b\" +\"c\"))");
        test_parse_query_to_ast_helper("a AND b AND c", "(+\"a\" +\"b\" +\"c\")");
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("a OR b aaa")),
            "Err(UnexpectedParse)"
        );
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("a AND b aaa")),
            "Err(UnexpectedParse)"
        );
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("aaa a OR b ")),
            "Err(UnexpectedParse)"
        );
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("aaa ccc a OR b ")),
            "Err(UnexpectedParse)"
        );
    }

    #[test]
    fn test_parse_elastic_query_ranges() {
        test_parse_query_to_ast_helper("title: >a", "title:{\"a\" TO \"*\"}");
        test_parse_query_to_ast_helper("title:>=a", "title:[\"a\" TO \"*\"}");
        test_parse_query_to_ast_helper("title: <a", "title:{\"*\" TO \"a\"}");
        test_parse_query_to_ast_helper("title:<=a", "title:{\"*\" TO \"a\"]");
        test_parse_query_to_ast_helper("title:<=bsd", "title:{\"*\" TO \"bsd\"]");

        test_parse_query_to_ast_helper("weight: >70", "weight:{\"70\" TO \"*\"}");
        test_parse_query_to_ast_helper("weight:>=70", "weight:[\"70\" TO \"*\"}");
        test_parse_query_to_ast_helper("weight: <70", "weight:{\"*\" TO \"70\"}");
        test_parse_query_to_ast_helper("weight:<=70", "weight:{\"*\" TO \"70\"]");
        test_parse_query_to_ast_helper("weight: >60.7", "weight:{\"60.7\" TO \"*\"}");

        test_parse_query_to_ast_helper("weight: <= 70", "weight:{\"*\" TO \"70\"]");

        test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
    }

    #[test]
    fn test_occur_leaf() {
        let ((occur, ast), _) = super::occur_leaf().parse("+abc").unwrap();
        assert_eq!(occur, Some(Occur::Must));
        assert_eq!(format!("{:?}", ast), "\"abc\"");
    }

    #[test]
    fn test_range_parser() {
        // testing the range() parser separately
        let res = range().parse("title: <hello").unwrap().0;
        let expected = UserInputLeaf::Range {
            field: Some("title".to_string()),
            lower: UserInputBound::Unbounded,
            upper: UserInputBound::Exclusive("hello".to_string()),
        };
        let res2 = range().parse("title:{* TO hello}").unwrap().0;
        assert_eq!(res, expected);
        assert_eq!(res2, expected);
        let expected_weight = UserInputLeaf::Range {
            field: Some("weight".to_string()),
            lower: UserInputBound::Inclusive("71.2".to_string()),
            upper: UserInputBound::Unbounded,
        };

        let res3 = range().parse("weight: >=71.2").unwrap().0;
        let res4 = range().parse("weight:[71.2 TO *}").unwrap().0;
        assert_eq!(res3, expected_weight);
        assert_eq!(res4, expected_weight);
    }

    #[test]
    fn test_parse_query_to_trimming_spaces() {
        test_parse_query_to_ast_helper(" abc", "\"abc\"");
        test_parse_query_to_ast_helper("abc ", "\"abc\"");
        test_parse_query_to_ast_helper("( a OR abc)", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("a OR abc ", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("(a OR abc )", "(?\"a\" ?\"abc\")");
        test_parse_query_to_ast_helper("(a OR abc) ", "(?\"a\" ?\"abc\")");
    }

    #[test]
    fn test_parse_query_single_term() {
        test_parse_query_to_ast_helper("abc", "\"abc\"");
    }

    #[test]
    fn test_parse_query_default_clause() {
        test_parse_query_to_ast_helper("a b", "(*\"a\" *\"b\")");
    }

    #[test]
    fn test_parse_query_must_default_clause() {
        test_parse_query_to_ast_helper("+(a b)", "(*\"a\" *\"b\")");
    }

    #[test]
    fn test_parse_query_must_single_term() {
        test_parse_query_to_ast_helper("+d", "\"d\"");
    }

    #[test]
    fn test_single_term_with_field() {
        test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
    }

    #[test]
    fn test_single_term_with_float() {
        test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
    }

    #[test]
    fn test_must_clause() {
        test_parse_query_to_ast_helper("(+a +b)", "(+\"a\" +\"b\")");
    }

    #[test]
    fn test_parse_test_query_plus_a_b_plus_d() {
        test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
    }

    #[test]
    fn test_parse_test_query_other() {
        test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
        test_parse_query_to_ast_helper("+abc:toto", "abc:\"toto\"");
        test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+abc:\"toto\" -\"titi\")");
        test_parse_query_to_ast_helper("-abc:toto", "(-abc:\"toto\")");
        test_parse_query_to_ast_helper("abc:a b", "(*abc:\"a\" *\"b\")");
        test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
        test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
    }

    #[test]
    fn test_parse_query_with_range() {
        test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
        test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
        test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
        test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:{\"*\" TO \"toto\"}");
        test_parse_query_to_ast_helper("foo:[1 TO *}", "foo:[\"1\" TO \"*\"}");
        test_parse_query_to_ast_helper("foo:[1.1 TO *}", "foo:[\"1.1\" TO \"*\"}");
        test_is_parse_err("abc + ");
    }
}
@@ -1,8 +1,9 @@
use std::fmt;
use std::fmt::{Debug, Formatter};

use query::Occur;
use crate::Occur;

#[derive(PartialEq)]
pub enum UserInputLeaf {
    Literal(UserInputLiteral),
    All,
@@ -14,7 +15,7 @@ pub enum UserInputLeaf {
}

impl Debug for UserInputLeaf {
    fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
    fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> {
        match self {
            UserInputLeaf::Literal(literal) => literal.fmt(formatter),
            UserInputLeaf::Range {
@@ -35,13 +36,14 @@ impl Debug for UserInputLeaf {
    }
}

#[derive(PartialEq)]
pub struct UserInputLiteral {
    pub field_name: Option<String>,
    pub phrase: String,
}

impl fmt::Debug for UserInputLiteral {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match self.field_name {
            Some(ref field_name) => write!(formatter, "{}:\"{}\"", field_name, self.phrase),
            None => write!(formatter, "\"{}\"", self.phrase),
@@ -49,23 +51,27 @@ impl fmt::Debug for UserInputLiteral {
    }
}

#[derive(PartialEq)]
pub enum UserInputBound {
    Inclusive(String),
    Exclusive(String),
    Unbounded,
}

impl UserInputBound {
    fn display_lower(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
    fn display_lower(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match *self {
            UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word),
            UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word),
            UserInputBound::Unbounded => write!(formatter, "{{\"*\""),
        }
    }

    fn display_upper(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
    fn display_upper(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match *self {
            UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word),
            UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word),
            UserInputBound::Unbounded => write!(formatter, "\"*\"}}"),
        }
    }

@@ -73,38 +79,40 @@ impl UserInputBound {
        match *self {
            UserInputBound::Inclusive(ref contents) => contents,
            UserInputBound::Exclusive(ref contents) => contents,
            UserInputBound::Unbounded => &"*",
        }
    }
}

pub enum UserInputAST {
    Clause(Vec<UserInputAST>),
    Unary(Occur, Box<UserInputAST>),
    // Not(Box<UserInputAST>),
    // Should(Box<UserInputAST>),
    // Must(Box<UserInputAST>),
    Clause(Vec<(Option<Occur>, UserInputAST)>),
    Leaf(Box<UserInputLeaf>),
    Boost(Box<UserInputAST>, f64),
}

impl UserInputAST {
    pub fn unary(self, occur: Occur) -> UserInputAST {
        UserInputAST::Unary(occur, Box::new(self))
        UserInputAST::Clause(vec![(Some(occur), self)])
    }

    fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
        assert!(occur != Occur::MustNot);
        assert_ne!(occur, Occur::MustNot);
        assert!(!asts.is_empty());
        if asts.len() == 1 {
            asts.into_iter().next().unwrap() //< safe
        } else {
            UserInputAST::Clause(
                asts.into_iter()
                    .map(|ast: UserInputAST| ast.unary(occur))
                    .map(|ast: UserInputAST| (Some(occur), ast))
                    .collect::<Vec<_>>(),
            )
        }
    }

    pub fn empty_query() -> UserInputAST {
        UserInputAST::Clause(Vec::default())
    }

    pub fn and(asts: Vec<UserInputAST>) -> UserInputAST {
        UserInputAST::compose(Occur::Must, asts)
    }
@@ -114,42 +122,6 @@ impl UserInputAST {
    }
}

/*
impl UserInputAST {

    fn compose_occur(self, occur: Occur) -> UserInputAST {
        match self {
            UserInputAST::Not(other) => {
                let new_occur = compose_occur(Occur::MustNot, occur);
                other.simplify()
            }
            _ => {
                self
            }
        }
    }

    pub fn simplify(self) -> UserInputAST {
        match self {
            UserInputAST::Clause(els) => {
                if els.len() == 1 {
                    return els.into_iter().next().unwrap();
                } else {
                    return self;
                }
            }
            UserInputAST::Not(els) => {
                if els.len() == 1 {
                    return els.into_iter().next().unwrap();
                } else {
                    return self;
                }
            }
        }
    }
}
*/

impl From<UserInputLiteral> for UserInputLeaf {
    fn from(literal: UserInputLiteral) -> UserInputLeaf {
        UserInputLeaf::Literal(literal)
@@ -162,26 +134,38 @@ impl From<UserInputLeaf> for UserInputAST {
    }
}

fn print_occur_ast(
    occur_opt: Option<Occur>,
    ast: &UserInputAST,
    formatter: &mut fmt::Formatter,
) -> fmt::Result {
    if let Some(occur) = occur_opt {
        write!(formatter, "{}{:?}", occur, ast)?;
    } else {
        write!(formatter, "*{:?}", ast)?;
    }
    Ok(())
}

impl fmt::Debug for UserInputAST {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            UserInputAST::Clause(ref subqueries) => {
                if subqueries.is_empty() {
                    write!(formatter, "<emptyclause>")?;
                } else {
                    write!(formatter, "(")?;
                    write!(formatter, "{:?}", &subqueries[0])?;
                    print_occur_ast(subqueries[0].0, &subqueries[0].1, formatter)?;
                    for subquery in &subqueries[1..] {
                        write!(formatter, " {:?}", subquery)?;
                        write!(formatter, " ")?;
                        print_occur_ast(subquery.0, &subquery.1, formatter)?;
                    }
                    write!(formatter, ")")?;
                }
                Ok(())
            }
            UserInputAST::Unary(ref occur, ref subquery) => {
                write!(formatter, "{}({:?})", occur.to_char(), subquery)
            }
            UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
            UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
        }
    }
}
@@ -1,2 +1,2 @@
#!/bin/bash
cargo test --no-default-features --features mmap -- --test-threads 1
cargo test
@@ -1,58 +1,40 @@
use super::Collector;
use collector::SegmentCollector;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use crate::collector::SegmentCollector;
use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;

/// `CountCollector` collector only counts how many
/// documents match the query.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::Count;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
///     let mut index_writer = index.writer(3_000_000)?;
///     index_writer.add_document(doc!(
///         title => "The Name of the Wind",
///     ));
///     index_writer.add_document(doc!(
///         title => "The Diary of Muadib",
///     ));
///     index_writer.add_document(doc!(
///         title => "A Dairy Cow",
///     ));
///     index_writer.add_document(doc!(
///         title => "The Diary of a Young Girl",
///     ));
///     index_writer.commit().unwrap();
/// }
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// index.load_searchers()?;
/// let searcher = index.searcher();
/// let mut index_writer = index.writer(3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
///
/// {
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let count = searcher.search(&query, &Count).unwrap();
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
///
/// assert_eq!(count, 2);
/// }
/// // Here comes the important part
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let count = searcher.search(&query, &Count).unwrap();
///
/// Ok(())
/// }
/// assert_eq!(count, 2);
/// ```
pub struct Count;

@@ -61,7 +43,11 @@ impl Collector for Count {

    type Child = SegmentCountCollector;

    fn for_segment(&self, _: SegmentLocalId, _: &SegmentReader) -> Result<SegmentCountCollector> {
    fn for_segment(
        &self,
        _: SegmentLocalId,
        _: &SegmentReader,
    ) -> crate::Result<SegmentCountCollector> {
        Ok(SegmentCountCollector::default())
    }

@@ -69,7 +55,7 @@ impl Collector for Count {
        false
    }

    fn merge_fruits(&self, segment_counts: Vec<usize>) -> Result<usize> {
    fn merge_fruits(&self, segment_counts: Vec<usize>) -> crate::Result<usize> {
        Ok(segment_counts.into_iter().sum())
    }
}
@@ -94,8 +80,8 @@ impl SegmentCollector for SegmentCountCollector {
#[cfg(test)]
mod tests {
    use super::{Count, SegmentCountCollector};
    use collector::Collector;
    use collector::SegmentCollector;
    use crate::collector::Collector;
    use crate::collector::SegmentCollector;

    #[test]
    fn test_count_collect_does_not_requires_scoring() {
@@ -110,20 +96,19 @@ mod tests {
    }
    {
        let mut count_collector = SegmentCountCollector::default();
        count_collector.collect(0u32, 1f32);
        count_collector.collect(0u32, 1.0);
        assert_eq!(count_collector.harvest(), 1);
    }
    {
        let mut count_collector = SegmentCountCollector::default();
        count_collector.collect(0u32, 1f32);
        count_collector.collect(0u32, 1.0);
        assert_eq!(count_collector.harvest(), 1);
    }
    {
        let mut count_collector = SegmentCountCollector::default();
        count_collector.collect(0u32, 1f32);
        count_collector.collect(1u32, 1f32);
        count_collector.collect(0u32, 1.0);
        count_collector.collect(1u32, 1.0);
        assert_eq!(count_collector.harvest(), 2);
    }
}

}
src/collector/custom_score_top_collector.rs (new file, 125 lines)
@@ -0,0 +1,125 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
|
||||
use crate::collector::{Collector, SegmentCollector};
|
||||
use crate::{DocAddress, DocId, Score, SegmentReader};
|
||||
|
||||
pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
|
||||
custom_scorer: TCustomScorer,
|
||||
collector: TopCollector<TScore>,
|
||||
}
|
||||
|
||||
impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
|
||||
where
|
||||
TScore: Clone + PartialOrd,
|
||||
{
|
||||
pub(crate) fn new(
|
||||
custom_scorer: TCustomScorer,
|
||||
collector: TopCollector<TScore>,
|
||||
) -> CustomScoreTopCollector<TCustomScorer, TScore> {
|
||||
CustomScoreTopCollector {
|
||||
custom_scorer,
|
||||
collector,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A custom segment scorer makes it possible to define any kind of score
|
||||
/// for a given document belonging to a specific segment.
|
||||
///
|
||||
/// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
|
||||
pub trait CustomSegmentScorer<TScore>: 'static {
|
||||
/// Computes the score of a specific `doc`.
|
||||
fn score(&mut self, doc: DocId) -> TScore;
|
||||
}
|
||||
|
||||
/// `CustomScorer` makes it possible to define any kind of score.
|
||||
///
|
||||
/// The `CustomerScorer` itself does not make much of the computation itself.
|
||||
/// Instead, it helps constructing `Self::Child` instances that will compute
|
||||
/// the score at a segment scale.
|
||||
pub trait CustomScorer<TScore>: Sync {
|
||||
/// Type of the associated [`CustomSegmentScorer`](./trait.CustomSegmentScorer.html).
|
||||
type Child: CustomSegmentScorer<TScore>;
|
||||
/// Builds a child scorer for a specific segment. The child scorer is associated to
|
||||
/// a specific segment.
|
||||
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
|
||||
}
|
||||
|
||||
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
|
||||
where
|
||||
TCustomScorer: CustomScorer<TScore>,
|
||||
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
||||
{
|
||||
type Fruit = Vec<(TScore, DocAddress)>;
|
||||
|
||||
type Child = CustomScoreTopSegmentCollector<TCustomScorer::Child, TScore>;
|
||||
|
||||
fn for_segment(
|
||||
&self,
|
||||
segment_local_id: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> crate::Result<Self::Child> {
|
||||
let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
|
||||
let segment_collector = self
|
||||
.collector
|
||||
.for_segment(segment_local_id, segment_reader)?;
|
||||
Ok(CustomScoreTopSegmentCollector {
|
||||
segment_collector,
|
||||
segment_scorer,
|
||||
})
|
||||
}
|
||||
|
||||
fn requires_scoring(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
|
||||
self.collector.merge_fruits(segment_fruits)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CustomScoreTopSegmentCollector<T, TScore>
|
||||
where
|
||||
TScore: 'static + PartialOrd + Clone + Send + Sync + Sized,
|
||||
T: CustomSegmentScorer<TScore>,
|
||||
{
|
||||
segment_collector: TopSegmentCollector<TScore>,
|
||||
segment_scorer: T,
|
||||
}
|
||||
|
||||
impl<T, TScore> SegmentCollector for CustomScoreTopSegmentCollector<T, TScore>
|
||||
where
|
||||
TScore: 'static + PartialOrd + Clone + Send + Sync,
|
||||
T: 'static + CustomSegmentScorer<TScore>,
|
||||
{
|
||||
type Fruit = Vec<(TScore, DocAddress)>;
|
||||
|
||||
fn collect(&mut self, doc: DocId, _score: Score) {
|
||||
let score = self.segment_scorer.score(doc);
|
||||
self.segment_collector.collect(doc, score);
|
||||
}
|
||||
|
||||
fn harvest(self) -> Vec<(TScore, DocAddress)> {
|
||||
self.segment_collector.harvest()
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, TScore, T> CustomScorer<TScore> for F
|
||||
where
|
||||
F: 'static + Send + Sync + Fn(&SegmentReader) -> T,
|
||||
T: CustomSegmentScorer<TScore>,
|
||||
{
|
||||
type Child = T;
|
||||
|
||||
fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
|
||||
Ok((self)(segment_reader))
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, TScore> CustomSegmentScorer<TScore> for F
|
||||
where
|
||||
F: 'static + FnMut(DocId) -> TScore,
|
||||
{
|
||||
fn score(&mut self, doc: DocId) -> TScore {
|
||||
(self)(doc)
|
||||
}
|
||||
}
|
||||
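A sketch of what the two blanket impls above buy (the popularity field is an assumption, not part of this file): a pair of nested closures acts as a CustomScorer without declaring any new type.

// Hypothetical u64 fast field; `popularity_field` is assumed to exist in the schema.
let scorer = move |segment_reader: &SegmentReader| {
    let popularity_reader = segment_reader
        .fast_fields()
        .u64(popularity_field)
        .expect("popularity must be a u64 fast field");
    // The inner FnMut(DocId) -> u64 plays the role of CustomSegmentScorer.
    move |doc: DocId| popularity_reader.get(doc)
};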
@@ -1,9 +1,13 @@
use collector::Collector;
use collector::SegmentCollector;
use docset::SkipResult;
use fastfield::FacetReader;
use schema::Facet;
use schema::Field;
use crate::collector::Collector;
use crate::collector::SegmentCollector;
use crate::fastfield::FacetReader;
use crate::schema::Facet;
use crate::schema::Field;
use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::cmp::Ordering;
use std::collections::btree_map;
use std::collections::BTreeMap;
@@ -12,11 +16,6 @@ use std::collections::BinaryHeap;
use std::collections::Bound;
use std::iter::Peekable;
use std::{u64, usize};
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;

struct Hit<'a> {
    count: u64,
@@ -26,13 +25,13 @@ struct Hit<'a> {
impl<'a> Eq for Hit<'a> {}

impl<'a> PartialEq<Hit<'a>> for Hit<'a> {
    fn eq(&self, other: &Hit) -> bool {
    fn eq(&self, other: &Hit<'_>) -> bool {
        self.count == other.count
    }
}

impl<'a> PartialOrd<Hit<'a>> for Hit<'a> {
    fn partial_cmp(&self, other: &Hit) -> Option<Ordering> {
    fn partial_cmp(&self, other: &Hit<'_>) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
@@ -80,15 +79,12 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::FacetCollector;
/// use tantivy::query::AllQuery;
/// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// fn example() -> tantivy::Result<()> {
///     let mut schema_builder = Schema::builder();
///
///     // Facets have their own specific type.
@@ -122,17 +118,16 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///         facet => Facet::from("/lang/en"),
///         facet => Facet::from("/category/biography")
///     ));
///     index_writer.commit().unwrap();
///     index_writer.commit()?;
/// }
///
/// index.load_searchers()?;
/// let searcher = index.searcher();
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// {
///     let mut facet_collector = FacetCollector::for_field(facet);
///     let mut facet_collector = FacetCollector::for_field(facet);
///     facet_collector.add_facet("/lang");
///     facet_collector.add_facet("/category");
///     let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///     let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
///     // This lists all of the facet counts
///     let facets: Vec<(&Facet, u64)> = facet_counts
@@ -145,9 +140,9 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// }
///
/// {
///     let mut facet_collector = FacetCollector::for_field(facet);
///     let mut facet_collector = FacetCollector::for_field(facet);
///     facet_collector.add_facet("/category/fiction");
///     let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///     let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
///     // This lists all of the facet counts
///     let facets: Vec<(&Facet, u64)> = facet_counts
@@ -160,10 +155,10 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///     ]);
/// }
///
/// {
///     let mut facet_collector = FacetCollector::for_field(facet);
/// {
///     let mut facet_collector = FacetCollector::for_field(facet);
///     facet_collector.add_facet("/category/fiction");
///     let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
///     let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
///
///     // This lists all of the facet counts
///     let facets: Vec<(&Facet, u64)> = facet_counts.top_k("/category/fiction", 1);
@@ -174,6 +169,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
///
/// Ok(())
/// }
/// # assert!(example().is_ok());
/// ```
pub struct FacetCollector {
    field: Field,
@@ -191,6 +187,11 @@ pub struct FacetSegmentCollector {
    collapse_facet_ords: Vec<u64>,
}

enum SkipResult {
    Found,
    NotFound,
}

fn skip<'a, I: Iterator<Item = &'a Facet>>(
    target: &[u8],
    collapse_it: &mut Peekable<I>,
@@ -200,14 +201,14 @@ fn skip<'a, I: Iterator<Item = &'a Facet>>(
        Some(facet_bytes) => match facet_bytes.encoded_str().as_bytes().cmp(target) {
            Ordering::Less => {}
            Ordering::Greater => {
                return SkipResult::OverStep;
                return SkipResult::NotFound;
            }
            Ordering::Equal => {
                return SkipResult::Reached;
                return SkipResult::Found;
            }
        },
        None => {
            return SkipResult::End;
            return SkipResult::NotFound;
        }
    }
    collapse_it.next();
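A reading aid, not part of the diff: the walk in `skip` is the classic two-pointer merge over two sorted sequences. A standalone sketch of the same logic over plain byte vectors:

use std::cmp::Ordering;
use std::iter::Peekable;

// `sorted` plays the role of collapse_it: a sorted list of encoded facets.
fn skip_to(target: &[u8], sorted: &mut Peekable<std::slice::Iter<'_, Vec<u8>>>) -> bool {
    while let Some(key) = sorted.peek() {
        match key.as_slice().cmp(target) {
            Ordering::Less => {
                sorted.next(); // still before the target: keep advancing
            }
            Ordering::Equal => return true,    // SkipResult::Found
            Ordering::Greater => return false, // SkipResult::NotFound (overstepped)
        }
    }
    false // SkipResult::NotFound (iterator exhausted)
}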
@@ -264,8 +265,11 @@ impl Collector for FacetCollector {
        &self,
        _: SegmentLocalId,
        reader: &SegmentReader,
    ) -> Result<FacetSegmentCollector> {
        let facet_reader = reader.facet_reader(self.field)?;
    ) -> crate::Result<FacetSegmentCollector> {
        let field_name = reader.schema().get_field_name(self.field);
        let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
            TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
        })?;

        let mut collapse_mapping = Vec::new();
        let mut counts = Vec::new();
@@ -281,7 +285,7 @@ impl Collector for FacetCollector {
            // is positioned on a term that has not been processed yet.
            let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
            match skip_result {
                SkipResult::Reached => {
                SkipResult::Found => {
                    // we reached a facet we decided to collapse.
                    let collapse_depth = facet_depth(facet_streamer.key());
                    let mut collapsed_id = 0;
@@ -301,7 +305,7 @@ impl Collector for FacetCollector {
                }
                break;
            }
            SkipResult::End | SkipResult::OverStep => {
            SkipResult::NotFound => {
                collapse_mapping.push(0);
                if !facet_streamer.advance() {
                    break;
@@ -327,7 +331,7 @@ impl Collector for FacetCollector {
        false
    }

    fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> Result<FacetCounts> {
    fn merge_fruits(&self, segments_facet_counts: Vec<FacetCounts>) -> crate::Result<FacetCounts> {
        let mut facet_counts: BTreeMap<Facet, u64> = BTreeMap::new();
        for segment_facet_counts in segments_facet_counts {
            for (facet, count) in segment_facet_counts.facet_counts {
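The body of this merge loop is elided by the diff context; the accumulation it performs is the usual BTreeMap upsert, roughly:

// per (facet, count) pair coming from one segment:
*facet_counts.entry(facet).or_insert(0) += count;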
@@ -395,7 +399,7 @@ impl<'a> Iterator for FacetChildIterator<'a> {
}

impl FacetCounts {
    pub fn get<T>(&self, facet_from: T) -> FacetChildIterator
    pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
    where
        Facet: From<T>,
    {
@@ -409,7 +413,8 @@ impl FacetCounts {
            let facet_after = Facet::from_encoded_string(facet_after_bytes);
            Bound::Excluded(facet_after)
        };
        let underlying: btree_map::Range<_, _> = self.facet_counts.range((left_bound, right_bound));
        let underlying: btree_map::Range<'_, _, _> =
            self.facet_counts.range((left_bound, right_bound));
        FacetChildIterator { underlying }
    }
@@ -450,12 +455,14 @@ impl FacetCounts {
#[cfg(test)]
mod tests {
    use super::{FacetCollector, FacetCounts};
    use core::Index;
    use query::AllQuery;
    use crate::collector::Count;
    use crate::core::Index;
    use crate::query::{AllQuery, QueryParser, TermQuery};
    use crate::schema::{Document, Facet, Field, IndexRecordOption, Schema};
    use crate::Term;
    use rand::distributions::Uniform;
    use rand::prelude::SliceRandom;
    use rand::{thread_rng, Rng};
    use schema::{Document, Facet, Field, Schema};
    use std::iter;

    #[test]
@@ -483,8 +490,8 @@ mod tests {
            index_writer.add_document(doc);
        }
        index_writer.commit().unwrap();
        index.load_searchers().unwrap();
        let searcher = index.searcher();
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let mut facet_collector = FacetCollector::for_field(facet_field);
        facet_collector.add_facet(Facet::from("/top1"));
        let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
@@ -513,7 +520,7 @@ mod tests {
    #[should_panic(expected = "Tried to add a facet which is a descendant of \
                               an already added facet.")]
    fn test_misused_facet_collector() {
        let mut facet_collector = FacetCollector::for_field(Field(0));
        let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
        facet_collector.add_facet(Facet::from("/country"));
        facet_collector.add_facet(Facet::from("/country/europe"));
    }
@@ -532,8 +539,8 @@ mod tests {
            facet_field => Facet::from_text(&"/subjects/B/b"),
        ));
        index_writer.commit().unwrap();
        index.load_searchers().unwrap();
        let searcher = index.searcher();
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        assert_eq!(searcher.num_docs(), 1);
        let mut facet_collector = FacetCollector::for_field(facet_field);
        facet_collector.add_facet("/subjects");
@@ -542,9 +549,59 @@ mod tests {
        assert_eq!(facets[0].1, 1);
    }

    #[test]
    fn test_doc_search_by_facet() {
        let mut schema_builder = Schema::builder();
        let facet_field = schema_builder.add_facet_field("facet");
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.add_document(doc!(
            facet_field => Facet::from_text(&"/A/A"),
        ));
        index_writer.add_document(doc!(
            facet_field => Facet::from_text(&"/A/B"),
        ));
        index_writer.add_document(doc!(
            facet_field => Facet::from_text(&"/A/C/A"),
        ));
        index_writer.add_document(doc!(
            facet_field => Facet::from_text(&"/D/C/A"),
        ));
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        assert_eq!(searcher.num_docs(), 4);

        let count_facet = |facet_str: &str| {
            let term = Term::from_facet(facet_field, &Facet::from_text(facet_str));
            searcher
                .search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
                .unwrap()
        };

        assert_eq!(count_facet("/"), 4);
        assert_eq!(count_facet("/A"), 3);
        assert_eq!(count_facet("/A/B"), 1);
        assert_eq!(count_facet("/A/C"), 1);
        assert_eq!(count_facet("/A/C/A"), 1);
        assert_eq!(count_facet("/C/A"), 0);
        {
            let query_parser = QueryParser::for_index(&index, vec![]);
            {
                let query = query_parser.parse_query("facet:/A/B").unwrap();
                assert_eq!(1, searcher.search(&query, &Count).unwrap());
            }
            {
                let query = query_parser.parse_query("facet:/A").unwrap();
                assert_eq!(3, searcher.search(&query, &Count).unwrap());
            }
        }
    }

    #[test]
    fn test_non_used_facet_collector() {
        let mut facet_collector = FacetCollector::for_field(Field(0));
        let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
        facet_collector.add_facet(Facet::from("/country"));
        facet_collector.add_facet(Facet::from("/countryeurope"));
    }
@@ -579,9 +636,7 @@ mod tests {
            index_writer.add_document(doc);
        }
        index_writer.commit().unwrap();
        index.load_searchers().unwrap();

        let searcher = index.searcher();
        let searcher = index.reader().unwrap().searcher();

        let mut facet_collector = FacetCollector::for_field(facet_field);
        facet_collector.add_facet("/facet");
@@ -599,19 +654,18 @@ mod tests {
            );
        }
    }

}

#[cfg(all(test, feature = "unstable"))]
mod bench {

    use collector::FacetCollector;
    use query::AllQuery;
    use rand::{thread_rng, Rng};
    use schema::Facet;
    use schema::Schema;
    use crate::collector::FacetCollector;
    use crate::query::AllQuery;
    use crate::schema::{Facet, Schema};
    use crate::Index;
    use rand::seq::SliceRandom;
    use rand::thread_rng;
    use test::Bencher;
    use Index;

    #[bench]
    fn bench_facet_collector(b: &mut Bencher) {
@@ -628,17 +682,16 @@ mod bench {
            }
        }
        // 40425 docs
        thread_rng().shuffle(&mut docs[..]);
        docs[..].shuffle(&mut thread_rng());

        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        for doc in docs {
            index_writer.add_document(doc);
        }
        index_writer.commit().unwrap();
        index.load_searchers().unwrap();

        let reader = index.reader().unwrap();
        b.iter(|| {
            let searcher = index.searcher();
            let searcher = reader.searcher();
            let facet_collector = FacetCollector::for_field(facet_field);
            searcher.search(&AllQuery, &facet_collector).unwrap();
        });
@@ -82,6 +82,7 @@ mod tests {
        let mut schema_builder = schema::Schema::builder();
        let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
        let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
        let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
        let text_field = schema_builder.add_text_field("text", STRING);
        let schema = schema_builder.build();

@@ -94,6 +95,7 @@ mod tests {
        index_writer.add_document(doc!(
            num_field_i64 => ((i as i64) % 3i64) as i64,
            num_field_u64 => (i % 2u64) as u64,
            num_field_f64 => (i % 4u64) as f64,
            text_field => "text"
        ));
    }
@@ -101,14 +103,14 @@ mod tests {
        assert_eq!(index_writer.commit().unwrap(), 10u64);
    }

    index.load_searchers().unwrap();
    let searcher = index.searcher();
    let searcher = index.reader().searcher();
    let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
    let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
    let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);

    {
        // perform the query
        let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64);
        let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
        let mut query_parser = QueryParser::for_index(index, vec![text_field]);
        let query = query_parser.parse_query("text:text").unwrap();
        query.search(&searcher, &mut facet_collectors).unwrap();
@@ -118,6 +120,8 @@ mod tests {
        assert_eq!(ffvf_u64.counters[&1], 5);
        assert_eq!(ffvf_i64.counters[&0], 4);
        assert_eq!(ffvf_i64.counters[&1], 3);
        assert_eq!(ffvf_f64.counters[&0.0], 3);
        assert_eq!(ffvf_f64.counters[&2.0], 2);

    }
}
@@ -35,7 +35,6 @@ The resulting `Fruit` will then be a typed tuple with each collector's original
in their respective position.

```rust
# extern crate tantivy;
# use tantivy::schema::*;
# use tantivy::*;
# use tantivy::query::*;
@@ -53,9 +52,9 @@ use tantivy::collector::{Count, TopDocs};
# index_writer.add_document(doc!(
#   title => "The Diary of Muadib",
# ));
# index_writer.commit().unwrap();
# index.load_searchers()?;
# let searcher = index.searcher();
# index_writer.commit()?;
# let reader = index.reader()?;
# let searcher = reader.searcher();
# let query_parser = QueryParser::for_index(&index, vec![title]);
# let query = query_parser.parse_query("diary")?;
let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
@@ -66,7 +65,7 @@ let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =

The `Collector` trait is implemented for up to 4 collectors.
If you have more than 4 collectors, you can either group them into
tuples of tuples `(a,(b,(c,d)))`, or rely on `MultiCollector`'s.
tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`](./struct.MultiCollector.html).

# Combining several collectors dynamically

@@ -85,12 +84,11 @@ See the `custom_collector` example.

*/

use downcast;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use downcast_rs::impl_downcast;

mod count_collector;
pub use self::count_collector::Count;
@@ -103,17 +101,21 @@ mod top_collector;
mod top_score_collector;
pub use self::top_score_collector::TopDocs;

mod top_field_collector;
pub use self::top_field_collector::TopDocsByField;
mod custom_score_top_collector;
pub use self::custom_score_top_collector::{CustomScorer, CustomSegmentScorer};

mod tweak_score_top_collector;
pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};

mod facet_collector;
pub use self::facet_collector::FacetCollector;
use crate::query::Weight;

/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
pub trait Fruit: Send + downcast::Any {}
pub trait Fruit: Send + downcast_rs::Downcast {}

impl<T> Fruit for T where T: Send + downcast::Any {}
impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
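The move from the `downcast` crate to `downcast_rs` is what makes a `Box<dyn Fruit>` castable back to its concrete type later in this changeset. A standalone sketch of the mechanics (the usize value is a placeholder):

let untyped: Box<dyn Fruit> = Box::new(42usize);
// downcast() comes from impl_downcast!(Fruit); the error side is mapped away
// because Box<dyn Fruit> does not implement Debug.
let typed: usize = *untyped.downcast::<usize>().map_err(|_| ()).unwrap();
assert_eq!(typed, 42);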
/// Collectors are in charge of collecting and retaining relevant
/// information from the documents found and scored by the query.
@@ -145,14 +147,37 @@ pub trait Collector: Sync {
        &self,
        segment_local_id: SegmentLocalId,
        segment: &SegmentReader,
    ) -> Result<Self::Child>;
    ) -> crate::Result<Self::Child>;

    /// Returns true iff the collector requires scores to be computed for documents.
    fn requires_scoring(&self) -> bool;

    /// Combines the fruit associated with the collection of each segment
    /// into one fruit.
    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit>;
    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;

    /// Creates a segment collector and drives the collection over a single
    /// segment, skipping deleted documents.
    fn collect_segment(
        &self,
        weight: &dyn Weight,
        segment_ord: u32,
        reader: &SegmentReader,
    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
        let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;

        if let Some(delete_bitset) = reader.delete_bitset() {
            weight.for_each(reader, &mut |doc, score| {
                if delete_bitset.is_alive(doc) {
                    segment_collector.collect(doc, score);
                }
            })?;
        } else {
            weight.for_each(reader, &mut |doc, score| {
                segment_collector.collect(doc, score);
            })?;
        }
        Ok(segment_collector.harvest())
    }
}
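To make the trait pair concrete, here is a minimal sketch (types invented for illustration) of a collector that re-implements Count against the signatures above:

// A hand-rolled Count: one unit per hit, summed across segments.
struct HitCount;
struct SegmentHitCount(usize);

impl Collector for HitCount {
    type Fruit = usize;
    type Child = SegmentHitCount;

    fn for_segment(
        &self,
        _segment_local_id: SegmentLocalId,
        _segment: &SegmentReader,
    ) -> crate::Result<SegmentHitCount> {
        Ok(SegmentHitCount(0))
    }

    fn requires_scoring(&self) -> bool {
        false // counting never looks at the score
    }

    fn merge_fruits(&self, segment_fruits: Vec<usize>) -> crate::Result<usize> {
        Ok(segment_fruits.into_iter().sum())
    }
}

impl SegmentCollector for SegmentHitCount {
    type Fruit = usize;

    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.0 += 1;
    }

    fn harvest(self) -> usize {
        self.0
    }
}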
/// The `SegmentCollector` is the trait in charge of defining the
@@ -183,7 +208,11 @@ where
    type Fruit = (Left::Fruit, Right::Fruit);
    type Child = (Left::Child, Right::Child);

    fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
    fn for_segment(
        &self,
        segment_local_id: u32,
        segment: &SegmentReader,
    ) -> crate::Result<Self::Child> {
        let left = self.0.for_segment(segment_local_id, segment)?;
        let right = self.1.for_segment(segment_local_id, segment)?;
        Ok((left, right))
@@ -196,7 +225,7 @@ where
    fn merge_fruits(
        &self,
        children: Vec<(Left::Fruit, Right::Fruit)>,
    ) -> Result<(Left::Fruit, Right::Fruit)> {
    ) -> crate::Result<(Left::Fruit, Right::Fruit)> {
        let mut left_fruits = vec![];
        let mut right_fruits = vec![];
        for (left_fruit, right_fruit) in children {
@@ -238,7 +267,11 @@ where
    type Fruit = (One::Fruit, Two::Fruit, Three::Fruit);
    type Child = (One::Child, Two::Child, Three::Child);

    fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
    fn for_segment(
        &self,
        segment_local_id: u32,
        segment: &SegmentReader,
    ) -> crate::Result<Self::Child> {
        let one = self.0.for_segment(segment_local_id, segment)?;
        let two = self.1.for_segment(segment_local_id, segment)?;
        let three = self.2.for_segment(segment_local_id, segment)?;
@@ -249,7 +282,7 @@ where
        self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
    }

    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
        let mut one_fruits = vec![];
        let mut two_fruits = vec![];
        let mut three_fruits = vec![];
@@ -297,7 +330,11 @@ where
    type Fruit = (One::Fruit, Two::Fruit, Three::Fruit, Four::Fruit);
    type Child = (One::Child, Two::Child, Three::Child, Four::Child);

    fn for_segment(&self, segment_local_id: u32, segment: &SegmentReader) -> Result<Self::Child> {
    fn for_segment(
        &self,
        segment_local_id: u32,
        segment: &SegmentReader,
    ) -> crate::Result<Self::Child> {
        let one = self.0.for_segment(segment_local_id, segment)?;
        let two = self.1.for_segment(segment_local_id, segment)?;
        let three = self.2.for_segment(segment_local_id, segment)?;
@@ -312,7 +349,7 @@ where
        || self.3.requires_scoring()
    }

    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> Result<Self::Fruit> {
    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
        let mut one_fruits = vec![];
        let mut two_fruits = vec![];
        let mut three_fruits = vec![];
@@ -358,10 +395,7 @@ where
    }
}

#[allow(missing_docs)]
mod downcast_impl {
    downcast!(super::Fruit);
}
impl_downcast!(Fruit);

#[cfg(test)]
pub mod tests;
@@ -1,30 +1,29 @@
use super::Collector;
use super::SegmentCollector;
use collector::Fruit;
use downcast::Downcast;
use crate::collector::Fruit;
use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use crate::TantivyError;
use std::marker::PhantomData;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use TantivyError;
use std::ops::Deref;

pub struct MultiFruit {
    sub_fruits: Vec<Option<Box<Fruit>>>,
    sub_fruits: Vec<Option<Box<dyn Fruit>>>,
}

pub struct CollectorWrapper<TCollector: Collector>(TCollector);

impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
    type Fruit = Box<Fruit>;
    type Child = Box<BoxableSegmentCollector>;
    type Fruit = Box<dyn Fruit>;
    type Child = Box<dyn BoxableSegmentCollector>;

    fn for_segment(
        &self,
        segment_local_id: u32,
        reader: &SegmentReader,
    ) -> Result<Box<BoxableSegmentCollector>> {
    ) -> crate::Result<Box<dyn BoxableSegmentCollector>> {
        let child = self.0.for_segment(segment_local_id, reader)?;
        Ok(Box::new(SegmentCollectorWrapper(child)))
    }
@@ -33,38 +32,41 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
        self.0.requires_scoring()
    }

    fn merge_fruits(&self, children: Vec<<Self as Collector>::Fruit>) -> Result<Box<Fruit>> {
    fn merge_fruits(
        &self,
        children: Vec<<Self as Collector>::Fruit>,
    ) -> crate::Result<Box<dyn Fruit>> {
        let typed_fruit: Vec<TCollector::Fruit> = children
            .into_iter()
            .map(|untyped_fruit| {
                Downcast::<TCollector::Fruit>::downcast(untyped_fruit)
                untyped_fruit
                    .downcast::<TCollector::Fruit>()
                    .map(|boxed_but_typed| *boxed_but_typed)
                    .map_err(|e| {
                        let err_msg = format!("Failed to cast child collector fruit. {:?}", e);
                        TantivyError::InvalidArgument(err_msg)
                    .map_err(|_| {
                        TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
                    })
            })
            .collect::<Result<_>>()?;
            .collect::<crate::Result<_>>()?;
        let merged_fruit = self.0.merge_fruits(typed_fruit)?;
        Ok(Box::new(merged_fruit))
    }
}

impl SegmentCollector for Box<BoxableSegmentCollector> {
    type Fruit = Box<Fruit>;
impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
    type Fruit = Box<dyn Fruit>;

    fn collect(&mut self, doc: u32, score: f32) {
    fn collect(&mut self, doc: u32, score: Score) {
        self.as_mut().collect(doc, score);
    }

    fn harvest(self) -> Box<Fruit> {
    fn harvest(self) -> Box<dyn Fruit> {
        BoxableSegmentCollector::harvest_from_box(self)
    }
}

pub trait BoxableSegmentCollector {
    fn collect(&mut self, doc: u32, score: f32);
    fn harvest_from_box(self: Box<Self>) -> Box<Fruit>;
    fn collect(&mut self, doc: u32, score: Score);
    fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
}
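The `self: Box<Self>` receiver above is the standard object-safety workaround: `harvest(self)` takes self by value and therefore cannot be called on a trait object, but a boxed receiver can. A standalone sketch of the trick:

trait Harvest {
    fn harvest_from_box(self: Box<Self>) -> String;
}

struct Apples(usize);

impl Harvest for Apples {
    // Consumes the boxed value, which is allowed on `dyn Harvest`.
    fn harvest_from_box(self: Box<Self>) -> String {
        format!("{} apples", self.0)
    }
}

fn main() {
    let boxed: Box<dyn Harvest> = Box::new(Apples(3));
    assert_eq!(boxed.harvest_from_box(), "3 apples");
}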
pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegmentCollector);
@@ -72,11 +74,11 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
    for SegmentCollectorWrapper<TSegmentCollector>
{
    fn collect(&mut self, doc: u32, score: f32) {
    fn collect(&mut self, doc: u32, score: Score) {
        self.0.collect(doc, score);
    }

    fn harvest_from_box(self: Box<Self>) -> Box<Fruit> {
    fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit> {
        Box::new(self.0.harvest())
    }
}
@@ -89,7 +91,10 @@ pub struct FruitHandle<TFruit: Fruit> {
impl<TFruit: Fruit> FruitHandle<TFruit> {
    pub fn extract(self, fruits: &mut MultiFruit) -> TFruit {
        let boxed_fruit = fruits.sub_fruits[self.pos].take().expect("");
        *Downcast::<TFruit>::downcast(boxed_fruit).expect("Failed")
        *boxed_fruit
            .downcast::<TFruit>()
            .map_err(|_| ())
            .expect("Failed to downcast collector fruit.")
    }
}

@@ -102,60 +107,45 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// [Combining several collectors section of the collector documentation](./index.html#combining-several-collectors).
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
///     let mut schema_builder = Schema::builder();
///     let title = schema_builder.add_text_field("title", TEXT);
///     let schema = schema_builder.build();
///     let index = Index::create_in_ram(schema);
///     {
///         let mut index_writer = index.writer(3_000_000)?;
///         index_writer.add_document(doc!(
///             title => "The Name of the Wind",
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of Muadib",
///         ));
///         index_writer.add_document(doc!(
///             title => "A Dairy Cow",
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of a Young Girl",
///         ));
///         index_writer.commit().unwrap();
///     }
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
///     index.load_searchers()?;
///     let searcher = index.searcher();
/// let mut index_writer = index.writer(3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
///
///     let mut collectors = MultiCollector::new();
///     let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
///     let count_handle = collectors.add_collector(Count);
///     let query_parser = QueryParser::for_index(&index, vec![title]);
///     let query = query_parser.parse_query("diary")?;
///     let mut multi_fruit = searcher.search(&query, &collectors)?;
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
///
///     let count = count_handle.extract(&mut multi_fruit);
///     let top_docs = top_docs_handle.extract(&mut multi_fruit);
/// let mut collectors = MultiCollector::new();
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
///
/// # assert_eq!(count, 2);
/// # assert_eq!(top_docs.len(), 2);
/// let count = count_handle.extract(&mut multi_fruit);
/// let top_docs = top_docs_handle.extract(&mut multi_fruit);
///
///     Ok(())
/// }
/// assert_eq!(count, 2);
/// assert_eq!(top_docs.len(), 2);
/// ```
#[allow(clippy::type_complexity)]
#[derive(Default)]
pub struct MultiCollector<'a> {
    collector_wrappers:
        Vec<Box<Collector<Child = Box<BoxableSegmentCollector>, Fruit = Box<Fruit>> + 'a>>,
    collector_wrappers: Vec<
        Box<dyn Collector<Child = Box<dyn BoxableSegmentCollector>, Fruit = Box<dyn Fruit>> + 'a>,
    >,
}
impl<'a> MultiCollector<'a> {
@@ -187,21 +177,24 @@ impl<'a> Collector for MultiCollector<'a> {
        &self,
        segment_local_id: SegmentLocalId,
        segment: &SegmentReader,
    ) -> Result<MultiCollectorChild> {
    ) -> crate::Result<MultiCollectorChild> {
        let children = self
            .collector_wrappers
            .iter()
            .map(|collector_wrapper| collector_wrapper.for_segment(segment_local_id, segment))
            .collect::<Result<Vec<_>>>()?;
            .collect::<crate::Result<Vec<_>>>()?;
        Ok(MultiCollectorChild { children })
    }

    fn requires_scoring(&self) -> bool {
        self.collector_wrappers.iter().any(|c| c.requires_scoring())
        self.collector_wrappers
            .iter()
            .map(Deref::deref)
            .any(Collector::requires_scoring)
    }

    fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> Result<MultiFruit> {
        let mut segment_fruits_list: Vec<Vec<Box<Fruit>>> = (0..self.collector_wrappers.len())
    fn merge_fruits(&self, segments_multifruits: Vec<MultiFruit>) -> crate::Result<MultiFruit> {
        let mut segment_fruits_list: Vec<Vec<Box<dyn Fruit>>> = (0..self.collector_wrappers.len())
            .map(|_| Vec::with_capacity(segments_multifruits.len()))
            .collect::<Vec<_>>();
        for segment_multifruit in segments_multifruits {
@@ -218,13 +211,13 @@ impl<'a> Collector for MultiCollector<'a> {
            .map(|(child_collector, segment_fruits)| {
                Ok(Some(child_collector.merge_fruits(segment_fruits)?))
            })
            .collect::<Result<_>>()?;
            .collect::<crate::Result<_>>()?;
        Ok(MultiFruit { sub_fruits })
    }
}

pub struct MultiCollectorChild {
    children: Vec<Box<BoxableSegmentCollector>>,
    children: Vec<Box<dyn BoxableSegmentCollector>>,
}

impl SegmentCollector for MultiCollectorChild {
@@ -251,12 +244,12 @@ impl SegmentCollector for MultiCollectorChild {
mod tests {

    use super::*;
    use collector::{Count, TopDocs};
    use query::TermQuery;
    use schema::IndexRecordOption;
    use schema::{Schema, TEXT};
    use Index;
    use Term;
    use crate::collector::{Count, TopDocs};
    use crate::query::TermQuery;
    use crate::schema::IndexRecordOption;
    use crate::schema::{Schema, TEXT};
    use crate::Index;
    use crate::Term;

    #[test]
    fn test_multi_collector() {
@@ -276,8 +269,7 @@ mod tests {
        index_writer.add_document(doc!(text=>"abc"));
        index_writer.commit().unwrap();
    }
    index.load_searchers().unwrap();
    let searcher = index.searcher();
    let searcher = index.reader().unwrap().searcher();
    let term = Term::from_field_text(text, "abc");
    let query = TermQuery::new(term, IndexRecordOption::Basic);
@@ -1,12 +1,20 @@
use super::*;
use core::SegmentReader;
use fastfield::BytesFastFieldReader;
use fastfield::FastFieldReader;
use schema::Field;
use DocAddress;
use DocId;
use Score;
use SegmentLocalId;
use crate::core::SegmentReader;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::FastFieldReader;
use crate::schema::Field;
use crate::DocAddress;
use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;

pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
    compute_score: true,
};

pub const TEST_COLLECTOR_WITHOUT_SCORE: TestCollector = TestCollector {
    compute_score: false,
};

/// Stores all of the doc ids.
/// This collector is only used for tests.
@@ -14,7 +22,9 @@ use SegmentLocalId;
///
/// It is unusable in practice, as it does not store
/// the segment ordinals
pub struct TestCollector;
pub struct TestCollector {
    pub compute_score: bool,
}

pub struct TestSegmentCollector {
    segment_id: SegmentLocalId,
@@ -32,7 +42,6 @@ impl TestFruit {
    pub fn docs(&self) -> &[DocAddress] {
        &self.docs[..]
    }

    pub fn scores(&self) -> &[Score] {
        &self.scores[..]
    }
@@ -46,7 +55,7 @@ impl Collector for TestCollector {
        &self,
        segment_id: SegmentLocalId,
        _reader: &SegmentReader,
    ) -> Result<TestSegmentCollector> {
    ) -> crate::Result<TestSegmentCollector> {
        Ok(TestSegmentCollector {
            segment_id,
            fruit: TestFruit::default(),
@@ -54,10 +63,10 @@ impl Collector for TestCollector {
    }

    fn requires_scoring(&self) -> bool {
        true
        self.compute_score
    }

    fn merge_fruits(&self, mut children: Vec<TestFruit>) -> Result<TestFruit> {
    fn merge_fruits(&self, mut children: Vec<TestFruit>) -> crate::Result<TestFruit> {
        children.sort_by_key(|fruit| {
            if fruit.docs().is_empty() {
                0
@@ -114,11 +123,15 @@ impl Collector for FastFieldTestCollector {
    fn for_segment(
        &self,
        _: SegmentLocalId,
        reader: &SegmentReader,
    ) -> Result<FastFieldSegmentCollector> {
        segment_reader: &SegmentReader,
    ) -> crate::Result<FastFieldSegmentCollector> {
        let reader = segment_reader
            .fast_fields()
            .u64(self.field)
            .expect("Requested field is not a fast field.");
        Ok(FastFieldSegmentCollector {
            vals: Vec::new(),
            reader: reader.fast_field_reader(self.field)?,
            reader,
        })
    }

@@ -126,7 +139,7 @@ impl Collector for FastFieldTestCollector {
        false
    }

    fn merge_fruits(&self, children: Vec<Vec<u64>>) -> Result<Vec<u64>> {
    fn merge_fruits(&self, children: Vec<Vec<u64>>) -> crate::Result<Vec<u64>> {
        Ok(children.into_iter().flat_map(|v| v.into_iter()).collect())
    }
}
@@ -170,11 +183,14 @@ impl Collector for BytesFastFieldTestCollector {
    fn for_segment(
        &self,
        _segment_local_id: u32,
        segment: &SegmentReader,
    ) -> Result<BytesFastFieldSegmentCollector> {
        segment_reader: &SegmentReader,
    ) -> crate::Result<BytesFastFieldSegmentCollector> {
        Ok(BytesFastFieldSegmentCollector {
            vals: Vec::new(),
            reader: segment.bytes_fast_field_reader(self.field)?,
            reader: segment_reader
                .fast_fields()
                .bytes(self.field)
                .expect("Field is not a bytes fast field."),
        })
    }

@@ -182,7 +198,7 @@ impl Collector for BytesFastFieldTestCollector {
        false
    }

    fn merge_fruits(&self, children: Vec<Vec<u8>>) -> Result<Vec<u8>> {
    fn merge_fruits(&self, children: Vec<Vec<u8>>) -> crate::Result<Vec<u8>> {
        Ok(children.into_iter().flat_map(|c| c.into_iter()).collect())
    }
}
@@ -190,8 +206,8 @@ impl Collector for BytesFastFieldTestCollector {
impl SegmentCollector for BytesFastFieldSegmentCollector {
    type Fruit = Vec<u8>;

    fn collect(&mut self, doc: u32, _score: f32) {
        let data = self.reader.get_val(doc);
    fn collect(&mut self, doc: u32, _score: Score) {
        let data = self.reader.get_bytes(doc);
        self.vals.extend(data);
    }
@@ -1,52 +1,63 @@
use crate::DocAddress;
use crate::DocId;
use crate::SegmentLocalId;
use crate::SegmentReader;
use serde::export::PhantomData;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use DocAddress;
use DocId;
use Result;
use SegmentLocalId;
use SegmentReader;

/// Contains a feature (field, score, etc.) of a document along with the document address.
///
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed.
///
/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
/// address is used.
///
/// WARNING: equality is not what you would expect here.
/// Two elements are equal if their feature is equal, regardless of whether `doc`
/// is equal. This should be perfectly fine for this usage, but let's make sure this
/// struct is never public.
struct ComparableDoc<T, D> {
    feature: T,
    doc: D,
pub(crate) struct ComparableDoc<T, D> {
    pub feature: T,
    pub doc: D,
}

impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        other
        // Reversed to make BinaryHeap work as a min-heap
        let by_feature = other
            .feature
            .partial_cmp(&self.feature)
            .unwrap_or_else(|| Ordering::Equal)
            .unwrap_or(Ordering::Equal);

        let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);

        // In case of a tie on the feature, we sort by ascending
        // `DocAddress` in order to ensure a stable sorting of the
        // documents.
        by_feature.then_with(lazy_by_doc_address)
    }
}

impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == Ordering::Equal
    }
}

impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
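A standalone sketch of the reversed-comparison trick above: once the ordering is flipped, the max-heap's root is the worst retained candidate, which is exactly the element to evict. `std::cmp::Reverse` achieves the same for types that are already `Ord`:

use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn top_k(values: &[u64], k: usize) -> Vec<u64> {
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::new();
    for &v in values {
        if heap.len() < k {
            heap.push(Reverse(v));
        } else if let Some(mut head) = heap.peek_mut() {
            // head is the smallest retained value thanks to Reverse.
            if head.0 < v {
                *head = Reverse(v); // replace the current minimum
            }
        }
    }
    let mut out: Vec<u64> = heap.into_iter().map(|r| r.0).collect();
    out.sort_unstable_by(|a, b| b.cmp(a)); // best first
    out
}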
pub(crate) struct TopCollector<T> {
    limit: usize,
    pub limit: usize,
    pub offset: usize,
    _marker: PhantomData<T>,
}

@@ -62,27 +73,33 @@ where
        if limit < 1 {
            panic!("Limit must be strictly greater than 0.");
        }
        TopCollector {
        Self {
            limit,
            offset: 0,
            _marker: PhantomData,
        }
    }

    pub fn limit(&self) -> usize {
        self.limit
    /// Skip the first "offset" documents when collecting.
    ///
    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
    /// Lucene's TopDocsCollector.
    pub fn and_offset(mut self, offset: usize) -> TopCollector<T> {
        self.offset = offset;
        self
    }

    pub fn merge_fruits(
        &self,
        children: Vec<Vec<(T, DocAddress)>>,
    ) -> Result<Vec<(T, DocAddress)>> {
    ) -> crate::Result<Vec<(T, DocAddress)>> {
        if self.limit == 0 {
            return Ok(Vec::new());
        }
        let mut top_collector = BinaryHeap::new();
        for child_fruit in children {
            for (feature, doc) in child_fruit {
                if top_collector.len() < self.limit {
                if top_collector.len() < (self.limit + self.offset) {
                    top_collector.push(ComparableDoc { feature, doc });
                } else if let Some(mut head) = top_collector.peek_mut() {
                    if head.feature < feature {
@@ -94,16 +111,33 @@ where
        Ok(top_collector
            .into_sorted_vec()
            .into_iter()
            .skip(self.offset)
            .map(|cdoc| (cdoc.feature, cdoc.doc))
            .collect())
    }

    pub(crate) fn for_segment(
    pub(crate) fn for_segment<F: PartialOrd>(
        &self,
        segment_id: SegmentLocalId,
        _: &SegmentReader,
    ) -> Result<TopSegmentCollector<T>> {
        Ok(TopSegmentCollector::new(segment_id, self.limit))
    ) -> crate::Result<TopSegmentCollector<F>> {
        Ok(TopSegmentCollector::new(
            segment_id,
            self.limit + self.offset,
        ))
    }

    /// Create a new TopCollector with the same limit and offset.
    ///
    /// Ideally we would use Into but the blanket implementation seems to cause the Scorer traits
    /// to fail.
    #[doc(hidden)]
    pub(crate) fn into_tscore<TScore: PartialOrd + Clone>(self) -> TopCollector<TScore> {
        TopCollector {
            limit: self.limit,
            offset: self.offset,
            _marker: PhantomData,
        }
    }
}
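The arithmetic behind limit and offset: every segment must keep `limit + offset` candidates, since any of them could survive the merge, and only the merged, globally sorted stream skips the first `offset` entries. In crate-internal terms (a sketch, not part of the diff):

// Page 3 of 10 hits per page:
let per_page = 10;
let offset = 20; // (page - 1) * per_page
let collector = TopCollector::<Score>::with_limit(per_page).and_offset(offset);
// Each segment harvests up to per_page + offset = 30 candidates;
// merge_fruits sorts them globally, then applies .skip(offset).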
@@ -178,8 +212,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
#[cfg(test)]
mod tests {
    use super::{TopCollector, TopSegmentCollector};
    use DocAddress;
    use Score;
    use crate::DocAddress;

    #[test]
    fn test_top_collector_not_at_capacity() {
@@ -217,8 +250,134 @@ mod tests {
    }

    #[test]
    #[should_panic]
    fn test_top_0() {
        let _collector: TopCollector<Score> = TopCollector::with_limit(0);
    fn test_top_segment_collector_stable_ordering_for_equal_feature() {
        // given that the documents are collected in ascending doc id order,
        // when harvesting we have to guarantee stable sorting in case of a tie
        // on the score
        let doc_ids_collection = [4, 5, 6];
        let score = 3.14;

        let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
        for id in &doc_ids_collection {
            top_collector_limit_2.collect(*id, score);
        }

        let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
        for id in &doc_ids_collection {
            top_collector_limit_3.collect(*id, score);
        }

        assert_eq!(
            top_collector_limit_2.harvest(),
            top_collector_limit_3.harvest()[..2].to_vec(),
        );
    }

    #[test]
    fn test_top_collector_with_limit_and_offset() {
        let collector = TopCollector::with_limit(2).and_offset(1);

        let results = collector
            .merge_fruits(vec![vec![
                (0.9, DocAddress(0, 1)),
                (0.8, DocAddress(0, 2)),
                (0.7, DocAddress(0, 3)),
                (0.6, DocAddress(0, 4)),
                (0.5, DocAddress(0, 5)),
            ]])
            .unwrap();

        assert_eq!(
            results,
            vec![(0.8, DocAddress(0, 2)), (0.7, DocAddress(0, 3))]
        );
    }

    #[test]
    fn test_top_collector_with_limit_larger_than_set_and_offset() {
        let collector = TopCollector::with_limit(2).and_offset(1);

        let results = collector
            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
            .unwrap();

        assert_eq!(results, vec![(0.8, DocAddress(0, 2))]);
    }

    #[test]
    fn test_top_collector_with_limit_and_offset_larger_than_set() {
        let collector = TopCollector::with_limit(2).and_offset(20);

        let results = collector
            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
            .unwrap();

        assert_eq!(results, vec![]);
    }
}

#[cfg(all(test, feature = "unstable"))]
mod bench {
    use super::TopSegmentCollector;
    use test::Bencher;

    #[bench]
    fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
        let mut top_collector = TopSegmentCollector::new(0, 400);

        b.iter(|| {
            for i in 0..100 {
                top_collector.collect(i, 0.8);
            }
        });
    }

    #[bench]
    fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
        let mut top_collector = TopSegmentCollector::new(0, 100);

        for i in 0..100 {
            top_collector.collect(i, 0.8);
        }

        b.iter(|| {
            for i in 0..100 {
                top_collector.collect(i, 0.8);
            }
        });
    }

    #[bench]
    fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
        b.iter(|| {
            let mut top_collector = TopSegmentCollector::new(0, 100);

            for i in 0..100 {
                top_collector.collect(i, 0.8);
            }

            // it would be nice to be able to do the setup N times but still
            // measure only harvest(). We can't since harvest() consumes
            // the top_collector.
            top_collector.harvest()
        });
    }

    #[bench]
    fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
        b.iter(|| {
            let mut top_collector = TopSegmentCollector::new(0, 100);
            let mut score = 1.0;

            for i in 0..100 {
                score += 1.0;
                top_collector.collect(i, score);
            }

            // it would be nice to be able to do the setup N times but still
            // measure only harvest(). We can't since harvest() consumes
            // the top_collector.
            top_collector.harvest()
        });
    }
}
@@ -1,250 +0,0 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use fastfield::FastFieldReader;
use fastfield::FastValue;
use schema::Field;
use DocAddress;
use Result;
use SegmentLocalId;
use SegmentReader;

/// The Top Field Collector keeps track of the K documents
/// sorted by a fast field in the index
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// # use tantivy::schema::{Schema, Field, FAST, TEXT};
/// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser};
/// use tantivy::collector::TopDocs;
///
/// # fn main() {
/// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("title", TEXT);
/// # let rating = schema_builder.add_u64_field("rating", FAST);
/// # let schema = schema_builder.build();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// # index_writer.add_document(doc!(
/// #     title => "The Name of the Wind",
/// #     rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # index_writer.commit().unwrap();
/// # index.load_searchers().unwrap();
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary").unwrap();
/// # let top_docs = docs_sorted_by_rating(&index, &query, rating).unwrap();
/// # assert_eq!(top_docs,
/// #            vec![(97u64, DocAddress(0u32, 1)),
/// #                 (80u64, DocAddress(0u32, 3))]);
/// # }
/// #
/// /// Searches the documents matching the given query, and
/// /// collects the top 10 documents, ordered by the `field`
/// /// given in argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(index: &Index, query: &Query, sort_by_field: Field)
///     -> Result<Vec<(u64, DocAddress)>> {
///
///     // This is where we build our collector!
///     let top_docs_by_rating = TopDocs::with_limit(2).order_by_field(sort_by_field);
///
///     // ... and here are our documents. Note this is a simple vec.
///     // The `u64` in the pair is the value of our fast field for each document.
///     index.searcher()
///         .search(query, &top_docs_by_rating)
/// }
/// ```
pub struct TopDocsByField<T> {
    collector: TopCollector<T>,
    field: Field,
}

impl<T: FastValue + PartialOrd + Clone> TopDocsByField<T> {
    /// Creates a top field collector, with a number of documents equal to "limit".
    ///
    /// The given field name must be a fast field, otherwise the collector will fail
    /// with an error while collecting results.
    ///
    /// # Panics
    /// The method panics if limit is 0
    pub(crate) fn new(field: Field, limit: usize) -> TopDocsByField<T> {
        TopDocsByField {
            collector: TopCollector::with_limit(limit),
            field,
        }
    }
}

impl<T: FastValue + PartialOrd + Send + Sync + 'static> Collector for TopDocsByField<T> {
    type Fruit = Vec<(T, DocAddress)>;

    type Child = TopFieldSegmentCollector<T>;

    fn for_segment(
        &self,
        segment_local_id: SegmentLocalId,
        reader: &SegmentReader,
    ) -> Result<TopFieldSegmentCollector<T>> {
        let collector = self.collector.for_segment(segment_local_id, reader)?;
        let reader = reader.fast_field_reader(self.field)?;
        Ok(TopFieldSegmentCollector { collector, reader })
    }

    fn requires_scoring(&self) -> bool {
        false
    }

    fn merge_fruits(
        &self,
        segment_fruits: Vec<Vec<(T, DocAddress)>>,
    ) -> Result<Vec<(T, DocAddress)>> {
        self.collector.merge_fruits(segment_fruits)
    }
}

pub struct TopFieldSegmentCollector<T: FastValue + PartialOrd> {
    collector: TopSegmentCollector<T>,
    reader: FastFieldReader<T>,
}

impl<T: FastValue + PartialOrd + Send + Sync + 'static> SegmentCollector
    for TopFieldSegmentCollector<T>
{
    type Fruit = Vec<(T, DocAddress)>;

    fn collect(&mut self, doc: u32, _score: f32) {
        let field_value = self.reader.get(doc);
        self.collector.collect(doc, field_value);
    }

    fn harvest(self) -> Vec<(T, DocAddress)> {
        self.collector.harvest()
    }
}

#[cfg(test)]
mod tests {
    use super::TopDocsByField;
    use collector::Collector;
    use collector::TopDocs;
    use query::Query;
    use query::QueryParser;
    use schema::Field;
    use schema::IntOptions;
    use schema::{Schema, FAST, TEXT};
    use DocAddress;
    use Index;
    use IndexWriter;
    use TantivyError;

    const TITLE: &str = "title";
    const SIZE: &str = "size";

    #[test]
    fn test_top_collector_not_at_capacity() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, FAST);
        let schema = schema_builder.build();
        let (index, query) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
            index_writer.add_document(doc!(
                title => "growler of beer",
                size => 64u64,
            ));
            index_writer.add_document(doc!(
                title => "pint of beer",
                size => 16u64,
            ));
        });
        let searcher = index.searcher();

        let top_collector = TopDocs::with_limit(4).order_by_field(size);
        let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
        assert_eq!(
            top_docs,
            vec![
                (64, DocAddress(0, 1)),
                (16, DocAddress(0, 2)),
                (12, DocAddress(0, 0))
            ]
        );
    }

    #[test]
    #[should_panic]
    fn test_field_does_not_exist() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, FAST);
        let schema = schema_builder.build();
        let (index, _) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
        });
        let searcher = index.searcher();
        let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2));
        let segment_reader = searcher.segment_reader(0u32);
        top_collector
            .for_segment(0, segment_reader)
            .expect("should panic");
    }

    #[test]
    fn test_field_not_fast_field() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, IntOptions::default());
        let schema = schema_builder.build();
        let (index, _) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
        });
        let searcher = index.searcher();
        let segment = searcher.segment_reader(0);
        let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size);
        assert_matches!(
            top_collector
                .for_segment(0, segment)
                .map(|_| ())
                .unwrap_err(),
            TantivyError::FastFieldError(_)
        );
    }

    fn index(
        query: &str,
        query_field: Field,
        schema: Schema,
        mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
    ) -> (Index, Box<Query>) {
        let index = Index::create_in_ram(schema);

        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        doc_adder(&mut index_writer);
        index_writer.commit().unwrap();
        index.load_searchers().unwrap();

        let query_parser = QueryParser::for_index(&index, vec![query_field]);
        let query = query_parser.parse_query(query).unwrap();
        (index, query)
    }
}
@@ -1,71 +1,103 @@
use super::Collector;
use collector::top_collector::TopCollector;
use collector::top_collector::TopSegmentCollector;
use collector::SegmentCollector;
use collector::TopDocsByField;
use fastfield::FastValue;
use schema::Field;
use DocAddress;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
use crate::collector::top_collector::TopSegmentCollector;
use crate::collector::top_collector::{ComparableDoc, TopCollector};
use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{
    CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
};
use crate::fastfield::FastFieldReader;
use crate::query::Weight;
use crate::schema::Field;
use crate::DocAddress;
use crate::DocId;
use crate::Score;
use crate::SegmentLocalId;
use crate::SegmentReader;
use std::collections::BinaryHeap;
use std::fmt;

/// The Top Score Collector keeps track of the K documents
/// The `TopDocs` collector keeps track of the top `K` documents
/// sorted by their score.
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// This collector guarantees a stable sorting in case of a tie on the
/// document score. As such, it is suitable to implement pagination.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::DocAddress;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{Index, Result};
/// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
///     let mut schema_builder = Schema::builder();
///     let title = schema_builder.add_text_field("title", TEXT);
///     let schema = schema_builder.build();
///     let index = Index::create_in_ram(schema);
///     {
///         let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
///         index_writer.add_document(doc!(
///             title => "The Name of the Wind",
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of Muadib",
///         ));
///         index_writer.add_document(doc!(
///             title => "A Dairy Cow",
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of a Young Girl",
///         ));
///         index_writer.commit().unwrap();
///     }
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
///     index.load_searchers()?;
///     let searcher = index.searcher();
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
///
///     let query_parser = QueryParser::for_index(&index, vec![title]);
///     let query = query_parser.parse_query("diary")?;
///     let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
///
///     assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
///     assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
///
///     Ok(())
/// }
/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
/// ```
pub struct TopDocs(TopCollector<Score>);

impl fmt::Debug for TopDocs {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "TopDocs(limit={}, offset={})",
            self.0.limit, self.0.offset
        )
    }
}

struct ScorerByFastFieldReader {
    ff_reader: FastFieldReader<u64>,
}

impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
    fn score(&mut self, doc: DocId) -> u64 {
        self.ff_reader.get_u64(u64::from(doc))
    }
}

struct ScorerByField {
    field: Field,
}

impl CustomScorer<u64> for ScorerByField {
    type Child = ScorerByFastFieldReader;

    fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
        let ff_reader = segment_reader
            .fast_fields()
            .u64(self.field)
            .ok_or_else(|| {
                crate::TantivyError::SchemaError(format!(
                    "Field requested ({:?}) is not a i64/u64 fast field.",
                    self.field
                ))
            })?;
        Ok(ScorerByFastFieldReader { ff_reader })
    }
}

impl TopDocs {
    /// Creates a top score collector, with a number of documents equal to "limit".
    ///
@@ -75,15 +107,340 @@ impl TopDocs {
        TopDocs(TopCollector::with_limit(limit))
    }

    /// Skip the first "offset" documents when collecting.
    ///
    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
    /// Lucene's TopDocsCollector.
    ///
    /// ```rust
    /// use tantivy::collector::TopDocs;
    /// use tantivy::query::QueryParser;
    /// use tantivy::schema::{Schema, TEXT};
    /// use tantivy::{doc, DocAddress, Index};
    ///
    /// let mut schema_builder = Schema::builder();
    /// let title = schema_builder.add_text_field("title", TEXT);
    /// let schema = schema_builder.build();
    /// let index = Index::create_in_ram(schema);
    ///
    /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
    /// index_writer.add_document(doc!(title => "The Name of the Wind"));
    /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
    /// index_writer.add_document(doc!(title => "A Dairy Cow"));
    /// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
    /// index_writer.add_document(doc!(title => "The Diary of Lena Mukhina"));
    /// assert!(index_writer.commit().is_ok());
    ///
    /// let reader = index.reader().unwrap();
    /// let searcher = reader.searcher();
    ///
    /// let query_parser = QueryParser::for_index(&index, vec![title]);
    /// let query = query_parser.parse_query("diary").unwrap();
    /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
    ///
    /// assert_eq!(top_docs.len(), 2);
    /// assert_eq!(top_docs[0].1, DocAddress(0, 4));
    /// assert_eq!(top_docs[1].1, DocAddress(0, 3));
    /// ```
    pub fn and_offset(self, offset: usize) -> TopDocs {
        TopDocs(self.0.and_offset(offset))
    }

    /// Set top-K to rank documents by a given fast field.
    ///
    /// (By default, `TopDocs` collects the top-K documents sorted by
    /// the similarity score.)
    pub fn order_by_field<T: PartialOrd + FastValue + Clone>(
    /// ```rust
    /// # use tantivy::schema::{Schema, FAST, TEXT};
    /// # use tantivy::{doc, Index, DocAddress};
    /// # use tantivy::query::{Query, QueryParser};
    /// use tantivy::Searcher;
    /// use tantivy::collector::TopDocs;
    /// use tantivy::schema::Field;
    ///
    /// # fn main() -> tantivy::Result<()> {
    /// #   let mut schema_builder = Schema::builder();
    /// #   let title = schema_builder.add_text_field("title", TEXT);
    /// #   let rating = schema_builder.add_u64_field("rating", FAST);
    /// #   let schema = schema_builder.build();
    /// #
    /// #   let index = Index::create_in_ram(schema);
    /// #   let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
    /// #   index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
    /// #   index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
    /// #   index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
    /// #   index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
    /// #   assert!(index_writer.commit().is_ok());
    /// #   let reader = index.reader().unwrap();
    /// #   let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
    /// #   let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
    /// #   assert_eq!(top_docs,
    /// #              vec![(97u64, DocAddress(0u32, 1)),
    /// #                   (80u64, DocAddress(0u32, 3))]);
    /// #   Ok(())
    /// # }
    ///
    ///
    /// /// Searches the documents matching the given query, and
    /// /// collects the top 10 documents, ordered by the u64 `field`
    /// /// given in argument.
    /// ///
    /// /// `field` is required to be a FAST field.
    /// fn docs_sorted_by_rating(searcher: &Searcher,
    ///                          query: &dyn Query,
    ///                          sort_by_field: Field)
    ///     -> tantivy::Result<Vec<(u64, DocAddress)>> {
    ///
    ///     // This is where we build our topdocs collector
    ///     //
    ///     // Note the generics parameter that needs to match the
    ///     // type of `sort_by_field`.
    ///     let top_docs_by_rating = TopDocs
    ///         ::with_limit(10)
    ///         .order_by_u64_field(sort_by_field);
    ///
    ///     // ... and here are our documents. Note this is a simple vec.
    ///     // The `u64` in the pair is the value of our fast field for
    ///     // each document.
    ///     //
    ///     // The vec is sorted decreasingly by `sort_by_field`, and has a
    ///     // length of 10, or less if not enough documents matched the
    ///     // query.
    ///     let resulting_docs: Vec<(u64, DocAddress)> =
    ///         searcher.search(query, &top_docs_by_rating)?;
    ///
    ///     Ok(resulting_docs)
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// May panic if the field requested is not a fast field.
    ///
    pub fn order_by_u64_field(
        self,
        field: Field,
    ) -> TopDocsByField<T> {
        TopDocsByField::new(field, self.0.limit())
    ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
        self.custom_score(ScorerByField { field })
    }

    /// Ranks the documents using a custom score.
    ///
    /// This method offers a convenient way to tweak or replace
    /// the documents' score. As suggested by the prototype, you can
    /// manually define your own [`ScoreTweaker`](./trait.ScoreTweaker.html)
    /// and pass it as an argument, but there is a much simpler way to
    /// tweak your score: you can use a closure as in the following
    /// example.
    ///
    /// # Example
    ///
    /// Typically, you will want to rely on one or more fast fields
    /// to alter the original relevance `Score`.
    ///
    /// For instance, in the following, we assume that we are implementing
    /// an e-commerce website that has a fast field called `popularity`
    /// that reflects how often a product is typically bought by users.
    ///
    /// In the following example we will tweak our ranking a bit by
    /// boosting popular products a notch.
    ///
    /// In a more serious application, this tweaking could involve running a
    /// learning-to-rank model over various features.
    ///
    /// ```rust
    /// # use tantivy::schema::{Schema, FAST, TEXT};
    /// # use tantivy::{doc, Index, DocAddress, DocId, Score};
    /// # use tantivy::query::QueryParser;
    /// use tantivy::SegmentReader;
    /// use tantivy::collector::TopDocs;
    /// use tantivy::schema::Field;
    ///
    /// fn create_schema() -> Schema {
    ///     let mut schema_builder = Schema::builder();
    ///     schema_builder.add_text_field("product_name", TEXT);
    ///     schema_builder.add_u64_field("popularity", FAST);
    ///     schema_builder.build()
    /// }
    ///
    /// fn create_index() -> tantivy::Result<Index> {
    ///     let schema = create_schema();
    ///     let index = Index::create_in_ram(schema);
    ///     let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
    ///     let product_name = index.schema().get_field("product_name").unwrap();
    ///     let popularity: Field = index.schema().get_field("popularity").unwrap();
    ///     index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
    ///     index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
    ///     index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
    ///     index_writer.commit()?;
    ///     Ok(index)
    /// }
    ///
    /// let index = create_index().unwrap();
    /// let product_name = index.schema().get_field("product_name").unwrap();
    /// let popularity: Field = index.schema().get_field("popularity").unwrap();
    ///
    /// let user_query_str = "diary";
    /// let query_parser = QueryParser::for_index(&index, vec![product_name]);
    /// let query = query_parser.parse_query(user_query_str).unwrap();
    ///
    /// // This is where we build our collector with our custom score.
    /// let top_docs_by_custom_score = TopDocs
    ///     ::with_limit(10)
    ///     .tweak_score(move |segment_reader: &SegmentReader| {
    ///         // The argument is a function that returns our scoring
    ///         // function.
    ///         //
    ///         // The point of this "mother" function is to gather all
    ///         // of the segment-level information we need for scoring.
    ///         // Typically, fast fields.
    ///         //
    ///         // In our case, we will get a reader for the popularity
    ///         // fast field.
    ///         let popularity_reader =
    ///             segment_reader.fast_fields().u64(popularity).unwrap();
    ///
    ///         // We can now define our actual scoring function
    ///         move |doc: DocId, original_score: Score| {
    ///             let popularity: u64 = popularity_reader.get(doc);
    ///             // Well.. For the sake of the example we use a simple logarithm
    ///             // function.
    ///             let popularity_boost_score = ((2u64 + popularity) as Score).log2();
    ///             popularity_boost_score * original_score
    ///         }
    ///     });
    /// let reader = index.reader().unwrap();
    /// let searcher = reader.searcher();
    /// // ... and here are our documents. Note this is a simple vec.
    /// // The `Score` in the pair is our tweaked score.
    /// let resulting_docs: Vec<(Score, DocAddress)> =
    ///     searcher.search(&query, &top_docs_by_custom_score).unwrap();
    /// ```
    ///
    /// # See also
    /// [custom_score(...)](#method.custom_score).
    pub fn tweak_score<TScore, TScoreSegmentTweaker, TScoreTweaker>(
        self,
        score_tweaker: TScoreTweaker,
    ) -> impl Collector<Fruit = Vec<(TScore, DocAddress)>>
    where
        TScore: 'static + Send + Sync + Clone + PartialOrd,
        TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
        TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
    {
        TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
    }

    /// Ranks the documents using a custom score.
    ///
    /// This method offers a convenient way to use a different score.
    ///
    /// As suggested by the prototype, you can manually define your
    /// own [`CustomScorer`](./trait.CustomScorer.html)
    /// and pass it as an argument, but there is a much simpler way to
    /// tweak your score: you can use a closure as in the following
    /// example.
    ///
    /// # Limitation
    ///
    /// This method only makes it possible to compute the score from a given
    /// `DocId`, fastfield values for the doc and any information you could
    /// have precomputed beforehand. It does not make it possible, for instance,
    /// to compute something like TfIdf, as it does not have access to the list of query
    /// terms present in the document, nor the term frequencies for the different terms.
    ///
    /// It can be used if your search engine relies on a learning-to-rank model for instance,
    /// which does not rely on the term frequencies or positions as features.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use tantivy::schema::{Schema, FAST, TEXT};
    /// # use tantivy::{doc, Index, DocAddress, DocId};
    /// # use tantivy::query::QueryParser;
    /// use tantivy::SegmentReader;
    /// use tantivy::collector::TopDocs;
    /// use tantivy::schema::Field;
    ///
    /// # fn create_schema() -> Schema {
    /// #    let mut schema_builder = Schema::builder();
    /// #    schema_builder.add_text_field("product_name", TEXT);
    /// #    schema_builder.add_u64_field("popularity", FAST);
    /// #    schema_builder.add_u64_field("boosted", FAST);
    /// #    schema_builder.build()
    /// # }
    /// #
    /// # fn main() -> tantivy::Result<()> {
    /// #    let schema = create_schema();
    /// #    let index = Index::create_in_ram(schema);
    /// #    let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
    /// #    let product_name = index.schema().get_field("product_name").unwrap();
    /// #
    /// let popularity: Field = index.schema().get_field("popularity").unwrap();
    /// let boosted: Field = index.schema().get_field("boosted").unwrap();
    /// # index_writer.add_document(doc!(boosted=>1u64, product_name => "The Diary of Muadib", popularity => 1u64));
    /// # index_writer.add_document(doc!(boosted=>0u64, product_name => "A Dairy Cow", popularity => 10u64));
    /// # index_writer.add_document(doc!(boosted=>0u64, product_name => "The Diary of a Young Girl", popularity => 15u64));
    /// # index_writer.commit()?;
    /// // ...
    /// # let user_query = "diary";
    /// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
    ///
    /// // This is where we build our collector with our custom score.
    /// let top_docs_by_custom_score = TopDocs
    ///     ::with_limit(10)
    ///     .custom_score(move |segment_reader: &SegmentReader| {
    ///         // The argument is a function that returns our scoring
    ///         // function.
    ///         //
    ///         // The point of this "mother" function is to gather all
    ///         // of the segment-level information we need for scoring.
    ///         // Typically, fast fields.
    ///         //
    ///         // In our case, we will get a reader for the popularity
    ///         // fast field and the boosted field.
    ///         //
    ///         // We want boosted items to score higher and, in case of
    ///         // a tie, the item with the highest popularity to come first.
    ///         //
    ///         // Note that this is implemented by using a `(u64, u64)`
    ///         // as a score.
    ///         let popularity_reader =
    ///             segment_reader.fast_fields().u64(popularity).unwrap();
    ///         let boosted_reader =
    ///             segment_reader.fast_fields().u64(boosted).unwrap();
    ///
    ///         // We can now define our actual scoring function
    ///         move |doc: DocId| {
    ///             let popularity: u64 = popularity_reader.get(doc);
    ///             let boosted: u64 = boosted_reader.get(doc);
    ///             // Scores do not have to be `f64` in tantivy.
    ///             // Here we return a couple to get lexicographical order
    ///             // for free.
    ///             (boosted, popularity)
    ///         }
    ///     });
    /// # let reader = index.reader()?;
    /// # let searcher = reader.searcher();
    /// // ... and here are our documents. Note this is a simple vec.
    /// // The `Score` in the pair is our tweaked score.
    /// let resulting_docs: Vec<((u64, u64), DocAddress)> =
    ///     searcher.search(&*query, &top_docs_by_custom_score)?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # See also
    /// [tweak_score(...)](#method.tweak_score).
    pub fn custom_score<TScore, TCustomSegmentScorer, TCustomScorer>(
        self,
        custom_score: TCustomScorer,
    ) -> impl Collector<Fruit = Vec<(TScore, DocAddress)>>
    where
        TScore: 'static + Send + Sync + Clone + PartialOrd,
        TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
        TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
    {
        CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
    }
}

@@ -96,7 +453,7 @@ impl Collector for TopDocs {
        &self,
        segment_local_id: SegmentLocalId,
        reader: &SegmentReader,
    ) -> Result<Self::Child> {
    ) -> crate::Result<Self::Child> {
        let collector = self.0.for_segment(segment_local_id, reader)?;
        Ok(TopScoreSegmentCollector(collector))
    }
@@ -105,9 +462,70 @@ impl Collector for TopDocs {
        true
    }

    fn merge_fruits(&self, child_fruits: Vec<Vec<(Score, DocAddress)>>) -> Result<Self::Fruit> {
    fn merge_fruits(
        &self,
        child_fruits: Vec<Vec<(Score, DocAddress)>>,
    ) -> crate::Result<Self::Fruit> {
        self.0.merge_fruits(child_fruits)
    }

    fn collect_segment(
        &self,
        weight: &dyn Weight,
        segment_ord: u32,
        reader: &SegmentReader,
    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
        let heap_len = self.0.limit + self.0.offset;
        let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
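        // Note on the pruning logic below: `ComparableDoc` is ordered so that
        // `heap.peek()` is the weakest candidate currently retained. Once the
        // heap is full, that weakest score is returned to `for_each_pruning`
        // as the threshold a new document must beat, which lets the weight
        // skip over non-competitive documents.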

        if let Some(delete_bitset) = reader.delete_bitset() {
            let mut threshold = Score::MIN;
            weight.for_each_pruning(threshold, reader, &mut |doc, score| {
                if delete_bitset.is_deleted(doc) {
                    return threshold;
                }
                let heap_item = ComparableDoc {
                    feature: score,
                    doc,
                };
                if heap.len() < heap_len {
                    heap.push(heap_item);
                    if heap.len() == heap_len {
                        threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
                    }
                    return threshold;
                }
                *heap.peek_mut().unwrap() = heap_item;
                threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
                threshold
            })?;
        } else {
            weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
                let heap_item = ComparableDoc {
                    feature: score,
                    doc,
                };
                if heap.len() < heap_len {
                    heap.push(heap_item);
                    // TODO the threshold is suboptimal for heap.len == heap_len
                    if heap.len() == heap_len {
                        return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
                    } else {
                        return Score::MIN;
                    }
                }
                *heap.peek_mut().unwrap() = heap_item;
                heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
            })?;
        }

        let fruit = heap
            .into_sorted_vec()
            .into_iter()
            .map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))
            .collect();
        Ok(fruit)
    }
}

/// Segment Collector associated to `TopDocs`.
@@ -117,7 +535,7 @@ impl SegmentCollector for TopScoreSegmentCollector {
    type Fruit = Vec<(Score, DocAddress)>;

    fn collect(&mut self, doc: DocId, score: Score) {
        self.0.collect(doc, score)
        self.0.collect(doc, score);
    }

    fn harvest(self) -> Vec<(Score, DocAddress)> {
@@ -128,12 +546,13 @@ impl SegmentCollector for TopScoreSegmentCollector {
#[cfg(test)]
mod tests {
    use super::TopDocs;
    use query::QueryParser;
    use schema::Schema;
    use schema::TEXT;
    use DocAddress;
    use Index;
    use Score;
    use crate::collector::Collector;
    use crate::query::{AllQuery, Query, QueryParser};
    use crate::schema::{Field, Schema, FAST, STORED, TEXT};
    use crate::Index;
    use crate::IndexWriter;
    use crate::Score;
    use crate::{DocAddress, DocId, SegmentReader};

    fn make_index() -> Index {
        let mut schema_builder = Schema::builder();
@@ -148,10 +567,16 @@ mod tests {
            index_writer.add_document(doc!(text_field=>"I like Droopy"));
            assert!(index_writer.commit().is_ok());
        }
        index.load_searchers().unwrap();
        index
    }

    fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
        for (result, expected) in results.iter().zip(expected.iter()) {
            assert_eq!(result.1, expected.1);
            crate::assert_nearly_equals!(result.0, expected.0);
        }
    }

    #[test]
    fn test_top_collector_not_at_capacity() {
        let index = make_index();
@@ -159,19 +584,36 @@ mod tests {
        let query_parser = QueryParser::for_index(&index, vec![field]);
        let text_query = query_parser.parse_query("droopy tax").unwrap();
        let score_docs: Vec<(Score, DocAddress)> = index
            .reader()
            .unwrap()
            .searcher()
            .search(&text_query, &TopDocs::with_limit(4))
            .unwrap();
        assert_eq!(
            score_docs,
            vec![
        assert_results_equals(
            &score_docs,
            &[
                (0.81221175, DocAddress(0u32, 1)),
                (0.5376842, DocAddress(0u32, 2)),
                (0.48527452, DocAddress(0, 0))
            ]
                (0.48527452, DocAddress(0, 0)),
            ],
        );
    }

    #[test]
    fn test_top_collector_not_at_capacity_with_offset() {
        let index = make_index();
        let field = index.schema().get_field("text").unwrap();
        let query_parser = QueryParser::for_index(&index, vec![field]);
        let text_query = query_parser.parse_query("droopy tax").unwrap();
        let score_docs: Vec<(Score, DocAddress)> = index
            .reader()
            .unwrap()
            .searcher()
            .search(&text_query, &TopDocs::with_limit(4).and_offset(2))
            .unwrap();
        assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
    }

    #[test]
    fn test_top_collector_at_capacity() {
        let index = make_index();
@@ -179,22 +621,211 @@ mod tests {
        let query_parser = QueryParser::for_index(&index, vec![field]);
        let text_query = query_parser.parse_query("droopy tax").unwrap();
        let score_docs: Vec<(Score, DocAddress)> = index
            .reader()
            .unwrap()
            .searcher()
            .search(&text_query, &TopDocs::with_limit(2))
            .unwrap();
        assert_eq!(
            score_docs,
            vec![
        assert_results_equals(
            &score_docs,
            &[
                (0.81221175, DocAddress(0u32, 1)),
                (0.5376842, DocAddress(0u32, 2)),
            ]
            ],
        );
    }

    #[test]
    fn test_top_collector_at_capacity_with_offset() {
        let index = make_index();
        let field = index.schema().get_field("text").unwrap();
        let query_parser = QueryParser::for_index(&index, vec![field]);
        let text_query = query_parser.parse_query("droopy tax").unwrap();
        let score_docs: Vec<(Score, DocAddress)> = index
            .reader()
            .unwrap()
            .searcher()
            .search(&text_query, &TopDocs::with_limit(2).and_offset(1))
            .unwrap();
        assert_results_equals(
            &score_docs[..],
            &[
                (0.5376842, DocAddress(0u32, 2)),
                (0.48527452, DocAddress(0, 0)),
            ],
        );
    }

    #[test]
    fn test_top_collector_stable_sorting() {
        let index = make_index();

        // using AllQuery to get a constant score
        let searcher = index.reader().unwrap().searcher();

        let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();

        let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();

        // precondition for the test to be meaningful: we did get documents
        // with the same score
        assert!(page_1.iter().all(|result| result.0 == page_1[0].0));
        assert!(page_2.iter().all(|result| result.0 == page_2[0].0));

        // sanity check since we're relying on make_index()
        assert_eq!(page_1.len(), 2);
        assert_eq!(page_2.len(), 3);

        assert_eq!(page_1, &page_2[..page_1.len()]);
    }

    #[test]
    #[should_panic]
    fn test_top_0() {
        TopDocs::with_limit(0);
    }

    const TITLE: &str = "title";
    const SIZE: &str = "size";

    #[test]
    fn test_top_field_collector_not_at_capacity() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, FAST);
        let schema = schema_builder.build();
        let (index, query) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
            index_writer.add_document(doc!(
                title => "growler of beer",
                size => 64u64,
            ));
            index_writer.add_document(doc!(
                title => "pint of beer",
                size => 16u64,
            ));
        });
        let searcher = index.reader().unwrap().searcher();

        let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
        let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
        assert_eq!(
            &top_docs[..],
            &[
                (64, DocAddress(0, 1)),
                (16, DocAddress(0, 2)),
                (12, DocAddress(0, 0))
            ]
        );
    }

    #[test]
    #[should_panic]
    fn test_field_does_not_exist() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, FAST);
        let schema = schema_builder.build();
        let (index, _) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
        });
        let searcher = index.reader().unwrap().searcher();
        let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field::from_field_id(2));
        let segment_reader = searcher.segment_reader(0u32);
        top_collector
            .for_segment(0, segment_reader)
            .expect("should panic");
    }

    #[test]
    fn test_field_not_fast_field() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, STORED);
        let schema = schema_builder.build();
        let (index, _) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
        });
        let searcher = index.reader().unwrap().searcher();
        let segment = searcher.segment_reader(0);
        let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
        let err = top_collector.for_segment(0, segment);
        if let Err(crate::TantivyError::SchemaError(msg)) = err {
            assert_eq!(
                msg,
                "Field requested (Field(1)) is not a i64/u64 fast field."
            );
        } else {
            assert!(false);
        }
    }

    #[test]
    fn test_tweak_score_top_collector_with_offset() {
        let index = make_index();
        let field = index.schema().get_field("text").unwrap();
        let query_parser = QueryParser::for_index(&index, vec![field]);
        let text_query = query_parser.parse_query("droopy tax").unwrap();
        let collector = TopDocs::with_limit(2).and_offset(1).tweak_score(
            move |_segment_reader: &SegmentReader| move |doc: DocId, _original_score: Score| doc,
        );
        let score_docs: Vec<(u32, DocAddress)> = index
            .reader()
            .unwrap()
            .searcher()
            .search(&text_query, &collector)
            .unwrap();

        assert_eq!(
            score_docs,
            vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
        );
    }

    #[test]
    fn test_custom_score_top_collector_with_offset() {
        let index = make_index();
        let field = index.schema().get_field("text").unwrap();
        let query_parser = QueryParser::for_index(&index, vec![field]);
        let text_query = query_parser.parse_query("droopy tax").unwrap();
        let collector = TopDocs::with_limit(2)
            .and_offset(1)
            .custom_score(move |_segment_reader: &SegmentReader| move |doc: DocId| doc);
        let score_docs: Vec<(u32, DocAddress)> = index
            .reader()
            .unwrap()
            .searcher()
            .search(&text_query, &collector)
            .unwrap();

        assert_eq!(
            score_docs,
            vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0)),]
        );
    }

    fn index(
        query: &str,
        query_field: Field,
        schema: Schema,
        mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
    ) -> (Index, Box<dyn Query>) {
        let index = Index::create_in_ram(schema);

        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        doc_adder(&mut index_writer);
        index_writer.commit().unwrap();
        let query_parser = QueryParser::for_index(&index, vec![query_field]);
        let query = query_parser.parse_query(query).unwrap();
        (index, query)
    }
}

129 src/collector/tweak_score_top_collector.rs (new file)
@@ -0,0 +1,129 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector};
use crate::DocAddress;
use crate::{DocId, Result, Score, SegmentReader};

pub(crate) struct TweakedScoreTopCollector<TScoreTweaker, TScore = Score> {
    score_tweaker: TScoreTweaker,
    collector: TopCollector<TScore>,
}

impl<TScoreTweaker, TScore> TweakedScoreTopCollector<TScoreTweaker, TScore>
where
    TScore: Clone + PartialOrd,
{
    pub fn new(
        score_tweaker: TScoreTweaker,
        collector: TopCollector<TScore>,
    ) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
        TweakedScoreTopCollector {
            score_tweaker,
            collector,
        }
    }
}

/// A `ScoreSegmentTweaker` makes it possible to modify the default score
/// for a given document belonging to a specific segment.
///
/// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
pub trait ScoreSegmentTweaker<TScore>: 'static {
    /// Tweak the given `score` for the document `doc`.
    fn score(&mut self, doc: DocId, score: Score) -> TScore;
}

/// `ScoreTweaker` makes it possible to tweak the score
/// emitted by the scorer into another one.
///
/// The `ScoreTweaker` itself does not do much of the computation.
/// Instead, it helps constructing `Self::Child` instances that will compute
/// the score at a segment scale.
pub trait ScoreTweaker<TScore>: Sync {
    /// Type of the associated [`ScoreSegmentTweaker`](./trait.ScoreSegmentTweaker.html).
    type Child: ScoreSegmentTweaker<TScore>;

    /// Builds a child tweaker for a specific segment. The child scorer is associated to
    /// a specific segment.
    fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
}

impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
where
    TScoreTweaker: ScoreTweaker<TScore>,
    TScore: 'static + PartialOrd + Clone + Send + Sync,
{
    type Fruit = Vec<(TScore, DocAddress)>;

    type Child = TopTweakedScoreSegmentCollector<TScoreTweaker::Child, TScore>;

    fn for_segment(
        &self,
        segment_local_id: u32,
        segment_reader: &SegmentReader,
    ) -> Result<Self::Child> {
        let segment_scorer = self.score_tweaker.segment_tweaker(segment_reader)?;
        let segment_collector = self
            .collector
            .for_segment(segment_local_id, segment_reader)?;
        Ok(TopTweakedScoreSegmentCollector {
            segment_collector,
            segment_scorer,
        })
    }

    fn requires_scoring(&self) -> bool {
        true
    }

    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> Result<Self::Fruit> {
        self.collector.merge_fruits(segment_fruits)
    }
}

pub struct TopTweakedScoreSegmentCollector<TSegmentScoreTweaker, TScore>
where
    TScore: 'static + PartialOrd + Clone + Send + Sync + Sized,
    TSegmentScoreTweaker: ScoreSegmentTweaker<TScore>,
{
    segment_collector: TopSegmentCollector<TScore>,
    segment_scorer: TSegmentScoreTweaker,
}

impl<TSegmentScoreTweaker, TScore> SegmentCollector
    for TopTweakedScoreSegmentCollector<TSegmentScoreTweaker, TScore>
where
    TScore: 'static + PartialOrd + Clone + Send + Sync,
    TSegmentScoreTweaker: 'static + ScoreSegmentTweaker<TScore>,
{
    type Fruit = Vec<(TScore, DocAddress)>;

    fn collect(&mut self, doc: DocId, score: Score) {
        let score = self.segment_scorer.score(doc, score);
        self.segment_collector.collect(doc, score);
    }

    fn harvest(self) -> Vec<(TScore, DocAddress)> {
        self.segment_collector.harvest()
    }
}

impl<F, TScore, TSegmentScoreTweaker> ScoreTweaker<TScore> for F
where
    F: 'static + Send + Sync + Fn(&SegmentReader) -> TSegmentScoreTweaker,
    TSegmentScoreTweaker: ScoreSegmentTweaker<TScore>,
{
    type Child = TSegmentScoreTweaker;

    fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
        Ok((self)(segment_reader))
    }
}

impl<F, TScore> ScoreSegmentTweaker<TScore> for F
where
    F: 'static + FnMut(DocId, Score) -> TScore,
{
    fn score(&mut self, doc: DocId, score: Score) -> TScore {
        (self)(doc, score)
    }
}
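// Usage sketch: thanks to the two blanket impls above, a plain closure can
// serve as a `ScoreTweaker` without any boilerplate, e.g.
//
//     let collector = TopDocs::with_limit(10)
//         .tweak_score(|_segment_reader: &SegmentReader| {
//             move |_doc: DocId, score: Score| score * 2.0
//         });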
@@ -64,7 +64,7 @@ pub struct BitUnpacker<Data>
where
    Data: Deref<Target = [u8]>,
{
    num_bits: usize,
    num_bits: u64,
    mask: u64,
    data: Data,
}
@@ -80,13 +80,13 @@ where
            (1u64 << num_bits) - 1u64
        };
        BitUnpacker {
            num_bits: num_bits as usize,
            num_bits: u64::from(num_bits),
            mask,
            data,
        }
    }

    pub fn get(&self, idx: usize) -> u64 {
    pub fn get(&self, idx: u64) -> u64 {
        if self.num_bits == 0 {
            return 0u64;
        }
@@ -97,10 +97,10 @@ where
        let addr = addr_in_bits >> 3;
        let bit_shift = addr_in_bits & 7;
        debug_assert!(
            addr + 8 <= data.len(),
            addr + 8 <= data.len() as u64,
            "The fast field field should have been padded with 7 bytes."
        );
        let val_unshifted_unmasked: u64 = LittleEndian::read_u64(&data[addr..]);
        let val_unshifted_unmasked: u64 = LittleEndian::read_u64(&data[(addr as usize)..]);
        let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
        val_shifted & mask
    }
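// How `get` extracts a value: the entry at `idx` starts at bit
// `idx * num_bits` (computed in the lines elided by the hunk above); the
// code reads the 8 bytes containing that bit, shifts the leading bits away,
// and masks the result down to `num_bits` bits. The 7 bytes of padding
// asserted above keep the unconditional 8-byte read in bounds.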
@@ -129,7 +129,7 @@ mod test {
    fn test_bitpacker_util(len: usize, num_bits: u8) {
        let (bitunpacker, vals) = create_fastfield_bitpacker(len, num_bits);
        for (i, val) in vals.iter().enumerate() {
            assert_eq!(bitunpacker.get(i), *val);
            assert_eq!(bitunpacker.get(i as u64), *val);
        }
    }


@@ -5,7 +5,7 @@ use std::u64;
pub(crate) struct TinySet(u64);

impl fmt::Debug for TinySet {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.into_iter().collect::<Vec<u32>>().fmt(f)
    }
}
@@ -33,6 +33,10 @@ impl TinySet {
        TinySet(0u64)
    }

    pub fn clear(&mut self) {
        self.0 = 0u64;
    }

    /// Returns the complement of the set in `[0, 64[`.
    fn complement(self) -> TinySet {
        TinySet(!self.0)
@@ -43,6 +47,11 @@ impl TinySet {
        !self.intersect(TinySet::singleton(el)).is_empty()
    }

    /// Returns the number of elements in the TinySet.
    pub fn len(self) -> u32 {
        self.0.count_ones()
    }
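    // `len` simply counts the set bits of the underlying `u64`,
    // e.g. `TinySet::singleton(3).len() == 1`.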

    /// Returns the intersection of `self` and `other`
    pub fn intersect(self, other: TinySet) -> TinySet {
        TinySet(self.0 & other.0)
@@ -109,22 +118,12 @@ impl TinySet {
    pub fn range_greater_or_equal(from_included: u32) -> TinySet {
        TinySet::range_lower(from_included).complement()
    }

    pub fn clear(&mut self) {
        self.0 = 0u64;
    }

    pub fn len(self) -> u32 {
        self.0.count_ones()
    }
}

#[derive(Clone)]
pub struct BitSet {
    tinysets: Box<[TinySet]>,
    len: usize, //< Technically it should be u32, but we
    // count multiple inserts.
    // `usize` guards us from overflow.
    len: usize,
    max_value: u32,
}

@@ -204,12 +203,12 @@ mod tests {

    use super::BitSet;
    use super::TinySet;
    use docset::DocSet;
    use query::BitSetDocSet;
    use crate::docset::{DocSet, TERMINATED};
    use crate::query::BitSetDocSet;
    use crate::tests;
    use crate::tests::generate_nonunique_unsorted;
    use std::collections::BTreeSet;
    use std::collections::HashSet;
    use tests;
    use tests::generate_nonunique_unsorted;

    #[test]
    fn test_tiny_set() {
@@ -278,11 +277,13 @@ mod tests {
        }
        assert_eq!(btreeset.len(), bitset.len());
        let mut bitset_docset = BitSetDocSet::from(bitset);
        let mut remaining = true;
        for el in btreeset.into_iter() {
            bitset_docset.advance();
            assert!(remaining);
            assert_eq!(bitset_docset.doc(), el);
            remaining = bitset_docset.advance() != TERMINATED;
        }
        assert!(!bitset_docset.advance());
        assert!(!remaining);
    }

    #[test]

@@ -1,11 +1,11 @@
use common::BinarySerializable;
use common::CountingWriter;
use common::VInt;
use directory::ReadOnlySource;
use directory::WritePtr;
use schema::Field;
use space_usage::FieldUsage;
use space_usage::PerFieldSpaceUsage;
use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap;
use std::io::Write;
use std::io::{self, Read};
@@ -39,10 +39,10 @@ impl BinarySerializable for FileAddr {
/// A `CompositeWrite` is used to write a `CompositeFile`.
pub struct CompositeWrite<W = WritePtr> {
    write: CountingWriter<W>,
    offsets: HashMap<FileAddr, usize>,
    offsets: HashMap<FileAddr, u64>,
}

impl<W: Write> CompositeWrite<W> {
impl<W: TerminatingWrite + Write> CompositeWrite<W> {
    /// Create a new API writer that writes a composite file
    /// in a given write.
    pub fn wrap(w: W) -> CompositeWrite<W> {
@@ -91,8 +91,7 @@ impl<W: Write> CompositeWrite<W> {

        let footer_len = (self.write.written_bytes() - footer_offset) as u32;
        footer_len.serialize(&mut self.write)?;
        self.write.flush()?;
        Ok(())
        self.write.terminate()
    }
}

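// Layout recap: a `CompositeFile` packs several per-field sub-files into one
// physical file. Each payload is written back to back, then a footer mapping
// each `FileAddr` to its start offset is appended, and the footer length is
// serialized last (see above), so a reader can locate every sub-file
// starting from the end of the file.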
@@ -185,10 +184,10 @@ impl CompositeFile {
mod test {

    use super::{CompositeFile, CompositeWrite};
    use common::BinarySerializable;
    use common::VInt;
    use directory::{Directory, RAMDirectory};
    use schema::Field;
    use crate::common::BinarySerializable;
    use crate::common::VInt;
    use crate::directory::{Directory, RAMDirectory};
    use crate::schema::Field;
    use std::io::Write;
    use std::path::Path;

@@ -200,13 +199,13 @@ mod test {
        let w = directory.open_write(path).unwrap();
        let mut composite_write = CompositeWrite::wrap(w);
        {
            let mut write_0 = composite_write.for_field(Field(0u32));
            let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
            VInt(32431123u64).serialize(&mut write_0).unwrap();
            write_0.flush().unwrap();
        }

        {
            let mut write_4 = composite_write.for_field(Field(4u32));
            let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
            VInt(2).serialize(&mut write_4).unwrap();
            write_4.flush().unwrap();
        }
@@ -216,14 +215,18 @@ mod test {
        let r = directory.open_read(path).unwrap();
        let composite_file = CompositeFile::open(&r).unwrap();
        {
            let file0 = composite_file.open_read(Field(0u32)).unwrap();
            let file0 = composite_file
                .open_read(Field::from_field_id(0u32))
                .unwrap();
            let mut file0_buf = file0.as_slice();
            let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
            assert_eq!(file0_buf.len(), 0);
            assert_eq!(payload_0, 32431123u64);
        }
        {
            let file4 = composite_file.open_read(Field(4u32)).unwrap();
            let file4 = composite_file
                .open_read(Field::from_field_id(4u32))
                .unwrap();
            let mut file4_buf = file4.as_slice();
            let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
            assert_eq!(file4_buf.len(), 0);
@@ -231,5 +234,4 @@ mod test {
        }
    }
}

}

@@ -1,9 +1,11 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io;
use std::io::Write;

pub struct CountingWriter<W> {
    underlying: W,
    written_bytes: usize,
    written_bytes: u64,
}

impl<W: Write> CountingWriter<W> {
@@ -14,11 +16,11 @@ impl<W: Write> CountingWriter<W> {
        }
    }

    pub fn written_bytes(&self) -> usize {
    pub fn written_bytes(&self) -> u64 {
        self.written_bytes
    }

    pub fn finish(mut self) -> io::Result<(W, usize)> {
    pub fn finish(mut self) -> io::Result<(W, u64)> {
        self.flush()?;
        Ok((self.underlying, self.written_bytes))
    }
@@ -27,15 +29,28 @@ impl<W: Write> CountingWriter<W> {
impl<W: Write> Write for CountingWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written_size = self.underlying.write(buf)?;
        self.written_bytes += written_size;
        self.written_bytes += written_size as u64;
        Ok(written_size)
    }

    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.underlying.write_all(buf)?;
        self.written_bytes += buf.len() as u64;
        Ok(())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.underlying.flush()
    }
}

impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        self.flush()?;
        self.underlying.terminate_ref(token)
    }
}

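// Note: `terminate_ref` forwards termination through the wrapper after a
// final flush, so the innermost writer still receives exactly one
// termination call even when nested inside a `CountingWriter`.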
#[cfg(test)]
mod test {

@@ -48,8 +63,8 @@ mod test {
        let mut counting_writer = CountingWriter::wrap(buffer);
        let bytes = (0u8..10u8).collect::<Vec<u8>>();
        counting_writer.write_all(&bytes).unwrap();
        let (w, len): (Vec<u8>, usize) = counting_writer.finish().unwrap();
        assert_eq!(len, 10);
        let (w, len): (Vec<u8>, u64) = counting_writer.finish().unwrap();
        assert_eq!(len, 10u64);
        assert_eq!(w.len(), 10);
    }
}

@@ -10,10 +10,28 @@ pub(crate) use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use self::vint::{
    read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
};
pub use byteorder::LittleEndian as Endianness;

use std::io;
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
/// We do not allow segments with more than `2^31` documents.
pub const MAX_DOC_LIMIT: u32 = 1 << 31;

pub fn minmax<I, T>(mut vals: I) -> Option<(T, T)>
where
    I: Iterator<Item = T>,
    T: Copy + Ord,
{
    if let Some(first_el) = vals.next() {
        return Some(vals.fold((first_el, first_el), |(min_val, max_val), el| {
            (min_val.min(el), max_val.max(el))
        }));
    }
    None
}

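// e.g. `minmax(vec![2, 1, 3].into_iter())` returns `Some((1, 3))`, and an
// empty iterator returns `None` (see `test_minmax_*` below).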
/// Computes the number of bits that will be used for bitpacking.
///
@@ -52,11 +70,6 @@ pub(crate) fn is_power_of_2(n: usize) -> bool {
    (n > 0) && (n & (n - 1) == 0)
}

/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
    io::Error::new(io::ErrorKind::Other, msg)
}

/// Has length trait
pub trait HasLen {
    /// Return length
@@ -101,16 +114,54 @@ pub fn u64_to_i64(val: u64) -> i64 {
    (val ^ HIGHEST_BIT) as i64
}

/// Maps a `f64` to `u64`
///
/// For simplicity, tantivy internally handles `f64` as `u64`.
/// The mapping is defined by this function.
///
/// Maps `f64` to `u64` so that lexical order is preserved.
///
/// This is better suited than simply casting (`val as u64`),
/// which would truncate the result.
///
/// # See also
/// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
#[inline(always)]
pub fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        bits ^ HIGHEST_BIT
    } else {
        !bits
    }
}

/// Reverse the mapping given by [`f64_to_u64`](./fn.f64_to_u64.html).
#[inline(always)]
pub fn u64_to_f64(val: u64) -> f64 {
    f64::from_bits(if val & HIGHEST_BIT != 0 {
        val ^ HIGHEST_BIT
    } else {
        !val
    })
}

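// Why this preserves order: for a positive IEEE 754 double, the raw bits
// already compare like the numeric value, so flipping only the sign bit
// lifts positives above every negative; for a negative double the raw-bit
// order is the reverse of the numeric order, and inverting all bits both
// fixes that reversal and keeps negatives below positives. `test_f64_order`
// below exercises exactly these cases.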
#[cfg(test)]
pub(crate) mod test {

    pub use super::minmax;
    pub use super::serialize::test::fixed_size_test;
    use super::{compute_num_bits, i64_to_u64, u64_to_i64};
    use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
    use std::f64;

    fn test_i64_converter_helper(val: i64) {
        assert_eq!(u64_to_i64(i64_to_u64(val)), val);
    }

    fn test_f64_converter_helper(val: f64) {
        assert_eq!(u64_to_f64(f64_to_u64(val)), val);
    }

    #[test]
    fn test_i64_converter() {
        assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
@@ -123,6 +174,29 @@ pub(crate) mod test {
        }
    }

    #[test]
    fn test_f64_converter() {
        test_f64_converter_helper(f64::INFINITY);
        test_f64_converter_helper(f64::NEG_INFINITY);
        test_f64_converter_helper(0.0);
        test_f64_converter_helper(-0.0);
        test_f64_converter_helper(1.0);
        test_f64_converter_helper(-1.0);
    }

    #[test]
    fn test_f64_order() {
        assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY))
            .contains(&f64_to_u64(f64::NAN))); // NaN is not a number
        assert!(f64_to_u64(1.5) > f64_to_u64(1.0)); // same exponent, different mantissa
        assert!(f64_to_u64(2.0) > f64_to_u64(1.0)); // same mantissa, different exponent
        assert!(f64_to_u64(2.0) > f64_to_u64(1.5)); // different exponent and mantissa
        assert!(f64_to_u64(1.0) > f64_to_u64(-1.0)); // pos > neg
        assert!(f64_to_u64(-1.5) < f64_to_u64(-1.0));
        assert!(f64_to_u64(-2.0) < f64_to_u64(1.0));
        assert!(f64_to_u64(-2.0) < f64_to_u64(-1.5));
    }

    #[test]
    fn test_compute_num_bits() {
        assert_eq!(compute_num_bits(1), 1u8);
@@ -134,4 +208,28 @@ pub(crate) mod test {
        assert_eq!(compute_num_bits(256), 9u8);
        assert_eq!(compute_num_bits(5_000_000_000), 33u8);
    }

    #[test]
    fn test_max_doc() {
        // this is the first time I write a unit test for a constant.
        assert!(((super::MAX_DOC_LIMIT - 1) as i32) >= 0);
        assert!((super::MAX_DOC_LIMIT as i32) < 0);
    }

    #[test]
    fn test_minmax_empty() {
        let vals: Vec<u32> = vec![];
        assert_eq!(minmax(vals.into_iter()), None);
    }

    #[test]
    fn test_minmax_one() {
        assert_eq!(minmax(vec![1].into_iter()), Some((1, 1)));
    }

    #[test]
    fn test_minmax_two() {
        assert_eq!(minmax(vec![1, 2].into_iter()), Some((1, 2)));
        assert_eq!(minmax(vec![2, 1].into_iter()), Some((1, 2)));
    }
}
|
||||
@@ -1,6 +1,6 @@
|
||||
use crate::common::Endianness;
|
||||
use crate::common::VInt;
|
||||
use byteorder::{ReadBytesExt, WriteBytesExt};
|
||||
use common::Endianness;
|
||||
use common::VInt;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::io::Read;
|
||||
@@ -89,6 +89,19 @@ impl FixedSize for u64 {
|
||||
const SIZE_IN_BYTES: usize = 8;
|
||||
}
|
||||
|
||||
impl BinarySerializable for f32 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_f32::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
reader.read_f32::<Endianness>()
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for f32 {
|
||||
const SIZE_IN_BYTES: usize = 4;
|
||||
}
|
||||
|
||||
impl BinarySerializable for i64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_i64::<Endianness>(*self)
|
||||
@@ -102,6 +115,19 @@ impl FixedSize for i64 {
|
||||
const SIZE_IN_BYTES: usize = 8;
|
||||
}
|
||||
|
||||
impl BinarySerializable for f64 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_f64::<Endianness>(*self)
|
||||
}
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
|
||||
reader.read_f64::<Endianness>()
|
||||
}
|
||||
}
|
||||
|
||||
impl FixedSize for f64 {
|
||||
const SIZE_IN_BYTES: usize = 8;
|
||||
}
|
||||
|
||||
impl BinarySerializable for u8 {
|
||||
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_u8(*self)
|
||||
@@ -136,7 +162,7 @@ impl BinarySerializable for String {
|
||||
pub mod test {
|
||||
|
||||
use super::*;
|
||||
use common::VInt;
|
||||
use crate::common::VInt;
|
||||
|
||||
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
|
||||
let mut buffer = Vec::new();
|
||||
@@ -172,6 +198,11 @@ pub mod test {
|
||||
fixed_size_test::<i64>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_f64() {
|
||||
fixed_size_test::<f64>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_u64() {
|
||||
fixed_size_test::<u64>();
|
||||
@@ -181,10 +212,7 @@ pub mod test {
|
||||
fn test_serialize_string() {
|
||||
assert_eq!(serialize_test(String::from("")), 1);
|
||||
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
|
||||
assert_eq!(
|
||||
serialize_test(String::from("富士さん見える。")),
|
||||
1 + 3 * 8
|
||||
);
|
||||
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -5,12 +5,12 @@ use std::io::Read;
|
||||
use std::io::Write;
|
||||
|
||||
/// Wrapper over a `u64` that serializes as a variable int.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
pub struct VInt(pub u64);
|
||||
|
||||
const STOP_BIT: u8 = 128;
|
||||
|
||||
pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
|
||||
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
||||
const START_2: u64 = 1 << 7;
|
||||
const START_3: u64 = 1 << 14;
|
||||
const START_4: u64 = 1 << 21;
|
||||
@@ -29,17 +29,17 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
|
||||
|
||||
let val = u64::from(val);
|
||||
const STOP_BIT: u64 = 128u64;
|
||||
match val {
|
||||
0...STOP_1 => (val | STOP_BIT, 1),
|
||||
START_2...STOP_2 => (
|
||||
let (res, num_bytes) = match val {
|
||||
0..=STOP_1 => (val | STOP_BIT, 1),
|
||||
START_2..=STOP_2 => (
|
||||
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
|
||||
2,
|
||||
),
|
||||
START_3...STOP_3 => (
|
||||
START_3..=STOP_3 => (
|
||||
(val & MASK_1) | ((val & MASK_2) << 1) | ((val & MASK_3) << 2) | (STOP_BIT << (8 * 2)),
|
||||
3,
|
||||
),
|
||||
START_4...STOP_4 => (
|
||||
START_4..=STOP_4 => (
|
||||
(val & MASK_1)
|
||||
| ((val & MASK_2) << 1)
|
||||
| ((val & MASK_3) << 2)
|
||||
@@ -56,7 +56,9 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
|
||||
| (STOP_BIT << (8 * 4)),
|
||||
5,
|
||||
),
|
||||
}
|
||||
};
|
||||
LittleEndian::write_u64(&mut buf[..], res);
|
||||
&buf[0..num_bytes]
|
||||
}
|
||||
|
||||
/// Returns the number of bytes covered by a
|
||||
@@ -85,23 +87,26 @@ fn vint_len(data: &[u8]) -> usize {
|
||||
/// If the buffer does not start with a valid
/// vint payload
|
||||
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
|
||||
let vlen = vint_len(*data);
|
||||
let (result, vlen) = read_u32_vint_no_advance(*data);
|
||||
*data = &data[vlen..];
|
||||
result
|
||||
}
|
||||
|
||||
pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
|
||||
let vlen = vint_len(data);
|
||||
let mut result = 0u32;
|
||||
let mut shift = 0u64;
|
||||
for &b in &data[..vlen] {
|
||||
result |= u32::from(b & 127u8) << shift;
|
||||
shift += 7;
|
||||
}
|
||||
*data = &data[vlen..];
|
||||
result
|
||||
(result, vlen)
|
||||
}
|
||||
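A hedged round-trip sketch of the buffer-based API introduced in this hunk: `serialize_vint_u32` writes into a caller-provided 8-byte buffer and returns the encoded slice, which `read_u32_vint_no_advance` decodes back:

    let mut buf = [0u8; 8];
    let bytes = serialize_vint_u32(300u32, &mut buf);
    let (decoded, num_bytes) = read_u32_vint_no_advance(bytes);
    assert_eq!(decoded, 300u32);
    assert_eq!(num_bytes, 2); // 300 needs two 7-bit groups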
|
||||
/// Write a `u32` as a vint payload.
|
||||
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
|
||||
let (val, num_bytes) = serialize_vint_u32(val);
|
||||
let mut buffer = [0u8; 8];
|
||||
LittleEndian::write_u64(&mut buffer, val);
|
||||
writer.write_all(&buffer[..num_bytes])
|
||||
let mut buf = [0u8; 8];
|
||||
let data = serialize_vint_u32(val, &mut buf);
|
||||
writer.write_all(&data)
|
||||
}
|
||||
|
||||
impl VInt {
|
||||
@@ -171,8 +176,7 @@ mod tests {
|
||||
|
||||
use super::serialize_vint_u32;
|
||||
use super::VInt;
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use common::BinarySerializable;
|
||||
use crate::common::BinarySerializable;
|
||||
|
||||
fn aux_test_vint(val: u64) {
|
||||
let mut v = [14u8; 10];
|
||||
@@ -208,12 +212,10 @@ mod tests {
|
||||
|
||||
fn aux_test_serialize_vint_u32(val: u32) {
|
||||
let mut buffer = [0u8; 10];
|
||||
let mut buffer2 = [0u8; 10];
|
||||
let mut buffer2 = [0u8; 8];
|
||||
let len_vint = VInt(val as u64).serialize_into(&mut buffer);
|
||||
let (vint, len) = serialize_vint_u32(val);
|
||||
assert_eq!(len, len_vint, "len wrong for val {}", val);
|
||||
LittleEndian::write_u64(&mut buffer2, vint);
|
||||
assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
|
||||
let res2 = serialize_vint_u32(val, &mut buffer2);
|
||||
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use crossbeam::channel;
|
||||
use scoped_pool::{Pool, ThreadConfig};
|
||||
use Result;
|
||||
use rayon::{ThreadPool, ThreadPoolBuilder};
|
||||
|
||||
/// Search executor in charge of running search requests, either on a single thread or on a thread pool.
|
||||
///
|
||||
@@ -10,8 +9,10 @@ use Result;
|
||||
/// API of a dependency, knowing it might conflict with a different version
|
||||
/// used by the client. Second, we may stop using rayon in the future.
|
||||
pub enum Executor {
|
||||
/// Single thread variant of an Executor
|
||||
SingleThread,
|
||||
ThreadPool(Pool),
|
||||
/// Thread pool variant of an Executor
|
||||
ThreadPool(ThreadPool),
|
||||
}
|
||||
|
||||
impl Executor {
|
||||
@@ -20,37 +21,39 @@ impl Executor {
|
||||
Executor::SingleThread
|
||||
}
|
||||
|
||||
// Creates an Executor that dispatches the tasks in a thread pool.
|
||||
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
|
||||
let thread_config = ThreadConfig::new().prefix(prefix);
|
||||
let pool = Pool::with_thread_config(num_threads, thread_config);
|
||||
Executor::ThreadPool(pool)
|
||||
/// Creates an Executor that dispatches the tasks in a thread pool.
|
||||
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> crate::Result<Executor> {
|
||||
let pool = ThreadPoolBuilder::new()
|
||||
.num_threads(num_threads)
|
||||
.thread_name(move |num| format!("{}{}", prefix, num))
|
||||
.build()?;
|
||||
Ok(Executor::ThreadPool(pool))
|
||||
}
|
||||
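An illustrative usage sketch of the rayon-backed executor (note that `multi_thread` now returns a `Result`, hence the `?`):

    let executor = Executor::multi_thread(4, "search-test-")?;
    let doubled: Vec<usize> = executor.map(|i| Ok(i * 2), 0..8)?;
    assert_eq!(doubled, vec![0, 2, 4, 6, 8, 10, 12, 14]);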
|
||||
// Perform a map in the thread pool.
|
||||
//
|
||||
// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
|
||||
// will propagate to the caller.
|
||||
/// Perform a map in the thread pool.
|
||||
///
|
||||
/// Regardless of the executor (`SingleThread` or `ThreadPool`), panics in the task
|
||||
/// will propagate to the caller.
|
||||
pub fn map<
|
||||
A: Send,
|
||||
R: Send,
|
||||
AIterator: Iterator<Item = A>,
|
||||
F: Sized + Sync + Fn(A) -> Result<R>,
|
||||
F: Sized + Sync + Fn(A) -> crate::Result<R>,
|
||||
>(
|
||||
&self,
|
||||
f: F,
|
||||
args: AIterator,
|
||||
) -> Result<Vec<R>> {
|
||||
) -> crate::Result<Vec<R>> {
|
||||
match self {
|
||||
Executor::SingleThread => args.map(f).collect::<Result<_>>(),
|
||||
Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(),
|
||||
Executor::ThreadPool(pool) => {
|
||||
let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
|
||||
let num_fruits = args_with_indices.len();
|
||||
let fruit_receiver = {
|
||||
let (fruit_sender, fruit_receiver) = channel::unbounded();
|
||||
pool.scoped(|scope| {
|
||||
pool.scope(|scope| {
|
||||
for arg_with_idx in args_with_indices {
|
||||
scope.execute(|| {
|
||||
scope.spawn(|_| {
|
||||
let (idx, arg) = arg_with_idx;
|
||||
let fruit = f(arg);
|
||||
if let Err(err) = fruit_sender.send((idx, fruit)) {
|
||||
@@ -103,6 +106,7 @@ mod tests {
|
||||
#[should_panic] //< unfortunately the panic message is not propagated
|
||||
fn test_panic_propagates_multi_thread() {
|
||||
let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
|
||||
.unwrap()
|
||||
.map(
|
||||
|_| {
|
||||
panic!("panic should propagate");
|
||||
@@ -123,15 +127,15 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_map_multithread() {
|
||||
let result: Vec<usize> = Executor::multi_thread(3, "search-test")
|
||||
.map(|i| Ok(i * 2), 0..10)
|
||||
.unwrap();
|
||||
assert_eq!(result.len(), 10);
|
||||
for i in 0..10 {
|
||||
assert_eq!(result[i], i * 2);
|
||||
#[test]
|
||||
fn test_map_multithread() {
|
||||
let result: Vec<usize> = Executor::multi_thread(3, "search-test")
|
||||
.unwrap()
|
||||
.map(|i| Ok(i * 2), 0..10)
|
||||
.unwrap();
|
||||
assert_eq!(result.len(), 10);
|
||||
for i in 0..10 {
|
||||
assert_eq!(result[i], i * 2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,47 +1,45 @@
|
||||
use super::pool::LeasedItem;
|
||||
use super::pool::Pool;
|
||||
use super::segment::create_segment;
|
||||
use super::segment::Segment;
|
||||
use core::searcher::Searcher;
|
||||
use core::Executor;
|
||||
use core::IndexMeta;
|
||||
use core::SegmentId;
|
||||
use core::SegmentMeta;
|
||||
use core::SegmentReader;
|
||||
use core::META_FILEPATH;
|
||||
use directory::ManagedDirectory;
|
||||
use crate::core::Executor;
|
||||
use crate::core::IndexMeta;
|
||||
use crate::core::SegmentId;
|
||||
use crate::core::SegmentMeta;
|
||||
use crate::core::SegmentMetaInventory;
|
||||
use crate::core::META_FILEPATH;
|
||||
use crate::directory::ManagedDirectory;
|
||||
#[cfg(feature = "mmap")]
|
||||
use directory::MmapDirectory;
|
||||
use directory::INDEX_WRITER_LOCK;
|
||||
use directory::META_LOCK;
|
||||
use directory::{Directory, RAMDirectory};
|
||||
use error::DataCorruption;
|
||||
use error::TantivyError;
|
||||
use indexer::index_writer::open_index_writer;
|
||||
use indexer::index_writer::HEAP_SIZE_MIN;
|
||||
use indexer::segment_updater::save_new_metas;
|
||||
use num_cpus;
|
||||
use schema::Field;
|
||||
use schema::FieldType;
|
||||
use schema::Schema;
|
||||
use serde_json;
|
||||
use crate::directory::MmapDirectory;
|
||||
use crate::directory::INDEX_WRITER_LOCK;
|
||||
use crate::directory::{Directory, RAMDirectory};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::error::TantivyError;
|
||||
use crate::indexer::index_writer::HEAP_SIZE_MIN;
|
||||
use crate::indexer::segment_updater::save_new_metas;
|
||||
use crate::reader::IndexReader;
|
||||
use crate::reader::IndexReaderBuilder;
|
||||
use crate::schema::Field;
|
||||
use crate::schema::FieldType;
|
||||
use crate::schema::Schema;
|
||||
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
|
||||
use crate::IndexWriter;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt;
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use tokenizer::BoxedTokenizer;
|
||||
use tokenizer::TokenizerManager;
|
||||
use IndexWriter;
|
||||
use Result;
|
||||
|
||||
fn load_metas(directory: &Directory) -> Result<IndexMeta> {
|
||||
#[cfg(feature = "mmap")]
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
fn load_metas(
|
||||
directory: &dyn Directory,
|
||||
inventory: &SegmentMetaInventory,
|
||||
) -> crate::Result<IndexMeta> {
|
||||
let meta_data = directory.atomic_read(&META_FILEPATH)?;
|
||||
let meta_string = String::from_utf8_lossy(&meta_data);
|
||||
serde_json::from_str(&meta_string)
|
||||
IndexMeta::deserialize(&meta_string, &inventory)
|
||||
.map_err(|e| {
|
||||
DataCorruption::new(
|
||||
META_FILEPATH.clone(),
|
||||
META_FILEPATH.to_path_buf(),
|
||||
format!("Meta file cannot be deserialized. {:?}.", e),
|
||||
)
|
||||
})
|
||||
@@ -49,13 +47,13 @@ fn load_metas(directory: &Directory) -> Result<IndexMeta> {
|
||||
}
|
||||
|
||||
/// Search Index
|
||||
#[derive(Clone)]
|
||||
pub struct Index {
|
||||
directory: ManagedDirectory,
|
||||
schema: Schema,
|
||||
num_searchers: Arc<AtomicUsize>,
|
||||
searcher_pool: Arc<Pool<Searcher>>,
|
||||
executor: Arc<Executor>,
|
||||
tokenizers: TokenizerManager,
|
||||
inventory: SegmentMetaInventory,
|
||||
}
|
||||
|
||||
impl Index {
|
||||
@@ -76,15 +74,16 @@ impl Index {
|
||||
|
||||
/// Replace the default single thread search executor pool
|
||||
/// by a thread pool with a given number of threads.
|
||||
pub fn set_multithread_executor(&mut self, num_threads: usize) {
|
||||
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
|
||||
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
|
||||
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Replace the default single thread search executor pool
|
||||
/// by a thread pool with a given number of threads.
|
||||
pub fn set_default_multithread_executor(&mut self) {
|
||||
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
|
||||
let default_num_threads = num_cpus::get();
|
||||
self.set_multithread_executor(default_num_threads);
|
||||
self.set_multithread_executor(default_num_threads)
|
||||
}
|
||||
|
||||
/// Creates a new index using the `RAMDirectory`.
|
||||
@@ -101,29 +100,29 @@ impl Index {
|
||||
///
|
||||
/// If a previous index was in this directory, then its meta file will be destroyed.
|
||||
#[cfg(feature = "mmap")]
|
||||
pub fn create_in_dir<P: AsRef<Path>>(directory_path: P, schema: Schema) -> Result<Index> {
|
||||
pub fn create_in_dir<P: AsRef<Path>>(
|
||||
directory_path: P,
|
||||
schema: Schema,
|
||||
) -> crate::Result<Index> {
|
||||
let mmap_directory = MmapDirectory::open(directory_path)?;
|
||||
if Index::exists(&mmap_directory) {
|
||||
return Err(TantivyError::IndexAlreadyExists);
|
||||
}
|
||||
|
||||
Index::create(mmap_directory, schema)
|
||||
}
|
||||
|
||||
/// Opens or creates a new index in the provided directory
|
||||
#[cfg(feature = "mmap")]
|
||||
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
|
||||
if Index::exists(&dir) {
|
||||
let index = Index::open(dir)?;
|
||||
if index.schema() == schema {
|
||||
Ok(index)
|
||||
} else {
|
||||
Err(TantivyError::SchemaError(
|
||||
"An index exists but the schema does not match.".to_string(),
|
||||
))
|
||||
}
|
||||
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
|
||||
if !Index::exists(&dir) {
|
||||
return Index::create(dir, schema);
|
||||
}
|
||||
let index = Index::open(dir)?;
|
||||
if index.schema() == schema {
|
||||
Ok(index)
|
||||
} else {
|
||||
Index::create(dir, schema)
|
||||
Err(TantivyError::SchemaError(
|
||||
"An index exists but the schema does not match.".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
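A sketch of the new control flow above: create the index when none exists, open it when the schema matches, and fail otherwise (names as in this diff; error handling elided):

    let schema = Schema::builder().build();
    let index = Index::open_or_create(RAMDirectory::create(), schema)?;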
|
||||
@@ -136,13 +135,13 @@ impl Index {
|
||||
/// The temp directory is only used for testing the `MmapDirectory`.
|
||||
/// For other unit tests, prefer the `RAMDirectory`, see: `create_in_ram`.
|
||||
#[cfg(feature = "mmap")]
|
||||
pub fn create_from_tempdir(schema: Schema) -> Result<Index> {
|
||||
pub fn create_from_tempdir(schema: Schema) -> crate::Result<Index> {
|
||||
let mmap_directory = MmapDirectory::create_from_tempdir()?;
|
||||
Index::create(mmap_directory, schema)
|
||||
}
|
||||
|
||||
/// Creates a new index given an implementation of the trait `Directory`
|
||||
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
|
||||
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
|
||||
let directory = ManagedDirectory::wrap(dir)?;
|
||||
Index::from_directory(directory, schema)
|
||||
}
|
||||
@@ -150,26 +149,26 @@ impl Index {
|
||||
/// Create a new index from a directory.
|
||||
///
|
||||
/// This will overwrite existing meta.json
|
||||
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
|
||||
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
|
||||
save_new_metas(schema.clone(), directory.borrow_mut())?;
|
||||
let metas = IndexMeta::with_schema(schema);
|
||||
Index::create_from_metas(directory, &metas)
|
||||
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
|
||||
}
|
||||
|
||||
/// Creates a new index given a directory and an `IndexMeta`.
|
||||
fn create_from_metas(directory: ManagedDirectory, metas: &IndexMeta) -> Result<Index> {
|
||||
fn create_from_metas(
|
||||
directory: ManagedDirectory,
|
||||
metas: &IndexMeta,
|
||||
inventory: SegmentMetaInventory,
|
||||
) -> crate::Result<Index> {
|
||||
let schema = metas.schema.clone();
|
||||
let n_cpus = num_cpus::get();
|
||||
let index = Index {
|
||||
Ok(Index {
|
||||
directory,
|
||||
schema,
|
||||
num_searchers: Arc::new(AtomicUsize::new(n_cpus)),
|
||||
searcher_pool: Arc::new(Pool::new()),
|
||||
tokenizers: TokenizerManager::default(),
|
||||
executor: Arc::new(Executor::single_thread()),
|
||||
};
|
||||
index.load_searchers()?;
|
||||
Ok(index)
|
||||
inventory,
|
||||
})
|
||||
}
|
||||
|
||||
/// Accessor for the tokenizer manager.
|
||||
@@ -178,11 +177,11 @@ impl Index {
|
||||
}
|
||||
|
||||
/// Helper to access the tokenizer associated to a specific field.
|
||||
pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<BoxedTokenizer>> {
|
||||
pub fn tokenizer_for_field(&self, field: Field) -> crate::Result<TextAnalyzer> {
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
let field_type = field_entry.field_type();
|
||||
let tokenizer_manager: &TokenizerManager = self.tokenizers();
|
||||
let tokenizer_name_opt: Option<Box<BoxedTokenizer>> = match field_type {
|
||||
let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
|
||||
FieldType::Str(text_options) => text_options
|
||||
.get_indexing_options()
|
||||
.map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
|
||||
@@ -198,23 +197,58 @@ impl Index {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a default `IndexReader` for the given index.
|
||||
///
|
||||
/// See [`Index.reader_builder()`](#method.reader_builder).
|
||||
pub fn reader(&self) -> crate::Result<IndexReader> {
|
||||
self.reader_builder().try_into()
|
||||
}
|
||||
|
||||
/// Create an `IndexReader` for the given index.
///
/// Most projects should create at most one reader for a given index;
/// this method is typically called only once per `Index` instance,
/// over the lifetime of the program.
|
||||
pub fn reader_builder(&self) -> IndexReaderBuilder {
|
||||
IndexReaderBuilder::new(self.clone())
|
||||
}
|
||||
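Hedged usage sketch: `reader()` is simply `reader_builder().try_into()`, so the two calls below are equivalent:

    let reader = index.reader()?;
    let reader = index.reader_builder().try_into()?;
    let searcher = reader.searcher();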
|
||||
/// Opens a new directory from an index path.
|
||||
#[cfg(feature = "mmap")]
|
||||
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> Result<Index> {
|
||||
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> crate::Result<Index> {
|
||||
let mmap_directory = MmapDirectory::open(directory_path)?;
|
||||
Index::open(mmap_directory)
|
||||
}
|
||||
|
||||
/// Returns the list of the segment metas tracked by the index.
|
||||
///
|
||||
/// Such segments may be part of the index, but they may also be
/// segments that are currently being built or merged.
|
||||
pub fn list_all_segment_metas(&self) -> Vec<SegmentMeta> {
|
||||
self.inventory.all()
|
||||
}
|
||||
|
||||
/// Creates a new segment_meta (Advanced user only).
|
||||
///
|
||||
/// As long as the `SegmentMeta` lives, the files associated with the
|
||||
/// `SegmentMeta` are guaranteed to not be garbage collected, regardless of
|
||||
/// whether the segment is recorded as part of the index or not.
|
||||
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
|
||||
self.inventory.new_segment_meta(segment_id, max_doc)
|
||||
}
|
||||
|
||||
/// Open the index using the provided directory
|
||||
pub fn open<D: Directory>(directory: D) -> Result<Index> {
|
||||
pub fn open<D: Directory>(directory: D) -> crate::Result<Index> {
|
||||
let directory = ManagedDirectory::wrap(directory)?;
|
||||
let metas = load_metas(&directory)?;
|
||||
Index::create_from_metas(directory, &metas)
|
||||
let inventory = SegmentMetaInventory::default();
|
||||
let metas = load_metas(&directory, &inventory)?;
|
||||
Index::create_from_metas(directory, &metas, inventory)
|
||||
}
|
||||
|
||||
/// Reads the index meta file from the directory.
|
||||
pub fn load_metas(&self) -> Result<IndexMeta> {
|
||||
load_metas(self.directory())
|
||||
pub fn load_metas(&self) -> crate::Result<IndexMeta> {
|
||||
load_metas(self.directory(), &self.inventory)
|
||||
}
|
||||
|
||||
/// Open a new index writer. Attempts to acquire a lockfile.
|
||||
@@ -241,7 +275,7 @@ impl Index {
|
||||
&self,
|
||||
num_threads: usize,
|
||||
overall_heap_size_in_bytes: usize,
|
||||
) -> Result<IndexWriter> {
|
||||
) -> crate::Result<IndexWriter> {
|
||||
let directory_lock = self
|
||||
.directory
|
||||
.acquire_lock(&INDEX_WRITER_LOCK)
|
||||
@@ -249,7 +283,7 @@ impl Index {
|
||||
TantivyError::LockFailure(
|
||||
err,
|
||||
Some(
|
||||
"Failed to acquire index lock. If you are using\
|
||||
"Failed to acquire index lock. If you are using \
|
||||
a regular directory, this means there is already an \
|
||||
`IndexWriter` working on this `Directory`, in this process \
|
||||
or in a different process."
|
||||
@@ -258,7 +292,7 @@ impl Index {
|
||||
)
|
||||
})?;
|
||||
let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
|
||||
open_index_writer(
|
||||
IndexWriter::new(
|
||||
self,
|
||||
num_threads,
|
||||
heap_size_in_bytes_per_thread,
|
||||
@@ -276,7 +310,7 @@ impl Index {
|
||||
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.
|
||||
/// # Panics
|
||||
/// If the heap size per thread is too small, panics.
|
||||
pub fn writer(&self, overall_heap_size_in_bytes: usize) -> Result<IndexWriter> {
|
||||
pub fn writer(&self, overall_heap_size_in_bytes: usize) -> crate::Result<IndexWriter> {
|
||||
let mut num_threads = num_cpus::get();
|
||||
let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
|
||||
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
|
||||
@@ -293,7 +327,7 @@ impl Index {
|
||||
}
|
||||
|
||||
/// Returns the list of segments that are searchable
|
||||
pub fn searchable_segments(&self) -> Result<Vec<Segment>> {
|
||||
pub fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
|
||||
Ok(self
|
||||
.searchable_segment_metas()?
|
||||
.into_iter()
|
||||
@@ -303,12 +337,14 @@ impl Index {
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
|
||||
create_segment(self.clone(), segment_meta)
|
||||
Segment::for_index(self.clone(), segment_meta)
|
||||
}
|
||||
|
||||
/// Creates a new segment.
|
||||
pub fn new_segment(&self) -> Segment {
|
||||
let segment_meta = SegmentMeta::new(SegmentId::generate_random(), 0);
|
||||
let segment_meta = self
|
||||
.inventory
|
||||
.new_segment_meta(SegmentId::generate_random(), 0);
|
||||
self.segment(segment_meta)
|
||||
}
|
||||
|
||||
@@ -324,96 +360,44 @@ impl Index {
|
||||
|
||||
/// Reads the meta.json and returns the list of
|
||||
/// `SegmentMeta` from the last commit.
|
||||
pub fn searchable_segment_metas(&self) -> Result<Vec<SegmentMeta>> {
|
||||
pub fn searchable_segment_metas(&self) -> crate::Result<Vec<SegmentMeta>> {
|
||||
Ok(self.load_metas()?.segments)
|
||||
}
|
||||
|
||||
/// Returns the list of segment ids that are searchable.
|
||||
pub fn searchable_segment_ids(&self) -> Result<Vec<SegmentId>> {
|
||||
pub fn searchable_segment_ids(&self) -> crate::Result<Vec<SegmentId>> {
|
||||
Ok(self
|
||||
.searchable_segment_metas()?
|
||||
.iter()
|
||||
.map(|segment_meta| segment_meta.id())
|
||||
.map(SegmentMeta::id)
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// Sets the number of searchers to use
|
||||
///
|
||||
/// Only takes effect after the next call to `load_searchers`.
|
||||
pub fn set_num_searchers(&mut self, num_searchers: usize) {
|
||||
self.num_searchers.store(num_searchers, Ordering::Release);
|
||||
}
|
||||
|
||||
/// Update searchers so that they reflect the state of the last
|
||||
/// `.commit()`.
|
||||
///
|
||||
/// If indexing happens in the same process as searching,
|
||||
/// you most likely want to call `.load_searchers()` right after each
|
||||
/// successful call to `.commit()`.
|
||||
///
|
||||
/// If indexing and searching happen in different processes, the way to
/// get the freshest `index` at all times is to watch `meta.json` and
/// call `load_searchers` whenever a change happens.
|
||||
pub fn load_searchers(&self) -> Result<()> {
|
||||
let _meta_lock = self.directory().acquire_lock(&META_LOCK)?;
|
||||
let searchable_segments = self.searchable_segments()?;
|
||||
let segment_readers: Vec<SegmentReader> = searchable_segments
|
||||
.iter()
|
||||
.map(SegmentReader::open)
|
||||
.collect::<Result<_>>()?;
|
||||
let schema = self.schema();
|
||||
let num_searchers: usize = self.num_searchers.load(Ordering::Acquire);
|
||||
let searchers = (0..num_searchers)
|
||||
.map(|_| Searcher::new(schema.clone(), self.clone(), segment_readers.clone()))
|
||||
.collect();
|
||||
self.searcher_pool.publish_new_generation(searchers);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns a searcher
|
||||
///
|
||||
/// This method should be called every single time a search
|
||||
/// query is performed.
|
||||
/// The searchers are taken from a pool of `num_searchers` searchers.
|
||||
/// If no searcher is available, this call may block.
|
||||
///
|
||||
/// The same searcher must be used for a given query, as it ensures
|
||||
/// the use of a consistent segment set.
|
||||
pub fn searcher(&self) -> LeasedItem<Searcher> {
|
||||
self.searcher_pool.acquire()
|
||||
/// Returns the set of corrupted files
|
||||
pub fn validate_checksum(&self) -> crate::Result<HashSet<PathBuf>> {
|
||||
self.directory.list_damaged().map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Index {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Index({:?})", self.directory)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Index {
|
||||
fn clone(&self) -> Index {
|
||||
Index {
|
||||
directory: self.directory.clone(),
|
||||
schema: self.schema.clone(),
|
||||
num_searchers: Arc::clone(&self.num_searchers),
|
||||
searcher_pool: Arc::clone(&self.searcher_pool),
|
||||
tokenizers: self.tokenizers.clone(),
|
||||
executor: self.executor.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use directory::RAMDirectory;
|
||||
use schema::{Schema, INT_INDEXED, TEXT};
|
||||
use Index;
|
||||
use crate::directory::RAMDirectory;
|
||||
use crate::schema::Field;
|
||||
use crate::schema::{Schema, INDEXED, TEXT};
|
||||
use crate::IndexReader;
|
||||
use crate::ReloadPolicy;
|
||||
use crate::{Directory, Index};
|
||||
|
||||
#[test]
|
||||
fn test_indexer_for_field() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let num_likes_field = schema_builder.add_u64_field("num_likes", INT_INDEXED);
|
||||
let num_likes_field = schema_builder.add_u64_field("num_likes", INDEXED);
|
||||
let body_field = schema_builder.add_text_field("body", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
@@ -471,7 +455,148 @@ mod tests {
|
||||
|
||||
fn throw_away_schema() -> Schema {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let _ = schema_builder.add_u64_field("num_likes", INT_INDEXED);
|
||||
let _ = schema_builder.add_u64_field("num_likes", INDEXED);
|
||||
schema_builder.build()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_on_commit_reload_policy() {
|
||||
let schema = throw_away_schema();
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
test_index_on_commit_reload_policy_aux(field, &index, &reader);
|
||||
}
|
||||
|
||||
#[cfg(feature = "mmap")]
|
||||
mod mmap_specific {
|
||||
|
||||
use super::*;
|
||||
use crate::Directory;
|
||||
use std::path::PathBuf;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[test]
|
||||
fn test_index_on_commit_reload_policy_mmap() {
|
||||
let schema = throw_away_schema();
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let tempdir = TempDir::new().unwrap();
|
||||
let tempdir_path = PathBuf::from(tempdir.path());
|
||||
let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
test_index_on_commit_reload_policy_aux(field, &index, &reader);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_manual_policy_mmap() {
|
||||
let schema = throw_away_schema();
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let mut index = Index::create_from_tempdir(schema).unwrap();
|
||||
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
writer.commit().unwrap();
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::Manual)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
writer.add_document(doc!(field=>1u64));
|
||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||
let _handle = index.directory_mut().watch(Box::new(move || {
|
||||
let _ = sender.send(());
|
||||
}));
|
||||
writer.commit().unwrap();
|
||||
assert!(receiver.recv().is_ok());
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
reader.reload().unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_on_commit_reload_policy_different_directories() {
|
||||
let schema = throw_away_schema();
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let tempdir = TempDir::new().unwrap();
|
||||
let tempdir_path = PathBuf::from(tempdir.path());
|
||||
let write_index = Index::create_in_dir(&tempdir_path, schema).unwrap();
|
||||
let read_index = Index::open_in_dir(&tempdir_path).unwrap();
|
||||
let reader = read_index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::OnCommit)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
|
||||
}
|
||||
}
|
||||
|
||||
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
|
||||
let mut reader_index = reader.index();
|
||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
|
||||
let _ = sender.send(());
|
||||
}));
|
||||
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
assert_eq!(reader.searcher().num_docs(), 0);
|
||||
writer.add_document(doc!(field=>1u64));
|
||||
writer.commit().unwrap();
|
||||
assert!(receiver.recv().is_ok());
|
||||
assert_eq!(reader.searcher().num_docs(), 1);
|
||||
writer.add_document(doc!(field=>2u64));
|
||||
writer.commit().unwrap();
|
||||
assert!(receiver.recv().is_ok());
|
||||
assert_eq!(reader.searcher().num_docs(), 2);
|
||||
}
|
||||
|
||||
// This test will not pass on Windows, because Windows
// prevents deleting files that are memory-mapped.
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
#[test]
|
||||
fn garbage_collect_works_as_intended() {
|
||||
let directory = RAMDirectory::create();
|
||||
let schema = throw_away_schema();
|
||||
let field = schema.get_field("num_likes").unwrap();
|
||||
let index = Index::create(directory.clone(), schema).unwrap();
|
||||
|
||||
let mut writer = index.writer_with_num_threads(8, 24_000_000).unwrap();
|
||||
for i in 0u64..8_000u64 {
|
||||
writer.add_document(doc!(field => i));
|
||||
}
|
||||
let (sender, receiver) = crossbeam::channel::unbounded();
|
||||
let _handle = directory.watch(Box::new(move || {
|
||||
let _ = sender.send(());
|
||||
}));
|
||||
writer.commit().unwrap();
|
||||
let mem_right_after_commit = directory.total_mem_usage();
|
||||
assert!(receiver.recv().is_ok());
|
||||
let reader = index
|
||||
.reader_builder()
|
||||
.reload_policy(ReloadPolicy::Manual)
|
||||
.try_into()
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(reader.searcher().num_docs(), 8_000);
|
||||
writer.wait_merging_threads().unwrap();
|
||||
let mem_right_after_merge_finished = directory.total_mem_usage();
|
||||
|
||||
reader.reload().unwrap();
|
||||
let searcher = reader.searcher();
|
||||
assert_eq!(searcher.num_docs(), 8_000);
|
||||
assert!(
|
||||
mem_right_after_merge_finished < mem_right_after_commit,
|
||||
"(mem after merge){} is expected < (mem before merge){}",
|
||||
mem_right_after_merge_finished,
|
||||
mem_right_after_commit
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,198 @@
|
||||
use core::SegmentMeta;
|
||||
use schema::Schema;
|
||||
use serde_json;
|
||||
use super::SegmentComponent;
|
||||
use crate::core::SegmentId;
|
||||
use crate::schema::Schema;
|
||||
use crate::Opstamp;
|
||||
use census::{Inventory, TrackedObject};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashSet;
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
struct DeleteMeta {
|
||||
num_deleted_docs: u32,
|
||||
opstamp: Opstamp,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct SegmentMetaInventory {
|
||||
inventory: Inventory<InnerSegmentMeta>,
|
||||
}
|
||||
|
||||
impl SegmentMetaInventory {
|
||||
/// Lists all living `SegmentMeta` objects at the time of the call.
|
||||
pub fn all(&self) -> Vec<SegmentMeta> {
|
||||
self.inventory
|
||||
.list()
|
||||
.into_iter()
|
||||
.map(SegmentMeta::from)
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
pub fn new_segment_meta(&self, segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
|
||||
let inner = InnerSegmentMeta {
|
||||
segment_id,
|
||||
max_doc,
|
||||
deletes: None,
|
||||
};
|
||||
SegmentMeta::from(self.inventory.track(inner))
|
||||
}
|
||||
}
|
||||
|
||||
/// `SegmentMeta` contains simple meta information about a segment.
|
||||
///
|
||||
/// For instance the number of docs it contains,
|
||||
/// how many are deleted, etc.
|
||||
#[derive(Clone)]
|
||||
pub struct SegmentMeta {
|
||||
tracked: TrackedObject<InnerSegmentMeta>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for SegmentMeta {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
self.tracked.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl serde::Serialize for SegmentMeta {
|
||||
fn serialize<S>(
|
||||
&self,
|
||||
serializer: S,
|
||||
) -> Result<<S as serde::Serializer>::Ok, <S as serde::Serializer>::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
self.tracked.serialize(serializer)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TrackedObject<InnerSegmentMeta>> for SegmentMeta {
|
||||
fn from(tracked: TrackedObject<InnerSegmentMeta>) -> SegmentMeta {
|
||||
SegmentMeta { tracked }
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentMeta {
|
||||
// Creates a new `SegmentMeta` object.
|
||||
|
||||
/// Returns the segment id.
|
||||
pub fn id(&self) -> SegmentId {
|
||||
self.tracked.segment_id
|
||||
}
|
||||
|
||||
/// Returns the number of deleted documents.
|
||||
pub fn num_deleted_docs(&self) -> u32 {
|
||||
self.tracked
|
||||
.deletes
|
||||
.as_ref()
|
||||
.map(|delete_meta| delete_meta.num_deleted_docs)
|
||||
.unwrap_or(0u32)
|
||||
}
|
||||
|
||||
/// Returns the list of files that
|
||||
/// are required for the segment meta.
|
||||
///
|
||||
/// This is useful because tantivy garbage-collects files by removing
/// all files that were created by tantivy and are no longer
/// used by any segment.
|
||||
pub fn list_files(&self) -> HashSet<PathBuf> {
|
||||
SegmentComponent::iterator()
|
||||
.map(|component| self.relative_path(*component))
|
||||
.collect::<HashSet<PathBuf>>()
|
||||
}
|
||||
|
||||
/// Returns the relative path of a component of our segment.
|
||||
///
|
||||
/// It joins the segment id with the extension
/// associated with the given segment component.
|
||||
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
|
||||
let mut path = self.id().uuid_string();
|
||||
path.push_str(&*match component {
|
||||
SegmentComponent::POSTINGS => ".idx".to_string(),
|
||||
SegmentComponent::POSITIONS => ".pos".to_string(),
|
||||
SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
|
||||
SegmentComponent::TERMS => ".term".to_string(),
|
||||
SegmentComponent::STORE => ".store".to_string(),
|
||||
SegmentComponent::FASTFIELDS => ".fast".to_string(),
|
||||
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
|
||||
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
|
||||
});
|
||||
PathBuf::from(path)
|
||||
}
|
||||
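Illustrative sketch (assuming an existing `segment_meta`): the relative path is the segment uuid plus the component's extension, so a postings file ends in `.idx`:

    let path = segment_meta.relative_path(SegmentComponent::POSTINGS);
    assert!(path.to_string_lossy().ends_with(".idx"));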
|
||||
/// Return the highest doc id + 1
|
||||
///
|
||||
/// If there are no deletes, then num_docs = max_doc
/// and all the doc ids contained in this segment
/// are exactly (0..max_doc).
|
||||
pub fn max_doc(&self) -> u32 {
|
||||
self.tracked.max_doc
|
||||
}
|
||||
|
||||
/// Return the number of documents in the segment.
|
||||
pub fn num_docs(&self) -> u32 {
|
||||
self.max_doc() - self.num_deleted_docs()
|
||||
}
|
||||
|
||||
/// Returns the `Opstamp` of the last delete operation
|
||||
/// taken into account in this segment.
|
||||
pub fn delete_opstamp(&self) -> Option<Opstamp> {
|
||||
self.tracked
|
||||
.deletes
|
||||
.as_ref()
|
||||
.map(|delete_meta| delete_meta.opstamp)
|
||||
}
|
||||
|
||||
/// Returns true iff the segment meta contains
|
||||
/// delete information.
|
||||
pub fn has_deletes(&self) -> bool {
|
||||
self.num_deleted_docs() > 0
|
||||
}
|
||||
|
||||
/// Updates the max_doc value of the `SegmentMeta`.
///
/// This method is only used when updating `max_doc` from 0
/// while finalizing a freshly created segment.
|
||||
pub(crate) fn with_max_doc(self, max_doc: u32) -> SegmentMeta {
|
||||
assert_eq!(self.tracked.max_doc, 0);
|
||||
assert!(self.tracked.deletes.is_none());
|
||||
let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
|
||||
segment_id: inner_meta.segment_id,
|
||||
max_doc,
|
||||
deletes: None,
|
||||
});
|
||||
SegmentMeta { tracked }
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
|
||||
let delete_meta = DeleteMeta {
|
||||
num_deleted_docs,
|
||||
opstamp,
|
||||
};
|
||||
let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
|
||||
segment_id: inner_meta.segment_id,
|
||||
max_doc: inner_meta.max_doc,
|
||||
deletes: Some(delete_meta),
|
||||
});
|
||||
SegmentMeta { tracked }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct InnerSegmentMeta {
|
||||
segment_id: SegmentId,
|
||||
max_doc: u32,
|
||||
deletes: Option<DeleteMeta>,
|
||||
}
|
||||
|
||||
impl InnerSegmentMeta {
|
||||
pub fn track(self, inventory: &SegmentMetaInventory) -> SegmentMeta {
|
||||
SegmentMeta {
|
||||
tracked: inventory.inventory.track(self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Meta information about the `Index`.
|
||||
///
|
||||
@@ -11,16 +202,53 @@ use std::fmt;
|
||||
/// * the index `docstamp`
|
||||
/// * the schema
|
||||
///
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
#[derive(Clone, Serialize)]
|
||||
pub struct IndexMeta {
|
||||
/// List of the `SegmentMeta` information associated with each finalized segment of the index.
|
||||
pub segments: Vec<SegmentMeta>,
|
||||
/// Index `Schema`
|
||||
pub schema: Schema,
|
||||
pub opstamp: u64,
|
||||
/// Opstamp associated to the last `commit` operation.
|
||||
pub opstamp: Opstamp,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// Payload associated to the last commit.
|
||||
///
|
||||
/// Upon commit, clients can optionally add a small `String` payload to their commit
|
||||
/// to help identify this commit.
|
||||
/// This payload is entirely unused by tantivy.
|
||||
pub payload: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct UntrackedIndexMeta {
|
||||
pub segments: Vec<InnerSegmentMeta>,
|
||||
pub schema: Schema,
|
||||
pub opstamp: Opstamp,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub payload: Option<String>,
|
||||
}
|
||||
|
||||
impl UntrackedIndexMeta {
|
||||
pub fn track(self, inventory: &SegmentMetaInventory) -> IndexMeta {
|
||||
IndexMeta {
|
||||
segments: self
|
||||
.segments
|
||||
.into_iter()
|
||||
.map(|inner_seg_meta| inner_seg_meta.track(inventory))
|
||||
.collect::<Vec<SegmentMeta>>(),
|
||||
schema: self.schema,
|
||||
opstamp: self.opstamp,
|
||||
payload: self.payload,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexMeta {
|
||||
/// Create an `IndexMeta` object representing a brand new `Index`
/// with the given schema.
///
/// The new index does not contain any segments.
/// Its opstamp will be `0u64`.
|
||||
pub fn with_schema(schema: Schema) -> IndexMeta {
|
||||
IndexMeta {
|
||||
segments: vec![],
|
||||
@@ -29,10 +257,18 @@ impl IndexMeta {
|
||||
payload: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn deserialize(
|
||||
meta_json: &str,
|
||||
inventory: &SegmentMetaInventory,
|
||||
) -> serde_json::Result<IndexMeta> {
|
||||
let untracked_meta_json: UntrackedIndexMeta = serde_json::from_str(meta_json)?;
|
||||
Ok(untracked_meta_json.track(inventory))
|
||||
}
|
||||
}
|
||||
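A hedged sketch of how the crate-internal `deserialize` is used by `load_metas` (earlier in this diff): parse the raw JSON into an `UntrackedIndexMeta`, then attach every segment to the inventory:

    let inventory = SegmentMetaInventory::default();
    let meta: IndexMeta = IndexMeta::deserialize(&meta_json_string, &inventory)?;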
|
||||
impl fmt::Debug for IndexMeta {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
@@ -46,7 +282,7 @@ impl fmt::Debug for IndexMeta {
|
||||
mod tests {
|
||||
|
||||
use super::IndexMeta;
|
||||
use schema::{Schema, TEXT};
|
||||
use crate::schema::{Schema, TEXT};
|
||||
use serde_json;
|
||||
|
||||
#[test]
|
||||
@@ -63,6 +299,9 @@ mod tests {
|
||||
payload: None,
|
||||
};
|
||||
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
|
||||
assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
|
||||
assert_eq!(
|
||||
json,
|
||||
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
use common::BinarySerializable;
|
||||
use directory::ReadOnlySource;
|
||||
use owned_read::OwnedRead;
|
||||
use positions::PositionReader;
|
||||
use postings::TermInfo;
|
||||
use postings::{BlockSegmentPostings, SegmentPostings};
|
||||
use schema::FieldType;
|
||||
use schema::IndexRecordOption;
|
||||
use schema::Term;
|
||||
use termdict::TermDictionary;
|
||||
use crate::common::BinarySerializable;
|
||||
use crate::directory::ReadOnlySource;
|
||||
use crate::positions::PositionReader;
|
||||
use crate::postings::TermInfo;
|
||||
use crate::postings::{BlockSegmentPostings, SegmentPostings};
|
||||
use crate::schema::FieldType;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::schema::Term;
|
||||
use crate::termdict::TermDictionary;
|
||||
|
||||
/// The inverted index reader is in charge of accessing
|
||||
/// the inverted index associated to a specific field.
|
||||
@@ -32,7 +31,7 @@ pub struct InvertedIndexReader {
|
||||
}
|
||||
|
||||
impl InvertedIndexReader {
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symetry
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
|
||||
pub(crate) fn new(
|
||||
termdict: TermDictionary,
|
||||
postings_source: ReadOnlySource,
|
||||
@@ -60,7 +59,7 @@ impl InvertedIndexReader {
|
||||
.get_index_record_option()
|
||||
.unwrap_or(IndexRecordOption::Basic);
|
||||
InvertedIndexReader {
|
||||
termdict: TermDictionary::empty(&field_type),
|
||||
termdict: TermDictionary::empty(),
|
||||
postings_source: ReadOnlySource::empty(),
|
||||
positions_source: ReadOnlySource::empty(),
|
||||
positions_idx_source: ReadOnlySource::empty(),
|
||||
@@ -97,8 +96,7 @@ impl InvertedIndexReader {
|
||||
let offset = term_info.postings_offset as usize;
|
||||
let end_source = self.postings_source.len();
|
||||
let postings_slice = self.postings_source.slice(offset, end_source);
|
||||
let postings_reader = OwnedRead::new(postings_slice);
|
||||
block_postings.reset(term_info.doc_freq, postings_reader);
|
||||
block_postings.reset(term_info.doc_freq, postings_slice);
|
||||
}
|
||||
|
||||
/// Returns a block postings given a `Term`.
|
||||
@@ -127,7 +125,7 @@ impl InvertedIndexReader {
|
||||
let postings_data = self.postings_source.slice_from(offset);
|
||||
BlockSegmentPostings::from_data(
|
||||
term_info.doc_freq,
|
||||
OwnedRead::new(postings_data),
|
||||
postings_data,
|
||||
self.record_option,
|
||||
requested_option,
|
||||
)
|
||||
|
||||
@@ -2,37 +2,33 @@ mod executor;
|
||||
pub mod index;
|
||||
mod index_meta;
|
||||
mod inverted_index_reader;
|
||||
mod pool;
|
||||
pub mod searcher;
|
||||
mod segment;
|
||||
mod segment_component;
|
||||
mod segment_id;
|
||||
mod segment_meta;
|
||||
mod segment_reader;
|
||||
|
||||
pub use self::executor::Executor;
|
||||
pub use self::index::Index;
|
||||
pub use self::index_meta::IndexMeta;
|
||||
pub use self::index_meta::{IndexMeta, SegmentMeta, SegmentMetaInventory};
|
||||
pub use self::inverted_index_reader::InvertedIndexReader;
|
||||
pub use self::searcher::Searcher;
|
||||
pub use self::segment::Segment;
|
||||
pub use self::segment::SerializableSegment;
|
||||
pub use self::segment_component::SegmentComponent;
|
||||
pub use self::segment_id::SegmentId;
|
||||
pub use self::segment_meta::SegmentMeta;
|
||||
pub use self::segment_reader::SegmentReader;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use once_cell::sync::Lazy;
|
||||
use std::path::Path;
|
||||
|
||||
lazy_static! {
|
||||
/// The meta file contains all the information about the list of segments and the schema
|
||||
/// of the index.
|
||||
pub static ref META_FILEPATH: PathBuf = PathBuf::from("meta.json");
|
||||
/// The meta file contains all the information about the list of segments and the schema
|
||||
/// of the index.
|
||||
pub static META_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new("meta.json"));
|
||||
|
||||
/// The managed file contains a list of files that were created by the tantivy
|
||||
/// and will therefore be garbage collected when they are deemed useless by tantivy.
|
||||
///
|
||||
/// Removing this file is safe, but will prevent the garbage collection of all of the file that
|
||||
/// are currently in the directory
|
||||
pub static ref MANAGED_FILEPATH: PathBuf = PathBuf::from(".managed.json");
|
||||
}
|
||||
/// The managed file contains a list of files that were created by tantivy
/// and will therefore be garbage collected when they are deemed useless by tantivy.
///
/// Removing this file is safe, but will prevent the garbage collection of all of the files that
/// are currently in the directory.
|
||||
pub static MANAGED_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new(".managed.json"));
|
||||
|
||||
136
src/core/pool.rs
@@ -1,136 +0,0 @@
|
||||
use crossbeam::queue::MsQueue;
|
||||
use std::mem;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct GenerationItem<T> {
|
||||
generation: usize,
|
||||
item: T,
|
||||
}
|
||||
|
||||
pub struct Pool<T> {
|
||||
queue: Arc<MsQueue<GenerationItem<T>>>,
|
||||
freshest_generation: AtomicUsize,
|
||||
next_generation: AtomicUsize,
|
||||
}
|
||||
|
||||
impl<T> Pool<T> {
|
||||
pub fn new() -> Pool<T> {
|
||||
let queue = Arc::new(MsQueue::new());
|
||||
Pool {
|
||||
queue,
|
||||
freshest_generation: AtomicUsize::default(),
|
||||
next_generation: AtomicUsize::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn publish_new_generation(&self, items: Vec<T>) {
|
||||
let next_generation = self.next_generation.fetch_add(1, Ordering::SeqCst) + 1;
|
||||
for item in items {
|
||||
let gen_item = GenerationItem {
|
||||
item,
|
||||
generation: next_generation,
|
||||
};
|
||||
self.queue.push(gen_item);
|
||||
}
|
||||
self.advertise_generation(next_generation);
|
||||
}
|
||||
|
||||
/// At the exit of this method,
/// - freshest_generation has a value greater than or equal to `generation`
/// - freshest_generation has a value that has been advertised
|
||||
fn advertise_generation(&self, generation: usize) {
|
||||
// Not optimal at all, but the easiest version to reason about.
|
||||
loop {
|
||||
let former_generation = self.freshest_generation.load(Ordering::Acquire);
|
||||
if former_generation >= generation {
|
||||
break;
|
||||
}
|
||||
self.freshest_generation.compare_and_swap(
|
||||
former_generation,
|
||||
generation,
|
||||
Ordering::SeqCst,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn generation(&self) -> usize {
|
||||
self.freshest_generation.load(Ordering::Acquire)
|
||||
}
|
||||
|
||||
pub fn acquire(&self) -> LeasedItem<T> {
|
||||
let generation = self.generation();
|
||||
loop {
|
||||
let gen_item = self.queue.pop();
|
||||
if gen_item.generation >= generation {
|
||||
return LeasedItem {
|
||||
gen_item: Some(gen_item),
|
||||
recycle_queue: Arc::clone(&self.queue),
|
||||
};
|
||||
} else {
|
||||
// this searcher is obsolete,
|
||||
// removing it from the pool.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LeasedItem<T> {
|
||||
gen_item: Option<GenerationItem<T>>,
|
||||
recycle_queue: Arc<MsQueue<GenerationItem<T>>>,
|
||||
}
|
||||
|
||||
impl<T> Deref for LeasedItem<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self
|
||||
.gen_item
|
||||
.as_ref()
|
||||
.expect("Unwrapping a leased item should never fail")
|
||||
.item // unwrap is safe here
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for LeasedItem<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
&mut self
|
||||
.gen_item
|
||||
.as_mut()
|
||||
.expect("Unwrapping a mut leased item should never fail")
|
||||
.item // unwrap is safe here
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for LeasedItem<T> {
|
||||
fn drop(&mut self) {
|
||||
let gen_item: GenerationItem<T> = mem::replace(&mut self.gen_item, None)
|
||||
.expect("Unwrapping a leased item should never fail");
|
||||
self.recycle_queue.push(gen_item);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::Pool;
|
||||
use std::iter;
|
||||
|
||||
#[test]
|
||||
fn test_pool() {
|
||||
let items10: Vec<usize> = iter::repeat(10).take(10).collect();
|
||||
let pool = Pool::new();
|
||||
pool.publish_new_generation(items10);
|
||||
for _ in 0..20 {
|
||||
assert_eq!(*pool.acquire(), 10);
|
||||
}
|
||||
let items11: Vec<usize> = iter::repeat(11).take(10).collect();
|
||||
pool.publish_new_generation(items11);
|
||||
for _ in 0..20 {
|
||||
assert_eq!(*pool.acquire(), 11);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,42 +1,18 @@
|
||||
use collector::Collector;
|
||||
use collector::SegmentCollector;
|
||||
use core::Executor;
|
||||
use core::InvertedIndexReader;
|
||||
use core::SegmentReader;
|
||||
use query::Query;
|
||||
use query::Scorer;
|
||||
use query::Weight;
|
||||
use schema::Document;
|
||||
use schema::Schema;
|
||||
use schema::{Field, Term};
|
||||
use space_usage::SearcherSpaceUsage;
|
||||
use crate::collector::Collector;
|
||||
use crate::core::Executor;
|
||||
use crate::core::InvertedIndexReader;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::query::Query;
|
||||
use crate::schema::Document;
|
||||
use crate::schema::Schema;
|
||||
use crate::schema::{Field, Term};
|
||||
use crate::space_usage::SearcherSpaceUsage;
|
||||
use crate::store::StoreReader;
|
||||
use crate::termdict::TermMerger;
|
||||
use crate::DocAddress;
|
||||
use crate::Index;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use store::StoreReader;
|
||||
use termdict::TermMerger;
|
||||
use DocAddress;
|
||||
use Index;
|
||||
use Result;
|
||||
|
||||
fn collect_segment<C: Collector>(
|
||||
collector: &C,
|
||||
weight: &Weight,
|
||||
segment_ord: u32,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> Result<C::Fruit> {
|
||||
let mut scorer = weight.scorer(segment_reader)?;
|
||||
let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
|
||||
if let Some(delete_bitset) = segment_reader.delete_bitset() {
|
||||
scorer.for_each(&mut |doc, score| {
|
||||
if !delete_bitset.is_deleted(doc) {
|
||||
segment_collector.collect(doc, score);
|
||||
}
|
||||
});
|
||||
} else {
|
||||
scorer.for_each(&mut |doc, score| segment_collector.collect(doc, score));
|
||||
}
|
||||
Ok(segment_collector.harvest())
|
||||
}
|
||||
|
||||
/// Holds a list of `SegmentReader`s ready for search.
|
||||
///
|
||||
@@ -59,7 +35,7 @@ impl Searcher {
|
||||
) -> Searcher {
|
||||
let store_readers = segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.get_store_reader())
|
||||
.map(SegmentReader::get_store_reader)
|
||||
.collect();
|
||||
Searcher {
|
||||
schema,
|
||||
@@ -78,7 +54,7 @@ impl Searcher {
|
||||
///
|
||||
/// The searcher uses the segment ordinal to route
/// the request to the right `Segment`.
|
||||
pub fn doc(&self, doc_address: DocAddress) -> Result<Document> {
|
||||
pub fn doc(&self, doc_address: DocAddress) -> crate::Result<Document> {
|
||||
let DocAddress(segment_local_id, doc_id) = doc_address;
|
||||
let store_reader = &self.store_readers[segment_local_id as usize];
|
||||
store_reader.get(doc_id)
|
||||
@@ -132,7 +108,11 @@ impl Searcher {
|
||||
///
|
||||
/// Finally, the Collector merges each of the child collectors into itself for result usability
|
||||
/// by the caller.
|
||||
pub fn search<C: Collector>(&self, query: &Query, collector: &C) -> Result<C::Fruit> {
|
||||
pub fn search<C: Collector>(
|
||||
&self,
|
||||
query: &dyn Query,
|
||||
collector: &C,
|
||||
) -> crate::Result<C::Fruit> {
|
||||
let executor = self.index.search_executor();
|
||||
self.search_with_executor(query, collector, executor)
|
||||
}
|
||||
@@ -151,21 +131,16 @@ impl Searcher {
|
||||
/// hurt it. It will however, decrease the average response time.
|
||||
pub fn search_with_executor<C: Collector>(
|
||||
&self,
|
||||
query: &Query,
|
||||
query: &dyn Query,
|
||||
collector: &C,
|
||||
executor: &Executor,
|
||||
) -> Result<C::Fruit> {
|
||||
) -> crate::Result<C::Fruit> {
|
||||
let scoring_enabled = collector.requires_scoring();
|
||||
let weight = query.weight(self, scoring_enabled)?;
|
||||
let segment_readers = self.segment_readers();
|
||||
let fruits = executor.map(
|
||||
|(segment_ord, segment_reader)| {
|
||||
collect_segment(
|
||||
collector,
|
||||
weight.as_ref(),
|
||||
segment_ord as u32,
|
||||
segment_reader,
|
||||
)
|
||||
collector.collect_segment(weight.as_ref(), segment_ord as u32, segment_reader)
|
||||
},
|
||||
segment_readers.iter().enumerate(),
|
||||
)?;
|
||||
@@ -203,7 +178,7 @@ impl FieldSearcher {
|
||||
|
||||
/// Returns a Stream over all of the sorted unique terms of
|
||||
/// for the given field.
|
||||
pub fn terms(&self) -> TermMerger {
|
||||
pub fn terms(&self) -> TermMerger<'_> {
|
||||
let term_streamers: Vec<_> = self
|
||||
.inv_index_readers
|
||||
.iter()
|
||||
@@ -214,11 +189,11 @@ impl FieldSearcher {
|
||||
}
|
||||
|
||||
impl fmt::Debug for Searcher {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let segment_ids = self
|
||||
.segment_readers
|
||||
.iter()
|
||||
.map(|segment_reader| segment_reader.segment_id())
|
||||
.map(SegmentReader::segment_id)
|
||||
.collect::<Vec<_>>();
|
||||
write!(f, "Searcher({:?})", segment_ids)
|
||||
}
|
||||
|
||||
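For context, a typical call site for `Searcher::search` after the `&Query` to `&dyn Query` change looks roughly like the sketch below. The schema, field name, and query string are invented for illustration; `parse_query` returns a `Box<dyn Query>`, which is passed as `&*query`.

// Illustrative sketch only (field names and index setup are invented).
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![body])
        .parse_query("hello")
        .expect("invalid query");
    // Box<dyn Query> reborrows to the &dyn Query parameter introduced above.
    let top_docs = searcher.search(&*query, &TopDocs::with_limit(10))?;
    println!("{} hits", top_docs.len());
    Ok(())
}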
@@ -1,16 +1,15 @@
use super::SegmentComponent;
use core::Index;
use core::SegmentId;
use core::SegmentMeta;
use directory::error::{OpenReadError, OpenWriteError};
use directory::Directory;
use directory::{ReadOnlySource, WritePtr};
use indexer::segment_serializer::SegmentSerializer;
use schema::Schema;
use crate::core::Index;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory;
use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema;
use crate::Opstamp;
use std::fmt;
use std::path::PathBuf;
use std::result;
use Result;

/// A segment is a piece of the index.
#[derive(Clone)]
@@ -20,20 +19,17 @@ pub struct Segment {
}

impl fmt::Debug for Segment {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Segment({:?})", self.id().uuid_string())
    }
}

/// Creates a new segment given an `Index` and a `SegmentId`
///
/// The function is here to make it private outside `tantivy`.
/// #[doc(hidden)]
pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
    Segment { index, meta }
}

impl Segment {
    /// Creates a new segment given an `Index` and a `SegmentId`
    pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
        Segment { index, meta }
    }

    /// Returns the index the segment belongs to.
    pub fn index(&self) -> &Index {
        &self.index
@@ -49,8 +45,19 @@ impl Segment {
        &self.meta
    }

    /// Updates the max_doc value from the `SegmentMeta`.
    ///
    /// This method is only used when updating `max_doc` from 0
    /// as we finalize a fresh new segment.
    pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
        Segment {
            index: self.index,
            meta: self.meta.with_max_doc(max_doc),
        }
    }

    #[doc(hidden)]
    pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: u64) -> Segment {
    pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
        Segment {
            index: self.index,
            meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
@@ -71,20 +78,14 @@ impl Segment {
    }

    /// Open one of the component files for a *regular* read.
    pub fn open_read(
        &self,
        component: SegmentComponent,
    ) -> result::Result<ReadOnlySource, OpenReadError> {
    pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
        let path = self.relative_path(component);
        let source = self.index.directory().open_read(&path)?;
        Ok(source)
    }

    /// Open one of the component files for *regular* write.
    pub fn open_write(
        &mut self,
        component: SegmentComponent,
    ) -> result::Result<WritePtr, OpenWriteError> {
    pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
        let path = self.relative_path(component);
        let write = self.index.directory_mut().open_write(&path)?;
        Ok(write)
@@ -97,5 +98,5 @@ pub trait SerializableSegment {
    ///
    /// # Returns
    /// The number of documents in the segment.
    fn write(&self, serializer: SegmentSerializer) -> Result<u32>;
    fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32>;
}

@@ -2,6 +2,11 @@ use std::cmp::{Ord, Ordering};
use std::fmt;
use uuid::Uuid;

#[cfg(test)]
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::str::FromStr;
#[cfg(test)]
use std::sync::atomic;

@@ -17,10 +22,10 @@ use std::sync::atomic;
pub struct SegmentId(Uuid);

#[cfg(test)]
lazy_static! {
    static ref AUTO_INC_COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::default();
    static ref EMPTY_ARR: [u8; 8] = [0u8; 8];
}
static AUTO_INC_COUNTER: Lazy<atomic::AtomicUsize> = Lazy::new(|| atomic::AtomicUsize::default());

#[cfg(test)]
const ZERO_ARRAY: [u8; 8] = [0u8; 8];

// During tests, we generate the segment id in an auto-increment manner
// for consistency of segment ids between runs.
@@ -30,7 +35,7 @@ lazy_static! {
#[cfg(test)]
fn create_uuid() -> Uuid {
    let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
    Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &*EMPTY_ARR).unwrap()
    Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY).unwrap()
}

#[cfg(not(test))]
@@ -50,19 +55,55 @@ impl SegmentId {
    /// and the rest is random.
    ///
    /// Picking the first 8 chars is ok to identify
    /// segments in a display message.
    /// segments in a display message (e.g. a5c4dfcb).
    pub fn short_uuid_string(&self) -> String {
        (&self.0.to_simple_ref().to_string()[..8]).to_string()
    }

    /// Returns a segment uuid string.
    ///
    /// It consists of 32 lowercase hexadecimal chars
    /// (e.g. a5c4dfcbdfe645089129e308e26d5523)
    pub fn uuid_string(&self) -> String {
        self.0.to_simple_ref().to_string()
    }

    /// Build a `SegmentId` string from the full uuid string.
    ///
    /// E.g. "a5c4dfcbdfe645089129e308e26d5523"
    pub fn from_uuid_string(uuid_string: &str) -> Result<SegmentId, SegmentIdParseError> {
        FromStr::from_str(uuid_string)
    }
}

/// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::Error);

impl Error for SegmentIdParseError {}

impl fmt::Debug for SegmentIdParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::Display for SegmentIdParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl FromStr for SegmentId {
    type Err = SegmentIdParseError;

    fn from_str(uuid_string: &str) -> Result<Self, SegmentIdParseError> {
        let uuid = Uuid::parse_str(uuid_string).map_err(SegmentIdParseError)?;
        Ok(SegmentId(uuid))
    }
}

impl fmt::Debug for SegmentId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Seg({:?})", self.short_uuid_string())
    }
}
@@ -78,3 +119,18 @@ impl Ord for SegmentId {
        self.0.as_bytes().cmp(other.0.as_bytes())
    }
}

#[cfg(test)]
mod tests {
    use super::SegmentId;

    #[test]
    fn test_to_uuid_string() {
        let full_uuid = "a5c4dfcbdfe645089129e308e26d5523";
        let segment_id = SegmentId::from_uuid_string(full_uuid).unwrap();
        assert_eq!(segment_id.uuid_string(), full_uuid);
        assert_eq!(segment_id.short_uuid_string(), "a5c4dfcb");
        // one extra char
        assert!(SegmentId::from_uuid_string("a5c4dfcbdfe645089129e308e26d5523b").is_err());
    }
}

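Because the new `FromStr` impl above delegates to `Uuid::parse_str`, `str::parse` works too. A small sketch, assuming `SegmentId` is re-exported at the crate root (as tantivy's `lib.rs` does):

// Sketch: the FromStr impl above makes `str::parse::<SegmentId>()` usable.
use tantivy::SegmentId;

fn main() {
    let id: SegmentId = "a5c4dfcbdfe645089129e308e26d5523".parse().unwrap();
    assert_eq!(id.short_uuid_string(), "a5c4dfcb");
    // Malformed input surfaces as a SegmentIdParseError.
    assert!("not-a-uuid".parse::<SegmentId>().is_err());
}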
@@ -1,174 +0,0 @@
use super::SegmentComponent;
use census::{Inventory, TrackedObject};
use core::SegmentId;
use serde;
use std::collections::HashSet;
use std::fmt;
use std::path::PathBuf;

lazy_static! {
    static ref INVENTORY: Inventory<InnerSegmentMeta> = { Inventory::new() };
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct DeleteMeta {
    num_deleted_docs: u32,
    opstamp: u64,
}

/// `SegmentMeta` contains simple meta information about a segment.
///
/// For instance the number of docs it contains,
/// how many are deleted, etc.
#[derive(Clone)]
pub struct SegmentMeta {
    tracked: TrackedObject<InnerSegmentMeta>,
}

impl fmt::Debug for SegmentMeta {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        self.tracked.fmt(f)
    }
}

impl serde::Serialize for SegmentMeta {
    fn serialize<S>(
        &self,
        serializer: S,
    ) -> Result<<S as serde::Serializer>::Ok, <S as serde::Serializer>::Error>
    where
        S: serde::Serializer,
    {
        self.tracked.serialize(serializer)
    }
}

impl<'a> serde::Deserialize<'a> for SegmentMeta {
    fn deserialize<D>(deserializer: D) -> Result<Self, <D as serde::Deserializer<'a>>::Error>
    where
        D: serde::Deserializer<'a>,
    {
        let inner = InnerSegmentMeta::deserialize(deserializer)?;
        let tracked = INVENTORY.track(inner);
        Ok(SegmentMeta { tracked })
    }
}

impl SegmentMeta {
    /// Lists all living `SegmentMeta` objects at the time of the call.
    pub fn all() -> Vec<SegmentMeta> {
        INVENTORY
            .list()
            .into_iter()
            .map(|inner| SegmentMeta { tracked: inner })
            .collect::<Vec<_>>()
    }

    /// Creates a new `SegmentMeta` object.
    #[doc(hidden)]
    pub fn new(segment_id: SegmentId, max_doc: u32) -> SegmentMeta {
        let inner = InnerSegmentMeta {
            segment_id,
            max_doc,
            deletes: None,
        };
        SegmentMeta {
            tracked: INVENTORY.track(inner),
        }
    }

    /// Returns the segment id.
    pub fn id(&self) -> SegmentId {
        self.tracked.segment_id
    }

    /// Returns the number of deleted documents.
    pub fn num_deleted_docs(&self) -> u32 {
        self.tracked
            .deletes
            .as_ref()
            .map(|delete_meta| delete_meta.num_deleted_docs)
            .unwrap_or(0u32)
    }

    /// Returns the list of files that
    /// are required for the segment meta.
    ///
    /// This is useful as the way tantivy removes files
    /// is by removing all files that have been created by tantivy
    /// and are not used by any segment anymore.
    pub fn list_files(&self) -> HashSet<PathBuf> {
        SegmentComponent::iterator()
            .map(|component| self.relative_path(*component))
            .collect::<HashSet<PathBuf>>()
    }

    /// Returns the relative path of a component of our segment.
    ///
    /// It just joins the segment id with the extension
    /// associated to a segment component.
    pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
        let mut path = self.id().uuid_string();
        path.push_str(&*match component {
            SegmentComponent::POSTINGS => ".idx".to_string(),
            SegmentComponent::POSITIONS => ".pos".to_string(),
            SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
            SegmentComponent::TERMS => ".term".to_string(),
            SegmentComponent::STORE => ".store".to_string(),
            SegmentComponent::FASTFIELDS => ".fast".to_string(),
            SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
            SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
        });
        PathBuf::from(path)
    }

    /// Return the highest doc id + 1
    ///
    /// If there are no deletes, then num_docs = max_docs
    /// and all the doc ids contained in this segment
    /// are exactly (0..max_doc).
    pub fn max_doc(&self) -> u32 {
        self.tracked.max_doc
    }

    /// Return the number of documents in the segment.
    pub fn num_docs(&self) -> u32 {
        self.max_doc() - self.num_deleted_docs()
    }

    /// Returns the opstamp of the last delete operation
    /// taken into account in this segment.
    pub fn delete_opstamp(&self) -> Option<u64> {
        self.tracked
            .deletes
            .as_ref()
            .map(|delete_meta| delete_meta.opstamp)
    }

    /// Returns true iff the segment meta contains
    /// delete information.
    pub fn has_deletes(&self) -> bool {
        self.num_deleted_docs() > 0
    }

    #[doc(hidden)]
    pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: u64) -> SegmentMeta {
        let delete_meta = DeleteMeta {
            num_deleted_docs,
            opstamp,
        };
        let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
            segment_id: inner_meta.segment_id,
            max_doc: inner_meta.max_doc,
            deletes: Some(delete_meta),
        });
        SegmentMeta { tracked }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct InnerSegmentMeta {
    segment_id: SegmentId,
    max_doc: u32,
    deletes: Option<DeleteMeta>,
}
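The `relative_path` match in the deleted file above encodes the on-disk naming scheme: one file per component, named by the segment uuid plus a fixed extension, with the delete file also embedding the opstamp. A standalone sketch of the resulting file names (the uuid and opstamp values are illustrative):

// Illustrative: given the match arms above, a segment with this uuid and a
// delete opstamp of 42 owns the following files.
fn main() {
    let uuid = "a5c4dfcbdfe645089129e308e26d5523";
    for ext in [".idx", ".pos", ".posidx", ".term", ".store", ".fast", ".fieldnorm"].iter() {
        println!("{}{}", uuid, ext);
    }
    println!("{}.{}.del", uuid, 42); // the DELETE component embeds the opstamp
}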
@@ -1,30 +1,26 @@
use common::CompositeFile;
use common::HasLen;
use core::InvertedIndexReader;
use core::Segment;
use core::SegmentComponent;
use core::SegmentId;
use directory::ReadOnlySource;
use error::TantivyError;
use fastfield::DeleteBitSet;
use fastfield::FacetReader;
use fastfield::FastFieldReader;
use fastfield::{self, FastFieldNotAvailableError};
use fastfield::{BytesFastFieldReader, FastValue, MultiValueIntFastFieldReader};
use fieldnorm::FieldNormReader;
use schema::Cardinality;
use schema::Field;
use schema::FieldType;
use schema::Schema;
use space_usage::SegmentSpaceUsage;
use crate::common::CompositeFile;
use crate::common::HasLen;
use crate::core::InvertedIndexReader;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet;
use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermDictionary;
use crate::DocId;
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::RwLock;
use store::StoreReader;
use termdict::TermDictionary;
use DocId;
use Result;

/// Entry point to access all of the data structures of the `Segment`
///
@@ -51,8 +47,8 @@ pub struct SegmentReader {
    postings_composite: CompositeFile,
    positions_composite: CompositeFile,
    positions_idx_composite: CompositeFile,
    fast_fields_composite: CompositeFile,
    fieldnorms_composite: CompositeFile,
    fast_fields_readers: Arc<FastFieldReaders>,
    fieldnorm_readers: FieldNormReaders,

    store_source: ReadOnlySource,
    delete_bitset_opt: Option<DeleteBitSet>,
@@ -105,93 +101,21 @@ impl SegmentReader {
    ///
    /// # Panics
    /// May panic if the index is corrupted.
    pub fn fast_field_reader<Item: FastValue>(
        &self,
        field: Field,
    ) -> fastfield::Result<FastFieldReader<Item>> {
        let field_entry = self.schema.get_field_entry(field);
        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::SingleValue)
        {
            self.fast_fields_composite
                .open_read(field)
                .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
                .map(FastFieldReader::open)
        } else {
            Err(FastFieldNotAvailableError::new(field_entry))
        }
    }

    pub(crate) fn fast_field_reader_with_idx<Item: FastValue>(
        &self,
        field: Field,
        idx: usize,
    ) -> fastfield::Result<FastFieldReader<Item>> {
        if let Some(ff_source) = self.fast_fields_composite.open_read_with_idx(field, idx) {
            Ok(FastFieldReader::open(ff_source))
        } else {
            let field_entry = self.schema.get_field_entry(field);
            Err(FastFieldNotAvailableError::new(field_entry))
        }
    }

    /// Accessor to the `MultiValueIntFastFieldReader` associated to a given `Field`.
    /// May panic if the field is not a multivalued fastfield of the type `Item`.
    pub fn multi_fast_field_reader<Item: FastValue>(
        &self,
        field: Field,
    ) -> fastfield::Result<MultiValueIntFastFieldReader<Item>> {
        let field_entry = self.schema.get_field_entry(field);
        if Item::fast_field_cardinality(field_entry.field_type()) == Some(Cardinality::MultiValues)
        {
            let idx_reader = self.fast_field_reader_with_idx(field, 0)?;
            let vals_reader = self.fast_field_reader_with_idx(field, 1)?;
            Ok(MultiValueIntFastFieldReader::open(idx_reader, vals_reader))
        } else {
            Err(FastFieldNotAvailableError::new(field_entry))
        }
    }

    /// Accessor to the `BytesFastFieldReader` associated to a given `Field`.
    pub fn bytes_fast_field_reader(&self, field: Field) -> fastfield::Result<BytesFastFieldReader> {
        let field_entry = self.schema.get_field_entry(field);
        match *field_entry.field_type() {
            FieldType::Bytes => {}
            _ => return Err(FastFieldNotAvailableError::new(field_entry)),
        }
        let idx_reader = self
            .fast_fields_composite
            .open_read_with_idx(field, 0)
            .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
            .map(FastFieldReader::open)?;
        let values = self
            .fast_fields_composite
            .open_read_with_idx(field, 1)
            .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
        Ok(BytesFastFieldReader::open(idx_reader, values))
    pub fn fast_fields(&self) -> &FastFieldReaders {
        &self.fast_fields_readers
    }

    /// Accessor to the `FacetReader` associated to a given `Field`.
    pub fn facet_reader(&self, field: Field) -> Result<FacetReader> {
    pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
        let field_entry = self.schema.get_field_entry(field);
        if field_entry.field_type() != &FieldType::HierarchicalFacet {
            return Err(TantivyError::InvalidArgument(format!(
                "The field {:?} is not a \
                 hierarchical facet.",
                field_entry
            )));
            return None;
        }
        let term_ords_reader = self.multi_fast_field_reader(field)?;
        let termdict_source = self.termdict_composite.open_read(field).ok_or_else(|| {
            TantivyError::InvalidArgument(format!(
                "The field \"{}\" is a hierarchical facet, \
                 but this segment does not seem to have the field term \
                 dictionary.",
                field_entry.name()
            ))
        })?;
        let term_ords_reader = self.fast_fields().u64s(field)?;
        let termdict_source = self.termdict_composite.open_read(field)?;
        let termdict = TermDictionary::from_source(&termdict_source);
        let facet_reader = FacetReader::new(term_ords_reader, termdict);
        Ok(facet_reader)
        Some(facet_reader)
    }

    /// Accessor to the segment's `Field norms` reader.
@@ -202,8 +126,8 @@ impl SegmentReader {
    /// They are simply stored as a fast field, serialized in
    /// the `.fieldnorm` file of the segment.
    pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
        if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
            FieldNormReader::open(fieldnorm_source)
        if let Some(fieldnorm_reader) = self.fieldnorm_readers.get_field(field) {
            fieldnorm_reader
        } else {
            let field_name = self.schema.get_field_name(field);
            let err_msg = format!(
@@ -220,7 +144,7 @@ impl SegmentReader {
    }

    /// Open a new segment for reading.
    pub fn open(segment: &Segment) -> Result<SegmentReader> {
    pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
        let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
        let termdict_composite = CompositeFile::open(&termdict_source)?;

@@ -247,11 +171,15 @@ impl SegmentReader {
            }
        };

        let schema = segment.schema();

        let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
        let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
        let fast_field_readers =
            Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);

        let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
        let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
        let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
        let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;

        let delete_bitset_opt = if segment.meta().has_deletes() {
            let delete_data = segment.open_read(SegmentComponent::DELETE)?;
@@ -260,15 +188,14 @@ impl SegmentReader {
            None
        };

        let schema = segment.schema();
        Ok(SegmentReader {
            inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
            max_doc: segment.meta().max_doc(),
            num_docs: segment.meta().num_docs(),
            termdict_composite,
            postings_composite,
            fast_fields_composite,
            fieldnorms_composite,
            fast_fields_readers: fast_field_readers,
            fieldnorm_readers,
            segment_id: segment.id(),
            store_source,
            delete_bitset_opt,
@@ -316,10 +243,9 @@ impl SegmentReader {

        let postings_source = postings_source_opt.unwrap();

        let termdict_source = self
            .termdict_composite
            .open_read(field)
            .expect("Failed to open field term dictionary in composite file. Is the field indexed");
        let termdict_source = self.termdict_composite.open_read(field).expect(
            "Failed to open field term dictionary in composite file. Is the field indexed?",
        );

        let positions_source = self
            .positions_composite
@@ -369,8 +295,8 @@ impl SegmentReader {
    }

    /// Returns an iterator that will iterate over the alive document ids
    pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator {
        SegmentReaderAliveDocsIterator::new(&self)
    pub fn doc_ids_alive<'a>(&'a self) -> impl Iterator<Item = DocId> + 'a {
        (0u32..self.max_doc).filter(move |doc| !self.is_deleted(*doc))
    }

    /// Summarize total space usage of this segment.
@@ -381,74 +307,28 @@ impl SegmentReader {
            self.postings_composite.space_usage(),
            self.positions_composite.space_usage(),
            self.positions_idx_composite.space_usage(),
            self.fast_fields_composite.space_usage(),
            self.fieldnorms_composite.space_usage(),
            self.fast_fields_readers.space_usage(),
            self.fieldnorm_readers.space_usage(),
            self.get_store_reader().space_usage(),
            self.delete_bitset_opt
                .as_ref()
                .map(|x| x.space_usage())
                .map(DeleteBitSet::space_usage)
                .unwrap_or(0),
        )
    }
}

impl fmt::Debug for SegmentReader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "SegmentReader({:?})", self.segment_id)
    }
}

/// Implements the iterator trait to allow easy iteration
/// over non-deleted ("alive") DocIds in a SegmentReader
pub struct SegmentReaderAliveDocsIterator<'a> {
    reader: &'a SegmentReader,
    max_doc: DocId,
    current: DocId,
}

impl<'a> SegmentReaderAliveDocsIterator<'a> {
    pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
        SegmentReaderAliveDocsIterator {
            reader,
            max_doc: reader.max_doc(),
            current: 0,
        }
    }
}

impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
    type Item = DocId;

    fn next(&mut self) -> Option<Self::Item> {
        // TODO: Use TinySet (like in BitSetDocSet) to speed this process up
        if self.current >= self.max_doc {
            return None;
        }

        // find the next alive doc id
        while self.reader.is_deleted(self.current) {
            self.current += 1;

            if self.current >= self.max_doc {
                return None;
            }
        }

        // capture the current alive DocId
        let result = Some(self.current);

        // move down the chain
        self.current += 1;

        result
    }
}

#[cfg(test)]
mod test {
    use core::Index;
    use schema::{Schema, Term, STORED, TEXT};
    use DocId;
    use crate::core::Index;
    use crate::schema::{Schema, Term, STORED, TEXT};
    use crate::DocId;

    #[test]
    fn test_alive_docs_iterator() {
@@ -477,9 +357,7 @@ mod test {
            // ok, now we should have a deleted doc
            index_writer2.commit().unwrap();
        }

        index.load_searchers().unwrap();
        let searcher = index.searcher();
        let searcher = index.reader().unwrap().searcher();
        let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
        assert_eq!(vec![0u32, 2u32], docs);
    }

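The `doc_ids_alive` rewrite above replaces roughly forty lines of hand-rolled iterator state with a filtered range, without changing behavior. A self-contained sketch of the equivalence, with `is_deleted` stood in by a closure:

// Self-contained sketch: a filtered range yields the same "alive" ids as the
// removed manual iterator. `deleted` stands in for SegmentReader::is_deleted.
fn main() {
    let max_doc = 5u32;
    let deleted = |doc: u32| doc == 1 || doc == 3;

    // New style, as in the diff above:
    let alive: Vec<u32> = (0..max_doc).filter(|&doc| !deleted(doc)).collect();

    // Old style, hand-rolled scan over the doc id space:
    let mut manual = Vec::new();
    let mut current = 0;
    while current < max_doc {
        if !deleted(current) {
            manual.push(current);
        }
        current += 1;
    }
    assert_eq!(alive, manual); // both yield [0, 2, 4]
}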
@@ -1,7 +1,9 @@
use directory::directory_lock::Lock;
use directory::error::LockError;
use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::{ReadOnlySource, WritePtr};
use crate::directory::directory_lock::Lock;
use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::WatchCallback;
use crate::directory::WatchHandle;
use crate::directory::{ReadOnlySource, WritePtr};
use std::fmt;
use std::io;
use std::io::Write;
@@ -46,14 +48,14 @@ impl RetryPolicy {
///
/// It is transparently associated to a lock file, that gets deleted
/// on `Drop`. The lock is released automatically on `Drop`.
pub struct DirectoryLock(Box<Drop + Send + 'static>);
pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);

struct DirectoryLockGuard {
    directory: Box<Directory>,
    directory: Box<dyn Directory>,
    path: PathBuf,
}

impl<T: Drop + Send + 'static> From<Box<T>> for DirectoryLock {
impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {
    fn from(underlying: Box<T>) -> Self {
        DirectoryLock(underlying)
    }
@@ -74,7 +76,7 @@ enum TryAcquireLockError {

fn try_acquire_lock(
    filepath: &Path,
    directory: &mut Directory,
    directory: &mut dyn Directory,
) -> Result<DirectoryLock, TryAcquireLockError> {
    let mut write = directory.open_write(filepath).map_err(|e| match e {
        OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
@@ -116,6 +118,8 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    ///
    /// Specifically, subsequent writes or flushes should
    /// have no effect on the returned `ReadOnlySource` object.
    ///
    /// You should only use this to read files created with [Directory::open_write].
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;

    /// Removes a file
@@ -155,6 +159,8 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
    /// atomic_write.
    ///
    /// This should only be used for small files.
    ///
    /// You should only use this to read files created with [Directory::atomic_write].
    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;

    /// Atomically replace the content of a file with data.
@@ -187,19 +193,35 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
            }
        }
    }

    /// Registers a callback that will be called whenever a change on the `meta.json`
    /// using the `atomic_write` API is detected.
    ///
    /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
    /// hand, undefined.
    ///
    /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
    /// required to keep it.
    /// It does not override previous callbacks. When the file is modified, all callbacks that are
    /// registered (and whose `WatchHandle` is still alive) are triggered.
    ///
    /// Internally, tantivy only uses this API to detect new commits to implement the
    /// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the
    /// `OnCommit` `ReloadPolicy` from working properly.
    fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle>;
}

/// DirectoryClone
pub trait DirectoryClone {
    /// Clones the directory and boxes the clone
    fn box_clone(&self) -> Box<Directory>;
    fn box_clone(&self) -> Box<dyn Directory>;
}

impl<T> DirectoryClone for T
where
    T: 'static + Directory + Clone,
{
    fn box_clone(&self) -> Box<Directory> {
    fn box_clone(&self) -> Box<dyn Directory> {
        Box::new(self.clone())
    }
}

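The `watch` contract documented above (callbacks live only as long as their `WatchHandle`, and a modification triggers every registered callback that is still alive) can be captured in a few lines. A self-contained sketch of those semantics, not tantivy's actual `WatchCallbackList`:

// Sketch of the watch semantics: callbacks stay registered only while their
// handle keeps a strong reference alive; firing invokes all live callbacks.
use std::sync::{Arc, Mutex, Weak};

type Callback = Arc<dyn Fn() + Send + Sync>;

#[derive(Default)]
struct WatchList(Mutex<Vec<Weak<dyn Fn() + Send + Sync>>>);

struct Handle(#[allow(dead_code)] Callback); // dropping the handle unregisters

impl WatchList {
    fn subscribe(&self, f: Callback) -> Handle {
        self.0.lock().unwrap().push(Arc::downgrade(&f));
        Handle(f)
    }
    fn fire(&self) {
        for weak in self.0.lock().unwrap().iter() {
            if let Some(cb) = weak.upgrade() {
                cb();
            }
        }
    }
}

fn main() {
    let list = WatchList::default();
    let handle = list.subscribe(Arc::new(|| println!("meta.json changed")));
    list.fire(); // prints: the handle is still alive
    drop(handle);
    list.fire(); // callback gone, nothing printed
}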
@@ -1,3 +1,4 @@
use once_cell::sync::Lazy;
use std::path::PathBuf;

/// A directory lock.
@@ -28,29 +29,27 @@ pub struct Lock {
    pub is_blocking: bool,
}

lazy_static! {
    /// Only one process should be able to write tantivy's index at a time.
    /// This lock file, when present, is in charge of preventing other processes from opening an IndexWriter.
    ///
    /// If the process is killed and this file remains, it is safe to remove it manually.
    ///
    /// Failing to acquire this lock usually means a misuse of tantivy's API
    /// (creating more than one instance of the `IndexWriter`), or a spurious
    /// lock file remaining after a crash. In the latter case, removing the file after
    /// checking that no process using tantivy is running is safe.
    pub static ref INDEX_WRITER_LOCK: Lock = Lock {
        filepath: PathBuf::from(".tantivy-writer.lock"),
        is_blocking: false
    };
    /// The meta lock file is here to protect the segment files being opened by
    /// `.load_searchers()` from being garbage collected.
    /// It makes it possible for another process to safely consume
    /// our index in-writing. Ideally, we may have preferred `RWLock` semantics
    /// here, but it is difficult to achieve on Windows.
    ///
    /// Opening segment readers is a very fast process.
    pub static ref META_LOCK: Lock = Lock {
        filepath: PathBuf::from(".tantivy-meta.lock"),
        is_blocking: true
    };
}
/// Only one process should be able to write tantivy's index at a time.
/// This lock file, when present, is in charge of preventing other processes from opening an IndexWriter.
///
/// If the process is killed and this file remains, it is safe to remove it manually.
///
/// Failing to acquire this lock usually means a misuse of tantivy's API
/// (creating more than one instance of the `IndexWriter`), or a spurious
/// lock file remaining after a crash. In the latter case, removing the file after
/// checking that no process using tantivy is running is safe.
pub static INDEX_WRITER_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
    filepath: PathBuf::from(".tantivy-writer.lock"),
    is_blocking: false,
});
/// The meta lock file is here to protect the segment files being opened by
/// `IndexReader::reload()` from being garbage collected.
/// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have preferred `RWLock` semantics
/// here, but it is difficult to achieve on Windows.
///
/// Opening segment readers is a very fast process.
pub static META_LOCK: Lazy<Lock> = Lazy::new(|| Lock {
    filepath: PathBuf::from(".tantivy-meta.lock"),
    is_blocking: true,
});

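The same mechanical migration recurs throughout these commits: a `lazy_static!` block becomes a `once_cell::sync::Lazy` static. In isolation (assuming `once_cell` as a dependency), the pattern looks like this:

// Migration pattern used above: lazy_static! -> once_cell::sync::Lazy.
// Both initialize on first access; Lazy avoids the macro and the extra deref.
use once_cell::sync::Lazy;
use std::collections::HashMap;

static SETTINGS: Lazy<HashMap<&'static str, bool>> = Lazy::new(|| {
    let mut m = HashMap::new();
    m.insert("is_blocking", true);
    m
});

fn main() {
    // The first access runs the closure; later accesses reuse the value.
    assert_eq!(SETTINGS.get("is_blocking"), Some(&true));
}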
@@ -1,3 +1,4 @@
use crate::Version;
use std::error::Error as StdError;
use std::fmt;
use std::io;
@@ -6,7 +7,7 @@ use std::path::PathBuf;
/// Error while trying to acquire a directory lock.
#[derive(Debug, Fail)]
pub enum LockError {
    /// Failed to acquire a lock as it is already hold by another
    /// Failed to acquire a lock as it is already held by another
    /// client.
    /// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
    /// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
@@ -33,7 +34,7 @@ impl Into<io::Error> for IOError {
}

impl fmt::Display for IOError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.path {
            Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
            None => write!(f, "io error occurred: '{}'", self.err),
@@ -46,7 +47,7 @@ impl StdError for IOError {
        "io error occurred"
    }

    fn cause(&self) -> Option<&StdError> {
    fn cause(&self) -> Option<&dyn StdError> {
        Some(&self.err)
    }
}
@@ -73,10 +74,18 @@ pub enum OpenDirectoryError {
    DoesNotExist(PathBuf),
    /// The path exists but is not a directory.
    NotADirectory(PathBuf),
    /// IoError
    IoError(io::Error),
}

impl From<io::Error> for OpenDirectoryError {
    fn from(io_err: io::Error) -> Self {
        OpenDirectoryError::IoError(io_err)
    }
}

impl fmt::Display for OpenDirectoryError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            OpenDirectoryError::DoesNotExist(ref path) => {
                write!(f, "the underlying directory '{:?}' does not exist", path)
@@ -84,6 +93,11 @@ impl fmt::Display for OpenDirectoryError {
            OpenDirectoryError::NotADirectory(ref path) => {
                write!(f, "the path '{:?}' exists but is not a directory", path)
            }
            OpenDirectoryError::IoError(ref err) => write!(
                f,
                "IOError while trying to open/create the directory. {:?}",
                err
            ),
        }
    }
}
@@ -93,7 +107,7 @@ impl StdError for OpenDirectoryError {
        "error occurred while opening a directory"
    }

    fn cause(&self) -> Option<&StdError> {
    fn cause(&self) -> Option<&dyn StdError> {
        None
    }
}
@@ -116,7 +130,7 @@ impl From<IOError> for OpenWriteError {
}

impl fmt::Display for OpenWriteError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            OpenWriteError::FileAlreadyExists(ref path) => {
                write!(f, "the file '{:?}' already exists", path)
@@ -135,7 +149,7 @@ impl StdError for OpenWriteError {
        "error occurred while opening a file for writing"
    }

    fn cause(&self) -> Option<&StdError> {
    fn cause(&self) -> Option<&dyn StdError> {
        match *self {
            OpenWriteError::FileAlreadyExists(_) => None,
            OpenWriteError::IOError(ref err) => Some(err),
@@ -143,6 +157,65 @@ impl StdError for OpenWriteError {
    }
}

/// Type of index incompatibility between the library and the index found on disk.
/// Used to catch and provide a hint to solve this incompatibility issue.
pub enum Incompatibility {
    /// This library cannot decompress the index found on disk
    CompressionMismatch {
        /// Compression algorithm used by the current version of tantivy
        library_compression_format: String,
        /// Compression algorithm that was used to serialise the index
        index_compression_format: String,
    },
    /// The index format found on disk isn't supported by this version of the library
    IndexMismatch {
        /// Version used by the library
        library_version: Version,
        /// Version the index was built with
        index_version: Version,
    },
}

impl fmt::Debug for Incompatibility {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match self {
            Incompatibility::CompressionMismatch {
                library_compression_format,
                index_compression_format,
            } => {
                let err = format!(
                    "Library was compiled with {:?} compression, index was compressed with {:?}",
                    library_compression_format, index_compression_format
                );
                let advice = format!(
                    "Change the feature flag to {:?} and rebuild the library",
                    index_compression_format
                );
                write!(f, "{}. {}", err, advice)?;
            }
            Incompatibility::IndexMismatch {
                library_version,
                index_version,
            } => {
                let err = format!(
                    "Library version: {}, index version: {}",
                    library_version.index_format_version, index_version.index_format_version
                );
                // TODO make a more useful error message
                // include the version range that supports this index_format_version
                let advice = format!(
                    "Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
                     and rebuild your project.",
                    index_version.index_format_version, index_version.major, index_version.minor
                );
                write!(f, "{}. {}", err, advice)?;
            }
        }

        Ok(())
    }
}

/// Error that may occur when accessing a file read
#[derive(Debug)]
pub enum OpenReadError {
@@ -151,6 +224,8 @@ pub enum OpenReadError {
    /// Any kind of IO error that happens when
    /// interacting with the underlying IO device.
    IOError(IOError),
    /// This library doesn't support the index version found on disk
    IncompatibleIndex(Incompatibility),
}

impl From<IOError> for OpenReadError {
@@ -160,7 +235,7 @@ impl From<IOError> for OpenReadError {
}

impl fmt::Display for OpenReadError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            OpenReadError::FileDoesNotExist(ref path) => {
                write!(f, "the file '{:?}' does not exist", path)
@@ -170,19 +245,9 @@ impl fmt::Display for OpenReadError {
                "an io error occurred while opening a file for reading: '{}'",
                err
            ),
        }
    }
}

impl StdError for OpenReadError {
    fn description(&self) -> &str {
        "error occurred while opening a file for reading"
    }

    fn cause(&self) -> Option<&StdError> {
        match *self {
            OpenReadError::FileDoesNotExist(_) => None,
            OpenReadError::IOError(ref err) => Some(err),
            OpenReadError::IncompatibleIndex(ref footer) => {
                write!(f, "Incompatible index format: {:?}", footer)
            }
        }
    }
}
@@ -203,8 +268,14 @@ impl From<IOError> for DeleteError {
    }
}

impl From<Incompatibility> for OpenReadError {
    fn from(incompatibility: Incompatibility) -> Self {
        OpenReadError::IncompatibleIndex(incompatibility)
    }
}

impl fmt::Display for DeleteError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            DeleteError::FileDoesNotExist(ref path) => {
                write!(f, "the file '{:?}' does not exist", path)
@@ -221,7 +292,7 @@ impl StdError for DeleteError {
        "error occurred while deleting a file"
    }

    fn cause(&self) -> Option<&StdError> {
    fn cause(&self) -> Option<&dyn StdError> {
        match *self {
            DeleteError::FileDoesNotExist(_) => None,
            DeleteError::IOError(ref err) => Some(err),

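Most of the remaining churn in this file is the Rust 2018 `dyn` migration: bare trait objects such as `&StdError` and `Box<Directory>` become `&dyn StdError` and `Box<dyn Directory>`. A compact standalone illustration (std types only; `cause` is deprecated in favor of `source`, but still compiles):

// Rust 2018 idiom migration shown throughout this diff: bare trait objects
// gain an explicit `dyn`, with no change in behavior.
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct MyError;

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "my error")
    }
}

impl Error for MyError {
    // The 2015 edition accepted `Option<&Error>` here; 2018 requires `dyn`.
    fn cause(&self) -> Option<&dyn Error> {
        None
    }
}

fn main() {
    let boxed: Box<dyn Error> = Box::new(MyError); // was: Box<Error>
    println!("{}", boxed);
}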
src/directory/footer.rs (new file, 390 lines)
@@ -0,0 +1,390 @@
|
||||
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
|
||||
use crate::directory::error::Incompatibility;
|
||||
use crate::directory::read_only_source::ReadOnlySource;
|
||||
use crate::directory::{AntiCallToken, TerminatingWrite};
|
||||
use crate::Version;
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use crc32fast::Hasher;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
const FOOTER_MAX_LEN: usize = 10_000;
|
||||
|
||||
type CrcHashU32 = u32;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct Footer {
|
||||
pub version: Version,
|
||||
pub meta: String,
|
||||
pub versioned_footer: VersionedFooter,
|
||||
}
|
||||
|
||||
/// Serialises the footer to a byte-array
|
||||
/// - versioned_footer_len : 4 bytes
|
||||
///- versioned_footer: variable bytes
|
||||
/// - meta_len: 4 bytes
|
||||
/// - meta: variable bytes
|
||||
/// - version_len: 4 bytes
|
||||
/// - version json: variable bytes
|
||||
impl BinarySerializable for Footer {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
BinarySerializable::serialize(&self.versioned_footer, writer)?;
|
||||
BinarySerializable::serialize(&self.meta, writer)?;
|
||||
let version_string =
|
||||
serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
|
||||
BinarySerializable::serialize(&version_string, writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let versioned_footer = VersionedFooter::deserialize(reader)?;
|
||||
let meta = String::deserialize(reader)?;
|
||||
let version_json = String::deserialize(reader)?;
|
||||
let version = serde_json::from_str(&version_json)?;
|
||||
Ok(Footer {
|
||||
version,
|
||||
meta,
|
||||
versioned_footer,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Footer {
|
||||
pub fn new(versioned_footer: VersionedFooter) -> Self {
|
||||
let version = crate::VERSION.clone();
|
||||
let meta = version.to_string();
|
||||
Footer {
|
||||
version,
|
||||
meta,
|
||||
versioned_footer,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
|
||||
let mut counting_write = CountingWriter::wrap(&mut write);
|
||||
self.serialize(&mut counting_write)?;
|
||||
let written_len = counting_write.written_bytes();
|
||||
write.write_u32::<LittleEndian>(written_len as u32)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
|
||||
if source.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
format!(
|
||||
"File corrupted. The file is smaller than 4 bytes (len={}).",
|
||||
source.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
|
||||
let body_len = body_footer.len() - footer_len;
|
||||
let (body, footer_data) = body_footer.split(body_len);
|
||||
let mut cursor = footer_data.as_slice();
|
||||
let footer = Footer::deserialize(&mut cursor)?;
|
||||
Ok((footer, body))
|
||||
}
|
||||
|
||||
/// Confirms that the index will be read correctly by this version of tantivy
|
||||
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory
|
||||
pub fn is_compatible(&self) -> Result<(), Incompatibility> {
|
||||
let library_version = crate::version();
|
||||
match &self.versioned_footer {
|
||||
VersionedFooter::V1 {
|
||||
crc32: _crc,
|
||||
store_compression,
|
||||
} => {
|
||||
if &library_version.store_compression != store_compression {
|
||||
return Err(Incompatibility::CompressionMismatch {
|
||||
library_compression_format: library_version.store_compression.to_string(),
|
||||
index_compression_format: store_compression.to_string(),
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
VersionedFooter::V2 {
|
||||
crc32: _crc,
|
||||
store_compression,
|
||||
} => {
|
||||
if &library_version.store_compression != store_compression {
|
||||
return Err(Incompatibility::CompressionMismatch {
|
||||
library_compression_format: library_version.store_compression.to_string(),
|
||||
index_compression_format: store_compression.to_string(),
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
|
||||
library_version: library_version.clone(),
|
||||
index_version: self.version.clone(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Footer that includes a crc32 hash that enables us to checksum files in the index
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum VersionedFooter {
|
||||
UnknownVersion,
|
||||
V1 {
|
||||
crc32: CrcHashU32,
|
||||
store_compression: String,
|
||||
},
|
||||
// Introduction of the Block WAND information.
|
||||
V2 {
|
||||
crc32: CrcHashU32,
|
||||
store_compression: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl BinarySerializable for VersionedFooter {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let mut buf = Vec::new();
|
||||
match self {
|
||||
VersionedFooter::V2 {
|
||||
crc32,
|
||||
store_compression: compression,
|
||||
} => {
|
||||
// Serializes a valid `VersionedFooter` or panics if the version is unknown
|
||||
// [ version | crc_hash | compression_mode ]
|
||||
// [ 0..4 | 4..8 | variable ]
|
||||
BinarySerializable::serialize(&2u32, &mut buf)?;
|
||||
BinarySerializable::serialize(crc32, &mut buf)?;
|
||||
BinarySerializable::serialize(compression, &mut buf)?;
|
||||
}
|
||||
VersionedFooter::V1 { .. } | VersionedFooter::UnknownVersion => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Cannot serialize an unknown versioned footer ",
|
||||
));
|
||||
}
|
||||
}
|
||||
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
|
||||
assert!(buf.len() <= FOOTER_MAX_LEN);
|
||||
writer.write_all(&buf[..])?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let len = VInt::deserialize(reader)?.0 as usize;
|
||||
if len > FOOTER_MAX_LEN {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Footer seems invalid as it suggests a footer len of {}. File is corrupted, \
|
||||
or the index was created with a different & old version of tantivy.",
|
||||
len
|
||||
),
|
||||
));
|
||||
}
|
||||
let mut buf = vec![0u8; len];
|
||||
reader.read_exact(&mut buf[..])?;
|
||||
let mut cursor = &buf[..];
|
||||
let version = u32::deserialize(&mut cursor)?;
|
||||
if version != 1 && version != 2 {
|
||||
return Ok(VersionedFooter::UnknownVersion);
|
||||
}
|
||||
let crc32 = u32::deserialize(&mut cursor)?;
|
||||
let store_compression = String::deserialize(&mut cursor)?;
|
||||
Ok(if version == 1 {
|
||||
VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression,
|
||||
}
|
||||
} else {
|
||||
assert_eq!(version, 2);
|
||||
VersionedFooter::V2 {
|
||||
crc32,
|
||||
store_compression,
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl VersionedFooter {
|
||||
pub fn crc(&self) -> Option<CrcHashU32> {
|
||||
match self {
|
||||
VersionedFooter::V2 { crc32, .. } => Some(*crc32),
|
||||
VersionedFooter::V1 { crc32, .. } => Some(*crc32),
|
||||
VersionedFooter::UnknownVersion { .. } => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct FooterProxy<W: TerminatingWrite> {
|
||||
/// always Some except after terminate call
|
||||
hasher: Option<Hasher>,
|
||||
/// always Some except after terminate call
|
||||
writer: Option<W>,
|
||||
}
|
||||
|
||||
impl<W: TerminatingWrite> FooterProxy<W> {
|
||||
pub fn new(writer: W) -> Self {
|
||||
FooterProxy {
|
||||
hasher: Some(Hasher::new()),
|
||||
writer: Some(writer),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: TerminatingWrite> Write for FooterProxy<W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let count = self.writer.as_mut().unwrap().write(buf)?;
|
||||
self.hasher.as_mut().unwrap().update(&buf[..count]);
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.writer.as_mut().unwrap().flush()
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
|
||||
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
|
||||
let crc32 = self.hasher.take().unwrap().finalize();
|
||||
let footer = Footer::new(VersionedFooter::V2 {
|
||||
crc32,
|
||||
store_compression: crate::store::COMPRESSION.to_string(),
|
||||
});
|
||||
let mut writer = self.writer.take().unwrap();
|
||||
footer.append_footer(&mut writer)?;
|
||||
writer.terminate()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::CrcHashU32;
|
||||
use super::FooterProxy;
|
||||
use crate::common::{BinarySerializable, VInt};
|
||||
use crate::directory::footer::{Footer, VersionedFooter};
|
||||
use crate::directory::TerminatingWrite;
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use regex::Regex;
|
||||
use std::io;
|
||||
|
||||
#[test]
|
||||
fn test_versioned_footer() {
|
||||
let mut vec = Vec::new();
|
||||
let footer_proxy = FooterProxy::new(&mut vec);
|
||||
assert!(footer_proxy.terminate().is_ok());
|
||||
        assert_eq!(vec.len(), 167);
        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
        assert!(matches!(
            footer.versioned_footer,
            VersionedFooter::V2 { store_compression, .. }
            if store_compression == crate::store::COMPRESSION
        ));
        assert_eq!(&footer.version, crate::version());
    }

    #[test]
    fn test_serialize_deserialize_footer() {
        let mut buffer = Vec::new();
        let crc32 = 123456u32;
        let footer: Footer = Footer::new(VersionedFooter::V2 {
            crc32,
            store_compression: "lz4".to_string(),
        });
        footer.serialize(&mut buffer).unwrap();
        let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
        assert_eq!(footer_deser, footer);
    }

    #[test]
    fn footer_length() {
        let crc32 = 1111111u32;
        let versioned_footer = VersionedFooter::V2 {
            crc32,
            store_compression: "lz4".to_string(),
        };
        let mut buf = Vec::new();
        versioned_footer.serialize(&mut buf).unwrap();
        assert_eq!(buf.len(), 13);
        let footer = Footer::new(versioned_footer);
        let regex_ptn = Regex::new(
            "tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
        )
        .unwrap();
        assert!(regex_ptn.is_match(&footer.meta));
    }

    #[test]
    fn versioned_footer_from_bytes() {
        let v_footer_bytes = vec![
            // versioned footer length
            12 | 128,
            // index format version
            2,
            0,
            0,
            0,
            // crc 32
            12,
            35,
            89,
            18,
            // compression format
            3 | 128,
            b'l',
            b'z',
            b'4',
        ];
        let mut cursor = &v_footer_bytes[..];
        let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
        assert!(cursor.is_empty());
        let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
        let expected_versioned_footer: VersionedFooter = VersionedFooter::V2 {
            crc32: expected_crc,
            store_compression: "lz4".to_string(),
        };
        assert_eq!(versioned_footer, expected_versioned_footer);
        let mut buffer = Vec::new();
        assert!(versioned_footer.serialize(&mut buffer).is_ok());
        assert_eq!(&v_footer_bytes[..], &buffer[..]);
    }

    #[test]
    fn versioned_footer_panic() {
        let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
        let mut b = &v_footer_bytes[..];
        let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
        assert!(b.is_empty());
        let expected_versioned_footer = VersionedFooter::UnknownVersion;
        assert_eq!(versioned_footer, expected_versioned_footer);
        let mut buf = Vec::new();
        assert!(versioned_footer.serialize(&mut buf).is_err());
    }

    #[test]
    #[cfg(not(feature = "lz4"))]
    fn compression_mismatch() {
        let crc32 = 1111111u32;
        let versioned_footer = VersionedFooter::V1 {
            crc32,
            store_compression: "lz4".to_string(),
        };
        let footer = Footer::new(versioned_footer);
        let res = footer.is_compatible();
        assert!(res.is_err());
    }

    #[test]
    fn test_deserialize_too_large_footer() {
        let mut buf = vec![];
        assert!(FooterProxy::new(&mut buf).terminate().is_ok());
        let mut long_len_buf = [0u8; 10];
        let num_bytes = VInt(super::FOOTER_MAX_LEN as u64 + 1u64).serialize_into(&mut long_len_buf);
        buf[0..num_bytes].copy_from_slice(&long_len_buf[..num_bytes]);
        let err = Footer::deserialize(&mut &buf[..]).unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
        assert_eq!(
            err.to_string(),
            "Footer seems invalid as it suggests a footer len of 10001. File is corrupted, \
             or the index was created with a different & old version of tantivy."
        );
    }
}
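
To make the byte layout exercised by `versioned_footer_from_bytes` easier to follow, here is a hand-decoding of the same bytes. This is an illustrative sketch, not tantivy's implementation; it assumes the single-byte VInt form (high bit set marks the last byte) used in the test data.

// Illustrative only: decodes the V2 footer bytes from `versioned_footer_from_bytes`.
// `decode_v2_footer` is a hypothetical helper, not part of tantivy.
fn decode_v2_footer(bytes: &[u8]) -> (u32, u32, &str) {
    let body_len = (bytes[0] & 0x7f) as usize; // 12 | 128: one-byte VInt, body is 12 bytes
    assert_eq!(body_len, bytes.len() - 1);
    let version = u32::from_le_bytes([bytes[1], bytes[2], bytes[3], bytes[4]]); // 2
    let crc32 = u32::from_le_bytes([bytes[5], bytes[6], bytes[7], bytes[8]]); // little-endian crc
    let compression_len = (bytes[9] & 0x7f) as usize; // 3 | 128: "lz4" is 3 bytes
    let compression = std::str::from_utf8(&bytes[10..10 + compression_len]).unwrap();
    (version, crc32, compression)
}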

@@ -1,11 +1,16 @@
use core::MANAGED_FILEPATH;
use directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use directory::DirectoryLock;
use directory::Lock;
use directory::META_LOCK;
use directory::{ReadOnlySource, WritePtr};
use error::DataCorruption;
use serde_json;
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
use crate::directory::Lock;
use crate::directory::META_LOCK;
use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;

use crc32fast::Hasher;
use std::collections::HashSet;
use std::io;
use std::io::Write;
@@ -13,8 +18,6 @@ use std::path::{Path, PathBuf};
use std::result;
use std::sync::RwLockWriteGuard;
use std::sync::{Arc, RwLock};
use Directory;
use Result;

/// Returns true iff the file is "managed".
/// Non-managed files are not subject to garbage collection.
@@ -38,7 +41,7 @@ fn is_managed(path: &Path) -> bool {
/// useful anymore.
#[derive(Debug)]
pub struct ManagedDirectory {
    directory: Box<Directory>,
    directory: Box<dyn Directory>,
    meta_informations: Arc<RwLock<MetaInformation>>,
}

@@ -50,8 +53,8 @@ struct MetaInformation {
/// Saves the file containing the list of existing files
/// that were created by tantivy.
fn save_managed_paths(
    directory: &mut Directory,
    wlock: &RwLockWriteGuard<MetaInformation>,
    directory: &mut dyn Directory,
    wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> {
    let mut w = serde_json::to_vec(&wlock.managed_paths)?;
    writeln!(&mut w)?;
@@ -61,14 +64,14 @@ fn save_managed_paths(

impl ManagedDirectory {
    /// Wraps a directory as managed directory.
    pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
    pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> {
        match directory.atomic_read(&MANAGED_FILEPATH) {
            Ok(data) => {
                let managed_files_json = String::from_utf8_lossy(&data);
                let managed_files: HashSet<PathBuf> = serde_json::from_str(&managed_files_json)
                    .map_err(|e| {
                        DataCorruption::new(
                            MANAGED_FILEPATH.clone(),
                            MANAGED_FILEPATH.to_path_buf(),
                            format!("Managed file cannot be deserialized: {:?}. ", e),
                        )
                    })?;
@@ -84,6 +87,11 @@ impl ManagedDirectory {
                meta_informations: Arc::default(),
            }),
            Err(OpenReadError::IOError(e)) => Err(From::from(e)),
            Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
                // For the moment, this should never happen: `meta.json`
                // does not have any footer, so incompatibility cannot be detected.
                Err(crate::TantivyError::IncompatibleIndex(incompatibility))
            }
        }
    }

@@ -101,7 +109,10 @@ impl ManagedDirectory {
    /// If a file cannot be deleted (for permission reasons, for instance)
    /// an error is simply logged, and the file remains in the list of managed
    /// files.
    pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) {
    pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(
        &mut self,
        get_living_files: L,
    ) -> crate::Result<GarbageCollectionResult> {
        info!("Garbage collect");
        let mut files_to_delete = vec![];

@@ -127,35 +138,42 @@ impl ManagedDirectory {
        // 2) the writer changes meta.json (for instance after a merge or a commit)
        // 3) gc kicks in.
        // 4) gc removes a file that was useful for process B, before process B opened it.
        if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) {
            let living_files = get_living_files();
            for managed_path in &meta_informations_rlock.managed_paths {
                if !living_files.contains(managed_path) {
                    files_to_delete.push(managed_path.clone());
        match self.acquire_lock(&META_LOCK) {
            Ok(_meta_lock) => {
                let living_files = get_living_files();
                for managed_path in &meta_informations_rlock.managed_paths {
                    if !living_files.contains(managed_path) {
                        files_to_delete.push(managed_path.clone());
                    }
                }
            }
            Err(err) => {
                error!("Failed to acquire lock for GC");
                return Err(crate::TantivyError::from(err));
            }
        }
    }

        let mut failed_to_delete_files = vec![];
        let mut deleted_files = vec![];
        {
            for file_to_delete in files_to_delete {
                match self.delete(&file_to_delete) {
                    Ok(_) => {
                        info!("Deleted {:?}", file_to_delete);
                        deleted_files.push(file_to_delete);
                    }
                    Err(file_error) => {
                        match file_error {
                            DeleteError::FileDoesNotExist(_) => {
                                deleted_files.push(file_to_delete);
                            }
                            DeleteError::IOError(_) => {
                                if !cfg!(target_os = "windows") {
                                    // On Windows, delete is expected to fail if the file
                                    // is mmapped.
                                    error!("Failed to delete {:?}", file_to_delete);
                                }

        for file_to_delete in files_to_delete {
            match self.delete(&file_to_delete) {
                Ok(_) => {
                    info!("Deleted {:?}", file_to_delete);
                    deleted_files.push(file_to_delete);
                }
                Err(file_error) => {
                    match file_error {
                        DeleteError::FileDoesNotExist(_) => {
                            deleted_files.push(file_to_delete.clone());
                        }
                        DeleteError::IOError(_) => {
                            failed_to_delete_files.push(file_to_delete.clone());
                            if !cfg!(target_os = "windows") {
                                // On Windows, delete is expected to fail if the file
                                // is mmapped.
                                error!("Failed to delete {:?}", file_to_delete);
                            }
                        }
                    }
@@ -170,16 +188,17 @@ impl ManagedDirectory {
            .meta_informations
            .write()
            .expect("Managed directory wlock poisoned (2).");
        {
            let managed_paths_write = &mut meta_informations_wlock.managed_paths;
            for delete_file in &deleted_files {
                managed_paths_write.remove(delete_file);
            }
        }
        if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
            error!("Failed to save the list of managed files.");
        let managed_paths_write = &mut meta_informations_wlock.managed_paths;
        for delete_file in &deleted_files {
            managed_paths_write.remove(delete_file);
        }
        save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?;
        }

        Ok(GarbageCollectionResult {
            deleted_files,
            failed_to_delete_files,
        })
    }

    /// Registers a file as managed
@@ -208,17 +227,60 @@ impl ManagedDirectory {
        }
        Ok(())
    }

    /// Verifies the checksum of a managed file.
    pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
        let reader = self.directory.open_read(path)?;
        let (footer, data) = Footer::extract_footer(reader)
            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
        let mut hasher = Hasher::new();
        hasher.update(data.as_slice());
        let crc = hasher.finalize();
        Ok(footer
            .versioned_footer
            .crc()
            .map(|v| v == crc)
            .unwrap_or(false))
    }

    /// Lists the files whose checksum does not match their content.
    pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
        let mut hashset = HashSet::new();
        let managed_paths = self
            .meta_informations
            .read()
            .expect("Managed directory rlock poisoned in list damaged.")
            .managed_paths
            .clone();

        for path in managed_paths.into_iter() {
            if !self.validate_checksum(&path)? {
                hashset.insert(path);
            }
        }
        Ok(hashset)
    }
}
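
`validate_checksum` recomputes the crc32 of the file body and compares it against the value stored in the footer; `list_damaged` simply applies that check to every managed path. A minimal usage sketch (`managed` is assumed to be a `ManagedDirectory`; errors are unwrapped for brevity):

// Sketch: detect files whose footer crc32 no longer matches their content.
use std::collections::HashSet;
use std::path::PathBuf;

fn integrity_check(managed: &ManagedDirectory) {
    let damaged: HashSet<PathBuf> = managed.list_damaged().expect("failed to read managed files");
    for path in &damaged {
        // Each entry failed validate_checksum(): the file was truncated or overwritten.
        println!("corrupted: {:?}", path);
    }
}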

impl Directory for ManagedDirectory {
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
        self.directory.open_read(path)
        let read_only_source = self.directory.open_read(path)?;
        let (footer, reader) = Footer::extract_footer(read_only_source)
            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
        footer.is_compatible()?;
        Ok(reader)
    }

    fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
        self.register_file_as_managed(path)
            .map_err(|e| IOError::with_path(path.to_owned(), e))?;
        self.directory.open_write(path)
        Ok(io::BufWriter::new(Box::new(FooterProxy::new(
            self.directory
                .open_write(path)?
                .into_inner()
                .map_err(|_| ())
                .expect("buffer should be empty"),
        ))))
    }

    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -241,6 +303,10 @@ impl Directory for ManagedDirectory {
    fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
        self.directory.acquire_lock(lock)
    }

    fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
        self.directory.watch(watch_callback)
    }
}

impl Clone for ManagedDirectory {
@@ -252,98 +318,118 @@ impl Clone for ManagedDirectory {
    }
}

#[cfg(feature = "mmap")]
#[cfg(test)]
mod tests {
mod tests_mmap_specific {

    use super::*;
    #[cfg(feature = "mmap")]
    use directory::MmapDirectory;
    use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
    use std::collections::HashSet;
    use std::fs::OpenOptions;
    use std::io::Write;
    use std::path::Path;
    use tempdir::TempDir;

    lazy_static! {
        static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test");
        static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
    }
    use std::path::{Path, PathBuf};
    use tempfile::TempDir;

    #[test]
    #[cfg(feature = "mmap")]
    fn test_managed_directory() {
        let tempdir = TempDir::new("index").unwrap();
        let tempdir = TempDir::new().unwrap();
        let tempdir_path = PathBuf::from(tempdir.path());

        let test_path1: &'static Path = Path::new("some_path_for_test");
        let test_path2: &'static Path = Path::new("some_path_for_test_2");
        {
            let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
            let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
            {
                let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap();
                write_file.flush().unwrap();
            }
            {
                managed_directory
                    .atomic_write(*TEST_PATH2, &vec![0u8, 1u8])
                    .unwrap();
            }
            {
                assert!(managed_directory.exists(*TEST_PATH1));
                assert!(managed_directory.exists(*TEST_PATH2));
            }
            {
                let living_files: HashSet<PathBuf> =
                    [TEST_PATH1.to_owned()].into_iter().cloned().collect();
                managed_directory.garbage_collect(|| living_files);
            }
            {
                assert!(managed_directory.exists(*TEST_PATH1));
                assert!(!managed_directory.exists(*TEST_PATH2));
            }
            let write_file = managed_directory.open_write(test_path1).unwrap();
            write_file.terminate().unwrap();
            managed_directory
                .atomic_write(test_path2, &[0u8, 1u8])
                .unwrap();
            assert!(managed_directory.exists(test_path1));
            assert!(managed_directory.exists(test_path2));
            let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
            assert!(managed_directory.garbage_collect(|| living_files).is_ok());
            assert!(managed_directory.exists(test_path1));
            assert!(!managed_directory.exists(test_path2));
        }
        {
            let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
            let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
            {
                assert!(managed_directory.exists(*TEST_PATH1));
                assert!(!managed_directory.exists(*TEST_PATH2));
            }
            {
                let living_files: HashSet<PathBuf> = HashSet::new();
                managed_directory.garbage_collect(|| living_files);
            }
            {
                assert!(!managed_directory.exists(*TEST_PATH1));
                assert!(!managed_directory.exists(*TEST_PATH2));
            }
            assert!(managed_directory.exists(test_path1));
            assert!(!managed_directory.exists(test_path2));
            let living_files: HashSet<PathBuf> = HashSet::new();
            assert!(managed_directory.garbage_collect(|| living_files).is_ok());
            assert!(!managed_directory.exists(test_path1));
            assert!(!managed_directory.exists(test_path2));
        }
    }

    #[test]
    #[cfg(feature = "mmap")]
    fn test_managed_directory_gc_while_mmapped() {
        let tempdir = TempDir::new("index").unwrap();
        let test_path1: &'static Path = Path::new("some_path_for_test");

        let tempdir = TempDir::new().unwrap();
        let tempdir_path = PathBuf::from(tempdir.path());
        let living_files = HashSet::new();

        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
        let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
        managed_directory
            .atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
            .unwrap();
        assert!(managed_directory.exists(*TEST_PATH1));
        let mut write = managed_directory.open_write(test_path1).unwrap();
        write.write_all(&[0u8, 1u8]).unwrap();
        write.terminate().unwrap();
        assert!(managed_directory.exists(test_path1));

        let _mmap_read = managed_directory.open_read(*TEST_PATH1).unwrap();
        managed_directory.garbage_collect(|| living_files.clone());
        let _mmap_read = managed_directory.open_read(test_path1).unwrap();
        assert!(managed_directory
            .garbage_collect(|| living_files.clone())
            .is_ok());
        if cfg!(target_os = "windows") {
            // On Windows, gc should try to delete the file and fail, as it is mmapped.
            assert!(managed_directory.exists(*TEST_PATH1));
            assert!(managed_directory.exists(test_path1));
            // unmap should happen here.
            drop(_mmap_read);
            // The file should still be in the list of managed files and
            // eventually be deleted once the mmap is released.
            managed_directory.garbage_collect(|| living_files);
            assert!(!managed_directory.exists(*TEST_PATH1));
            assert!(managed_directory.garbage_collect(|| living_files).is_ok());
            assert!(!managed_directory.exists(test_path1));
        } else {
            assert!(!managed_directory.exists(*TEST_PATH1));
            assert!(!managed_directory.exists(test_path1));
        }
    }

    #[test]
    fn test_checksum() {
        let test_path1: &'static Path = Path::new("some_path_for_test");
        let test_path2: &'static Path = Path::new("other_test_path");

        let tempdir = TempDir::new().unwrap();
        let tempdir_path = PathBuf::from(tempdir.path());

        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
        let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
        let mut write = managed_directory.open_write(test_path1).unwrap();
        write.write_all(&[0u8, 1u8]).unwrap();
        write.terminate().unwrap();

        let mut write = managed_directory.open_write(test_path2).unwrap();
        write.write_all(&[3u8, 4u8, 5u8]).unwrap();
        write.terminate().unwrap();

        let read_source = managed_directory.open_read(test_path2).unwrap();
        assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
        assert!(managed_directory.list_damaged().unwrap().is_empty());

        let mut corrupted_path = tempdir_path.clone();
        corrupted_path.push(test_path2);
        let mut file = OpenOptions::new()
            .write(true)
            .open(&corrupted_path)
            .unwrap();
        file.write_all(&[255u8]).unwrap();
        file.flush().unwrap();
        drop(file);

        let damaged = managed_directory.list_damaged().unwrap();
        assert_eq!(damaged.len(), 1);
        assert!(damaged.contains(test_path2));
    }
}

@@ -1,18 +1,24 @@
extern crate fs2;

use self::fs2::FileExt;
use atomicwrites;
use common::make_io_err;
use directory::error::LockError;
use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use directory::shared_vec_slice::SharedVecSlice;
use directory::Directory;
use directory::DirectoryLock;
use directory::Lock;
use directory::ReadOnlySource;
use directory::WritePtr;
use fst::raw::MmapReadOnly;
use std::collections::hash_map::Entry as HashMapEntry;
use crate::core::META_FILEPATH;
use crate::directory::error::LockError;
use crate::directory::error::{
    DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr};
use fs2::FileExt;
use memmap::Mmap;
use notify::RawEvent;
use notify::RecursiveMode;
use notify::Watcher;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::convert::From;
use std::fmt;
@@ -22,14 +28,22 @@ use std::io::{self, Seek, SeekFrom};
use std::io::{BufWriter, Read, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::RwLock;
use tempdir::TempDir;
use std::sync::Weak;
use std::thread;
use tempfile::TempDir;

/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
    io::Error::new(io::ErrorKind::Other, msg)
}

/// Returns None iff the file exists, can be read, but is empty (and hence
/// cannot be mmapped).
///
fn open_mmap(full_path: &Path) -> result::Result<Option<MmapReadOnly>, OpenReadError> {
/// cannot be mmapped)
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
    let file = File::open(full_path).map_err(|e| {
        if e.kind() == io::ErrorKind::NotFound {
            OpenReadError::FileDoesNotExist(full_path.to_owned())
@@ -48,7 +62,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<MmapReadOnly>, OpenReadError>
        return Ok(None);
    }
    unsafe {
        MmapReadOnly::open(&file)
        memmap::Mmap::map(&file)
            .map(Some)
            .map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
    }
@@ -71,7 +85,7 @@ pub struct CacheInfo {

struct MmapCache {
    counters: CacheCounters,
    cache: HashMap<PathBuf, MmapReadOnly>,
    cache: HashMap<PathBuf, Weak<BoxedData>>,
}

impl Default for MmapCache {
@@ -84,12 +98,7 @@ impl Default for MmapCache {
}

impl MmapCache {
    /// Removes a `MmapReadOnly` entry from the mmap cache.
    fn discard_from_cache(&mut self, full_path: &Path) -> bool {
        self.cache.remove(full_path).is_some()
    }

    fn get_info(&mut self) -> CacheInfo {
    fn get_info(&self) -> CacheInfo {
        let paths: Vec<PathBuf> = self.cache.keys().cloned().collect();
        CacheInfo {
            counters: self.counters.clone(),
@@ -97,24 +106,93 @@ impl MmapCache {
        }
    }

    fn get_mmap(&mut self, full_path: &Path) -> Result<Option<MmapReadOnly>, OpenReadError> {
        Ok(match self.cache.entry(full_path.to_owned()) {
            HashMapEntry::Occupied(occupied_entry) => {
                let mmap = occupied_entry.get();
    fn remove_weak_ref(&mut self) {
        let keys_to_remove: Vec<PathBuf> = self
            .cache
            .iter()
            .filter(|(_, mmap_weakref)| mmap_weakref.upgrade().is_none())
            .map(|(key, _)| key.clone())
            .collect();
        for key in keys_to_remove {
            self.cache.remove(&key);
        }
    }

    // Returns None if the file exists but has a len of 0 (and hence is not mmappable).
    fn get_mmap(&mut self, full_path: &Path) -> Result<Option<Arc<BoxedData>>, OpenReadError> {
        if let Some(mmap_weak) = self.cache.get(full_path) {
            if let Some(mmap_arc) = mmap_weak.upgrade() {
                self.counters.hit += 1;
                Some(mmap.clone())
                return Ok(Some(mmap_arc));
            }
            HashMapEntry::Vacant(vacant_entry) => {
                self.counters.miss += 1;
                if let Some(mmap) = open_mmap(full_path)? {
                    vacant_entry.insert(mmap.clone());
                    Some(mmap)
                } else {
                    None
                }
        self.cache.remove(full_path);
        self.counters.miss += 1;
        let mmap_opt = open_mmap(full_path)?;
        Ok(mmap_opt.map(|mmap| {
            let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
            let mmap_weak = Arc::downgrade(&mmap_arc);
            self.cache.insert(full_path.to_owned(), mmap_weak);
            mmap_arc
        }))
    }
}
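
The cache now holds `Weak<BoxedData>` handles: an entry stays valid exactly as long as some `ReadOnlySource` keeps the `Arc` alive, and `remove_weak_ref` sweeps out the dead keys. The same pattern in isolation, with invented names, looks like this:

// Standalone sketch of the weak-reference cache pattern used by MmapCache above.
// `DemoCache` is illustrative and not part of tantivy.
use std::collections::HashMap;
use std::sync::{Arc, Weak};

struct DemoCache {
    cache: HashMap<String, Weak<Vec<u8>>>,
}

impl DemoCache {
    fn get_or_load(&mut self, key: &str, load: impl Fn() -> Vec<u8>) -> Arc<Vec<u8>> {
        if let Some(weak) = self.cache.get(key) {
            if let Some(arc) = weak.upgrade() {
                return arc; // hit: another reader still holds the data
            }
        }
        // Miss: (re)load, and keep only a Weak so the entry dies with its last user.
        let arc = Arc::new(load());
        self.cache.insert(key.to_string(), Arc::downgrade(&arc));
        arc
    }
}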

struct WatcherWrapper {
    _watcher: Mutex<notify::RecommendedWatcher>,
    watcher_router: Arc<WatchCallbackList>,
}

impl WatcherWrapper {
    pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
        let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
        // We need to initialize the watcher and register the path to watch.
        let watcher = notify::raw_watcher(tx)
            .and_then(|mut watcher| {
                watcher.watch(path, RecursiveMode::Recursive)?;
                Ok(watcher)
            })
            .map_err(|err| match err {
                notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
                _ => {
                    panic!("Unknown error while starting watching directory {:?}", path);
                }
            }
        })?;
        let watcher_router: Arc<WatchCallbackList> = Default::default();
        let watcher_router_clone = watcher_router.clone();
        thread::Builder::new()
            .name("meta-file-watch-thread".to_string())
            .spawn(move || {
                loop {
                    match watcher_recv.recv().map(|evt| evt.path) {
                        Ok(Some(changed_path)) => {
                            // Note: this is subject to false positives.
                            // We might want to be more accurate than this at some point.
                            if let Some(filename) = changed_path.file_name() {
                                if filename == *META_FILEPATH {
                                    let _ = watcher_router_clone.broadcast();
                                }
                            }
                        }
                        Ok(None) => {
                            // not an event we are interested in.
                        }
                        Err(_e) => {
                            // the watch send channel was dropped
                            break;
                        }
                    }
                }
            })?;
        Ok(WatcherWrapper {
            _watcher: Mutex::new(watcher),
            watcher_router,
        })
    }

    pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
        self.watcher_router.subscribe(watch_callback)
    }
}
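
Usage-wise, a caller subscribes a boxed closure and holds on to the returned `WatchHandle`; dropping the handle unsubscribes it. A sketch, with a made-up directory path:

// Sketch: the callback fires whenever meta.json changes under the watched root.
let mut wrapper = WatcherWrapper::new(Path::new("/tmp/some_index")).unwrap();
let handle = wrapper.watch(Box::new(|| {
    println!("meta.json changed; readers may want to reload");
}));
// ... keep `handle` alive for as long as notifications are wanted.
drop(handle);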

/// Directory storing data in files, read via mmap.
@@ -131,31 +209,72 @@ impl MmapCache {
/// On Windows the semantics are again different.
#[derive(Clone)]
pub struct MmapDirectory {
    inner: Arc<MmapDirectoryInner>,
}

struct MmapDirectoryInner {
    root_path: PathBuf,
    mmap_cache: Arc<RwLock<MmapCache>>,
    _temp_directory: Arc<Option<TempDir>>,
    mmap_cache: RwLock<MmapCache>,
    _temp_directory: Option<TempDir>,
    watcher: RwLock<Option<WatcherWrapper>>,
}

impl MmapDirectoryInner {
    fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
        MmapDirectoryInner {
            root_path,
            mmap_cache: Default::default(),
            _temp_directory: temp_directory,
            watcher: RwLock::new(None),
        }
    }

    fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
        // A lot of juggling here, to ensure we don't do anything that panics
        // while the rwlock is held. That way we ensure that the rwlock cannot
        // be poisoned.
        //
        // The downside is that we might create a watch wrapper that is not useful.
        let need_initialization = self.watcher.read().unwrap().is_none();
        if need_initialization {
            let watch_wrapper = WatcherWrapper::new(&self.root_path)?;
            let mut watch_wlock = self.watcher.write().unwrap();
            // the watcher could have been initialized when we released the lock, and
            // we do not want to lose the watched files that were set.
            if watch_wlock.is_none() {
                *watch_wlock = Some(watch_wrapper);
            }
        }
        if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
            Ok(watch_wrapper.watch(watch_callback))
        } else {
            unreachable!("At this point, watch wrapper is supposed to be initialized");
        }
    }
}

impl fmt::Debug for MmapDirectory {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "MmapDirectory({:?})", self.root_path)
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "MmapDirectory({:?})", self.inner.root_path)
    }
}

impl MmapDirectory {
    fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectory {
        let inner = MmapDirectoryInner::new(root_path, temp_directory);
        MmapDirectory {
            inner: Arc::new(inner),
        }
    }

    /// Creates a new MmapDirectory in a temporary directory.
    ///
    /// This is mostly useful to test the MmapDirectory itself.
    /// For your unit tests, prefer the RAMDirectory.
    pub fn create_from_tempdir() -> io::Result<MmapDirectory> {
        let tempdir = TempDir::new("index")?;
    pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
        let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
        let tempdir_path = PathBuf::from(tempdir.path());
        let directory = MmapDirectory {
            root_path: tempdir_path,
            mmap_cache: Arc::new(RwLock::new(MmapCache::default())),
            _temp_directory: Arc::new(Some(tempdir)),
        };
        Ok(directory)
        Ok(MmapDirectory::new(tempdir_path, Some(tempdir)))
    }

    /// Opens a MmapDirectory in a directory.
@@ -173,18 +292,14 @@ impl MmapDirectory {
            directory_path,
        )))
        } else {
            Ok(MmapDirectory {
                root_path: PathBuf::from(directory_path),
                mmap_cache: Arc::new(RwLock::new(MmapCache::default())),
                _temp_directory: Arc::new(None),
            })
            Ok(MmapDirectory::new(PathBuf::from(directory_path), None))
        }
    }

    /// Joins a relative_path to the directory `root_path`
    /// to create a proper complete `filepath`.
    fn resolve_path(&self, relative_path: &Path) -> PathBuf {
        self.root_path.join(relative_path)
        self.inner.root_path.join(relative_path)
    }

    /// Sync the root directory.
@@ -202,14 +317,14 @@ impl MmapDirectory {
        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;
            use winapi::winbase;
            use winapi::um::winbase;

            open_opts
                .write(true)
                .custom_flags(winbase::FILE_FLAG_BACKUP_SEMANTICS);
        }

        let fd = open_opts.open(&self.root_path)?;
        let fd = open_opts.open(&self.inner.root_path)?;
        fd.sync_all()?;
        Ok(())
    }
@@ -219,9 +334,15 @@ impl MmapDirectory {
    ///
    /// The `MmapDirectory` embeds an `MmapCache`
    /// to avoid multiplying the `mmap` system calls.
    pub fn get_cache_info(&mut self) -> CacheInfo {
        self.mmap_cache
    pub fn get_cache_info(&self) -> CacheInfo {
        self.inner
            .mmap_cache
            .write()
            .expect("mmap cache lock is poisoned")
            .remove_weak_ref();
        self.inner
            .mmap_cache
            .read()
            .expect("Mmap cache lock is poisoned.")
            .get_info()
    }
@@ -269,12 +390,18 @@ impl Seek for SafeFileWriter {
    }
}

impl TerminatingWrite for SafeFileWriter {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        self.flush()
    }
}

impl Directory for MmapDirectory {
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
        debug!("Open Read {:?}", path);
        let full_path = self.resolve_path(path);

        let mut mmap_cache = self.mmap_cache.write().map_err(|_| {
        let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
            let msg = format!(
                "Failed to acquire write lock \
                 on mmap cache while reading {:?}",
@@ -282,11 +409,33 @@ impl Directory for MmapDirectory {
            );
            IOError::with_path(path.to_owned(), make_io_err(msg))
        })?;

        Ok(mmap_cache
            .get_mmap(&full_path)?
            .map(ReadOnlySource::Mmap)
            .unwrap_or_else(|| ReadOnlySource::Anonymous(SharedVecSlice::empty())))
            .map(ReadOnlySource::from)
            .unwrap_or_else(ReadOnlySource::empty))
    }

    /// Any entry associated to the path in the mmap will be
    /// removed before the file is deleted.
    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        let full_path = self.resolve_path(path);
        match fs::remove_file(&full_path) {
            Ok(_) => self
                .sync_directory()
                .map_err(|e| IOError::with_path(path.to_owned(), e).into()),
            Err(e) => {
                if e.kind() == io::ErrorKind::NotFound {
                    Err(DeleteError::FileDoesNotExist(path.to_owned()))
                } else {
                    Err(IOError::with_path(path.to_owned(), e).into())
                }
            }
        }
    }

    fn exists(&self, path: &Path) -> bool {
        let full_path = self.resolve_path(path);
        full_path.exists()
    }

    fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
@@ -319,44 +468,6 @@ impl Directory for MmapDirectory {
        Ok(BufWriter::new(Box::new(writer)))
    }

    /// Any entry associated to the path in the mmap will be
    /// removed before the file is deleted.
    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        debug!("Deleting file {:?}", path);
        let full_path = self.resolve_path(path);
        let mut mmap_cache = self.mmap_cache.write().map_err(|_| {
            let msg = format!(
                "Failed to acquire write lock \
                 on mmap cache while deleting {:?}",
                path
            );
            IOError::with_path(path.to_owned(), make_io_err(msg))
        })?;
        mmap_cache.discard_from_cache(path);

        // Removing the entry in the MMap cache.
        // The munmap will appear on Drop,
        // when the last reference is gone.
        mmap_cache.cache.remove(&full_path);
        match fs::remove_file(&full_path) {
            Ok(_) => self
                .sync_directory()
                .map_err(|e| IOError::with_path(path.to_owned(), e).into()),
            Err(e) => {
                if e.kind() == io::ErrorKind::NotFound {
                    Err(DeleteError::FileDoesNotExist(path.to_owned()))
                } else {
                    Err(IOError::with_path(path.to_owned(), e).into())
                }
            }
        }
    }

    fn exists(&self, path: &Path) -> bool {
        let full_path = self.resolve_path(path);
        full_path.exists()
    }

    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
        let full_path = self.resolve_path(path);
        let mut buffer = Vec::new();
@@ -403,6 +514,10 @@ impl Directory for MmapDirectory {
            _file: file,
        })))
    }

    fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
        self.inner.watch(watch_callback)
    }
}

#[cfg(test)]
@@ -412,9 +527,15 @@ mod tests {
    // The following tests are specific to the MmapDirectory

    use super::*;
    use crate::indexer::LogMergePolicy;
    use crate::schema::{Schema, SchemaBuilder, TEXT};
    use crate::Index;
    use crate::ReloadPolicy;
    use std::fs;
    use std::sync::atomic::{AtomicUsize, Ordering};

    #[test]
    fn test_open_non_existant_path() {
    fn test_open_non_existent_path() {
        assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
    }

@@ -436,7 +557,7 @@ mod tests {

    #[test]
    fn test_cache() {
        let content = "abc".as_bytes();
        let content = b"abc";

        // here we test if the cache releases
        // mmaps correctly.
@@ -452,26 +573,117 @@ mod tests {
                w.flush().unwrap();
            }
        }
        {
            for (i, path) in paths.iter().enumerate() {
                let _r = mmap_directory.open_read(path).unwrap();
                assert_eq!(mmap_directory.get_cache_info().mmapped.len(), i + 1);
            }
            for path in paths.iter() {
                let _r = mmap_directory.open_read(path).unwrap();
                assert_eq!(mmap_directory.get_cache_info().mmapped.len(), num_paths);
            }
            for (i, path) in paths.iter().enumerate() {
                mmap_directory.delete(path).unwrap();
                assert_eq!(
                    mmap_directory.get_cache_info().mmapped.len(),
                    num_paths - i - 1
                );
            }

        let mut keep = vec![];
        for (i, path) in paths.iter().enumerate() {
            keep.push(mmap_directory.open_read(path).unwrap());
            assert_eq!(mmap_directory.get_cache_info().mmapped.len(), i + 1);
        }
        assert_eq!(mmap_directory.get_cache_info().counters.hit, 0);
        assert_eq!(mmap_directory.get_cache_info().counters.miss, 10);
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);
        for path in paths.iter() {
            let _r = mmap_directory.open_read(path).unwrap();
            assert_eq!(mmap_directory.get_cache_info().mmapped.len(), num_paths);
        }
        assert_eq!(mmap_directory.get_cache_info().counters.hit, 10);
        assert_eq!(mmap_directory.get_cache_info().counters.miss, 10);
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);

        for path in paths.iter() {
            let _r = mmap_directory.open_read(path).unwrap();
            assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);
        }

        assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
        assert_eq!(mmap_directory.get_cache_info().counters.miss, 10);
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);
        drop(keep);
        for path in paths.iter() {
            let _r = mmap_directory.open_read(path).unwrap();
            assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 1);
        }
        assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
        assert_eq!(mmap_directory.get_cache_info().counters.miss, 20);
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);

        for path in &paths {
            mmap_directory.delete(path).unwrap();
        }
        assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
        assert_eq!(mmap_directory.get_cache_info().counters.miss, 20);
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
        for path in paths.iter() {
            assert!(mmap_directory.open_read(path).is_err());
        }
        assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
        assert_eq!(mmap_directory.get_cache_info().counters.miss, 30);
        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
    }

    #[test]
    fn test_watch_wrapper() {
        let counter: Arc<AtomicUsize> = Default::default();
        let counter_clone = counter.clone();
        let tmp_dir = tempfile::TempDir::new().unwrap();
        let tmp_dirpath = tmp_dir.path().to_owned();
        let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
        let tmp_file = tmp_dirpath.join(*META_FILEPATH);
        let _handle = watch_wrapper.watch(Box::new(move || {
            counter_clone.fetch_add(1, Ordering::SeqCst);
        }));
        let (sender, receiver) = crossbeam::channel::unbounded();
        let _handle2 = watch_wrapper.watch(Box::new(move || {
            let _ = sender.send(());
        }));
        assert_eq!(counter.load(Ordering::SeqCst), 0);
        fs::write(&tmp_file, b"whateverwilldo").unwrap();
        assert!(receiver.recv().is_ok());
        assert!(counter.load(Ordering::SeqCst) >= 1);
    }

    #[test]
    fn test_mmap_released() {
        let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
        let mut schema_builder: SchemaBuilder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();

        {
            let index = Index::create(mmap_directory.clone(), schema).unwrap();

            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
            let mut log_merge_policy = LogMergePolicy::default();
            log_merge_policy.set_min_merge_size(3);
            index_writer.set_merge_policy(Box::new(log_merge_policy));
            for _num_commits in 0..10 {
                for _ in 0..10 {
                    index_writer.add_document(doc!(text_field=>"abc"));
                }
                index_writer.commit().unwrap();
            }

            let reader = index
                .reader_builder()
                .reload_policy(ReloadPolicy::Manual)
                .try_into()
                .unwrap();

            for _ in 0..4 {
                index_writer.add_document(doc!(text_field=>"abc"));
                index_writer.commit().unwrap();
                reader.reload().unwrap();
            }
            index_writer.wait_merging_threads().unwrap();

            reader.reload().unwrap();
            let num_segments = reader.searcher().segment_readers().len();
            assert!(num_segments <= 4);
            assert_eq!(
                num_segments * 7,
                mmap_directory.get_cache_info().mmapped.len()
            );
        }
        assert!(mmap_directory.get_cache_info().mmapped.is_empty());
    }
}

@@ -9,10 +9,11 @@ mod mmap_directory;

mod directory;
mod directory_lock;
mod footer;
mod managed_directory;
mod ram_directory;
mod read_only_source;
mod shared_vec_slice;
mod watch_event_router;

/// Errors specific to the directory module.
pub mod error;
@@ -22,22 +23,74 @@ pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
use std::io::{BufWriter, Seek, Write};
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::path::PathBuf;
/// Outcome of the garbage collection.
pub struct GarbageCollectionResult {
    /// List of files that were deleted in this cycle
    pub deleted_files: Vec<PathBuf>,
    /// List of files that were scheduled to be deleted in this cycle,
    /// but whose deletion did not work. This typically happens on Windows,
    /// as deleting a memory-mapped file is forbidden.
    ///
    /// If a searcher is still held, a file cannot be deleted.
    /// This is not considered a bug, the file will simply be deleted
    /// in the next GC.
    pub failed_to_delete_files: Vec<PathBuf>,
}
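
Because `garbage_collect` now surfaces this struct instead of only logging, callers can tell deferred deletions apart from real progress. A small sketch, assuming `managed` is a `ManagedDirectory` and `living_files` is some caller-provided closure:

// Sketch: files that are still mmapped (typically on Windows) end up in
// failed_to_delete_files and are simply retried on the next GC cycle.
let result = managed.garbage_collect(living_files)?;
println!("reclaimed {} files", result.deleted_files.len());
if !result.failed_to_delete_files.is_empty() {
    // Not an error: these deletions are retried once readers release their mmaps.
    println!("{} deletions deferred", result.failed_to_delete_files.len());
}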

#[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory;

pub(crate) use self::managed_directory::ManagedDirectory;
pub use self::managed_directory::ManagedDirectory;

/// Synonym of Seek + Write
pub trait SeekableWrite: Seek + Write {}
impl<T: Seek + Write> SeekableWrite for T {}
/// Struct used to prevent calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly.
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(());

/// Trait used to indicate when no more writes need to be done on a writer.
pub trait TerminatingWrite: Write {
    /// Indicates that the writer will no longer be used. Internally calls `terminate_ref`.
    fn terminate(mut self) -> io::Result<()>
    where
        Self: Sized,
    {
        self.terminate_ref(AntiCallToken(()))
    }

    /// You should implement this function to define custom behavior.
    /// This function should flush any buffer it may hold.
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}

impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        self.as_mut().terminate_ref(token)
    }
}

impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
    fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
        self.flush()?;
        self.get_mut().terminate_ref(a)
    }
}

#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
    fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
        self.flush()
    }
}
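
Implementing the trait for a new writer only requires `terminate_ref`; the consuming `terminate(self)` wrapper comes for free, and `AntiCallToken` guarantees callers cannot invoke `terminate_ref` directly. A sketch with an invented `CountingWriter`:

// Hypothetical writer that counts bytes; illustrates implementing TerminatingWrite.
struct CountingWriter<W: Write> {
    inner: W,
    written: u64,
}

impl<W: Write> Write for CountingWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let n = self.inner.write(buf)?;
        self.written += n as u64;
        Ok(n)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}

impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
        // Flush our own state, then let the wrapped writer finalize itself
        // (e.g. FooterProxy appends its footer at this point).
        self.flush()?;
        self.inner.terminate_ref(token)
    }
}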

/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write
/// and Seek.
pub type WritePtr = BufWriter<Box<SeekableWrite>>;
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>;

#[cfg(test)]
mod tests;

@@ -1,8 +1,10 @@
use super::shared_vec_slice::SharedVecSlice;
use common::make_io_err;
use directory::error::{DeleteError, IOError, OpenReadError, OpenWriteError};
use directory::WritePtr;
use directory::{Directory, ReadOnlySource};
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
@@ -22,13 +24,13 @@ use std::sync::{Arc, RwLock};
///
struct VecWriter {
    path: PathBuf,
    shared_directory: InnerDirectory,
    shared_directory: RAMDirectory,
    data: Cursor<Vec<u8>>,
    is_flushed: bool,
}

impl VecWriter {
    fn new(path_buf: PathBuf, shared_directory: InnerDirectory) -> VecWriter {
    fn new(path_buf: PathBuf, shared_directory: RAMDirectory) -> VecWriter {
        VecWriter {
            path: path_buf,
            data: Cursor::new(Vec::new()),
@@ -64,80 +66,59 @@ impl Write for VecWriter {

    fn flush(&mut self) -> io::Result<()> {
        self.is_flushed = true;
        self.shared_directory
            .write(self.path.clone(), self.data.get_ref())?;
        let mut fs = self.shared_directory.fs.write().unwrap();
        fs.write(self.path.clone(), self.data.get_ref());
        Ok(())
    }
}

#[derive(Clone)]
struct InnerDirectory(Arc<RwLock<HashMap<PathBuf, Arc<Vec<u8>>>>>);
impl TerminatingWrite for VecWriter {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        self.flush()
    }
}

#[derive(Default)]
struct InnerDirectory {
    fs: HashMap<PathBuf, ReadOnlySource>,
    watch_router: WatchCallbackList,
}

impl InnerDirectory {
    fn new() -> InnerDirectory {
        InnerDirectory(Arc::new(RwLock::new(HashMap::new())))
    }

    fn write(&self, path: PathBuf, data: &[u8]) -> io::Result<bool> {
        let mut map = self.0.write().map_err(|_| {
            make_io_err(format!(
                "Failed to lock the directory, when trying to write {:?}",
                path
            ))
        })?;
        let prev_value = map.insert(path, Arc::new(Vec::from(data)));
        Ok(prev_value.is_some())
    fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
        let data = ReadOnlySource::new(Vec::from(data));
        self.fs.insert(path, data).is_some()
    }

    fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
        self.0
            .read()
            .map_err(|_| {
                let msg = format!(
                    "Failed to acquire read lock for the \
                     directory when trying to read {:?}",
                    path
                );
                let io_err = make_io_err(msg);
                OpenReadError::IOError(IOError::with_path(path.to_owned(), io_err))
            })
            .and_then(|readable_map| {
                readable_map
                    .get(path)
                    .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
                    .map(Arc::clone)
                    .map(|data| ReadOnlySource::Anonymous(SharedVecSlice::new(data)))
            })
        self.fs
            .get(path)
            .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
            .map(Clone::clone)
    }

    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        self.0
            .write()
            .map_err(|_| {
                let msg = format!(
                    "Failed to acquire write lock for the \
                     directory when trying to delete {:?}",
                    path
                );
                let io_err = make_io_err(msg);
                DeleteError::IOError(IOError::with_path(path.to_owned(), io_err))
            })
            .and_then(|mut writable_map| match writable_map.remove(path) {
                Some(_) => Ok(()),
                None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
            })
    fn delete(&mut self, path: &Path) -> result::Result<(), DeleteError> {
        match self.fs.remove(path) {
            Some(_) => Ok(()),
            None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
        }
    }

    fn exists(&self, path: &Path) -> bool {
        self.0
            .read()
            .expect("Failed to get read lock directory.")
            .contains_key(path)
        self.fs.contains_key(path)
    }

    fn watch(&mut self, watch_handle: WatchCallback) -> WatchHandle {
        self.watch_router.subscribe(watch_handle)
    }

    fn total_mem_usage(&self) -> usize {
        self.fs.values().map(|f| f.len()).sum()
    }
}

impl fmt::Debug for RAMDirectory {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "RAMDirectory")
    }
}
@@ -147,33 +128,63 @@ impl fmt::Debug for RAMDirectory {
/// It is mainly meant for unit testing.
/// Writes are only made visible upon flushing.
///
#[derive(Clone)]
#[derive(Clone, Default)]
pub struct RAMDirectory {
    fs: InnerDirectory,
    fs: Arc<RwLock<InnerDirectory>>,
}

impl RAMDirectory {
    /// Constructor
    pub fn create() -> RAMDirectory {
        RAMDirectory {
            fs: InnerDirectory::new(),
        Self::default()
    }

    /// Returns the sum of the size of the different files
    /// in the RAMDirectory.
    pub fn total_mem_usage(&self) -> usize {
        self.fs.read().unwrap().total_mem_usage()
    }

    /// Writes a copy of all the files saved in the RAMDirectory into the target `Directory`.
    ///
    /// All files are written using the `Directory::open_write` API, even if they were
    /// originally written using the `atomic_write` API.
    ///
    /// If an error is encountered, files may be persisted partially.
    pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
        let wlock = self.fs.write().unwrap();
        for (path, source) in wlock.fs.iter() {
            let mut dest_wrt = dest.open_write(path)?;
            dest_wrt.write_all(source.as_slice())?;
            dest_wrt.terminate()?;
        }
        Ok(())
    }
}

impl Directory for RAMDirectory {
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
        self.fs.open_read(path)
        self.fs.read().unwrap().open_read(path)
    }

    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        fail_point!("RAMDirectory::delete", |_| {
            use crate::directory::error::IOError;
            let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
            Err(DeleteError::from(io_error))
        });
        self.fs.write().unwrap().delete(path)
    }

    fn exists(&self, path: &Path) -> bool {
        self.fs.read().unwrap().exists(path)
    }

    fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
        let mut fs = self.fs.write().unwrap();
        let path_buf = PathBuf::from(path);
        let vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone());

        let exists = self
            .fs
            .write(path_buf.clone(), &Vec::new())
            .map_err(|err| IOError::with_path(path.to_owned(), err))?;
        let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
        let exists = fs.write(path_buf.clone(), &[]);
        // force the creation of the file to mimic the MMap directory.
        if exists {
            Err(OpenWriteError::FileAlreadyExists(path_buf))
@@ -182,29 +193,55 @@ impl Directory for RAMDirectory {
        }
    }

    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
        self.fs.delete(path)
    }

    fn exists(&self, path: &Path) -> bool {
        self.fs.exists(path)
    }

    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
        let read = self.open_read(path)?;
        Ok(read.as_slice().to_owned())
        Ok(self.open_read(path)?.as_slice().to_owned())
    }

    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
        fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
            io::ErrorKind::Other,
            msg.unwrap_or("Undefined".to_string())
            msg.unwrap_or_else(|| "Undefined".to_string())
        )));
        let path_buf = PathBuf::from(path);
        let mut vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone());
        self.fs.write(path_buf, &Vec::new())?;

        // Reserve the path to prevent calls to .write() from succeeding.
        self.fs.write().unwrap().write(path_buf.clone(), &[]);

        let mut vec_writer = VecWriter::new(path_buf, self.clone());
        vec_writer.write_all(data)?;
        vec_writer.flush()?;
        if path == Path::new(&*META_FILEPATH) {
            let _ = self.fs.write().unwrap().watch_router.broadcast();
        }
        Ok(())
    }

    fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
        Ok(self.fs.write().unwrap().watch(watch_callback))
    }
}

#[cfg(test)]
mod tests {
    use super::RAMDirectory;
    use crate::Directory;
    use std::io::Write;
    use std::path::Path;

    #[test]
    fn test_persist() {
        let msg_atomic: &'static [u8] = b"atomic is the way";
        let msg_seq: &'static [u8] = b"sequential is the way";
        let path_atomic: &'static Path = Path::new("atomic");
        let path_seq: &'static Path = Path::new("seq");
        let mut directory = RAMDirectory::create();
        assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
        let mut wrt = directory.open_write(path_seq).unwrap();
        assert!(wrt.write_all(msg_seq).is_ok());
        assert!(wrt.flush().is_ok());
        let mut directory_copy = RAMDirectory::create();
        assert!(directory.persist(&mut directory_copy).is_ok());
        assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
        assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
    }
}

@@ -1,9 +1,9 @@
use super::shared_vec_slice::SharedVecSlice;
use common::HasLen;
#[cfg(feature = "mmap")]
use fst::raw::MmapReadOnly;
use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;

pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

/// Read object that represents files in tantivy.
///
@@ -11,12 +11,10 @@ use std::ops::Deref;
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
/// held by this object should never be altered or destroyed.
pub enum ReadOnlySource {
    /// Mmap source of data
    #[cfg(feature = "mmap")]
    Mmap(MmapReadOnly),
    /// Wrapping a `Vec<u8>`
    Anonymous(SharedVecSlice),
pub struct ReadOnlySource {
    data: Arc<BoxedData>,
    start: usize,
    stop: usize,
}

unsafe impl StableDeref for ReadOnlySource {}
@@ -30,19 +28,38 @@ impl Deref for ReadOnlySource {
    }
}

impl From<Arc<BoxedData>> for ReadOnlySource {
    fn from(data: Arc<BoxedData>) -> Self {
        let len = data.len();
        ReadOnlySource {
            data,
            start: 0,
            stop: len,
        }
    }
}

impl ReadOnlySource {
    pub(crate) fn new<D>(data: D) -> ReadOnlySource
    where
        D: Deref<Target = [u8]> + Send + Sync + 'static,
    {
        let len = data.len();
        ReadOnlySource {
            data: Arc::new(Box::new(data)),
            start: 0,
            stop: len,
        }
    }

    /// Creates an empty ReadOnlySource
    pub fn empty() -> ReadOnlySource {
        ReadOnlySource::Anonymous(SharedVecSlice::empty())
        ReadOnlySource::new(&[][..])
    }

    /// Returns the data underlying the ReadOnlySource object.
    pub fn as_slice(&self) -> &[u8] {
        match *self {
            #[cfg(feature = "mmap")]
            ReadOnlySource::Mmap(ref mmap_read_only) => mmap_read_only.as_slice(),
            ReadOnlySource::Anonymous(ref shared_vec) => shared_vec.as_slice(),
        }
        &self.data[self.start..self.stop]
    }

    /// Splits into 2 `ReadOnlySource`, at the offset given
@@ -53,6 +70,12 @@ impl ReadOnlySource {
        (left, right)
    }

    /// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
    pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left_len = self.len() - right_len;
        self.split(left_len)
    }

    /// Creates a ReadOnlySource that is just a
    /// view over a slice of the data.
    ///
@@ -63,22 +86,18 @@ impl ReadOnlySource {
    /// worth of data in anonymous memory, and only a
    /// 1KB slice is remaining, the whole `500MBs`
    /// are retained in memory.
    pub fn slice(&self, from_offset: usize, to_offset: usize) -> ReadOnlySource {
    pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
        assert!(
            from_offset <= to_offset,
            start <= stop,
            "Requested negative slice [{}..{}]",
            from_offset,
            to_offset
            start,
            stop
        );
        match *self {
            #[cfg(feature = "mmap")]
            ReadOnlySource::Mmap(ref mmap_read_only) => {
                let sliced_mmap = mmap_read_only.range(from_offset, to_offset - from_offset);
                ReadOnlySource::Mmap(sliced_mmap)
            }
            ReadOnlySource::Anonymous(ref shared_vec) => {
                ReadOnlySource::Anonymous(shared_vec.slice(from_offset, to_offset))
            }
        assert!(stop <= self.len());
        ReadOnlySource {
            data: self.data.clone(),
            start: self.start + start,
            stop: self.start + stop,
        }
    }
|
||||
|
||||
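The hunk above replaces the old `Mmap`/`Anonymous` enum with a single `Arc`-backed struct: a slice is the same `Arc` plus narrowed `start`/`stop` bounds, which is exactly why the doc comment warns that keeping a 1KB slice of a 500MB anonymous buffer retains the whole 500MB. A minimal standalone sketch of that mechanism (a plain `Arc<Vec<u8>>` stands in for `BoxedData`; this is an illustration, not tantivy's API):

```rust
use std::sync::Arc;

// Stand-in for the ReadOnlySource pattern above: slicing clones the Arc,
// never the bytes, so every slice keeps the full backing buffer alive.
struct Source {
    data: Arc<Vec<u8>>,
    start: usize,
    stop: usize,
}

impl Source {
    fn slice(&self, start: usize, stop: usize) -> Source {
        assert!(start <= stop && stop <= self.stop - self.start);
        Source {
            data: Arc::clone(&self.data), // cheap pointer copy
            start: self.start + start,
            stop: self.start + stop,
        }
    }

    fn as_slice(&self) -> &[u8] {
        &self.data[self.start..self.stop]
    }
}

fn main() {
    let big = Source {
        data: Arc::new(vec![7u8; 1 << 20]), // 1 MiB buffer
        start: 0,
        stop: 1 << 20,
    };
    let tiny = big.slice(0, 1024);
    drop(big);
    // `tiny` is 1 KiB long, but the whole 1 MiB allocation is still alive.
    assert_eq!(tiny.as_slice().len(), 1024);
    assert_eq!(Arc::strong_count(&tiny.data), 1);
}
```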
@@ -87,8 +106,7 @@ impl ReadOnlySource {
    ///
    /// Equivalent to `.slice(from_offset, self.len())`
    pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
        let len = self.len();
        self.slice(from_offset, len)
        self.slice(from_offset, self.len())
    }

    /// Like `.slice(...)` but enforcing only the `to`
@@ -102,19 +120,18 @@ impl ReadOnlySource {

impl HasLen for ReadOnlySource {
    fn len(&self) -> usize {
        self.as_slice().len()
        self.stop - self.start
    }
}

impl Clone for ReadOnlySource {
    fn clone(&self) -> Self {
        self.slice(0, self.len())
        self.slice_from(0)
    }
}

impl From<Vec<u8>> for ReadOnlySource {
    fn from(data: Vec<u8>) -> ReadOnlySource {
        let shared_data = SharedVecSlice::from(data);
        ReadOnlySource::Anonymous(shared_data)
        ReadOnlySource::new(data)
    }
}

@@ -1,41 +0,0 @@
use std::sync::Arc;

#[derive(Clone)]
pub struct SharedVecSlice {
    pub data: Arc<Vec<u8>>,
    pub start: usize,
    pub len: usize,
}

impl SharedVecSlice {
    pub fn empty() -> SharedVecSlice {
        SharedVecSlice::new(Arc::new(Vec::new()))
    }

    pub fn new(data: Arc<Vec<u8>>) -> SharedVecSlice {
        let data_len = data.len();
        SharedVecSlice {
            data,
            start: 0,
            len: data_len,
        }
    }

    pub fn as_slice(&self) -> &[u8] {
        &self.data[self.start..self.start + self.len]
    }

    pub fn slice(&self, from_offset: usize, to_offset: usize) -> SharedVecSlice {
        SharedVecSlice {
            data: Arc::clone(&self.data),
            start: self.start + from_offset,
            len: to_offset - from_offset,
        }
    }
}

impl From<Vec<u8>> for SharedVecSlice {
    fn from(data: Vec<u8>) -> SharedVecSlice {
        SharedVecSlice::new(Arc::new(data))
    }
}
@@ -1,129 +1,232 @@
use super::*;
use std::io::{Seek, SeekFrom, Write};
use futures::channel::oneshot;
use futures::executor::block_on;
use std::io::Write;
use std::mem;
use std::path::{Path, PathBuf};
use std::time;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;
use std::time::Duration;

lazy_static! {
    static ref TEST_PATH: &'static Path = Path::new("some_path_for_test");
}

#[test]
fn test_ram_directory() {
    let mut ram_directory = RAMDirectory::create();
    test_directory(&mut ram_directory);
}

#[test]
#[cfg(feature = "mmap")]
fn test_mmap_directory() {
    let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
    test_directory(&mut mmap_directory);
mod mmap_directory_tests {
    use crate::directory::MmapDirectory;

    type DirectoryImpl = MmapDirectory;

    fn make_directory() -> DirectoryImpl {
        MmapDirectory::create_from_tempdir().unwrap()
    }

    #[test]
    fn test_simple() {
        let mut directory = make_directory();
        super::test_simple(&mut directory);
    }

    #[test]
    fn test_write_create_the_file() {
        let mut directory = make_directory();
        super::test_write_create_the_file(&mut directory);
    }

    #[test]
    fn test_rewrite_forbidden() {
        let mut directory = make_directory();
        super::test_rewrite_forbidden(&mut directory);
    }

    #[test]
    fn test_directory_delete() {
        let mut directory = make_directory();
        super::test_directory_delete(&mut directory);
    }

    #[test]
    fn test_lock_non_blocking() {
        let mut directory = make_directory();
        super::test_lock_non_blocking(&mut directory);
    }

    #[test]
    fn test_lock_blocking() {
        let mut directory = make_directory();
        super::test_lock_blocking(&mut directory);
    }

    #[test]
    fn test_watch() {
        let mut directory = make_directory();
        super::test_watch(&mut directory);
    }
}

mod ram_directory_tests {
    use crate::directory::RAMDirectory;

    type DirectoryImpl = RAMDirectory;

    fn make_directory() -> DirectoryImpl {
        RAMDirectory::default()
    }

    #[test]
    fn test_simple() {
        let mut directory = make_directory();
        super::test_simple(&mut directory);
    }

    #[test]
    fn test_write_create_the_file() {
        let mut directory = make_directory();
        super::test_write_create_the_file(&mut directory);
    }

    #[test]
    fn test_rewrite_forbidden() {
        let mut directory = make_directory();
        super::test_rewrite_forbidden(&mut directory);
    }

    #[test]
    fn test_directory_delete() {
        let mut directory = make_directory();
        super::test_directory_delete(&mut directory);
    }

    #[test]
    fn test_lock_non_blocking() {
        let mut directory = make_directory();
        super::test_lock_non_blocking(&mut directory);
    }

    #[test]
    fn test_lock_blocking() {
        let mut directory = make_directory();
        super::test_lock_blocking(&mut directory);
    }

    #[test]
    fn test_watch() {
        let mut directory = make_directory();
        super::test_watch(&mut directory);
    }
}

#[test]
#[should_panic]
fn ram_directory_panics_if_flush_forgotten() {
    let test_path: &'static Path = Path::new("some_path_for_test");
    let mut ram_directory = RAMDirectory::create();
    let mut write_file = ram_directory.open_write(*TEST_PATH).unwrap();
    let mut write_file = ram_directory.open_write(test_path).unwrap();
    assert!(write_file.write_all(&[4]).is_ok());
}

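The `should_panic` test above pins down a contract of the new `RAMDirectory` writer: forgetting to flush (or terminate) before the writer is dropped is a programming error. A hedged sketch of the non-panicking counterpart, using the same calls as the test and assuming the types are reachable at `tantivy::directory` from outside the crate:

```rust
use std::io::Write;
use std::path::Path;
use tantivy::directory::{Directory, RAMDirectory};

fn main() -> std::io::Result<()> {
    let mut ram_directory = RAMDirectory::create();
    let mut write_file = ram_directory
        .open_write(Path::new("some_path_for_test"))
        .unwrap();
    write_file.write_all(&[4])?;
    // Flushing before drop is exactly what the panic test leaves out.
    write_file.flush()?;
    Ok(())
}
```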
fn test_simple(directory: &mut Directory) {
fn test_simple(directory: &mut dyn Directory) {
    let test_path: &'static Path = Path::new("some_path_for_test");
    {
    {
        let mut write_file = directory.open_write(*TEST_PATH).unwrap();
        assert!(directory.exists(*TEST_PATH));
        write_file.write_all(&[4]).unwrap();
        write_file.write_all(&[3]).unwrap();
        write_file.write_all(&[7, 3, 5]).unwrap();
        write_file.flush().unwrap();
    }
    let read_file = directory.open_read(*TEST_PATH).unwrap();
        let mut write_file = directory.open_write(test_path).unwrap();
        assert!(directory.exists(test_path));
        write_file.write_all(&[4]).unwrap();
        write_file.write_all(&[3]).unwrap();
        write_file.write_all(&[7, 3, 5]).unwrap();
        write_file.flush().unwrap();
    }
    {
        let read_file = directory.open_read(test_path).unwrap();
        let data: &[u8] = &*read_file;
        assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
    }

    assert!(directory.delete(*TEST_PATH).is_ok());
    assert!(!directory.exists(*TEST_PATH));
    assert!(directory.delete(test_path).is_ok());
    assert!(!directory.exists(test_path));
}

fn test_seek(directory: &mut Directory) {
fn test_rewrite_forbidden(directory: &mut dyn Directory) {
    let test_path: &'static Path = Path::new("some_path_for_test");
    {
    {
        let mut write_file = directory.open_write(*TEST_PATH).unwrap();
        write_file.write_all(&[4, 3, 7, 3, 5]).unwrap();
        write_file.seek(SeekFrom::Start(0)).unwrap();
        write_file.write_all(&[3, 1]).unwrap();
        write_file.flush().unwrap();
    }
    let read_file = directory.open_read(*TEST_PATH).unwrap();
    let data: &[u8] = &*read_file;
    assert_eq!(data, &[3u8, 1u8, 7u8, 3u8, 5u8]);
        directory.open_write(test_path).unwrap();
        assert!(directory.exists(test_path));
    }

    assert!(directory.delete(*TEST_PATH).is_ok());
    {
        assert!(directory.open_write(test_path).is_err());
    }
    assert!(directory.delete(test_path).is_ok());
}

fn test_rewrite_forbidden(directory: &mut Directory) {
fn test_write_create_the_file(directory: &mut dyn Directory) {
    let test_path: &'static Path = Path::new("some_path_for_test");
    {
        directory.open_write(*TEST_PATH).unwrap();
        assert!(directory.exists(*TEST_PATH));
    }
    {
        assert!(directory.open_write(*TEST_PATH).is_err());
    }
    assert!(directory.delete(*TEST_PATH).is_ok());
}

fn test_write_create_the_file(directory: &mut Directory) {
    {
        assert!(directory.open_read(*TEST_PATH).is_err());
        let _w = directory.open_write(*TEST_PATH).unwrap();
        assert!(directory.exists(*TEST_PATH));
        assert!(directory.open_read(*TEST_PATH).is_ok());
        assert!(directory.delete(*TEST_PATH).is_ok());
        assert!(directory.open_read(test_path).is_err());
        let _w = directory.open_write(test_path).unwrap();
        assert!(directory.exists(test_path));
        assert!(directory.open_read(test_path).is_ok());
        assert!(directory.delete(test_path).is_ok());
    }
}

fn test_directory_delete(directory: &mut Directory) {
    assert!(directory.open_read(*TEST_PATH).is_err());
    let mut write_file = directory.open_write(*TEST_PATH).unwrap();
fn test_directory_delete(directory: &mut dyn Directory) {
    let test_path: &'static Path = Path::new("some_path_for_test");
    assert!(directory.open_read(test_path).is_err());
    let mut write_file = directory.open_write(&test_path).unwrap();
    write_file.write_all(&[1, 2, 3, 4]).unwrap();
    write_file.flush().unwrap();
    {
        let read_handle = directory.open_read(*TEST_PATH).unwrap();
    {
        let read_handle = directory.open_read(&test_path).unwrap();
        assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
        // Mapped files can't be deleted on Windows
        if !cfg!(windows) {
            assert!(directory.delete(&test_path).is_ok());
            assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);

        // Mapped files can't be deleted on Windows
        if !cfg!(windows) {
            assert!(directory.delete(*TEST_PATH).is_ok());
            assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
        }

        assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
    }

    assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
    }

    if cfg!(windows) {
        assert!(directory.delete(*TEST_PATH).is_ok());
        assert!(directory.delete(&test_path).is_ok());
    }

    assert!(directory.open_read(*TEST_PATH).is_err());
    assert!(directory.delete(*TEST_PATH).is_err());
    assert!(directory.open_read(&test_path).is_err());
    assert!(directory.delete(&test_path).is_err());
}

fn test_directory(directory: &mut Directory) {
    test_simple(directory);
    test_seek(directory);
    test_rewrite_forbidden(directory);
    test_write_create_the_file(directory);
    test_directory_delete(directory);
    test_lock_non_blocking(directory);
    test_lock_blocking(directory);
fn test_watch(directory: &mut dyn Directory) {
    let num_progress: Arc<AtomicUsize> = Default::default();
    let counter: Arc<AtomicUsize> = Default::default();
    let counter_clone = counter.clone();
    let (sender, receiver) = crossbeam::channel::unbounded();
    let watch_callback = Box::new(move || {
        counter_clone.fetch_add(1, SeqCst);
    });
    // This callback is used to synchronize watching in our unit test.
    // We bind it to a variable because the callback is removed when that
    // handle is dropped.
    let watch_handle = directory.watch(watch_callback).unwrap();
    let _progress_listener = directory
        .watch(Box::new(move || {
            let val = num_progress.fetch_add(1, SeqCst);
            let _ = sender.send(val);
        }))
        .unwrap();

    for i in 0..10 {
        assert_eq!(i, counter.load(SeqCst));
        assert!(directory
            .atomic_write(Path::new("meta.json"), b"random_test_data_2")
            .is_ok());
        assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
        assert_eq!(i + 1, counter.load(SeqCst));
    }
    mem::drop(watch_handle);
    assert!(directory
        .atomic_write(Path::new("meta.json"), b"random_test_data")
        .is_ok());
    assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
    assert_eq!(10, counter.load(SeqCst));
}

fn test_lock_non_blocking(directory: &mut Directory) {
fn test_lock_non_blocking(directory: &mut dyn Directory) {
    {
        let lock_a_res = directory.acquire_lock(&Lock {
            filepath: PathBuf::from("a.lock"),
@@ -148,15 +251,19 @@ fn test_lock_non_blocking(directory: &mut Directory) {
        assert!(lock_a_res.is_ok());
    }

fn test_lock_blocking(directory: &mut Directory) {
fn test_lock_blocking(directory: &mut dyn Directory) {
    let lock_a_res = directory.acquire_lock(&Lock {
        filepath: PathBuf::from("a.lock"),
        is_blocking: true,
    });
    assert!(lock_a_res.is_ok());
    let in_thread = Arc::new(AtomicBool::default());
    let in_thread_clone = in_thread.clone();
    let (sender, receiver) = oneshot::channel();
    std::thread::spawn(move || {
        //< lock_a_res is sent to the thread.
        std::thread::sleep(time::Duration::from_millis(10));
        in_thread_clone.store(true, SeqCst);
        let _just_sync = block_on(receiver);
        // explicitly dropping lock_a_res. It would have been sufficient to just force it
        // to be part of the move, but the intent seems clearer that way.
        drop(lock_a_res);
@@ -169,14 +276,18 @@ fn test_lock_blocking(directory: &mut Directory) {
    });
    assert!(lock_a_res.is_err());
}
    {
        // the blocking call should wait for at least 10ms.
        let start = time::Instant::now();
        let lock_a_res = directory.acquire_lock(&Lock {
    let directory_clone = directory.box_clone();
    let (sender2, receiver2) = oneshot::channel();
    let join_handle = std::thread::spawn(move || {
        assert!(sender2.send(()).is_ok());
        let lock_a_res = directory_clone.acquire_lock(&Lock {
            filepath: PathBuf::from("a.lock"),
            is_blocking: true,
        });
        assert!(in_thread.load(SeqCst));
        assert!(lock_a_res.is_ok());
        assert!(start.elapsed().subsec_millis() >= 10);
    }
    });
    assert!(block_on(receiver2).is_ok());
    assert!(sender.send(()).is_ok());
    assert!(join_handle.join().is_ok());
}

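`test_lock_non_blocking` and `test_lock_blocking` above exercise the two `acquire_lock` modes: a non-blocking attempt on a held lock fails immediately, while a blocking attempt parks until the first guard is dropped. A minimal sketch of the non-blocking pattern, reusing the `Lock { filepath, is_blocking }` fields from the tests (assuming `Lock` is reachable at `tantivy::directory::Lock`):

```rust
use std::path::PathBuf;
use tantivy::directory::{Directory, Lock, RAMDirectory};

fn main() {
    let mut directory = RAMDirectory::create();
    let guard = directory
        .acquire_lock(&Lock {
            filepath: PathBuf::from("a.lock"),
            is_blocking: false,
        })
        .expect("first acquisition should succeed");
    // While `guard` is alive, a second non-blocking attempt fails fast.
    assert!(directory
        .acquire_lock(&Lock {
            filepath: PathBuf::from("a.lock"),
            is_blocking: false,
        })
        .is_err());
    drop(guard); // releasing the guard frees the lock for later attempts
}
```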
170  src/directory/watch_event_router.rs  (new file)
@@ -0,0 +1,170 @@
use futures::channel::oneshot;
use futures::{Future, TryFutureExt};
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;

/// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<dyn Fn() + Sync + Send>;

/// Helper struct to implement the watch method in `Directory` implementations.
///
/// It registers callbacks (See `.subscribe(...)`) and
/// calls them upon calls to `.broadcast(...)`.
#[derive(Default)]
pub struct WatchCallbackList {
    router: RwLock<Vec<Weak<WatchCallback>>>,
}

/// Controls how long a directory should watch for a file change.
///
/// After all the clones of `WatchHandle` are dropped, the associated callback will not be
/// called when a file change is detected.
#[must_use = "This `WatchHandle` controls the lifetime of the watch and should therefore be used."]
#[derive(Clone)]
pub struct WatchHandle(Arc<WatchCallback>);

impl WatchHandle {
    /// Creates a `WatchHandle`.
    pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
        WatchHandle(watch_callback)
    }
}

impl WatchCallbackList {
    /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
    pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
        let watch_callback_arc = Arc::new(watch_callback);
        let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
        self.router.write().unwrap().push(watch_callback_weak);
        WatchHandle::new(watch_callback_arc)
    }

    fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
        let mut callbacks = vec![];
        let mut router_wlock = self.router.write().unwrap();
        let mut i = 0;
        while i < router_wlock.len() {
            if let Some(watch) = router_wlock[i].upgrade() {
                callbacks.push(watch);
                i += 1;
            } else {
                router_wlock.swap_remove(i);
            }
        }
        callbacks
    }

    /// Triggers all callbacks
    pub fn broadcast(&self) -> impl Future<Output = ()> {
        let callbacks = self.list_callback();
        let (sender, receiver) = oneshot::channel();
        let result = receiver.unwrap_or_else(|_| ());
        if callbacks.is_empty() {
            let _ = sender.send(());
            return result;
        }
        let spawn_res = std::thread::Builder::new()
            .name("watch-callbacks".to_string())
            .spawn(move || {
                for callback in callbacks {
                    callback();
                }
                let _ = sender.send(());
            });
        if let Err(err) = spawn_res {
            error!(
                "Failed to spawn thread to call watch callbacks. Cause: {:?}",
                err
            );
        }
        result
    }
}

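The list only holds `Weak` references: a callback stays registered exactly as long as the `WatchHandle` returned by `subscribe` is alive, and `broadcast` returns a future that resolves once every live callback has run. A short usage sketch mirroring the unit tests below (assuming `WatchCallbackList` is reachable outside the crate the way the tests reach it at `crate::directory`):

```rust
use futures::executor::block_on;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tantivy::directory::WatchCallbackList;

fn main() {
    let watch_list = WatchCallbackList::default();
    let counter: Arc<AtomicUsize> = Default::default();
    let counter_clone = counter.clone();
    // The list keeps only a Weak pointer; the handle owns the callback.
    let handle = watch_list.subscribe(Box::new(move || {
        counter_clone.fetch_add(1, Ordering::SeqCst);
    }));
    block_on(watch_list.broadcast());
    assert_eq!(counter.load(Ordering::SeqCst), 1);
    drop(handle); // the Weak can no longer be upgraded: unsubscribed
    block_on(watch_list.broadcast());
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}
```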
#[cfg(test)]
mod tests {
    use crate::directory::WatchCallbackList;
    use futures::executor::block_on;
    use std::mem;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    #[test]
    fn test_watch_event_router_simple() {
        let watch_event_router = WatchCallbackList::default();
        let counter: Arc<AtomicUsize> = Default::default();
        let counter_clone = counter.clone();
        let inc_callback = Box::new(move || {
            counter_clone.fetch_add(1, Ordering::SeqCst);
        });
        block_on(watch_event_router.broadcast());
        assert_eq!(0, counter.load(Ordering::SeqCst));
        let handle_a = watch_event_router.subscribe(inc_callback);
        assert_eq!(0, counter.load(Ordering::SeqCst));
        block_on(watch_event_router.broadcast());
        assert_eq!(1, counter.load(Ordering::SeqCst));
        block_on(async {
            (
                watch_event_router.broadcast().await,
                watch_event_router.broadcast().await,
                watch_event_router.broadcast().await,
            )
        });
        assert_eq!(4, counter.load(Ordering::SeqCst));
        mem::drop(handle_a);
        block_on(watch_event_router.broadcast());
        assert_eq!(4, counter.load(Ordering::SeqCst));
    }

    #[test]
    fn test_watch_event_router_multiple_callback_same_key() {
        let watch_event_router = WatchCallbackList::default();
        let counter: Arc<AtomicUsize> = Default::default();
        let inc_callback = |inc: usize| {
            let counter_clone = counter.clone();
            Box::new(move || {
                counter_clone.fetch_add(inc, Ordering::SeqCst);
            })
        };
        let handle_a = watch_event_router.subscribe(inc_callback(1));
        let handle_a2 = watch_event_router.subscribe(inc_callback(10));
        assert_eq!(0, counter.load(Ordering::SeqCst));
        block_on(async {
            futures::join!(
                watch_event_router.broadcast(),
                watch_event_router.broadcast()
            )
        });
        assert_eq!(22, counter.load(Ordering::SeqCst));
        mem::drop(handle_a);
        block_on(watch_event_router.broadcast());
        assert_eq!(32, counter.load(Ordering::SeqCst));
        mem::drop(handle_a2);
        block_on(watch_event_router.broadcast());
        block_on(watch_event_router.broadcast());
        assert_eq!(32, counter.load(Ordering::SeqCst));
    }

    #[test]
    fn test_watch_event_router_multiple_callback_different_key() {
        let watch_event_router = WatchCallbackList::default();
        let counter: Arc<AtomicUsize> = Default::default();
        let counter_clone = counter.clone();
        let inc_callback = Box::new(move || {
            counter_clone.fetch_add(1, Ordering::SeqCst);
        });
        let handle_a = watch_event_router.subscribe(inc_callback);
        assert_eq!(0, counter.load(Ordering::SeqCst));
        block_on(async {
            let future1 = watch_event_router.broadcast();
            let future2 = watch_event_router.broadcast();
            futures::join!(future1, future2)
        });
        assert_eq!(2, counter.load(Ordering::SeqCst));
        mem::drop(handle_a);
        let _ = watch_event_router.broadcast();
        block_on(watch_event_router.broadcast());
        assert_eq!(2, counter.load(Ordering::SeqCst));
    }
}
147  src/docset.rs
@@ -1,57 +1,48 @@
use common::BitSet;
use crate::fastfield::DeleteBitSet;
use crate::DocId;
use std::borrow::Borrow;
use std::borrow::BorrowMut;
use std::cmp::Ordering;
use DocId;

/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
#[derive(PartialEq, Eq, Debug)]
pub enum SkipResult {
    /// target was in the docset
    Reached,
    /// target was not in the docset, skipping stopped as a greater element was found
    OverStep,
    /// the docset was entirely consumed without finding the target, nor any
    /// element greater than the target.
    End,
}
/// Sentinel value returned when a DocSet has been entirely consumed.
///
/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
/// to compare [u32; 4].
pub const TERMINATED: DocId = std::i32::MAX as u32;

/// Represents an iterable set of sorted doc ids.
pub trait DocSet {
    /// Goes to the next element.
    /// `.advance(...)` needs to be called a first time to point to the correct
    /// element.
    fn advance(&mut self) -> bool;
    ///
    /// The DocId of the next element is returned.
    /// In other words, we should always have:
    /// ```ignore
    /// let doc = docset.advance();
    /// assert_eq!(doc, docset.doc());
    /// ```
    ///
    /// If we reached the end of the DocSet, TERMINATED should be returned.
    ///
    /// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
    /// be returned.
    /// TODO Test existing docsets.
    fn advance(&mut self) -> DocId;

    /// After skipping, position the iterator in such a way that `.doc()`
    /// will return a value greater than or equal to target.
    /// Advances the DocSet forward until reaching the target, or going to the
    /// lowest DocId greater than the target.
    ///
    /// SkipResult expresses whether the `target value` was reached, overstepped,
    /// or if the `DocSet` was entirely consumed without finding any value
    /// greater or equal to the `target`.
    /// If the end of the DocSet is reached, TERMINATED is returned.
    ///
    /// WARNING: Calling skip always advances the docset.
    /// More specifically, if the docset is already positioned on the target
    /// skipping will advance to the next position and return SkipResult::Overstep.
    /// Calling `.seek(target)` on a terminated DocSet is legal. Implementations
    /// of DocSet should support it.
    ///
    /// If `.skip_next()` oversteps, then the docset must be positioned correctly
    /// on an existing document. In other words, `.doc()` should return the first document
    /// greater than `DocId`.
    fn skip_next(&mut self, target: DocId) -> SkipResult {
        if !self.advance() {
            return SkipResult::End;
        }
        loop {
            match self.doc().cmp(&target) {
                Ordering::Less => {
                    if !self.advance() {
                        return SkipResult::End;
                    }
                }
                Ordering::Equal => return SkipResult::Reached,
                Ordering::Greater => return SkipResult::OverStep,
            }
    /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
    fn seek(&mut self, target: DocId) -> DocId {
        let mut doc = self.doc();
        debug_assert!(doc <= target);
        while doc < target {
            doc = self.advance();
        }
        doc
    }

    /// Fills a given mutable buffer with the next doc ids from the
@@ -70,51 +61,85 @@ pub trait DocSet {
    /// use case where batching. The normal way to
    /// go through the `DocId`'s is to call `.advance()`.
    fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
        if self.doc() == TERMINATED {
            return 0;
        }
        for (i, buffer_val) in buffer.iter_mut().enumerate() {
            if self.advance() {
                *buffer_val = self.doc();
            } else {
                return i;
            *buffer_val = self.doc();
            if self.advance() == TERMINATED {
                return i + 1;
            }
        }
        buffer.len()
    }

    /// Returns the current document
    /// Right after creating a new DocSet, the docset points to the first document.
    ///
    /// If the DocSet is empty, .doc() should return `TERMINATED`.
    fn doc(&self) -> DocId;

    /// Returns a best-effort hint of the
    /// length of the docset.
    fn size_hint(&self) -> u32;

    /// Appends all docs to a `bitset`.
    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
        while self.advance() {
            bitset.insert(self.doc());
    /// Returns the number of documents matching.
    /// Calling this method consumes the `DocSet`.
    fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
        let mut count = 0u32;
        let mut doc = self.doc();
        while doc != TERMINATED {
            if !delete_bitset.is_deleted(doc) {
                count += 1u32;
            }
            doc = self.advance();
        }
        count
    }

    /// Returns the number of documents matching.
    ///
    /// Returns the count of documents, deleted or not.
    /// Calling this method consumes the `DocSet`.
    fn count(&mut self) -> u32 {
    ///
    /// Of course, the result is an upper bound of the result
    /// given by `count()`.
    fn count_including_deleted(&mut self) -> u32 {
        let mut count = 0u32;
        while self.advance() {
        let mut doc = self.doc();
        while doc != TERMINATED {
            count += 1u32;
            doc = self.advance();
        }
        count
    }
}

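The new contract is easiest to see on a toy implementation: a docset is positioned on its first element at creation, `advance` returns the next `DocId` or `TERMINATED`, and `seek` is just the linear default shown in the trait. A standalone sketch (with `DocId` and `TERMINATED` redefined locally rather than imported):

```rust
type DocId = u32;
const TERMINATED: DocId = std::i32::MAX as u32;

// Toy docset over a sorted vector, following the new DocSet contract:
// it points at its first element on creation, and advance() returns the
// next DocId, or TERMINATED once the set is consumed.
struct VecDocSet {
    docs: Vec<DocId>,
    cursor: usize,
}

impl VecDocSet {
    fn new(docs: Vec<DocId>) -> VecDocSet {
        VecDocSet { docs, cursor: 0 }
    }

    fn doc(&self) -> DocId {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }

    fn advance(&mut self) -> DocId {
        self.cursor += 1;
        self.doc()
    }

    fn seek(&mut self, target: DocId) -> DocId {
        // Same linear default as the trait: advance until doc() >= target.
        while self.doc() < target {
            self.advance();
        }
        self.doc()
    }
}

fn main() {
    let mut docset = VecDocSet::new(vec![1, 5, 8, 100]);
    assert_eq!(docset.doc(), 1); // positioned on the first doc right away
    assert_eq!(docset.seek(6), 8); // lowest DocId >= target
    assert_eq!(docset.advance(), 100);
    assert_eq!(docset.advance(), TERMINATED); // consumed
}
```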
impl<'a> DocSet for &'a mut dyn DocSet {
    fn advance(&mut self) -> u32 {
        (**self).advance()
    }

    fn seek(&mut self, target: DocId) -> DocId {
        (**self).seek(target)
    }

    fn doc(&self) -> u32 {
        (**self).doc()
    }

    fn size_hint(&self) -> u32 {
        (**self).size_hint()
    }
}

impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
    fn advance(&mut self) -> bool {
    fn advance(&mut self) -> DocId {
        let unboxed: &mut TDocSet = self.borrow_mut();
        unboxed.advance()
    }

    fn skip_next(&mut self, target: DocId) -> SkipResult {
    fn seek(&mut self, target: DocId) -> DocId {
        let unboxed: &mut TDocSet = self.borrow_mut();
        unboxed.skip_next(target)
        unboxed.seek(target)
    }

    fn doc(&self) -> DocId {
@@ -127,13 +152,13 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
        unboxed.size_hint()
    }

    fn count(&mut self) -> u32 {
    fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
        let unboxed: &mut TDocSet = self.borrow_mut();
        unboxed.count()
        unboxed.count(delete_bitset)
    }

    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
    fn count_including_deleted(&mut self) -> u32 {
        let unboxed: &mut TDocSet = self.borrow_mut();
        unboxed.append_to_bitset(bitset);
        unboxed.count_including_deleted()
    }
}

31  src/error.rs
@@ -2,12 +2,11 @@

use std::io;

use directory::error::LockError;
use directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use fastfield::FastFieldNotAvailableError;
use query;
use schema;
use serde_json;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError;
use crate::query;
use crate::schema;
use std::fmt;
use std::path::PathBuf;
use std::sync::PoisonError;
@@ -34,7 +33,7 @@ impl DataCorruption {
}

impl fmt::Debug for DataCorruption {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        write!(f, "Data corruption: ")?;
        if let Some(ref filepath) = &self.filepath {
            write!(f, "(in file `{:?}`)", filepath)?;
@@ -77,12 +76,12 @@ pub enum TantivyError {
    /// An Error appeared related to the schema.
    #[fail(display = "Schema error: '{}'", _0)]
    SchemaError(String),
    /// Tried to access a fastfield reader for a field not configured accordingly.
    #[fail(display = "Fast field not available: '{:?}'", _0)]
    FastFieldError(#[cause] FastFieldNotAvailableError),
    /// System error. (e.g.: We failed spawning a new thread)
    #[fail(display = "System error.'{}'", _0)]
    SystemError(String),
    /// Index incompatible with current version of tantivy
    #[fail(display = "{:?}", _0)]
    IncompatibleIndex(Incompatibility),
}

impl From<DataCorruption> for TantivyError {
@@ -93,7 +92,7 @@ impl From<DataCorruption> for TantivyError {

impl From<FastFieldNotAvailableError> for TantivyError {
    fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
        TantivyError::FastFieldError(fastfield_error)
        TantivyError::SchemaError(format!("{}", fastfield_error))
    }
}

@@ -132,6 +131,9 @@ impl From<OpenReadError> for TantivyError {
        match error {
            OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
            OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
            OpenReadError::IncompatibleIndex(incompatibility) => {
                TantivyError::IncompatibleIndex(incompatibility)
            }
        }
    }
}
@@ -162,6 +164,7 @@ impl From<OpenDirectoryError> for TantivyError {
            OpenDirectoryError::NotADirectory(directory_path) => {
                TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
            }
            OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
        }
    }
}
@@ -172,3 +175,9 @@ impl From<serde_json::Error> for TantivyError {
        TantivyError::IOError(io_err.into())
    }
}

impl From<rayon::ThreadPoolBuildError> for TantivyError {
    fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
        TantivyError::SystemError(error.to_string())
    }
}

@@ -6,8 +6,8 @@ pub use self::writer::BytesFastFieldWriter;

#[cfg(test)]
mod tests {
    use schema::Schema;
    use Index;
    use crate::schema::Schema;
    use crate::Index;

    #[test]
    fn test_bytes() {
@@ -22,17 +22,15 @@ mod tests {
        index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
        index_writer.add_document(doc!(field=>vec![0u8; 1000]));
        assert!(index_writer.commit().is_ok());
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();

        index.load_searchers().unwrap();
        let searcher = index.searcher();
        let reader = searcher.segment_reader(0);
        let bytes_reader = reader.bytes_fast_field_reader(field).unwrap();

        assert_eq!(bytes_reader.get_val(0), &[0u8, 1, 2, 3]);
        assert!(bytes_reader.get_val(1).is_empty());
        assert_eq!(bytes_reader.get_val(2), &[255u8]);
        assert_eq!(bytes_reader.get_val(3), &[1u8, 3, 5, 7, 9]);
        assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
        assert!(bytes_reader.get_bytes(1).is_empty());
        assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
        assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
        let long = vec![0u8; 1000];
        assert_eq!(bytes_reader.get_val(4), long.as_slice());
        assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
    }
}

@@ -1,8 +1,8 @@
use owning_ref::OwningRef;

use directory::ReadOnlySource;
use fastfield::FastFieldReader;
use DocId;
use crate::directory::ReadOnlySource;
use crate::fastfield::FastFieldReader;
use crate::DocId;

/// Reader for byte array fast fields
///
@@ -14,6 +14,7 @@ use DocId;
///
/// Reading the value for a document is done by reading the start index for it,
/// and the start index for the next document, and keeping the bytes in between.
#[derive(Clone)]
pub struct BytesFastFieldReader {
    idx_reader: FastFieldReader<u64>,
    values: OwningRef<ReadOnlySource, [u8]>,
@@ -28,10 +29,20 @@ impl BytesFastFieldReader {
        BytesFastFieldReader { idx_reader, values }
    }

    /// Returns the bytes associated with the given `doc`
    pub fn get_val(&self, doc: DocId) -> &[u8] {
    fn range(&self, doc: DocId) -> (usize, usize) {
        let start = self.idx_reader.get(doc) as usize;
        let stop = self.idx_reader.get(doc + 1) as usize;
        (start, stop)
    }

    /// Returns the bytes associated with the given `doc`
    pub fn get_bytes(&self, doc: DocId) -> &[u8] {
        let (start, stop) = self.range(doc);
        &self.values[start..stop]
    }

    /// Returns the overall number of bytes in this bytes fast field.
    pub fn total_num_bytes(&self) -> usize {
        self.values.len()
    }
}

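The comment above describes the layout: one concatenated byte buffer plus a fast field of start offsets, so doc `d`'s value spans `offsets[d]..offsets[d + 1]`. A self-contained sketch of that addressing scheme, with plain slices standing in for the two readers:

```rust
// Concatenated values for docs 0..4, plus start offsets with one extra
// sentinel entry so that doc d spans offsets[d]..offsets[d + 1].
fn get_bytes<'a>(values: &'a [u8], offsets: &[usize], doc: usize) -> &'a [u8] {
    &values[offsets[doc]..offsets[doc + 1]]
}

fn main() {
    let values = [0u8, 1, 2, 3, 255, 9, 9];
    let offsets = [0usize, 4, 4, 5, 7]; // doc 1 is empty (4..4)
    assert_eq!(get_bytes(&values, &offsets, 0), &[0, 1, 2, 3]);
    assert!(get_bytes(&values, &offsets, 1).is_empty());
    assert_eq!(get_bytes(&values, &offsets, 2), &[255]);
    assert_eq!(get_bytes(&values, &offsets, 3), &[9, 9]);
}
```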
@@ -1,8 +1,8 @@
use std::io;

use fastfield::serializer::FastFieldSerializer;
use schema::{Document, Field, Value};
use DocId;
use crate::fastfield::serializer::FastFieldSerializer;
use crate::schema::{Document, Field, Value};
use crate::DocId;

/// Writer for byte array (as in, any number of bytes per document) fast fields
///

@@ -1,17 +1,21 @@
use bit_set::BitSet;
use common::HasLen;
use directory::ReadOnlySource;
use directory::WritePtr;
use space_usage::ByteCount;
use crate::common::{BitSet, HasLen};
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::space_usage::ByteCount;
use crate::DocId;
use std::io;
use std::io::Write;
use DocId;

/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
    let max_doc = delete_bitset.capacity();
/// Warning: this function does not call terminate. The caller is in charge of
/// closing the writer properly.
pub fn write_delete_bitset(
    delete_bitset: &BitSet,
    max_doc: u32,
    writer: &mut WritePtr,
) -> io::Result<()> {
    let mut byte = 0u8;
    let mut shift = 0u8;
    for doc in 0..max_doc {
@@ -29,7 +33,7 @@ pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io:
    if max_doc % 8 > 0 {
        writer.write_all(&[byte])?;
    }
    writer.flush()
    Ok(())
}

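`write_delete_bitset` packs one bit per document, least-significant bit first, emitting a byte every eight documents plus one trailing partial byte (the loop body is elided by the hunk, but the `max_doc % 8 > 0` tail shows the scheme). A standalone sketch of the same packing, together with the lookup that `is_deleted` performs further down:

```rust
// Pack a set of deleted doc ids into a bitmap:
// doc d lives in byte d / 8, at bit position d % 8.
fn pack(deleted: &[u32], max_doc: u32) -> Vec<u8> {
    let mut bytes = vec![0u8; ((max_doc as usize) + 7) / 8];
    for &doc in deleted {
        bytes[(doc / 8) as usize] |= 1u8 << (doc % 8);
    }
    bytes
}

// Mirrors DeleteBitSet::is_deleted: fetch the byte, test the bit.
fn is_deleted(bytes: &[u8], doc: u32) -> bool {
    let b = bytes[(doc / 8) as usize];
    let shift = (doc & 7) as u8;
    b & (1u8 << shift) != 0
}

fn main() {
    let bytes = pack(&[1, 9], 10);
    assert_eq!(bytes.len(), 2); // 10 docs -> 2 bytes
    assert!(is_deleted(&bytes, 1));
    assert!(is_deleted(&bytes, 9));
    assert!(!is_deleted(&bytes, 0));
}
```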
/// Set of deleted `DocId`s.
@@ -40,6 +44,24 @@ pub struct DeleteBitSet {
}

impl DeleteBitSet {
    #[cfg(test)]
    pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
        use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
        use std::path::Path;
        assert!(docs.iter().all(|&doc| doc < max_doc));
        let mut bitset = BitSet::with_max_value(max_doc);
        for &doc in docs {
            bitset.insert(doc);
        }
        let mut directory = RAMDirectory::create();
        let path = Path::new("dummydeletebitset");
        let mut wrt = directory.open_write(path).unwrap();
        write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
        wrt.terminate().unwrap();
        let source = directory.open_read(path).unwrap();
        Self::open(source)
    }

    /// Opens a delete bitset given its data source.
    pub fn open(data: ReadOnlySource) -> DeleteBitSet {
        let num_deleted: usize = data
@@ -53,16 +75,18 @@ impl DeleteBitSet {
        }
    }

    /// Returns whether the document has been marked as deleted.
    /// Returns true iff the document is still "alive". In other words, if it has not been deleted.
    pub fn is_alive(&self, doc: DocId) -> bool {
        !self.is_deleted(doc)
    }

    /// Returns true iff the document has been marked as deleted.
    #[inline(always)]
    pub fn is_deleted(&self, doc: DocId) -> bool {
        if self.len == 0 {
            false
        } else {
            let byte_offset = doc / 8u32;
            let b: u8 = (*self.data)[byte_offset as usize];
            let shift = (doc & 7u32) as u8;
            b & (1u8 << shift) != 0
        }
        let byte_offset = doc / 8u32;
        let b: u8 = (*self.data)[byte_offset as usize];
        let shift = (doc & 7u32) as u8;
        b & (1u8 << shift) != 0
    }

    /// Summarize total space usage of this bitset.
@@ -79,45 +103,35 @@ impl HasLen for DeleteBitSet {

#[cfg(test)]
mod tests {
    use super::*;
    use bit_set::BitSet;
    use directory::*;
    use std::path::PathBuf;
    use super::DeleteBitSet;
    use crate::common::HasLen;

    fn test_delete_bitset_helper(bitset: &BitSet) {
        let test_path = PathBuf::from("test");
        let mut directory = RAMDirectory::create();
        {
            let mut writer = directory.open_write(&*test_path).unwrap();
            write_delete_bitset(bitset, &mut writer).unwrap();
        }
        {
            let source = directory.open_read(&test_path).unwrap();
            let delete_bitset = DeleteBitSet::open(source);
            let n = bitset.capacity();
            for doc in 0..n {
                assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
            }
            assert_eq!(delete_bitset.len(), bitset.len());
    #[test]
    fn test_delete_bitset_empty() {
        let delete_bitset = DeleteBitSet::for_test(&[], 10);
        for doc in 0..10 {
            assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
        }
        assert_eq!(delete_bitset.len(), 0);
    }

    #[test]
    fn test_delete_bitset() {
        {
            let mut bitset = BitSet::with_capacity(10);
            bitset.insert(1);
            bitset.insert(9);
            test_delete_bitset_helper(&bitset);
        }
        {
            let mut bitset = BitSet::with_capacity(8);
            bitset.insert(1);
            bitset.insert(2);
            bitset.insert(3);
            bitset.insert(5);
            bitset.insert(7);
            test_delete_bitset_helper(&bitset);
        let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
        assert!(delete_bitset.is_alive(0));
        assert!(delete_bitset.is_deleted(1));
        assert!(delete_bitset.is_alive(2));
        assert!(delete_bitset.is_alive(3));
        assert!(delete_bitset.is_alive(4));
        assert!(delete_bitset.is_alive(5));
        assert!(delete_bitset.is_alive(6));
        assert!(delete_bitset.is_alive(7));
        assert!(delete_bitset.is_alive(8));
        assert!(delete_bitset.is_deleted(9));
        for doc in 0..10 {
            assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
        }
        assert_eq!(delete_bitset.len(), 2);
    }
}

@@ -1,11 +1,11 @@
use schema::FieldEntry;
use crate::schema::FieldEntry;
use std::result;

/// `FastFieldNotAvailableError` is returned when the
/// user requested a fast field reader, and the field was not
/// defined in the schema as a fast field.
#[derive(Debug, Fail)]
#[fail(display = "field not available: '{:?}'", field_name)]
#[fail(display = "Fast field not available: '{:?}'", field_name)]
pub struct FastFieldNotAvailableError {
    field_name: String,
}

@@ -1,9 +1,9 @@
use super::MultiValueIntFastFieldReader;
use schema::Facet;
use crate::schema::Facet;
use crate::termdict::TermDictionary;
use crate::termdict::TermOrdinal;
use crate::DocId;
use std::str;
use termdict::TermDictionary;
use termdict::TermOrdinal;
use DocId;

/// The facet reader makes it possible to access the list of
/// facets associated to a given document in a specific

@@ -30,12 +30,14 @@ pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub use self::multivalued::{MultiValueIntFastFieldReader, MultiValueIntFastFieldWriter};
pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use common;
use schema::Cardinality;
use schema::FieldType;
use schema::Value;
use crate::chrono::{NaiveDateTime, Utc};
use crate::common;
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::Value;

mod bytes;
mod delete;
@@ -43,11 +45,12 @@ mod error;
mod facet_reader;
mod multivalued;
mod reader;
mod readers;
mod serializer;
mod writer;

/// Trait for types that are allowed for fast fields: u64 or i64.
pub trait FastValue: Default + Clone + Copy {
/// Trait for types that are allowed for fast fields: u64, i64 and f64.
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
    /// Converts a value from u64
    ///
    /// Internally all fast field values are encoded as u64.
@@ -67,6 +70,12 @@ pub trait FastValue: Default + Clone + Copy {
    /// Cast value to `u64`.
    /// The value is just reinterpreted in memory.
    fn as_u64(&self) -> u64;

    /// Build a default value. This default value is never used, so the value does not
    /// really matter.
    fn make_zero() -> Self {
        Self::from_u64(0i64.to_u64())
    }
}

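Since every fast value round-trips through a `u64`, signed and floating-point types need an order-preserving mapping, which is what the `common::i64_to_u64` / `common::f64_to_u64` helpers used by the impls below provide. A hedged standalone sketch of the standard sign-bit transformations (shown here as an illustration; the exact formulas live in tantivy's `common` module):

```rust
// Order-preserving i64 <-> u64: flip the sign bit so negative numbers
// sort below positive ones in unsigned order.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

// Order-preserving f64 <-> u64: flip all bits of negative floats,
// and only the sign bit of positive ones.
fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        bits ^ (1u64 << 63)
    } else {
        !bits
    }
}

fn main() {
    assert!(i64_to_u64(-1) < i64_to_u64(0));
    assert!(i64_to_u64(0) < i64_to_u64(1));
    assert_eq!(u64_to_i64(i64_to_u64(-42)), -42);
    assert!(f64_to_u64(-2.5) < f64_to_u64(-1.0));
    assert!(f64_to_u64(-1.0) < f64_to_u64(3.0));
}
```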
impl FastValue for u64 {
@@ -78,10 +87,6 @@ impl FastValue for u64 {
        *self
    }

    fn as_u64(&self) -> u64 {
        *self
    }

    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
        match *field_type {
            FieldType::U64(ref integer_options) => integer_options.get_fastfield_cardinality(),
@@ -89,6 +94,10 @@ impl FastValue for u64 {
            _ => None,
        }
    }

    fn as_u64(&self) -> u64 {
        *self
    }
}

impl FastValue for i64 {
@@ -112,11 +121,56 @@ impl FastValue for i64 {
    }
}

impl FastValue for f64 {
    fn from_u64(val: u64) -> Self {
        common::u64_to_f64(val)
    }

    fn to_u64(&self) -> u64 {
        common::f64_to_u64(*self)
    }

    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
        match *field_type {
            FieldType::F64(ref integer_options) => integer_options.get_fastfield_cardinality(),
            _ => None,
        }
    }

    fn as_u64(&self) -> u64 {
        self.to_bits()
    }
}

impl FastValue for crate::DateTime {
    fn from_u64(timestamp_u64: u64) -> Self {
        let timestamp_i64 = i64::from_u64(timestamp_u64);
        crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
    }

    fn to_u64(&self) -> u64 {
        self.timestamp().to_u64()
    }

    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
        match *field_type {
            FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
            _ => None,
        }
    }

    fn as_u64(&self) -> u64 {
        self.timestamp().as_u64()
    }
}

fn value_to_u64(value: &Value) -> u64 {
    match *value {
        Value::U64(ref val) => *val,
        Value::I64(ref val) => common::i64_to_u64(*val),
        _ => panic!("Expected a u64/i64 field, got {:?} ", value),
        Value::F64(ref val) => common::f64_to_u64(*val),
        Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
        _ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
    }
}

@@ -124,27 +178,29 @@ fn value_to_u64(value: &Value) -> u64 {
mod tests {

    use super::*;
    use common::CompositeFile;
    use directory::{Directory, RAMDirectory, WritePtr};
    use fastfield::FastFieldReader;
    use crate::common::CompositeFile;
    use crate::directory::{Directory, RAMDirectory, WritePtr};
    use crate::fastfield::FastFieldReader;
    use crate::merge_policy::NoMergePolicy;
    use crate::schema::Field;
    use crate::schema::Schema;
    use crate::schema::FAST;
    use crate::schema::{Document, IntOptions};
    use crate::{Index, SegmentId, SegmentReader};
    use once_cell::sync::Lazy;
    use rand::prelude::SliceRandom;
    use rand::rngs::StdRng;
    use rand::SeedableRng;
    use schema::Document;
    use schema::Field;
    use schema::Schema;
    use schema::FAST;
    use std::collections::HashMap;
    use std::path::Path;

    lazy_static! {
        pub static ref SCHEMA: Schema = {
            let mut schema_builder = Schema::builder();
            schema_builder.add_u64_field("field", FAST);
            schema_builder.build()
        };
        pub static ref FIELD: Field = { SCHEMA.get_field("field").unwrap() };
    }
    pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
        let mut schema_builder = Schema::builder();
        schema_builder.add_u64_field("field", FAST);
        schema_builder.build()
    });

    pub static FIELD: Lazy<Field> = Lazy::new(|| SCHEMA.get_field("field").unwrap());

    #[test]
    pub fn test_fastfield() {
@@ -154,6 +210,12 @@ mod tests {
        assert_eq!(test_fastfield.get(2), 300);
    }

    #[test]
    pub fn test_fastfield_i64_u64() {
        let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
        assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
    }

    #[test]
    fn test_intfastfield_small() {
        let path = Path::new("test");
@@ -406,6 +468,92 @@ mod tests {
        }
    }

    #[test]
    fn test_merge_missing_date_fast_field() {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field("date", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
        index_writer.commit().unwrap();
        index_writer.add_document(doc!());
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        let segment_ids: Vec<SegmentId> = reader
            .searcher()
            .segment_readers()
            .iter()
            .map(SegmentReader::segment_id)
            .collect();
        assert_eq!(segment_ids.len(), 2);
        let merge_future = index_writer.merge(&segment_ids[..]);
        let merge_res = futures::executor::block_on(merge_future);
        assert!(merge_res.is_ok());
        assert!(reader.reload().is_ok());
        assert_eq!(reader.searcher().segment_readers().len(), 1);
    }

    #[test]
    fn test_default_datetime() {
        assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
    }

    #[test]
    fn test_datefastfield() {
        use crate::fastfield::FastValue;
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field("date", FAST);
        let multi_date_field = schema_builder.add_date_field(
            "multi_date",
            IntOptions::default().set_fast(Cardinality::MultiValues),
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(
            date_field => crate::DateTime::from_u64(1i64.to_u64()),
            multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
            multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
        ));
        index_writer.add_document(doc!(
            date_field => crate::DateTime::from_u64(4i64.to_u64())
        ));
        index_writer.add_document(doc!(
            multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
            multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
        ));
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        assert_eq!(searcher.segment_readers().len(), 1);
        let segment_reader = searcher.segment_reader(0);
        let fast_fields = segment_reader.fast_fields();
        let date_fast_field = fast_fields.date(date_field).unwrap();
        let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
        let mut dates = vec![];
        {
            assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
            dates_fast_field.get_vals(0u32, &mut dates);
            assert_eq!(dates.len(), 2);
            assert_eq!(dates[0].timestamp(), 2i64);
            assert_eq!(dates[1].timestamp(), 3i64);
        }
        {
            assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
            dates_fast_field.get_vals(1u32, &mut dates);
            assert!(dates.is_empty());
        }
        {
            assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
            dates_fast_field.get_vals(2u32, &mut dates);
            assert_eq!(dates.len(), 2);
            assert_eq!(dates[0].timestamp(), 5i64);
            assert_eq!(dates[1].timestamp(), 6i64);
        }
    }
}

#[cfg(all(test, feature = "unstable"))]
|
||||
@@ -413,9 +561,9 @@ mod bench {
|
||||
use super::tests::FIELD;
|
||||
use super::tests::{generate_permutation, SCHEMA};
|
||||
use super::*;
|
||||
use common::CompositeFile;
|
||||
use directory::{Directory, RAMDirectory, WritePtr};
|
||||
use fastfield::FastFieldReader;
|
||||
use crate::common::CompositeFile;
|
||||
use crate::directory::{Directory, RAMDirectory, WritePtr};
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use test::{self, Bencher};
|
||||
@@ -513,5 +661,4 @@ mod bench {
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -7,10 +7,14 @@ pub use self::writer::MultiValueIntFastFieldWriter;
#[cfg(test)]
mod tests {

    use schema::Cardinality;
    use schema::IntOptions;
    use schema::Schema;
    use Index;
    use crate::collector::TopDocs;
    use crate::query::QueryParser;
    use crate::schema::Cardinality;
    use crate::schema::Facet;
    use crate::schema::IntOptions;
    use crate::schema::Schema;
    use crate::Index;
    use chrono::Duration;

    #[test]
    fn test_multivalued_u64() {
@@ -28,11 +32,10 @@ mod tests {
        index_writer.add_document(doc!(field=>5u64, field=>20u64, field=>1u64));
        assert!(index_writer.commit().is_ok());

        index.load_searchers().unwrap();
        let searcher = index.searcher();
        let reader = searcher.segment_reader(0);
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let mut vals = Vec::new();
        let multi_value_reader = reader.multi_fast_field_reader::<u64>(field).unwrap();
        let multi_value_reader = segment_reader.fast_fields().u64s(field).unwrap();
        {
            multi_value_reader.get_vals(2, &mut vals);
            assert_eq!(&vals, &[4u64]);
@@ -47,6 +50,133 @@ mod tests {
        }
    }

    #[test]
    fn test_multivalued_date() {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field(
            "multi_date_field",
            IntOptions::default()
                .set_fast(Cardinality::MultiValues)
                .set_indexed()
                .set_stored(),
        );
        let time_i =
            schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        let first_time_stamp = chrono::Utc::now();
        index_writer.add_document(
            doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
        );
        index_writer.add_document(doc!(time_i=>0i64));
        // add one second
        index_writer
            .add_document(doc!(date_field=>first_time_stamp + Duration::seconds(1), time_i=>2i64));
        // add another second
        let two_secs_ahead = first_time_stamp + Duration::seconds(2);
        index_writer.add_document(doc!(date_field=>two_secs_ahead, date_field=>two_secs_ahead, date_field=>two_secs_ahead, time_i=>3i64));
        assert!(index_writer.commit().is_ok());

        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let reader = searcher.segment_reader(0);
        assert_eq!(reader.num_docs(), 4);

        {
            let parser = QueryParser::for_index(&index, vec![date_field]);
            let query = parser
                .parse_query(&format!("\"{}\"", first_time_stamp.to_rfc3339()).to_string())
                .expect("could not parse query");
            let results = searcher
                .search(&query, &TopDocs::with_limit(5))
                .expect("could not query index");

            assert_eq!(results.len(), 1);
            for (_score, doc_address) in results {
                let retrieved_doc = searcher.doc(doc_address).expect("cannot fetch doc");
                assert_eq!(
                    retrieved_doc
                        .get_first(date_field)
                        .expect("cannot find value")
                        .date_value()
                        .timestamp(),
                    first_time_stamp.timestamp()
                );
                assert_eq!(
                    retrieved_doc
                        .get_first(time_i)
                        .expect("cannot find value")
                        .i64_value(),
                    1i64
                );
            }
        }

        {
            let parser = QueryParser::for_index(&index, vec![date_field]);
            let query = parser
                .parse_query(&format!("\"{}\"", two_secs_ahead.to_rfc3339()).to_string())
                .expect("could not parse query");
            let results = searcher
                .search(&query, &TopDocs::with_limit(5))
                .expect("could not query index");

            assert_eq!(results.len(), 1);

            for (_score, doc_address) in results {
                let retrieved_doc = searcher.doc(doc_address).expect("cannot fetch doc");
                assert_eq!(
                    retrieved_doc
                        .get_first(date_field)
                        .expect("cannot find value")
                        .date_value()
                        .timestamp(),
                    two_secs_ahead.timestamp()
                );
                assert_eq!(
                    retrieved_doc
                        .get_first(time_i)
                        .expect("cannot find value")
                        .i64_value(),
                    3i64
                );
            }
        }

        // TODO: support Date range queries
        // {
        //     let parser = QueryParser::for_index(&index, vec![date_field]);
        //     let range_q = format!("\"{}\"..\"{}\"",
        //         (first_time_stamp + Duration::seconds(1)).to_rfc3339(),
        //         (first_time_stamp + Duration::seconds(3)).to_rfc3339()
        //     );
        //     let query = parser.parse_query(&range_q)
        //         .expect("could not parse query");
        //     let results = searcher.search(&query, &TopDocs::with_limit(5))
        //         .expect("could not query index");
        //
        //
        //     assert_eq!(results.len(), 2);
        //     for (i, doc_pair) in results.iter().enumerate() {
        //         let retrieved_doc = searcher.doc(doc_pair.1).expect("cannot fetch doc");
        //         let offset_sec = match i {
        //             0 => 1,
        //             1 => 3,
        //             _ => panic!("should not have more than 2 docs")
        //         };
        //         let time_i_val = match i {
        //             0 => 2,
        //             1 => 3,
        //             _ => panic!("should not have more than 2 docs")
        //         };
        //         assert_eq!(retrieved_doc.get_first(date_field).expect("cannot find value").date_value().timestamp(),
        //             (first_time_stamp + Duration::seconds(offset_sec)).timestamp());
        //         assert_eq!(retrieved_doc.get_first(time_i).expect("cannot find value").i64_value(), time_i_val);
        //     }
        // }
    }

    #[test]
    fn test_multivalued_i64() {
        let mut schema_builder = Schema::builder();
@@ -63,11 +193,10 @@ mod tests {
        index_writer.add_document(doc!(field=> -5i64, field => -20i64, field=>1i64));
        assert!(index_writer.commit().is_ok());

        index.load_searchers().unwrap();
        let searcher = index.searcher();
        let reader = searcher.segment_reader(0);
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let mut vals = Vec::new();
        let multi_value_reader = reader.multi_fast_field_reader::<i64>(field).unwrap();
        let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
        {
            multi_value_reader.get_vals(2, &mut vals);
            assert_eq!(&vals, &[-4i64]);
@@ -85,4 +214,17 @@ mod tests {
            assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
        }
    }
    #[test]
    #[ignore]
    fn test_many_facets() {
        let mut schema_builder = Schema::builder();
        let field = schema_builder.add_facet_field("facetfield");
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        for i in 0..100_000 {
            index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
        }
        assert!(index_writer.commit().is_ok());
    }
}

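The tests above switch from the old `index.load_searchers()` / `index.searcher()` pair to the new `index.reader()` entry point, and fetch multivalued readers through `segment_reader.fast_fields()`. A minimal sketch of that flow, assuming the tantivy API of this changeset (error handling kept short):

```rust
use tantivy::schema::{Cardinality, IntOptions, Schema};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let field = schema_builder.add_u64_field(
        "vals",
        IntOptions::default().set_fast(Cardinality::MultiValues),
    );
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
    index_writer.add_document(doc!(field => 5u64, field => 20u64));
    index_writer.commit()?;
    // `index.reader()` replaces the old `load_searchers()` / `searcher()` pair.
    let searcher = index.reader()?.searcher();
    let ff_reader = searcher.segment_reader(0).fast_fields().u64s(field).unwrap();
    let mut vals = Vec::new();
    ff_reader.get_vals(0, &mut vals); // values stored for doc 0
    assert_eq!(&vals, &[5u64, 20u64]);
    Ok(())
}
```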
@@ -1,5 +1,5 @@
use fastfield::{FastFieldReader, FastValue};
use DocId;
use crate::fastfield::{FastFieldReader, FastValue};
use crate::DocId;

/// Reader for a multivalued `u64` fast field.
///
@@ -26,6 +26,13 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
        }
    }

    pub(crate) fn into_u64s_reader(self) -> MultiValueIntFastFieldReader<u64> {
        MultiValueIntFastFieldReader {
            idx_reader: self.idx_reader,
            vals_reader: self.vals_reader.into_u64_reader(),
        }
    }

    /// Returns `(start, stop)`, such that the values associated
    /// to the given document are `start..stop`.
    fn range(&self, doc: DocId) -> (u64, u64) {
@@ -38,16 +45,27 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
    pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
        let (start, stop) = self.range(doc);
        let len = (stop - start) as usize;
        vals.resize(len, Item::default());
        self.vals_reader.get_range(start as u32, &mut vals[..]);
        vals.resize(len, Item::make_zero());
        self.vals_reader.get_range_u64(start, &mut vals[..]);
    }

    /// Returns the number of values associated with the document `DocId`.
    pub fn num_vals(&self, doc: DocId) -> usize {
        let (start, stop) = self.range(doc);
        (stop - start) as usize
    }

    /// Returns the overall number of values in this field.
    pub fn total_num_vals(&self) -> u64 {
        self.idx_reader.max_value()
    }
}

#[cfg(test)]
mod tests {

    use core::Index;
    use schema::{Document, Facet, Schema};
    use crate::core::Index;
    use crate::schema::{Facet, Schema};

    #[test]
    fn test_multifastfield_reader() {
@@ -58,25 +76,14 @@ mod tests {
        let mut index_writer = index
            .writer_with_num_threads(1, 30_000_000)
            .expect("Failed to create index writer.");
        {
            let mut doc = Document::new();
            doc.add_facet(facet_field, "/category/cat2");
            doc.add_facet(facet_field, "/category/cat1");
            index_writer.add_document(doc);
        }
        {
            let mut doc = Document::new();
            doc.add_facet(facet_field, "/category/cat2");
            index_writer.add_document(doc);
        }
        {
            let mut doc = Document::new();
            doc.add_facet(facet_field, "/category/cat3");
            index_writer.add_document(doc);
        }
        index_writer.add_document(doc!(
            facet_field => Facet::from("/category/cat2"),
            facet_field => Facet::from("/category/cat1"),
        ));
        index_writer.add_document(doc!(facet_field => Facet::from("/category/cat2")));
        index_writer.add_document(doc!(facet_field => Facet::from("/category/cat3")));
        index_writer.commit().expect("Commit failed");
        index.load_searchers().expect("Reloading searchers");
        let searcher = index.searcher();
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();

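The `range`/`get_vals` pair above implements a classic two-column layout: an index column of start offsets and a flat value column. A standalone sketch of the same lookup (names here are illustrative, not tantivy's):

```rust
/// Returns the values of `doc`, given an offset column `idx`
/// (one start offset per doc, plus a final sentinel) and a value column `vals`.
fn get_vals(idx: &[u64], vals: &[u64], doc: usize) -> Vec<u64> {
    let (start, stop) = (idx[doc] as usize, idx[doc + 1] as usize);
    vals[start..stop].to_vec()
}

fn main() {
    // doc 0 -> [7, 8], doc 1 -> [], doc 2 -> [9]
    let idx = [0u64, 2, 2, 3];
    let vals = [7u64, 8, 9];
    assert_eq!(get_vals(&idx, &vals, 0), vec![7, 8]);
    assert!(get_vals(&idx, &vals, 1).is_empty());
    assert_eq!(get_vals(&idx, &vals, 2), vec![9]);
}
```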
@@ -1,13 +1,12 @@
use fastfield::serializer::FastSingleFieldSerializer;
use fastfield::value_to_u64;
use fastfield::FastFieldSerializer;
use itertools::Itertools;
use postings::UnorderedTermId;
use schema::{Document, Field};
use std::collections::HashMap;
use crate::fastfield::serializer::FastSingleFieldSerializer;
use crate::fastfield::value_to_u64;
use crate::fastfield::FastFieldSerializer;
use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use fnv::FnvHashMap;
use std::io;
use termdict::TermOrdinal;
use DocId;

/// Writer for multi-valued (as in, more than one value per document)
/// int fast field.
@@ -32,7 +31,7 @@ use DocId;
/// term ids when the segment is getting serialized.
pub struct MultiValueIntFastFieldWriter {
    field: Field,
    vals: Vec<u64>,
    vals: Vec<UnorderedTermId>,
    doc_index: Vec<u64>,
    is_facet: bool,
}
@@ -102,7 +101,7 @@ impl MultiValueIntFastFieldWriter {
    pub fn serialize(
        &self,
        serializer: &mut FastFieldSerializer,
        mapping_opt: Option<&HashMap<UnorderedTermId, TermOrdinal>>,
        mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>,
    ) -> io::Result<()> {
        {
            // writing the offset index
@@ -116,7 +115,7 @@ impl MultiValueIntFastFieldWriter {
        }
        {
            // writing the values themselves.
            let mut value_serializer: FastSingleFieldSerializer<_>;
            let mut value_serializer: FastSingleFieldSerializer<'_, _>;
            match mapping_opt {
                Some(mapping) => {
                    value_serializer = serializer.new_u64_fast_field_with_idx(
@@ -151,8 +150,8 @@ impl MultiValueIntFastFieldWriter {
                    }
                }
                None => {
                    let val_min_max = self.vals.iter().cloned().minmax();
                    let (val_min, val_max) = val_min_max.into_option().unwrap_or((0u64, 0u64));
                    let val_min_max = crate::common::minmax(self.vals.iter().cloned());
                    let (val_min, val_max) = val_min_max.unwrap_or((0u64, 0u64));
                    value_serializer =
                        serializer.new_u64_fast_field_with_idx(self.field, val_min, val_max, 1)?;
                    for &val in &self.vals {

@@ -1,18 +1,18 @@
use super::FastValue;
use common::bitpacker::BitUnpacker;
use common::compute_num_bits;
use common::BinarySerializable;
use common::CompositeFile;
use directory::ReadOnlySource;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::common::bitpacker::BitUnpacker;
use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::DocId;
use owning_ref::OwningRef;
use schema::Schema;
use schema::FAST;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::Path;
use DocId;

/// Trait for accessing a fastfield.
///
@@ -50,6 +50,15 @@ impl<Item: FastValue> FastFieldReader<Item> {
        }
    }

    pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
        FastFieldReader {
            bit_unpacker: self.bit_unpacker,
            min_value_u64: self.min_value_u64,
            max_value_u64: self.max_value_u64,
            _phantom: PhantomData,
        }
    }

    /// Return the value associated to the given document.
    ///
    /// This accessor should return as fast as possible.
@@ -59,7 +68,29 @@ impl<Item: FastValue> FastFieldReader<Item> {
    /// May panic if `doc` is greater than the segment's `maxdoc`.
    pub fn get(&self, doc: DocId) -> Item {
        Item::from_u64(self.min_value_u64 + self.bit_unpacker.get(doc as usize))
        self.get_u64(u64::from(doc))
    }

    pub(crate) fn get_u64(&self, doc: u64) -> Item {
        Item::from_u64(self.min_value_u64 + self.bit_unpacker.get(doc))
    }

    /// Internally, `multivalued` fields also use single-value fast fields.
    /// It works as follows: a first column contains the start index
    /// for each document, and a second column contains the actual values.
    ///
    /// The values associated to a given doc are then
    /// `second_column[first_column.get(doc)..first_column.get(doc+1)]`.
    ///
    /// This means a single-value fast field reader can be indexed internally with
    /// something different from a `DocId`. For this use case, we want to use `u64`
    /// values.
    ///
    /// See `get_range` for the actual documentation of this method.
    pub(crate) fn get_range_u64(&self, start: u64, output: &mut [Item]) {
        for (i, out) in output.iter_mut().enumerate() {
            *out = self.get_u64(start + (i as u64));
        }
    }

    /// Fills an output buffer with the fast field values
@@ -75,13 +106,8 @@ impl<Item: FastValue> FastFieldReader<Item> {
    ///
    /// May panic if `start + output.len()` is greater than
    /// the segment's `maxdoc`.
    ///
    // TODO change start to `u64`.
    // For multifastfield, start is an index in a second fastfield, not a `DocId`.
    pub fn get_range(&self, start: u32, output: &mut [Item]) {
        for (i, out) in output.iter_mut().enumerate() {
            *out = self.get(start + i as u32);
        }
    pub fn get_range(&self, start: DocId, output: &mut [Item]) {
        self.get_range_u64(u64::from(start), output);
    }

    /// Returns the minimum value for this fast field.

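The refactored `get`/`get_u64` above makes one point explicit: a fast field stores bitpacked deltas from the segment's minimum value, so any `u64` offset (not just a `DocId`) can index it. An illustrative sketch with the bitpacker replaced by a plain vector:

```rust
// `deltas` stands in for the real bitpacked data; the decoding shape is the same.
struct Reader {
    min_value: u64,
    deltas: Vec<u64>,
}

impl Reader {
    fn get_u64(&self, idx: u64) -> u64 {
        self.min_value + self.deltas[idx as usize]
    }

    fn get_range_u64(&self, start: u64, output: &mut [u64]) {
        for (i, out) in output.iter_mut().enumerate() {
            *out = self.get_u64(start + i as u64);
        }
    }
}

fn main() {
    let reader = Reader { min_value: 100, deltas: vec![0, 3, 7] };
    let mut out = [0u64; 2];
    reader.get_range_u64(1, &mut out); // read two consecutive values from offset 1
    assert_eq!(out, [103, 107]);
}
```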
src/fastfield/readers.rs (new file, 270 lines)
@@ -0,0 +1,270 @@
use crate::common::CompositeFile;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap;

/// Provides access to all of the `FastFieldReader`s.
///
/// Internally, `FastFieldReaders` holds preloaded fast field readers,
/// and just wraps several `HashMap`s.
pub struct FastFieldReaders {
    fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
    fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
    fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
    fast_field_date: HashMap<Field, FastFieldReader<crate::DateTime>>,
    fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
    fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
    fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
    fast_field_dates: HashMap<Field, MultiValueIntFastFieldReader<crate::DateTime>>,
    fast_bytes: HashMap<Field, BytesFastFieldReader>,
    fast_fields_composite: CompositeFile,
}

enum FastType {
    I64,
    U64,
    F64,
    Date,
}

fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
    match field_type {
        FieldType::U64(options) => options
            .get_fastfield_cardinality()
            .map(|cardinality| (FastType::U64, cardinality)),
        FieldType::I64(options) => options
            .get_fastfield_cardinality()
            .map(|cardinality| (FastType::I64, cardinality)),
        FieldType::F64(options) => options
            .get_fastfield_cardinality()
            .map(|cardinality| (FastType::F64, cardinality)),
        FieldType::Date(options) => options
            .get_fastfield_cardinality()
            .map(|cardinality| (FastType::Date, cardinality)),
        FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
        _ => None,
    }
}

impl FastFieldReaders {
    pub(crate) fn load_all(
        schema: &Schema,
        fast_fields_composite: &CompositeFile,
    ) -> crate::Result<FastFieldReaders> {
        let mut fast_field_readers = FastFieldReaders {
            fast_field_i64: Default::default(),
            fast_field_u64: Default::default(),
            fast_field_f64: Default::default(),
            fast_field_date: Default::default(),
            fast_field_i64s: Default::default(),
            fast_field_u64s: Default::default(),
            fast_field_f64s: Default::default(),
            fast_field_dates: Default::default(),
            fast_bytes: Default::default(),
            fast_fields_composite: fast_fields_composite.clone(),
        };
        for (field, field_entry) in schema.fields() {
            let field_type = field_entry.field_type();
            if field_type == &FieldType::Bytes {
                let idx_reader = fast_fields_composite
                    .open_read_with_idx(field, 0)
                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
                    .map(FastFieldReader::open)?;
                let data = fast_fields_composite
                    .open_read_with_idx(field, 1)
                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
                fast_field_readers
                    .fast_bytes
                    .insert(field, BytesFastFieldReader::open(idx_reader, data));
            } else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
                match cardinality {
                    Cardinality::SingleValue => {
                        if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
                            match fast_type {
                                FastType::U64 => {
                                    let fast_field_reader = FastFieldReader::open(fast_field_data);
                                    fast_field_readers
                                        .fast_field_u64
                                        .insert(field, fast_field_reader);
                                }
                                FastType::I64 => {
                                    fast_field_readers.fast_field_i64.insert(
                                        field,
                                        FastFieldReader::open(fast_field_data.clone()),
                                    );
                                }
                                FastType::F64 => {
                                    fast_field_readers.fast_field_f64.insert(
                                        field,
                                        FastFieldReader::open(fast_field_data.clone()),
                                    );
                                }
                                FastType::Date => {
                                    fast_field_readers.fast_field_date.insert(
                                        field,
                                        FastFieldReader::open(fast_field_data.clone()),
                                    );
                                }
                            }
                        } else {
                            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
                        }
                    }
                    Cardinality::MultiValues => {
                        let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
                        let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
                        if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
                            let idx_reader = FastFieldReader::open(fast_field_idx);
                            match fast_type {
                                FastType::I64 => {
                                    let vals_reader = FastFieldReader::open(fast_field_data);
                                    let multivalued_int_fast_field =
                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                                    fast_field_readers
                                        .fast_field_i64s
                                        .insert(field, multivalued_int_fast_field);
                                }
                                FastType::U64 => {
                                    let vals_reader = FastFieldReader::open(fast_field_data);
                                    let multivalued_int_fast_field =
                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                                    fast_field_readers
                                        .fast_field_u64s
                                        .insert(field, multivalued_int_fast_field);
                                }
                                FastType::F64 => {
                                    let vals_reader = FastFieldReader::open(fast_field_data);
                                    let multivalued_int_fast_field =
                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                                    fast_field_readers
                                        .fast_field_f64s
                                        .insert(field, multivalued_int_fast_field);
                                }
                                FastType::Date => {
                                    let vals_reader = FastFieldReader::open(fast_field_data);
                                    let multivalued_int_fast_field =
                                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                                    fast_field_readers
                                        .fast_field_dates
                                        .insert(field, multivalued_int_fast_field);
                                }
                            }
                        } else {
                            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
                        }
                    }
                }
            }
        }
        Ok(fast_field_readers)
    }

    pub(crate) fn space_usage(&self) -> PerFieldSpaceUsage {
        self.fast_fields_composite.space_usage()
    }

    /// Returns the `u64` fast field reader associated to `field`.
    ///
    /// If `field` is not a u64 fast field, this method returns `None`.
    pub fn u64(&self, field: Field) -> Option<FastFieldReader<u64>> {
        self.fast_field_u64.get(&field).cloned()
    }

    /// If the field is a u64 fast field, return the associated reader.
    /// If the field is an i64 fast field, return the associated u64 reader. Values are
    /// mapped from i64 to u64 using a unique monotonic mapping.
    ///
    /// This method is useful when merging segment readers.
    pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
        if let Some(u64_ff_reader) = self.u64(field) {
            return Some(u64_ff_reader);
        }
        if let Some(i64_ff_reader) = self.i64(field) {
            return Some(i64_ff_reader.into_u64_reader());
        }
        if let Some(f64_ff_reader) = self.f64(field) {
            return Some(f64_ff_reader.into_u64_reader());
        }
        if let Some(date_ff_reader) = self.date(field) {
            return Some(date_ff_reader.into_u64_reader());
        }
        None
    }

    /// Returns the `i64` fast field reader associated to `field`.
    ///
    /// If `field` is not an i64 fast field, this method returns `None`.
    pub fn i64(&self, field: Field) -> Option<FastFieldReader<i64>> {
        self.fast_field_i64.get(&field).cloned()
    }

    /// Returns the date fast field reader associated to `field`.
    ///
    /// If `field` is not a date fast field, this method returns `None`.
    pub fn date(&self, field: Field) -> Option<FastFieldReader<crate::DateTime>> {
        self.fast_field_date.get(&field).cloned()
    }

    /// Returns the `f64` fast field reader associated to `field`.
    ///
    /// If `field` is not a f64 fast field, this method returns `None`.
    pub fn f64(&self, field: Field) -> Option<FastFieldReader<f64>> {
        self.fast_field_f64.get(&field).cloned()
    }

    /// Returns a `u64s` multi-valued fast field reader associated to `field`.
    ///
    /// If `field` is not a u64 multi-valued fast field, this method returns `None`.
    pub fn u64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
        self.fast_field_u64s.get(&field).cloned()
    }

    /// If the field is a u64s fast field, return the associated reader.
    /// If the field is an i64s fast field, return the associated u64s reader. Values are
    /// mapped from i64 to u64 using a unique monotonic mapping.
    ///
    /// This method is useful when merging segment readers.
    pub(crate) fn u64s_lenient(&self, field: Field) -> Option<MultiValueIntFastFieldReader<u64>> {
        if let Some(u64s_ff_reader) = self.u64s(field) {
            return Some(u64s_ff_reader);
        }
        if let Some(i64s_ff_reader) = self.i64s(field) {
            return Some(i64s_ff_reader.into_u64s_reader());
        }
        if let Some(f64s_ff_reader) = self.f64s(field) {
            return Some(f64s_ff_reader.into_u64s_reader());
        }
        None
    }

    /// Returns an `i64s` multi-valued fast field reader associated to `field`.
    ///
    /// If `field` is not an i64 multi-valued fast field, this method returns `None`.
    pub fn i64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<i64>> {
        self.fast_field_i64s.get(&field).cloned()
    }

    /// Returns a `f64s` multi-valued fast field reader associated to `field`.
    ///
    /// If `field` is not a f64 multi-valued fast field, this method returns `None`.
    pub fn f64s(&self, field: Field) -> Option<MultiValueIntFastFieldReader<f64>> {
        self.fast_field_f64s.get(&field).cloned()
    }

    /// Returns a `crate::DateTime` multi-valued fast field reader associated to `field`.
    ///
    /// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
    pub fn dates(&self, field: Field) -> Option<MultiValueIntFastFieldReader<crate::DateTime>> {
        self.fast_field_dates.get(&field).cloned()
    }

    /// Returns the `bytes` fast field reader associated to `field`.
    ///
    /// If `field` is not a bytes fast field, returns `None`.
    pub fn bytes(&self, field: Field) -> Option<BytesFastFieldReader> {
        self.fast_bytes.get(&field).cloned()
    }
}
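With `FastFieldReaders` in place, callers go through one typed accessor per fast field flavor instead of a generic method. A hedged usage sketch (the function and field names are hypothetical; the accessors mirror the ones defined above):

```rust
use tantivy::schema::Field;
use tantivy::SegmentReader;

// Prints the per-document value of an f64 single-valued fast field, if present.
fn print_scores(segment_reader: &SegmentReader, score_field: Field) {
    let fast_fields = segment_reader.fast_fields();
    if let Some(score_reader) = fast_fields.f64(score_field) {
        for doc in 0..segment_reader.num_docs() {
            println!("doc {} -> {}", doc, score_reader.get(doc));
        }
    }
}
```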
@@ -1,10 +1,10 @@
use common::bitpacker::BitPacker;
use common::compute_num_bits;
use common::BinarySerializable;
use common::CompositeWrite;
use common::CountingWriter;
use directory::WritePtr;
use schema::Field;
use crate::common::bitpacker::BitPacker;
use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeWrite;
use crate::common::CountingWriter;
use crate::directory::WritePtr;
use crate::schema::Field;
use std::io::{self, Write};

/// `FastFieldSerializer` is in charge of serializing
@@ -45,7 +45,7 @@ impl FastFieldSerializer {
        field: Field,
        min_value: u64,
        max_value: u64,
    ) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
    ) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> {
        self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
    }

@@ -56,7 +56,7 @@ impl FastFieldSerializer {
        min_value: u64,
        max_value: u64,
        idx: usize,
    ) -> io::Result<FastSingleFieldSerializer<CountingWriter<WritePtr>>> {
    ) -> io::Result<FastSingleFieldSerializer<'_, CountingWriter<WritePtr>>> {
        let field_write = self.composite_write.for_field_with_idx(field, idx);
        FastSingleFieldSerializer::open(field_write, min_value, max_value)
    }
@@ -66,7 +66,7 @@ impl FastFieldSerializer {
        &mut self,
        field: Field,
        idx: usize,
    ) -> io::Result<FastBytesFieldSerializer<CountingWriter<WritePtr>>> {
    ) -> io::Result<FastBytesFieldSerializer<'_, CountingWriter<WritePtr>>> {
        let field_write = self.composite_write.for_field_with_idx(field, idx);
        FastBytesFieldSerializer::open(field_write)
    }
@@ -79,7 +79,7 @@ impl FastFieldSerializer {
    }
}

pub struct FastSingleFieldSerializer<'a, W: Write + 'a> {
pub struct FastSingleFieldSerializer<'a, W: Write> {
    bit_packer: BitPacker,
    write: &'a mut W,
    min_value: u64,
@@ -127,7 +127,7 @@ impl<'a, W: Write> FastSingleFieldSerializer<'a, W> {
    }
}

pub struct FastBytesFieldSerializer<'a, W: Write + 'a> {
pub struct FastBytesFieldSerializer<'a, W: Write> {
    write: &'a mut W,
}

@@ -1,13 +1,14 @@
use super::multivalued::MultiValueIntFastFieldWriter;
use common;
use common::BinarySerializable;
use common::VInt;
use fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use postings::UnorderedTermId;
use schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::common;
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
use termdict::TermOrdinal;

/// The `FastFieldsWriter` groups all of the fast field writers.
pub struct FastFieldsWriter {
@@ -16,6 +17,14 @@ pub struct FastFieldsWriter {
    bytes_value_writers: Vec<BytesFastFieldWriter>,
}

fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
    match *field_entry.field_type() {
        FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
        FieldType::F64(_) => common::f64_to_u64(0.0f64),
        _ => 0u64,
    }
}

impl FastFieldsWriter {
    /// Create all `FastFieldWriter`s required by the schema.
    pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
@@ -23,18 +32,16 @@ impl FastFieldsWriter {
        let mut multi_values_writers = Vec::new();
        let mut bytes_value_writers = Vec::new();

        for (field_id, field_entry) in schema.fields().iter().enumerate() {
            let field = Field(field_id as u32);
            let default_value = if let FieldType::I64(_) = *field_entry.field_type() {
                common::i64_to_u64(0i64)
            } else {
                0u64
            };
        for (field, field_entry) in schema.fields() {
            match *field_entry.field_type() {
                FieldType::I64(ref int_options) | FieldType::U64(ref int_options) => {
                FieldType::I64(ref int_options)
                | FieldType::U64(ref int_options)
                | FieldType::F64(ref int_options)
                | FieldType::Date(ref int_options) => {
                    match int_options.get_fastfield_cardinality() {
                        Some(Cardinality::SingleValue) => {
                            let mut fast_field_writer = IntFastFieldWriter::new(field);
                            let default_value = fast_field_default_value(field_entry);
                            fast_field_writer.set_val_if_missing(default_value);
                            single_value_writers.push(fast_field_writer);
                        }
@@ -114,7 +121,7 @@ impl FastFieldsWriter {
    pub fn serialize(
        &self,
        serializer: &mut FastFieldSerializer,
        mapping: &HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>,
        mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
    ) -> io::Result<()> {
        for field_writer in &self.single_value_writers {
            field_writer.serialize(serializer)?;
@@ -142,9 +149,9 @@ impl FastFieldsWriter {
/// bitpacked, and the number of bits required for bitpacking
/// can only be known once we have seen all of the values.
///
/// Both u64 and i64 use the same writer.
/// i64 are just remapped to the `0..2^64 - 1` range
/// using `common::i64_to_u64`.
/// u64, i64 and f64 all use the same writer.
/// i64 and f64 are just remapped to the `0..2^64 - 1` range
/// using `common::i64_to_u64` and `common::f64_to_u64`.
pub struct IntFastFieldWriter {
    field: Field,
    vals: Vec<u8>,
@@ -203,8 +210,8 @@ impl IntFastFieldWriter {
    /// Extract the value associated to the fast field for
    /// this document.
    ///
    /// i64 are remapped to u64 using the logic
    /// in `common::i64_to_u64`.
    /// i64 and f64 are remapped to u64 using the logic
    /// in `common::i64_to_u64` and `common::f64_to_u64`.
    ///
    /// If the value is missing, then the default value is used
    /// instead.

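Both the default-value helper and the `lenient` accessors earlier rely on an order-preserving remapping of `i64` (and `f64`) into `u64`. A sketch of the usual trick for the `i64` case; the exact formula of `common::i64_to_u64` is an assumption here, but flipping the sign bit is the standard construction:

```rust
// Maps i64::MIN..=i64::MAX onto 0..=u64::MAX monotonically by flipping the sign bit.
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    assert_eq!(i64_to_u64(i64::MIN), 0);
    assert_eq!(i64_to_u64(0), 1u64 << 63);
    assert!(i64_to_u64(-1) < i64_to_u64(0)); // order preserved across the sign boundary
    assert_eq!(u64_to_i64(i64_to_u64(-42)), -42); // round trip
}
```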
@@ -10,28 +10,263 @@ pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
        .unwrap_or_else(|idx| idx - 1) as u8
}

#[cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))]
pub const FIELD_NORMS_TABLE: [u32; 256] = [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
    26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 44, 46, 48, 50, 52, 54, 56, 60,
    64, 68, 72, 76, 80, 84, 88, 96, 104, 112, 120, 128, 136, 144, 152, 168, 184, 200, 216, 232,
    248, 264, 280, 312, 344, 376, 408, 440, 472, 504, 536, 600, 664, 728, 792, 856, 920, 984,
    1_048, 1176, 1304, 1432, 1560, 1688, 1816, 1944, 2072, 2328, 2584, 2840, 3096, 3352, 3608,
    3864, 4120, 4632, 5144, 5656, 6168, 6680, 7192, 7704, 8216, 9240, 10264, 11288, 12312, 13336,
    14360, 15384, 16408, 18456, 20504, 22552, 24600, 26648, 28696, 30744, 32792, 36888, 40984,
    45080, 49176, 53272, 57368, 61464, 65560, 73752, 81944, 90136, 98328, 106520, 114712, 122904,
    131096, 147480, 163864, 180248, 196632, 213016, 229400, 245784, 262168, 294936, 327704, 360472,
    393240, 426008, 458776, 491544, 524312, 589848, 655384, 720920, 786456, 851992, 917528, 983064,
    1048600, 1179672, 1310744, 1441816, 1572888, 1703960, 1835032, 1966104, 2097176, 2359320,
    2621464, 2883608, 3145752, 3407896, 3670040, 3932184, 4194328, 4718616, 5242904, 5767192,
    6291480, 6815768, 7340056, 7864344, 8388632, 9437208, 10485784, 11534360, 12582936, 13631512,
    14680088, 15728664, 16777240, 18874392, 20971544, 23068696, 25165848, 27263000, 29360152,
    31457304, 33554456, 37748760, 41943064, 46137368, 50331672, 54525976, 58720280, 62914584,
    67108888, 75497496, 83886104, 92274712, 100663320, 109051928, 117440536, 125829144, 134217752,
    150994968, 167772184, 184549400, 201326616, 218103832, 234881048, 251658264, 268435480,
    301989912, 335544344, 369098776, 402653208, 436207640, 469762072, 503316504, 536870936,
    603979800, 671088664, 738197528, 805306392, 872415256, 939524120, 1006632984, 1073741848,
    1207959576, 1342177304, 1476395032, 1610612760, 1744830488, 1879048216, 2013265944,
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23,
    24, 25, 26, 27, 28, 29, 30, 31,
    32, 33, 34, 35, 36, 37, 38, 39,
    40, 42, 44, 46, 48, 50, 52, 54,
    56, 60, 64, 68, 72, 76, 80, 84,
    88, 96, 104, 112, 120, 128, 136, 144,
    152, 168, 184, 200, 216, 232, 248, 264,
    280, 312, 344, 376, 408, 440, 472, 504,
    536, 600, 664, 728, 792, 856, 920, 984,
    1_048, 1_176, 1_304, 1_432, 1_560, 1_688, 1_816, 1_944,
    2_072, 2_328, 2_584, 2_840, 3_096, 3_352, 3_608, 3_864,
    4_120, 4_632, 5_144, 5_656, 6_168, 6_680, 7_192, 7_704,
    8_216, 9_240, 10_264, 11_288, 12_312, 13_336, 14_360, 15_384,
    16_408, 18_456, 20_504, 22_552, 24_600, 26_648, 28_696, 30_744,
    32_792, 36_888, 40_984, 45_080, 49_176, 53_272, 57_368, 61_464,
    65_560, 73_752, 81_944, 90_136, 98_328, 106_520, 114_712, 122_904,
    131_096, 147_480, 163_864, 180_248, 196_632, 213_016, 229_400, 245_784,
    262_168, 294_936, 327_704, 360_472, 393_240, 426_008, 458_776, 491_544,
    524_312, 589_848, 655_384, 720_920, 786_456, 851_992, 917_528, 983_064,
    1_048_600, 1_179_672, 1_310_744, 1_441_816, 1_572_888, 1_703_960, 1_835_032, 1_966_104,
    2_097_176, 2_359_320, 2_621_464, 2_883_608, 3_145_752, 3_407_896, 3_670_040, 3_932_184,
    4_194_328, 4_718_616, 5_242_904, 5_767_192, 6_291_480, 6_815_768, 7_340_056, 7_864_344,
    8_388_632, 9_437_208, 10_485_784, 11_534_360, 12_582_936, 13_631_512, 14_680_088, 15_728_664,
    16_777_240, 18_874_392, 20_971_544, 23_068_696, 25_165_848, 27_263_000, 29_360_152, 31_457_304,
    33_554_456, 37_748_760, 41_943_064, 46_137_368, 50_331_672, 54_525_976, 58_720_280, 62_914_584,
    67_108_888, 75_497_496, 83_886_104, 92_274_712, 100_663_320, 109_051_928, 117_440_536, 125_829_144,
    134_217_752, 150_994_968, 167_772_184, 184_549_400, 201_326_616, 218_103_832, 234_881_048, 251_658_264,
    268_435_480, 301_989_912, 335_544_344, 369_098_776, 402_653_208, 436_207_640, 469_762_072, 503_316_504,
    536_870_936, 603_979_800, 671_088_664, 738_197_528, 805_306_392, 872_415_256, 939_524_120, 1_006_632_984,
    1_073_741_848, 1_207_959_576, 1_342_177_304, 1_476_395_032, 1_610_612_760, 1_744_830_488, 1_879_048_216, 2_013_265_944,
];

#[cfg(test)]

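The table above drives a lossy one-byte compression of fieldnorms: `id_to_fieldnorm` is a plain lookup, and `fieldnorm_to_id` finds the largest entry not exceeding the fieldnorm, which is exactly what the `binary_search` plus `unwrap_or_else(|idx| idx - 1)` at the top of the hunk computes. A minimal sketch on a toy table:

```rust
fn id_to_fieldnorm(table: &[u32], id: u8) -> u32 {
    table[id as usize]
}

fn fieldnorm_to_id(table: &[u32], fieldnorm: u32) -> u8 {
    // Err(idx) is the insertion point, so idx - 1 is the largest entry <= fieldnorm.
    table
        .binary_search(&fieldnorm)
        .unwrap_or_else(|idx| idx - 1) as u8
}

fn main() {
    let table = [0u32, 1, 2, 4, 8, 16]; // toy table; the real one has 256 entries
    assert_eq!(fieldnorm_to_id(&table, 4), 3); // exact hit
    assert_eq!(fieldnorm_to_id(&table, 5), 3); // rounded down to 4
    assert_eq!(id_to_fieldnorm(&table, 3), 4);
}
```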
@@ -21,7 +21,7 @@ mod reader;
mod serializer;
mod writer;

pub use self::reader::FieldNormReader;
pub use self::reader::{FieldNormReader, FieldNormReaders};
pub use self::serializer::FieldNormsSerializer;
pub use self::writer::FieldNormsWriter;

@@ -1,6 +1,41 @@
use super::{fieldnorm_to_id, id_to_fieldnorm};
use directory::ReadOnlySource;
use DocId;
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource;
use crate::schema::Field;
use crate::space_usage::PerFieldSpaceUsage;
use crate::DocId;
use std::sync::Arc;

/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
/// field) of all indexed fields in the index.
///
/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
/// `fieldnorm_id`.
/// The mapping from `fieldnorm` to `fieldnorm_id` is monotonic.
#[derive(Clone)]
pub struct FieldNormReaders {
    data: Arc<CompositeFile>,
}

impl FieldNormReaders {
    /// Creates a field norm reader.
    pub fn open(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
        let data = CompositeFile::open(&source)?;
        Ok(FieldNormReaders {
            data: Arc::new(data),
        })
    }

    /// Returns the FieldNormReader for a specific field.
    pub fn get_field(&self, field: Field) -> Option<FieldNormReader> {
        self.data.open_read(field).map(FieldNormReader::open)
    }

    /// Returns a breakdown of the space usage per field.
    pub fn space_usage(&self) -> PerFieldSpaceUsage {
        self.data.space_usage()
    }
}

/// Reads the fieldnorm associated to a document.
/// The fieldnorm represents the length associated to
@@ -19,6 +54,7 @@ use DocId;
/// Apart from compression, this scale also makes it possible to
/// precompute computationally expensive functions of the fieldnorm
/// in a very short array.
#[derive(Clone)]
pub struct FieldNormReader {
    data: ReadOnlySource,
}
@@ -29,6 +65,11 @@ impl FieldNormReader {
        FieldNormReader { data }
    }

    /// Returns the number of documents in this segment.
    pub fn num_docs(&self) -> u32 {
        self.data.len() as u32
    }

    /// Returns the `fieldnorm` associated to a doc id.
    /// The fieldnorm is a value approximating the number
    /// of tokens in a given field of the `doc_id`.
@@ -62,13 +103,12 @@ impl FieldNormReader {
    pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
        fieldnorm_to_id(fieldnorm)
    }
}

#[cfg(test)]
impl From<Vec<u32>> for FieldNormReader {
    fn from(field_norms: Vec<u32>) -> FieldNormReader {
    #[cfg(test)]
    pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
        let field_norms_id = field_norms
            .into_iter()
            .iter()
            .cloned()
            .map(FieldNormReader::fieldnorm_to_id)
            .collect::<Vec<u8>>();
        let field_norms_data = ReadOnlySource::from(field_norms_id);
@@ -77,3 +117,20 @@ impl From<Vec<u32>> for FieldNormReader {
    }
}
}

#[cfg(test)]
mod tests {
    use crate::fieldnorm::FieldNormReader;

    #[test]
    fn test_from_fieldnorms_array() {
        let fieldnorms = &[1, 2, 3, 4, 1_000_000];
        let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
        assert_eq!(fieldnorm_reader.num_docs(), 5);
        assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
        assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
        assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
        assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
        assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
    }
}

@@ -1,6 +1,6 @@
use common::CompositeWrite;
use directory::WritePtr;
use schema::Field;
use crate::common::CompositeWrite;
use crate::directory::WritePtr;
use crate::schema::Field;
use std::io;
use std::io::Write;

@@ -1,9 +1,9 @@
use DocId;
use crate::DocId;

use super::fieldnorm_to_id;
use super::FieldNormsSerializer;
use schema::Field;
use schema::Schema;
use crate::schema::Field;
use crate::schema::Schema;
use std::io;

/// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
@@ -22,11 +22,14 @@ impl FieldNormsWriter {
    pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> {
        schema
            .fields()
            .iter()
            .enumerate()
            .filter(|&(_, field_entry)| field_entry.is_indexed())
            .map(|(field, _)| Field(field as u32))
            .collect::<Vec<Field>>()
            .filter_map(|(field, field_entry)| {
                if field_entry.is_indexed() {
                    Some(field)
                } else {
                    None
                }
            })
            .collect::<Vec<_>>()
    }

    /// Initialize with state for tracking the field norm fields
@@ -35,7 +38,7 @@ impl FieldNormsWriter {
        let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
        let max_field = fields
            .iter()
            .map(|field| field.0)
            .map(Field::field_id)
            .max()
            .map(|max_field_id| max_field_id as usize + 1)
            .unwrap_or(0);
@@ -50,8 +53,8 @@ impl FieldNormsWriter {
    ///
    /// Will extend with 0-bytes for documents that have not been seen.
    pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
        for &field in self.fields.iter() {
            self.fieldnorms_buffer[field.0 as usize].resize(max_doc as usize, 0u8);
        for field in self.fields.iter() {
            self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8);
        }
    }

@@ -64,7 +67,7 @@ impl FieldNormsWriter {
    /// * field - the field being set
    /// * fieldnorm - the number of terms present in document `doc` in field `field`
    pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
        let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.0 as usize];
        let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize];
        assert!(
            fieldnorm_buffer.len() <= doc as usize,
            "Cannot register a given fieldnorm twice"
@@ -75,11 +78,12 @@ impl FieldNormsWriter {
    }

    /// Serialize the seen fieldnorm values to the serializer for all fields.
    pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
    pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
        for &field in self.fields.iter() {
            let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.0 as usize][..];
            let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
            fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
        }
        fieldnorms_serializer.close()?;
        Ok(())
    }
}

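The writer above keeps one `Vec<u8>` per field, one byte per document, recorded in increasing doc order and 0-filled for missing docs. A standalone sketch of that buffer discipline (the compression function is a simplified stand-in, not the table-based one):

```rust
fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
    // stand-in for the table-based compression shown earlier
    (32 - fieldnorm.leading_zeros()).min(255) as u8
}

struct FieldNormsBuffer {
    bytes: Vec<u8>, // one byte per doc, in doc order
}

impl FieldNormsBuffer {
    fn record(&mut self, doc: u32, fieldnorm: u32) {
        assert!(self.bytes.len() <= doc as usize, "fieldnorm recorded twice");
        self.bytes.resize(doc as usize, 0u8); // 0-fill docs without this field
        self.bytes.push(fieldnorm_to_id(fieldnorm));
    }
}

fn main() {
    let mut buffer = FieldNormsBuffer { bytes: Vec::new() };
    buffer.record(0, 3);
    buffer.record(2, 10); // doc 1 had no tokens in this field
    assert_eq!(buffer.bytes.len(), 3);
    assert_eq!(buffer.bytes[1], 0);
}
```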
@@ -1,10 +1,10 @@
use rand::thread_rng;
use std::collections::HashSet;

use crate::schema::*;
use crate::Index;
use crate::Searcher;
use rand::Rng;
use schema::*;
use Index;
use Searcher;

fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
    assert!(searcher.segment_readers().len() < 20);
@@ -13,15 +13,15 @@ fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {

#[test]
#[ignore]
#[cfg(feature = "mmap")]
fn test_indexing() {
    let mut schema_builder = Schema::builder();

    let id_field = schema_builder.add_u64_field("id", INT_INDEXED);
    let multiples_field = schema_builder.add_u64_field("multiples", INT_INDEXED);
    let id_field = schema_builder.add_u64_field("id", INDEXED);
    let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
    let schema = schema_builder.build();

    let index = Index::create_from_tempdir(schema).unwrap();
    let reader = index.reader().unwrap();

    let mut rng = thread_rng();

@@ -36,8 +36,8 @@ fn test_indexing() {
            index_writer.commit().expect("Commit failed");
            committed_docs.extend(&uncommitted_docs);
            uncommitted_docs.clear();
            index.load_searchers().unwrap();
            let searcher = index.searcher();
            reader.reload().unwrap();
            let searcher = reader.searcher();
            // check that everything is correct.
            check_index_content(&searcher, &committed_docs);
        } else {

@@ -1,7 +1,8 @@
use super::operation::DeleteOperation;
use crate::Opstamp;
use std::mem;
use std::ops::DerefMut;
use std::sync::{Arc, RwLock};
use std::sync::{Arc, RwLock, Weak};

// The DeleteQueue is conceptually similar to a multiple-
// consumer, single-producer broadcast channel.
@@ -13,17 +14,18 @@ use std::sync::{Arc, RwLock};
//
// New consumers can be created in two ways:
// - calling `delete_queue.cursor()` returns a cursor, that
//   will include all future delete operations (and no past operations).
//   will include all future delete operations (and some or none
//   of the past operations... The client is in charge of checking the opstamps.).
// - cloning an existing cursor returns a new cursor, that
//   is at the exact same position, and can now advance independently
//   from the original cursor.
#[derive(Default)]
struct InnerDeleteQueue {
    writer: Vec<DeleteOperation>,
    last_block: Option<Arc<Block>>,
    last_block: Weak<Block>,
}

#[derive(Clone, Default)]
#[derive(Clone)]
pub struct DeleteQueue {
    inner: Arc<RwLock<InnerDeleteQueue>>,
}
@@ -31,20 +33,31 @@ pub struct DeleteQueue {
impl DeleteQueue {
    // Creates a new delete queue.
    pub fn new() -> DeleteQueue {
        let delete_queue = DeleteQueue {
        DeleteQueue {
            inner: Arc::default(),
        };
        }

        let next_block = NextBlock::from(delete_queue.clone());
        {
            let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
            delete_queue_wlock.last_block = Some(Arc::new(Block {
                operations: Arc::default(),
                next: next_block,
            }));
        }
    }

        delete_queue
    fn get_last_block(&self) -> Arc<Block> {
        {
            // try to get the last block by simply acquiring the read lock.
            let rlock = self.inner.read().unwrap();
            if let Some(block) = rlock.last_block.upgrade() {
                return block;
            }
        }
        // It failed. Let's double-check after acquiring the write lock, as someone could have
        // called `get_last_block` right after we released the rlock.
        let mut wlock = self.inner.write().unwrap();
        if let Some(block) = wlock.last_block.upgrade() {
            return block;
        }
        let block = Arc::new(Block {
            operations: Arc::default(),
            next: NextBlock::from(self.clone()),
        });
        wlock.last_block = Arc::downgrade(&block);
        block
    }

    // Creates a new cursor that makes it possible to
@@ -52,17 +65,7 @@ impl DeleteQueue {
    //
    // Past delete operations are not accessible.
    pub fn cursor(&self) -> DeleteCursor {
        let last_block = self
            .inner
            .read()
            .expect("Read lock poisoned when opening delete queue cursor")
            .last_block
            .clone()
            .expect(
                "Failed to unwrap last_block. This should never happen
                as the Option<> is only here to make
                initialization possible",
            );
        let last_block = self.get_last_block();
        let operations_len = last_block.operations.len();
        DeleteCursor {
            block: last_block,
@@ -98,23 +101,19 @@ impl DeleteQueue {
            .write()
            .expect("Failed to acquire write lock on delete queue writer");

        let delete_operations;
        {
            let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
            if writer.is_empty() {
                return None;
            }
            delete_operations = mem::replace(writer, vec![]);
        if self_wlock.writer.is_empty() {
            return None;
        }

        let next_block = NextBlock::from(self.clone());
        {
            self_wlock.last_block = Some(Arc::new(Block {
                operations: Arc::new(delete_operations),
                next: next_block,
            }));
        }
        self_wlock.last_block.clone()
        let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);

        let new_block = Arc::new(Block {
            operations: Arc::new(delete_operations.into_boxed_slice()),
            next: NextBlock::from(self.clone()),
        });

        self_wlock.last_block = Arc::downgrade(&new_block);
        Some(new_block)
    }
}

@@ -168,7 +167,7 @@ impl NextBlock {
}

struct Block {
    operations: Arc<Vec<DeleteOperation>>,
    operations: Arc<Box<[DeleteOperation]>>,
    next: NextBlock,
}

@@ -184,7 +183,7 @@ impl DeleteCursor {
    /// queue are consumed and the next get will return None.
    /// - the next get will return the first operation with an
    ///   `opstamp >= target_opstamp`.
    pub fn skip_to(&mut self, target_opstamp: u64) {
    pub fn skip_to(&mut self, target_opstamp: Opstamp) {
        // TODO Can be optimized as we work with blocks.
        while self.is_behind_opstamp(target_opstamp) {
            self.advance();
@@ -192,7 +191,7 @@ impl DeleteCursor {
    }

    #[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
    fn is_behind_opstamp(&mut self, target_opstamp: u64) -> bool {
    fn is_behind_opstamp(&mut self, target_opstamp: Opstamp) -> bool {
        self.get()
            .map(|operation| operation.opstamp < target_opstamp)
            .unwrap_or(false)
@@ -249,14 +248,14 @@ impl DeleteCursor {
mod tests {

    use super::{DeleteOperation, DeleteQueue};
    use schema::{Field, Term};
    use crate::schema::{Field, Term};

    #[test]
    fn test_deletequeue() {
        let delete_queue = DeleteQueue::new();

        let make_op = |i: usize| {
            let field = Field(1u32);
            let field = Field::from_field_id(1u32);
            DeleteOperation {
                opstamp: i as u64,
                term: Term::from_field_u64(field, i as u64),

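The `Weak<Block>` change above lets a fully-consumed tail block be dropped while keeping `get_last_block` cheap: try the read lock first, then double-check under the write lock before allocating a replacement. A standalone sketch of that pattern:

```rust
use std::sync::{Arc, RwLock, Weak};

#[derive(Default)]
struct Registry {
    last: RwLock<Weak<Vec<u64>>>,
}

impl Registry {
    fn get_or_create(&self) -> Arc<Vec<u64>> {
        if let Some(block) = self.last.read().unwrap().upgrade() {
            return block; // fast path: read lock only
        }
        let mut slot = self.last.write().unwrap();
        if let Some(block) = slot.upgrade() {
            return block; // another thread won the race between our two locks
        }
        let block = Arc::new(Vec::new());
        *slot = Arc::downgrade(&block); // the registry itself keeps no strong count
        block
    }
}

fn main() {
    let registry = Registry::default();
    let a = registry.get_or_create();
    let b = registry.get_or_create();
    assert!(Arc::ptr_eq(&a, &b));
}
```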
@@ -1,5 +1,5 @@
use std::sync::Arc;
use DocId;
use crate::DocId;
use crate::Opstamp;

// Doc to opstamp is used to identify which
// documents should be deleted.
@@ -17,25 +17,25 @@ use DocId;
// This mapping is (for the moment) strictly increasing
// because of the way document ids are allocated.
#[derive(Clone)]
pub enum DocToOpstampMapping {
    WithMap(Arc<Vec<u64>>),
pub enum DocToOpstampMapping<'a> {
    WithMap(&'a [Opstamp]),
    None,
}

impl From<Vec<u64>> for DocToOpstampMapping {
    fn from(opstamps: Vec<u64>) -> DocToOpstampMapping {
        DocToOpstampMapping::WithMap(Arc::new(opstamps))
impl<'a> From<&'a [u64]> for DocToOpstampMapping<'a> {
    fn from(opstamps: &[Opstamp]) -> DocToOpstampMapping {
        DocToOpstampMapping::WithMap(opstamps)
    }
}

impl DocToOpstampMapping {
impl<'a> DocToOpstampMapping<'a> {
    /// Given an opstamp, return the limit doc id L
    /// such that for all doc ids D,
    // D >= L iff opstamp(D) >= `target_opstamp`.
    //
    // The edge case where the opstamp equals some doc opstamp is in practice
    // never hit.
    pub fn compute_doc_limit(&self, target_opstamp: u64) -> DocId {
    pub fn compute_doc_limit(&self, target_opstamp: Opstamp) -> DocId {
        match *self {
            DocToOpstampMapping::WithMap(ref doc_opstamps) => {
                match doc_opstamps.binary_search(&target_opstamp) {
@@ -64,17 +64,18 @@ mod tests {
    #[test]
    fn test_doc_to_opstamp_mapping_complex() {
        {
            let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![]);
            let doc_to_opstamp_mapping = DocToOpstampMapping::from(&[][..]);
            assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
            assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 0);
        }
        {
            let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![1u64]);
            let doc_to_opstamp_mapping = DocToOpstampMapping::from(&[1u64][..]);
            assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
            assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 1);
        }
        {
            let doc_to_opstamp_mapping = DocToOpstampMapping::from(vec![1u64, 12u64, 17u64, 23u64]);
            let doc_to_opstamp_mapping =
                DocToOpstampMapping::from(&[1u64, 12u64, 17u64, 23u64][..]);
            assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
            for i in 2u64..13u64 {
                assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 1);

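`compute_doc_limit` exploits the (for now) increasing doc-to-opstamp array: a binary search yields the first doc whose opstamp reaches the target. A sketch consistent with the tests above; the treatment of an exact hit is an assumption, since the source calls it an edge case that is never hit in practice:

```rust
fn compute_doc_limit(doc_opstamps: &[u64], target_opstamp: u64) -> u32 {
    match doc_opstamps.binary_search(&target_opstamp) {
        Ok(doc) | Err(doc) => doc as u32,
    }
}

fn main() {
    let opstamps = [1u64, 12, 17, 23];
    assert_eq!(compute_doc_limit(&opstamps, 0), 0);  // no doc has opstamp < 0
    assert_eq!(compute_doc_limit(&opstamps, 2), 1);  // only doc 0 (opstamp 1) stays below
    assert_eq!(compute_doc_limit(&opstamps, 24), 4); // all docs fall below the target
}
```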
File diff suppressed because it is too large.
@@ -1,17 +1,19 @@
|
||||
use super::merge_policy::{MergeCandidate, MergePolicy};
|
||||
use core::SegmentMeta;
|
||||
use crate::core::SegmentMeta;
|
||||
use std::cmp;
|
||||
use std::f64;
|
||||
|
||||
const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75;
|
||||
const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
|
||||
const DEFAULT_MIN_MERGE_SIZE: usize = 8;
|
||||
const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;
|
||||
|
||||
/// `LogMergePolicy` tries tries to merge segments that have a similar number of
|
||||
/// documents.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LogMergePolicy {
|
||||
min_merge_size: usize,
|
||||
max_merge_size: usize,
|
||||
min_layer_size: u32,
|
||||
level_log_size: f64,
|
||||
}
|
||||
@@ -26,6 +28,12 @@ impl LogMergePolicy {
|
||||
self.min_merge_size = min_merge_size;
|
||||
}
|
||||
|
||||
/// Set the maximum number docs in a segment for it to be considered for
|
||||
/// merging.
|
||||
pub fn set_max_merge_size(&mut self, max_merge_size: usize) {
|
||||
self.max_merge_size = max_merge_size;
|
||||
}
|
||||
|
||||
/// Set the minimum segment size under which all segment belong
|
||||
/// to the same level.
|
||||
pub fn set_min_layer_size(&mut self, min_layer_size: u32) {
|
||||
@@ -46,39 +54,44 @@ impl LogMergePolicy {

 impl MergePolicy for LogMergePolicy {
     fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
-        if segments.is_empty() {
-            return Vec::new();
-        }
-
         let mut size_sorted_tuples = segments
             .iter()
-            .map(|x| x.num_docs())
+            .map(SegmentMeta::num_docs)
+            .filter(|s| s <= &(self.max_merge_size as u32))
             .enumerate()
             .collect::<Vec<(usize, u32)>>();

         size_sorted_tuples.sort_by(|x, y| y.1.cmp(&(x.1)));

-        if size_sorted_tuples.len() <= 1 {
-            return Vec::new();
-        }
-
         let size_sorted_log_tuples: Vec<_> = size_sorted_tuples
             .into_iter()
             .map(|(ind, num_docs)| (ind, f64::from(self.clip_min_size(num_docs)).log2()))
             .collect();

-        let (first_ind, first_score) = size_sorted_log_tuples[0];
-        let mut current_max_log_size = first_score;
-        let mut levels = vec![vec![first_ind]];
-        for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
-            if score < (current_max_log_size - self.level_log_size) {
-                current_max_log_size = score;
-                levels.push(Vec::new());
+        if let Some(&(first_ind, first_score)) = size_sorted_log_tuples.first() {
+            let mut current_max_log_size = first_score;
+            let mut levels = vec![vec![first_ind]];
+            for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
+                if score < (current_max_log_size - self.level_log_size) {
+                    current_max_log_size = score;
+                    levels.push(Vec::new());
+                }
+                levels.last_mut().unwrap().push(ind);
             }
-            levels.last_mut().unwrap().push(ind);
+            levels
+                .iter()
+                .filter(|level| level.len() >= self.min_merge_size)
+                .map(|ind_vec| {
+                    MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect())
+                })
+                .collect()
+        } else {
+            return vec![];
         }
-
-        levels
-            .iter()
-            .filter(|level| level.len() >= self.min_merge_size)
-            .map(|ind_vec| MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect()))
-            .collect()
     }
 }
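What the rewritten body computes, in isolation: segments are sorted by size descending and split into levels whenever log2(size) drops more than level_log_size below the largest segment of the current level. A standalone sketch of just that bucketing step; the real policy additionally clips sizes to min_layer_size via clip_min_size so all tiny segments share the bottom level:

// Standalone sketch of the level-bucketing step in compute_merge_candidates.
// Input sizes must be sorted in decreasing order; a new level starts whenever
// log2(size) drops more than `level_log_size` below the largest segment of
// the current level.
fn bucket_levels(sorted_sizes: &[u32], level_log_size: f64) -> Vec<Vec<u32>> {
    let mut levels: Vec<Vec<u32>> = Vec::new();
    let mut current_max_log_size = f64::MAX;
    for &size in sorted_sizes {
        let score = f64::from(size).log2();
        if score < current_max_log_size - level_log_size {
            current_max_log_size = score;
            levels.push(Vec::new());
        }
        levels.last_mut().unwrap().push(size);
    }
    levels
}

fn main() {
    // log2(10_000) ~ 13.3, log2(1_000) ~ 10.0, log2(10) ~ 3.3: each gap
    // exceeds 0.75, so these sizes split into three levels.
    let levels = bucket_levels(&[10_000, 10_000, 1_000, 1_000, 10, 10], 0.75);
    assert_eq!(levels.len(), 3);
}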
@@ -86,6 +99,7 @@ impl Default for LogMergePolicy {
     fn default() -> LogMergePolicy {
         LogMergePolicy {
             min_merge_size: DEFAULT_MIN_MERGE_SIZE,
+            max_merge_size: DEFAULT_MAX_MERGE_SIZE,
             min_layer_size: DEFAULT_MIN_LAYER_SIZE,
             level_log_size: DEFAULT_LEVEL_LOG_SIZE,
         }
@@ -95,12 +109,16 @@ impl Default for LogMergePolicy {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use core::{SegmentId, SegmentMeta};
-    use indexer::merge_policy::MergePolicy;
+    use crate::core::{SegmentId, SegmentMeta, SegmentMetaInventory};
+    use crate::indexer::merge_policy::MergePolicy;
+    use once_cell::sync::Lazy;
+
+    static INVENTORY: Lazy<SegmentMetaInventory> = Lazy::new(SegmentMetaInventory::default);

     fn test_merge_policy() -> LogMergePolicy {
         let mut log_merge_policy = LogMergePolicy::default();
         log_merge_policy.set_min_merge_size(3);
+        log_merge_policy.set_max_merge_size(100_000);
         log_merge_policy.set_min_layer_size(2);
         log_merge_policy
     }
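The test module now draws its SegmentMetas from a shared SegmentMetaInventory, initialized lazily through once_cell. A minimal sketch of the same Lazy-static pattern, with a hypothetical fixture type standing in for SegmentMetaInventory:

use once_cell::sync::Lazy;

// Sketch of the Lazy-static pattern behind INVENTORY above. Requires the
// `once_cell` crate that the test module imports.
#[derive(Default)]
struct Fixture {
    label: String,
}

static FIXTURE: Lazy<Fixture> = Lazy::new(Fixture::default);

fn main() {
    // The initializer runs exactly once, on first access, even if several
    // threads race to dereference the static.
    assert_eq!(FIXTURE.label, "");
}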
@@ -113,7 +131,7 @@ mod tests {
     }

     fn create_random_segment_meta(num_docs: u32) -> SegmentMeta {
-        SegmentMeta::new(SegmentId::generate_random(), num_docs)
+        INVENTORY.new_segment_meta(SegmentId::generate_random(), num_docs)
     }

     #[test]
@@ -138,11 +156,11 @@ mod tests {
             create_random_segment_meta(10),
             create_random_segment_meta(10),
             create_random_segment_meta(10),
-            create_random_segment_meta(1000),
-            create_random_segment_meta(1000),
-            create_random_segment_meta(1000),
-            create_random_segment_meta(10000),
-            create_random_segment_meta(10000),
+            create_random_segment_meta(1_000),
+            create_random_segment_meta(1_000),
+            create_random_segment_meta(1_000),
+            create_random_segment_meta(10_000),
+            create_random_segment_meta(10_000),
             create_random_segment_meta(10),
             create_random_segment_meta(10),
             create_random_segment_meta(10),
@@ -165,6 +183,7 @@ mod tests {
         let result_list = test_merge_policy().compute_merge_candidates(&test_input);
         assert_eq!(result_list.len(), 2);
     }
+
     #[test]
     fn test_log_merge_policy_small_segments() {
         // segments under min_layer_size are merged together
@@ -179,4 +198,30 @@ mod tests {
         let result_list = test_merge_policy().compute_merge_candidates(&test_input);
         assert_eq!(result_list.len(), 1);
     }
+
+    #[test]
+    fn test_log_merge_policy_all_segments_too_large_to_merge() {
+        let eight_large_segments: Vec<SegmentMeta> =
+            std::iter::repeat_with(|| create_random_segment_meta(100_001))
+                .take(8)
+                .collect();
+        assert!(test_merge_policy()
+            .compute_merge_candidates(&eight_large_segments)
+            .is_empty());
+    }
+
+    #[test]
+    fn test_large_merge_segments() {
+        let test_input = vec![
+            create_random_segment_meta(1_000_000),
+            create_random_segment_meta(100_001),
+            create_random_segment_meta(100_000),
+            create_random_segment_meta(100_000),
+            create_random_segment_meta(100_000),
+        ];
+        let result_list = test_merge_policy().compute_merge_candidates(&test_input);
+        // Do not include large segments
+        assert_eq!(result_list.len(), 1);
+        assert_eq!(result_list[0].0.len(), 3)
+    }
 }
@@ -1,14 +1,24 @@
+use crate::Opstamp;
+use crate::SegmentId;
 use census::{Inventory, TrackedObject};
 use std::collections::HashSet;
-use SegmentId;
+use std::ops::Deref;

 #[derive(Default)]
-pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);
+pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
+
+impl Deref for MergeOperationInventory {
+    type Target = Inventory<InnerMergeOperation>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}

 impl MergeOperationInventory {
     pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
         let mut segment_in_merge = HashSet::default();
-        for merge_op in self.0.list() {
+        for merge_op in self.list() {
             for &segment_id in &merge_op.segment_ids {
                 segment_in_merge.insert(segment_id);
             }
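The new Deref impl lets the rest of the file call Inventory methods directly on the wrapper (self.list() instead of self.0.list(), and inventory.track(..) below instead of inventory.0.track(..)). A generic sketch of the newtype-plus-Deref forwarding pattern:

use std::ops::Deref;

// Generic sketch of the forwarding pattern used by MergeOperationInventory.
struct Wrapper(Vec<u32>);

impl Deref for Wrapper {
    type Target = Vec<u32>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let w = Wrapper(vec![1, 2, 3]);
    // Methods of the inner Vec are reachable directly through the wrapper.
    assert_eq!(w.len(), 3);
    assert_eq!(w.iter().sum::<u32>(), 6);
}

Deref on non-pointer types is sometimes discouraged for public APIs; here the wrapper stays pub(crate), which keeps the shortcut internal to the crate.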
@@ -17,8 +27,8 @@ impl MergeOperationInventory {
     }
 }

-/// A `MergeOperation` has two role.
-/// It carries all of the information required to describe a merge :
+/// A `MergeOperation` has two roles.
+/// It carries all of the information required to describe a merge:
 /// - `target_opstamp` is the opstamp up to which we want to consume the
 ///   delete queue and reflect their deletes.
 /// - `segment_ids` is the list of segments to be merged.
@@ -34,15 +44,15 @@ pub struct MergeOperation {
     inner: TrackedObject<InnerMergeOperation>,
 }

-struct InnerMergeOperation {
-    target_opstamp: u64,
+pub(crate) struct InnerMergeOperation {
+    target_opstamp: Opstamp,
     segment_ids: Vec<SegmentId>,
 }

 impl MergeOperation {
-    pub fn new(
+    pub(crate) fn new(
         inventory: &MergeOperationInventory,
-        target_opstamp: u64,
+        target_opstamp: Opstamp,
         segment_ids: Vec<SegmentId>,
     ) -> MergeOperation {
         let inner_merge_operation = InnerMergeOperation {
@@ -50,11 +60,11 @@ impl MergeOperation {
             segment_ids,
         };
         MergeOperation {
-            inner: inventory.0.track(inner_merge_operation),
+            inner: inventory.track(inner_merge_operation),
         }
     }

-    pub fn target_opstamp(&self) -> u64 {
+    pub fn target_opstamp(&self) -> Opstamp {
         self.inner.target_opstamp
     }

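The recurring u64 -> Opstamp substitutions in this diff are type-alias hygiene rather than a behavior change: tantivy declares Opstamp as a plain alias for u64, so the compiled signatures stay identical while the intent becomes explicit. Sketch:

// Opstamp is a plain alias in tantivy (pub type Opstamp = u64;), so the
// u64 -> Opstamp edits in this diff document intent without changing ABI.
pub type Opstamp = u64;

fn target_opstamp() -> Opstamp {
    42
}

fn main() {
    // An Opstamp interoperates freely with u64.
    let stamp: u64 = target_opstamp();
    assert_eq!(stamp, 42);
}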
@@ -1,5 +1,5 @@
-use core::SegmentId;
-use core::SegmentMeta;
+use crate::core::SegmentId;
+use crate::core::SegmentMeta;
 use std::fmt::Debug;
 use std::marker;

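The use core::... -> use crate::core::... edits here and in the earlier files are the Rust 2018 edition migration: under the 2015 edition, a bare path in a use statement resolved relative to the crate root and so hit the local core module, while 2018 resolves it to the built-in core crate and requires an explicit crate:: prefix for crate-relative imports. A minimal sketch, written as a crate root:

// Sketch of the Rust 2018 path change behind these import edits.
// In a crate with a local module named `core`:
mod core {
    pub struct SegmentMeta;
}

// The 2015 edition accepted `use core::SegmentMeta;` for the local module;
// the 2018 edition requires the crate-relative form:
use crate::core::SegmentMeta;

fn main() {
    let _meta = SegmentMeta;
}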
@@ -39,8 +39,8 @@ impl MergePolicy for NoMergePolicy {
 pub mod tests {

     use super::*;
-    use core::SegmentId;
-    use core::SegmentMeta;
+    use crate::core::SegmentId;
+    use crate::core::SegmentMeta;

     /// `MergePolicy` useful for test purposes.
     ///
Some files were not shown because too many files have changed in this diff.