Compare commits: issue/1251...0.18.1 (141 commits)
Commit SHAs (the author, date, and commit message columns were not captured in this export):

0928597a43, f72abe9b9c, f0a2b1cc44, fcfdc44c61, 3171f0b9ba, 89e19f14b5, 1a6a1396cd, e766375700,
496b4a4fdb, 93cc8498b3, 0aa3d63a9f, 4e2a053b69, 71c4393ec4, b2e97e266a, 9ee4772140, c95013b11e,
fc045e6bf9, 6837a4d468, 0759bf9448, 152e8238d7, d4e5b48437, 03040ed81d, aaa22ad225, 3223bdf254,
cbd06ab189, 749395bbb8, 617ba1f0c0, 2f1cd7e7f0, 58c0cb5fc4, 7f45a6ac96, 0ade871126, aab65490c9,
d77e8de36a, d11a8cce26, bc607a921b, 1273f33338, e30449743c, ed26552296, 65d129afbd, 386ffab76c,
57a8d0359c, 14cb66ee00, 9e38343352, 944302ae2f, be70804d17, a1afc80600, 02e24fda52, 7e3c0c5392,
fdb2524f9e, 4db655ae82, bb44cc84c4, 8c1e1cf1ad, b5b16948b0, c305d3a2a2, 038d234ff1, c45eb9a9fa,
824d6f96fe, 7cf821bac0, ae83fc8298, a7bc361145, 2805291400, 6614a2cba0, 6f4d203d1b, 1be6c6111c,
c7c3eab256, ec69875d15, d832cfcfd8, ab6b532cc4, 4b6047f7d7, 5ca04beb94, 902d05ebec, f1b298642a,
dd13dedaeb, 46724b4a05, 24432bf523, 31d3bcfff2, 706fbd6886, 8a8a048015, c72549cb9a, d6f803212c,
dac73537d2, bb5254de12, be5218c2f6, ec9478830a, 8807bfd13d, 447811c111, f29acf5d8c, 125707dbe0,
46d5de920d, d2a7bcf217, 141b9aa245, c5a6282fa8, c0f524e1a3, 958b2bee08, f619658e2c, aa391bf843,
47dcbdbeae, 691245bf20, 90798d4b39, 0b6d9f90cf, 8a5a12d961, e73542e2e8, 0262e44bbd, 613aad7a8a,
1aa88b0c51, 564fa38085, 59ec21479f, 42283f9e91, b105bf72e1, 226f577803, 2e255c4bef, 387592809f,
cedced5bb0, d31f045872, 6656a70d1b, d36e0a9549, 8771b2673f, a41d3d51a4, cae34ffe47, 4b62f7907d,
7fa6a0b665, 458ed29a31, e37775fe21, 1cd2434a32, de2cba6d1e, c0b1a58d27, 848b795b9f, 091b668624,
5004290daa, 5d2c2b804c, 1a92b588e0, 010e92c118, 2ead010c83, c4f66eb185, d7b46d2137, d042ce74c7,
7ba9e662b8, fdd5ef85e5, 704498a1ac, 1232af7928, d37633e034
.github/workflows/coverage.yml (vendored, 9 changed lines)
@@ -10,15 +10,14 @@ jobs:
   coverage:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Install Rust
        run: rustup toolchain install nightly --component llvm-tools-preview
-      - name: Install cargo-llvm-cov
-        run: curl -LsSf https://github.com/taiki-e/cargo-llvm-cov/releases/latest/download/cargo-llvm-cov-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
+      - uses: taiki-e/install-action@cargo-llvm-cov
       - name: Generate code coverage
-        run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
+        run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v2
+        uses: codecov/codecov-action@v3
         with:
           token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
           files: lcov.info
.github/workflows/long_running.yml (vendored, 4 changed lines)
@@ -12,13 +12,13 @@ jobs:
   functional_test_unsorted:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Run indexing_unsorted
         run: cargo test indexing_unsorted -- --ignored
   functional_test_sorted:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Run indexing_sorted
         run: cargo test indexing_sorted -- --ignored
 
.github/workflows/test.yml (vendored, 11 changed lines)
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Build
         run: cargo build --verbose --workspace
       - name: Install latest nightly to test also against unstable feature flag
@@ -24,16 +24,23 @@ jobs:
           toolchain: nightly
           override: true
           components: rustfmt
+
       - name: Install latest nightly to test also against unstable feature flag
         uses: actions-rs/toolchain@v1
         with:
           toolchain: stable
           override: true
           components: rustfmt, clippy
+
       - name: Run tests
-        run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,failpoints --verbose --workspace
+        run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints --verbose --workspace
+
+      - name: Run tests quickwit feature
+        run: cargo +stable test --features mmap,quickwit,failpoints --verbose --workspace
+
       - name: Check Formatting
         run: cargo +nightly fmt --all -- --check
+
       - uses: actions-rs/clippy-check@v1
         with:
           toolchain: stable
CHANGELOG.md (23 changed lines)
@@ -1,3 +1,22 @@
+Tantivy 0.18.1
+================================
+- Hotfix: positions computation. #1629 (@fmassot, @fulmicoton, @PSeitz)
+
+Tantivy 0.18
+================================
+- For date values `chrono` has been replaced with `time` (@uklotzde) #1304 :
+  - The `time` crate is re-exported as `tantivy::time` instead of `tantivy::chrono`.
+  - The type alias `tantivy::DateTime` has been removed.
+  - `Value::Date` wraps `time::PrimitiveDateTime` without time zone information.
+  - Internally date/time values are stored as seconds since UNIX epoch in UTC.
+  - Converting a `time::OffsetDateTime` to `Value::Date` implicitly converts the value into UTC.
+    If this is not desired do the time zone conversion yourself and use `time::PrimitiveDateTime`
+    directly instead.
+- Add [histogram](https://github.com/quickwit-oss/tantivy/pull/1306) aggregation (@PSeitz)
+- Add support for fastfield on text fields (@PSeitz)
+- Add terms aggregation (@PSeitz)
+- Add support for zstd compression (@kryesh)
+
 Tantivy 0.17
 ================================
 - LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115)
@@ -8,7 +27,9 @@ Tantivy 0.17
 - Schema now offers not indexing fieldnorms (@lpouget) [#922](https://github.com/quickwit-oss/tantivy/issues/922)
 - Reduce the number of fsync calls [#1225](https://github.com/quickwit-oss/tantivy/issues/1225)
 - Fix opening bytes index with dynamic codec (@PSeitz) [#1278](https://github.com/quickwit-oss/tantivy/issues/1278)
-- Added an aggregation collector compatible with Elasticsearch (@PSeitz)
+- Added an aggregation collector for range, average and stats compatible with Elasticsearch. (@PSeitz)
+- Added a JSON schema type @fulmicoton [#1251](https://github.com/quickwit-oss/tantivy/issues/1251)
+- Added support for slop in phrase queries @halvorboe [#1068](https://github.com/quickwit-oss/tantivy/issues/1068)
 
 Tantivy 0.16.2
 ================================
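The 0.18 notes above describe how offset-aware timestamps are coerced to UTC when converted to `Value::Date`. For readers who prefer to do that conversion explicitly with the `time` crate, here is a minimal sketch; the function name is illustrative and not part of tantivy:

```rust
use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};

// Shift the value to UTC first, then drop the offset, so the resulting
// PrimitiveDateTime matches what tantivy stores internally.
fn to_utc_primitive(dt: OffsetDateTime) -> PrimitiveDateTime {
    let utc = dt.to_offset(UtcOffset::UTC);
    PrimitiveDateTime::new(utc.date(), utc.time())
}
```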
Cargo.toml (97 changed lines)
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.17.0-dev"
+version = "0.18.1"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -10,68 +10,72 @@ homepage = "https://github.com/quickwit-oss/tantivy"
 repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
-edition = "2018"
+edition = "2021"
 
 [dependencies]
-base64 = "0.13"
+oneshot = "0.1.3"
+base64 = "0.13.0"
 byteorder = "1.4.3"
-crc32fast = "1.2.1"
-once_cell = "1.7.2"
-regex ={ version = "1.5.4", default-features = false, features = ["std"] }
-tantivy-fst = "0.3"
-memmap2 = {version = "0.5", optional=true}
-lz4_flex = { version = "0.9", default-features = false, features = ["checked-decode"], optional = true }
-brotli = { version = "3.3", optional = true }
+crc32fast = "1.3.2"
+once_cell = "1.10.0"
+regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
+tantivy-fst = "0.3.0"
+memmap2 = { version = "0.5.3", optional = true }
+lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
+brotli = { version = "3.3.4", optional = true }
+zstd = { version = "0.11", optional = true }
 snap = { version = "1.0.5", optional = true }
-tempfile = { version = "3.2", optional = true }
-log = "0.4.14"
-serde = { version = "1.0.126", features = ["derive"] }
-serde_json = "1.0.64"
-num_cpus = "1.13"
+tempfile = { version = "3.3.0", optional = true }
+log = "0.4.16"
+serde = { version = "1.0.136", features = ["derive"] }
+serde_json = "1.0.79"
+num_cpus = "1.13.1"
 fs2={ version = "0.4.3", optional = true }
-levenshtein_automata = "0.2"
-uuid = { version = "0.8.2", features = ["v4", "serde"] }
-crossbeam = "0.8.1"
-futures = { version = "0.3.15", features = ["thread-pool"] }
-tantivy-query-grammar = { version="0.15.0", path="./query-grammar" }
-tantivy-bitpacker = { version="0.1", path="./bitpacker" }
-common = { version = "0.1", path = "./common/", package = "tantivy-common" }
-fastfield_codecs = { version="0.1", path="./fastfield_codecs", default-features = false }
-ownedbytes = { version="0.2", path="./ownedbytes" }
-stable_deref_trait = "1.2"
-rust-stemmers = "1.2"
-downcast-rs = "1.2"
+levenshtein_automata = "0.2.1"
+uuid = { version = "1.0.0", features = ["v4", "serde"] }
+crossbeam-channel = "0.5.4"
+tantivy-query-grammar = { version="0.18.0", path="./query-grammar" }
+tantivy-bitpacker = { version="0.2", path="./bitpacker" }
+common = { version = "0.3", path = "./common/", package = "tantivy-common" }
+fastfield_codecs = { version="0.2", path="./fastfield_codecs", default-features = false }
+ownedbytes = { version="0.3", path="./ownedbytes" }
+stable_deref_trait = "1.2.0"
+rust-stemmers = "1.2.0"
+downcast-rs = "1.2.0"
 bitpacking = { version = "0.8.4", default-features = false, features = ["bitpacker4x"] }
-census = "0.4"
+census = "0.4.0"
 fnv = "1.0.7"
-thiserror = "1.0.24"
+thiserror = "1.0.30"
 htmlescape = "0.3.1"
-fail = "0.5"
-murmurhash32 = "0.2"
-chrono = "0.4.19"
-smallvec = "1.6.1"
-rayon = "1.5"
-lru = "0.7.0"
-fastdivide = "0.4"
-itertools = "0.10.0"
-measure_time = "0.8.0"
-pretty_assertions = "1.1.0"
+fail = "0.5.0"
+murmurhash32 = "0.2.0"
+time = { version = "0.3.9", features = ["serde-well-known"] }
+smallvec = "1.8.0"
+rayon = "1.5.2"
+lru = "0.7.5"
+fastdivide = "0.4.0"
+itertools = "0.10.3"
+measure_time = "0.8.2"
+pretty_assertions = "1.2.1"
+serde_cbor = { version = "0.11.2", optional = true }
+async-trait = "0.1.53"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3.9"
 
 [dev-dependencies]
-rand = "0.8.3"
+rand = "0.8.5"
 maplit = "1.0.2"
-matches = "0.1.8"
-proptest = "1.0"
+matches = "0.1.9"
+proptest = "1.0.0"
 criterion = "0.3.5"
-test-log = "0.2.8"
+test-log = "0.2.10"
 env_logger = "0.9.0"
-pprof = {version= "0.6", features=["flamegraph", "criterion"]}
+pprof = { version = "0.9.0", features = ["flamegraph", "criterion"] }
+futures = "0.3.21"
 
 [dev-dependencies.fail]
-version = "0.5"
+version = "0.5.0"
 features = ["failpoints"]
 
 [profile.release]
@@ -90,10 +94,13 @@ mmap = ["fs2", "tempfile", "memmap2"]
 brotli-compression = ["brotli"]
 lz4-compression = ["lz4_flex"]
 snappy-compression = ["snap"]
+zstd-compression = ["zstd"]
 
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
 
+quickwit = ["serde_cbor"]
+
 [workspace]
 members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
 
README.md (56 changed lines; badge and logo image markup was stripped by this export, only the link targets remain)
@@ -1,4 +1,3 @@
-
 [](https://docs.rs/crate/tantivy/)
 [](https://github.com/quickwit-oss/tantivy/actions/workflows/test.yml)
 [](https://codecov.io/gh/quickwit-oss/tantivy)
@@ -6,9 +5,10 @@
 [](https://opensource.org/licenses/MIT)
 [](https://crates.io/crates/tantivy)
+
 
 [tantivy logo image, stripped in this export]
 
-**Tantivy** is a **full text search engine library** written in Rust.
+**Tantivy** is a **full-text search engine library** written in Rust.
 
 It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
 an off-the-shelf search engine server, but rather a crate that can be used
@@ -16,19 +16,23 @@ to build such a search engine.
 
 Tantivy is, in fact, strongly inspired by Lucene's design.
 
+If you are looking for an alternative to Elasticsearch or Apache Solr, check out [Quickwit](https://github.com/quickwit-oss/quickwit), our search engine built on top of Tantivy.
+
 # Benchmark
 
-The following [benchmark](https://tantivy-search.github.io/bench/) break downs
-performance for different type of queries / collection.
+The following [benchmark](https://tantivy-search.github.io/bench/) breakdowns
+performance for different types of queries/collections.
 
 Your mileage WILL vary depending on the nature of queries and their load.
 
+<img src="doc/assets/images/searchbenchmark.png">
+
 # Features
 
 - Full-text search
 - Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
 - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
-- Tiny startup time (<10ms), perfect for command line tools
+- Tiny startup time (<10ms), perfect for command-line tools
 - BM25 scoring (the same as Lucene)
 - Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
 - Phrase queries search (e.g. `"michael jackson"`)
@@ -43,23 +47,25 @@ Your mileage WILL vary depending on the nature of queries and their load.
 - Range queries
 - Faceted search
 - Configurable indexing (optional term frequency and position indexing)
+- JSON Field
+- Aggregation Collector: range buckets, average, and stats metrics
+- LogMergePolicy with deletes
+- Searcher Warmer API
 - Cheesy logo with a horse
 
 ## Non-features
 
-- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
-  library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
-  are within the scope of Tantivy.
+Distributed search is out of the scope of Tantivy, but if you are looking for this feature, check out [Quickwit](https://github.com/quickwit-oss/quickwit/).
 
 
 # Getting started
 
-Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
+Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
 
 - [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
-- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
+- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
   index documents, and search via the CLI or a small server with a REST API.
-  It walks you through getting a wikipedia search engine up and running in a few minutes.
+  It walks you through getting a Wikipedia search engine up and running in a few minutes.
 - [Reference doc for the last released version](https://docs.rs/tantivy/)
 
 # How can I support this project?
@@ -119,3 +125,31 @@ By default, `rustc` compiles everything in the `examples/` directory in debug mode.
 rust-gdb target/debug/examples/$EXAMPLE_NAME
 $ gdb run
 ```
+# Companies Using Tantivy
+
+<p align="left">
+<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" />
+<img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
+<img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />
+<img align="center" src="doc/assets/images/nuclia-dark-theme.png#gh-dark-mode-only" alt="Nuclia" height="35" width="auto" />
+<img align="center" src="doc/assets/images/humanfirst.ai-dark-theme.png#gh-dark-mode-only" alt="Humanfirst.ai" height="25" width="auto" />
+<img align="center" src="doc/assets/images/element-dark-theme.png#gh-dark-mode-only" alt="Element.io" height="25" width="auto" />
+</p>
+
+
+# FAQ
+### Can I use Tantivy in other languages?
+- Python → [tantivy-py](https://github.com/quickwit-oss/tantivy-py)
+- Ruby → [tantiny](https://github.com/baygeldin/tantiny)
+
+You can also find other bindings on [GitHub](https://github.com/search?q=tantivy) but they may be less maintained.
+
+### What are some examples of Tantivy use?
+
+- [seshat](https://github.com/matrix-org/seshat/): A matrix message database/indexer
+- [tantiny](https://github.com/baygeldin/tantiny): Tiny full-text search for Ruby
+- [lnx](https://github.com/lnx-search/lnx): adaptable, typo tolerant search engine with a REST API
+- and [more](https://github.com/search?q=tantivy)!
+
+### On average, how much faster is Tantivy compared to Lucene?
+- According to our [search latency benchmark](https://tantivy-search.github.io/bench/), Tantivy is approximately 2x faster than Lucene.
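To complement the getting-started links in the README above, here is a minimal end-to-end indexing and search sketch against the tantivy 0.18 API; the field name and sample documents are illustrative and not taken from the repository:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Build a one-field schema and an in-memory index.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema.clone());

    // Index a couple of documents and commit.
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "The Old Man and the Sea"))?;
    writer.add_document(doc!(title => "Of Mice and Men"))?;
    writer.commit()?;

    // Parse a query against the title field and print the top hits.
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title]);
    let query = query_parser.parse_query("sea")?;
    for (_score, doc_address) in searcher.search(&query, &TopDocs::with_limit(10))? {
        println!("{}", schema.to_json(&searcher.doc(doc_address)?));
    }
    Ok(())
}
```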
(file header not captured in this export; the hunks below are from the Criterion benchmark that indexes HDFS log documents)
@@ -4,6 +4,7 @@ use tantivy::schema::{INDEXED, STORED, STRING, TEXT};
 use tantivy::Index;
 
 const HDFS_LOGS: &str = include_str!("hdfs.json");
+const NUM_REPEATS: usize = 2;
 
 pub fn hdfs_index_benchmark(c: &mut Criterion) {
     let schema = {
@@ -20,6 +21,11 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
         schema_builder.add_text_field("severity", STRING | STORED);
         schema_builder.build()
     };
+    let dynamic_schema = {
+        let mut schema_builder = tantivy::schema::SchemaBuilder::new();
+        schema_builder.add_json_field("json", TEXT);
+        schema_builder.build()
+    };
 
     let mut group = c.benchmark_group("index-hdfs");
     group.sample_size(20);
@@ -27,7 +33,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
         b.iter(|| {
             let index = Index::create_in_ram(schema.clone());
             let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for _ in 0..10 {
+            for _ in 0..NUM_REPEATS {
                 for doc_json in HDFS_LOGS.trim().split("\n") {
                     let doc = schema.parse_document(doc_json).unwrap();
                     index_writer.add_document(doc).unwrap();
@@ -39,7 +45,7 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
         b.iter(|| {
             let index = Index::create_in_ram(schema.clone());
             let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for _ in 0..10 {
+            for _ in 0..NUM_REPEATS {
                 for doc_json in HDFS_LOGS.trim().split("\n") {
                     let doc = schema.parse_document(doc_json).unwrap();
                     index_writer.add_document(doc).unwrap();
@@ -52,9 +58,11 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
         b.iter(|| {
             let index = Index::create_in_ram(schema_with_store.clone());
             let index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in HDFS_LOGS.trim().split("\n") {
-                let doc = schema.parse_document(doc_json).unwrap();
-                index_writer.add_document(doc).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let doc = schema.parse_document(doc_json).unwrap();
+                    index_writer.add_document(doc).unwrap();
+                }
             }
         })
     });
@@ -62,9 +70,43 @@ pub fn hdfs_index_benchmark(c: &mut Criterion) {
         b.iter(|| {
             let index = Index::create_in_ram(schema_with_store.clone());
             let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
-            for doc_json in HDFS_LOGS.trim().split("\n") {
-                let doc = schema.parse_document(doc_json).unwrap();
-                index_writer.add_document(doc).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let doc = schema.parse_document(doc_json).unwrap();
+                    index_writer.add_document(doc).unwrap();
+                }
+            }
+            index_writer.commit().unwrap();
+        })
+    });
+    group.bench_function("index-hdfs-no-commit-json-without-docstore", |b| {
+        b.iter(|| {
+            let index = Index::create_in_ram(dynamic_schema.clone());
+            let json_field = dynamic_schema.get_field("json").unwrap();
+            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let json_val: serde_json::Map<String, serde_json::Value> =
+                        serde_json::from_str(doc_json).unwrap();
+                    let doc = tantivy::doc!(json_field=>json_val);
+                    index_writer.add_document(doc).unwrap();
+                }
+            }
+            index_writer.commit().unwrap();
+        })
+    });
+    group.bench_function("index-hdfs-with-commit-json-without-docstore", |b| {
+        b.iter(|| {
+            let index = Index::create_in_ram(dynamic_schema.clone());
+            let json_field = dynamic_schema.get_field("json").unwrap();
+            let mut index_writer = index.writer_with_num_threads(1, 100_000_000).unwrap();
+            for _ in 0..NUM_REPEATS {
+                for doc_json in HDFS_LOGS.trim().split("\n") {
+                    let json_val: serde_json::Map<String, serde_json::Value> =
+                        serde_json::from_str(doc_json).unwrap();
+                    let doc = tantivy::doc!(json_field=>json_val);
+                    index_writer.add_document(doc).unwrap();
+                }
             }
             index_writer.commit().unwrap();
         })
(file header not captured; tantivy-bitpacker manifest)
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-bitpacker"
-version = "0.1.1"
+version = "0.2.0"
 edition = "2018"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
(file header not captured; benchmark source in the bitpacker crate)
@@ -6,6 +6,7 @@ extern crate test;
 mod tests {
     use tantivy_bitpacker::BlockedBitpacker;
     use test::Bencher;
+
     #[bench]
     fn bench_blockedbitp_read(b: &mut Bencher) {
         let mut blocked_bitpacker = BlockedBitpacker::new();
@@ -20,6 +21,7 @@ mod tests {
         out
         });
     }
+
     #[bench]
     fn bench_blockedbitp_create(b: &mut Bencher) {
         b.iter(|| {
(file header not captured; tantivy-common manifest)
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-common"
-version = "0.1.0"
+version = "0.3.0"
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 license = "MIT"
 edition = "2018"
@@ -10,7 +10,7 @@ description = "common traits and utility functions used by multiple tantivy subcrates"
 
 [dependencies]
 byteorder = "1.4.3"
-ownedbytes = { version="0.2", path="../ownedbytes" }
+ownedbytes = { version="0.3", path="../ownedbytes" }
 
 [dev-dependencies]
 proptest = "1.0.0"
New image assets:
- doc/assets/images/Nuclia.png (new binary file, 3.1 KiB)
- doc/assets/images/element-dark-theme.png (new binary file, 56 KiB)
- doc/assets/images/element.io.svg (new file, 8 lines, 5.2 KiB): the Element logo as an inline SVG; the raw path data is omitted here.
- doc/assets/images/humanfirst.ai-dark-theme.png (new binary file, 23 KiB)
- doc/assets/images/humanfirst.png (new binary file, 102 KiB)
- doc/assets/images/nuclia-dark-theme.png (new binary file, 7.8 KiB)
- doc/assets/images/searchbenchmark.png (new binary file, 653 KiB)
(file header not captured; documentation book summary)
@@ -10,7 +10,6 @@
 - [Index Sorting](./index_sorting.md)
 - [Innerworkings](./innerworkings.md)
 - [Inverted index](./inverted_index.md)
-- [Json](./json.md)
 - [Best practise](./inverted_index.md)
 
 [Frequently Asked Questions](./faq.md)
(file header not captured; JSON field documentation page)
@@ -50,18 +50,18 @@ In the example above, the terms will be sorted as
 As seen in "pitfalls", we may end up having to search for a value for a same path in several different fields. Putting the field code after the path makes it maximizes compression opportunities but also increases the chances for the two terms to end up in the actual same term dictionary block.
 
 
-# Pitfalls and limitation.
+# Pitfalls, limitation and corner cases.
 
 Json gives very little information about the type of the literals it stores.
 All numeric types end up mapped as a "Number" and there are no types for dates.
 
-At ingestion time, tantivy will try to interpret number and strings as different type with a
+At indexing, tantivy will try to interpret number and strings as different type with a
 priority order.
 
 Numbers will be interpreted as u64, i64 and f64 in that order.
 Strings will be interpreted as rfc3999 dates or simple strings.
 
-The first working time is picked and only one type will be emitted for indexing.
+The first working type is picked and is the only term that is emitted for indexing.
 
 Note this interpretation happens on a per-document basis, and there is no effort to try to sniff
 a consistent field type at the scale of a segment.
@@ -74,9 +74,55 @@ So the query
 my_path.my_segment:233
 ```
 
-Should be interpreted as
-- `(my_path.my_segment, String, 233)`
-- `(my_path.my_segment, u64, 233)`
+Will be interpreted as
+`(my_path.my_segment, String, 233) or (my_path.my_segment, u64, 233)`
 
 Likewise, we need to emit two tokens if the query contains an rfc3999 date.
 Indeed the date could have been actually a single token inside the text of a document at ingestion time. Generally speaking, we will always at least emit a string token in query parsing, and sometimes more.
+If one more json field is defined, things get even more complicated.
+
+
+## Default json field
+
+If the schema contains a text field called "text" and a json field that is set as a default field:
+`text:hello` could be reasonably interpreted as targetting the text field or as targetting the json field called `json_dynamic` with the json_path "text".
+
+If there is such an ambiguity, we decide to only search in the "text" field: `text:hello`.
+
+In other words, the parser will not search in default json fields if there is a schema hit.
+This is a product decision.
+
+The user can still target the JSON field by specifying its name explicitly:
+`json_dynamic.text:hello`.
+
+## Range queries are not supported.
+
+Json field do not support range queries.
+
+## Arrays do not work like nested object.
+
+If json object contains an array, a search query might return more documents
+than what might be expected.
+
+Let's take an example.
+
+```json
+{
+    "cart_id": 3234234,
+    "cart": [
+        {"product_type": "sneakers", "attributes": {"color": "white"} },
+        {"product_type": "t-shirt", "attributes": {"color": "red"}},
+    ]
+}
+```
+
+Despite the array structure, a document in tantivy is a bag of terms.
+The query:
+
+```
+cart.product_type:sneakers AND cart.attributes.color:red
+```
+
+Actually match the document above.
+
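A self-contained sketch of the array pitfall described in the hunk above: the whole cart object is indexed under a single json field used as the default query field, and the cross-element query still matches because the document is flattened into a bag of terms. The field name `cart_doc` and the in-RAM setup are assumptions for illustration, not part of the documentation:

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let cart_doc = schema_builder.add_json_field("cart_doc", TEXT);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema.clone());
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(schema.parse_document(
        r#"{"cart_doc": {
            "cart_id": 3234234,
            "cart": [
                {"product_type": "sneakers", "attributes": {"color": "white"}},
                {"product_type": "t-shirt", "attributes": {"color": "red"}}
            ]
        }}"#,
    )?)?;
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let parser = QueryParser::for_index(&index, vec![cart_doc]);
    // "sneakers" and "red" belong to different array elements, yet the
    // document is counted as a hit because array structure is not preserved.
    let query = parser.parse_query("cart.product_type:sneakers AND cart.attributes.color:red")?;
    assert_eq!(searcher.search(&*query, &Count)?, 1);
    Ok(())
}
```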
(file header not captured; aggregation example source)
@@ -20,13 +20,12 @@ fn main() -> tantivy::Result<()> {
     let mut schema_builder = Schema::builder();
     let text_fieldtype = schema::TextOptions::default()
         .set_indexing_options(
-            TextFieldIndexing::default()
-                .set_tokenizer("default")
-                .set_index_option(IndexRecordOption::WithFreqs),
+            TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
         )
         .set_stored();
     let text_field = schema_builder.add_text_field("text", text_fieldtype);
-    let score_fieldtype = crate::schema::IntOptions::default().set_fast(Cardinality::SingleValue);
+    let score_fieldtype =
+        crate::schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
     let highscore_field = schema_builder.add_f64_field("highscore", score_fieldtype.clone());
     let price_field = schema_builder.add_f64_field("price", score_fieldtype.clone());
 
@@ -123,7 +122,7 @@ fn main() -> tantivy::Result<()> {
     let searcher = reader.searcher();
     let agg_res: AggregationResults = searcher.search(&term_query, &collector).unwrap();
 
-    let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
+    let res: Value = serde_json::to_value(&agg_res)?;
     println!("{}", serde_json::to_string_pretty(&res)?);
 
     Ok(())
examples/json_field.rs (new file, 105 lines; reproduced below without the leading "+" markers)

// # Json field example
//
// This example shows how the json field can be used
// to make tantivy partially schemaless by setting it as
// default query parser field.

use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, FAST, STORED, STRING, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // # Defining the schema
    let mut schema_builder = Schema::builder();
    schema_builder.add_date_field("timestamp", FAST | STORED);
    let event_type = schema_builder.add_text_field("event_type", STRING | STORED);
    let attributes = schema_builder.add_json_field("attributes", STORED | TEXT);
    let schema = schema_builder.build();

    // # Indexing documents
    let index = Index::create_in_ram(schema.clone());

    let mut index_writer = index.writer(50_000_000)?;
    let doc = schema.parse_document(
        r#"{
            "timestamp": "2022-02-22T23:20:50.53Z",
            "event_type": "click",
            "attributes": {
                "target": "submit-button",
                "cart": {"product_id": 103},
                "description": "the best vacuum cleaner ever"
            }
        }"#,
    )?;
    index_writer.add_document(doc)?;
    let doc = schema.parse_document(
        r#"{
            "timestamp": "2022-02-22T23:20:51.53Z",
            "event_type": "click",
            "attributes": {
                "target": "submit-button",
                "cart": {"product_id": 133},
                "description": "das keyboard",
                "event_type": "holiday-sale"
            }
        }"#,
    )?;
    index_writer.add_document(doc)?;
    index_writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();

    // # Default fields: event_type and attributes
    // By setting attributes as a default field it allows omitting attributes itself, e.g. "target",
    // instead of "attributes.target"
    let query_parser = QueryParser::for_index(&index, vec![event_type, attributes]);
    {
        let query = query_parser.parse_query("target:submit-button")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(count_docs.len(), 2);
    }
    {
        let query = query_parser.parse_query("target:submit")?;
        let count_docs = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(count_docs.len(), 2);
    }
    {
        let query = query_parser.parse_query("cart.product_id:103")?;
        let count_docs = searcher.search(&*query, &Count)?;
        assert_eq!(count_docs, 1);
    }
    {
        let query = query_parser.parse_query("click AND cart.product_id:133")?;
        let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(hits.len(), 1);
    }
    {
        // The sub-fields in the json field marked as default field still need to be explicitly
        // addressed
        let query = query_parser.parse_query("click AND 133")?;
        let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(hits.len(), 0);
    }
    {
        // Default json fields are ignored if they collide with the schema
        let query = query_parser.parse_query("event_type:holiday-sale")?;
        let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(hits.len(), 0);
    }
    // # Query via full attribute path
    {
        // This only searches in our schema's `event_type` field
        let query = query_parser.parse_query("event_type:click")?;
        let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(hits.len(), 2);
    }
    {
        // Default json fields can still be accessed by full path
        let query = query_parser.parse_query("attributes.event_type:holiday-sale")?;
        let hits = searcher.search(&*query, &TopDocs::with_limit(2))?;
        assert_eq!(hits.len(), 1);
    }
    Ok(())
}
(file header not captured; fastfield_codecs manifest)
@@ -1,6 +1,6 @@
 [package]
 name = "fastfield_codecs"
-version = "0.1.0"
+version = "0.2.0"
 authors = ["Pascal Seitz <pascal@quickwit.io>"]
 license = "MIT"
 edition = "2018"
@@ -9,8 +9,8 @@ description = "Fast field codecs used by tantivy"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-common = { version = "0.1", path = "../common/", package = "tantivy-common" }
-tantivy-bitpacker = { version="0.1.1", path = "../bitpacker/" }
+common = { version = "0.3", path = "../common/", package = "tantivy-common" }
+tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
 prettytable-rs = {version="0.8.0", optional= true}
 rand = {version="0.8.3", optional= true}
 
(file header not captured; fastfield_codecs source)
@@ -63,6 +63,7 @@ pub trait FastFieldDataAccess {
 }
 
 #[derive(Debug, Clone)]
+/// Statistics are used in codec detection and stored in the fast field footer.
 pub struct FastFieldStats {
     pub min_value: u64,
     pub max_value: u64,
(file header not captured; ownedbytes manifest)
@@ -1,7 +1,7 @@
 [package]
 authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
 name = "ownedbytes"
-version = "0.2.0"
+version = "0.3.0"
 edition = "2018"
 description = "Expose data as static slice"
 license = "MIT"
(file header not captured; tantivy-query-grammar manifest)
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.15.0"
+version = "0.18.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
(file header not captured; query-grammar parser source)
@@ -18,7 +18,7 @@ use crate::Occur;
 const SPECIAL_CHARS: &[char] = &[
     '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ',
 ];
-const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|\~|!|\\|\*| )"#;
+const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|\~|!|\\|\*|\s)"#;
 
 /// Parses a field_name
 /// A field name must have at least one character and be followed by a colon.
@@ -34,7 +34,8 @@ fn field_name<'a>() -> impl Parser<&'a str, Output = String> {
         take_while(|c| !SPECIAL_CHARS.contains(&c)),
     ),
     '\\',
-    satisfy(|c| SPECIAL_CHARS.contains(&c)),
+    satisfy(|_| true), /* if the next character is not a special char, the \ will be treated
+                        * as the \ character. */
 ))
 .skip(char(':'))
 .map(|s| ESCAPED_SPECIAL_CHARS_RE.replace_all(&s, "$1").to_string())
@@ -67,7 +68,7 @@ fn word<'a>() -> impl Parser<&'a str, Output = String> {
 ///
 /// NOTE: also accepts 999999-99-99T99:99:99.266051969+99:99
 /// We delegate rejecting such invalid dates to the logical AST compuation code
-/// which invokes chrono::DateTime::parse_from_rfc3339 on the value to actually parse
+/// which invokes time::OffsetDateTime::parse(..., &Rfc3339) on the value to actually parse
 /// it (instead of merely extracting the datetime value as string as done here).
 fn date_time<'a>() -> impl Parser<&'a str, Output = String> {
     let two_digits = || recognize::<String, _, _>((digit(), digit()));
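The doc-comment change above points the reader at the `time` crate's RFC 3339 parser instead of chrono. A small sketch of the kind of check the downstream logical-AST code performs; this assumes the `time` crate with its parsing support enabled and is not the actual tantivy code:

```rust
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

// The grammar only extracts a datetime-looking string; real validation happens
// later by trying to parse it as RFC 3339.
fn is_valid_rfc3339(raw: &str) -> bool {
    OffsetDateTime::parse(raw, &Rfc3339).is_ok()
}

fn main() {
    assert!(is_valid_rfc3339("2022-02-22T23:20:50.53Z"));
    // Accepted by the grammar above, rejected by the actual parse.
    assert!(!is_valid_rfc3339("999999-99-99T99:99:99.266051969+99:99"));
}
```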
@@ -516,15 +517,27 @@ mod test {
     }

     #[test]
-    fn test_field_name() -> TestParseResult {
+    fn test_field_name() {
         assert_eq!(
             super::field_name().parse(".my.field.name:a"),
             Ok((".my.field.name".to_string(), "a"))
         );
+        assert_eq!(
+            super::field_name().parse(r#"my\ field:a"#),
+            Ok(("my field".to_string(), "a"))
+        );
+        assert_eq!(
+            super::field_name().parse(r#"にんじん:a"#),
+            Ok(("にんじん".to_string(), "a"))
+        );
         assert_eq!(
             super::field_name().parse("my\\ field\\ name:a"),
             Ok(("my field name".to_string(), "a"))
         );
+        assert_eq!(
+            super::field_name().parse(r#"my\field:a"#),
+            Ok((r#"my\field"#.to_string(), "a"))
+        );
         assert!(super::field_name().parse("my field:a").is_err());
         assert_eq!(
             super::field_name().parse("\\(1\\+1\\):2"),
@@ -534,14 +547,21 @@ mod test {
             super::field_name().parse("my_field_name:a"),
             Ok(("my_field_name".to_string(), "a"))
         );
+        assert_eq!(
+            super::field_name().parse("myfield.b:hello").unwrap(),
+            ("myfield.b".to_string(), "hello")
+        );
+        assert_eq!(
+            super::field_name().parse(r#"myfield\.b:hello"#).unwrap(),
+            (r#"myfield\.b"#.to_string(), "hello")
+        );
         assert!(super::field_name().parse("my_field_name").is_err());
         assert!(super::field_name().parse(":a").is_err());
         assert!(super::field_name().parse("-my_field:a").is_err());
         assert_eq!(
-            super::field_name().parse("_my_field:a")?,
-            ("_my_field".to_string(), "a")
+            super::field_name().parse("_my_field:a"),
+            Ok(("_my_field".to_string(), "a"))
         );
-        Ok(())
     }

     #[test]
@@ -59,7 +59,7 @@ pub enum UserInputBound {
 }

 impl UserInputBound {
-    fn display_lower(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+    fn display_lower(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
         match *self {
             UserInputBound::Inclusive(ref word) => write!(formatter, "[\"{}\"", word),
             UserInputBound::Exclusive(ref word) => write!(formatter, "{{\"{}\"", word),
@@ -67,7 +67,7 @@ impl UserInputBound {
         }
     }

-    fn display_upper(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+    fn display_upper(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
         match *self {
             UserInputBound::Inclusive(ref word) => write!(formatter, "\"{}\"]", word),
             UserInputBound::Exclusive(ref word) => write!(formatter, "\"{}\"}}", word),
@@ -11,10 +11,10 @@ Tantivy's aggregations have been designed to mimic the

 The code is organized in submodules:

-##bucket
+## bucket
 Contains all bucket aggregations, like range aggregation. These bucket aggregations group documents into buckets and can contain sub-aggregations.

-##metric
+## metric
 Contains all metric aggregations, like average aggregation. Metric aggregations do not have sub aggregations.

 #### agg_req
@@ -3,6 +3,7 @@
 //!
 //! [Aggregations] is the top level entry point to create a request, which is a `HashMap<String,
 //! Aggregation>`.
+//!
 //! Requests are compatible with the json format of elasticsearch.
 //!
 //! # Example
@@ -43,19 +44,94 @@
 //! assert_eq!(agg_req1, agg_req2);
 //! ```

-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};

 use serde::{Deserialize, Serialize};

 pub use super::bucket::RangeAggregation;
+use super::bucket::{HistogramAggregation, TermsAggregation};
 use super::metric::{AverageAggregation, StatsAggregation};
+use super::VecWithNames;

 /// The top-level aggregation request structure, which contains [Aggregation] and their user defined
-/// names.
+/// names. It is also used in [buckets](BucketAggregation) to define sub-aggregations.
 ///
 /// The key is the user defined name of the aggregation.
 pub type Aggregations = HashMap<String, Aggregation>;

+/// Like Aggregations, but optimized to work with the aggregation result
+#[derive(Clone, Debug)]
+pub(crate) struct AggregationsInternal {
+    pub(crate) metrics: VecWithNames<MetricAggregation>,
+    pub(crate) buckets: VecWithNames<BucketAggregationInternal>,
+}
+
+impl From<Aggregations> for AggregationsInternal {
+    fn from(aggs: Aggregations) -> Self {
+        let mut metrics = vec![];
+        let mut buckets = vec![];
+        for (key, agg) in aggs {
+            match agg {
+                Aggregation::Bucket(bucket) => buckets.push((
+                    key,
+                    BucketAggregationInternal {
+                        bucket_agg: bucket.bucket_agg,
+                        sub_aggregation: bucket.sub_aggregation.into(),
+                    },
+                )),
+                Aggregation::Metric(metric) => metrics.push((key, metric)),
+            }
+        }
+        Self {
+            metrics: VecWithNames::from_entries(metrics),
+            buckets: VecWithNames::from_entries(buckets),
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+// Like BucketAggregation, but optimized to work with the result
+pub(crate) struct BucketAggregationInternal {
+    /// Bucket aggregation strategy to group documents.
+    pub bucket_agg: BucketAggregationType,
+    /// The sub_aggregations in the buckets. Each bucket will aggregate on the document set in the
+    /// bucket.
+    pub sub_aggregation: AggregationsInternal,
+}
+
+impl BucketAggregationInternal {
+    pub(crate) fn as_histogram(&self) -> Option<&HistogramAggregation> {
+        match &self.bucket_agg {
+            BucketAggregationType::Histogram(histogram) => Some(histogram),
+            _ => None,
+        }
+    }
+    pub(crate) fn as_term(&self) -> Option<&TermsAggregation> {
+        match &self.bucket_agg {
+            BucketAggregationType::Terms(terms) => Some(terms),
+            _ => None,
+        }
+    }
+}
+
+/// Extract all fields, where the term directory is used in the tree.
+pub fn get_term_dict_field_names(aggs: &Aggregations) -> HashSet<String> {
+    let mut term_dict_field_names = Default::default();
+    for el in aggs.values() {
+        el.get_term_dict_field_names(&mut term_dict_field_names)
+    }
+    term_dict_field_names
+}
+
+/// Extract all fast field names used in the tree.
+pub fn get_fast_field_names(aggs: &Aggregations) -> HashSet<String> {
+    let mut fast_field_names = Default::default();
+    for el in aggs.values() {
+        el.get_fast_field_names(&mut fast_field_names)
+    }
+    fast_field_names
+}
+
 /// Aggregation request of [BucketAggregation] or [MetricAggregation].
 ///
 /// An aggregation is either a bucket or a metric.
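A hedged usage sketch of the two helper functions added above (module path and the elasticsearch-style "avg" key assumed from how this crate exposes `aggregation::agg_req`; only "range" is confirmed by this hunk): deserialize a request and ask it which fast fields it will touch before opening any readers.

```rust
use tantivy::aggregation::agg_req::{get_fast_field_names, Aggregations};

fn main() -> serde_json::Result<()> {
    // Hypothetical request: one metric and one range bucket on a "score" field.
    let req: Aggregations = serde_json::from_str(
        r#"{
            "avg_score": { "avg": { "field": "score" } },
            "score_ranges": {
                "range": {
                    "field": "score",
                    "ranges": [{ "to": 3.0 }, { "from": 3.0 }]
                }
            }
        }"#,
    )?;
    // Both aggregations read the "score" fast field.
    assert!(get_fast_field_names(&req).contains("score"));
    Ok(())
}
```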
@@ -68,6 +144,21 @@ pub enum Aggregation {
     Metric(MetricAggregation),
 }

+impl Aggregation {
+    fn get_term_dict_field_names(&self, term_field_names: &mut HashSet<String>) {
+        if let Aggregation::Bucket(bucket) = self {
+            bucket.get_term_dict_field_names(term_field_names)
+        }
+    }
+
+    fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
+        match self {
+            Aggregation::Bucket(bucket) => bucket.get_fast_field_names(fast_field_names),
+            Aggregation::Metric(metric) => metric.get_fast_field_names(fast_field_names),
+        }
+    }
+}
+
 /// BucketAggregations create buckets of documents. Each bucket is associated with a rule which
 /// determines whether or not a document falls into it. In other words, the buckets
 /// effectively define document sets. Buckets are not necessarily disjunct, therefore a document can
@@ -91,12 +182,43 @@ pub struct BucketAggregation {
     pub sub_aggregation: Aggregations,
 }

+impl BucketAggregation {
+    fn get_term_dict_field_names(&self, term_dict_field_names: &mut HashSet<String>) {
+        if let BucketAggregationType::Terms(terms) = &self.bucket_agg {
+            term_dict_field_names.insert(terms.field.to_string());
+        }
+        term_dict_field_names.extend(get_term_dict_field_names(&self.sub_aggregation));
+    }
+    fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
+        self.bucket_agg.get_fast_field_names(fast_field_names);
+        fast_field_names.extend(get_fast_field_names(&self.sub_aggregation));
+    }
+}
+
 /// The bucket aggregation types.
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub enum BucketAggregationType {
     /// Put data into buckets of user-defined ranges.
     #[serde(rename = "range")]
     Range(RangeAggregation),
+    /// Put data into buckets of fixed intervals (histogram).
+    #[serde(rename = "histogram")]
+    Histogram(HistogramAggregation),
+    /// Put data into buckets of terms.
+    #[serde(rename = "terms")]
+    Terms(TermsAggregation),
+}
+
+impl BucketAggregationType {
+    fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
+        match self {
+            BucketAggregationType::Terms(terms) => fast_field_names.insert(terms.field.to_string()),
+            BucketAggregationType::Range(range) => fast_field_names.insert(range.field.to_string()),
+            BucketAggregationType::Histogram(histogram) => {
+                fast_field_names.insert(histogram.field.to_string())
+            }
+        };
+    }
 }

 /// The aggregations in this family compute metrics based on values extracted
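Because of the serde renames above, the two new variants parse straight from elasticsearch-style JSON bodies. A hedged sketch (the `interval` field on the histogram body and the possibility of omitting sub-aggregations are assumptions about the wider crate, not shown in this hunk):

```rust
use tantivy::aggregation::agg_req::Aggregation;

fn main() -> serde_json::Result<()> {
    // "histogram" and "terms" map onto the new BucketAggregationType variants.
    let histogram: Aggregation =
        serde_json::from_str(r#"{ "histogram": { "field": "score", "interval": 10.0 } }"#)?;
    let terms: Aggregation = serde_json::from_str(r#"{ "terms": { "field": "category" } }"#)?;
    println!("{:?}\n{:?}", histogram, terms);
    Ok(())
}
```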
@@ -116,6 +238,15 @@ pub enum MetricAggregation {
     Stats(StatsAggregation),
 }

+impl MetricAggregation {
+    fn get_fast_field_names(&self, fast_field_names: &mut HashSet<String>) {
+        match self {
+            MetricAggregation::Average(avg) => fast_field_names.insert(avg.field.to_string()),
+            MetricAggregation::Stats(stats) => fast_field_names.insert(stats.field.to_string()),
+        };
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -166,4 +297,62 @@ mod tests {
         let agg_req2: String = serde_json::to_string_pretty(&agg_req1).unwrap();
         assert_eq!(agg_req2, elasticsearch_compatible_json_req);
     }
+
+    #[test]
+    fn test_get_fast_field_names() {
+        let agg_req2: Aggregations = vec![
+            (
+                "range".to_string(),
+                Aggregation::Bucket(BucketAggregation {
+                    bucket_agg: BucketAggregationType::Range(RangeAggregation {
+                        field: "score2".to_string(),
+                        ranges: vec![
+                            (f64::MIN..3f64).into(),
+                            (3f64..7f64).into(),
+                            (7f64..20f64).into(),
+                            (20f64..f64::MAX).into(),
+                        ],
+                    }),
+                    sub_aggregation: Default::default(),
+                }),
+            ),
+            (
+                "metric".to_string(),
+                Aggregation::Metric(MetricAggregation::Average(
+                    AverageAggregation::from_field_name("field123".to_string()),
+                )),
+            ),
+        ]
+        .into_iter()
+        .collect();
+
+        let agg_req1: Aggregations = vec![(
+            "range".to_string(),
+            Aggregation::Bucket(BucketAggregation {
+                bucket_agg: BucketAggregationType::Range(RangeAggregation {
+                    field: "score".to_string(),
+                    ranges: vec![
+                        (f64::MIN..3f64).into(),
+                        (3f64..7f64).into(),
+                        (7f64..20f64).into(),
+                        (20f64..f64::MAX).into(),
+                    ],
+                }),
+                sub_aggregation: agg_req2,
+            }),
+        )]
+        .into_iter()
+        .collect();
+
+        assert_eq!(
+            get_fast_field_names(&agg_req1),
+            vec![
+                "score".to_string(),
+                "score2".to_string(),
+                "field123".to_string()
+            ]
+            .into_iter()
+            .collect()
+        )
+    }
 }
@@ -1,12 +1,16 @@
 //! This will enhance the request tree with access to the fastfield and metadata.

+use std::sync::Arc;
+
 use super::agg_req::{Aggregation, Aggregations, BucketAggregationType, MetricAggregation};
-use super::bucket::RangeAggregation;
+use super::bucket::{HistogramAggregation, RangeAggregation, TermsAggregation};
 use super::metric::{AverageAggregation, StatsAggregation};
 use super::VecWithNames;
-use crate::fastfield::DynamicFastFieldReader;
-use crate::schema::Type;
-use crate::{SegmentReader, TantivyError};
+use crate::fastfield::{
+    type_and_cardinality, DynamicFastFieldReader, FastType, MultiValuedFastFieldReader,
+};
+use crate::schema::{Cardinality, Type};
+use crate::{InvertedIndexReader, SegmentReader, TantivyError};

 #[derive(Clone, Default)]
 pub(crate) struct AggregationsWithAccessor {
@@ -27,34 +31,70 @@ impl AggregationsWithAccessor {
     }
 }

+#[derive(Clone)]
+pub(crate) enum FastFieldAccessor {
+    Multi(MultiValuedFastFieldReader<u64>),
+    Single(DynamicFastFieldReader<u64>),
+}
+impl FastFieldAccessor {
+    pub fn as_single(&self) -> Option<&DynamicFastFieldReader<u64>> {
+        match self {
+            FastFieldAccessor::Multi(_) => None,
+            FastFieldAccessor::Single(reader) => Some(reader),
+        }
+    }
+    pub fn as_multi(&self) -> Option<&MultiValuedFastFieldReader<u64>> {
+        match self {
+            FastFieldAccessor::Multi(reader) => Some(reader),
+            FastFieldAccessor::Single(_) => None,
+        }
+    }
+}
+
 #[derive(Clone)]
 pub struct BucketAggregationWithAccessor {
     /// In general there can be buckets without fast field access, e.g. buckets that are created
     /// based on search terms. So eventually this needs to be Option or moved.
-    pub(crate) accessor: DynamicFastFieldReader<u64>,
+    pub(crate) accessor: FastFieldAccessor,
+    pub(crate) inverted_index: Option<Arc<InvertedIndexReader>>,
     pub(crate) field_type: Type,
     pub(crate) bucket_agg: BucketAggregationType,
     pub(crate) sub_aggregation: AggregationsWithAccessor,
 }

 impl BucketAggregationWithAccessor {
-    fn from_bucket(
+    fn try_from_bucket(
         bucket: &BucketAggregationType,
         sub_aggregation: &Aggregations,
         reader: &SegmentReader,
     ) -> crate::Result<BucketAggregationWithAccessor> {
+        let mut inverted_index = None;
         let (accessor, field_type) = match &bucket {
             BucketAggregationType::Range(RangeAggregation {
                 field: field_name,
                 ranges: _,
-            }) => get_ff_reader_and_validate(reader, field_name)?,
+            }) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
+            BucketAggregationType::Histogram(HistogramAggregation {
+                field: field_name, ..
+            }) => get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?,
+            BucketAggregationType::Terms(TermsAggregation {
+                field: field_name, ..
+            }) => {
+                let field = reader
+                    .schema()
+                    .get_field(field_name)
+                    .ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
+                inverted_index = Some(reader.inverted_index(field)?);
+                get_ff_reader_and_validate(reader, field_name, Cardinality::MultiValues)?
+            }
         };
         let sub_aggregation = sub_aggregation.clone();
         Ok(BucketAggregationWithAccessor {
             accessor,
             field_type,
-            sub_aggregation: get_aggregations_with_accessor(&sub_aggregation, reader)?,
+            sub_aggregation: get_aggs_with_accessor_and_validate(&sub_aggregation, reader)?,
             bucket_agg: bucket.clone(),
+            inverted_index,
         })
     }
 }
@@ -68,17 +108,21 @@ pub struct MetricAggregationWithAccessor {
 }

 impl MetricAggregationWithAccessor {
-    fn from_metric(
+    fn try_from_metric(
         metric: &MetricAggregation,
         reader: &SegmentReader,
     ) -> crate::Result<MetricAggregationWithAccessor> {
         match &metric {
             MetricAggregation::Average(AverageAggregation { field: field_name })
             | MetricAggregation::Stats(StatsAggregation { field: field_name }) => {
-                let (accessor, field_type) = get_ff_reader_and_validate(reader, field_name)?;
+                let (accessor, field_type) =
+                    get_ff_reader_and_validate(reader, field_name, Cardinality::SingleValue)?;
+
                 Ok(MetricAggregationWithAccessor {
-                    accessor,
+                    accessor: accessor
+                        .as_single()
+                        .expect("unexpected fast field cardinality")
+                        .clone(),
                     field_type,
                     metric: metric.clone(),
                 })
@@ -87,7 +131,7 @@ impl MetricAggregationWithAccessor {
     }
 }

-pub(crate) fn get_aggregations_with_accessor(
+pub(crate) fn get_aggs_with_accessor_and_validate(
     aggs: &Aggregations,
     reader: &SegmentReader,
 ) -> crate::Result<AggregationsWithAccessor> {
@@ -97,7 +141,7 @@ pub(crate) fn get_aggregations_with_accessor(
         match agg {
             Aggregation::Bucket(bucket) => buckets.push((
                 key.to_string(),
-                BucketAggregationWithAccessor::from_bucket(
+                BucketAggregationWithAccessor::try_from_bucket(
                     &bucket.bucket_agg,
                     &bucket.sub_aggregation,
                     reader,
@@ -105,7 +149,7 @@ pub(crate) fn get_aggregations_with_accessor(
             )),
             Aggregation::Metric(metric) => metrics.push((
                 key.to_string(),
-                MetricAggregationWithAccessor::from_metric(metric, reader)?,
+                MetricAggregationWithAccessor::try_from_metric(metric, reader)?,
             )),
         }
     }
@@ -115,26 +159,45 @@ pub(crate) fn get_aggregations_with_accessor(
     ))
 }

+/// Get fast field reader with the given cardinality.
 fn get_ff_reader_and_validate(
     reader: &SegmentReader,
     field_name: &str,
-) -> crate::Result<(DynamicFastFieldReader<u64>, Type)> {
+    cardinality: Cardinality,
+) -> crate::Result<(FastFieldAccessor, Type)> {
     let field = reader
         .schema()
         .get_field(field_name)
         .ok_or_else(|| TantivyError::FieldNotFound(field_name.to_string()))?;
     let field_type = reader.schema().get_field_entry(field).field_type();
-    if field_type.value_type() != Type::I64
-        && field_type.value_type() != Type::U64
-        && field_type.value_type() != Type::F64
-    {
+    if let Some((ff_type, field_cardinality)) = type_and_cardinality(field_type) {
+        if ff_type == FastType::Date {
+            return Err(TantivyError::InvalidArgument(
+                "Unsupported field type date in aggregation".to_string(),
+            ));
+        }
+
+        if cardinality != field_cardinality {
+            return Err(TantivyError::InvalidArgument(format!(
+                "Invalid field cardinality on field {} expected {:?}, but got {:?}",
+                field_name, cardinality, field_cardinality
+            )));
+        }
+    } else {
         return Err(TantivyError::InvalidArgument(format!(
-            "Invalid field type in aggregation {:?}, only f64, u64, i64 is supported",
+            "Only fast fields of type f64, u64, i64 are supported, but got {:?} ",
             field_type.value_type()
         )));
-    }
+    };

     let ff_fields = reader.fast_fields();
-    ff_fields
-        .u64_lenient(field)
-        .map(|field| (field, field_type.value_type()))
+    match cardinality {
+        Cardinality::SingleValue => ff_fields
+            .u64_lenient(field)
+            .map(|field| (FastFieldAccessor::Single(field), field_type.value_type())),
+        Cardinality::MultiValues => ff_fields
+            .u64s_lenient(field)
+            .map(|field| (FastFieldAccessor::Multi(field), field_type.value_type())),
+    }
 }
@@ -7,29 +7,132 @@
 use std::cmp::Ordering;
 use std::collections::HashMap;

-use itertools::Itertools;
 use serde::{Deserialize, Serialize};

+use super::agg_req::{
+    Aggregations, AggregationsInternal, BucketAggregationInternal, MetricAggregation,
+};
+use super::bucket::{intermediate_buckets_to_final_buckets, GetDocCount};
 use super::intermediate_agg_result::{
-    IntermediateAggregationResult, IntermediateAggregationResults, IntermediateBucketResult,
+    IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
     IntermediateMetricResult, IntermediateRangeBucketEntry,
 };
 use super::metric::{SingleMetricResult, Stats};
-use super::Key;
+use super::{Key, VecWithNames};
+use crate::TantivyError;

-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
 /// The final aggregation result.
 pub struct AggregationResults(pub HashMap<String, AggregationResult>);

-impl From<IntermediateAggregationResults> for AggregationResults {
-    fn from(tree: IntermediateAggregationResults) -> Self {
-        Self(
-            tree.0
-                .into_iter()
-                .map(|(key, agg)| (key, agg.into()))
-                .collect(),
-        )
+impl AggregationResults {
+    pub(crate) fn get_value_from_aggregation(
+        &self,
+        name: &str,
+        agg_property: &str,
+    ) -> crate::Result<Option<f64>> {
+        if let Some(agg) = self.0.get(name) {
+            agg.get_value_from_aggregation(name, agg_property)
+        } else {
+            // Validation is done during request parsing, so we can't reach this state.
+            Err(TantivyError::InternalError(format!(
+                "Can't find aggregation {:?} in sub_aggregations",
+                name
+            )))
+        }
     }
+
+    /// Convert an intermediate result and its aggregation request to the final result
+    pub fn from_intermediate_and_req(
+        results: IntermediateAggregationResults,
+        agg: Aggregations,
+    ) -> crate::Result<Self> {
+        AggregationResults::from_intermediate_and_req_internal(results, &(agg.into()))
+    }
+
+    /// Convert an intermediate result and its aggregation request to the final result
+    ///
+    /// Internal function, CollectorAggregations is used instead of Aggregations, which is
+    /// optimized for internal processing, by splitting metrics and buckets into separate groups.
+    pub(crate) fn from_intermediate_and_req_internal(
+        intermediate_results: IntermediateAggregationResults,
+        req: &AggregationsInternal,
+    ) -> crate::Result<Self> {
+        // Important assumption:
+        // When the tree contains buckets/metric, we expect it to have all buckets/metrics from the
+        // request
+        let mut results: HashMap<String, AggregationResult> = HashMap::new();
+
+        if let Some(buckets) = intermediate_results.buckets {
+            add_coverted_final_buckets_to_result(&mut results, buckets, &req.buckets)?
+        } else {
+            // When there are no buckets, we create empty buckets, so that the serialized json
+            // format is constant
+            add_empty_final_buckets_to_result(&mut results, &req.buckets)?
+        };
+
+        if let Some(metrics) = intermediate_results.metrics {
+            add_converted_final_metrics_to_result(&mut results, metrics);
+        } else {
+            // When there are no metrics, we create empty metric results, so that the serialized
+            // json format is constant
+            add_empty_final_metrics_to_result(&mut results, &req.metrics)?;
+        }
+        Ok(Self(results))
+    }
+}
+
+fn add_converted_final_metrics_to_result(
+    results: &mut HashMap<String, AggregationResult>,
+    metrics: VecWithNames<IntermediateMetricResult>,
+) {
+    results.extend(
+        metrics
+            .into_iter()
+            .map(|(key, metric)| (key, AggregationResult::MetricResult(metric.into()))),
+    );
+}
+
+fn add_empty_final_metrics_to_result(
+    results: &mut HashMap<String, AggregationResult>,
+    req_metrics: &VecWithNames<MetricAggregation>,
+) -> crate::Result<()> {
+    results.extend(req_metrics.iter().map(|(key, req)| {
+        let empty_bucket = IntermediateMetricResult::empty_from_req(req);
+        (
+            key.to_string(),
+            AggregationResult::MetricResult(empty_bucket.into()),
+        )
+    }));
+    Ok(())
+}
+
+fn add_empty_final_buckets_to_result(
+    results: &mut HashMap<String, AggregationResult>,
+    req_buckets: &VecWithNames<BucketAggregationInternal>,
+) -> crate::Result<()> {
+    let requested_buckets = req_buckets.iter();
+    for (key, req) in requested_buckets {
+        let empty_bucket = AggregationResult::BucketResult(BucketResult::empty_from_req(req)?);
+        results.insert(key.to_string(), empty_bucket);
+    }
+    Ok(())
+}
+
+fn add_coverted_final_buckets_to_result(
+    results: &mut HashMap<String, AggregationResult>,
+    buckets: VecWithNames<IntermediateBucketResult>,
+    req_buckets: &VecWithNames<BucketAggregationInternal>,
+) -> crate::Result<()> {
+    assert_eq!(buckets.len(), req_buckets.len());
+
+    let buckets_with_request = buckets.into_iter().zip(req_buckets.values());
+    for ((key, bucket), req) in buckets_with_request {
+        let result =
+            AggregationResult::BucketResult(BucketResult::from_intermediate_and_req(bucket, req)?);
+        results.insert(key, result);
+    }
+    Ok(())
 }

 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -41,15 +144,20 @@ pub enum AggregationResult {
     /// Metric result variant.
     MetricResult(MetricResult),
 }
-impl From<IntermediateAggregationResult> for AggregationResult {
-    fn from(tree: IntermediateAggregationResult) -> Self {
-        match tree {
-            IntermediateAggregationResult::Bucket(bucket) => {
-                AggregationResult::BucketResult(bucket.into())
-            }
-            IntermediateAggregationResult::Metric(metric) => {
-                AggregationResult::MetricResult(metric.into())
-            }
+
+impl AggregationResult {
+    pub(crate) fn get_value_from_aggregation(
+        &self,
+        _name: &str,
+        agg_property: &str,
+    ) -> crate::Result<Option<f64>> {
+        match self {
+            AggregationResult::BucketResult(_bucket) => Err(TantivyError::InternalError(
+                "Tried to retrieve value from bucket aggregation. This is not supported and \
+                 should not happen during collection, but should be caught during validation"
+                    .to_string(),
+            )),
+            AggregationResult::MetricResult(metric) => metric.get_value(agg_property),
         }
     }
 }
@@ -64,6 +172,14 @@ pub enum MetricResult {
     Stats(Stats),
 }

+impl MetricResult {
+    fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
+        match self {
+            MetricResult::Average(avg) => Ok(avg.value),
+            MetricResult::Stats(stats) => stats.get_value(agg_property),
+        }
+    }
+}
 impl From<IntermediateMetricResult> for MetricResult {
     fn from(metric: IntermediateMetricResult) -> Self {
         match metric {
@@ -81,37 +197,177 @@ impl From<IntermediateMetricResult> for MetricResult {
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(untagged)]
 pub enum BucketResult {
-    /// This is the default entry for a bucket, which contains a key, count, and optionally
+    /// This is the range entry for a bucket, which contains a key, count, from, to, and optionally
     /// sub_aggregations.
     Range {
         /// The range buckets sorted by range.
         buckets: Vec<RangeBucketEntry>,
     },
+    /// This is the histogram entry for a bucket, which contains a key, count, and optionally
+    /// sub_aggregations.
+    Histogram {
+        /// The buckets.
+        ///
+        /// Whether there are holes depends on the request: if min_doc_count is 0, then there are
+        /// no holes between the first and last bucket.
+        /// See [HistogramAggregation](super::bucket::HistogramAggregation)
+        buckets: Vec<BucketEntry>,
+    },
+    /// This is the term result
+    Terms {
+        /// The buckets.
+        ///
+        /// See [TermsAggregation](super::bucket::TermsAggregation)
+        buckets: Vec<BucketEntry>,
+        /// The number of documents that didn't make it into the top N due to shard_size or size
+        sum_other_doc_count: u64,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        /// The upper bound error for the doc count of each term.
+        doc_count_error_upper_bound: Option<u64>,
+    },
 }

-impl From<IntermediateBucketResult> for BucketResult {
-    fn from(result: IntermediateBucketResult) -> Self {
-        match result {
-            IntermediateBucketResult::Range(range_map) => {
-                let mut buckets: Vec<RangeBucketEntry> = range_map
-                    .into_iter()
-                    .map(|(_, bucket)| bucket.into())
-                    .collect_vec();
+impl BucketResult {
+    pub(crate) fn empty_from_req(req: &BucketAggregationInternal) -> crate::Result<Self> {
+        let empty_bucket = IntermediateBucketResult::empty_from_req(&req.bucket_agg);
+        BucketResult::from_intermediate_and_req(empty_bucket, req)
+    }

-                buckets.sort_by(|a, b| {
-                    a.from
+    fn from_intermediate_and_req(
+        bucket_result: IntermediateBucketResult,
+        req: &BucketAggregationInternal,
+    ) -> crate::Result<Self> {
+        match bucket_result {
+            IntermediateBucketResult::Range(range_res) => {
+                let mut buckets: Vec<RangeBucketEntry> = range_res
+                    .buckets
+                    .into_iter()
+                    .map(|(_, bucket)| {
+                        RangeBucketEntry::from_intermediate_and_req(bucket, &req.sub_aggregation)
+                    })
+                    .collect::<crate::Result<Vec<_>>>()?;
+
+                buckets.sort_by(|left, right| {
+                    // TODO use total_cmp next stable rust release
+                    left.from
                         .unwrap_or(f64::MIN)
-                        .partial_cmp(&b.from.unwrap_or(f64::MIN))
+                        .partial_cmp(&right.from.unwrap_or(f64::MIN))
                         .unwrap_or(Ordering::Equal)
                 });
-                BucketResult::Range { buckets }
+                Ok(BucketResult::Range { buckets })
             }
+            IntermediateBucketResult::Histogram { buckets } => {
+                let buckets = intermediate_buckets_to_final_buckets(
+                    buckets,
+                    req.as_histogram()
+                        .expect("unexpected aggregation, expected histogram aggregation"),
+                    &req.sub_aggregation,
+                )?;
+
+                Ok(BucketResult::Histogram { buckets })
+            }
+            IntermediateBucketResult::Terms(terms) => terms.into_final_result(
+                req.as_term()
+                    .expect("unexpected aggregation, expected term aggregation"),
+                &req.sub_aggregation,
+            ),
         }
     }
 }

+/// This is the default entry for a bucket, which contains a key, count, and optionally
+/// sub_aggregations.
+///
+/// # JSON Format
+/// ```json
+/// {
+///   ...
+///   "my_histogram": {
+///     "buckets": [
+///       {
+///         "key": "2.0",
+///         "doc_count": 5
+///       },
+///       {
+///         "key": "4.0",
+///         "doc_count": 2
+///       },
+///       {
+///         "key": "6.0",
+///         "doc_count": 3
+///       }
+///     ]
+///   }
+///   ...
+/// }
+/// ```
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct BucketEntry {
+    /// The identifier of the bucket.
+    pub key: Key,
+    /// Number of documents in the bucket.
+    pub doc_count: u64,
+    #[serde(flatten)]
+    /// Sub-aggregations in this bucket.
+    pub sub_aggregation: AggregationResults,
+}
+
+impl BucketEntry {
+    pub(crate) fn from_intermediate_and_req(
+        entry: IntermediateHistogramBucketEntry,
+        req: &AggregationsInternal,
+    ) -> crate::Result<Self> {
+        Ok(BucketEntry {
+            key: Key::F64(entry.key),
+            doc_count: entry.doc_count,
+            sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
+                entry.sub_aggregation,
+                req,
+            )?,
+        })
+    }
+}
+impl GetDocCount for &BucketEntry {
+    fn doc_count(&self) -> u64 {
+        self.doc_count
+    }
+}
+impl GetDocCount for BucketEntry {
+    fn doc_count(&self) -> u64 {
+        self.doc_count
+    }
+}
+
 /// This is the range entry for a bucket, which contains a key, count, and optionally
 /// sub_aggregations.
+///
+/// # JSON Format
+/// ```json
+/// {
+///   ...
+///   "my_ranges": {
+///     "buckets": [
+///       {
+///         "key": "*-10",
+///         "to": 10,
+///         "doc_count": 5
+///       },
+///       {
+///         "key": "10-20",
+///         "from": 10,
+///         "to": 20,
+///         "doc_count": 2
+///       },
+///       {
+///         "key": "20-*",
+///         "from": 20,
+///         "doc_count": 3
+///       }
+///     ]
+///   }
+///   ...
+/// }
+/// ```
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct RangeBucketEntry {
     /// The identifier of the bucket.
@@ -129,14 +385,20 @@ pub struct RangeBucketEntry {
     pub to: Option<f64>,
 }

-impl From<IntermediateRangeBucketEntry> for RangeBucketEntry {
-    fn from(entry: IntermediateRangeBucketEntry) -> Self {
-        RangeBucketEntry {
+impl RangeBucketEntry {
+    fn from_intermediate_and_req(
+        entry: IntermediateRangeBucketEntry,
+        req: &AggregationsInternal,
+    ) -> crate::Result<Self> {
+        Ok(RangeBucketEntry {
             key: entry.key,
             doc_count: entry.doc_count,
-            sub_aggregation: entry.sub_aggregation.into(),
+            sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
+                entry.sub_aggregation,
+                req,
+            )?,
             to: entry.to,
             from: entry.from,
-        }
+        })
     }
 }
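To show where `from_intermediate_and_req` sits in a distributed search flow, here is a hedged sketch. The collector name and its `from_aggs` constructor are assumptions about the surrounding crate; only the final conversion call is taken from this diff.

```rust
use tantivy::aggregation::agg_req::Aggregations;
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::DistributedAggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Searcher;

fn search_and_finalize(searcher: &Searcher, agg_req: Aggregations) -> tantivy::Result<String> {
    // Each node/segment produces mergeable intermediate results...
    let collector = DistributedAggregationCollector::from_aggs(agg_req.clone());
    let intermediate = searcher.search(&AllQuery, &collector)?;
    // ...and building the final, serializable result now also needs the original request.
    let final_result = AggregationResults::from_intermediate_and_req(intermediate, agg_req)?;
    Ok(serde_json::to_string_pretty(&final_result).expect("aggregation results serialize"))
}
```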
src/aggregation/bucket/histogram/histogram.rs — new file, 1392 lines (contents not shown here)
src/aggregation/bucket/histogram/mod.rs — new file, 2 lines
@@ -0,0 +1,2 @@
+mod histogram;
+pub use histogram::*;
@@ -1,10 +1,140 @@
 //! Module for all bucket aggregations.
 //!
-//! Results of final buckets are [BucketEntry](super::agg_result::BucketEntry).
+//! BucketAggregations create buckets of documents
+//! [BucketAggregation](super::agg_req::BucketAggregation).
+//!
+//! Results of final buckets are [BucketResult](super::agg_result::BucketResult).
 //! Results of intermediate buckets are
-//! [IntermediateBucketEntry](super::intermediate_agg_result::IntermediateBucketEntry)
+//! [IntermediateBucketResult](super::intermediate_agg_result::IntermediateBucketResult)

+mod histogram;
 mod range;
+mod term_agg;

-pub use range::RangeAggregation;
+use std::collections::HashMap;

+pub(crate) use histogram::SegmentHistogramCollector;
+pub use histogram::*;
 pub(crate) use range::SegmentRangeCollector;
+pub use range::*;
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+pub use term_agg::*;
+
+/// Order for buckets in a bucket aggregation.
+#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
+pub enum Order {
+    /// Asc order
+    #[serde(rename = "asc")]
+    Asc,
+    /// Desc order
+    #[serde(rename = "desc")]
+    Desc,
+}
+
+impl Default for Order {
+    fn default() -> Self {
+        Order::Desc
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+/// Order property by which to apply the order
+pub enum OrderTarget {
+    /// The key of the bucket
+    Key,
+    /// The doc count of the bucket
+    Count,
+    /// Order by the value of the sub aggregation metric identified by the given `String`.
+    ///
+    /// Only single value metrics are supported currently
+    SubAggregation(String),
+}
+
+impl Default for OrderTarget {
+    fn default() -> Self {
+        OrderTarget::Count
+    }
+}
+impl From<&str> for OrderTarget {
+    fn from(val: &str) -> Self {
+        match val {
+            "_key" => OrderTarget::Key,
+            "_count" => OrderTarget::Count,
+            _ => OrderTarget::SubAggregation(val.to_string()),
+        }
+    }
+}
+
+impl ToString for OrderTarget {
+    fn to_string(&self) -> String {
+        match self {
+            OrderTarget::Key => "_key".to_string(),
+            OrderTarget::Count => "_count".to_string(),
+            OrderTarget::SubAggregation(agg) => agg.to_string(),
+        }
+    }
+}
+
+/// Set the order. target is either "_count", "_key", or the name of
+/// a metric sub_aggregation.
+///
+/// De/Serializes to elasticsearch compatible JSON.
+///
+/// Examples in JSON format:
+/// { "_count": "asc" }
+/// { "_key": "asc" }
+/// { "average_price": "asc" }
+#[derive(Clone, Default, Debug, PartialEq)]
+pub struct CustomOrder {
+    /// The target property by which to sort
+    pub target: OrderTarget,
+    /// The order asc or desc
+    pub order: Order,
+}
+
+impl Serialize for CustomOrder {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where S: Serializer {
+        let map: HashMap<String, Order> =
+            std::iter::once((self.target.to_string(), self.order)).collect();
+        map.serialize(serializer)
+    }
+}
+
+impl<'de> Deserialize<'de> for CustomOrder {
+    fn deserialize<D>(deserializer: D) -> Result<CustomOrder, D::Error>
+    where D: Deserializer<'de> {
+        HashMap::<String, Order>::deserialize(deserializer).and_then(|map| {
+            if let Some((key, value)) = map.into_iter().next() {
+                Ok(CustomOrder {
+                    target: key.as_str().into(),
+                    order: value,
+                })
+            } else {
+                Err(de::Error::custom(
+                    "unexpected empty map in order".to_string(),
+                ))
+            }
+        })
+    }
+}
+
+#[test]
+fn custom_order_serde_test() {
+    let order = CustomOrder {
+        target: OrderTarget::Key,
+        order: Order::Desc,
+    };
+
+    let order_str = serde_json::to_string(&order).unwrap();
+    assert_eq!(order_str, "{\"_key\":\"desc\"}");
+    let order_deser = serde_json::from_str(&order_str).unwrap();
+
+    assert_eq!(order, order_deser);
+
+    let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("{}");
+    assert!(order_deser.is_err());
+
+    let order_deser: serde_json::Result<CustomOrder> = serde_json::from_str("[]");
+    assert!(order_deser.is_err());
+}
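A hedged sketch of what the custom (de)serializer above produces on the wire (module path assumed): the single-entry map format elasticsearch uses for bucket ordering, e.g. ordering terms buckets by a metric sub-aggregation.

```rust
use tantivy::aggregation::bucket::{CustomOrder, Order, OrderTarget};

fn main() {
    // Matches the `{ "average_price": "asc" }` example in the doc comment above.
    let order = CustomOrder {
        target: OrderTarget::SubAggregation("average_price".to_string()),
        order: Order::Asc,
    };
    assert_eq!(
        serde_json::to_string(&order).unwrap(),
        r#"{"average_price":"asc"}"#
    );
}
```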
@@ -1,15 +1,15 @@
+use std::fmt::Debug;
 use std::ops::Range;

-use itertools::Itertools;
 use serde::{Deserialize, Serialize};

 use crate::aggregation::agg_req_with_accessor::{
     AggregationsWithAccessor, BucketAggregationWithAccessor,
 };
-use crate::aggregation::intermediate_agg_result::IntermediateBucketResult;
-use crate::aggregation::segment_agg_result::{
-    SegmentAggregationResultsCollector, SegmentRangeBucketEntry,
+use crate::aggregation::intermediate_agg_result::{
+    IntermediateBucketResult, IntermediateRangeBucketEntry, IntermediateRangeBucketResult,
 };
+use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
 use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key};
 use crate::fastfield::FastFieldReader;
 use crate::schema::Type;
@@ -18,18 +18,38 @@ use crate::{DocId, TantivyError};
 /// Provide user-defined buckets to aggregate on.
 /// Two special buckets will automatically be created to cover the whole range of values.
 /// The provided buckets have to be continuous.
-/// During the aggregation, the values extracted from the fast_field `field_name` will be checked
+/// During the aggregation, the values extracted from the fast_field `field` will be checked
 /// against each bucket range. Note that this aggregation includes the from value and excludes the
 /// to value for each range.
 ///
 /// Result type is [BucketResult](crate::aggregation::agg_result::BucketResult) with
-/// [BucketEntryKeyCount](crate::aggregation::agg_result::BucketEntryKeyCount) on the
+/// [RangeBucketEntry](crate::aggregation::agg_result::RangeBucketEntry) on the
 /// AggregationCollector.
 ///
 /// Result type is
 /// [crate::aggregation::intermediate_agg_result::IntermediateBucketResult] with
-/// [crate::aggregation::intermediate_agg_result::IntermediateBucketEntryKeyCount] on the
+/// [crate::aggregation::intermediate_agg_result::IntermediateRangeBucketEntry] on the
 /// DistributedAggregationCollector.
+///
+/// # Limitations/Compatibility
+/// Overlapping ranges are not yet supported.
+///
+/// The keyed parameter (elasticsearch) is not yet supported.
+///
+/// # Request JSON Format
+/// ```json
+/// {
+///   "my_ranges": {
+///     "field": "score",
+///     "ranges": [
+///       { "to": 3.0 },
+///       { "from": 3.0, "to": 7.0 },
+///       { "from": 7.0, "to": 20.0 },
+///       { "from": 20.0 }
+///     ]
+///   }
+/// }
+/// ```
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct RangeAggregation {
     /// The field to aggregate on.
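Rust-side equivalent of the JSON request shown in the doc comment above, leaning on the `From<Range<f64>>` conversion that appears later in this file; a hedged sketch with the import path assumed.

```rust
use tantivy::aggregation::agg_req::RangeAggregation;

fn main() {
    // Open-ended first and last ranges are expressed with f64::MIN / f64::MAX,
    // mirroring how the crate's own tests build range requests.
    let ranges_req = RangeAggregation {
        field: "score".to_string(),
        ranges: vec![
            (f64::MIN..3.0).into(),
            (3.0..7.0).into(),
            (7.0..20.0).into(),
            (20.0..f64::MAX).into(),
        ],
    };
    assert_eq!(ranges_req.ranges.len(), 4);
}
```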
@@ -40,9 +60,14 @@ pub struct RangeAggregation {
 }

 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+/// The range for one range bucket.
 pub struct RangeAggregationRange {
+    /// The from range value, which is inclusive in the range.
+    /// None is equivalent to an open-ended interval.
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub from: Option<f64>,
+    /// The to range value, which is not inclusive in the range.
+    /// None is equivalent to an open-ended interval.
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub to: Option<f64>,
 }
@@ -64,7 +89,7 @@ impl From<Range<f64>> for RangeAggregationRange {
 }

 #[derive(Clone, Debug, PartialEq)]
-pub struct SegmentRangeAndBucketEntry {
+pub(crate) struct SegmentRangeAndBucketEntry {
     range: Range<u64>,
     bucket: SegmentRangeBucketEntry,
 }
@@ -78,25 +103,75 @@ pub struct SegmentRangeCollector {
     field_type: Type,
 }

+#[derive(Clone, PartialEq)]
+pub(crate) struct SegmentRangeBucketEntry {
+    pub key: Key,
+    pub doc_count: u64,
+    pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
+    /// The from range of the bucket. Equals f64::MIN when None.
+    pub from: Option<f64>,
+    /// The to range of the bucket. Equals f64::MAX when None. Open interval, `to` is not
+    /// inclusive.
+    pub to: Option<f64>,
+}
+
+impl Debug for SegmentRangeBucketEntry {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SegmentRangeBucketEntry")
+            .field("key", &self.key)
+            .field("doc_count", &self.doc_count)
+            .field("from", &self.from)
+            .field("to", &self.to)
+            .finish()
+    }
+}
+impl SegmentRangeBucketEntry {
+    pub(crate) fn into_intermediate_bucket_entry(
+        self,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<IntermediateRangeBucketEntry> {
+        let sub_aggregation = if let Some(sub_aggregation) = self.sub_aggregation {
+            sub_aggregation.into_intermediate_aggregations_result(agg_with_accessor)?
+        } else {
+            Default::default()
+        };
+
+        Ok(IntermediateRangeBucketEntry {
+            key: self.key,
+            doc_count: self.doc_count,
+            sub_aggregation,
+            from: self.from,
+            to: self.to,
+        })
+    }
+}
+
 impl SegmentRangeCollector {
-    pub fn into_intermediate_bucket_result(self) -> IntermediateBucketResult {
+    pub fn into_intermediate_bucket_result(
+        self,
+        agg_with_accessor: &BucketAggregationWithAccessor,
+    ) -> crate::Result<IntermediateBucketResult> {
         let field_type = self.field_type;

         let buckets = self
             .buckets
             .into_iter()
             .map(move |range_bucket| {
-                (
-                    range_to_key(&range_bucket.range, &field_type),
-                    range_bucket.bucket.into(),
-                )
+                Ok((
+                    range_to_string(&range_bucket.range, &field_type),
+                    range_bucket
+                        .bucket
+                        .into_intermediate_bucket_entry(&agg_with_accessor.sub_aggregation)?,
+                ))
             })
-            .collect();
+            .collect::<crate::Result<_>>()?;

-        IntermediateBucketResult::Range(buckets)
+        Ok(IntermediateBucketResult::Range(
+            IntermediateRangeBucketResult { buckets },
+        ))
     }

-    pub(crate) fn from_req(
+    pub(crate) fn from_req_and_validate(
         req: &RangeAggregation,
         sub_aggregation: &AggregationsWithAccessor,
         field_type: Type,
@@ -120,7 +195,7 @@ impl SegmentRangeCollector {
|
|||||||
let sub_aggregation = if sub_aggregation.is_empty() {
|
let sub_aggregation = if sub_aggregation.is_empty() {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some(SegmentAggregationResultsCollector::from_req(
|
Some(SegmentAggregationResultsCollector::from_req_and_validate(
|
||||||
sub_aggregation,
|
sub_aggregation,
|
||||||
)?)
|
)?)
|
||||||
};
|
};
|
||||||
@@ -151,11 +226,15 @@ impl SegmentRangeCollector {
|
|||||||
force_flush: bool,
|
force_flush: bool,
|
||||||
) {
|
) {
|
||||||
let mut iter = doc.chunks_exact(4);
|
let mut iter = doc.chunks_exact(4);
|
||||||
|
let accessor = bucket_with_accessor
|
||||||
|
.accessor
|
||||||
|
.as_single()
|
||||||
|
.expect("unexpected fast field cardinatility");
|
||||||
for docs in iter.by_ref() {
|
for docs in iter.by_ref() {
|
||||||
let val1 = bucket_with_accessor.accessor.get(docs[0]);
|
let val1 = accessor.get(docs[0]);
|
||||||
let val2 = bucket_with_accessor.accessor.get(docs[1]);
|
let val2 = accessor.get(docs[1]);
|
||||||
let val3 = bucket_with_accessor.accessor.get(docs[2]);
|
let val3 = accessor.get(docs[2]);
|
||||||
let val4 = bucket_with_accessor.accessor.get(docs[3]);
|
let val4 = accessor.get(docs[3]);
|
||||||
let bucket_pos1 = self.get_bucket_pos(val1);
|
let bucket_pos1 = self.get_bucket_pos(val1);
|
||||||
let bucket_pos2 = self.get_bucket_pos(val2);
|
let bucket_pos2 = self.get_bucket_pos(val2);
|
||||||
let bucket_pos3 = self.get_bucket_pos(val3);
|
let bucket_pos3 = self.get_bucket_pos(val3);
|
||||||
@@ -167,7 +246,7 @@ impl SegmentRangeCollector {
|
|||||||
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation);
|
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation);
|
||||||
}
|
}
|
||||||
for doc in iter.remainder() {
|
for doc in iter.remainder() {
|
||||||
let val = bucket_with_accessor.accessor.get(*doc);
|
let val = accessor.get(*doc);
|
||||||
let bucket_pos = self.get_bucket_pos(val);
|
let bucket_pos = self.get_bucket_pos(val);
|
||||||
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
|
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation);
|
||||||
}
|
}
|
||||||
@@ -219,15 +298,22 @@ impl SegmentRangeCollector {
|
|||||||
/// fast field.
|
/// fast field.
|
||||||
/// The alternative would be that every value read would be converted to the f64 range, but that is
|
/// The alternative would be that every value read would be converted to the f64 range, but that is
|
||||||
/// more computational expensive when many documents are hit.
|
/// more computational expensive when many documents are hit.
|
||||||
fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> Range<u64> {
|
fn to_u64_range(range: &RangeAggregationRange, field_type: &Type) -> crate::Result<Range<u64>> {
|
||||||
range
|
let start = if let Some(from) = range.from {
|
||||||
.from
|
f64_to_fastfield_u64(from, field_type)
|
||||||
.map(|from| f64_to_fastfield_u64(from, field_type))
|
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
|
||||||
.unwrap_or(u64::MIN)
|
} else {
|
||||||
..range
|
u64::MIN
|
||||||
.to
|
};
|
||||||
.map(|to| f64_to_fastfield_u64(to, field_type))
|
|
||||||
.unwrap_or(u64::MAX)
|
let end = if let Some(to) = range.to {
|
||||||
|
f64_to_fastfield_u64(to, field_type)
|
||||||
|
.ok_or_else(|| TantivyError::InvalidArgument("invalid field type".to_string()))?
|
||||||
|
} else {
|
||||||
|
u64::MAX
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(start..end)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Extends the provided buckets to contain the whole value range, by inserting buckets at the
|
/// Extends the provided buckets to contain the whole value range, by inserting buckets at the
|
||||||
@@ -239,7 +325,7 @@ fn extend_validate_ranges(
|
|||||||
let mut converted_buckets = buckets
|
let mut converted_buckets = buckets
|
||||||
.iter()
|
.iter()
|
||||||
.map(|range| to_u64_range(range, field_type))
|
.map(|range| to_u64_range(range, field_type))
|
||||||
.collect_vec();
|
.collect::<crate::Result<Vec<_>>>()?;
|
||||||
|
|
||||||
converted_buckets.sort_by_key(|bucket| bucket.start);
|
converted_buckets.sort_by_key(|bucket| bucket.start);
|
||||||
if converted_buckets[0].start != u64::MIN {
|
if converted_buckets[0].start != u64::MIN {
|
||||||
@@ -274,7 +360,7 @@ fn extend_validate_ranges(
|
|||||||
Ok(converted_buckets)
|
Ok(converted_buckets)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
|
pub(crate) fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
|
||||||
// is_start is there for malformed requests, e.g. ig the user passes the range u64::MIN..0.0,
|
// is_start is there for malformed requests, e.g. ig the user passes the range u64::MIN..0.0,
|
||||||
// it should be rendererd as "*-0" and not "*-*"
|
// it should be rendererd as "*-0" and not "*-*"
|
||||||
let to_str = |val: u64, is_start: bool| {
|
let to_str = |val: u64, is_start: bool| {
|
||||||
@@ -288,7 +374,7 @@ pub fn range_to_string(range: &Range<u64>, field_type: &Type) -> String {
|
|||||||
format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
|
format!("{}-{}", to_str(range.start, true), to_str(range.end, false))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
|
pub(crate) fn range_to_key(range: &Range<u64>, field_type: &Type) -> Key {
|
||||||
Key::Str(range_to_string(range, field_type))
|
Key::Str(range_to_string(range, field_type))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -315,7 +401,8 @@ mod tests {
|
|||||||
ranges,
|
ranges,
|
||||||
};
|
};
|
||||||
|
|
||||||
SegmentRangeCollector::from_req(&req, &Default::default(), field_type).unwrap()
|
SegmentRangeCollector::from_req_and_validate(&req, &Default::default(), field_type)
|
||||||
|
.expect("unexpected error")
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -456,11 +543,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn range_binary_search_test_f64() {
|
fn range_binary_search_test_f64() {
|
||||||
let ranges = vec![
|
let ranges = vec![(10.0..100.0).into()];
|
||||||
//(f64::MIN..10.0).into(),
|
|
||||||
(10.0..100.0).into(),
|
|
||||||
//(100.0..f64::MAX).into(),
|
|
||||||
];
|
|
||||||
|
|
||||||
let collector = get_collector_from_ranges(ranges, Type::F64);
|
let collector = get_collector_from_ranges(ranges, Type::F64);
|
||||||
let search = |val: u64| collector.get_bucket_pos(val);
|
let search = |val: u64| collector.get_bucket_pos(val);
|
||||||
@@ -479,6 +562,7 @@ mod tests {
|
|||||||
#[cfg(all(test, feature = "unstable"))]
|
#[cfg(all(test, feature = "unstable"))]
|
||||||
mod bench {
|
mod bench {
|
||||||
|
|
||||||
|
use itertools::Itertools;
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
use rand::thread_rng;
|
use rand::thread_rng;
|
||||||
|
|
||||||
|
|||||||
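Note on the `to_u64_range` change above: the old version silently fell back to `u64::MIN`/`u64::MAX` when a bound could not be converted, while the new version returns a `crate::Result` and surfaces an unsupported field type as an error. The sketch below mirrors that pattern outside the crate; `f64_to_u64_stub` and the plain `String` error are stand-ins for the real `f64_to_fastfield_u64` and `TantivyError`, so treat it as an illustration rather than the crate's code.

```rust
use std::ops::Range;

// Hypothetical stand-in for the crate's `f64_to_fastfield_u64`, which returns `None`
// when the target field type cannot represent the value.
fn f64_to_u64_stub(val: f64, field_is_numeric: bool) -> Option<u64> {
    field_is_numeric.then(|| val as u64)
}

// Same shape as the new `to_u64_range`: a missing bound becomes an open bound,
// an unconvertible bound becomes an error instead of a silent default.
fn to_u64_range_sketch(
    from: Option<f64>,
    to: Option<f64>,
    field_is_numeric: bool,
) -> Result<Range<u64>, String> {
    let start = match from {
        Some(from) => f64_to_u64_stub(from, field_is_numeric)
            .ok_or_else(|| "invalid field type".to_string())?,
        None => u64::MIN,
    };
    let end = match to {
        Some(to) => f64_to_u64_stub(to, field_is_numeric)
            .ok_or_else(|| "invalid field type".to_string())?,
        None => u64::MAX,
    };
    Ok(start..end)
}

fn main() {
    assert_eq!(to_u64_range_sketch(Some(10.0), None, true), Ok(10..u64::MAX));
    assert!(to_u64_range_sketch(Some(10.0), None, false).is_err());
}
```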
src/aggregation/bucket/term_agg.rs (new file, 1320 lines)
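The body of the new `term_agg.rs` file is not rendered in this compare view. Judging from the intermediate-result code further down in this diff (`cut_off_buckets`, `sum_other_doc_count`, `req.size`), the terms aggregation keeps the top N buckets by document count and folds the remainder into a single counter. The sketch below is an assumption-level illustration of that cut-off; the function name and the `(String, u64)` bucket tuples are made up here, not the crate's types.

```rust
// Stand-alone sketch of the top-N cut-off implied by `cut_off_buckets` and
// `sum_other_doc_count` further down in this diff.
fn cut_off_sketch(buckets: &mut Vec<(String, u64)>, size: usize) -> u64 {
    // Keep the `size` buckets with the highest doc counts.
    buckets.sort_unstable_by_key(|(_term, doc_count)| std::cmp::Reverse(*doc_count));
    // Everything past the cut-off is folded into a single "other documents" counter.
    let sum_other_doc_count = buckets
        .iter()
        .skip(size)
        .map(|(_term, doc_count)| doc_count)
        .sum();
    buckets.truncate(size);
    sum_other_doc_count
}

fn main() {
    let mut buckets = vec![
        ("red".to_string(), 50),
        ("blue".to_string(), 30),
        ("green".to_string(), 25),
    ];
    let sum_other_doc_count = cut_off_sketch(&mut buckets, 2);
    assert_eq!(buckets.len(), 2);
    assert_eq!(sum_other_doc_count, 25);
}
```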
@@ -3,9 +3,9 @@ use super::agg_req_with_accessor::AggregationsWithAccessor;
 use super::agg_result::AggregationResults;
 use super::intermediate_agg_result::IntermediateAggregationResults;
 use super::segment_agg_result::SegmentAggregationResultsCollector;
-use crate::aggregation::agg_req_with_accessor::get_aggregations_with_accessor;
+use crate::aggregation::agg_req_with_accessor::get_aggs_with_accessor_and_validate;
 use crate::collector::{Collector, SegmentCollector};
-use crate::TantivyError;
+use crate::SegmentReader;
 
 /// Collector for aggregations.
 ///
@@ -50,12 +50,7 @@ impl Collector for DistributedAggregationCollector {
         _segment_local_id: crate::SegmentOrdinal,
         reader: &crate::SegmentReader,
     ) -> crate::Result<Self::Child> {
-        let aggs_with_accessor = get_aggregations_with_accessor(&self.agg, reader)?;
-        let result = SegmentAggregationResultsCollector::from_req(&aggs_with_accessor)?;
-        Ok(AggregationSegmentCollector {
-            aggs: aggs_with_accessor,
-            result,
-        })
+        AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
     }
 
     fn requires_scoring(&self) -> bool {
@@ -80,12 +75,7 @@ impl Collector for AggregationCollector {
         _segment_local_id: crate::SegmentOrdinal,
         reader: &crate::SegmentReader,
     ) -> crate::Result<Self::Child> {
-        let aggs_with_accessor = get_aggregations_with_accessor(&self.agg, reader)?;
-        let result = SegmentAggregationResultsCollector::from_req(&aggs_with_accessor)?;
-        Ok(AggregationSegmentCollector {
-            aggs: aggs_with_accessor,
-            result,
-        })
+        AggregationSegmentCollector::from_agg_req_and_reader(&self.agg, reader)
     }
 
     fn requires_scoring(&self) -> bool {
@@ -96,40 +86,60 @@ impl Collector for AggregationCollector {
         &self,
         segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
     ) -> crate::Result<Self::Fruit> {
-        merge_fruits(segment_fruits).map(|res| res.into())
+        let res = merge_fruits(segment_fruits)?;
+        AggregationResults::from_intermediate_and_req(res, self.agg.clone())
     }
 }
 
 fn merge_fruits(
-    mut segment_fruits: Vec<IntermediateAggregationResults>,
+    mut segment_fruits: Vec<crate::Result<IntermediateAggregationResults>>,
 ) -> crate::Result<IntermediateAggregationResults> {
-    if let Some(mut fruit) = segment_fruits.pop() {
+    if let Some(fruit) = segment_fruits.pop() {
+        let mut fruit = fruit?;
         for next_fruit in segment_fruits {
-            fruit.merge_fruits(&next_fruit);
+            fruit.merge_fruits(next_fruit?);
        }
         Ok(fruit)
     } else {
-        Err(TantivyError::InvalidArgument(
-            "no fruits provided in merge_fruits".to_string(),
-        ))
+        Ok(IntermediateAggregationResults::default())
     }
 }
 
+/// AggregationSegmentCollector does the aggregation collection on a segment.
 pub struct AggregationSegmentCollector {
-    aggs: AggregationsWithAccessor,
+    aggs_with_accessor: AggregationsWithAccessor,
     result: SegmentAggregationResultsCollector,
 }
 
+impl AggregationSegmentCollector {
+    /// Creates an AggregationSegmentCollector from an [Aggregations] request and a segment reader.
+    /// Also includes validation, e.g. checking field types and existence.
+    pub fn from_agg_req_and_reader(
+        agg: &Aggregations,
+        reader: &SegmentReader,
+    ) -> crate::Result<Self> {
+        let aggs_with_accessor = get_aggs_with_accessor_and_validate(agg, reader)?;
+        let result =
+            SegmentAggregationResultsCollector::from_req_and_validate(&aggs_with_accessor)?;
+        Ok(AggregationSegmentCollector {
+            aggs_with_accessor,
+            result,
+        })
+    }
+}
+
 impl SegmentCollector for AggregationSegmentCollector {
-    type Fruit = IntermediateAggregationResults;
+    type Fruit = crate::Result<IntermediateAggregationResults>;
 
     #[inline]
     fn collect(&mut self, doc: crate::DocId, _score: crate::Score) {
-        self.result.collect(doc, &self.aggs);
+        self.result.collect(doc, &self.aggs_with_accessor);
     }
 
     fn harvest(mut self) -> Self::Fruit {
-        self.result.flush_staged_docs(&self.aggs, true);
-        self.result.into()
+        self.result
+            .flush_staged_docs(&self.aggs_with_accessor, true);
+        self.result
+            .into_intermediate_aggregations_result(&self.aggs_with_accessor)
    }
 }
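With `AggregationSegmentCollector::from_agg_req_and_reader` doing validation up front, a top-level search keeps the same shape as before. The sketch below mirrors the test added later in this diff; the module paths and the single-argument `from_aggs` signature are assumptions based on this changeset, not a documented API guarantee.

```rust
use tantivy::aggregation::agg_req::{Aggregation, Aggregations, MetricAggregation};
use tantivy::aggregation::agg_result::AggregationResults;
use tantivy::aggregation::metric::StatsAggregation;
use tantivy::aggregation::AggregationCollector;
use tantivy::query::AllQuery;
use tantivy::Index;

fn stats_as_json(index: &Index) -> tantivy::Result<String> {
    // One metric aggregation named "stats" over the fast field "score".
    let agg_req: Aggregations = vec![(
        "stats".to_string(),
        Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
            "score".to_string(),
        ))),
    )]
    .into_iter()
    .collect();

    // Per this changeset, the collector validates the request (field existence
    // and type) per segment before collecting any documents.
    let collector = AggregationCollector::from_aggs(agg_req);
    let searcher = index.reader()?.searcher();
    let agg_res: AggregationResults = searcher.search(&AllQuery, &collector)?;

    // The result serializes to the JSON shape asserted in the tests later in this diff.
    Ok(serde_json::to_string(&agg_res).expect("aggregation results are serializable"))
}
```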
@@ -2,43 +2,90 @@
 //! Intermediate aggregation results can be used to merge results between segments or between
 //! indices.
 
-use std::collections::HashMap;
+use std::cmp::Ordering;
 
+use fnv::FnvHashMap;
+use itertools::Itertools;
 use serde::{Deserialize, Serialize};
 
-use super::metric::{IntermediateAverage, IntermediateStats};
-use super::segment_agg_result::{
-    SegmentAggregationResultsCollector, SegmentBucketResultCollector, SegmentMetricResultCollector,
-    SegmentRangeBucketEntry,
+use super::agg_req::{AggregationsInternal, BucketAggregationType, MetricAggregation};
+use super::agg_result::BucketResult;
+use super::bucket::{
+    cut_off_buckets, get_agg_name_and_property, GetDocCount, Order, OrderTarget,
+    SegmentHistogramBucketEntry, TermsAggregation,
 };
-use super::{Key, VecWithNames};
+use super::metric::{IntermediateAverage, IntermediateStats};
+use super::segment_agg_result::SegmentMetricResultCollector;
+use super::{Key, SerializedKey, VecWithNames};
+use crate::aggregation::agg_result::{AggregationResults, BucketEntry};
+use crate::aggregation::bucket::TermsAggregationInternal;
 
 /// Contains the intermediate aggregation result, which is optimized to be merged with other
 /// intermediate results.
 #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
-pub struct IntermediateAggregationResults(pub(crate) VecWithNames<IntermediateAggregationResult>);
-
-impl From<SegmentAggregationResultsCollector> for IntermediateAggregationResults {
-    fn from(tree: SegmentAggregationResultsCollector) -> Self {
-        let mut data = vec![];
-        for (key, bucket) in tree.buckets.into_iter() {
-            data.push((key, IntermediateAggregationResult::Bucket(bucket.into())));
-        }
-        for (key, metric) in tree.metrics.into_iter() {
-            data.push((key, IntermediateAggregationResult::Metric(metric.into())));
-        }
-        Self(VecWithNames::from_entries(data))
-    }
+pub struct IntermediateAggregationResults {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) metrics: Option<VecWithNames<IntermediateMetricResult>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) buckets: Option<VecWithNames<IntermediateBucketResult>>,
 }
 
 impl IntermediateAggregationResults {
+    pub(crate) fn empty_from_req(req: &AggregationsInternal) -> Self {
+        let metrics = if req.metrics.is_empty() {
+            None
+        } else {
+            let metrics = req
+                .metrics
+                .iter()
+                .map(|(key, req)| {
+                    (
+                        key.to_string(),
+                        IntermediateMetricResult::empty_from_req(req),
+                    )
+                })
+                .collect();
+            Some(VecWithNames::from_entries(metrics))
+        };
+
+        let buckets = if req.buckets.is_empty() {
+            None
+        } else {
+            let buckets = req
+                .buckets
+                .iter()
+                .map(|(key, req)| {
+                    (
+                        key.to_string(),
+                        IntermediateBucketResult::empty_from_req(&req.bucket_agg),
+                    )
+                })
+                .collect();
+            Some(VecWithNames::from_entries(buckets))
+        };
+
+        Self { metrics, buckets }
+    }
+
     /// Merge an other intermediate aggregation result into this result.
     ///
     /// The order of the values need to be the same on both results. This is ensured when the same
     /// (key values) are present on the underlying VecWithNames struct.
-    pub fn merge_fruits(&mut self, other: &IntermediateAggregationResults) {
-        for (tree_left, tree_right) in self.0.values_mut().zip(other.0.values()) {
-            tree_left.merge_fruits(tree_right);
+    pub fn merge_fruits(&mut self, other: IntermediateAggregationResults) {
+        if let (Some(buckets_left), Some(buckets_right)) = (&mut self.buckets, other.buckets) {
+            for (bucket_left, bucket_right) in
+                buckets_left.values_mut().zip(buckets_right.into_values())
+            {
+                bucket_left.merge_fruits(bucket_right);
+            }
+        }
+
+        if let (Some(metrics_left), Some(metrics_right)) = (&mut self.metrics, other.metrics) {
+            for (metric_left, metric_right) in
+                metrics_left.values_mut().zip(metrics_right.into_values())
+            {
+                metric_left.merge_fruits(metric_right);
+            }
         }
     }
 }
@@ -52,28 +99,6 @@ pub enum IntermediateAggregationResult {
     Metric(IntermediateMetricResult),
 }
 
-impl IntermediateAggregationResult {
-    fn merge_fruits(&mut self, other: &IntermediateAggregationResult) {
-        match (self, other) {
-            (
-                IntermediateAggregationResult::Bucket(res_left),
-                IntermediateAggregationResult::Bucket(res_right),
-            ) => {
-                res_left.merge_fruits(res_right);
-            }
-            (
-                IntermediateAggregationResult::Metric(res_left),
-                IntermediateAggregationResult::Metric(res_right),
-            ) => {
-                res_left.merge_fruits(res_right);
-            }
-            _ => {
-                panic!("incompatible types in aggregation tree on merge fruits");
-            }
-        }
-    }
-}
-
 /// Holds the intermediate data for metric results
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub enum IntermediateMetricResult {
@@ -97,7 +122,17 @@ impl From<SegmentMetricResultCollector> for IntermediateMetricResult {
 }
 
 impl IntermediateMetricResult {
-    fn merge_fruits(&mut self, other: &IntermediateMetricResult) {
+    pub(crate) fn empty_from_req(req: &MetricAggregation) -> Self {
+        match req {
+            MetricAggregation::Average(_) => {
+                IntermediateMetricResult::Average(IntermediateAverage::default())
+            }
+            MetricAggregation::Stats(_) => {
+                IntermediateMetricResult::Stats(IntermediateStats::default())
+            }
+        }
+    }
+    fn merge_fruits(&mut self, other: IntermediateMetricResult) {
         match (self, other) {
             (
                 IntermediateMetricResult::Average(avg_data_left),
@@ -112,7 +147,7 @@ impl IntermediateMetricResult {
                 stats_left.merge_fruits(stats_right);
             }
             _ => {
-                panic!("incompatible fruit types in tree {:?}", other);
+                panic!("incompatible fruit types in tree");
             }
         }
     }
@@ -124,36 +159,227 @@ impl IntermediateMetricResult {
 pub enum IntermediateBucketResult {
     /// This is the range entry for a bucket, which contains a key, count, from, to, and optionally
     /// sub_aggregations.
-    Range(HashMap<Key, IntermediateRangeBucketEntry>),
+    Range(IntermediateRangeBucketResult),
+    /// This is the histogram entry for a bucket, which contains a key, count, and optionally
+    /// sub_aggregations.
+    Histogram {
+        /// The buckets
+        buckets: Vec<IntermediateHistogramBucketEntry>,
+    },
+    /// Term aggregation
+    Terms(IntermediateTermBucketResult),
 }
 
-impl From<SegmentBucketResultCollector> for IntermediateBucketResult {
-    fn from(collector: SegmentBucketResultCollector) -> Self {
-        match collector {
-            SegmentBucketResultCollector::Range(range) => range.into_intermediate_bucket_result(),
+impl IntermediateBucketResult {
+    pub(crate) fn empty_from_req(req: &BucketAggregationType) -> Self {
+        match req {
+            BucketAggregationType::Terms(_) => IntermediateBucketResult::Terms(Default::default()),
+            BucketAggregationType::Range(_) => IntermediateBucketResult::Range(Default::default()),
+            BucketAggregationType::Histogram(_) => {
+                IntermediateBucketResult::Histogram { buckets: vec![] }
+            }
+        }
+    }
+    fn merge_fruits(&mut self, other: IntermediateBucketResult) {
+        match (self, other) {
+            (
+                IntermediateBucketResult::Terms(term_res_left),
+                IntermediateBucketResult::Terms(term_res_right),
+            ) => {
+                merge_maps(&mut term_res_left.entries, term_res_right.entries);
+                term_res_left.sum_other_doc_count += term_res_right.sum_other_doc_count;
+                term_res_left.doc_count_error_upper_bound +=
+                    term_res_right.doc_count_error_upper_bound;
+            }
+
+            (
+                IntermediateBucketResult::Range(range_res_left),
+                IntermediateBucketResult::Range(range_res_right),
+            ) => {
+                merge_maps(&mut range_res_left.buckets, range_res_right.buckets);
+            }
+            (
+                IntermediateBucketResult::Histogram {
+                    buckets: buckets_left,
+                    ..
+                },
+                IntermediateBucketResult::Histogram {
+                    buckets: buckets_right,
+                    ..
+                },
+            ) => {
+                let buckets = buckets_left
+                    .drain(..)
+                    .merge_join_by(buckets_right.into_iter(), |left, right| {
+                        left.key.partial_cmp(&right.key).unwrap_or(Ordering::Equal)
+                    })
+                    .map(|either| match either {
+                        itertools::EitherOrBoth::Both(mut left, right) => {
+                            left.merge_fruits(right);
+                            left
+                        }
+                        itertools::EitherOrBoth::Left(left) => left,
+                        itertools::EitherOrBoth::Right(right) => right,
+                    })
+                    .collect();
+
+                *buckets_left = buckets;
+            }
+            (IntermediateBucketResult::Range(_), _) => {
+                panic!("try merge on different types")
+            }
+            (IntermediateBucketResult::Histogram { .. }, _) => {
+                panic!("try merge on different types")
+            }
+            (IntermediateBucketResult::Terms { .. }, _) => {
+                panic!("try merge on different types")
+            }
         }
     }
 }
 
-impl IntermediateBucketResult {
-    fn merge_fruits(&mut self, other: &IntermediateBucketResult) {
-        match (self, other) {
-            (
-                IntermediateBucketResult::Range(entries_left),
-                IntermediateBucketResult::Range(entries_right),
-            ) => {
-                for (name, entry_left) in entries_left.iter_mut() {
-                    if let Some(entry_right) = entries_right.get(name) {
-                        entry_left.merge_fruits(entry_right);
-                    }
-                }
-
-                for (key, res) in entries_right.iter() {
-                    if !entries_left.contains_key(key) {
-                        entries_left.insert(key.clone(), res.clone());
-                    }
-                }
-            }
-        }
-    }
-}
+#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
+/// Range aggregation including error counts
+pub struct IntermediateRangeBucketResult {
+    pub(crate) buckets: FnvHashMap<SerializedKey, IntermediateRangeBucketEntry>,
+}
+
+#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize)]
+/// Term aggregation including error counts
+pub struct IntermediateTermBucketResult {
+    pub(crate) entries: FnvHashMap<String, IntermediateTermBucketEntry>,
+    pub(crate) sum_other_doc_count: u64,
+    pub(crate) doc_count_error_upper_bound: u64,
+}
+
+impl IntermediateTermBucketResult {
+    pub(crate) fn into_final_result(
+        self,
+        req: &TermsAggregation,
+        sub_aggregation_req: &AggregationsInternal,
+    ) -> crate::Result<BucketResult> {
+        let req = TermsAggregationInternal::from_req(req);
+        let mut buckets: Vec<BucketEntry> = self
+            .entries
+            .into_iter()
+            .filter(|bucket| bucket.1.doc_count >= req.min_doc_count)
+            .map(|(key, entry)| {
+                Ok(BucketEntry {
+                    key: Key::Str(key),
+                    doc_count: entry.doc_count,
+                    sub_aggregation: AggregationResults::from_intermediate_and_req_internal(
+                        entry.sub_aggregation,
+                        sub_aggregation_req,
+                    )?,
+                })
+            })
+            .collect::<crate::Result<_>>()?;
+
+        let order = req.order.order;
+        match req.order.target {
+            OrderTarget::Key => {
+                buckets.sort_by(|left, right| {
+                    if req.order.order == Order::Desc {
+                        left.key.partial_cmp(&right.key)
+                    } else {
+                        right.key.partial_cmp(&left.key)
+                    }
+                    .expect("expected type string, which is always sortable")
+                });
+            }
+            OrderTarget::Count => {
+                if req.order.order == Order::Desc {
+                    buckets.sort_unstable_by_key(|bucket| std::cmp::Reverse(bucket.doc_count()));
+                } else {
+                    buckets.sort_unstable_by_key(|bucket| bucket.doc_count());
+                }
+            }
+            OrderTarget::SubAggregation(name) => {
+                let (agg_name, agg_property) = get_agg_name_and_property(&name);
+                let mut buckets_with_val = buckets
+                    .into_iter()
+                    .map(|bucket| {
+                        let val = bucket
+                            .sub_aggregation
+                            .get_value_from_aggregation(agg_name, agg_property)?
+                            .unwrap_or(f64::NAN);
+                        Ok((bucket, val))
+                    })
+                    .collect::<crate::Result<Vec<_>>>()?;
+
+                buckets_with_val.sort_by(|(_, val1), (_, val2)| {
+                    // TODO use total_cmp in next rust stable release
+                    match &order {
+                        Order::Desc => val2.partial_cmp(val1).unwrap_or(std::cmp::Ordering::Equal),
+                        Order::Asc => val1.partial_cmp(val2).unwrap_or(std::cmp::Ordering::Equal),
+                    }
+                });
+                buckets = buckets_with_val
+                    .into_iter()
+                    .map(|(bucket, _val)| bucket)
+                    .collect_vec();
+            }
+        }
+
+        // We ignore _term_doc_count_before_cutoff here, because it increases the upperbound error
+        // only for terms that didn't make it into the top N.
+        //
+        // This can be interesting, as a value of quality of the results, but not good to check the
+        // actual error count for the returned terms.
+        let (_term_doc_count_before_cutoff, sum_other_doc_count) =
+            cut_off_buckets(&mut buckets, req.size as usize);
+
+        let doc_count_error_upper_bound = if req.show_term_doc_count_error {
+            Some(self.doc_count_error_upper_bound)
+        } else {
+            None
+        };
+
+        Ok(BucketResult::Terms {
+            buckets,
+            sum_other_doc_count: self.sum_other_doc_count + sum_other_doc_count,
+            doc_count_error_upper_bound,
+        })
+    }
+}
+
+trait MergeFruits {
+    fn merge_fruits(&mut self, other: Self);
+}
+
+fn merge_maps<V: MergeFruits + Clone>(
+    entries_left: &mut FnvHashMap<SerializedKey, V>,
+    mut entries_right: FnvHashMap<SerializedKey, V>,
+) {
+    for (name, entry_left) in entries_left.iter_mut() {
+        if let Some(entry_right) = entries_right.remove(name) {
+            entry_left.merge_fruits(entry_right);
+        }
+    }
+
+    for (key, res) in entries_right.into_iter() {
+        entries_left.entry(key).or_insert(res);
+    }
+}
+
+/// This is the histogram entry for a bucket, which contains a key, count, and optionally
+/// sub_aggregations.
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct IntermediateHistogramBucketEntry {
+    /// The unique the bucket is identified.
+    pub key: f64,
+    /// The number of documents in the bucket.
+    pub doc_count: u64,
+    /// The sub_aggregation in this bucket.
+    pub sub_aggregation: IntermediateAggregationResults,
+}
+
+impl From<SegmentHistogramBucketEntry> for IntermediateHistogramBucketEntry {
+    fn from(entry: SegmentHistogramBucketEntry) -> Self {
+        IntermediateHistogramBucketEntry {
+            key: entry.key,
+            doc_count: entry.doc_count,
+            sub_aggregation: Default::default(),
+        }
+    }
+}
@@ -166,7 +392,6 @@ pub struct IntermediateRangeBucketEntry {
     pub key: Key,
     /// The number of documents in the bucket.
     pub doc_count: u64,
-    pub(crate) values: Option<Vec<u64>>,
     /// The sub_aggregation in this bucket.
     pub sub_aggregation: IntermediateAggregationResults,
     /// The from range of the bucket. Equals f64::MIN when None.
@@ -177,49 +402,54 @@ pub struct IntermediateRangeBucketEntry {
     pub to: Option<f64>,
 }
 
-impl From<SegmentRangeBucketEntry> for IntermediateRangeBucketEntry {
-    fn from(entry: SegmentRangeBucketEntry) -> Self {
-        let sub_aggregation = if let Some(sub_aggregation) = entry.sub_aggregation {
-            sub_aggregation.into()
-        } else {
-            Default::default()
-        };
-        // let sub_aggregation = entry.sub_aggregation.into();
-
-        IntermediateRangeBucketEntry {
-            key: entry.key,
-            doc_count: entry.doc_count,
-            values: None,
-            sub_aggregation,
-            to: entry.to,
-            from: entry.from,
-        }
-    }
-}
+/// This is the term entry for a bucket, which contains a count, and optionally
+/// sub_aggregations.
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct IntermediateTermBucketEntry {
+    /// The number of documents in the bucket.
+    pub doc_count: u64,
+    /// The sub_aggregation in this bucket.
+    pub sub_aggregation: IntermediateAggregationResults,
+}
+
+impl MergeFruits for IntermediateTermBucketEntry {
+    fn merge_fruits(&mut self, other: IntermediateTermBucketEntry) {
+        self.doc_count += other.doc_count;
+        self.sub_aggregation.merge_fruits(other.sub_aggregation);
+    }
+}
 
-impl IntermediateRangeBucketEntry {
-    fn merge_fruits(&mut self, other: &IntermediateRangeBucketEntry) {
+impl MergeFruits for IntermediateRangeBucketEntry {
+    fn merge_fruits(&mut self, other: IntermediateRangeBucketEntry) {
         self.doc_count += other.doc_count;
-        self.sub_aggregation.merge_fruits(&other.sub_aggregation);
+        self.sub_aggregation.merge_fruits(other.sub_aggregation);
+    }
+}
+
+impl MergeFruits for IntermediateHistogramBucketEntry {
+    fn merge_fruits(&mut self, other: IntermediateHistogramBucketEntry) {
+        self.doc_count += other.doc_count;
+        self.sub_aggregation.merge_fruits(other.sub_aggregation);
     }
 }
 
 #[cfg(test)]
 mod tests {
+    use std::collections::HashMap;
+
     use pretty_assertions::assert_eq;
 
     use super::*;
 
     fn get_sub_test_tree(data: &[(String, u64)]) -> IntermediateAggregationResults {
         let mut map = HashMap::new();
-        let mut buckets = HashMap::new();
+        let mut buckets = FnvHashMap::default();
         for (key, doc_count) in data {
             buckets.insert(
-                Key::Str(key.to_string()),
+                key.to_string(),
                 IntermediateRangeBucketEntry {
                     key: Key::Str(key.to_string()),
                     doc_count: *doc_count,
-                    values: None,
                     sub_aggregation: Default::default(),
                     from: None,
                     to: None,
@@ -228,21 +458,25 @@ mod tests {
         }
         map.insert(
             "my_agg_level2".to_string(),
-            IntermediateAggregationResult::Bucket(IntermediateBucketResult::Range(buckets)),
+            IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
         );
-        IntermediateAggregationResults(VecWithNames::from_entries(map.into_iter().collect()))
+        IntermediateAggregationResults {
+            buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
+            metrics: Default::default(),
+        }
     }
 
-    fn get_test_tree(data: &[(String, u64, String, u64)]) -> IntermediateAggregationResults {
+    fn get_intermediat_tree_with_ranges(
+        data: &[(String, u64, String, u64)],
+    ) -> IntermediateAggregationResults {
         let mut map = HashMap::new();
-        let mut buckets = HashMap::new();
+        let mut buckets: FnvHashMap<_, _> = Default::default();
         for (key, doc_count, sub_aggregation_key, sub_aggregation_count) in data {
             buckets.insert(
-                Key::Str(key.to_string()),
+                key.to_string(),
                 IntermediateRangeBucketEntry {
                     key: Key::Str(key.to_string()),
                     doc_count: *doc_count,
-                    values: None,
                     from: None,
                     to: None,
                     sub_aggregation: get_sub_test_tree(&[(
@@ -254,25 +488,28 @@ mod tests {
         }
         map.insert(
             "my_agg_level1".to_string(),
-            IntermediateAggregationResult::Bucket(IntermediateBucketResult::Range(buckets)),
+            IntermediateBucketResult::Range(IntermediateRangeBucketResult { buckets }),
         );
-        IntermediateAggregationResults(VecWithNames::from_entries(map.into_iter().collect()))
+        IntermediateAggregationResults {
+            buckets: Some(VecWithNames::from_entries(map.into_iter().collect())),
+            metrics: Default::default(),
+        }
     }
 
     #[test]
     fn test_merge_fruits_tree_1() {
-        let mut tree_left = get_test_tree(&[
+        let mut tree_left = get_intermediat_tree_with_ranges(&[
             ("red".to_string(), 50, "1900".to_string(), 25),
             ("blue".to_string(), 30, "1900".to_string(), 30),
         ]);
-        let tree_right = get_test_tree(&[
+        let tree_right = get_intermediat_tree_with_ranges(&[
            ("red".to_string(), 60, "1900".to_string(), 30),
             ("blue".to_string(), 25, "1900".to_string(), 50),
         ]);
 
-        tree_left.merge_fruits(&tree_right);
+        tree_left.merge_fruits(tree_right);
 
-        let tree_expected = get_test_tree(&[
+        let tree_expected = get_intermediat_tree_with_ranges(&[
             ("red".to_string(), 110, "1900".to_string(), 55),
             ("blue".to_string(), 55, "1900".to_string(), 80),
         ]);
@@ -282,18 +519,18 @@ mod tests {
 
     #[test]
     fn test_merge_fruits_tree_2() {
-        let mut tree_left = get_test_tree(&[
+        let mut tree_left = get_intermediat_tree_with_ranges(&[
             ("red".to_string(), 50, "1900".to_string(), 25),
             ("blue".to_string(), 30, "1900".to_string(), 30),
         ]);
-        let tree_right = get_test_tree(&[
+        let tree_right = get_intermediat_tree_with_ranges(&[
             ("red".to_string(), 60, "1900".to_string(), 30),
             ("green".to_string(), 25, "1900".to_string(), 50),
         ]);
 
-        tree_left.merge_fruits(&tree_right);
+        tree_left.merge_fruits(tree_right);
 
-        let tree_expected = get_test_tree(&[
+        let tree_expected = get_intermediat_tree_with_ranges(&[
             ("red".to_string(), 110, "1900".to_string(), 55),
             ("blue".to_string(), 30, "1900".to_string(), 30),
             ("green".to_string(), 25, "1900".to_string(), 50),
@@ -301,4 +538,18 @@ mod tests {
 
         assert_eq!(tree_left, tree_expected);
     }
+
+    #[test]
+    fn test_merge_fruits_tree_empty() {
+        let mut tree_left = get_intermediat_tree_with_ranges(&[
+            ("red".to_string(), 50, "1900".to_string(), 25),
+            ("blue".to_string(), 30, "1900".to_string(), 30),
+        ]);
+
+        let orig = tree_left.clone();
+
+        tree_left.merge_fruits(IntermediateAggregationResults::default());
+
+        assert_eq!(tree_left, orig);
+    }
 }
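The merge behaviour introduced by `merge_maps` and exercised by the tests above boils down to: merge entries that exist on both sides, adopt entries that only exist on the right. A doc-count-only sketch with plain `HashMap`s (not the crate's `FnvHashMap` and bucket entry types):

```rust
use std::collections::HashMap;

// Keys present on both sides are merged (here: doc counts added),
// keys only present on the right are moved over unchanged.
fn merge_doc_counts(left: &mut HashMap<String, u64>, right: HashMap<String, u64>) {
    for (key, right_count) in right {
        *left.entry(key).or_insert(0) += right_count;
    }
}

fn main() {
    let mut left = HashMap::from([("red".to_string(), 50u64), ("blue".to_string(), 30)]);
    let right = HashMap::from([("red".to_string(), 60u64), ("green".to_string(), 25)]);
    merge_doc_counts(&mut left, right);
    // Mirrors the expectations of `test_merge_fruits_tree_2` above.
    assert_eq!(left["red"], 110);
    assert_eq!(left["blue"], 30);
    assert_eq!(left["green"], 25);
}
```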
@@ -12,6 +12,15 @@ use crate::DocId;
 /// extracted from the aggregated documents.
 /// Supported field types are u64, i64, and f64.
 /// See [super::SingleMetricResult] for return value.
+///
+/// # JSON Format
+/// ```json
+/// {
+///     "avg": {
+///         "field": "score",
+///     }
+/// }
+/// ```
 pub struct AverageAggregation {
     /// The field name to compute the stats on.
     pub field: String,
@@ -85,13 +94,17 @@ impl IntermediateAverage {
     }
 
     /// Merge average data into this instance.
-    pub fn merge_fruits(&mut self, other: &IntermediateAverage) {
+    pub fn merge_fruits(&mut self, other: IntermediateAverage) {
         self.sum += other.sum;
         self.doc_count += other.doc_count;
     }
     /// compute final result
-    pub fn finalize(&self) -> f64 {
-        self.sum / self.doc_count as f64
+    pub fn finalize(&self) -> Option<f64> {
+        if self.doc_count == 0 {
+            None
+        } else {
+            Some(self.sum / self.doc_count as f64)
+        }
     }
     #[inline]
     fn collect(&mut self, val: f64) {
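The `finalize` change above is the behavioural core of this part of the diff: an average over zero documents becomes `None` rather than the NaN that dividing by zero would produce. A stand-alone mirror of that logic, using a made-up struct instead of `IntermediateAverage`:

```rust
// Stand-in accumulator; only the finalize semantics are of interest here.
struct AverageSketch {
    sum: f64,
    doc_count: u64,
}

impl AverageSketch {
    fn finalize(&self) -> Option<f64> {
        if self.doc_count == 0 {
            None
        } else {
            Some(self.sum / self.doc_count as f64)
        }
    }
}

fn main() {
    assert_eq!(AverageSketch { sum: 0.0, doc_count: 0 }.finalize(), None);
    assert_eq!(AverageSketch { sum: 9.0, doc_count: 3 }.finalize(), Some(3.0));
}
```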
@@ -1,5 +1,7 @@
 //! Module for all metric aggregations.
+//!
+//! The aggregations in this family compute metrics, see [super::agg_req::MetricAggregation] for
+//! details.
 mod average;
 mod stats;
 pub use average::*;
@@ -12,11 +14,17 @@ pub use stats::*;
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct SingleMetricResult {
     /// The value of the single value metric.
-    pub value: f64,
+    pub value: Option<f64>,
 }
 
 impl From<f64> for SingleMetricResult {
     fn from(value: f64) -> Self {
+        Self { value: Some(value) }
+    }
+}
+
+impl From<Option<f64>> for SingleMetricResult {
+    fn from(value: Option<f64>) -> Self {
         Self { value }
     }
 }
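Switching `SingleMetricResult::value` to `Option<f64>` is what makes empty metrics serialize as JSON `null`, which the stats tests below assert. A minimal serde check of that behaviour with a local stand-in struct (assumes `serde` with the derive feature and `serde_json` as dependencies):

```rust
use serde::Serialize;

// Local stand-in for the crate's SingleMetricResult; same field shape.
#[derive(Serialize)]
struct SingleMetricResultSketch {
    value: Option<f64>,
}

fn main() {
    let empty = SingleMetricResultSketch { value: None };
    let full = SingleMetricResultSketch { value: Some(12.5) };
    // A missing metric value renders as null, a present one as a plain number.
    assert_eq!(serde_json::to_string(&empty).unwrap(), r#"{"value":null}"#);
    assert_eq!(serde_json::to_string(&full).unwrap(), r#"{"value":12.5}"#);
}
```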
@@ -3,17 +3,28 @@ use serde::{Deserialize, Serialize};
 use crate::aggregation::f64_from_fastfield_u64;
 use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
 use crate::schema::Type;
-use crate::DocId;
+use crate::{DocId, TantivyError};
 
 /// A multi-value metric aggregation that computes stats of numeric values that are
 /// extracted from the aggregated documents.
 /// Supported field types are u64, i64, and f64.
 /// See [Stats] for returned statistics.
+///
+/// # JSON Format
+/// ```json
+/// {
+///     "stats": {
+///         "field": "score",
+///     }
+/// }
+/// ```
+
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct StatsAggregation {
     /// The field name to compute the stats on.
     pub field: String,
 }
 
 impl StatsAggregation {
     /// Create new StatsAggregation from a field.
     pub fn from_field_name(field_name: String) -> Self {
@@ -32,14 +43,31 @@ pub struct Stats {
     pub count: usize,
     /// The sum of the fast field values.
     pub sum: f64,
-    /// The standard deviation of the fast field values.
-    pub standard_deviation: f64,
+    /// The standard deviation of the fast field values. None for count == 0.
+    pub standard_deviation: Option<f64>,
     /// The min value of the fast field values.
-    pub min: f64,
+    pub min: Option<f64>,
     /// The max value of the fast field values.
-    pub max: f64,
-    /// The average of the values.
-    pub avg: f64,
+    pub max: Option<f64>,
+    /// The average of the values. None for count == 0.
+    pub avg: Option<f64>,
+}
+
+impl Stats {
+    pub(crate) fn get_value(&self, agg_property: &str) -> crate::Result<Option<f64>> {
+        match agg_property {
+            "count" => Ok(Some(self.count as f64)),
+            "sum" => Ok(Some(self.sum)),
+            "standard_deviation" => Ok(self.standard_deviation),
+            "min" => Ok(self.min),
+            "max" => Ok(self.max),
+            "avg" => Ok(self.avg),
+            _ => Err(TantivyError::InvalidArgument(format!(
+                "unknown property {} on stats metric aggregation",
+                agg_property
+            ))),
+        }
+    }
 }
 
 /// IntermediateStats contains the mergeable version for stats.
@@ -51,9 +79,8 @@ pub struct IntermediateStats {
     min: f64,
     max: f64,
 }
-
-impl IntermediateStats {
-    fn new() -> Self {
+impl Default for IntermediateStats {
+    fn default() -> Self {
         Self {
             count: 0,
             sum: 0.0,
@@ -62,22 +89,28 @@ impl IntermediateStats {
             max: f64::MIN,
         }
     }
-    pub(crate) fn avg(&self) -> f64 {
-        self.sum / (self.count as f64)
+}
+
+impl IntermediateStats {
+    pub(crate) fn avg(&self) -> Option<f64> {
+        if self.count == 0 {
+            None
+        } else {
+            Some(self.sum / (self.count as f64))
+        }
     }
 
     fn square_mean(&self) -> f64 {
         self.squared_sum / (self.count as f64)
     }
 
-    pub(crate) fn standard_deviation(&self) -> f64 {
-        let average = self.avg();
-        (self.square_mean() - average * average).sqrt()
+    pub(crate) fn standard_deviation(&self) -> Option<f64> {
+        self.avg()
+            .map(|average| (self.square_mean() - average * average).sqrt())
     }
 
     /// Merge data from other stats into this instance.
-    pub fn merge_fruits(&mut self, other: &IntermediateStats) {
+    pub fn merge_fruits(&mut self, other: IntermediateStats) {
         self.count += other.count;
         self.sum += other.sum;
         self.squared_sum += other.squared_sum;
@@ -85,14 +118,24 @@ impl IntermediateStats {
         self.max = self.max.max(other.max);
     }
 
-    /// compute final result
+    /// compute final resultimprove_docs
     pub fn finalize(&self) -> Stats {
+        let min = if self.count == 0 {
+            None
+        } else {
+            Some(self.min)
+        };
+        let max = if self.count == 0 {
+            None
+        } else {
+            Some(self.max)
+        };
         Stats {
             count: self.count,
             sum: self.sum,
             standard_deviation: self.standard_deviation(),
-            min: self.min,
-            max: self.max,
+            min,
+            max,
             avg: self.avg(),
         }
     }
@@ -117,7 +160,7 @@ impl SegmentStatsCollector {
     pub fn from_req(field_type: Type) -> Self {
         Self {
             field_type,
-            stats: IntermediateStats::new(),
+            stats: IntermediateStats::default(),
         }
     }
     pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
@@ -157,12 +200,50 @@ mod tests {
     };
     use crate::aggregation::agg_result::AggregationResults;
     use crate::aggregation::metric::StatsAggregation;
-    use crate::aggregation::tests::get_test_index_2_segments;
+    use crate::aggregation::tests::{get_test_index_2_segments, get_test_index_from_values};
     use crate::aggregation::AggregationCollector;
-    use crate::query::TermQuery;
+    use crate::query::{AllQuery, TermQuery};
     use crate::schema::IndexRecordOption;
     use crate::Term;
 
+    #[test]
+    fn test_aggregation_stats_empty_index() -> crate::Result<()> {
+        // test index without segments
+        let values = vec![];
+
+        let index = get_test_index_from_values(false, &values)?;
+
+        let agg_req_1: Aggregations = vec![(
+            "stats".to_string(),
+            Aggregation::Metric(MetricAggregation::Stats(StatsAggregation::from_field_name(
+                "score".to_string(),
+            ))),
+        )]
+        .into_iter()
+        .collect();
+
+        let collector = AggregationCollector::from_aggs(agg_req_1);
+
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let agg_res: AggregationResults = searcher.search(&AllQuery, &collector).unwrap();
+
+        let res: Value = serde_json::from_str(&serde_json::to_string(&agg_res)?)?;
+        assert_eq!(
+            res["stats"],
+            json!({
+                "avg": Value::Null,
+                "count": 0,
+                "max": Value::Null,
+                "min": Value::Null,
+                "standard_deviation": Value::Null,
+                "sum": 0.0
+            })
+        );
+
+        Ok(())
+    }
+
     #[test]
     fn test_aggregation_stats() -> crate::Result<()> {
         let index = get_test_index_2_segments(false)?;
@@ -199,7 +280,11 @@ mod tests {
             Aggregation::Bucket(BucketAggregation {
                 bucket_agg: BucketAggregationType::Range(RangeAggregation {
                     field: "score".to_string(),
-                    ranges: vec![(3f64..7f64).into(), (7f64..20f64).into()],
+                    ranges: vec![
+                        (3f64..7f64).into(),
+                        (7f64..19f64).into(),
+                        (19f64..20f64).into(),
+                    ],
                 }),
                 sub_aggregation: iter::once((
                     "stats".to_string(),
@@ -268,6 +353,18 @@ mod tests {
             })
         );
 
+        assert_eq!(
+            res["range"]["buckets"][3]["stats"],
+            json!({
+                "avg": serde_json::Value::Null,
+                "count": 0,
+                "max": serde_json::Value::Null,
+                "min": serde_json::Value::Null,
+                "standard_deviation": serde_json::Value::Null,
+                "sum": 0.0,
+            })
+        );
+
         Ok(())
     }
 }
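The stats code above recovers the standard deviation from only `count`, `sum`, and `squared_sum` as `sqrt(square_mean - avg^2)`, and now reports `None` for empty input. A small numeric check of that identity, detached from the crate's types:

```rust
// Population standard deviation from running sums: sqrt(E[x^2] - E[x]^2).
fn std_dev(count: u64, sum: f64, squared_sum: f64) -> Option<f64> {
    if count == 0 {
        return None;
    }
    let avg = sum / count as f64;
    let square_mean = squared_sum / count as f64;
    Some((square_mean - avg * avg).sqrt())
}

fn main() {
    // Values 1, 2, 3, 4: sum = 10, squared sum = 30, population std deviation = sqrt(1.25).
    let sd = std_dev(4, 10.0, 30.0).expect("non-empty input");
    assert!((sd - 1.25f64.sqrt()).abs() < 1e-12);
    assert_eq!(std_dev(0, 0.0, 0.0), None);
}
```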
@@ -5,31 +5,41 @@
 
 use std::fmt::Debug;
 
-use itertools::Itertools;
 
 use super::agg_req::MetricAggregation;
 use super::agg_req_with_accessor::{
     AggregationsWithAccessor, BucketAggregationWithAccessor, MetricAggregationWithAccessor,
 };
-use super::bucket::SegmentRangeCollector;
+use super::bucket::{SegmentHistogramCollector, SegmentRangeCollector, SegmentTermCollector};
+use super::intermediate_agg_result::{IntermediateAggregationResults, IntermediateBucketResult};
 use super::metric::{
     AverageAggregation, SegmentAverageCollector, SegmentStatsCollector, StatsAggregation,
 };
-use super::{Key, VecWithNames};
+use super::VecWithNames;
 use crate::aggregation::agg_req::BucketAggregationType;
 use crate::DocId;
 
-pub(crate) const DOC_BLOCK_SIZE: usize = 256;
+pub(crate) const DOC_BLOCK_SIZE: usize = 64;
 pub(crate) type DocBlock = [DocId; DOC_BLOCK_SIZE];
 
 #[derive(Clone, PartialEq)]
 pub(crate) struct SegmentAggregationResultsCollector {
-    pub(crate) metrics: VecWithNames<SegmentMetricResultCollector>,
-    pub(crate) buckets: VecWithNames<SegmentBucketResultCollector>,
+    pub(crate) metrics: Option<VecWithNames<SegmentMetricResultCollector>>,
+    pub(crate) buckets: Option<VecWithNames<SegmentBucketResultCollector>>,
     staged_docs: DocBlock,
     num_staged_docs: usize,
 }
 
+impl Default for SegmentAggregationResultsCollector {
+    fn default() -> Self {
+        Self {
+            metrics: Default::default(),
+            buckets: Default::default(),
+            staged_docs: [0; DOC_BLOCK_SIZE],
+            num_staged_docs: Default::default(),
+        }
+    }
+}
+
 impl Debug for SegmentAggregationResultsCollector {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("SegmentAggregationResultsCollector")
@@ -42,25 +52,59 @@ impl Debug for SegmentAggregationResultsCollector {
 }
 
 impl SegmentAggregationResultsCollector {
-    pub(crate) fn from_req(req: &AggregationsWithAccessor) -> crate::Result<Self> {
+    pub fn into_intermediate_aggregations_result(
+        self,
+        agg_with_accessor: &AggregationsWithAccessor,
+    ) -> crate::Result<IntermediateAggregationResults> {
+        let buckets = if let Some(buckets) = self.buckets {
+            let entries = buckets
+                .into_iter()
+                .zip(agg_with_accessor.buckets.values())
+                .map(|((key, bucket), acc)| Ok((key, bucket.into_intermediate_bucket_result(acc)?)))
+                .collect::<crate::Result<Vec<(String, _)>>>()?;
+            Some(VecWithNames::from_entries(entries))
+        } else {
+            None
+        };
+        let metrics = self.metrics.map(VecWithNames::from_other);
+
+        Ok(IntermediateAggregationResults { metrics, buckets })
+    }
+
+    pub(crate) fn from_req_and_validate(req: &AggregationsWithAccessor) -> crate::Result<Self> {
         let buckets = req
             .buckets
             .entries()
             .map(|(key, req)| {
                 Ok((
                     key.to_string(),
-                    SegmentBucketResultCollector::from_req(req)?,
+                    SegmentBucketResultCollector::from_req_and_validate(req)?,
                 ))
             })
-            .collect::<crate::Result<_>>()?;
+            .collect::<crate::Result<Vec<(String, _)>>>()?;
         let metrics = req
             .metrics
             .entries()
-            .map(|(key, req)| (key.to_string(), SegmentMetricResultCollector::from_req(req)))
-            .collect_vec();
+            .map(|(key, req)| {
+                Ok((
+                    key.to_string(),
+                    SegmentMetricResultCollector::from_req_and_validate(req)?,
+                ))
+            })
+            .collect::<crate::Result<Vec<(String, _)>>>()?;
+        let metrics = if metrics.is_empty() {
+            None
+        } else {
+            Some(VecWithNames::from_entries(metrics))
+        };
+        let buckets = if buckets.is_empty() {
+            None
+        } else {
+            Some(VecWithNames::from_entries(buckets))
+        };
         Ok(SegmentAggregationResultsCollector {
-            metrics: VecWithNames::from_entries(metrics),
-            buckets: VecWithNames::from_entries(buckets),
+            metrics,
+            buckets,
             staged_docs: [0; DOC_BLOCK_SIZE],
             num_staged_docs: 0,
         })
@@ -79,29 +123,33 @@ impl SegmentAggregationResultsCollector {
         }
     }
 
-    #[inline(never)]
     pub(crate) fn flush_staged_docs(
         &mut self,
         agg_with_accessor: &AggregationsWithAccessor,
         force_flush: bool,
     ) {
-        for (agg_with_accessor, collector) in agg_with_accessor
-            .metrics
-            .values()
-            .zip(self.metrics.values_mut())
-        {
-            collector.collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor);
+        if self.num_staged_docs == 0 {
+            return;
|
|
||||||
}
|
}
|
||||||
for (agg_with_accessor, collector) in agg_with_accessor
|
if let Some(metrics) = &mut self.metrics {
|
||||||
.buckets
|
for (collector, agg_with_accessor) in
|
||||||
.values()
|
metrics.values_mut().zip(agg_with_accessor.metrics.values())
|
||||||
.zip(self.buckets.values_mut())
|
{
|
||||||
{
|
collector
|
||||||
collector.collect_block(
|
.collect_block(&self.staged_docs[..self.num_staged_docs], agg_with_accessor);
|
||||||
&self.staged_docs[..self.num_staged_docs],
|
}
|
||||||
agg_with_accessor,
|
}
|
||||||
force_flush,
|
|
||||||
);
|
if let Some(buckets) = &mut self.buckets {
|
||||||
|
for (collector, agg_with_accessor) in
|
||||||
|
buckets.values_mut().zip(agg_with_accessor.buckets.values())
|
||||||
|
{
|
||||||
|
collector.collect_block(
|
||||||
|
&self.staged_docs[..self.num_staged_docs],
|
||||||
|
agg_with_accessor,
|
||||||
|
force_flush,
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.num_staged_docs = 0;
|
self.num_staged_docs = 0;
|
||||||
@@ -115,15 +163,17 @@ pub(crate) enum SegmentMetricResultCollector {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl SegmentMetricResultCollector {
|
impl SegmentMetricResultCollector {
|
||||||
pub fn from_req(req: &MetricAggregationWithAccessor) -> Self {
|
pub fn from_req_and_validate(req: &MetricAggregationWithAccessor) -> crate::Result<Self> {
|
||||||
match &req.metric {
|
match &req.metric {
|
||||||
MetricAggregation::Average(AverageAggregation { field: _ }) => {
|
MetricAggregation::Average(AverageAggregation { field: _ }) => {
|
||||||
SegmentMetricResultCollector::Average(SegmentAverageCollector::from_req(
|
Ok(SegmentMetricResultCollector::Average(
|
||||||
req.field_type,
|
SegmentAverageCollector::from_req(req.field_type),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
MetricAggregation::Stats(StatsAggregation { field: _ }) => {
|
MetricAggregation::Stats(StatsAggregation { field: _ }) => {
|
||||||
SegmentMetricResultCollector::Stats(SegmentStatsCollector::from_req(req.field_type))
|
Ok(SegmentMetricResultCollector::Stats(
|
||||||
|
SegmentStatsCollector::from_req(req.field_type),
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -146,14 +196,57 @@ impl SegmentMetricResultCollector {
|
|||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
pub(crate) enum SegmentBucketResultCollector {
|
pub(crate) enum SegmentBucketResultCollector {
|
||||||
Range(SegmentRangeCollector),
|
Range(SegmentRangeCollector),
|
||||||
|
Histogram(Box<SegmentHistogramCollector>),
|
||||||
|
Terms(Box<SegmentTermCollector>),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SegmentBucketResultCollector {
|
impl SegmentBucketResultCollector {
|
||||||
pub fn from_req(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
|
pub fn into_intermediate_bucket_result(
|
||||||
|
self,
|
||||||
|
agg_with_accessor: &BucketAggregationWithAccessor,
|
||||||
|
) -> crate::Result<IntermediateBucketResult> {
|
||||||
|
match self {
|
||||||
|
SegmentBucketResultCollector::Terms(terms) => {
|
||||||
|
terms.into_intermediate_bucket_result(agg_with_accessor)
|
||||||
|
}
|
||||||
|
SegmentBucketResultCollector::Range(range) => {
|
||||||
|
range.into_intermediate_bucket_result(agg_with_accessor)
|
||||||
|
}
|
||||||
|
SegmentBucketResultCollector::Histogram(histogram) => {
|
||||||
|
histogram.into_intermediate_bucket_result(agg_with_accessor)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_req_and_validate(req: &BucketAggregationWithAccessor) -> crate::Result<Self> {
|
||||||
match &req.bucket_agg {
|
match &req.bucket_agg {
|
||||||
BucketAggregationType::Range(range_req) => Ok(Self::Range(
|
BucketAggregationType::Terms(terms_req) => Ok(Self::Terms(Box::new(
|
||||||
SegmentRangeCollector::from_req(range_req, &req.sub_aggregation, req.field_type)?,
|
SegmentTermCollector::from_req_and_validate(
|
||||||
)),
|
terms_req,
|
||||||
|
&req.sub_aggregation,
|
||||||
|
req.field_type,
|
||||||
|
req.accessor
|
||||||
|
.as_multi()
|
||||||
|
.expect("unexpected fast field cardinality"),
|
||||||
|
)?,
|
||||||
|
))),
|
||||||
|
BucketAggregationType::Range(range_req) => {
|
||||||
|
Ok(Self::Range(SegmentRangeCollector::from_req_and_validate(
|
||||||
|
range_req,
|
||||||
|
&req.sub_aggregation,
|
||||||
|
req.field_type,
|
||||||
|
)?))
|
||||||
|
}
|
||||||
|
BucketAggregationType::Histogram(histogram) => Ok(Self::Histogram(Box::new(
|
||||||
|
SegmentHistogramCollector::from_req_and_validate(
|
||||||
|
histogram,
|
||||||
|
&req.sub_aggregation,
|
||||||
|
req.field_type,
|
||||||
|
req.accessor
|
||||||
|
.as_single()
|
||||||
|
.expect("unexpected fast field cardinality"),
|
||||||
|
)?,
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -168,28 +261,12 @@ impl SegmentBucketResultCollector {
|
|||||||
SegmentBucketResultCollector::Range(range) => {
|
SegmentBucketResultCollector::Range(range) => {
|
||||||
range.collect_block(doc, bucket_with_accessor, force_flush);
|
range.collect_block(doc, bucket_with_accessor, force_flush);
|
||||||
}
|
}
|
||||||
|
SegmentBucketResultCollector::Histogram(histogram) => {
|
||||||
|
histogram.collect_block(doc, bucket_with_accessor, force_flush)
|
||||||
|
}
|
||||||
|
SegmentBucketResultCollector::Terms(terms) => {
|
||||||
|
terms.collect_block(doc, bucket_with_accessor, force_flush)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, PartialEq)]
|
|
||||||
pub(crate) struct SegmentRangeBucketEntry {
|
|
||||||
pub key: Key,
|
|
||||||
pub doc_count: u64,
|
|
||||||
pub sub_aggregation: Option<SegmentAggregationResultsCollector>,
|
|
||||||
/// The from range of the bucket. Equals f64::MIN when None.
|
|
||||||
pub from: Option<f64>,
|
|
||||||
/// The to range of the bucket. Equals f64::MAX when None.
|
|
||||||
pub to: Option<f64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Debug for SegmentRangeBucketEntry {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
f.debug_struct("SegmentRangeBucketEntry")
|
|
||||||
.field("key", &self.key)
|
|
||||||
.field("doc_count", &self.doc_count)
|
|
||||||
.field("from", &self.from)
|
|
||||||
.field("to", &self.to)
|
|
||||||
.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
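The new from_req_and_validate above follows a small, reusable pattern: collect the named sub-collectors into a Vec, then wrap it in Some only when it is non-empty, so that later code can skip entire metric/bucket branches with a cheap Option check. A standalone sketch of that pattern with plain standard-library types (no tantivy types involved):

// Minimal sketch of the "empty Vec becomes None" pattern used above,
// with a plain Vec<(String, T)> standing in for VecWithNames.
fn wrap_non_empty<T>(entries: Vec<(String, T)>) -> Option<Vec<(String, T)>> {
    if entries.is_empty() {
        None
    } else {
        Some(entries)
    }
}

fn main() {
    let metrics: Vec<(String, u32)> = vec![];
    let buckets = vec![("range".to_string(), 1u32)];
    assert!(wrap_non_empty(metrics).is_none());
    assert!(wrap_non_empty(buckets).is_some());
}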
@@ -152,9 +152,9 @@ mod tests {
     use query::AllQuery;

     use super::{add_vecs, HistogramCollector, HistogramComputer};
-    use crate::chrono::{TimeZone, Utc};
     use crate::schema::{Schema, FAST};
-    use crate::{doc, query, Index};
+    use crate::time::{Date, Month};
+    use crate::{doc, query, DateTime, Index};

     #[test]
     fn test_add_histograms_simple() {
@@ -273,16 +273,20 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut writer = index.writer_with_num_threads(1, 4_000_000)?;
-        writer.add_document(doc!(date_field=>Utc.ymd(1982, 9, 17).and_hms(0, 0,0)))?;
-        writer.add_document(doc!(date_field=>Utc.ymd(1986, 3, 9).and_hms(0, 0, 0)))?;
-        writer.add_document(doc!(date_field=>Utc.ymd(1983, 9, 27).and_hms(0, 0, 0)))?;
+        writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1982, Month::September, 17)?.with_hms(0, 0, 0)?)))?;
+        writer.add_document(
+            doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1986, Month::March, 9)?.with_hms(0, 0, 0)?)),
+        )?;
+        writer.add_document(doc!(date_field=>DateTime::from_primitive(Date::from_calendar_date(1983, Month::September, 27)?.with_hms(0, 0, 0)?)))?;
         writer.commit()?;
         let reader = index.reader()?;
         let searcher = reader.searcher();
         let all_query = AllQuery;
         let week_histogram_collector = HistogramCollector::new(
             date_field,
-            Utc.ymd(1980, 1, 1).and_hms(0, 0, 0),
+            DateTime::from_primitive(
+                Date::from_calendar_date(1980, Month::January, 1)?.with_hms(0, 0, 0)?,
+            ),
             3600 * 24 * 365, // it is just for a unit test... sorry leap years.
             10,
         );
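The migrated test builds its dates with the `time` crate instead of `chrono`. A self-contained sketch of that construction, using the `time` 0.3 API directly rather than tantivy's `crate::time` re-export (an assumption about the dependency version), is:

// Requires the `time` crate (0.3) as a dependency.
use time::{Date, Month, PrimitiveDateTime};

fn main() -> Result<(), time::Error> {
    // 1982-09-17 00:00:00, built the same way as in the test above.
    let date: Date = Date::from_calendar_date(1982, Month::September, 17)?;
    let datetime: PrimitiveDateTime = date.with_hms(0, 0, 0)?;
    println!("{datetime}");
    Ok(())
}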
@@ -92,7 +92,7 @@ mod histogram_collector;
 pub use histogram_collector::HistogramCollector;

 mod multi_collector;
-pub use self::multi_collector::MultiCollector;
+pub use self::multi_collector::{FruitHandle, MultiCollector, MultiFruit};

 mod top_collector;

@@ -5,6 +5,7 @@ use super::{Collector, SegmentCollector};
 use crate::collector::Fruit;
 use crate::{DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};

+/// MultiFruit keeps Fruits from every nested Collector
 pub struct MultiFruit {
     sub_fruits: Vec<Option<Box<dyn Fruit>>>,
 }
@@ -79,12 +80,17 @@ impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
     }
 }

+/// FruitHandle stores reference to the corresponding collector inside MultiCollector
 pub struct FruitHandle<TFruit: Fruit> {
     pos: usize,
     _phantom: PhantomData<TFruit>,
 }

 impl<TFruit: Fruit> FruitHandle<TFruit> {
+    /// Extract a typed fruit off a multifruit.
+    ///
+    /// This function involves downcasting and can panic if the multifruit was
+    /// created using faulty code.
     pub fn extract(self, fruits: &mut MultiFruit) -> TFruit {
         let boxed_fruit = fruits.sub_fruits[self.pos].take().expect("");
         *boxed_fruit
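With FruitHandle and MultiFruit now re-exported, the usual pattern is to register several collectors, run a single search, and extract each typed result from the combined fruit. A hedged sketch against the public API (the add_collector/extract pairing is assumed from the documented usage of this version):

use tantivy::collector::{Count, MultiCollector, TopDocs};
use tantivy::query::Query;
use tantivy::{DocAddress, Score, Searcher};

// Sketch only: one search, two typed results pulled out of the MultiFruit.
fn count_and_top_docs(
    searcher: &Searcher,
    query: &dyn Query,
) -> tantivy::Result<(usize, Vec<(Score, DocAddress)>)> {
    let mut collectors = MultiCollector::new();
    let top_docs_handle = collectors.add_collector(TopDocs::with_limit(3));
    let count_handle = collectors.add_collector(Count);

    let mut multi_fruit = searcher.search(query, &collectors)?;

    // Extraction consumes the handle and downcasts the stored fruit.
    let count = count_handle.extract(&mut multi_fruit);
    let top_docs = top_docs_handle.extract(&mut multi_fruit);
    Ok((count, top_docs))
}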
@@ -1,11 +1,11 @@
-use std::str::FromStr;
-
 use super::*;
 use crate::collector::{Count, FilterCollector, TopDocs};
 use crate::core::SegmentReader;
 use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader};
 use crate::query::{AllQuery, QueryParser};
 use crate::schema::{Field, Schema, FAST, TEXT};
+use crate::time::format_description::well_known::Rfc3339;
+use crate::time::OffsetDateTime;
 use crate::{doc, DateTime, DocAddress, DocId, Document, Index, Score, Searcher, SegmentOrdinal};

 pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
@@ -26,11 +26,11 @@ pub fn test_filter_collector() -> crate::Result<()> {
     let index = Index::create_in_ram(schema);

     let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
-    index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_str("1898-04-09T00:00:00+00:00").unwrap()))?;
-    index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_str("2020-04-09T00:00:00+00:00").unwrap()))?;
-    index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_str("2019-04-20T00:00:00+00:00").unwrap()))?;
-    index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()))?;
-    index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_str("2018-04-09T00:00:00+00:00").unwrap()))?;
+    index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_utc(OffsetDateTime::parse("1898-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
+    index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2020-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
+    index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2019-04-20T00:00:00+00:00", &Rfc3339).unwrap())))?;
+    index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_utc(OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
+    index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_utc(OffsetDateTime::parse("2018-04-09T00:00:00+00:00", &Rfc3339).unwrap())))?;
     index_writer.commit()?;

     let reader = index.reader()?;
@@ -55,7 +55,9 @@ pub fn test_filter_collector() -> crate::Result<()> {
     assert_eq!(filtered_top_docs.len(), 0);

     fn date_filter(value: DateTime) -> bool {
-        (value - DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()).num_weeks() > 0
+        (value.into_utc() - OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339).unwrap())
+            .whole_weeks()
+            > 0
     }

     let filter_dates_collector = FilterCollector::new(date, &date_filter, TopDocs::with_limit(5));
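These tests now parse RFC 3339 strings through the `time` crate rather than `chrono`/`FromStr`, and subtract `OffsetDateTime` values to get a `time::Duration`. A minimal standalone version of that step (assuming `time` 0.3 with the "parsing" feature enabled):

// Requires the `time` crate (0.3) with the "parsing" feature.
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

fn main() -> Result<(), time::error::Parse> {
    let moment = OffsetDateTime::parse("2019-04-09T00:00:00+00:00", &Rfc3339)?;
    let reference = OffsetDateTime::parse("2018-04-09T00:00:00+00:00", &Rfc3339)?;
    // The difference of two OffsetDateTime values is a time::Duration,
    // hence `.whole_weeks()` in the filter above.
    assert!((moment - reference).whole_weeks() > 0);
    Ok(())
}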
@@ -173,8 +173,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {
             .collect()
     }

-    /// Return true iff at least K documents have gone through
-    /// the collector.
+    /// Return true if more documents have been collected than the limit.
     #[inline]
     pub(crate) fn at_capacity(&self) -> bool {
         self.heap.len() >= self.limit
@@ -714,7 +714,9 @@ mod tests {
     use crate::collector::Collector;
     use crate::query::{AllQuery, Query, QueryParser};
     use crate::schema::{Field, Schema, FAST, STORED, TEXT};
-    use crate::{DocAddress, DocId, Index, IndexWriter, Score, SegmentReader};
+    use crate::time::format_description::well_known::Rfc3339;
+    use crate::time::OffsetDateTime;
+    use crate::{DateTime, DocAddress, DocId, Index, IndexWriter, Score, SegmentReader};

     fn make_index() -> crate::Result<Index> {
         let mut schema_builder = Schema::builder();
@@ -890,28 +892,32 @@ mod tests {

     #[test]
     fn test_top_field_collector_datetime() -> crate::Result<()> {
-        use std::str::FromStr;
         let mut schema_builder = Schema::builder();
         let name = schema_builder.add_text_field("name", TEXT);
         let birthday = schema_builder.add_date_field("birthday", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index.writer_for_tests()?;
-        let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
+        let pr_birthday = DateTime::from_utc(OffsetDateTime::parse(
+            "1898-04-09T00:00:00+00:00",
+            &Rfc3339,
+        )?);
         index_writer.add_document(doc!(
             name => "Paul Robeson",
-            birthday => pr_birthday
+            birthday => pr_birthday,
         ))?;
-        let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
+        let mr_birthday = DateTime::from_utc(OffsetDateTime::parse(
+            "1947-11-08T00:00:00+00:00",
+            &Rfc3339,
+        )?);
         index_writer.add_document(doc!(
             name => "Minnie Riperton",
-            birthday => mr_birthday
+            birthday => mr_birthday,
         ))?;
         index_writer.commit()?;
         let searcher = index.reader()?.searcher();
         let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
-        let top_docs: Vec<(crate::DateTime, DocAddress)> =
-            searcher.search(&AllQuery, &top_collector)?;
+        let top_docs: Vec<(DateTime, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
         assert_eq!(
             &top_docs[..],
             &[
@@ -1,6 +1,7 @@
-use crossbeam::channel;
 use rayon::{ThreadPool, ThreadPoolBuilder};

+use crate::TantivyError;
+
 /// Search executor whether search request are single thread or multithread.
 ///
 /// We don't expose Rayon thread pool directly here for several reasons.
@@ -47,16 +48,19 @@ impl Executor {
         match self {
             Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(),
             Executor::ThreadPool(pool) => {
-                let args_with_indices: Vec<(usize, A)> = args.enumerate().collect();
-                let num_fruits = args_with_indices.len();
+                let args: Vec<A> = args.collect();
+                let num_fruits = args.len();
                 let fruit_receiver = {
-                    let (fruit_sender, fruit_receiver) = channel::unbounded();
+                    let (fruit_sender, fruit_receiver) = crossbeam_channel::unbounded();
                     pool.scope(|scope| {
-                        for arg_with_idx in args_with_indices {
-                            scope.spawn(|_| {
-                                let (idx, arg) = arg_with_idx;
-                                let fruit = f(arg);
-                                if let Err(err) = fruit_sender.send((idx, fruit)) {
+                        for (idx, arg) in args.into_iter().enumerate() {
+                            // We name references for f and fruit_sender_ref because we do not
+                            // want these two to be moved into the closure.
+                            let f_ref = &f;
+                            let fruit_sender_ref = &fruit_sender;
+                            scope.spawn(move |_| {
+                                let fruit = f_ref(arg);
+                                if let Err(err) = fruit_sender_ref.send((idx, fruit)) {
                                     error!(
                                         "Failed to send search task. It probably means all search \
                                          threads have panicked. {:?}",
@@ -71,18 +75,19 @@ impl Executor {
                 // This is important as it makes it possible for the fruit_receiver iteration to
                 // terminate.
                 };
-                // This is lame, but safe.
-                let mut results_with_position = Vec::with_capacity(num_fruits);
+                let mut result_placeholders: Vec<Option<R>> =
+                    std::iter::repeat_with(|| None).take(num_fruits).collect();
                 for (pos, fruit_res) in fruit_receiver {
                     let fruit = fruit_res?;
-                    results_with_position.push((pos, fruit));
+                    result_placeholders[pos] = Some(fruit);
                 }
-                results_with_position.sort_by_key(|(pos, _)| *pos);
-                assert_eq!(results_with_position.len(), num_fruits);
-                Ok(results_with_position
-                    .into_iter()
-                    .map(|(_, fruit)| fruit)
-                    .collect::<Vec<_>>())
+                let results: Vec<R> = result_placeholders.into_iter().flatten().collect();
+                if results.len() != num_fruits {
+                    return Err(TantivyError::InternalError(
+                        "One of the mapped execution failed.".to_string(),
+                    ));
+                }
+                Ok(results)
             }
         }
     }
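The rewritten thread-pool branch keeps results in order by writing each fruit into a pre-sized slot indexed by its original position, instead of sorting afterwards, and then detects missing results by comparing lengths. The same idea in plain std Rust (scoped threads and an mpsc channel standing in for rayon and crossbeam; a sketch, not the executor itself):

use std::sync::mpsc;
use std::thread;

// Placeholder-slot pattern: results arrive out of order over a channel but are
// written into the slot that matches their original index.
fn parallel_square(values: Vec<u64>) -> Vec<u64> {
    let n = values.len();
    let (sender, receiver) = mpsc::channel();
    thread::scope(|scope| {
        for (idx, value) in values.into_iter().enumerate() {
            let sender = sender.clone();
            scope.spawn(move || {
                let _ = sender.send((idx, value * value));
            });
        }
    });
    drop(sender); // close the channel so the receive loop terminates
    let mut slots: Vec<Option<u64>> = std::iter::repeat_with(|| None).take(n).collect();
    for (idx, result) in receiver {
        slots[idx] = Some(result);
    }
    slots.into_iter().flatten().collect()
}

fn main() {
    assert_eq!(parallel_square(vec![1, 2, 3, 4]), vec![1, 4, 9, 16]);
}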
@@ -64,7 +64,7 @@ fn load_metas(
 /// let body_field = schema_builder.add_text_field("body", TEXT);
 /// let number_field = schema_builder.add_u64_field(
 ///    "number",
-///    IntOptions::default().set_fast(Cardinality::SingleValue),
+///    NumericOptions::default().set_fast(Cardinality::SingleValue),
 /// );
 ///
 /// let schema = schema_builder.build();
@@ -74,6 +74,7 @@ fn load_metas(
 pub struct IndexBuilder {
     schema: Option<Schema>,
     index_settings: IndexSettings,
+    tokenizer_manager: TokenizerManager,
 }
 impl Default for IndexBuilder {
     fn default() -> Self {
@@ -86,6 +87,7 @@ impl IndexBuilder {
         Self {
             schema: None,
             index_settings: IndexSettings::default(),
+            tokenizer_manager: TokenizerManager::default(),
         }
     }

@@ -103,6 +105,12 @@ impl IndexBuilder {
         self
     }

+    /// Set the tokenizers .
+    pub fn tokenizers(mut self, tokenizers: TokenizerManager) -> Self {
+        self.tokenizer_manager = tokenizers;
+        self
+    }
+
     /// Creates a new index using the `RAMDirectory`.
     ///
     /// The index will be allocated in anonymous memory.
@@ -154,7 +162,8 @@ impl IndexBuilder {
         if !Index::exists(&*dir)? {
             return self.create(dir);
         }
-        let index = Index::open(dir)?;
+        let mut index = Index::open(dir)?;
+        index.set_tokenizers(self.tokenizer_manager.clone());
         if index.schema() == self.get_expect_schema()? {
             Ok(index)
         } else {
@@ -176,7 +185,8 @@ impl IndexBuilder {
         )?;
         let mut metas = IndexMeta::with_schema(self.get_expect_schema()?);
         metas.index_settings = self.index_settings;
-        let index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default());
+        let mut index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default());
+        index.set_tokenizers(self.tokenizer_manager);
         Ok(index)
     }
 }
@@ -304,6 +314,11 @@ impl Index {
         }
     }

+    /// Setter for the tokenizer manager.
+    pub fn set_tokenizers(&mut self, tokenizers: TokenizerManager) {
+        self.tokenizers = tokenizers;
+    }
+
     /// Accessor for the tokenizer manager.
     pub fn tokenizers(&self) -> &TokenizerManager {
         &self.tokenizers
@@ -314,20 +329,31 @@ impl Index {
         let field_entry = self.schema.get_field_entry(field);
         let field_type = field_entry.field_type();
         let tokenizer_manager: &TokenizerManager = self.tokenizers();
-        let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
-            FieldType::Str(text_options) => text_options
-                .get_indexing_options()
-                .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
-                .and_then(|tokenizer_name| tokenizer_manager.get(&tokenizer_name)),
-            _ => None,
+        let indexing_options_opt = match field_type {
+            FieldType::JsonObject(options) => options.get_text_indexing_options(),
+            FieldType::Str(options) => options.get_indexing_options(),
+            _ => {
+                return Err(TantivyError::SchemaError(format!(
+                    "{:?} is not a text field.",
+                    field_entry.name()
+                )))
+            }
         };
-        match tokenizer_name_opt {
-            Some(tokenizer) => Ok(tokenizer),
-            None => Err(TantivyError::SchemaError(format!(
-                "{:?} is not a text field.",
-                field_entry.name()
-            ))),
-        }
+        let indexing_options = indexing_options_opt.ok_or_else(|| {
+            TantivyError::InvalidArgument(format!(
+                "No indexing options set for field {:?}",
+                field_entry
+            ))
+        })?;
+
+        tokenizer_manager
+            .get(indexing_options.tokenizer())
+            .ok_or_else(|| {
+                TantivyError::InvalidArgument(format!(
+                    "No Tokenizer found for field {:?}",
+                    field_entry
+                ))
+            })
     }

     /// Create a default `IndexReader` for the given index.
@@ -557,7 +583,8 @@ impl fmt::Debug for Index {
 mod tests {
     use crate::directory::{RamDirectory, WatchCallback};
     use crate::schema::{Field, Schema, INDEXED, TEXT};
-    use crate::{Directory, Index, IndexReader, IndexSettings, ReloadPolicy};
+    use crate::tokenizer::TokenizerManager;
+    use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy};

     #[test]
     fn test_indexer_for_field() {
@@ -573,6 +600,21 @@ mod tests {
         );
     }

+    #[test]
+    fn test_set_tokenizer_manager() {
+        let mut schema_builder = Schema::builder();
+        schema_builder.add_u64_field("num_likes", INDEXED);
+        schema_builder.add_text_field("body", TEXT);
+        let schema = schema_builder.build();
+        let index = IndexBuilder::new()
+            // set empty tokenizer manager
+            .tokenizers(TokenizerManager::new())
+            .schema(schema)
+            .create_in_ram()
+            .unwrap();
+        assert!(index.tokenizers().get("raw").is_none());
+    }
+
     #[test]
     fn test_index_exists() {
         let directory: Box<dyn Directory> = Box::new(RamDirectory::create());
@@ -702,7 +744,7 @@ mod tests {
             .try_into()?;
         assert_eq!(reader.searcher().num_docs(), 0);
         writer.add_document(doc!(field=>1u64))?;
-        let (sender, receiver) = crossbeam::channel::unbounded();
+        let (sender, receiver) = crossbeam_channel::unbounded();
         let _handle = index.directory_mut().watch(WatchCallback::new(move || {
             let _ = sender.send(());
         }));
@@ -737,7 +779,7 @@ mod tests {
         reader: &IndexReader,
     ) -> crate::Result<()> {
         let mut reader_index = reader.index();
-        let (sender, receiver) = crossbeam::channel::unbounded();
+        let (sender, receiver) = crossbeam_channel::unbounded();
         let _watch_handle = reader_index
             .directory_mut()
             .watch(WatchCallback::new(move || {
@@ -781,24 +823,24 @@ mod tests {
         for i in 0u64..8_000u64 {
             writer.add_document(doc!(field => i))?;
         }
-        let (sender, receiver) = crossbeam::channel::unbounded();
-        let _handle = directory.watch(WatchCallback::new(move || {
-            let _ = sender.send(());
-        }));
         writer.commit()?;
         let mem_right_after_commit = directory.total_mem_usage();
-        assert!(receiver.recv().is_ok());
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::Manual)
             .try_into()?;

         assert_eq!(reader.searcher().num_docs(), 8_000);
+        assert_eq!(reader.searcher().segment_readers().len(), 8);

         writer.wait_merging_threads()?;

         let mem_right_after_merge_finished = directory.total_mem_usage();

         reader.reload().unwrap();
         let searcher = reader.searcher();
+        assert_eq!(searcher.segment_readers().len(), 1);
         assert_eq!(searcher.num_docs(), 8_000);
         assert!(
             mem_right_after_merge_finished < mem_right_after_commit,
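The new IndexBuilder::tokenizers setter lets an index be created or opened with a caller-supplied TokenizerManager. A usage sketch drawn directly from the test_set_tokenizer_manager test above, written against the public crate paths (an assumption; the test itself uses crate-internal paths):

use tantivy::schema::{Schema, INDEXED, TEXT};
use tantivy::tokenizer::TokenizerManager;
use tantivy::IndexBuilder;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_u64_field("num_likes", INDEXED);
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    // Start from an empty manager so only explicitly registered tokenizers exist.
    let index = IndexBuilder::new()
        .tokenizers(TokenizerManager::new())
        .schema(schema)
        .create_in_ram()?;

    // The default "raw" tokenizer is not registered in an empty manager.
    assert!(index.tokenizers().get("raw").is_none());
    Ok(())
}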
@@ -239,7 +239,7 @@ impl InnerSegmentMeta {
 ///
 /// Contains settings which are applied on the whole
 /// index, like presort documents.
-#[derive(Clone, Default, Serialize, Deserialize, Eq, PartialEq)]
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
 pub struct IndexSettings {
     /// Sorts the documents by information
     /// provided in `IndexSortByField`
@@ -248,13 +248,32 @@ pub struct IndexSettings {
     /// The `Compressor` used to compress the doc store.
     #[serde(default)]
     pub docstore_compression: Compressor,
+    #[serde(default = "default_docstore_blocksize")]
+    /// The size of each block that will be compressed and written to disk
+    pub docstore_blocksize: usize,
 }

+/// Must be a function to be compatible with serde defaults
+fn default_docstore_blocksize() -> usize {
+    16_384
+}
+
+impl Default for IndexSettings {
+    fn default() -> Self {
+        Self {
+            sort_by_field: None,
+            docstore_compression: Compressor::default(),
+            docstore_blocksize: default_docstore_blocksize(),
+        }
+    }
+}
+
 /// Settings to presort the documents in an index
 ///
 /// Presorting documents can greatly performance
 /// in some scenarios, by applying top n
 /// optimizations.
-#[derive(Clone, Serialize, Deserialize, Eq, PartialEq)]
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
 pub struct IndexSortByField {
     /// The field to sort the documents by
     pub field: String,
@@ -262,7 +281,7 @@ pub struct IndexSortByField {
     pub order: Order,
 }
 /// The order to sort by
-#[derive(Clone, Serialize, Deserialize, Eq, PartialEq)]
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
 pub enum Order {
     /// Ascending Order
     Asc,
@@ -298,12 +317,12 @@ pub struct IndexMeta {
     pub schema: Schema,
     /// Opstamp associated to the last `commit` operation.
     pub opstamp: Opstamp,
-    #[serde(skip_serializing_if = "Option::is_none")]
     /// Payload associated to the last commit.
     ///
     /// Upon commit, clients can optionally add a small `String` payload to their commit
     /// to help identify this commit.
     /// This payload is entirely unused by tantivy.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub payload: Option<String>,
 }

@@ -374,6 +393,7 @@ impl fmt::Debug for IndexMeta {
 mod tests {

     use super::IndexMeta;
+    use crate::core::index_meta::UntrackedIndexMeta;
     use crate::schema::{Schema, TEXT};
     use crate::{IndexSettings, IndexSortByField, Order};

@@ -400,7 +420,12 @@ mod tests {
         let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
         assert_eq!(
             json,
-            r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"lz4"},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false}}],"opstamp":0}"#
+            r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"lz4","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
         );
+
+        let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
+        assert_eq!(index_metas.index_settings, deser_meta.index_settings);
+        assert_eq!(index_metas.schema, deser_meta.schema);
+        assert_eq!(index_metas.opstamp, deser_meta.opstamp);
     }
 }
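The free function default_docstore_blocksize exists because serde's `default` attribute takes the path of a function, which keeps older meta.json files (written before the field existed) deserializable. A standalone illustration of that pattern, independent of tantivy's types:

use serde::{Deserialize, Serialize};

// Illustration of `#[serde(default = "...")]`: metadata written before the
// field existed still deserializes, picking up the default value.
fn default_blocksize() -> usize {
    16_384
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Settings {
    compression: String,
    #[serde(default = "default_blocksize")]
    blocksize: usize,
}

fn main() {
    // No "blocksize" key in the input: the default function fills it in.
    let old_json = r#"{"compression":"lz4"}"#;
    let settings: Settings = serde_json::from_str(old_json).unwrap();
    assert_eq!(settings.blocksize, 16_384);
}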
@@ -88,7 +88,8 @@ impl InvertedIndexReader {
         let postings_slice = self
             .postings_file_slice
             .slice(term_info.postings_range.clone());
-        block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?);
+        let postings_bytes = postings_slice.read_bytes()?;
+        block_postings.reset(term_info.doc_freq, postings_bytes)?;
         Ok(())
     }

@@ -197,3 +198,36 @@ impl InvertedIndexReader {
             .unwrap_or(0u32))
     }
 }
+
+#[cfg(feature = "quickwit")]
+impl InvertedIndexReader {
+    pub(crate) async fn get_term_info_async(
+        &self,
+        term: &Term,
+    ) -> crate::AsyncIoResult<Option<TermInfo>> {
+        self.termdict.get_async(term.value_bytes()).await
+    }
+
+    /// Returns a block postings given a `Term`.
+    /// This method is for an advanced usage only.
+    ///
+    /// Most user should prefer using `read_postings` instead.
+    pub async fn warm_postings(
+        &self,
+        term: &Term,
+        with_positions: bool,
+    ) -> crate::AsyncIoResult<()> {
+        let term_info_opt = self.get_term_info_async(term).await?;
+        if let Some(term_info) = term_info_opt {
+            self.postings_file_slice
+                .read_bytes_slice_async(term_info.postings_range.clone())
+                .await?;
+            if with_positions {
+                self.positions_file_slice
+                    .read_bytes_slice_async(term_info.positions_range.clone())
+                    .await?;
+            }
+        }
+        Ok(())
+    }
+}
@@ -110,6 +110,13 @@ impl Searcher {
         store_reader.get(doc_address.doc_id)
     }

+    /// Fetches a document in an asynchronous manner.
+    #[cfg(feature = "quickwit")]
+    pub async fn doc_async(&self, doc_address: DocAddress) -> crate::Result<Document> {
+        let store_reader = &self.store_readers[doc_address.segment_ord as usize];
+        store_reader.get_async(doc_address.doc_id).await
+    }
+
     /// Access the schema associated to the index of this searcher.
     pub fn schema(&self) -> &Schema {
         &self.schema
@@ -35,7 +35,7 @@ const ZERO_ARRAY: [u8; 8] = [0u8; 8];
 #[cfg(test)]
 fn create_uuid() -> Uuid {
     let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
-    Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY).unwrap()
+    Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &ZERO_ARRAY)
 }

 #[cfg(not(test))]
@@ -57,7 +57,7 @@ impl SegmentId {
     /// Picking the first 8 chars is ok to identify
     /// segments in a display message (e.g. a5c4dfcb).
     pub fn short_uuid_string(&self) -> String {
-        (&self.0.to_simple_ref().to_string()[..8]).to_string()
+        (&self.0.as_simple().to_string()[..8]).to_string()
     }

     /// Returns a segment uuid string.
@@ -65,7 +65,7 @@ impl SegmentId {
     /// It consists in 32 lowercase hexadecimal chars
     /// (e.g. a5c4dfcbdfe645089129e308e26d5523)
     pub fn uuid_string(&self) -> String {
-        self.0.to_simple_ref().to_string()
+        self.0.as_simple().to_string()
     }

     /// Build a `SegmentId` string from the full uuid string.
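These renames track the uuid crate's 1.x API, where from_fields is infallible (hence the dropped `.unwrap()`) and the simple formatter is reached through as_simple(). A minimal sketch assuming uuid 1.x as the dependency:

// Requires the `uuid` crate (1.x).
use uuid::Uuid;

fn main() {
    let uuid = Uuid::from_fields(42u32, 0, 0, &[0u8; 8]);
    let full = uuid.as_simple().to_string();
    let short = full[..8].to_string();
    println!("{full} / {short}");
}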
@@ -70,7 +70,7 @@ impl SegmentReader {
         self.max_doc - self.num_docs
     }

-    /// Returns true iff some of the documents of the segment have been deleted.
+    /// Returns true if some of the documents of the segment have been deleted.
     pub fn has_deletes(&self) -> bool {
         self.num_deleted_docs() > 0
     }
@@ -121,9 +121,8 @@ impl SegmentReader {
         self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
             let field_name = self.schema.get_field_name(field);
             let err_msg = format!(
-                "Field norm not found for field {:?}. Was the field set to record norm during \
-                 indexing?",
-                field_name
+                "Field norm not found for field {field_name:?}. Was the field set to record norm \
+                 during indexing?"
             );
             crate::TantivyError::SchemaError(err_msg)
         })
@@ -170,7 +169,7 @@ impl SegmentReader {

         let fast_fields_data = segment.open_read(SegmentComponent::FastFields)?;
         let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
-        let fast_field_readers =
+        let fast_fields_readers =
             Arc::new(FastFieldReaders::new(schema.clone(), fast_fields_composite));
         let fieldnorm_data = segment.open_read(SegmentComponent::FieldNorms)?;
         let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
@@ -197,7 +196,7 @@ impl SegmentReader {
             max_doc,
             termdict_composite,
             postings_composite,
-            fast_fields_readers: fast_field_readers,
+            fast_fields_readers,
             fieldnorm_readers,
             segment_id: segment.id(),
             delete_opstamp: segment.meta().delete_opstamp(),
@@ -302,7 +301,7 @@ impl SegmentReader {
         self.alive_bitset_opt.as_ref()
     }

-    /// Returns true iff the `doc` is marked
+    /// Returns true if the `doc` is marked
     /// as deleted.
     pub fn is_deleted(&self, doc: DocId) -> bool {
         self.alive_bitset()
@@ -96,9 +96,9 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
 ///
 /// There are currently two implementations of `Directory`
 ///
-/// - The [`MMapDirectory`](struct.MmapDirectory.html), this
+/// - The [`MMapDirectory`][crate::directory::MmapDirectory], this
 ///   should be your default choice.
-/// - The [`RamDirectory`](struct.RamDirectory.html), which
+/// - The [`RamDirectory`][crate::directory::RamDirectory], which
 ///   should be used mostly for tests.
 pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// Opens a file and returns a boxed `FileHandle`.
@@ -128,7 +128,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// `DeleteError::DoesNotExist`.
     fn delete(&self, path: &Path) -> Result<(), DeleteError>;

-    /// Returns true iff the file exists
+    /// Returns true if and only if the file exists
     fn exists(&self, path: &Path) -> Result<bool, OpenReadError>;

     /// Opens a writer for the *virtual file* associated with
@@ -2,6 +2,7 @@ use std::ops::{Deref, Range};
 use std::sync::{Arc, Weak};
 use std::{fmt, io};

+use async_trait::async_trait;
 use common::HasLen;
 use stable_deref_trait::StableDeref;

@@ -18,18 +19,35 @@ pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
 /// The underlying behavior is therefore specific to the `Directory` that created it.
 /// Despite its name, a `FileSlice` may or may not directly map to an actual file
 /// on the filesystem.
+
+#[async_trait]
 pub trait FileHandle: 'static + Send + Sync + HasLen + fmt::Debug {
     /// Reads a slice of bytes.
     ///
     /// This method may panic if the range requested is invalid.
     fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes>;
+
+    #[cfg(feature = "quickwit")]
+    #[doc(hidden)]
+    async fn read_bytes_async(
+        &self,
+        _byte_range: Range<usize>,
+    ) -> crate::AsyncIoResult<OwnedBytes> {
+        Err(crate::error::AsyncIoError::AsyncUnsupported)
+    }
 }

+#[async_trait]
 impl FileHandle for &'static [u8] {
     fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
         let bytes = &self[range];
         Ok(OwnedBytes::new(bytes))
     }
+
+    #[cfg(feature = "quickwit")]
+    async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
+        Ok(self.read_bytes(byte_range)?)
+    }
 }

 impl<B> From<B> for FileSlice
@@ -102,6 +120,12 @@ impl FileSlice {
         self.data.read_bytes(self.range.clone())
     }

+    #[cfg(feature = "quickwit")]
+    #[doc(hidden)]
+    pub async fn read_bytes_async(&self) -> crate::AsyncIoResult<OwnedBytes> {
+        self.data.read_bytes_async(self.range.clone()).await
+    }
+
     /// Reads a specific slice of data.
     ///
     /// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
@@ -116,6 +140,23 @@ impl FileSlice {
             .read_bytes(self.range.start + range.start..self.range.start + range.end)
     }

+    #[cfg(feature = "quickwit")]
+    #[doc(hidden)]
+    pub async fn read_bytes_slice_async(
+        &self,
+        byte_range: Range<usize>,
+    ) -> crate::AsyncIoResult<OwnedBytes> {
+        assert!(
+            self.range.start + byte_range.end <= self.range.end,
+            "`to` exceeds the fileslice length"
+        );
+        self.data
+            .read_bytes_async(
+                self.range.start + byte_range.start..self.range.start + byte_range.end,
+            )
+            .await
+    }
+
     /// Splits the FileSlice at the given offset and return two file slices.
     /// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
     ///
@@ -160,10 +201,16 @@ impl FileSlice {
     }
 }

+#[async_trait]
 impl FileHandle for FileSlice {
     fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
         self.read_bytes_slice(range)
     }
+
+    #[cfg(feature = "quickwit")]
+    async fn read_bytes_async(&self, byte_range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
+        self.read_bytes_slice_async(byte_range).await
+    }
 }

 impl HasLen for FileSlice {
@@ -172,6 +219,19 @@ impl HasLen for FileSlice {
     }
 }

+#[async_trait]
+impl FileHandle for OwnedBytes {
+    fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
+        Ok(self.slice(range))
+    }
+
+    #[cfg(feature = "quickwit")]
+    async fn read_bytes_async(&self, range: Range<usize>) -> crate::AsyncIoResult<OwnedBytes> {
+        let bytes = self.read_bytes(range)?;
+        Ok(bytes)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::io;
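The FileHandle trait gains an async method with a default body through the async-trait crate, which is what lets existing synchronous implementors keep compiling. A generic, self-contained sketch of that shape (illustrative names, not tantivy's; needs the async-trait and futures crates):

use async_trait::async_trait;

#[async_trait]
trait ByteSource: Send + Sync {
    fn read_all(&self) -> Vec<u8>;

    // Default implementation: implementors without real async IO fall back
    // to the synchronous path, just as the default read_bytes_async does above.
    async fn read_all_async(&self) -> Vec<u8> {
        self.read_all()
    }
}

struct InMemory(Vec<u8>);

#[async_trait]
impl ByteSource for InMemory {
    fn read_all(&self) -> Vec<u8> {
        self.0.clone()
    }
}

fn main() {
    let source = InMemory(vec![1, 2, 3]);
    // Any executor works; futures::executor::block_on keeps the example small.
    let bytes = futures::executor::block_on(source.read_all_async());
    assert_eq!(bytes, vec![1, 2, 3]);
}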
@@ -53,7 +53,9 @@ impl FileWatcher {
                 if metafile_has_changed {
                     info!("Meta file {:?} was modified", path);
                     current_checksum_opt = Some(checksum);
-                    futures::executor::block_on(callbacks.broadcast());
+                    // We actually ignore callbacks failing here.
+                    // We just wait for the end of their execution.
+                    let _ = callbacks.broadcast().wait();
                 }
             }

@@ -108,7 +110,7 @@ mod tests {
         let tmp_file = tmp_dir.path().join("watched.txt");

         let counter: Arc<AtomicUsize> = Default::default();
-        let (tx, rx) = crossbeam::channel::unbounded();
+        let (tx, rx) = crossbeam_channel::unbounded();
         let timeout = Duration::from_millis(100);

         let watcher = FileWatcher::new(&tmp_file);
@@ -151,7 +153,7 @@ mod tests {
         let tmp_file = tmp_dir.path().join("watched.txt");

         let counter: Arc<AtomicUsize> = Default::default();
-        let (tx, rx) = crossbeam::channel::unbounded();
+        let (tx, rx) = crossbeam_channel::unbounded();
         let timeout = Duration::from_millis(100);

         let watcher = FileWatcher::new(&tmp_file);
@@ -16,7 +16,7 @@ use crate::directory::{
 use crate::error::DataCorruption;
 use crate::Directory;
 
-/// Returns true iff the file is "managed".
+/// Returns true if the file is "managed".
 /// Non-managed file are not subject to garbage collection.
 ///
 /// Filenames that starts by a "." -typically locks-
@@ -1,7 +1,6 @@
 use std::collections::HashMap;
-use std::convert::From;
 use std::fs::{self, File, OpenOptions};
-use std::io::{self, BufWriter, Read, Seek, SeekFrom, Write};
+use std::io::{self, BufWriter, Read, Seek, Write};
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, RwLock};
@@ -265,7 +264,7 @@ impl Write for SafeFileWriter {
 }
 
 impl Seek for SafeFileWriter {
-    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
         self.0.seek(pos)
     }
 }
@@ -9,7 +9,6 @@ mod file_slice;
 mod file_watcher;
 mod footer;
 mod managed_directory;
-mod owned_bytes;
 mod ram_directory;
 mod watch_event_router;
 
@@ -22,13 +21,13 @@ use std::io::BufWriter;
 use std::path::PathBuf;
 
 pub use common::{AntiCallToken, TerminatingWrite};
+pub use ownedbytes::OwnedBytes;
 
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
 pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
 pub use self::file_slice::{FileHandle, FileSlice};
-pub use self::owned_bytes::OwnedBytes;
 pub use self::ram_directory::RamDirectory;
 pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
 
@@ -1,12 +0,0 @@
-use std::io;
-use std::ops::Range;
-
-pub use ownedbytes::OwnedBytes;
-
-use crate::directory::FileHandle;
-
-impl FileHandle for OwnedBytes {
-    fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
-        Ok(self.slice(range))
-    }
-}
@@ -6,9 +6,6 @@ use std::sync::atomic::{AtomicBool, AtomicUsize};
 use std::sync::Arc;
 use std::time::Duration;
 
-use futures::channel::oneshot;
-use futures::executor::block_on;
-
 use super::*;
 
 #[cfg(feature = "mmap")]
@@ -184,7 +181,7 @@ fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
 
 fn test_watch(directory: &dyn Directory) {
     let counter: Arc<AtomicUsize> = Default::default();
-    let (tx, rx) = crossbeam::channel::unbounded();
+    let (tx, rx) = crossbeam_channel::unbounded();
     let timeout = Duration::from_millis(500);
 
     let handle = directory
@@ -249,8 +246,8 @@ fn test_lock_blocking(directory: &dyn Directory) {
     std::thread::spawn(move || {
         //< lock_a_res is sent to the thread.
         in_thread_clone.store(true, SeqCst);
-        let _just_sync = block_on(receiver);
-        // explicitely droping lock_a_res. It would have been sufficient to just force it
+        let _just_sync = receiver.recv();
+        // explicitely dropping lock_a_res. It would have been sufficient to just force it
         // to be part of the move, but the intent seems clearer that way.
         drop(lock_a_res);
     });
@@ -273,7 +270,7 @@ fn test_lock_blocking(directory: &dyn Directory) {
         assert!(in_thread.load(SeqCst));
         assert!(lock_a_res.is_ok());
     });
-    assert!(block_on(receiver2).is_ok());
+    assert!(receiver2.recv().is_ok());
    assert!(sender.send(()).is_ok());
    assert!(join_handle.join().is_ok());
 }
@@ -1,7 +1,6 @@
 use std::sync::{Arc, RwLock, Weak};
 
-use futures::channel::oneshot;
-use futures::{Future, TryFutureExt};
+use crate::FutureResult;
 
 /// Cloneable wrapper for callbacks registered when watching files of a `Directory`.
 #[derive(Clone)]
@@ -74,12 +73,11 @@ impl WatchCallbackList {
     }
 
     /// Triggers all callbacks
-    pub fn broadcast(&self) -> impl Future<Output = ()> {
+    pub fn broadcast(&self) -> FutureResult<()> {
         let callbacks = self.list_callback();
-        let (sender, receiver) = oneshot::channel();
-        let result = receiver.unwrap_or_else(|_| ());
+        let (result, sender) = FutureResult::create("One of the callback panicked.");
         if callbacks.is_empty() {
-            let _ = sender.send(());
+            let _ = sender.send(Ok(()));
             return result;
         }
         let spawn_res = std::thread::Builder::new()
@@ -88,7 +86,7 @@ impl WatchCallbackList {
             for callback in callbacks {
                 callback.call();
             }
-            let _ = sender.send(());
+            let _ = sender.send(Ok(()));
         });
         if let Err(err) = spawn_res {
             error!(
@@ -106,8 +104,6 @@ mod tests {
     use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::Arc;
 
-    use futures::executor::block_on;
-
     use crate::directory::{WatchCallback, WatchCallbackList};
 
     #[test]
@@ -118,22 +114,18 @@ mod tests {
         let inc_callback = WatchCallback::new(move || {
             counter_clone.fetch_add(1, Ordering::SeqCst);
         });
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast().wait().unwrap();
         assert_eq!(0, counter.load(Ordering::SeqCst));
         let handle_a = watch_event_router.subscribe(inc_callback);
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast().wait().unwrap();
         assert_eq!(1, counter.load(Ordering::SeqCst));
-        block_on(async {
-            (
-                watch_event_router.broadcast().await,
-                watch_event_router.broadcast().await,
-                watch_event_router.broadcast().await,
-            )
-        });
+        watch_event_router.broadcast().wait().unwrap();
+        watch_event_router.broadcast().wait().unwrap();
+        watch_event_router.broadcast().wait().unwrap();
         assert_eq!(4, counter.load(Ordering::SeqCst));
         mem::drop(handle_a);
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast().wait().unwrap();
         assert_eq!(4, counter.load(Ordering::SeqCst));
     }
 
@@ -150,19 +142,15 @@ mod tests {
         let handle_a = watch_event_router.subscribe(inc_callback(1));
         let handle_a2 = watch_event_router.subscribe(inc_callback(10));
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        block_on(async {
-            futures::join!(
-                watch_event_router.broadcast(),
-                watch_event_router.broadcast()
-            )
-        });
+        watch_event_router.broadcast().wait().unwrap();
+        watch_event_router.broadcast().wait().unwrap();
        assert_eq!(22, counter.load(Ordering::SeqCst));
        mem::drop(handle_a);
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast().wait().unwrap();
        assert_eq!(32, counter.load(Ordering::SeqCst));
        mem::drop(handle_a2);
-        block_on(watch_event_router.broadcast());
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast().wait().unwrap();
+        watch_event_router.broadcast().wait().unwrap();
        assert_eq!(32, counter.load(Ordering::SeqCst));
     }
 
@@ -176,15 +164,12 @@ mod tests {
         });
         let handle_a = watch_event_router.subscribe(inc_callback);
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        block_on(async {
-            let future1 = watch_event_router.broadcast();
-            let future2 = watch_event_router.broadcast();
-            futures::join!(future1, future2)
-        });
+        watch_event_router.broadcast().wait().unwrap();
+        watch_event_router.broadcast().wait().unwrap();
        assert_eq!(2, counter.load(Ordering::SeqCst));
        mem::drop(handle_a);
        let _ = watch_event_router.broadcast();
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast().wait().unwrap();
        assert_eq!(2, counter.load(Ordering::SeqCst));
     }
 }
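A short sketch of the new `FutureResult`-based broadcast API shown above (not part of the diff). It assumes `WatchCallbackList` can still be constructed via `Default` and is re-exported from `tantivy::directory` as in the module file earlier in this diff.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tantivy::directory::{WatchCallback, WatchCallbackList};

fn broadcast_and_wait() {
    let counter = Arc::new(AtomicUsize::new(0));
    let counter_clone = counter.clone();
    let router = WatchCallbackList::default();
    // Keep the handle alive, otherwise the callback is unsubscribed.
    let _handle = router.subscribe(WatchCallback::new(move || {
        counter_clone.fetch_add(1, Ordering::SeqCst);
    }));
    // `broadcast()` returns a `FutureResult<()>`; `.wait()` blocks until the
    // callback thread has finished, replacing `futures::executor::block_on(...)`.
    router.broadcast().wait().unwrap();
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}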
68  src/error.rs
@@ -1,9 +1,11 @@
-//! Definition of Tantivy's error and result.
+//! Definition of Tantivy's errors and results.
 
 use std::path::PathBuf;
 use std::sync::PoisonError;
 use std::{fmt, io};
 
+use thiserror::Error;
+
 use crate::directory::error::{
     Incompatibility, LockError, OpenDirectoryError, OpenReadError, OpenWriteError,
 };
@@ -12,7 +14,7 @@ use crate::{query, schema};
 
 /// Represents a `DataCorruption` error.
 ///
-/// When facing data corruption, tantivy actually panic or return this error.
+/// When facing data corruption, tantivy actually panics or returns this error.
 pub struct DataCorruption {
     filepath: Option<PathBuf>,
     comment: String,
@@ -38,9 +40,9 @@ impl DataCorruption {
 
 impl fmt::Debug for DataCorruption {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
-        write!(f, "Data corruption: ")?;
+        write!(f, "Data corruption")?;
         if let Some(ref filepath) = &self.filepath {
-            write!(f, "(in file `{:?}`)", filepath)?;
+            write!(f, " (in file `{:?}`)", filepath)?;
         }
         write!(f, ": {}.", self.comment)?;
         Ok(())
@@ -59,10 +61,10 @@ pub enum TantivyError {
     /// Failed to open a file for write.
     #[error("Failed to open file for write: '{0:?}'")]
     OpenWriteError(#[from] OpenWriteError),
-    /// Index already exists in this directory
+    /// Index already exists in this directory.
     #[error("Index already exists")]
     IndexAlreadyExists,
-    /// Failed to acquire file lock
+    /// Failed to acquire file lock.
     #[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
     LockFailure(LockError, Option<String>),
     /// IO Error.
@@ -80,21 +82,47 @@ pub enum TantivyError {
     /// Invalid argument was passed by the user.
     #[error("An invalid argument was passed: '{0}'")]
     InvalidArgument(String),
-    /// An Error happened in one of the thread.
+    /// An Error occurred in one of the threads.
     #[error("An error occurred in a thread: '{0}'")]
     ErrorInThread(String),
-    /// An Error appeared related to opening or creating a index.
+    /// An Error occurred related to opening or creating a index.
     #[error("Missing required index builder argument when open/create index: '{0}'")]
     IndexBuilderMissingArgument(&'static str),
-    /// An Error appeared related to the schema.
+    /// An Error occurred related to the schema.
     #[error("Schema error: '{0}'")]
     SchemaError(String),
-    /// System error. (e.g.: We failed spawning a new thread)
+    /// System error. (e.g.: We failed spawning a new thread).
     #[error("System error.'{0}'")]
     SystemError(String),
-    /// Index incompatible with current version of tantivy
+    /// Index incompatible with current version of Tantivy.
     #[error("{0:?}")]
     IncompatibleIndex(Incompatibility),
+    /// An internal error occurred. This is are internal states that should not be reached.
+    /// e.g. a datastructure is incorrectly inititalized.
+    #[error("Internal error: '{0}'")]
+    InternalError(String),
+}
+
+#[cfg(feature = "quickwit")]
+#[derive(Error, Debug)]
+#[doc(hidden)]
+pub enum AsyncIoError {
+    #[error("io::Error `{0}`")]
+    Io(#[from] io::Error),
+    #[error("Asynchronous API is unsupported by this directory")]
+    AsyncUnsupported,
+}
+
+#[cfg(feature = "quickwit")]
+impl From<AsyncIoError> for TantivyError {
+    fn from(async_io_err: AsyncIoError) -> Self {
+        match async_io_err {
+            AsyncIoError::Io(io_err) => TantivyError::from(io_err),
+            AsyncIoError::AsyncUnsupported => {
+                TantivyError::SystemError(format!("{:?}", async_io_err))
+            }
+        }
+    }
 }
 
 impl From<DataCorruption> for TantivyError {
@@ -125,9 +153,21 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
     }
 }
 
-impl From<chrono::ParseError> for TantivyError {
-    fn from(err: chrono::ParseError) -> TantivyError {
-        TantivyError::InvalidArgument(err.to_string())
+impl From<time::error::Format> for TantivyError {
+    fn from(err: time::error::Format) -> TantivyError {
+        TantivyError::InvalidArgument(format!("Date formatting error: {err}"))
+    }
+}
+
+impl From<time::error::Parse> for TantivyError {
+    fn from(err: time::error::Parse) -> TantivyError {
+        TantivyError::InvalidArgument(format!("Date parsing error: {err}"))
+    }
+}
+
+impl From<time::error::ComponentRange> for TantivyError {
+    fn from(err: time::error::ComponentRange) -> TantivyError {
+        TantivyError::InvalidArgument(format!("Date range error: {err}"))
     }
 }
 
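A minimal sketch of how the new `time` error conversions above are meant to be used (not part of the diff). It assumes the `time` crate is re-exported as `tantivy::time`, matching the `crate::time` paths used elsewhere in this changeset.

use tantivy::time::format_description::well_known::Rfc3339;
use tantivy::time::OffsetDateTime;
use tantivy::TantivyError;

// The `From<time::error::Parse> for TantivyError` impl added above lets `?`
// turn a failed RFC 3339 parse into `TantivyError::InvalidArgument`.
fn parse_date_arg(input: &str) -> Result<OffsetDateTime, TantivyError> {
    let date = OffsetDateTime::parse(input, &Rfc3339)?;
    Ok(date)
}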
@@ -7,7 +7,7 @@ use ownedbytes::OwnedBytes;
 use crate::space_usage::ByteCount;
 use crate::DocId;
 
-/// Write a alive `BitSet`
+/// Write an alive `BitSet`
 ///
 /// where `alive_bitset` is the set of alive `DocId`.
 /// Warning: this function does not call terminate. The caller is in charge of
@@ -55,19 +55,19 @@ impl AliveBitSet {
         AliveBitSet::from(readonly_bitset)
     }
 
-    /// Opens a delete bitset given its file.
+    /// Opens an alive bitset given its file.
     pub fn open(bytes: OwnedBytes) -> AliveBitSet {
         let bitset = ReadOnlyBitSet::open(bytes);
         AliveBitSet::from(bitset)
     }
 
-    /// Returns true iff the document is still "alive". In other words, if it has not been deleted.
+    /// Returns true if the document is still "alive". In other words, if it has not been deleted.
     #[inline]
     pub fn is_alive(&self, doc: DocId) -> bool {
         self.bitset.contains(doc)
     }
 
-    /// Returns true iff the document has been marked as deleted.
+    /// Returns true if the document has been marked as deleted.
     #[inline]
     pub fn is_deleted(&self, doc: DocId) -> bool {
         !self.is_alive(doc)
@@ -79,13 +79,13 @@ impl AliveBitSet {
         self.bitset.iter()
     }
 
-    /// Get underlying bitset
+    /// Get underlying bitset.
     #[inline]
     pub fn bitset(&self) -> &ReadOnlyBitSet {
         &self.bitset
     }
 
-    /// The number of deleted docs
+    /// The number of alive documents.
     pub fn num_alive_docs(&self) -> usize {
         self.num_alive_docs
     }
@@ -188,14 +188,14 @@ mod bench {
     }
 
     #[bench]
-    fn bench_deletebitset_iter_deser_on_fly(bench: &mut Bencher) {
+    fn bench_alive_bitset_iter_deser_on_fly(bench: &mut Bencher) {
         let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000);
 
         bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>());
     }
 
     #[bench]
-    fn bench_deletebitset_access(bench: &mut Bencher) {
+    fn bench_alive_bitset_access(bench: &mut Bencher) {
         let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[0, 1, 1000, 10000], 1_000_000);
 
         bench.iter(|| {
@@ -206,14 +206,14 @@ mod bench {
     }
 
     #[bench]
-    fn bench_deletebitset_iter_deser_on_fly_1_8_alive(bench: &mut Bencher) {
+    fn bench_alive_bitset_iter_deser_on_fly_1_8_alive(bench: &mut Bencher) {
         let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000);
 
         bench.iter(|| alive_bitset.iter_alive().collect::<Vec<_>>());
     }
 
     #[bench]
-    fn bench_deletebitset_access_1_8_alive(bench: &mut Bencher) {
+    fn bench_alive_bitset_access_1_8_alive(bench: &mut Bencher) {
         let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&get_alive(), 1_000_000);
 
         bench.iter(|| {
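A tiny usage sketch of the `AliveBitSet` accessors documented above (not part of the diff). It assumes `AliveBitSet` stays re-exported under `tantivy::fastfield`.

use tantivy::fastfield::AliveBitSet;

// Counts deleted documents; `is_deleted` is the negation of `is_alive`, and the
// total must agree with `num_alive_docs`.
fn count_deleted_docs(alive_bitset: &AliveBitSet, max_doc: u32) -> usize {
    let deleted = (0..max_doc).filter(|&doc| alive_bitset.is_deleted(doc)).count();
    debug_assert_eq!(alive_bitset.num_alive_docs() + deleted, max_doc as usize);
    deleted
}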
@@ -7,7 +7,7 @@ use crate::DocId;
 
 /// Writer for byte array (as in, any number of bytes per document) fast fields
 ///
-/// This `BytesFastFieldWriter` is only useful for advanced user.
+/// This `BytesFastFieldWriter` is only useful for advanced users.
 /// The normal way to get your associated bytes in your index
 /// is to
 /// - declare your field with fast set to `Cardinality::SingleValue`
@@ -2,7 +2,7 @@
 //!
 //! It is the equivalent of `Lucene`'s `DocValues`.
 //!
-//! Fast fields is a column-oriented fashion storage of `tantivy`.
+//! A fast field is a column-oriented fashion storage for `tantivy`.
 //!
 //! It is designed for the fast random access of some document
 //! fields given a document id.
@@ -12,11 +12,10 @@
 //!
 //!
 //! Fields have to be declared as `FAST` in the schema.
-//! Currently only 64-bits integers (signed or unsigned) are
-//! supported.
+//! Currently supported fields are: u64, i64, f64 and bytes.
 //!
-//! They are stored in a bit-packed fashion so that their
-//! memory usage is directly linear with the amplitude of the
+//! u64, i64 and f64 fields are stored in a bit-packed fashion so that
+//! their memory usage is directly linear with the amplitude of the
 //! values stored.
 //!
 //! Read access performance is comparable to that of an array lookup.
@@ -28,11 +27,11 @@ pub use self::facet_reader::FacetReader;
 pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
 pub use self::reader::{DynamicFastFieldReader, FastFieldReader};
 pub use self::readers::FastFieldReaders;
+pub(crate) use self::readers::{type_and_cardinality, FastType};
 pub use self::serializer::{CompositeFastFieldSerializer, FastFieldDataAccess, FastFieldStats};
 pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
-use crate::chrono::{NaiveDateTime, Utc};
 use crate::schema::{Cardinality, FieldType, Type, Value};
-use crate::DocId;
+use crate::{DateTime, DocId};
 
 mod alive_bitset;
 mod bytes;
@@ -161,14 +160,14 @@ impl FastValue for f64 {
     }
 }
 
-impl FastValue for crate::DateTime {
+impl FastValue for DateTime {
     fn from_u64(timestamp_u64: u64) -> Self {
-        let timestamp_i64 = i64::from_u64(timestamp_u64);
-        crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
+        let unix_timestamp = i64::from_u64(timestamp_u64);
+        Self::from_unix_timestamp(unix_timestamp)
     }
 
     fn to_u64(&self) -> u64 {
-        self.timestamp().to_u64()
+        self.into_unix_timestamp().to_u64()
     }
 
     fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
@@ -179,7 +178,7 @@ impl FastValue for crate::DateTime {
     }
 
     fn as_u64(&self) -> u64 {
-        self.timestamp().as_u64()
+        self.into_unix_timestamp().as_u64()
     }
 
     fn to_type() -> Type {
@@ -188,12 +187,32 @@ impl FastValue for crate::DateTime {
 }
 
 fn value_to_u64(value: &Value) -> u64 {
-    match *value {
-        Value::U64(ref val) => *val,
-        Value::I64(ref val) => common::i64_to_u64(*val),
-        Value::F64(ref val) => common::f64_to_u64(*val),
-        Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
-        _ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
+    match value {
+        Value::U64(val) => val.to_u64(),
+        Value::I64(val) => val.to_u64(),
+        Value::F64(val) => val.to_u64(),
+        Value::Date(val) => val.to_u64(),
+        _ => panic!("Expected a u64/i64/f64/date field, got {:?} ", value),
+    }
+}
+
+/// The fast field type
+pub enum FastFieldType {
+    /// Numeric type, e.g. f64.
+    Numeric,
+    /// Fast field stores string ids.
+    String,
+    /// Fast field stores string ids for facets.
+    Facet,
+}
+
+impl FastFieldType {
+    fn is_storing_term_ids(&self) -> bool {
+        matches!(self, FastFieldType::String | FastFieldType::Facet)
+    }
+
+    fn is_facet(&self) -> bool {
+        matches!(self, FastFieldType::Facet)
     }
 }
 
@@ -201,6 +220,7 @@ fn value_to_u64(value: &Value) -> u64 {
 mod tests {
 
     use std::collections::HashMap;
+    use std::ops::Range;
     use std::path::Path;
 
     use common::HasLen;
@@ -212,7 +232,8 @@ mod tests {
     use super::*;
     use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
     use crate::merge_policy::NoMergePolicy;
-    use crate::schema::{Document, Field, IntOptions, Schema, FAST};
+    use crate::schema::{Document, Field, NumericOptions, Schema, FAST, STRING, TEXT};
+    use crate::time::OffsetDateTime;
     use crate::{Index, SegmentId, SegmentReader};
 
     pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
@@ -233,7 +254,7 @@ mod tests {
 
     #[test]
     pub fn test_fastfield_i64_u64() {
-        let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
+        let datetime = DateTime::from_utc(OffsetDateTime::UNIX_EPOCH);
         assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
     }
 
@@ -489,7 +510,8 @@ mod tests {
         let index = Index::create_in_ram(schema);
         let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.set_merge_policy(Box::new(NoMergePolicy));
-        index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()))?;
+        index_writer
+            .add_document(doc!(date_field =>DateTime::from_utc(OffsetDateTime::now_utc())))?;
         index_writer.commit()?;
         index_writer.add_document(doc!())?;
         index_writer.commit()?;
@@ -501,8 +523,7 @@ mod tests {
             .map(SegmentReader::segment_id)
             .collect();
         assert_eq!(segment_ids.len(), 2);
-        let merge_future = index_writer.merge(&segment_ids[..]);
-        futures::executor::block_on(merge_future)?;
+        index_writer.merge(&segment_ids[..]).wait().unwrap();
         reader.reload()?;
         assert_eq!(reader.searcher().segment_readers().len(), 1);
         Ok(())
@@ -510,7 +531,206 @@ mod tests {
 
     #[test]
     fn test_default_datetime() {
-        assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
+        assert_eq!(0, DateTime::make_zero().into_unix_timestamp());
+    }
+
+    fn get_vals_for_docs(ff: &MultiValuedFastFieldReader<u64>, docs: Range<u32>) -> Vec<u64> {
+        let mut all = vec![];
+
+        for doc in docs {
+            let mut out = vec![];
+            ff.get_vals(doc, &mut out);
+            all.extend(out);
+        }
+        all
+    }
+
+    #[test]
+    fn test_text_fastfield() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", TEXT | FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+
+        {
+            // first segment
+            let mut index_writer = index.writer_for_tests()?;
+            index_writer.set_merge_policy(Box::new(NoMergePolicy));
+            index_writer.add_document(doc!(
+                text_field => "BBBBB AAAAA", // term_ord 1,2
+            ))?;
+            index_writer.add_document(doc!())?;
+            index_writer.add_document(doc!(
+                text_field => "AAAAA", // term_ord 0
+            ))?;
+            index_writer.add_document(doc!(
+                text_field => "AAAAA BBBBB", // term_ord 0
+            ))?;
+            index_writer.add_document(doc!(
+                text_field => "zumberthree", // term_ord 2, after merge term_ord 3
+            ))?;
+
+            index_writer.add_document(doc!())?;
+            index_writer.commit()?;
+
+            let reader = index.reader()?;
+            let searcher = reader.searcher();
+            assert_eq!(searcher.segment_readers().len(), 1);
+            let segment_reader = searcher.segment_reader(0);
+            let fast_fields = segment_reader.fast_fields();
+            let text_fast_field = fast_fields.u64s(text_field).unwrap();
+
+            assert_eq!(
+                get_vals_for_docs(&text_fast_field, 0..5),
+                vec![1, 0, 0, 0, 1, 2]
+            );
+
+            let mut out = vec![];
+            text_fast_field.get_vals(3, &mut out);
+            assert_eq!(out, vec![0, 1]);
+
+            let inverted_index = segment_reader.inverted_index(text_field)?;
+            assert_eq!(inverted_index.terms().num_terms(), 3);
+            let mut bytes = vec![];
+            assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
+            // default tokenizer applies lower case
+            assert_eq!(bytes, "aaaaa".as_bytes());
+        }
+
+        {
+            // second segment
+            let mut index_writer = index.writer_for_tests()?;
+
+            index_writer.add_document(doc!(
+                text_field => "AAAAA", // term_ord 0
+            ))?;
+
+            index_writer.add_document(doc!(
+                text_field => "CCCCC AAAAA", // term_ord 1, after merge 2
+            ))?;
+
+            index_writer.add_document(doc!())?;
+            index_writer.commit()?;
+
+            let reader = index.reader()?;
+            let searcher = reader.searcher();
+            assert_eq!(searcher.segment_readers().len(), 2);
+            let segment_reader = searcher.segment_reader(1);
+            let fast_fields = segment_reader.fast_fields();
+            let text_fast_field = fast_fields.u64s(text_field).unwrap();
+
+            assert_eq!(get_vals_for_docs(&text_fast_field, 0..3), vec![0, 1, 0]);
+        }
+        // Merging the segments
+        {
+            let segment_ids = index.searchable_segment_ids()?;
+            let mut index_writer = index.writer_for_tests()?;
+            index_writer.merge(&segment_ids).wait()?;
+            index_writer.wait_merging_threads()?;
+        }
+
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let segment_reader = searcher.segment_reader(0);
+        let fast_fields = segment_reader.fast_fields();
+        let text_fast_field = fast_fields.u64s(text_field).unwrap();
+
+        assert_eq!(
+            get_vals_for_docs(&text_fast_field, 0..8),
+            vec![1, 0, 0, 0, 1, 3 /* next segment */, 0, 2, 0]
+        );
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_string_fastfield() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", STRING | FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+
+        {
+            // first segment
+            let mut index_writer = index.writer_for_tests()?;
+            index_writer.set_merge_policy(Box::new(NoMergePolicy));
+            index_writer.add_document(doc!(
+                text_field => "BBBBB", // term_ord 1
+            ))?;
+            index_writer.add_document(doc!())?;
+            index_writer.add_document(doc!(
+                text_field => "AAAAA", // term_ord 0
+            ))?;
+            index_writer.add_document(doc!(
+                text_field => "AAAAA", // term_ord 0
+            ))?;
+            index_writer.add_document(doc!(
+                text_field => "zumberthree", // term_ord 2, after merge term_ord 3
+            ))?;
+
+            index_writer.add_document(doc!())?;
+            index_writer.commit()?;
+
+            let reader = index.reader()?;
+            let searcher = reader.searcher();
+            assert_eq!(searcher.segment_readers().len(), 1);
+            let segment_reader = searcher.segment_reader(0);
+            let fast_fields = segment_reader.fast_fields();
+            let text_fast_field = fast_fields.u64s(text_field).unwrap();
+
+            assert_eq!(get_vals_for_docs(&text_fast_field, 0..6), vec![1, 0, 0, 2]);
+
+            let inverted_index = segment_reader.inverted_index(text_field)?;
+            assert_eq!(inverted_index.terms().num_terms(), 3);
+            let mut bytes = vec![];
+            assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
+            assert_eq!(bytes, "AAAAA".as_bytes());
+        }
+
+        {
+            // second segment
+            let mut index_writer = index.writer_for_tests()?;
+
+            index_writer.add_document(doc!(
+                text_field => "AAAAA", // term_ord 0
+            ))?;
+
+            index_writer.add_document(doc!(
+                text_field => "CCCCC", // term_ord 1, after merge 2
+            ))?;
+
+            index_writer.add_document(doc!())?;
+            index_writer.commit()?;
+
+            let reader = index.reader()?;
+            let searcher = reader.searcher();
+            assert_eq!(searcher.segment_readers().len(), 2);
+            let segment_reader = searcher.segment_reader(1);
+            let fast_fields = segment_reader.fast_fields();
+            let text_fast_field = fast_fields.u64s(text_field).unwrap();
+
+            assert_eq!(get_vals_for_docs(&text_fast_field, 0..2), vec![0, 1]);
+        }
+        // Merging the segments
+        {
+            let segment_ids = index.searchable_segment_ids()?;
+            let mut index_writer = index.writer_for_tests()?;
+            index_writer.merge(&segment_ids).wait()?;
+            index_writer.wait_merging_threads()?;
+        }
+
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let segment_reader = searcher.segment_reader(0);
+        let fast_fields = segment_reader.fast_fields();
+        let text_fast_field = fast_fields.u64s(text_field).unwrap();
+
+        assert_eq!(
+            get_vals_for_docs(&text_fast_field, 0..9),
+            vec![1, 0, 0, 3 /* next segment */, 0, 2]
+        );
+
+        Ok(())
     }
 
     #[test]
@@ -520,23 +740,23 @@ mod tests {
         let date_field = schema_builder.add_date_field("date", FAST);
         let multi_date_field = schema_builder.add_date_field(
            "multi_date",
-            IntOptions::default().set_fast(Cardinality::MultiValues),
+            NumericOptions::default().set_fast(Cardinality::MultiValues),
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index.writer_for_tests()?;
         index_writer.set_merge_policy(Box::new(NoMergePolicy));
         index_writer.add_document(doc!(
-            date_field => crate::DateTime::from_u64(1i64.to_u64()),
-            multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
-            multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
+            date_field => DateTime::from_u64(1i64.to_u64()),
+            multi_date_field => DateTime::from_u64(2i64.to_u64()),
+            multi_date_field => DateTime::from_u64(3i64.to_u64())
         ))?;
         index_writer.add_document(doc!(
-            date_field => crate::DateTime::from_u64(4i64.to_u64())
+            date_field => DateTime::from_u64(4i64.to_u64())
         ))?;
         index_writer.add_document(doc!(
-            multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
-            multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
+            multi_date_field => DateTime::from_u64(5i64.to_u64()),
+            multi_date_field => DateTime::from_u64(6i64.to_u64())
         ))?;
         index_writer.commit()?;
         let reader = index.reader()?;
@@ -548,23 +768,23 @@ mod tests {
         let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
         let mut dates = vec![];
         {
-            assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
+            assert_eq!(date_fast_field.get(0u32).into_unix_timestamp(), 1i64);
             dates_fast_field.get_vals(0u32, &mut dates);
             assert_eq!(dates.len(), 2);
-            assert_eq!(dates[0].timestamp(), 2i64);
-            assert_eq!(dates[1].timestamp(), 3i64);
+            assert_eq!(dates[0].into_unix_timestamp(), 2i64);
+            assert_eq!(dates[1].into_unix_timestamp(), 3i64);
         }
         {
-            assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
+            assert_eq!(date_fast_field.get(1u32).into_unix_timestamp(), 4i64);
             dates_fast_field.get_vals(1u32, &mut dates);
             assert!(dates.is_empty());
         }
         {
-            assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
+            assert_eq!(date_fast_field.get(2u32).into_unix_timestamp(), 0i64);
            dates_fast_field.get_vals(2u32, &mut dates);
            assert_eq!(dates.len(), 2);
-            assert_eq!(dates[0].timestamp(), 5i64);
-            assert_eq!(dates[1].timestamp(), 6i64);
+            assert_eq!(dates[0].into_unix_timestamp(), 5i64);
+            assert_eq!(dates[1].into_unix_timestamp(), 6i64);
        }
        Ok(())
     }
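A small round-trip sketch of the `DateTime` fast value encoding shown above (not part of the diff). It assumes `FastValue` and `DateTime` are exposed at the paths below and that the value is stored as a u64-encoded unix timestamp.

use tantivy::fastfield::FastValue;
use tantivy::DateTime;

// `to_u64`/`from_u64` should round-trip the second-precision unix timestamp.
fn roundtrip_datetime(unix_timestamp: i64) {
    let date = DateTime::from_unix_timestamp(unix_timestamp);
    let encoded = date.to_u64();
    let decoded = DateTime::from_u64(encoded);
    assert_eq!(decoded.into_unix_timestamp(), unix_timestamp);
}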
@@ -6,9 +6,6 @@ pub use self::writer::MultiValuedFastFieldWriter;
 
 #[cfg(test)]
 mod tests {
-
-    use chrono::Duration;
-    use futures::executor::block_on;
     use proptest::strategy::Strategy;
     use proptest::{prop_oneof, proptest};
     use test_log::test;
@@ -16,15 +13,17 @@ mod tests {
     use crate::collector::TopDocs;
     use crate::indexer::NoMergePolicy;
     use crate::query::QueryParser;
-    use crate::schema::{Cardinality, Facet, FacetOptions, IntOptions, Schema};
-    use crate::{Document, Index, Term};
+    use crate::schema::{Cardinality, Facet, FacetOptions, NumericOptions, Schema};
+    use crate::time::format_description::well_known::Rfc3339;
+    use crate::time::{Duration, OffsetDateTime};
+    use crate::{DateTime, Document, Index, Term};
 
     #[test]
     fn test_multivalued_u64() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let field = schema_builder.add_u64_field(
             "multifield",
-            IntOptions::default().set_fast(Cardinality::MultiValues),
+            NumericOptions::default().set_fast(Cardinality::MultiValues),
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
@@ -59,33 +58,38 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let date_field = schema_builder.add_date_field(
             "multi_date_field",
-            IntOptions::default()
+            NumericOptions::default()
                 .set_fast(Cardinality::MultiValues)
                 .set_indexed()
                 .set_fieldnorm()
                 .set_stored(),
         );
         let time_i =
-            schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
+            schema_builder.add_i64_field("time_stamp_i", NumericOptions::default().set_stored());
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index.writer_for_tests()?;
-        let first_time_stamp = chrono::Utc::now();
-        index_writer.add_document(
-            doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
-        )?;
-        index_writer.add_document(doc!(time_i=>0i64))?;
+        let first_time_stamp = OffsetDateTime::now_utc();
+        index_writer.add_document(doc!(
+            date_field => DateTime::from_utc(first_time_stamp),
+            date_field => DateTime::from_utc(first_time_stamp),
+            time_i=>1i64))?;
+        index_writer.add_document(doc!(time_i => 0i64))?;
         // add one second
-        index_writer.add_document(
-            doc!(date_field=>first_time_stamp + Duration::seconds(1), time_i=>2i64),
-        )?;
+        index_writer.add_document(doc!(
+            date_field => DateTime::from_utc(first_time_stamp + Duration::seconds(1)),
+            time_i => 2i64))?;
         // add another second
         let two_secs_ahead = first_time_stamp + Duration::seconds(2);
-        index_writer.add_document(doc!(date_field=>two_secs_ahead, date_field=>two_secs_ahead,date_field=>two_secs_ahead, time_i=>3i64))?;
+        index_writer.add_document(doc!(
+            date_field => DateTime::from_utc(two_secs_ahead),
+            date_field => DateTime::from_utc(two_secs_ahead),
+            date_field => DateTime::from_utc(two_secs_ahead),
+            time_i => 3i64))?;
         // add three seconds
-        index_writer.add_document(
-            doc!(date_field=>first_time_stamp + Duration::seconds(3), time_i=>4i64),
-        )?;
+        index_writer.add_document(doc!(
+            date_field => DateTime::from_utc(first_time_stamp + Duration::seconds(3)),
+            time_i => 4i64))?;
         index_writer.commit()?;
 
         let reader = index.reader()?;
@@ -94,8 +98,11 @@ mod tests {
         assert_eq!(reader.num_docs(), 5);
 
         {
-            let parser = QueryParser::for_index(&index, vec![date_field]);
-            let query = parser.parse_query(&format!("\"{}\"", first_time_stamp.to_rfc3339()))?;
+            let parser = QueryParser::for_index(&index, vec![]);
+            let query = parser.parse_query(&format!(
+                "multi_date_field:\"{}\"",
+                first_time_stamp.format(&Rfc3339)?,
+            ))?;
             let results = searcher.search(&query, &TopDocs::with_limit(5))?;
             assert_eq!(results.len(), 1);
             for (_score, doc_address) in results {
@@ -105,9 +112,8 @@ mod tests {
                     .get_first(date_field)
                     .expect("cannot find value")
                     .as_date()
-                    .unwrap()
-                    .timestamp(),
-                first_time_stamp.timestamp()
+                    .unwrap(),
+                DateTime::from_utc(first_time_stamp),
             );
             assert_eq!(
                 retrieved_doc
@@ -121,7 +127,7 @@ mod tests {
 
         {
             let parser = QueryParser::for_index(&index, vec![date_field]);
-            let query = parser.parse_query(&format!("\"{}\"", two_secs_ahead.to_rfc3339()))?;
+            let query = parser.parse_query(&format!("\"{}\"", two_secs_ahead.format(&Rfc3339)?))?;
             let results = searcher.search(&query, &TopDocs::with_limit(5))?;
 
             assert_eq!(results.len(), 1);
@@ -133,9 +139,8 @@ mod tests {
                     .get_first(date_field)
                     .expect("cannot find value")
                     .as_date()
-                    .unwrap()
-                    .timestamp(),
-                two_secs_ahead.timestamp()
+                    .unwrap(),
+                DateTime::from_utc(two_secs_ahead)
            );
            assert_eq!(
                retrieved_doc
@@ -150,9 +155,9 @@ mod tests {
        {
            let parser = QueryParser::for_index(&index, vec![date_field]);
            let range_q = format!(
-                "[{} TO {}}}",
-                (first_time_stamp + Duration::seconds(1)).to_rfc3339(),
-                (first_time_stamp + Duration::seconds(3)).to_rfc3339()
+                "multi_date_field:[{} TO {}}}",
+                (first_time_stamp + Duration::seconds(1)).format(&Rfc3339)?,
+                (first_time_stamp + Duration::seconds(3)).format(&Rfc3339)?
            );
            let query = parser.parse_query(&range_q)?;
            let results = searcher.search(&query, &TopDocs::with_limit(5))?;
@@ -175,9 +180,8 @@ mod tests {
                    .get_first(date_field)
                    .expect("cannot find value")
                    .as_date()
-                    .expect("value not of Date type")
-                    .timestamp(),
-                (first_time_stamp + Duration::seconds(offset_sec)).timestamp()
+                    .expect("value not of Date type"),
+                DateTime::from_utc(first_time_stamp + Duration::seconds(offset_sec)),
            );
            assert_eq!(
                retrieved_doc
@@ -196,7 +200,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let field = schema_builder.add_i64_field(
            "multifield",
-            IntOptions::default().set_fast(Cardinality::MultiValues),
+            NumericOptions::default().set_fast(Cardinality::MultiValues),
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
@@ -226,7 +230,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let field = schema_builder.add_u64_field(
            "multifield",
-            IntOptions::default()
+            NumericOptions::default()
                .set_fast(Cardinality::MultiValues)
                .set_indexed(),
         );
@@ -265,7 +269,7 @@ mod tests {
                IndexingOp::Merge => {
                    let segment_ids = index.searchable_segment_ids()?;
                    if segment_ids.len() >= 2 {
-                        block_on(index_writer.merge(&segment_ids))?;
+                        index_writer.merge(&segment_ids).wait()?;
                        index_writer.segment_updater().wait_merging_thread()?;
                    }
                }
@@ -280,7 +284,7 @@ mod tests {
            .searchable_segment_ids()
            .expect("Searchable segments failed.");
        if !segment_ids.is_empty() {
-            block_on(index_writer.merge(&segment_ids)).unwrap();
+            index_writer.merge(&segment_ids).wait()?;
            assert!(index_writer.wait_merging_threads().is_ok());
        }
    }
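A sketch of the futures-free merge pattern used throughout these tests (not part of the diff): `merge()` returns a result that can simply be `.wait()`-ed on instead of being driven with `futures::executor::block_on`.

use tantivy::{Index, IndexWriter};

fn merge_all_segments(index: &Index, mut index_writer: IndexWriter) -> tantivy::Result<()> {
    let segment_ids = index.searchable_segment_ids()?;
    if segment_ids.len() >= 2 {
        // Blocks the current thread until the merge has completed.
        index_writer.merge(&segment_ids).wait()?;
    }
    // Consumes the writer and joins the background merge threads.
    index_writer.wait_merging_threads()?;
    Ok(())
}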
@@ -27,22 +27,28 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
         }
     }
 
-    /// Returns `(start, stop)`, such that the values associated
-    /// to the given document are `start..stop`.
+    /// Returns `[start, end)`, such that the values associated
+    /// to the given document are `start..end`.
     #[inline]
     fn range(&self, doc: DocId) -> Range<u64> {
         let start = self.idx_reader.get(doc);
-        let stop = self.idx_reader.get(doc + 1);
-        start..stop
+        let end = self.idx_reader.get(doc + 1);
+        start..end
+    }
+
+    /// Returns the array of values associated to the given `doc`.
+    #[inline]
+    fn get_vals_for_range(&self, range: Range<u64>, vals: &mut Vec<Item>) {
+        let len = (range.end - range.start) as usize;
+        vals.resize(len, Item::make_zero());
+        self.vals_reader.get_range(range.start, &mut vals[..]);
     }
 
     /// Returns the array of values associated to the given `doc`.
     #[inline]
     pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
         let range = self.range(doc);
-        let len = (range.end - range.start) as usize;
-        vals.resize(len, Item::make_zero());
-        self.vals_reader.get_range(range.start, &mut vals[..]);
+        self.get_vals_for_range(range, vals);
     }
 
     /// Returns the minimum value for this fast field.
@@ -90,7 +96,7 @@ impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
 mod tests {
 
     use crate::core::Index;
-    use crate::schema::{Cardinality, Facet, FacetOptions, IntOptions, Schema};
+    use crate::schema::{Cardinality, Facet, FacetOptions, NumericOptions, Schema};
 
     #[test]
     fn test_multifastfield_reader() -> crate::Result<()> {
@@ -148,7 +154,7 @@ mod tests {
     #[test]
     fn test_multifastfield_reader_min_max() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
-        let field_options = IntOptions::default()
+        let field_options = NumericOptions::default()
             .set_indexed()
             .set_fast(Cardinality::MultiValues);
         let item_field = schema_builder.add_i64_field("items", field_options);
|
|||||||
@@ -4,7 +4,7 @@ use fnv::FnvHashMap;
 use tantivy_bitpacker::minmax;
 
 use crate::fastfield::serializer::BitpackedFastFieldSerializerLegacy;
-use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer};
+use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer, FastFieldType};
 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::postings::UnorderedTermId;
 use crate::schema::{Document, Field};
@@ -14,7 +14,7 @@ use crate::DocId;
 /// Writer for multi-valued (as in, more than one value per document)
 /// int fast field.
 ///
-/// This `Writer` is only useful for advanced user.
+/// This `Writer` is only useful for advanced users.
 /// The normal way to get your multivalued int in your index
 /// is to
 /// - declare your field with fast set to `Cardinality::MultiValues`
@@ -23,10 +23,11 @@ use crate::DocId;
 ///
 /// The `MultiValuedFastFieldWriter` can be acquired from the
 /// fastfield writer, by calling
-/// [`.get_multivalue_writer(...)`](./struct.FastFieldsWriter.html#method.get_multivalue_writer).
+/// [`.get_multivalue_writer_mut(...)`](./struct.FastFieldsWriter.html#method.
+/// get_multivalue_writer_mut).
 ///
-/// Once acquired, writing is done by calling calls to
-/// `.add_document_vals(&[u64])` once per document.
+/// Once acquired, writing is done by calling
+/// [`.add_document_vals(&[u64])`](MultiValuedFastFieldWriter::add_document_vals) once per document.
 ///
 /// The serializer makes it possible to remap all of the values
 /// that were pushed to the writer using a mapping.
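
The doc comment above points to the normal route for a multivalued fast field: declare it in the schema rather than driving this writer directly. A minimal sketch under that reading, using the `NumericOptions`/`Cardinality::MultiValues` names this release settles on (the `ratings` field name is illustrative):

```rust
use tantivy::schema::{Cardinality, NumericOptions, Schema};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // Several u64 values per document, stored as a multivalued fast field.
    let _ratings = schema_builder.add_u64_field(
        "ratings",
        NumericOptions::default()
            .set_indexed()
            .set_fast(Cardinality::MultiValues),
    );
    let _index = Index::create_in_ram(schema_builder.build());
    // Documents are then added through an IndexWriter as usual.
    Ok(())
}
```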
@@ -37,17 +38,17 @@ pub struct MultiValuedFastFieldWriter {
     field: Field,
     vals: Vec<UnorderedTermId>,
     doc_index: Vec<u64>,
-    is_facet: bool,
+    fast_field_type: FastFieldType,
 }
 
 impl MultiValuedFastFieldWriter {
-    /// Creates a new `IntFastFieldWriter`
-    pub(crate) fn new(field: Field, is_facet: bool) -> Self {
+    /// Creates a new `MultiValuedFastFieldWriter`
+    pub(crate) fn new(field: Field, fast_field_type: FastFieldType) -> Self {
         MultiValuedFastFieldWriter {
             field,
             vals: Vec::new(),
             doc_index: Vec::new(),
-            is_facet,
+            fast_field_type,
         }
     }
 
@@ -76,12 +77,13 @@ impl MultiValuedFastFieldWriter {
     /// all of the matching field values present in the document.
     pub fn add_document(&mut self, doc: &Document) {
         self.next_doc();
-        // facets are indexed in the `SegmentWriter` as we encode their unordered id.
-        if !self.is_facet {
-            for field_value in doc.field_values() {
-                if field_value.field == self.field {
-                    self.add_val(value_to_u64(field_value.value()));
-                }
+        // facets/texts are indexed in the `SegmentWriter` as we encode their unordered id.
+        if self.fast_field_type.is_storing_term_ids() {
+            return;
+        }
+        for field_value in doc.field_values() {
+            if field_value.field == self.field {
+                self.add_val(value_to_u64(field_value.value()));
             }
         }
     }
@@ -157,15 +159,15 @@ impl MultiValuedFastFieldWriter {
         {
             // writing the values themselves.
             let mut value_serializer: BitpackedFastFieldSerializerLegacy<'_, _>;
-            match mapping_opt {
-                Some(mapping) => {
-                    value_serializer = serializer.new_u64_fast_field_with_idx(
-                        self.field,
-                        0u64,
-                        mapping.len() as u64,
-                        1,
-                    )?;
+            if let Some(mapping) = mapping_opt {
+                value_serializer = serializer.new_u64_fast_field_with_idx(
+                    self.field,
+                    0u64,
+                    mapping.len() as u64,
+                    1,
+                )?;
 
+                if self.fast_field_type.is_facet() {
                     let mut doc_vals: Vec<u64> = Vec::with_capacity(100);
                     for vals in self.get_ordered_values(doc_id_map) {
                         doc_vals.clear();
@@ -178,19 +180,27 @@ impl MultiValuedFastFieldWriter {
                             value_serializer.add_val(val)?;
                         }
                     }
-                }
-                None => {
-                    let val_min_max = minmax(self.vals.iter().cloned());
-                    let (val_min, val_max) = val_min_max.unwrap_or((0u64, 0u64));
-                    value_serializer =
-                        serializer.new_u64_fast_field_with_idx(self.field, val_min, val_max, 1)?;
+                } else {
                     for vals in self.get_ordered_values(doc_id_map) {
-                        // sort values in case of remapped doc_ids?
-                        for &val in vals {
+                        let remapped_vals = vals
+                            .iter()
+                            .map(|val| *mapping.get(val).expect("Missing term ordinal"));
+                        for val in remapped_vals {
                             value_serializer.add_val(val)?;
                         }
                     }
                 }
+            } else {
+                let val_min_max = minmax(self.vals.iter().cloned());
+                let (val_min, val_max) = val_min_max.unwrap_or((0u64, 0u64));
+                value_serializer =
+                    serializer.new_u64_fast_field_with_idx(self.field, val_min, val_max, 1)?;
+                for vals in self.get_ordered_values(doc_id_map) {
+                    // sort values in case of remapped doc_ids?
+                    for &val in vals {
+                        value_serializer.add_val(val)?;
+                    }
+                }
             }
             value_serializer.close_field()?;
         }

@@ -5,7 +5,7 @@ use crate::fastfield::{
 };
 use crate::schema::{Cardinality, Field, FieldType, Schema};
 use crate::space_usage::PerFieldSpaceUsage;
-use crate::TantivyError;
+use crate::{DateTime, TantivyError};
 
 /// Provides access to all of the BitpackedFastFieldReader.
 ///
@@ -17,14 +17,14 @@ pub struct FastFieldReaders {
     fast_fields_composite: CompositeFile,
 }
 #[derive(Eq, PartialEq, Debug)]
-enum FastType {
+pub(crate) enum FastType {
     I64,
     U64,
     F64,
     Date,
 }
 
-fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
+pub(crate) fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
     match field_type {
         FieldType::U64(options) => options
             .get_fastfield_cardinality()
@@ -39,6 +39,9 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
             .get_fastfield_cardinality()
             .map(|cardinality| (FastType::Date, cardinality)),
         FieldType::Facet(_) => Some((FastType::U64, Cardinality::MultiValues)),
+        FieldType::Str(options) if options.is_fast() => {
+            Some((FastType::U64, Cardinality::MultiValues))
+        }
         _ => None,
     }
 }
@@ -55,7 +58,8 @@ impl FastFieldReaders {
         self.fast_fields_composite.space_usage()
     }
 
-    fn fast_field_data(&self, field: Field, idx: usize) -> crate::Result<FileSlice> {
+    #[doc(hidden)]
+    pub fn fast_field_data(&self, field: Field, idx: usize) -> crate::Result<FileSlice> {
         self.fast_fields_composite
             .open_read_with_idx(field, idx)
             .ok_or_else(|| {
@@ -146,10 +150,10 @@ impl FastFieldReaders {
         self.typed_fast_field_reader(field)
     }
 
-    /// Returns the `i64` fast field reader reader associated to `field`.
+    /// Returns the `date` fast field reader reader associated to `field`.
     ///
-    /// If `field` is not a i64 fast field, this method returns an Error.
-    pub fn date(&self, field: Field) -> crate::Result<DynamicFastFieldReader<crate::DateTime>> {
+    /// If `field` is not a date fast field, this method returns an Error.
+    pub fn date(&self, field: Field) -> crate::Result<DynamicFastFieldReader<DateTime>> {
         self.check_type(field, FastType::Date, Cardinality::SingleValue)?;
         self.typed_fast_field_reader(field)
     }
@@ -194,13 +198,12 @@ impl FastFieldReaders {
         self.typed_fast_field_multi_reader(field)
     }
 
-    /// Returns a `crate::DateTime` multi-valued fast field reader reader associated to `field`.
+    /// Returns a `time::OffsetDateTime` multi-valued fast field reader reader associated to
+    /// `field`.
     ///
-    /// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns an Error.
-    pub fn dates(
-        &self,
-        field: Field,
-    ) -> crate::Result<MultiValuedFastFieldReader<crate::DateTime>> {
+    /// If `field` is not a `time::OffsetDateTime` multi-valued fast field, this method returns an
+    /// Error.
+    pub fn dates(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<DateTime>> {
         self.check_type(field, FastType::Date, Cardinality::MultiValues)?;
         self.typed_fast_field_multi_reader(field)
     }
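
With `dates()` now returning `DateTime` values backed by `time::OffsetDateTime`, reading a multi-valued date fast field looks roughly like the sketch below. It assumes only the signatures shown in this diff (`fast_fields().dates(...)` and `MultiValuedFastFieldReader::get_vals`); the function name is illustrative:

```rust
use tantivy::schema::Field;
use tantivy::{DateTime, DocId, SegmentReader};

// Collects every date stored for `doc` in a Cardinality::MultiValues date field.
fn doc_dates(
    segment_reader: &SegmentReader,
    date_field: Field,
    doc: DocId,
) -> tantivy::Result<Vec<DateTime>> {
    let dates_reader = segment_reader.fast_fields().dates(date_field)?;
    let mut vals = Vec::new();
    dates_reader.get_vals(doc, &mut vals);
    Ok(vals)
}
```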
@@ -197,7 +197,7 @@ impl CompositeFastFieldSerializer {
 
     /// Closes the serializer
     ///
-    /// After this call the data must be persistently save on disk.
+    /// After this call the data must be persistently saved on disk.
     pub fn close(self) -> io::Result<()> {
         self.composite_write.close()
     }

@@ -7,15 +7,16 @@ use tantivy_bitpacker::BlockedBitpacker;
 
 use super::multivalued::MultiValuedFastFieldWriter;
 use super::serializer::FastFieldStats;
-use super::FastFieldDataAccess;
+use super::{FastFieldDataAccess, FastFieldType};
 use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::postings::UnorderedTermId;
 use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
 use crate::termdict::TermOrdinal;
 
-/// The fastfieldswriter regroup all of the fast field writers.
+/// The `FastFieldsWriter` groups all of the fast field writers.
 pub struct FastFieldsWriter {
+    term_id_writers: Vec<MultiValuedFastFieldWriter>,
     single_value_writers: Vec<IntFastFieldWriter>,
     multi_values_writers: Vec<MultiValuedFastFieldWriter>,
     bytes_value_writers: Vec<BytesFastFieldWriter>,
@@ -33,6 +34,7 @@ impl FastFieldsWriter {
     /// Create all `FastFieldWriter` required by the schema.
     pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
         let mut single_value_writers = Vec::new();
+        let mut term_id_writers = Vec::new();
         let mut multi_values_writers = Vec::new();
         let mut bytes_value_writers = Vec::new();
 
@@ -50,15 +52,22 @@ impl FastFieldsWriter {
                         single_value_writers.push(fast_field_writer);
                     }
                     Some(Cardinality::MultiValues) => {
-                        let fast_field_writer = MultiValuedFastFieldWriter::new(field, false);
+                        let fast_field_writer =
+                            MultiValuedFastFieldWriter::new(field, FastFieldType::Numeric);
                         multi_values_writers.push(fast_field_writer);
                     }
                     None => {}
                 }
             }
             FieldType::Facet(_) => {
-                let fast_field_writer = MultiValuedFastFieldWriter::new(field, true);
-                multi_values_writers.push(fast_field_writer);
+                let fast_field_writer =
+                    MultiValuedFastFieldWriter::new(field, FastFieldType::Facet);
+                term_id_writers.push(fast_field_writer);
+            }
+            FieldType::Str(_) if field_entry.is_fast() => {
+                let fast_field_writer =
+                    MultiValuedFastFieldWriter::new(field, FastFieldType::String);
+                term_id_writers.push(fast_field_writer);
             }
             FieldType::Bytes(bytes_option) => {
                 if bytes_option.is_fast() {
@@ -70,6 +79,7 @@ impl FastFieldsWriter {
             }
         }
         FastFieldsWriter {
+            term_id_writers,
            single_value_writers,
            multi_values_writers,
            bytes_value_writers,
@@ -78,10 +88,15 @@ impl FastFieldsWriter {
 
     /// The memory used (inclusive childs)
     pub fn mem_usage(&self) -> usize {
-        self.single_value_writers
+        self.term_id_writers
             .iter()
             .map(|w| w.mem_usage())
             .sum::<usize>()
+            + self
+                .single_value_writers
+                .iter()
+                .map(|w| w.mem_usage())
+                .sum::<usize>()
             + self
                 .multi_values_writers
                 .iter()
@@ -94,6 +109,14 @@ impl FastFieldsWriter {
             .sum::<usize>()
     }
 
+    /// Get the `FastFieldWriter` associated to a field.
+    pub fn get_term_id_writer(&self, field: Field) -> Option<&MultiValuedFastFieldWriter> {
+        // TODO optimize
+        self.term_id_writers
+            .iter()
+            .find(|field_writer| field_writer.field() == field)
+    }
+
     /// Get the `FastFieldWriter` associated to a field.
     pub fn get_field_writer(&self, field: Field) -> Option<&IntFastFieldWriter> {
         // TODO optimize
@@ -110,6 +133,17 @@ impl FastFieldsWriter {
             .find(|field_writer| field_writer.field() == field)
     }
 
+    /// Get the `FastFieldWriter` associated to a field.
+    pub fn get_term_id_writer_mut(
+        &mut self,
+        field: Field,
+    ) -> Option<&mut MultiValuedFastFieldWriter> {
+        // TODO optimize
+        self.term_id_writers
+            .iter_mut()
+            .find(|field_writer| field_writer.field() == field)
+    }
+
     /// Returns the fast field multi-value writer for the given field.
     ///
     /// Returns None if the field does not exist, or is not
@@ -137,6 +171,9 @@ impl FastFieldsWriter {
 
     /// Indexes all of the fastfields of a new document.
     pub fn add_document(&mut self, doc: &Document) {
+        for field_writer in &mut self.term_id_writers {
+            field_writer.add_document(doc);
+        }
         for field_writer in &mut self.single_value_writers {
             field_writer.add_document(doc);
         }
@@ -156,6 +193,10 @@ impl FastFieldsWriter {
         mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
         doc_id_map: Option<&DocIdMapping>,
     ) -> io::Result<()> {
+        for field_writer in &self.term_id_writers {
+            let field = field_writer.field();
+            field_writer.serialize(serializer, mapping.get(&field), doc_id_map)?;
+        }
         for field_writer in &self.single_value_writers {
             field_writer.serialize(serializer, doc_id_map)?;
         }
@@ -244,6 +285,10 @@ impl IntFastFieldWriter {
         self.val_count += 1;
     }
 
+    /// Extract the fast field value from the document
+    /// (or use the default value) and records it.
+    ///
+    ///
     /// Extract the value associated to the fast field for
     /// this document.
     ///
@@ -254,18 +299,17 @@ impl IntFastFieldWriter {
     /// instead.
     /// If the document has more than one value for the given field,
     /// only the first one is taken in account.
-    fn extract_val(&self, doc: &Document) -> u64 {
-        match doc.get_first(self.field) {
-            Some(v) => super::value_to_u64(v),
-            None => self.val_if_missing,
-        }
-    }
-
-    /// Extract the fast field value from the document
-    /// (or use the default value) and records it.
+    ///
+    /// Values on text fast fields are skipped.
     pub fn add_document(&mut self, doc: &Document) {
-        let val = self.extract_val(doc);
-        self.add_val(val);
+        match doc.get_first(self.field) {
+            Some(v) => {
+                self.add_val(super::value_to_u64(v));
+            }
+            None => {
+                self.add_val(self.val_if_missing);
+            }
+        };
     }
 
     /// get iterator over the data
@@ -284,6 +328,7 @@ impl IntFastFieldWriter {
         } else {
             (self.val_min, self.val_max)
         };
+
         let fastfield_accessor = WriterFastFieldAccessProvider {
             doc_id_map,
             vals: &self.vals,

src/future_result.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
+use std::future::Future;
+use std::pin::Pin;
+use std::task::Poll;
+
+use crate::TantivyError;
+
+/// `FutureResult` is a handle that makes it possible to wait for the completion
+/// of an ongoing task.
+///
+/// Contrary to some `Future`, it does not need to be polled for the task to
+/// progress. Dropping the `FutureResult` does not cancel the task being executed
+/// either.
+///
+/// - In a sync context, you can call `FutureResult::wait()`. The function
+///   does not rely on `block_on`.
+/// - In an async context, you can call simply use `FutureResult` as a future.
+pub struct FutureResult<T> {
+    inner: Inner<T>,
+}
+
+enum Inner<T> {
+    FailedBeforeStart(Option<TantivyError>),
+    InProgress {
+        receiver: oneshot::Receiver<crate::Result<T>>,
+        error_msg_if_failure: &'static str,
+    },
+}
+
+impl<T> From<TantivyError> for FutureResult<T> {
+    fn from(err: TantivyError) -> Self {
+        FutureResult {
+            inner: Inner::FailedBeforeStart(Some(err)),
+        }
+    }
+}
+
+impl<T> FutureResult<T> {
+    pub(crate) fn create(
+        error_msg_if_failure: &'static str,
+    ) -> (Self, oneshot::Sender<crate::Result<T>>) {
+        let (sender, receiver) = oneshot::channel();
+        let inner: Inner<T> = Inner::InProgress {
+            receiver,
+            error_msg_if_failure,
+        };
+        (FutureResult { inner }, sender)
+    }
+
+    /// Blocks until the scheduled result is available.
+    ///
+    /// In an async context, you should simply use `ScheduledResult` as a future.
+    pub fn wait(self) -> crate::Result<T> {
+        match self.inner {
+            Inner::FailedBeforeStart(err) => Err(err.unwrap()),
+            Inner::InProgress {
+                receiver,
+                error_msg_if_failure,
+            } => receiver.recv().unwrap_or_else(|_| {
+                Err(crate::TantivyError::SystemError(
+                    error_msg_if_failure.to_string(),
+                ))
+            }),
+        }
+    }
+}
+
+impl<T> Future for FutureResult<T> {
+    type Output = crate::Result<T>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+        unsafe {
+            match &mut Pin::get_unchecked_mut(self).inner {
+                Inner::FailedBeforeStart(err) => Poll::Ready(Err(err.take().unwrap())),
+                Inner::InProgress {
+                    receiver,
+                    error_msg_if_failure,
+                } => match Future::poll(Pin::new_unchecked(receiver), cx) {
+                    Poll::Ready(oneshot_res) => {
+                        let res = oneshot_res.unwrap_or_else(|_| {
+                            Err(crate::TantivyError::SystemError(
+                                error_msg_if_failure.to_string(),
+                            ))
+                        });
+                        Poll::Ready(res)
+                    }
+                    Poll::Pending => Poll::Pending,
+                },
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use futures::executor::block_on;
+
+    use super::FutureResult;
+    use crate::TantivyError;
+
+    #[test]
+    fn test_scheduled_result_failed_to_schedule() {
+        let scheduled_result: FutureResult<()> = FutureResult::from(TantivyError::Poisoned);
+        let res = block_on(scheduled_result);
+        assert!(matches!(res, Err(TantivyError::Poisoned)));
+    }
+
+    #[test]
+
+    fn test_scheduled_result_error() {
+        let (scheduled_result, tx): (FutureResult<()>, _) = FutureResult::create("failed");
+        drop(tx);
+        let res = block_on(scheduled_result);
+        assert!(matches!(res, Err(TantivyError::SystemError(_))));
+    }
+
+    #[test]
+    fn test_scheduled_result_sent_success() {
+        let (scheduled_result, tx): (FutureResult<u64>, _) = FutureResult::create("failed");
+        tx.send(Ok(2u64)).unwrap();
+        assert_eq!(block_on(scheduled_result).unwrap(), 2u64);
+    }
+
+    #[test]
+    fn test_scheduled_result_sent_error() {
+        let (scheduled_result, tx): (FutureResult<u64>, _) = FutureResult::create("failed");
+        tx.send(Err(TantivyError::Poisoned)).unwrap();
+        let res = block_on(scheduled_result);
+        assert!(matches!(res, Err(TantivyError::Poisoned)));
+    }
+}
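
The `FutureResult` handle above is what `IndexWriter::merge` and the other scheduling methods now return instead of a plain `impl Future`. A sketch of the two ways to consume it, assuming only the signatures shown in this diff (function names are illustrative):

```rust
use tantivy::{IndexWriter, SegmentId, SegmentMeta};

// Synchronous caller: no async executor is needed, the merge thread does the work.
fn merge_blocking(writer: &mut IndexWriter, ids: &[SegmentId]) -> tantivy::Result<SegmentMeta> {
    writer.merge(ids).wait()
}

// Asynchronous caller: `FutureResult<T>` also implements `Future`.
async fn merge_async(writer: &mut IndexWriter, ids: &[SegmentId]) -> tantivy::Result<SegmentMeta> {
    writer.merge(ids).await
}
```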
@@ -221,7 +221,7 @@ impl DeleteCursor {
     }
 
     /// Advance to the next delete operation.
-    /// Returns true iff there is such an operation.
+    /// Returns true if and only if there is such an operation.
     pub fn advance(&mut self) -> bool {
         if self.load_block_if_required() {
             self.pos += 1;

@@ -116,14 +116,14 @@ pub fn demux(
 ) -> crate::Result<Vec<Index>> {
     let mut indices = vec![];
     for (target_segment_ord, output_directory) in output_directories.into_iter().enumerate() {
-        let delete_bitsets = get_alive_bitsets(demux_mapping, target_segment_ord as u32)
+        let alive_bitset = get_alive_bitsets(demux_mapping, target_segment_ord as u32)
             .into_iter()
             .map(Some)
             .collect_vec();
         let index = merge_filtered_segments(
             segments,
             target_settings.clone(),
-            delete_bitsets,
+            alive_bitset,
             output_directory,
         )?;
         indices.push(index);
@@ -141,7 +141,7 @@ mod tests {
     use crate::{DocAddress, Term};
 
     #[test]
-    fn test_demux_map_to_deletebitset() {
+    fn test_demux_map_to_alive_bitset() {
         let max_value = 2;
         let mut demux_mapping = DemuxMapping::default();
         // segment ordinal 0 mapping

@@ -168,12 +168,12 @@ mod tests_indexsorting {
     let my_string_field = schema_builder.add_text_field("string_field", STRING | STORED);
     let my_number = schema_builder.add_u64_field(
         "my_number",
-        IntOptions::default().set_fast(Cardinality::SingleValue),
+        NumericOptions::default().set_fast(Cardinality::SingleValue),
     );
 
     let multi_numbers = schema_builder.add_u64_field(
         "multi_numbers",
-        IntOptions::default().set_fast(Cardinality::MultiValues),
+        NumericOptions::default().set_fast(Cardinality::MultiValues),
     );
 
     let schema = schema_builder.build();

@@ -4,9 +4,6 @@ use std::thread;
 use std::thread::JoinHandle;
 
 use common::BitSet;
-use crossbeam::channel;
-use futures::executor::block_on;
-use futures::future::Future;
 use smallvec::smallvec;
 
 use super::operation::{AddOperation, UserOperation};
@@ -24,7 +21,7 @@ use crate::indexer::operation::DeleteOperation;
 use crate::indexer::stamper::Stamper;
 use crate::indexer::{MergePolicy, SegmentEntry, SegmentWriter};
 use crate::schema::{Document, IndexRecordOption, Term};
-use crate::Opstamp;
+use crate::{FutureResult, Opstamp};
 
 // Size of the margin for the `memory_arena`. A segment is closed when the remaining memory
 // in the `memory_arena` goes below MARGIN_IN_BYTES.
@@ -214,7 +211,7 @@ fn index_documents(
     meta.untrack_temp_docstore();
     // update segment_updater inventory to remove tempstore
     let segment_entry = SegmentEntry::new(meta, delete_cursor, alive_bitset_opt);
-    block_on(segment_updater.schedule_add_segment(segment_entry))?;
+    segment_updater.schedule_add_segment(segment_entry).wait()?;
     Ok(())
 }
 
@@ -291,7 +288,7 @@ impl IndexWriter {
             return Err(TantivyError::InvalidArgument(err_msg));
         }
         let (document_sender, document_receiver): (AddBatchSender, AddBatchReceiver) =
-            channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
+            crossbeam_channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
 
         let delete_queue = DeleteQueue::new();
 
@@ -328,7 +325,7 @@ impl IndexWriter {
     }
 
     fn drop_sender(&mut self) {
-        let (sender, _receiver) = channel::bounded(1);
+        let (sender, _receiver) = crossbeam_channel::bounded(1);
         self.operation_sender = sender;
     }
 
@@ -368,7 +365,9 @@ impl IndexWriter {
     pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
         let delete_cursor = self.delete_queue.cursor();
         let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
-        block_on(self.segment_updater.schedule_add_segment(segment_entry))
+        self.segment_updater
+            .schedule_add_segment(segment_entry)
+            .wait()
     }
 
     /// Creates a new segment.
@@ -465,8 +464,8 @@ impl IndexWriter {
     }
 
     /// Detects and removes the files that are not used by the index anymore.
-    pub async fn garbage_collect_files(&self) -> crate::Result<GarbageCollectionResult> {
-        self.segment_updater.schedule_garbage_collect().await
+    pub fn garbage_collect_files(&self) -> FutureResult<GarbageCollectionResult> {
+        self.segment_updater.schedule_garbage_collect()
     }
 
     /// Deletes all documents from the index
@@ -516,13 +515,10 @@ impl IndexWriter {
     /// Merges a given list of segments
     ///
     /// `segment_ids` is required to be non-empty.
-    pub fn merge(
-        &mut self,
-        segment_ids: &[SegmentId],
-    ) -> impl Future<Output = crate::Result<SegmentMeta>> {
+    pub fn merge(&mut self, segment_ids: &[SegmentId]) -> FutureResult<SegmentMeta> {
         let merge_operation = self.segment_updater.make_merge_operation(segment_ids);
         let segment_updater = self.segment_updater.clone();
-        async move { segment_updater.start_merge(merge_operation)?.await }
+        segment_updater.start_merge(merge_operation)
     }
 
     /// Closes the current document channel send.
@@ -535,7 +531,7 @@ impl IndexWriter {
     /// Returns the former segment_ready channel.
     fn recreate_document_channel(&mut self) {
         let (document_sender, document_receiver): (AddBatchSender, AddBatchReceiver) =
-            channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
+            crossbeam_channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
         self.operation_sender = document_sender;
         self.index_writer_status = IndexWriterStatus::from(document_receiver);
     }
@@ -781,7 +777,6 @@ impl Drop for IndexWriter {
 mod tests {
     use std::collections::{HashMap, HashSet};
 
-    use futures::executor::block_on;
     use proptest::prelude::*;
     use proptest::prop_oneof;
     use proptest::strategy::Strategy;
@@ -794,8 +789,8 @@ mod tests {
     use crate::indexer::NoMergePolicy;
     use crate::query::{QueryParser, TermQuery};
     use crate::schema::{
-        self, Cardinality, Facet, FacetOptions, IndexRecordOption, IntOptions, TextFieldIndexing,
-        TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
+        self, Cardinality, Facet, FacetOptions, IndexRecordOption, NumericOptions,
+        TextFieldIndexing, TextOptions, FAST, INDEXED, STORED, STRING, TEXT,
     };
     use crate::{DocAddress, Index, IndexSettings, IndexSortByField, Order, ReloadPolicy, Term};
 
@@ -1404,7 +1399,7 @@ mod tests {
 
         let multi_numbers = schema_builder.add_u64_field(
             "multi_numbers",
-            IntOptions::default()
+            NumericOptions::default()
                 .set_fast(Cardinality::MultiValues)
                 .set_stored(),
         );
@@ -1456,7 +1451,7 @@ mod tests {
            .searchable_segment_ids()
            .expect("Searchable segments failed.");
        if segment_ids.len() >= 2 {
-            block_on(index_writer.merge(&segment_ids)).unwrap();
+            index_writer.merge(&segment_ids).wait().unwrap();
            assert!(index_writer.segment_updater().wait_merging_thread().is_ok());
        }
    }
@@ -1472,7 +1467,7 @@ mod tests {
           .searchable_segment_ids()
           .expect("Searchable segments failed.");
       if segment_ids.len() >= 2 {
-            block_on(index_writer.merge(&segment_ids)).unwrap();
+            index_writer.merge(&segment_ids).wait().unwrap();
           assert!(index_writer.wait_merging_threads().is_ok());
       }
   }

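
The same migration applies to `garbage_collect_files`, which is no longer an `async fn`: call sites that used to drive it with `block_on` can now call `.wait()` on the returned handle (or `.await` it). A brief sketch under that assumption:

```rust
use tantivy::IndexWriter;

fn cleanup(index_writer: &IndexWriter) -> tantivy::Result<()> {
    // Previously: futures::executor::block_on(index_writer.garbage_collect_files())?;
    let _report = index_writer.garbage_collect_files().wait()?;
    Ok(())
}
```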
@@ -92,7 +92,7 @@ impl Drop for IndexWriterBomb {
 mod tests {
     use std::mem;
 
-    use crossbeam::channel;
+    use crossbeam_channel as channel;
 
     use super::IndexWriterStatus;
 

482
src/indexer/json_term_writer.rs
Normal file
@@ -0,0 +1,482 @@
|
|||||||
|
use fnv::FnvHashMap;
|
||||||
|
use murmurhash32::murmurhash2;
|
||||||
|
|
||||||
|
use crate::fastfield::FastValue;
|
||||||
|
use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
|
||||||
|
use crate::schema::term::{JSON_END_OF_PATH, JSON_PATH_SEGMENT_SEP};
|
||||||
|
use crate::schema::{Field, Type};
|
||||||
|
use crate::time::format_description::well_known::Rfc3339;
|
||||||
|
use crate::time::{OffsetDateTime, UtcOffset};
|
||||||
|
use crate::tokenizer::TextAnalyzer;
|
||||||
|
use crate::{DateTime, DocId, Term};
|
||||||
|
|
||||||
|
/// This object is a map storing the last position for a given path for the current document
|
||||||
|
/// being indexed.
|
||||||
|
///
|
||||||
|
/// It is key to solve the following problem:
|
||||||
|
/// If we index a JsonObject emitting several terms with the same path
|
||||||
|
/// we do not want to create false positive in phrase queries.
|
||||||
|
///
|
||||||
|
/// For instance:
|
||||||
|
///
|
||||||
|
/// ```json
|
||||||
|
/// {"bands": [
|
||||||
|
/// {"band_name": "Elliot Smith"},
|
||||||
|
/// {"band_name": "The Who"},
|
||||||
|
/// ]}
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// If we are careless and index each band names independently,
|
||||||
|
/// `Elliot` and `The` will end up indexed at position 0, and `Smith` and `Who` will be indexed at
|
||||||
|
/// position 1.
|
||||||
|
/// As a result, with lemmatization, "The Smiths" will match our object.
|
||||||
|
///
|
||||||
|
/// Worse, if a same term is appears in the second object, a non increasing value would be pushed
|
||||||
|
/// to the position recorder probably provoking a panic.
|
||||||
|
///
|
||||||
|
/// This problem is solved for regular multivalued object by offsetting the position
|
||||||
|
/// of values, with a position gap. Here we would like `The` and `Who` to get indexed at
|
||||||
|
/// position 2 and 3 respectively.
|
||||||
|
///
|
||||||
|
/// With regular fields, we sort the fields beforehands, so that all terms with the same
|
||||||
|
/// path are indexed consecutively.
|
||||||
|
///
|
||||||
|
/// In JSON object, we do not have this confort, so we need to record these position offsets in
|
||||||
|
/// a map.
|
||||||
|
///
|
||||||
|
/// Note that using a single position for the entire object would not hurt correctness.
|
||||||
|
/// It would however hurt compression.
|
||||||
|
///
|
||||||
|
/// We can therefore afford working with a map that is not imperfect. It is fine if several
|
||||||
|
/// path map to the same index position as long as the probability is relatively low.
|
||||||
|
#[derive(Default)]
|
||||||
|
struct IndexingPositionsPerPath {
|
||||||
|
positions_per_path: FnvHashMap<u32, IndexingPosition>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IndexingPositionsPerPath {
|
||||||
|
fn get_position(&mut self, term: &Term) -> &mut IndexingPosition {
|
||||||
|
self.positions_per_path
|
||||||
|
.entry(murmurhash2(term.as_slice()))
|
||||||
|
.or_insert_with(Default::default)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn index_json_values<'a>(
|
||||||
|
doc: DocId,
|
||||||
|
json_values: impl Iterator<Item = crate::Result<&'a serde_json::Map<String, serde_json::Value>>>,
|
||||||
|
text_analyzer: &TextAnalyzer,
|
||||||
|
term_buffer: &mut Term,
|
||||||
|
postings_writer: &mut dyn PostingsWriter,
|
||||||
|
ctx: &mut IndexingContext,
|
||||||
|
) -> crate::Result<()> {
|
||||||
|
let mut json_term_writer = JsonTermWriter::wrap(term_buffer);
|
||||||
|
let mut positions_per_path: IndexingPositionsPerPath = Default::default();
|
||||||
|
for json_value_res in json_values {
|
||||||
|
let json_value = json_value_res?;
|
||||||
|
index_json_object(
|
||||||
|
doc,
|
||||||
|
json_value,
|
||||||
|
text_analyzer,
|
||||||
|
&mut json_term_writer,
|
||||||
|
postings_writer,
|
||||||
|
ctx,
|
||||||
|
&mut positions_per_path,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn index_json_object<'a>(
|
||||||
|
doc: DocId,
|
||||||
|
json_value: &serde_json::Map<String, serde_json::Value>,
|
||||||
|
text_analyzer: &TextAnalyzer,
|
||||||
|
json_term_writer: &mut JsonTermWriter<'a>,
|
||||||
|
postings_writer: &mut dyn PostingsWriter,
|
||||||
|
ctx: &mut IndexingContext,
|
||||||
|
positions_per_path: &mut IndexingPositionsPerPath,
|
||||||
|
) {
|
||||||
|
for (json_path_segment, json_value) in json_value {
|
||||||
|
json_term_writer.push_path_segment(json_path_segment);
|
||||||
|
index_json_value(
|
||||||
|
doc,
|
||||||
|
json_value,
|
||||||
|
text_analyzer,
|
||||||
|
json_term_writer,
|
||||||
|
postings_writer,
|
||||||
|
ctx,
|
||||||
|
positions_per_path,
|
||||||
|
);
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn index_json_value<'a>(
|
||||||
|
doc: DocId,
|
||||||
|
json_value: &serde_json::Value,
|
||||||
|
text_analyzer: &TextAnalyzer,
|
||||||
|
json_term_writer: &mut JsonTermWriter<'a>,
|
||||||
|
postings_writer: &mut dyn PostingsWriter,
|
||||||
|
ctx: &mut IndexingContext,
|
||||||
|
positions_per_path: &mut IndexingPositionsPerPath,
|
||||||
|
) {
|
||||||
|
match json_value {
|
||||||
|
serde_json::Value::Null => {}
|
||||||
|
serde_json::Value::Bool(val_bool) => {
|
||||||
|
let bool_u64 = if *val_bool { 1u64 } else { 0u64 };
|
||||||
|
json_term_writer.set_fast_value(bool_u64);
|
||||||
|
postings_writer.subscribe(doc, 0u32, json_term_writer.term(), ctx);
|
||||||
|
}
|
||||||
|
serde_json::Value::Number(number) => {
|
||||||
|
if let Some(number_u64) = number.as_u64() {
|
||||||
|
json_term_writer.set_fast_value(number_u64);
|
||||||
|
} else if let Some(number_i64) = number.as_i64() {
|
||||||
|
json_term_writer.set_fast_value(number_i64);
|
||||||
|
} else if let Some(number_f64) = number.as_f64() {
|
||||||
|
json_term_writer.set_fast_value(number_f64);
|
||||||
|
}
|
||||||
|
postings_writer.subscribe(doc, 0u32, json_term_writer.term(), ctx);
|
||||||
|
}
|
||||||
|
serde_json::Value::String(text) => match infer_type_from_str(text) {
|
||||||
|
TextOrDateTime::Text(text) => {
|
||||||
|
let mut token_stream = text_analyzer.token_stream(text);
|
||||||
|
// TODO make sure the chain position works out.
|
||||||
|
json_term_writer.close_path_and_set_type(Type::Str);
|
||||||
|
let indexing_position = positions_per_path.get_position(json_term_writer.term());
|
||||||
|
postings_writer.index_text(
|
||||||
|
doc,
|
||||||
|
&mut *token_stream,
|
||||||
|
json_term_writer.term_buffer,
|
||||||
|
ctx,
|
||||||
|
indexing_position,
|
||||||
|
None,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
TextOrDateTime::DateTime(dt) => {
|
||||||
|
json_term_writer.set_fast_value(DateTime::from_utc(dt));
|
||||||
|
postings_writer.subscribe(doc, 0u32, json_term_writer.term(), ctx);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
serde_json::Value::Array(arr) => {
|
||||||
|
for val in arr {
|
||||||
|
index_json_value(
|
||||||
|
doc,
|
||||||
|
val,
|
||||||
|
text_analyzer,
|
||||||
|
json_term_writer,
|
||||||
|
postings_writer,
|
||||||
|
ctx,
|
||||||
|
positions_per_path,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
serde_json::Value::Object(map) => {
|
||||||
|
index_json_object(
|
||||||
|
doc,
|
||||||
|
map,
|
||||||
|
text_analyzer,
|
||||||
|
json_term_writer,
|
||||||
|
postings_writer,
|
||||||
|
ctx,
|
||||||
|
positions_per_path,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
enum TextOrDateTime<'a> {
|
||||||
|
Text(&'a str),
|
||||||
|
DateTime(OffsetDateTime),
|
||||||
|
}
|
||||||
|
|
||||||
|
fn infer_type_from_str(text: &str) -> TextOrDateTime {
|
||||||
|
match OffsetDateTime::parse(text, &Rfc3339) {
|
||||||
|
Ok(dt) => {
|
||||||
|
let dt_utc = dt.to_offset(UtcOffset::UTC);
|
||||||
|
TextOrDateTime::DateTime(dt_utc)
|
||||||
|
}
|
||||||
|
Err(_) => TextOrDateTime::Text(text),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tries to infer a JSON type from a string
|
||||||
|
pub(crate) fn convert_to_fast_value_and_get_term(
|
||||||
|
json_term_writer: &mut JsonTermWriter,
|
||||||
|
phrase: &str,
|
||||||
|
) -> Option<Term> {
|
||||||
|
if let Ok(dt) = OffsetDateTime::parse(phrase, &Rfc3339) {
|
||||||
|
let dt_utc = dt.to_offset(UtcOffset::UTC);
|
||||||
|
return Some(set_fastvalue_and_get_term(
|
||||||
|
json_term_writer,
|
||||||
|
DateTime::from_utc(dt_utc),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if let Ok(u64_val) = str::parse::<u64>(phrase) {
|
||||||
|
return Some(set_fastvalue_and_get_term(json_term_writer, u64_val));
|
||||||
|
}
|
||||||
|
if let Ok(i64_val) = str::parse::<i64>(phrase) {
|
||||||
|
return Some(set_fastvalue_and_get_term(json_term_writer, i64_val));
|
||||||
|
}
|
||||||
|
if let Ok(f64_val) = str::parse::<f64>(phrase) {
|
||||||
|
return Some(set_fastvalue_and_get_term(json_term_writer, f64_val));
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
// helper function to generate a Term from a json fastvalue
|
||||||
|
pub(crate) fn set_fastvalue_and_get_term<T: FastValue>(
|
||||||
|
json_term_writer: &mut JsonTermWriter,
|
||||||
|
value: T,
|
||||||
|
) -> Term {
|
||||||
|
json_term_writer.set_fast_value(value);
|
||||||
|
json_term_writer.term().clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
// helper function to generate a list of terms with their positions from a textual json value
|
||||||
|
pub(crate) fn set_string_and_get_terms(
|
||||||
|
json_term_writer: &mut JsonTermWriter,
|
||||||
|
value: &str,
|
||||||
|
text_analyzer: &TextAnalyzer,
|
||||||
|
) -> Vec<(usize, Term)> {
|
||||||
|
let mut positions_and_terms = Vec::<(usize, Term)>::new();
|
||||||
|
json_term_writer.close_path_and_set_type(Type::Str);
|
||||||
|
let term_num_bytes = json_term_writer.term_buffer.as_slice().len();
|
||||||
|
let mut token_stream = text_analyzer.token_stream(value);
|
||||||
|
token_stream.process(&mut |token| {
|
||||||
|
json_term_writer.term_buffer.truncate(term_num_bytes);
|
||||||
|
json_term_writer
|
||||||
|
.term_buffer
|
||||||
|
.append_bytes(token.text.as_bytes());
|
||||||
|
positions_and_terms.push((token.position, json_term_writer.term().clone()));
|
||||||
|
});
|
||||||
|
positions_and_terms
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct JsonTermWriter<'a> {
|
||||||
|
term_buffer: &'a mut Term,
|
||||||
|
path_stack: Vec<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> JsonTermWriter<'a> {
|
||||||
|
pub fn from_field_and_json_path(
|
||||||
|
field: Field,
|
||||||
|
json_path: &str,
|
||||||
|
term_buffer: &'a mut Term,
|
||||||
|
) -> Self {
|
||||||
|
term_buffer.set_field(Type::Json, field);
|
||||||
|
let mut json_term_writer = Self::wrap(term_buffer);
|
||||||
|
for segment in json_path.split('.') {
|
||||||
|
json_term_writer.push_path_segment(segment);
|
||||||
|
}
|
||||||
|
json_term_writer
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn wrap(term_buffer: &'a mut Term) -> Self {
|
||||||
|
term_buffer.clear_with_type(Type::Json);
|
||||||
|
let mut path_stack = Vec::with_capacity(10);
|
||||||
|
path_stack.push(5);
|
||||||
|
Self {
|
||||||
|
term_buffer,
|
||||||
|
path_stack,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn trim_to_end_of_path(&mut self) {
|
||||||
|
let end_of_path = *self.path_stack.last().unwrap();
|
||||||
|
self.term_buffer.truncate(end_of_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn close_path_and_set_type(&mut self, typ: Type) {
|
||||||
|
self.trim_to_end_of_path();
|
||||||
|
let buffer = self.term_buffer.as_mut();
|
||||||
|
let buffer_len = buffer.len();
|
||||||
|
buffer[buffer_len - 1] = JSON_END_OF_PATH;
|
||||||
|
buffer.push(typ.to_code());
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn push_path_segment(&mut self, segment: &str) {
|
||||||
|
// the path stack should never be empty.
|
||||||
|
self.trim_to_end_of_path();
|
||||||
|
let buffer = self.term_buffer.as_mut();
|
||||||
|
let buffer_len = buffer.len();
|
||||||
|
if self.path_stack.len() > 1 {
|
||||||
|
buffer[buffer_len - 1] = JSON_PATH_SEGMENT_SEP;
|
||||||
|
}
|
||||||
|
buffer.extend(segment.as_bytes());
|
||||||
|
buffer.push(JSON_PATH_SEGMENT_SEP);
|
||||||
|
self.path_stack.push(buffer.len());
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pop_path_segment(&mut self) {
|
||||||
|
self.path_stack.pop();
|
||||||
|
assert!(!self.path_stack.is_empty());
|
||||||
|
self.trim_to_end_of_path();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the json path of the term being currently built.
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn path(&self) -> &[u8] {
|
||||||
|
let end_of_path = self.path_stack.last().cloned().unwrap_or(6);
|
||||||
|
&self.term().as_slice()[5..end_of_path - 1]
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_fast_value<T: FastValue>(&mut self, val: T) {
|
||||||
|
self.close_path_and_set_type(T::to_type());
|
||||||
|
self.term_buffer
|
||||||
|
.as_mut()
|
||||||
|
.extend_from_slice(val.to_u64().to_be_bytes().as_slice());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn set_str(&mut self, text: &str) {
|
||||||
|
self.close_path_and_set_type(Type::Str);
|
||||||
|
self.term_buffer.as_mut().extend_from_slice(text.as_bytes());
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn term(&self) -> &Term {
|
||||||
|
self.term_buffer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::JsonTermWriter;
|
||||||
|
use crate::schema::{Field, Type};
|
||||||
|
use crate::Term;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_json_writer() {
|
||||||
|
let field = Field::from_field_id(1);
|
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("attributes");
        json_writer.push_path_segment("color");
        json_writer.set_str("red");
        assert_eq!(
            format!("{:?}", json_writer.term()),
            "Term(type=Json, field=1, path=attributes.color, vtype=Str, \"red\")"
        );
        json_writer.set_str("blue");
        assert_eq!(
            format!("{:?}", json_writer.term()),
            "Term(type=Json, field=1, path=attributes.color, vtype=Str, \"blue\")"
        );
        json_writer.pop_path_segment();
        json_writer.push_path_segment("dimensions");
        json_writer.push_path_segment("width");
        json_writer.set_fast_value(400i64);
        assert_eq!(
            format!("{:?}", json_writer.term()),
            "Term(type=Json, field=1, path=attributes.dimensions.width, vtype=I64, 400)"
        );
        json_writer.pop_path_segment();
        json_writer.push_path_segment("height");
        json_writer.set_fast_value(300i64);
        assert_eq!(
            format!("{:?}", json_writer.term()),
            "Term(type=Json, field=1, path=attributes.dimensions.height, vtype=I64, 300)"
        );
    }

    #[test]
    fn test_string_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("color");
        json_writer.set_str("red");
        assert_eq!(
            json_writer.term().as_slice(),
            b"\x00\x00\x00\x01jcolor\x00sred"
        )
    }

    #[test]
    fn test_i64_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("color");
        json_writer.set_fast_value(-4i64);
        assert_eq!(
            json_writer.term().as_slice(),
            b"\x00\x00\x00\x01jcolor\x00i\x7f\xff\xff\xff\xff\xff\xff\xfc"
        )
    }

    #[test]
    fn test_u64_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("color");
        json_writer.set_fast_value(4u64);
        assert_eq!(
            json_writer.term().as_slice(),
            b"\x00\x00\x00\x01jcolor\x00u\x00\x00\x00\x00\x00\x00\x00\x04"
        )
    }

    #[test]
    fn test_f64_term() {
        let field = Field::from_field_id(1);
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("color");
        json_writer.set_fast_value(4.0f64);
        assert_eq!(
            json_writer.term().as_slice(),
            b"\x00\x00\x00\x01jcolor\x00f\xc0\x10\x00\x00\x00\x00\x00\x00"
        )
    }

    #[test]
    fn test_push_after_set_path_segment() {
        let field = Field::from_field_id(1);
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("attribute");
        json_writer.set_str("something");
        json_writer.push_path_segment("color");
        json_writer.set_str("red");
        assert_eq!(
            json_writer.term().as_slice(),
            b"\x00\x00\x00\x01jattribute\x01color\x00sred"
        )
    }

    #[test]
    fn test_pop_segment() {
        let field = Field::from_field_id(1);
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("color");
        json_writer.push_path_segment("hue");
        json_writer.pop_path_segment();
        json_writer.set_str("red");
        assert_eq!(
            json_writer.term().as_slice(),
            b"\x00\x00\x00\x01jcolor\x00sred"
        )
    }

    #[test]
    fn test_json_writer_path() {
        let field = Field::from_field_id(1);
        let mut term = Term::new();
        term.set_field(Type::Json, field);
        let mut json_writer = JsonTermWriter::wrap(&mut term);
        json_writer.push_path_segment("color");
        assert_eq!(json_writer.path(), b"color");
        json_writer.push_path_segment("hue");
        assert_eq!(json_writer.path(), b"color\x01hue");
        json_writer.set_str("pink");
        assert_eq!(json_writer.path(), b"color\x01hue");
    }
}
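The byte-level assertions in these tests pin down the layout of a JSON term: four big-endian bytes of field id, the type code `j`, the path segments separated by `\x01`, a `\x00` end-of-path marker, a one-byte value-type code (`s`, `i`, `u`, `f`), and finally the value bytes. The following standalone sketch rebuilds the string case from those constants only; `encode_json_str_term` is a hypothetical helper for illustration, not tantivy's own encoder.

```rust
/// Illustrative re-implementation of the layout asserted above (string values only).
fn encode_json_str_term(field_id: u32, path: &[&str], value: &str) -> Vec<u8> {
    let mut term = Vec::new();
    term.extend_from_slice(&field_id.to_be_bytes()); // 4-byte field id
    term.push(b'j'); // type code for a JSON term
    term.extend_from_slice(path.join("\u{1}").as_bytes()); // path segments, \x01-separated
    term.push(0u8); // end-of-path marker
    term.push(b's'); // value type: Str
    term.extend_from_slice(value.as_bytes());
    term
}

fn main() {
    // Mirrors `test_string_term`.
    assert_eq!(
        encode_json_str_term(1, &["color"], "red"),
        b"\x00\x00\x00\x01jcolor\x00sred"
    );
    // Mirrors `test_push_after_set_path_segment`: two path segments.
    assert_eq!(
        encode_json_str_term(1, &["attribute", "color"], "red"),
        b"\x00\x00\x00\x01jattribute\x01color\x00sred"
    );
}
```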
@@ -170,8 +170,8 @@ impl IndexMerger {
index_settings: IndexSettings,
segments: &[Segment],
) -> crate::Result<IndexMerger> {
-let delete_bitsets = segments.iter().map(|_| None).collect_vec();
-Self::open_with_custom_alive_set(schema, index_settings, segments, delete_bitsets)
+let alive_bitset = segments.iter().map(|_| None).collect_vec();
+Self::open_with_custom_alive_set(schema, index_settings, segments, alive_bitset)
}

// Create merge with a custom delete set.
@@ -180,7 +180,7 @@ impl IndexMerger {
// corresponds to the segment index.
//
// If `None` is provided for custom alive set, the regular alive set will be used.
-// If a delete_bitsets is provided, the union between the provided and regular
+// If a alive_bitset is provided, the union between the provided and regular
// alive set will be used.
//
// This can be used to merge but also apply an additional filter.
@@ -278,17 +278,17 @@ impl IndexMerger {
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
-debug_time!("write_fast_fields");
+debug_time!("write-fast-fields");

for (field, field_entry) in self.schema.fields() {
let field_type = field_entry.field_type();
match field_type {
-FieldType::Facet(_) => {
+FieldType::Facet(_) | FieldType::Str(_) if field_type.is_fast() => {
let term_ordinal_mapping = term_ord_mappings.remove(&field).expect(
"Logic Error in Tantivy (Please report). Facet field should have required \
a`term_ordinal_mapping`.",
);
-self.write_hierarchical_facet_field(
+self.write_term_id_fast_field(
field,
&term_ordinal_mapping,
fast_field_serializer,
@@ -312,8 +312,8 @@ impl IndexMerger {
self.write_bytes_fast_field(field, fast_field_serializer, doc_id_mapping)?;
}
}
-FieldType::Str(_) | FieldType::JsonObject(_) => {
-// We don't handle json / string fast field for the moment
+_ => {
+// We don't handle json fast field for the moment
// They can be implemented using what is done
// for facets in the future
}
@@ -590,14 +590,14 @@ impl IndexMerger {
)
}

-fn write_hierarchical_facet_field(
+fn write_term_id_fast_field(
&self,
field: Field,
term_ordinal_mappings: &TermOrdinalMapping,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
-debug_time!("write_hierarchical_facet_field");
+debug_time!("write-term-id-fast-field");

// Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is stricly increasing.
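The comment in `write_term_id_fast_field` above describes the multi-valued fast field layout: two flat columns, where the first is strictly increasing and indexes into the second. The sketch below illustrates that representation with plain vectors; it is an explanatory model only, not tantivy's implementation.

```rust
/// Illustration of a multi-valued fast field as two columns: `idx` has one
/// entry per document plus a sentinel, and `idx[doc]..idx[doc + 1]` delimits
/// the slice of `vals` belonging to `doc`.
struct MultiValuedColumn {
    idx: Vec<u64>,  // strictly non-decreasing, idx[0] == 0
    vals: Vec<u64>, // all values concatenated in doc order
}

impl MultiValuedColumn {
    fn get(&self, doc: usize) -> &[u64] {
        let start = self.idx[doc] as usize;
        let end = self.idx[doc + 1] as usize;
        &self.vals[start..end]
    }
}

fn main() {
    // doc 0 -> [7, 9], doc 1 -> [], doc 2 -> [3]
    let col = MultiValuedColumn {
        idx: vec![0, 2, 2, 3],
        vals: vec![7, 9, 3],
    };
    assert_eq!(col.get(0), &[7u64, 9][..]);
    assert!(col.get(1).is_empty());
    assert_eq!(col.get(2), &[3u64][..]);
}
```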
@@ -827,7 +827,7 @@ impl IndexMerger {
fieldnorm_reader: Option<FieldNormReader>,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<Option<TermOrdinalMapping>> {
-debug_time!("write_postings_for_field");
+debug_time!("write-postings-for-field");
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new();

@@ -848,6 +848,9 @@ impl IndexMerger {

let mut term_ord_mapping_opt = match field_type {
FieldType::Facet(_) => Some(TermOrdinalMapping::new(max_term_ords)),
+FieldType::Str(options) if options.is_fast() => {
+Some(TermOrdinalMapping::new(max_term_ords))
+}
_ => None,
};

@@ -1023,7 +1026,8 @@ impl IndexMerger {
store_writer: &mut StoreWriter,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
-debug_time!("write_storable_fields");
+debug_time!("write-storable-fields");
+debug!("write-storable-field");

let store_readers: Vec<_> = self
.readers
@@ -1036,6 +1040,7 @@ impl IndexMerger {
.map(|(i, store)| store.iter_raw(self.readers[i].alive_bitset()))
.collect();
if !doc_id_mapping.is_trivial() {
+debug!("non-trivial-doc-id-mapping");
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
let doc_bytes_it = &mut document_iterators[*reader_ordinal as usize];
if let Some(doc_bytes_res) = doc_bytes_it.next() {
@@ -1050,6 +1055,7 @@ impl IndexMerger {
}
}
} else {
+debug!("trivial-doc-id-mapping");
for reader in &self.readers {
let store_reader = reader.get_store_reader()?;
if reader.has_deletes()
@@ -1099,10 +1105,11 @@ impl IndexMerger {
} else {
self.get_doc_id_from_concatenated_data()?
};
+debug!("write-fieldnorms");
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
self.write_fieldnorms(fieldnorms_serializer, &doc_id_mapping)?;
}
+debug!("write-postings");
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FieldNorms)?;
@@ -1112,12 +1119,15 @@ impl IndexMerger {
fieldnorm_readers,
&doc_id_mapping,
)?;
+debug!("write-fastfields");
self.write_fast_fields(
serializer.get_fast_field_serializer(),
term_ord_mappings,
&doc_id_mapping,
)?;
+debug!("write-storagefields");
self.write_storable_fields(serializer.get_store_writer(), &doc_id_mapping)?;
+debug!("close-serializer");
serializer.close()?;
Ok(self.max_doc)
}
@@ -1126,7 +1136,6 @@ impl IndexMerger {
#[cfg(test)]
mod tests {
use byteorder::{BigEndian, ReadBytesExt};
-use futures::executor::block_on;
use schema::FAST;

use crate::collector::tests::{
@@ -1137,12 +1146,13 @@ mod tests {
use crate::fastfield::FastFieldReader;
use crate::query::{AllQuery, BooleanQuery, Scorer, TermQuery};
use crate::schema::{
-Cardinality, Document, Facet, FacetOptions, IndexRecordOption, IntOptions, Term,
+Cardinality, Document, Facet, FacetOptions, IndexRecordOption, NumericOptions, Term,
TextFieldIndexing, INDEXED, TEXT,
};
+use crate::time::OffsetDateTime;
use crate::{
-assert_nearly_equals, schema, DocAddress, DocSet, IndexSettings, IndexSortByField,
-IndexWriter, Order, Searcher, SegmentId,
+assert_nearly_equals, schema, DateTime, DocAddress, DocSet, IndexSettings,
+IndexSortByField, IndexWriter, Order, Searcher, SegmentId,
};

#[test]
@@ -1150,26 +1160,24 @@ mod tests {
let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(
-TextFieldIndexing::default()
-.set_tokenizer("default")
-.set_index_option(IndexRecordOption::WithFreqs),
+TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let date_field = schema_builder.add_date_field("date", INDEXED);
-let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
+let score_fieldtype = schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader()?;
-let curr_time = chrono::Utc::now();
+let curr_time = OffsetDateTime::now_utc();
{
let mut index_writer = index.writer_for_tests()?;
// writing the segment
index_writer.add_document(doc!(
text_field => "af b",
score_field => 3u64,
-date_field => curr_time,
+date_field => DateTime::from_utc(curr_time),
bytes_score_field => 3u32.to_be_bytes().as_ref()
))?;
index_writer.add_document(doc!(
@@ -1186,7 +1194,7 @@ mod tests {
// writing the segment
index_writer.add_document(doc!(
text_field => "af b",
-date_field => curr_time,
+date_field => DateTime::from_utc(curr_time),
score_field => 11u64,
bytes_score_field => 11u32.to_be_bytes().as_ref()
))?;
@@ -1202,7 +1210,7 @@ mod tests {
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests()?;
-block_on(index_writer.merge(&segment_ids))?;
+index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
{
@@ -1242,7 +1250,10 @@ mod tests {
]
);
assert_eq!(
-get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)])?,
+get_doc_ids(vec![Term::from_field_date(
+date_field,
+DateTime::from_utc(curr_time)
+)])?,
vec![DocAddress::new(0, 0), DocAddress::new(0, 3)]
);
}
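These test hunks also track the date API migration: chrono values (`chrono::Utc::now()`) are replaced by the `time` crate's `OffsetDateTime` wrapped in tantivy's `DateTime::from_utc`. The sketch below shows the new call shape in isolation, assuming the 0.18 public API (`tantivy::time` re-export, `DateTime::from_utc`, `Index::writer`); it is a usage sketch, not copied from the tests.

```rust
use tantivy::schema::{Schema, INDEXED};
use tantivy::time::OffsetDateTime;
use tantivy::{doc, DateTime, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let date_field = schema_builder.add_date_field("date", INDEXED);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    // chrono values are gone; wrap a `time::OffsetDateTime` instead.
    let now = OffsetDateTime::now_utc();
    writer.add_document(doc!(date_field => DateTime::from_utc(now)))?;
    writer.commit()?;
    Ok(())
}
```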
@@ -1306,7 +1317,7 @@ mod tests {
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
-let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
+let score_fieldtype = schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST);
let index = Index::create_in_ram(schema_builder.build());
@@ -1451,7 +1462,7 @@ mod tests {
{
// merging the segments
let segment_ids = index.searchable_segment_ids()?;
-block_on(index_writer.merge(&segment_ids))?;
+index_writer.merge(&segment_ids).wait()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
@@ -1544,7 +1555,7 @@ mod tests {
{
// Test merging a single segment in order to remove deletes.
let segment_ids = index.searchable_segment_ids()?;
-block_on(index_writer.merge(&segment_ids))?;
+index_writer.merge(&segment_ids).wait()?;
reader.reload()?;

let searcher = reader.searcher();
@@ -1666,7 +1677,7 @@ mod tests {
fn test_merge_facets(index_settings: Option<IndexSettings>, force_segment_value_overlap: bool) {
let mut schema_builder = schema::Schema::builder();
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
-let int_options = IntOptions::default()
+let int_options = NumericOptions::default()
.set_fast(Cardinality::SingleValue)
.set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
@@ -1764,7 +1775,10 @@ mod tests {
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap();
-block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
+index_writer
+.merge(&segment_ids)
+.wait()
+.expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap();
test_searcher(
@@ -1819,7 +1833,7 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
-block_on(index_writer.merge(&segment_ids))?;
+index_writer.merge(&segment_ids).wait()?;
reader.reload()?;
// commit has not been called yet. The document should still be
// there.
@@ -1830,7 +1844,7 @@ mod tests {
#[test]
fn test_merge_multivalued_int_fields_all_deleted() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
-let int_options = IntOptions::default()
+let int_options = NumericOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed();
let int_field = schema_builder.add_u64_field("intvals", int_options);
@@ -1846,7 +1860,7 @@ mod tests {
index_writer.commit()?;
index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index.searchable_segment_ids()?;
-block_on(index_writer.merge(&segment_ids))?;
+index_writer.merge(&segment_ids).wait()?;

// assert delete has not been committed
reader.reload()?;
@@ -1867,7 +1881,7 @@ mod tests {
#[test]
fn test_merge_multivalued_int_fields_simple() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
-let int_options = IntOptions::default()
+let int_options = NumericOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed();
let int_field = schema_builder.add_u64_field("intvals", int_options);
@@ -1947,7 +1961,7 @@ mod tests {
{
let segment_ids = index.searchable_segment_ids()?;
let mut index_writer = index.writer_for_tests()?;
-block_on(index_writer.merge(&segment_ids))?;
+index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
reader.reload()?;
@@ -1994,7 +2008,7 @@ mod tests {
fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();

-let fast_multi = IntOptions::default().set_fast(Cardinality::MultiValues);
+let fast_multi = NumericOptions::default().set_fast(Cardinality::MultiValues);

let field = builder.add_f64_field("f64", schema::FAST);
let multi_field = builder.add_f64_field("f64s", fast_multi);
@@ -2075,7 +2089,7 @@ mod tests {
.iter()
.map(|reader| reader.segment_id())
.collect();
-block_on(writer.merge(&segment_ids[..]))?;
+writer.merge(&segment_ids[..]).wait()?;

reader.reload()?;
let searcher = reader.searcher();
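The recurring change in these merger tests is the removal of `futures::executor::block_on`: `IndexWriter::merge` now returns a handle that callers block on with `.wait()`. Below is a minimal end-to-end sketch of the new call shape; it assumes the 0.18 public API (`Index::create_in_ram`, `Index::writer`, `searchable_segment_ids`) rather than the test-only helpers used above.

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    // A few commits so that there is more than one segment to merge.
    for _ in 0..3 {
        writer.add_document(doc!(text => "hello world"))?;
        writer.commit()?;
    }

    // Old style: futures::executor::block_on(writer.merge(&segment_ids))?;
    // New style: `merge` returns a future-like handle, blocked on with `.wait()`.
    let segment_ids = index.searchable_segment_ids()?;
    writer.merge(&segment_ids).wait()?;
    writer.wait_merging_threads()?;
    Ok(())
}
```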
@@ -1,20 +1,18 @@
#[cfg(test)]
mod tests {
-use futures::executor::block_on;
-
use crate::collector::TopDocs;
use crate::core::Index;
use crate::fastfield::{AliveBitSet, FastFieldReader, MultiValuedFastFieldReader};
use crate::query::QueryParser;
use crate::schema::{
-self, BytesOptions, Cardinality, Facet, FacetOptions, IndexRecordOption, IntOptions,
+self, BytesOptions, Cardinality, Facet, FacetOptions, IndexRecordOption, NumericOptions,
TextFieldIndexing, TextOptions,
};
use crate::{DocAddress, DocSet, IndexSettings, IndexSortByField, Order, Postings, Term};

fn create_test_index_posting_list_issue(index_settings: Option<IndexSettings>) -> Index {
let mut schema_builder = schema::Schema::builder();
-let int_options = IntOptions::default()
+let int_options = NumericOptions::default()
.set_fast(Cardinality::SingleValue)
.set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
@@ -50,7 +48,7 @@ mod tests {
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap();
-assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
+assert!(index_writer.merge(&segment_ids).wait().is_ok());
assert!(index_writer.wait_merging_threads().is_ok());
}
index
@@ -63,7 +61,7 @@ mod tests {
force_disjunct_segment_sort_values: bool,
) -> crate::Result<Index> {
let mut schema_builder = schema::Schema::builder();
-let int_options = IntOptions::default()
+let int_options = NumericOptions::default()
.set_fast(Cardinality::SingleValue)
.set_stored()
.set_indexed();
@@ -75,7 +73,7 @@ mod tests {

let multi_numbers = schema_builder.add_u64_field(
"multi_numbers",
-IntOptions::default().set_fast(Cardinality::MultiValues),
+NumericOptions::default().set_fast(Cardinality::MultiValues),
);
let text_field_options = TextOptions::default()
.set_indexing_options(
@@ -140,7 +138,7 @@ mod tests {
{
let segment_ids = index.searchable_segment_ids()?;
let mut index_writer = index.writer_for_tests()?;
-block_on(index_writer.merge(&segment_ids))?;
+index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
Ok(index)
@@ -486,11 +484,11 @@ mod bench_sorted_index_merge {
// use cratedoc_id, readerdoc_id_mappinglet vals = reader.fate::schema;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::indexer::merger::IndexMerger;
-use crate::schema::{Cardinality, Document, IntOptions, Schema};
+use crate::schema::{Cardinality, Document, NumericOptions, Schema};
use crate::{IndexSettings, IndexSortByField, IndexWriter, Order};
fn create_index(sort_by_field: Option<IndexSortByField>) -> Index {
let mut schema_builder = Schema::builder();
-let int_options = IntOptions::default()
+let int_options = NumericOptions::default()
.set_fast(Cardinality::SingleValue)
.set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
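The other rename that runs through all of these test files is `IntOptions` to `NumericOptions`; the builder methods themselves are unchanged. A minimal schema-building sketch with the new name, using only calls that appear verbatim in the hunks above:

```rust
use tantivy::schema::{Cardinality, NumericOptions, Schema};

fn main() {
    // Same options as before the rename: a single-valued, indexed fast field.
    let int_options = NumericOptions::default()
        .set_fast(Cardinality::SingleValue)
        .set_indexed();

    let mut schema_builder = Schema::builder();
    let _int_field = schema_builder.add_u64_field("intval", int_options);
    let _schema = schema_builder.build();
}
```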
@@ -5,6 +5,7 @@ pub mod doc_id_mapping;
mod doc_opstamp_mapping;
pub mod index_writer;
mod index_writer_status;
+mod json_term_writer;
mod log_merge_policy;
mod merge_operation;
pub mod merge_policy;
@@ -20,10 +21,13 @@ pub mod segment_updater;
mod segment_writer;
mod stamper;

-use crossbeam::channel;
+use crossbeam_channel as channel;
use smallvec::SmallVec;

pub use self::index_writer::IndexWriter;
+pub(crate) use self::json_term_writer::{
+convert_to_fast_value_and_get_term, set_string_and_get_terms, JsonTermWriter,
+};
pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_operation::MergeOperation;
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
@@ -1,7 +1,5 @@
-use futures::executor::block_on;
-
use super::IndexWriter;
-use crate::Opstamp;
+use crate::{FutureResult, Opstamp};

/// A prepared commit
pub struct PreparedCommit<'a> {
@@ -35,9 +33,9 @@ impl<'a> PreparedCommit<'a> {
}

/// Proceeds to commit.
-/// See `.commit_async()`.
+/// See `.commit_future()`.
pub fn commit(self) -> crate::Result<Opstamp> {
-block_on(self.commit_async())
+self.commit_future().wait()
}

/// Proceeds to commit.
@@ -45,12 +43,10 @@ impl<'a> PreparedCommit<'a> {
/// Unfortunately, contrary to what `PrepareCommit` may suggests,
/// this operation is not at all really light.
/// At this point deletes have not been flushed yet.
-pub async fn commit_async(self) -> crate::Result<Opstamp> {
+pub fn commit_future(self) -> FutureResult<Opstamp> {
info!("committing {}", self.opstamp);
self.index_writer
.segment_updater()
.schedule_commit(self.opstamp, self.payload)
-.await?;
-Ok(self.opstamp)
}
}
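The prepared-commit change is representative of the whole async-to-blocking migration: an `async fn` plus `block_on` becomes a plain function returning a `FutureResult`, and the synchronous path simply calls `.wait()` on it. The sketch below shows that pattern with a hand-rolled handle over `std::sync::mpsc`; `ResultHandle` is an illustration and is not tantivy's `FutureResult`.

```rust
use std::sync::mpsc;

/// Minimal stand-in for a `FutureResult`-style handle: the background worker
/// sends exactly one result, and `wait()` blocks the caller until it arrives.
struct ResultHandle<T> {
    receiver: mpsc::Receiver<Result<T, String>>,
}

impl<T> ResultHandle<T> {
    fn create() -> (Self, mpsc::Sender<Result<T, String>>) {
        let (sender, receiver) = mpsc::channel();
        (ResultHandle { receiver }, sender)
    }

    fn wait(self) -> Result<T, String> {
        // If the sender is dropped without sending, surface an error instead of panicking.
        self.receiver
            .recv()
            .unwrap_or_else(|_| Err("the scheduled task was dropped".to_string()))
    }
}

fn main() {
    let (handle, sender) = ResultHandle::<u64>::create();
    std::thread::spawn(move || {
        // Pretend this is the segment updater finishing a commit at opstamp 42.
        let _ = sender.send(Ok(42));
    });
    assert_eq!(handle.wait(), Ok(42));
}
```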
@@ -39,9 +39,10 @@ impl SegmentSerializer {

let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
let compressor = segment.index().settings().docstore_compression;
+let blocksize = segment.index().settings().docstore_blocksize;
Ok(SegmentSerializer {
segment,
-store_writer: StoreWriter::new(store_write, compressor),
+store_writer: StoreWriter::new(store_write, compressor, blocksize),
fast_field_serializer,
fieldnorms_serializer: Some(fieldnorms_serializer),
postings_serializer,
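`StoreWriter::new` now also receives a `docstore_blocksize` taken from the index settings. The toy below only illustrates the role such a setting plays in a block-based document store (buffer documents, flush a block once the buffer exceeds the configured size); it is not tantivy's `StoreWriter` and the numbers are arbitrary.

```rust
/// Toy block-buffered writer: documents accumulate in `buffer` and the buffer
/// is flushed as one block whenever it grows past `blocksize`.
struct ToyStoreWriter {
    blocksize: usize,
    buffer: Vec<u8>,
    blocks_flushed: usize,
}

impl ToyStoreWriter {
    fn new(blocksize: usize) -> Self {
        ToyStoreWriter { blocksize, buffer: Vec::new(), blocks_flushed: 0 }
    }

    fn store(&mut self, doc_bytes: &[u8]) {
        self.buffer.extend_from_slice(doc_bytes);
        if self.buffer.len() >= self.blocksize {
            self.flush_block();
        }
    }

    fn flush_block(&mut self) {
        if !self.buffer.is_empty() {
            // A real implementation would compress `self.buffer` here.
            self.blocks_flushed += 1;
            self.buffer.clear();
        }
    }
}

fn main() {
    let mut writer = ToyStoreWriter::new(16);
    for _ in 0..4 {
        writer.store(b"123456789"); // 9 bytes per document
    }
    writer.flush_block(); // flush whatever remains
    assert_eq!(writer.blocks_flushed, 2);
}
```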
@@ -1,6 +1,5 @@
use std::borrow::BorrowMut;
use std::collections::HashSet;
-use std::io;
use std::io::Write;
use std::ops::Deref;
use std::path::PathBuf;
@@ -8,9 +7,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};

use fail::fail_point;
-use futures::channel::oneshot;
-use futures::executor::{ThreadPool, ThreadPoolBuilder};
-use futures::future::{Future, TryFutureExt};
+use rayon::{ThreadPool, ThreadPoolBuilder};

use super::segment_manager::SegmentManager;
use crate::core::{
@@ -29,7 +26,7 @@ use crate::indexer::{
SegmentSerializer,
};
use crate::schema::Schema;
-use crate::{Opstamp, TantivyError};
+use crate::{FutureResult, Opstamp};

const NUM_MERGE_THREADS: usize = 4;

@@ -75,10 +72,12 @@ fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()>
let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer.
writeln!(&mut buffer)?;
-fail_point!("save_metas", |msg| Err(TantivyError::from(io::Error::new(
-io::ErrorKind::Other,
-msg.unwrap_or_else(|| "Undefined".to_string())
-))));
+fail_point!("save_metas", |msg| Err(crate::TantivyError::from(
+std::io::Error::new(
+std::io::ErrorKind::Other,
+msg.unwrap_or_else(|| "Undefined".to_string())
+)
+)));
directory.sync_directory()?;
directory.atomic_write(&META_FILEPATH, &buffer[..])?;
debug!("Saved metas {:?}", serde_json::to_string_pretty(&metas));
@@ -105,7 +104,7 @@ impl Deref for SegmentUpdater {
}
}

-async fn garbage_collect_files(
+fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
@@ -309,18 +308,18 @@ impl SegmentUpdater {
let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new()
-.name_prefix("segment_updater")
-.pool_size(1)
-.create()
+.thread_name(|_| "segment_updater".to_string())
+.num_threads(1)
+.build()
.map_err(|_| {
crate::TantivyError::SystemError(
"Failed to spawn segment updater thread".to_string(),
)
})?;
let merge_thread_pool = ThreadPoolBuilder::new()
-.name_prefix("merge_thread")
-.pool_size(NUM_MERGE_THREADS)
-.create()
+.thread_name(|i| format!("merge_thread_{i}"))
+.num_threads(NUM_MERGE_THREADS)
+.build()
.map_err(|_| {
crate::TantivyError::SystemError(
"Failed to spawn segment merging thread".to_string(),
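The segment updater swaps the `futures` executor thread pools for rayon: `name_prefix`/`pool_size`/`create` become `thread_name`/`num_threads`/`build`, and work is submitted as plain closures instead of futures. The sketch below exercises that builder configuration in isolation, assuming rayon as a dependency (which this change introduces); the channel round-trip stands in for the result plumbing, it is not tantivy's code.

```rust
use rayon::ThreadPoolBuilder;
use std::sync::mpsc;

fn main() {
    // Mirrors the builder calls in the hunk above: a single-threaded pool with a
    // fixed thread name, built with rayon.
    let pool = ThreadPoolBuilder::new()
        .thread_name(|_| "segment_updater".to_string())
        .num_threads(1)
        .build()
        .expect("failed to build thread pool");

    // `spawn` takes a plain closure; results travel back over a channel, which is
    // roughly the pattern `schedule_task` adopts in this change.
    let (sender, receiver) = mpsc::channel();
    pool.spawn(move || {
        let _ = sender.send(std::thread::current().name().map(str::to_owned));
    });
    assert_eq!(receiver.recv().unwrap().as_deref(), Some("segment_updater"));
}
```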
@@ -349,39 +348,30 @@ impl SegmentUpdater {
*self.merge_policy.write().unwrap() = arc_merge_policy;
}

-async fn schedule_task<
-T: 'static + Send,
-F: Future<Output = crate::Result<T>> + 'static + Send,
->(
+fn schedule_task<T: 'static + Send, F: FnOnce() -> crate::Result<T> + 'static + Send>(
&self,
task: F,
-) -> crate::Result<T> {
+) -> FutureResult<T> {
if !self.is_alive() {
-return Err(crate::TantivyError::SystemError(
-"Segment updater killed".to_string(),
-));
+return crate::TantivyError::SystemError("Segment updater killed".to_string()).into();
}
-let (sender, receiver) = oneshot::channel();
-self.pool.spawn_ok(async move {
-let task_result = task.await;
+let (scheduled_result, sender) = FutureResult::create(
+"A segment_updater future did not succeed. This should never happen.",
+);
+self.pool.spawn(|| {
+let task_result = task();
let _ = sender.send(task_result);
});
-let task_result = receiver.await;
-task_result.unwrap_or_else(|_| {
-let err_msg =
-"A segment_updater future did not success. This should never happen.".to_string();
-Err(crate::TantivyError::SystemError(err_msg))
-})
+scheduled_result
}

-pub async fn schedule_add_segment(&self, segment_entry: SegmentEntry) -> crate::Result<()> {
+pub fn schedule_add_segment(&self, segment_entry: SegmentEntry) -> FutureResult<()> {
let segment_updater = self.clone();
-self.schedule_task(async move {
+self.schedule_task(move || {
segment_updater.segment_manager.add_segment(segment_entry);
-segment_updater.consider_merge_options().await;
+segment_updater.consider_merge_options();
Ok(())
})
-.await
}

/// Orders `SegmentManager` to remove all segments
@@ -448,9 +438,9 @@ impl SegmentUpdater {
Ok(())
}

-pub async fn schedule_garbage_collect(&self) -> crate::Result<GarbageCollectionResult> {
-let garbage_collect_future = garbage_collect_files(self.clone());
-self.schedule_task(garbage_collect_future).await
+pub fn schedule_garbage_collect(&self) -> FutureResult<GarbageCollectionResult> {
+let self_clone = self.clone();
+self.schedule_task(move || garbage_collect_files(self_clone))
}

/// List the files that are useful to the index.
@@ -468,21 +458,20 @@ impl SegmentUpdater {
files
}

-pub(crate) async fn schedule_commit(
+pub(crate) fn schedule_commit(
&self,
opstamp: Opstamp,
payload: Option<String>,
-) -> crate::Result<()> {
+) -> FutureResult<Opstamp> {
let segment_updater: SegmentUpdater = self.clone();
-self.schedule_task(async move {
+self.schedule_task(move || {
let segment_entries = segment_updater.purge_deletes(opstamp)?;
segment_updater.segment_manager.commit(segment_entries);
segment_updater.save_metas(opstamp, payload)?;
-let _ = garbage_collect_files(segment_updater.clone()).await;
-segment_updater.consider_merge_options().await;
-Ok(())
+let _ = garbage_collect_files(segment_updater.clone());
+segment_updater.consider_merge_options();
+Ok(opstamp)
})
-.await
}

fn store_meta(&self, index_meta: &IndexMeta) {
@@ -515,26 +504,33 @@ impl SegmentUpdater {
// suggested and the moment when it ended up being executed.)
//
// `segment_ids` is required to be non-empty.
-pub fn start_merge(
-&self,
-merge_operation: MergeOperation,
-) -> crate::Result<impl Future<Output = crate::Result<SegmentMeta>>> {
+pub fn start_merge(&self, merge_operation: MergeOperation) -> FutureResult<SegmentMeta> {
assert!(
!merge_operation.segment_ids().is_empty(),
"Segment_ids cannot be empty."
);

let segment_updater = self.clone();
-let segment_entries: Vec<SegmentEntry> = self
+let segment_entries: Vec<SegmentEntry> = match self
.segment_manager
-.start_merge(merge_operation.segment_ids())?;
+.start_merge(merge_operation.segment_ids())
+{
+Ok(segment_entries) => segment_entries,
+Err(err) => {
+warn!(
+"Starting the merge failed for the following reason. This is not fatal. {}",
+err
+);
+return err.into();
+}
+};

info!("Starting merge - {:?}", merge_operation.segment_ids());

-let (merging_future_send, merging_future_recv) =
-oneshot::channel::<crate::Result<SegmentMeta>>();
+let (scheduled_result, merging_future_send) =
+FutureResult::create("Merge operation failed.");

-self.merge_thread_pool.spawn_ok(async move {
+self.merge_thread_pool.spawn(move || {
// The fact that `merge_operation` is moved here is important.
// Its lifetime is used to track how many merging thread are currently running,
// as well as which segment is currently in merge and therefore should not be
@@ -545,28 +541,23 @@ impl SegmentUpdater {
merge_operation.target_opstamp(),
) {
Ok(after_merge_segment_entry) => {
-let segment_meta = segment_updater
-.end_merge(merge_operation, after_merge_segment_entry)
-.await;
-let _send_result = merging_future_send.send(segment_meta);
+let segment_meta_res =
+segment_updater.end_merge(merge_operation, after_merge_segment_entry);
+let _send_result = merging_future_send.send(segment_meta_res);
}
-Err(e) => {
+Err(merge_error) => {
warn!(
"Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids().to_vec(),
-e
+merge_error
);
-// ... cancel merge
+let _send_result = merging_future_send.send(Err(merge_error));
assert!(!cfg!(test), "Merge failed.");
}
}
});

-Ok(merging_future_recv.unwrap_or_else(|e| {
-Err(crate::TantivyError::SystemError(
-"Merge failed:".to_string() + &e.to_string(),
-))
-}))
+scheduled_result
}

pub(crate) fn get_mergeable_segments(&self) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
@@ -575,7 +566,7 @@ impl SegmentUpdater {
.get_mergeable_segments(&merge_segment_ids)
}

-async fn consider_merge_options(&self) {
+fn consider_merge_options(&self) {
let (committed_segments, uncommitted_segments) = self.get_mergeable_segments();

// Committed segments cannot be merged with uncommitted_segments.
@@ -601,23 +592,21 @@ impl SegmentUpdater {
merge_candidates.extend(committed_merge_candidates);

for merge_operation in merge_candidates {
-if let Err(err) = self.start_merge(merge_operation) {
-warn!(
-"Starting the merge failed for the following reason. This is not fatal. {}",
-err
-);
-}
+// If a merge cannot be started this is not a fatal error.
+// We do log a warning in `start_merge`.
+let _ = self.start_merge(merge_operation);
}
}

-async fn end_merge(
+/// Queues a `end_merge` in the segment updater and blocks until it is successfully processed.
+fn end_merge(
&self,
merge_operation: MergeOperation,
mut after_merge_segment_entry: SegmentEntry,
) -> crate::Result<SegmentMeta> {
let segment_updater = self.clone();
let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
-self.schedule_task(async move {
+self.schedule_task(move || {
info!("End merge {:?}", after_merge_segment_entry.meta());
{
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
@@ -655,13 +644,13 @@ impl SegmentUpdater {
.save_metas(previous_metas.opstamp, previous_metas.payload.clone())?;
}

-segment_updater.consider_merge_options().await;
+segment_updater.consider_merge_options();
} // we drop all possible handle to a now useless `SegmentMeta`.

-let _ = garbage_collect_files(segment_updater).await;
+let _ = garbage_collect_files(segment_updater);
Ok(())
})
-.await?;
+.wait()?;
Ok(after_merge_segment_meta)
}

@@ -1,14 +1,15 @@
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
use super::operation::AddOperation;
use crate::core::Segment;
-use crate::fastfield::FastFieldsWriter;
+use crate::fastfield::{FastFieldsWriter, FastValue as _};
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
+use crate::indexer::json_term_writer::index_json_values;
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::{
compute_table_size, serialize_postings, IndexingContext, IndexingPosition,
PerFieldPostingsWriter, PostingsWriter,
};
-use crate::schema::{Field, FieldEntry, FieldType, FieldValue, Schema, Term, Type, Value};
+use crate::schema::{FieldEntry, FieldType, FieldValue, Schema, Term, Value};
use crate::store::{StoreReader, StoreWriter};
use crate::tokenizer::{
BoxTokenStream, FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer,
@@ -55,13 +56,13 @@ fn remap_doc_opstamps(
/// The segment is layed on disk when the segment gets `finalized`.
pub struct SegmentWriter {
pub(crate) max_doc: DocId,
-pub(crate) indexing_context: IndexingContext,
+pub(crate) ctx: IndexingContext,
pub(crate) per_field_postings_writers: PerFieldPostingsWriter,
pub(crate) segment_serializer: SegmentSerializer,
pub(crate) fast_field_writers: FastFieldsWriter,
pub(crate) fieldnorms_writer: FieldNormsWriter,
pub(crate) doc_opstamps: Vec<Opstamp>,
-tokenizers: Vec<Option<TextAnalyzer>>,
+per_field_text_analyzers: Vec<TextAnalyzer>,
term_buffer: Term,
schema: Schema,
}
@@ -85,29 +86,33 @@ impl SegmentWriter {
let table_size = compute_initial_table_size(memory_budget_in_bytes)?;
let segment_serializer = SegmentSerializer::for_segment(segment, false)?;
let per_field_postings_writers = PerFieldPostingsWriter::for_schema(&schema);
-let tokenizers = schema
+let per_field_text_analyzers = schema
.fields()
-.map(
-|(_, field_entry): (Field, &FieldEntry)| match field_entry.field_type() {
-FieldType::Str(ref text_options) => text_options
-.get_indexing_options()
-.and_then(|text_index_option| {
-let tokenizer_name = &text_index_option.tokenizer();
-tokenizer_manager.get(tokenizer_name)
-}),
+.map(|(_, field_entry): (_, &FieldEntry)| {
+let text_options = match field_entry.field_type() {
+FieldType::Str(ref text_options) => text_options.get_indexing_options(),
+FieldType::JsonObject(ref json_object_options) => {
+json_object_options.get_text_indexing_options()
+}
_ => None,
-},
-)
+};
+text_options
+.and_then(|text_index_option| {
+let tokenizer_name = &text_index_option.tokenizer();
+tokenizer_manager.get(tokenizer_name)
+})
+.unwrap_or_default()
+})
.collect();
Ok(SegmentWriter {
max_doc: 0,
-indexing_context: IndexingContext::new(table_size),
+ctx: IndexingContext::new(table_size),
per_field_postings_writers,
fieldnorms_writer: FieldNormsWriter::for_schema(&schema),
segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema(&schema),
doc_opstamps: Vec::with_capacity(1_000),
-tokenizers,
+per_field_text_analyzers,
term_buffer: Term::new(),
schema,
})
@@ -130,7 +135,7 @@ impl SegmentWriter {
.transpose()?;
remap_and_write(
&self.per_field_postings_writers,
-self.indexing_context,
+self.ctx,
&self.fast_field_writers,
&self.fieldnorms_writer,
&self.schema,
@@ -142,7 +147,7 @@ impl SegmentWriter {
}

pub fn mem_usage(&self) -> usize {
-self.indexing_context.mem_usage()
+self.ctx.mem_usage()
+ self.fieldnorms_writer.mem_usage()
+ self.fast_field_writers.mem_usage()
+ self.segment_serializer.mem_usage()
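A notable detail in the `SegmentWriter` constructor above is that the per-field tokenizer is no longer stored as an `Option<TextAnalyzer>`: when a field has no configured tokenizer, the code now falls back to `TextAnalyzer::default()` via `.unwrap_or_default()`. The sketch below reproduces that lookup-with-fallback in isolation, assuming the public `TokenizerManager`/`TextAnalyzer` API used in the hunk.

```rust
use tantivy::tokenizer::{TextAnalyzer, TokenizerManager};

fn main() {
    let manager = TokenizerManager::default();

    // Same shape as the new `per_field_text_analyzers` initialisation: look the
    // tokenizer up by name and fall back to a default analyzer if it is missing.
    let analyzer: TextAnalyzer = manager.get("default").unwrap_or_default();

    let mut tokens = Vec::new();
    let mut stream = analyzer.token_stream("Hello, tantivy!");
    stream.process(&mut |token| tokens.push(token.text.clone()));
    assert_eq!(tokens, vec!["hello", "tantivy"]);
}
```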
@@ -162,13 +167,12 @@ impl SegmentWriter {
if !field_entry.is_indexed() {
continue;
}
-let (term_buffer, indexing_context) =
-(&mut self.term_buffer, &mut self.indexing_context);
+let (term_buffer, ctx) = (&mut self.term_buffer, &mut self.ctx);
let postings_writer: &mut dyn PostingsWriter =
self.per_field_postings_writers.get_for_field_mut(field);
+term_buffer.set_field(field_entry.field_type().value_type(), field);
match *field_entry.field_type() {
FieldType::Facet(_) => {
-term_buffer.set_field(Type::Facet, field);
for value in values {
let facet = value.as_facet().ok_or_else(make_schema_error)?;
let facet_str = facet.encoded_str();
@@ -177,18 +181,14 @@ impl SegmentWriter {
.token_stream(facet_str)
.process(&mut |token| {
term_buffer.set_text(&token.text);
-let unordered_term_id = postings_writer.subscribe(
-doc_id,
-0u32,
-term_buffer,
-indexing_context,
-);
+let unordered_term_id =
+postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
// TODO pass indexing context directly in subscribe function
unordered_term_id_opt = Some(unordered_term_id);
});
if let Some(unordered_term_id) = unordered_term_id_opt {
self.fast_field_writers
-.get_multivalue_writer_mut(field)
+.get_term_id_writer_mut(field)
.expect("writer for facet missing")
.add_val(unordered_term_id);
}
@@ -210,74 +210,82 @@ impl SegmentWriter {
.push(PreTokenizedStream::from(tok_str.clone()).into());
}
Value::Str(ref text) => {
-if let Some(ref mut tokenizer) =
-self.tokenizers[field.field_id() as usize]
-{
-offsets.push(total_offset);
-total_offset += text.len();
-token_streams.push(tokenizer.token_stream(text));
-}
+let text_analyzer =
+&self.per_field_text_analyzers[field.field_id() as usize];
+offsets.push(total_offset);
+total_offset += text.len();
+token_streams.push(text_analyzer.token_stream(text));
}
_ => (),
}
}

let mut indexing_position = IndexingPosition::default();

for mut token_stream in token_streams {
+assert_eq!(term_buffer.as_slice().len(), 5);
postings_writer.index_text(
doc_id,
-field,
&mut *token_stream,
term_buffer,
-indexing_context,
+ctx,
&mut indexing_position,
+self.fast_field_writers.get_term_id_writer_mut(field),
);
}
-self.fieldnorms_writer
-.record(doc_id, field, indexing_position.num_tokens);
+if field_entry.has_fieldnorms() {
+self.fieldnorms_writer
+.record(doc_id, field, indexing_position.num_tokens);
+}
}
FieldType::U64(_) => {
for value in values {
-term_buffer.set_field(Type::U64, field);
let u64_val = value.as_u64().ok_or_else(make_schema_error)?;
term_buffer.set_u64(u64_val);
-postings_writer.subscribe(doc_id, 0u32, term_buffer, indexing_context);
+postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
}
}
FieldType::Date(_) => {
for value in values {
-term_buffer.set_field(Type::Date, field);
let date_val = value.as_date().ok_or_else(make_schema_error)?;
-term_buffer.set_i64(date_val.timestamp());
-postings_writer.subscribe(doc_id, 0u32, term_buffer, indexing_context);
+term_buffer.set_u64(date_val.to_u64());
+postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
}
}
FieldType::I64(_) => {
for value in values {
-term_buffer.set_field(Type::I64, field);
let i64_val = value.as_i64().ok_or_else(make_schema_error)?;
term_buffer.set_i64(i64_val);
-postings_writer.subscribe(doc_id, 0u32, term_buffer, indexing_context);
+postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
}
}
FieldType::F64(_) => {
|
FieldType::F64(_) => {
|
||||||
for value in values {
|
for value in values {
|
||||||
term_buffer.set_field(Type::F64, field);
|
|
||||||
let f64_val = value.as_f64().ok_or_else(make_schema_error)?;
|
let f64_val = value.as_f64().ok_or_else(make_schema_error)?;
|
||||||
term_buffer.set_f64(f64_val);
|
term_buffer.set_f64(f64_val);
|
||||||
postings_writer.subscribe(doc_id, 0u32, term_buffer, indexing_context);
|
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::Bytes(_) => {
|
FieldType::Bytes(_) => {
|
||||||
for value in values {
|
for value in values {
|
||||||
term_buffer.set_field(Type::Bytes, field);
|
|
||||||
let bytes = value.as_bytes().ok_or_else(make_schema_error)?;
|
let bytes = value.as_bytes().ok_or_else(make_schema_error)?;
|
||||||
term_buffer.set_bytes(bytes);
|
term_buffer.set_bytes(bytes);
|
||||||
postings_writer.subscribe(doc_id, 0u32, term_buffer, indexing_context);
|
postings_writer.subscribe(doc_id, 0u32, term_buffer, ctx);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
FieldType::JsonObject(_) => {
|
FieldType::JsonObject(_) => {
|
||||||
unimplemented!()
|
let text_analyzer = &self.per_field_text_analyzers[field.field_id() as usize];
|
||||||
|
let json_values_it = values
|
||||||
|
.iter()
|
||||||
|
.map(|value| value.as_json().ok_or_else(make_schema_error));
|
||||||
|
index_json_values(
|
||||||
|
doc_id,
|
||||||
|
json_values_it,
|
||||||
|
text_analyzer,
|
||||||
|
term_buffer,
|
||||||
|
postings_writer,
|
||||||
|
ctx,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -327,13 +335,14 @@ impl SegmentWriter {
|
|||||||
/// `doc_id_map` is used to map to the new doc_id order.
|
/// `doc_id_map` is used to map to the new doc_id order.
|
||||||
fn remap_and_write(
|
fn remap_and_write(
|
||||||
per_field_postings_writers: &PerFieldPostingsWriter,
|
per_field_postings_writers: &PerFieldPostingsWriter,
|
||||||
indexing_context: IndexingContext,
|
ctx: IndexingContext,
|
||||||
fast_field_writers: &FastFieldsWriter,
|
fast_field_writers: &FastFieldsWriter,
|
||||||
fieldnorms_writer: &FieldNormsWriter,
|
fieldnorms_writer: &FieldNormsWriter,
|
||||||
schema: &Schema,
|
schema: &Schema,
|
||||||
mut serializer: SegmentSerializer,
|
mut serializer: SegmentSerializer,
|
||||||
doc_id_map: Option<&DocIdMapping>,
|
doc_id_map: Option<&DocIdMapping>,
|
||||||
) -> crate::Result<()> {
|
) -> crate::Result<()> {
|
||||||
|
debug!("remap-and-write");
|
||||||
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
|
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
|
||||||
fieldnorms_writer.serialize(fieldnorms_serializer, doc_id_map)?;
|
fieldnorms_writer.serialize(fieldnorms_serializer, doc_id_map)?;
|
||||||
}
|
}
|
||||||
@@ -342,28 +351,31 @@ fn remap_and_write(
|
|||||||
.open_read(SegmentComponent::FieldNorms)?;
|
.open_read(SegmentComponent::FieldNorms)?;
|
||||||
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
|
||||||
let term_ord_map = serialize_postings(
|
let term_ord_map = serialize_postings(
|
||||||
indexing_context,
|
ctx,
|
||||||
per_field_postings_writers,
|
per_field_postings_writers,
|
||||||
fieldnorm_readers,
|
fieldnorm_readers,
|
||||||
doc_id_map,
|
doc_id_map,
|
||||||
schema,
|
schema,
|
||||||
serializer.get_postings_serializer(),
|
serializer.get_postings_serializer(),
|
||||||
)?;
|
)?;
|
||||||
|
debug!("fastfield-serialize");
|
||||||
fast_field_writers.serialize(
|
fast_field_writers.serialize(
|
||||||
serializer.get_fast_field_serializer(),
|
serializer.get_fast_field_serializer(),
|
||||||
&term_ord_map,
|
&term_ord_map,
|
||||||
doc_id_map,
|
doc_id_map,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
debug!("resort-docstore");
|
||||||
// finalize temp docstore and create version, which reflects the doc_id_map
|
// finalize temp docstore and create version, which reflects the doc_id_map
|
||||||
if let Some(doc_id_map) = doc_id_map {
|
if let Some(doc_id_map) = doc_id_map {
|
||||||
let store_write = serializer
|
let store_write = serializer
|
||||||
.segment_mut()
|
.segment_mut()
|
||||||
.open_write(SegmentComponent::Store)?;
|
.open_write(SegmentComponent::Store)?;
|
||||||
let compressor = serializer.segment().index().settings().docstore_compression;
|
let compressor = serializer.segment().index().settings().docstore_compression;
|
||||||
|
let block_size = serializer.segment().index().settings().docstore_blocksize;
|
||||||
let old_store_writer = std::mem::replace(
|
let old_store_writer = std::mem::replace(
|
||||||
&mut serializer.store_writer,
|
&mut serializer.store_writer,
|
||||||
StoreWriter::new(store_write, compressor),
|
StoreWriter::new(store_write, compressor, block_size),
|
||||||
);
|
);
|
||||||
old_store_writer.close()?;
|
old_store_writer.close()?;
|
||||||
let store_read = StoreReader::open(
|
let store_read = StoreReader::open(
|
||||||
@@ -377,6 +389,7 @@ fn remap_and_write(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
debug!("serializer-close");
|
||||||
serializer.close()?;
|
serializer.close()?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -407,9 +420,15 @@ pub fn prepare_doc_for_store(doc: Document, schema: &Schema) -> Document {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::compute_initial_table_size;
|
use super::compute_initial_table_size;
|
||||||
use crate::schema::{Schema, STORED, TEXT};
|
use crate::collector::Count;
|
||||||
|
use crate::indexer::json_term_writer::JsonTermWriter;
|
||||||
|
use crate::postings::TermInfo;
|
||||||
|
use crate::query::PhraseQuery;
|
||||||
|
use crate::schema::{IndexRecordOption, Schema, Type, STORED, STRING, TEXT};
|
||||||
|
use crate::time::format_description::well_known::Rfc3339;
|
||||||
|
use crate::time::OffsetDateTime;
|
||||||
use crate::tokenizer::{PreTokenizedString, Token};
|
use crate::tokenizer::{PreTokenizedString, Token};
|
||||||
use crate::Document;
|
use crate::{DateTime, DocAddress, DocSet, Document, Index, Postings, Term, TERMINATED};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_hashmap_size() {
|
fn test_hashmap_size() {
|
||||||
@@ -448,4 +467,279 @@ mod tests {
|
|||||||
Some("title")
|
Some("title")
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_json_indexing() {
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let json_field = schema_builder.add_json_field("json", STORED | TEXT);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
|
||||||
|
r#"{
|
||||||
|
"toto": "titi",
|
||||||
|
"float": -0.2,
|
||||||
|
"unsigned": 1,
|
||||||
|
"signed": -2,
|
||||||
|
"complexobject": {
|
||||||
|
"field.with.dot": 1
|
||||||
|
},
|
||||||
|
"date": "1985-04-12T23:20:50.52Z",
|
||||||
|
"my_arr": [2, 3, {"my_key": "two tokens"}, 4]
|
||||||
|
}"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
let doc = doc!(json_field=>json_val.clone());
|
||||||
|
let index = Index::create_in_ram(schema.clone());
|
||||||
|
let mut writer = index.writer_for_tests().unwrap();
|
||||||
|
writer.add_document(doc).unwrap();
|
||||||
|
writer.commit().unwrap();
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let doc = searcher
|
||||||
|
.doc(DocAddress {
|
||||||
|
segment_ord: 0u32,
|
||||||
|
doc_id: 0u32,
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
let serdeser_json_val = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(
|
||||||
|
&schema.to_json(&doc),
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
.get("json")
|
||||||
|
.unwrap()[0]
|
||||||
|
.as_object()
|
||||||
|
.unwrap()
|
||||||
|
.clone();
|
||||||
|
assert_eq!(json_val, serdeser_json_val);
|
||||||
|
let segment_reader = searcher.segment_reader(0u32);
|
||||||
|
let inv_idx = segment_reader.inverted_index(json_field).unwrap();
|
||||||
|
let term_dict = inv_idx.terms();
|
||||||
|
|
||||||
|
let mut term = Term::new();
|
||||||
|
term.set_field(Type::Json, json_field);
|
||||||
|
let mut term_stream = term_dict.stream().unwrap();
|
||||||
|
|
||||||
|
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
|
||||||
|
json_term_writer.push_path_segment("complexobject");
|
||||||
|
json_term_writer.push_path_segment("field.with.dot");
|
||||||
|
json_term_writer.set_fast_value(1u64);
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.push_path_segment("date");
|
||||||
|
json_term_writer.set_fast_value(DateTime::from_utc(
|
||||||
|
OffsetDateTime::parse("1985-04-12T23:20:50.52Z", &Rfc3339).unwrap(),
|
||||||
|
));
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.push_path_segment("float");
|
||||||
|
json_term_writer.set_fast_value(-0.2f64);
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.push_path_segment("my_arr");
|
||||||
|
json_term_writer.set_fast_value(2u64);
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.set_fast_value(3u64);
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.set_fast_value(4u64);
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.push_path_segment("my_key");
|
||||||
|
json_term_writer.set_str("tokens");
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.set_str("two");
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.push_path_segment("signed");
|
||||||
|
json_term_writer.set_fast_value(-2i64);
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.push_path_segment("toto");
|
||||||
|
json_term_writer.set_str("titi");
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
|
||||||
|
json_term_writer.pop_path_segment();
|
||||||
|
json_term_writer.push_path_segment("unsigned");
|
||||||
|
json_term_writer.set_fast_value(1u64);
|
||||||
|
assert!(term_stream.advance());
|
||||||
|
assert_eq!(term_stream.key(), json_term_writer.term().value_bytes());
|
||||||
|
assert!(!term_stream.advance());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_json_tokenized_with_position() {
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let json_field = schema_builder.add_json_field("json", STORED | TEXT);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let mut doc = Document::default();
|
||||||
|
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||||
|
serde_json::from_str(r#"{"mykey": "repeated token token"}"#).unwrap();
|
||||||
|
doc.add_json_object(json_field, json_val);
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut writer = index.writer_for_tests().unwrap();
|
||||||
|
writer.add_document(doc).unwrap();
|
||||||
|
writer.commit().unwrap();
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let segment_reader = searcher.segment_reader(0u32);
|
||||||
|
let inv_index = segment_reader.inverted_index(json_field).unwrap();
|
||||||
|
let mut term = Term::new();
|
||||||
|
term.set_field(Type::Json, json_field);
|
||||||
|
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
|
||||||
|
json_term_writer.push_path_segment("mykey");
|
||||||
|
json_term_writer.set_str("token");
|
||||||
|
let term_info = inv_index
|
||||||
|
.get_term_info(json_term_writer.term())
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
term_info,
|
||||||
|
TermInfo {
|
||||||
|
doc_freq: 1,
|
||||||
|
postings_range: 2..4,
|
||||||
|
positions_range: 2..5
|
||||||
|
}
|
||||||
|
);
|
||||||
|
let mut postings = inv_index
|
||||||
|
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(postings.doc(), 0);
|
||||||
|
assert_eq!(postings.term_freq(), 2);
|
||||||
|
let mut positions = Vec::new();
|
||||||
|
postings.positions(&mut positions);
|
||||||
|
assert_eq!(&positions[..], &[1, 2]);
|
||||||
|
assert_eq!(postings.advance(), TERMINATED);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_json_raw_no_position() {
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let json_field = schema_builder.add_json_field("json", STRING);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let json_val: serde_json::Map<String, serde_json::Value> =
|
||||||
|
serde_json::from_str(r#"{"mykey": "two tokens"}"#).unwrap();
|
||||||
|
let doc = doc!(json_field=>json_val);
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut writer = index.writer_for_tests().unwrap();
|
||||||
|
writer.add_document(doc).unwrap();
|
||||||
|
writer.commit().unwrap();
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let segment_reader = searcher.segment_reader(0u32);
|
||||||
|
let inv_index = segment_reader.inverted_index(json_field).unwrap();
|
||||||
|
let mut term = Term::new();
|
||||||
|
term.set_field(Type::Json, json_field);
|
||||||
|
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
|
||||||
|
json_term_writer.push_path_segment("mykey");
|
||||||
|
json_term_writer.set_str("two tokens");
|
||||||
|
let term_info = inv_index
|
||||||
|
.get_term_info(json_term_writer.term())
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
term_info,
|
||||||
|
TermInfo {
|
||||||
|
doc_freq: 1,
|
||||||
|
postings_range: 0..1,
|
||||||
|
positions_range: 0..0
|
||||||
|
}
|
||||||
|
);
|
||||||
|
let mut postings = inv_index
|
||||||
|
.read_postings(&term, IndexRecordOption::WithFreqs)
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(postings.doc(), 0);
|
||||||
|
assert_eq!(postings.term_freq(), 1);
|
||||||
|
let mut positions = Vec::new();
|
||||||
|
postings.positions(&mut positions);
|
||||||
|
assert_eq!(postings.advance(), TERMINATED);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_position_overlapping_path() {
|
||||||
|
// This test checks that we do not end up detecting phrase query due
|
||||||
|
// to several string literal in the same json object being overlapping.
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let json_field = schema_builder.add_json_field("json", TEXT);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let json_val: serde_json::Map<String, serde_json::Value> = serde_json::from_str(
|
||||||
|
r#"{"mykey": [{"field": "hello happy tax payer"}, {"field": "nothello"}]}"#,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
let doc = doc!(json_field=>json_val);
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut writer = index.writer_for_tests().unwrap();
|
||||||
|
writer.add_document(doc).unwrap();
|
||||||
|
writer.commit().unwrap();
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let mut term = Term::new();
|
||||||
|
term.set_field(Type::Json, json_field);
|
||||||
|
let mut json_term_writer = JsonTermWriter::wrap(&mut term);
|
||||||
|
json_term_writer.push_path_segment("mykey");
|
||||||
|
json_term_writer.push_path_segment("field");
|
||||||
|
json_term_writer.set_str("hello");
|
||||||
|
let hello_term = json_term_writer.term().clone();
|
||||||
|
json_term_writer.set_str("nothello");
|
||||||
|
let nothello_term = json_term_writer.term().clone();
|
||||||
|
json_term_writer.set_str("happy");
|
||||||
|
let happy_term = json_term_writer.term().clone();
|
||||||
|
let phrase_query = PhraseQuery::new(vec![hello_term, happy_term.clone()]);
|
||||||
|
assert_eq!(searcher.search(&phrase_query, &Count).unwrap(), 1);
|
||||||
|
let phrase_query = PhraseQuery::new(vec![nothello_term, happy_term]);
|
||||||
|
assert_eq!(searcher.search(&phrase_query, &Count).unwrap(), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_bug_regression_1629_position_when_array_with_a_field_value_that_does_not_contain_any_token(
|
||||||
|
) {
|
||||||
|
// We experienced a bug where we would have a position underflow when computing position
|
||||||
|
// delta in an horrible corner case.
|
||||||
|
//
|
||||||
|
// See the commit with this unit test if you want the details.
|
||||||
|
let mut schema_builder = Schema::builder();
|
||||||
|
let text = schema_builder.add_text_field("text", TEXT);
|
||||||
|
let schema = schema_builder.build();
|
||||||
|
let doc = schema
|
||||||
|
.parse_document(r#"{"text": [ "bbb", "aaa", "", "aaa"]}"#)
|
||||||
|
.unwrap();
|
||||||
|
let index = Index::create_in_ram(schema);
|
||||||
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
|
index_writer.add_document(doc).unwrap();
|
||||||
|
// On debug this did panic on the underflow
|
||||||
|
index_writer.commit().unwrap();
|
||||||
|
let reader = index.reader().unwrap();
|
||||||
|
let searcher = reader.searcher();
|
||||||
|
let seg_reader = searcher.segment_reader(0);
|
||||||
|
let inv_index = seg_reader.inverted_index(text).unwrap();
|
||||||
|
let term = Term::from_field_text(text, "aaa");
|
||||||
|
let mut postings = inv_index
|
||||||
|
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(postings.doc(), 0u32);
|
||||||
|
let mut positions = Vec::new();
|
||||||
|
postings.positions(&mut positions);
|
||||||
|
// On release this was [2, 1]. (< note the decreasing values)
|
||||||
|
assert_eq!(positions, &[2, 5]);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
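For readers skimming this diff, a minimal usage sketch of the JSON field support added above, modeled on the new `test_json_indexing` test. The field name, the sample JSON, and the 50 MB writer budget are illustrative only, and `serde_json` is assumed as a dependency; `add_json_field`, `doc!`, and `Index::create_in_ram` are the calls exercised by the tests in this commit range.

```rust
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Declare a JSON field; STORED | TEXT mirrors the options used in the new tests.
    let mut schema_builder = Schema::builder();
    let json_field = schema_builder.add_json_field("attributes", STORED | TEXT);
    let schema = schema_builder.build();

    // Index a single document whose JSON object mixes a string and a number.
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?; // 50 MB indexing budget, arbitrary here
    let json_val: serde_json::Map<String, serde_json::Value> =
        serde_json::from_str(r#"{"product": "laptop", "price": 999}"#).unwrap();
    writer.add_document(doc!(json_field => json_val))?;
    writer.commit()?;
    Ok(())
}
```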
102 src/lib.rs
@@ -123,10 +123,95 @@ mod functional_test;

 #[macro_use]
 mod macros;
+mod future_result;

-pub use chrono;
+/// Re-export of the `time` crate
+///
+/// Tantivy uses [`time`](https://crates.io/crates/time) for dates.
+pub use time;
+
+use crate::time::format_description::well_known::Rfc3339;
+use crate::time::{OffsetDateTime, PrimitiveDateTime, UtcOffset};
+
+/// A date/time value with second precision.
+///
+/// This timestamp does not carry any explicit time zone information.
+/// Users are responsible for applying the provided conversion
+/// functions consistently. Internally the time zone is assumed
+/// to be UTC, which is also used implicitly for JSON serialization.
+///
+/// All constructors and conversions are provided as explicit
+/// functions and not by implementing any `From`/`Into` traits
+/// to prevent unintended usage.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct DateTime {
+unix_timestamp: i64,
+}
+
+impl DateTime {
+/// Create new from UNIX timestamp
+pub const fn from_unix_timestamp(unix_timestamp: i64) -> Self {
+Self { unix_timestamp }
+}
+
+/// Create new from `OffsetDateTime`
+///
+/// The given date/time is converted to UTC and the actual
+/// time zone is discarded.
+pub const fn from_utc(dt: OffsetDateTime) -> Self {
+Self::from_unix_timestamp(dt.unix_timestamp())
+}
+
+/// Create new from `PrimitiveDateTime`
+///
+/// Implicitly assumes that the given date/time is in UTC!
+/// Otherwise the original value must only be reobtained with
+/// [`Self::into_primitive()`].
+pub const fn from_primitive(dt: PrimitiveDateTime) -> Self {
+Self::from_utc(dt.assume_utc())
+}
+
+/// Convert to UNIX timestamp
+pub const fn into_unix_timestamp(self) -> i64 {
+let Self { unix_timestamp } = self;
+unix_timestamp
+}
+
+/// Convert to UTC `OffsetDateTime`
+pub fn into_utc(self) -> OffsetDateTime {
+let Self { unix_timestamp } = self;
+let utc_datetime =
+OffsetDateTime::from_unix_timestamp(unix_timestamp).expect("valid UNIX timestamp");
+debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
+utc_datetime
+}
+
+/// Convert to `OffsetDateTime` with the given time zone
+pub fn into_offset(self, offset: UtcOffset) -> OffsetDateTime {
+self.into_utc().to_offset(offset)
+}
+
+/// Convert to `PrimitiveDateTime` without any time zone
+///
+/// The value should have been constructed with [`Self::from_primitive()`].
+/// Otherwise the time zone is implicitly assumed to be UTC.
+pub fn into_primitive(self) -> PrimitiveDateTime {
+let utc_datetime = self.into_utc();
+// Discard the UTC time zone offset
+debug_assert_eq!(UtcOffset::UTC, utc_datetime.offset());
+PrimitiveDateTime::new(utc_datetime.date(), utc_datetime.time())
+}
+}
+
+impl fmt::Debug for DateTime {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+let utc_rfc3339 = self.into_utc().format(&Rfc3339).map_err(|_| fmt::Error)?;
+f.write_str(&utc_rfc3339)
+}
+}
+
 pub use crate::error::TantivyError;
+pub use crate::future_result::FutureResult;

 /// Tantivy result.
 ///
@@ -134,8 +219,9 @@ pub use crate::error::TantivyError;
 /// and instead, refer to this as `crate::Result<T>`.
 pub type Result<T> = std::result::Result<T, TantivyError>;

-/// Tantivy DateTime
-pub type DateTime = chrono::DateTime<chrono::Utc>;
+/// Result for an Async io operation.
+#[cfg(feature = "quickwit")]
+pub type AsyncIoResult<T> = std::result::Result<T, crate::error::AsyncIoError>;

 mod core;
 mod indexer;
@@ -304,6 +390,7 @@ pub mod tests {
 use crate::core::SegmentReader;
 use crate::docset::{DocSet, TERMINATED};
 use crate::fastfield::FastFieldReader;
+use crate::merge_policy::NoMergePolicy;
 use crate::query::BooleanQuery;
 use crate::schema::*;
 use crate::{DocAddress, Index, Postings, ReloadPolicy};
@@ -931,8 +1018,6 @@ pub mod tests {
 // motivated by #729
 #[test]
 fn test_update_via_delete_insert() -> crate::Result<()> {
-use futures::executor::block_on;
-
 use crate::collector::Count;
 use crate::indexer::NoMergePolicy;
 use crate::query::AllQuery;
@@ -986,8 +1071,7 @@ pub mod tests {
 .iter()
 .map(|reader| reader.segment_id())
 .collect();
-block_on(index_writer.merge(&segment_ids)).unwrap();
+index_writer.merge(&segment_ids).wait()?;

 index_reader.reload()?;
 let searcher = index_reader.searcher();
 assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
@@ -1002,6 +1086,7 @@ pub mod tests {
 let schema = builder.build();
 let index = Index::create_in_dir(&index_path, schema)?;
 let mut writer = index.writer(50_000_000)?;
+writer.set_merge_policy(Box::new(NoMergePolicy));
 for _ in 0..5000 {
 writer.add_document(doc!(body => "foo"))?;
 writer.add_document(doc!(body => "boo"))?;
@@ -1013,8 +1098,7 @@ pub mod tests {
 writer.delete_term(Term::from_field_text(body, "foo"));
 writer.commit()?;
 let segment_ids = index.searchable_segment_ids()?;
-let _ = futures::executor::block_on(writer.merge(&segment_ids));
+writer.merge(&segment_ids).wait()?;

 assert!(index.validate_checksum()?.is_empty());
 Ok(())
 }
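A small sketch of the new `DateTime` type introduced in this diff, using only the constructors and conversions shown above (`from_utc`, `into_unix_timestamp`, `into_utc`) together with the re-exported `time` crate; the sample timestamp is the one used in the tests in this commit range.

```rust
use tantivy::time::format_description::well_known::Rfc3339;
use tantivy::time::OffsetDateTime;
use tantivy::DateTime;

fn main() {
    // Parse an RFC 3339 timestamp with the re-exported `time` crate...
    let parsed = OffsetDateTime::parse("1985-04-12T23:20:50.52Z", &Rfc3339).unwrap();
    // ...and turn it into tantivy's second-precision `DateTime`.
    let dt = DateTime::from_utc(parsed);
    assert_eq!(dt.into_unix_timestamp(), parsed.unix_timestamp());
    // Converting back always yields a UTC `OffsetDateTime`.
    println!("{:?}", dt.into_utc());
}
```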
@@ -1,6 +1,6 @@
 use std::io;

-use common::{BinarySerializable, VInt};
+use common::VInt;

 use crate::directory::{FileSlice, OwnedBytes};
 use crate::fieldnorm::FieldNormReader;
@@ -28,9 +28,7 @@ pub struct BlockSegmentPostings {
 freq_decoder: BlockDecoder,
 freq_reading_option: FreqReadingOption,
 block_max_score_cache: Option<Score>,
-
 doc_freq: u32,
-
 data: OwnedBytes,
 pub(crate) skip_reader: SkipReader,
 }
@@ -70,13 +68,13 @@ fn decode_vint_block(
 fn split_into_skips_and_postings(
 doc_freq: u32,
 mut bytes: OwnedBytes,
-) -> (Option<OwnedBytes>, OwnedBytes) {
+) -> io::Result<(Option<OwnedBytes>, OwnedBytes)> {
 if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
-return (None, bytes);
+return Ok((None, bytes));
 }
-let skip_len = VInt::deserialize(&mut bytes).expect("Data corrupted").0 as usize;
+let skip_len = VInt::deserialize_u64(&mut bytes)? as usize;
 let (skip_data, postings_data) = bytes.split(skip_len);
-(Some(skip_data), postings_data)
+Ok((Some(skip_data), postings_data))
 }

 impl BlockSegmentPostings {
@@ -92,8 +90,8 @@ impl BlockSegmentPostings {
 (_, _) => FreqReadingOption::ReadFreq,
 };

-let (skip_data_opt, postings_data) =
-split_into_skips_and_postings(doc_freq, data.read_bytes()?);
+let bytes = data.read_bytes()?;
+let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, bytes)?;
 let skip_reader = match skip_data_opt {
 Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
 None => SkipReader::new(OwnedBytes::empty(), doc_freq, record_option),
@@ -166,8 +164,9 @@ impl BlockSegmentPostings {
 // # Warning
 //
 // This does not reset the positions list.
-pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) {
-let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
+pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) -> io::Result<()> {
+let (skip_data_opt, postings_data) =
+split_into_skips_and_postings(doc_freq, postings_data)?;
 self.data = postings_data;
 self.block_max_score_cache = None;
 self.loaded_offset = std::usize::MAX;
@@ -178,6 +177,7 @@ impl BlockSegmentPostings {
 }
 self.doc_freq = doc_freq;
 self.load_block();
+Ok(())
 }

 /// Returns the overall number of documents in the block postings.
@@ -322,7 +322,7 @@ impl BlockSegmentPostings {

 /// Advance to the next block.
 ///
-/// Returns false iff there was no remaining blocks.
+/// Returns false if and only if there is no remaining block.
 pub fn advance(&mut self) {
 self.skip_reader.advance();
 self.block_max_score_cache = None;
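The change above replaces the panicking `.expect("Data corrupted")` with an `io::Result` that is propagated through `reset` and the constructor via `?`. A generic sketch of that pattern, deliberately not using tantivy's actual types, for readers unfamiliar with it:

```rust
use std::io;

// Before the refactor, a corrupt length prefix would panic the indexing thread.
// After it, the parse error is surfaced as an io::Result and handled by the caller.
fn split_len_prefixed(bytes: &[u8]) -> io::Result<(&[u8], &[u8])> {
    let len = bytes
        .first()
        .copied()
        .ok_or_else(|| io::Error::new(io::ErrorKind::UnexpectedEof, "missing length byte"))?
        as usize;
    if bytes.len() < 1 + len {
        return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "truncated payload"));
    }
    Ok(bytes[1..].split_at(len))
}

fn main() -> io::Result<()> {
    let (head, rest) = split_len_prefixed(&[2, 10, 20, 30])?;
    assert_eq!(head, &[10, 20]);
    assert_eq!(rest, &[30]);
    Ok(())
}
```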
@@ -1,27 +1,31 @@
 use std::io;
-use crate::Term;
+
+use crate::fastfield::MultiValuedFastFieldWriter;
 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::postings::postings_writer::SpecializedPostingsWriter;
-use crate::postings::recorder::{Recorder, NothingRecorder};
+use crate::postings::recorder::{BufferLender, NothingRecorder, Recorder};
 use crate::postings::stacker::Addr;
-use crate::postings::{PostingsWriter, IndexingContext, UnorderedTermId, FieldSerializer};
+use crate::postings::{
+FieldSerializer, IndexingContext, IndexingPosition, PostingsWriter, UnorderedTermId,
+};
+use crate::schema::term::as_json_path_type_value_bytes;
+use crate::schema::Type;
+use crate::tokenizer::TokenStream;
+use crate::{DocId, Term};

-pub struct JsonPostingsWriter {
-str_posting_writer: Box<dyn PostingsWriter>,
-non_str_posting_writer: Box<dyn PostingsWriter>,
+#[derive(Default)]
+pub(crate) struct JsonPostingsWriter<Rec: Recorder> {
+str_posting_writer: SpecializedPostingsWriter<Rec>,
+non_str_posting_writer: SpecializedPostingsWriter<NothingRecorder>,
 }

-impl JsonPostingsWriter {
-pub(crate) fn new<R: Recorder>() -> Self {
-JsonPostingsWriter {
-str_posting_writer: SpecializedPostingsWriter::<R>::new_boxed(),
-non_str_posting_writer: SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
-}
+impl<Rec: Recorder> From<JsonPostingsWriter<Rec>> for Box<dyn PostingsWriter> {
+fn from(json_postings_writer: JsonPostingsWriter<Rec>) -> Box<dyn PostingsWriter> {
+Box::new(json_postings_writer)
 }
 }

-impl PostingsWriter for JsonPostingsWriter {
+impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
 fn subscribe(
 &mut self,
 doc: crate::DocId,
@@ -29,8 +33,26 @@ impl PostingsWriter for JsonPostingsWriter {
 term: &crate::Term,
 ctx: &mut IndexingContext,
 ) -> UnorderedTermId {
-let term_type = term.typ();
-todo!()
+self.non_str_posting_writer.subscribe(doc, pos, term, ctx)
+}
+
+fn index_text(
+&mut self,
+doc_id: DocId,
+token_stream: &mut dyn TokenStream,
+term_buffer: &mut Term,
+ctx: &mut IndexingContext,
+indexing_position: &mut IndexingPosition,
+_fast_field_writer: Option<&mut MultiValuedFastFieldWriter>,
+) {
+self.str_posting_writer.index_text(
+doc_id,
+token_stream,
+term_buffer,
+ctx,
+indexing_position,
+None,
+);
 }

 /// The actual serialization format is handled by the `PostingsSerializer`.
@@ -38,15 +60,38 @@ impl PostingsWriter for JsonPostingsWriter {
 &self,
 term_addrs: &[(Term<&[u8]>, Addr, UnorderedTermId)],
 doc_id_map: Option<&DocIdMapping>,
-indexing_context: &IndexingContext,
+ctx: &IndexingContext,
 serializer: &mut FieldSerializer,
 ) -> io::Result<()> {
-todo!()
+let mut buffer_lender = BufferLender::default();
+for (term, addr, _) in term_addrs {
+// TODO optimization opportunity here.
+if let Some((_, typ, _)) = as_json_path_type_value_bytes(term.value_bytes()) {
+if typ == Type::Str {
+SpecializedPostingsWriter::<Rec>::serialize_one_term(
+term,
+*addr,
+doc_id_map,
+&mut buffer_lender,
+ctx,
+serializer,
+)?;
+} else {
+SpecializedPostingsWriter::<NothingRecorder>::serialize_one_term(
+term,
+*addr,
+doc_id_map,
+&mut buffer_lender,
+ctx,
+serializer,
+)?;
+}
+}
+}
+Ok(())
 }

 fn total_num_tokens(&self) -> u64 {
-todo!()
+self.str_posting_writer.total_num_tokens() + self.non_str_posting_writer.total_num_tokens()
 }
 }
@@ -7,6 +7,7 @@ pub(crate) use self::block_search::branchless_binary_search;
 mod block_segment_postings;
 pub(crate) mod compression;
 mod indexing_context;
+mod json_postings_writer;
 mod per_field_postings_writer;
 mod postings;
 mod postings_writer;
@@ -15,7 +16,6 @@ mod segment_postings;
 mod serializer;
 mod skip;
 mod stacker;
-mod json_postings_writer;
 mod term_info;

 pub use self::block_segment_postings::BlockSegmentPostings;
@@ -34,36 +34,38 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
 .get_indexing_options()
 .map(|indexing_options| match indexing_options.index_option() {
 IndexRecordOption::Basic => {
-SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
+SpecializedPostingsWriter::<NothingRecorder>::default().into()
 }
 IndexRecordOption::WithFreqs => {
-SpecializedPostingsWriter::<TermFrequencyRecorder>::new_boxed()
+SpecializedPostingsWriter::<TermFrequencyRecorder>::default().into()
 }
 IndexRecordOption::WithFreqsAndPositions => {
-SpecializedPostingsWriter::<TfAndPositionRecorder>::new_boxed()
+SpecializedPostingsWriter::<TfAndPositionRecorder>::default().into()
 }
 })
-.unwrap_or_else(SpecializedPostingsWriter::<NothingRecorder>::new_boxed),
+.unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::default().into()),
 FieldType::U64(_)
 | FieldType::I64(_)
 | FieldType::F64(_)
 | FieldType::Date(_)
 | FieldType::Bytes(_)
-| FieldType::Facet(_) => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
+| FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<NothingRecorder>::default()),
 FieldType::JsonObject(ref json_object_options) => {
-Box::new(if let Some(text_indexing_option) = json_object_options.get_text_indexing_option() {
+if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
 match text_indexing_option.index_option() {
-IndexRecordOption::Basic => JsonPostingsWriter::new::<NothingRecorder>(),
+IndexRecordOption::Basic => {
+JsonPostingsWriter::<NothingRecorder>::default().into()
+}
 IndexRecordOption::WithFreqs => {
-JsonPostingsWriter::new::<TermFrequencyRecorder>()
+JsonPostingsWriter::<TermFrequencyRecorder>::default().into()
 }
 IndexRecordOption::WithFreqsAndPositions => {
-JsonPostingsWriter::new::<TfAndPositionRecorder>()
+JsonPostingsWriter::<TfAndPositionRecorder>::default().into()
 }
 }
 } else {
-JsonPostingsWriter::new::<NothingRecorder>()
-})
-},
+JsonPostingsWriter::<NothingRecorder>::default().into()
+}
+}
 }
 }
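The hunk above drops the `new_boxed()` helpers in favor of `From` impls, so call sites can write `SpecializedPostingsWriter::<R>::default().into()`. A self-contained sketch of that conversion pattern with hypothetical types (not tantivy's real trait):

```rust
trait PostingsSink {
    fn push(&mut self, term: &str);
}

#[derive(Default)]
struct CountingSink {
    count: usize,
}

impl PostingsSink for CountingSink {
    fn push(&mut self, _term: &str) {
        self.count += 1;
    }
}

// Same shape as the diff: a `From` impl instead of a `new_boxed()` helper,
// so `CountingSink::default().into()` yields a `Box<dyn PostingsSink>`.
impl From<CountingSink> for Box<dyn PostingsSink> {
    fn from(sink: CountingSink) -> Box<dyn PostingsSink> {
        Box::new(sink)
    }
}

fn main() {
    let mut sink: Box<dyn PostingsSink> = CountingSink::default().into();
    sink.push("hello");
}
```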
@@ -2,17 +2,19 @@ use std::collections::HashMap;
|
|||||||
use std::io;
|
use std::io;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::ops::Range;
|
use std::ops::Range;
|
||||||
|
|
||||||
use fnv::FnvHashMap;
|
use fnv::FnvHashMap;
|
||||||
|
|
||||||
use super::stacker::Addr;
|
use super::stacker::Addr;
|
||||||
|
use crate::fastfield::MultiValuedFastFieldWriter;
|
||||||
use crate::fieldnorm::FieldNormReaders;
|
use crate::fieldnorm::FieldNormReaders;
|
||||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||||
use crate::postings::recorder::{BufferLender, Recorder, NothingRecorder};
|
use crate::postings::recorder::{BufferLender, Recorder};
|
||||||
use crate::postings::{
|
use crate::postings::{
|
||||||
FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter,
|
FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter,
|
||||||
UnorderedTermId,
|
UnorderedTermId,
|
||||||
};
|
};
|
||||||
use crate::schema::{Field, FieldType, Schema, Term, Type};
|
use crate::schema::{Field, FieldType, Schema, Term};
|
||||||
use crate::termdict::TermOrdinal;
|
use crate::termdict::TermOrdinal;
|
||||||
use crate::tokenizer::{Token, TokenStream, MAX_TOKEN_LEN};
|
use crate::tokenizer::{Token, TokenStream, MAX_TOKEN_LEN};
|
||||||
use crate::DocId;
|
use crate::DocId;
|
||||||
@@ -48,7 +50,7 @@ fn make_field_partition(
|
|||||||
/// It pushes all term, one field at a time, towards the
|
/// It pushes all term, one field at a time, towards the
|
||||||
/// postings serializer.
|
/// postings serializer.
|
||||||
pub(crate) fn serialize_postings(
|
pub(crate) fn serialize_postings(
|
||||||
indexing_context: IndexingContext,
|
ctx: IndexingContext,
|
||||||
per_field_postings_writers: &PerFieldPostingsWriter,
|
per_field_postings_writers: &PerFieldPostingsWriter,
|
||||||
fieldnorm_readers: FieldNormReaders,
|
fieldnorm_readers: FieldNormReaders,
|
||||||
doc_id_map: Option<&DocIdMapping>,
|
doc_id_map: Option<&DocIdMapping>,
|
||||||
@@ -56,15 +58,13 @@ pub(crate) fn serialize_postings(
|
|||||||
serializer: &mut InvertedIndexSerializer,
|
serializer: &mut InvertedIndexSerializer,
|
||||||
) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
|
) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
|
||||||
let mut term_offsets: Vec<(Term<&[u8]>, Addr, UnorderedTermId)> =
|
let mut term_offsets: Vec<(Term<&[u8]>, Addr, UnorderedTermId)> =
|
||||||
Vec::with_capacity(indexing_context.term_index.len());
|
Vec::with_capacity(ctx.term_index.len());
|
||||||
term_offsets.extend(indexing_context.term_index.iter());
|
term_offsets.extend(ctx.term_index.iter());
|
||||||
term_offsets.sort_unstable_by_key(|(k, _, _)| k.clone());
|
term_offsets.sort_unstable_by_key(|(k, _, _)| k.clone());
|
||||||
|
|
||||||
let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
|
let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
|
||||||
HashMap::new();
|
HashMap::new();
|
||||||
|
|
||||||
let field_offsets = make_field_partition(&term_offsets);
|
let field_offsets = make_field_partition(&term_offsets);
|
||||||
|
|
||||||
for (field, byte_offsets) in field_offsets {
|
for (field, byte_offsets) in field_offsets {
|
||||||
let field_entry = schema.get_field_entry(field);
|
let field_entry = schema.get_field_entry(field);
|
||||||
match *field_entry.field_type() {
|
match *field_entry.field_type() {
|
||||||
@@ -94,7 +94,7 @@ pub(crate) fn serialize_postings(
|
|||||||
postings_writer.serialize(
|
postings_writer.serialize(
|
||||||
&term_offsets[byte_offsets],
|
&term_offsets[byte_offsets],
|
||||||
doc_id_map,
|
doc_id_map,
|
||||||
&indexing_context,
|
&ctx,
|
||||||
&mut field_serializer,
|
&mut field_serializer,
|
||||||
)?;
|
)?;
|
||||||
field_serializer.close()?;
|
field_serializer.close()?;
|
||||||
@@ -118,14 +118,14 @@ pub(crate) trait PostingsWriter {
|
|||||||
/// * doc - the document id
|
/// * doc - the document id
|
||||||
/// * pos - the term position (expressed in tokens)
|
/// * pos - the term position (expressed in tokens)
|
||||||
/// * term - the term
|
/// * term - the term
|
||||||
/// * indexing_context - Contains a term hashmap and a memory arena to store all necessary
|
/// * ctx - Contains a term hashmap and a memory arena to store all necessary posting list
|
||||||
/// posting list information.
|
/// information.
|
||||||
fn subscribe(
|
fn subscribe(
|
||||||
&mut self,
|
&mut self,
|
||||||
doc: DocId,
|
doc: DocId,
|
||||||
pos: u32,
|
pos: u32,
|
||||||
term: &Term,
|
term: &Term,
|
||||||
indexing_context: &mut IndexingContext,
|
ctx: &mut IndexingContext,
|
||||||
) -> UnorderedTermId;
|
) -> UnorderedTermId;
|
||||||
|
|
||||||
/// Serializes the postings on disk.
|
/// Serializes the postings on disk.
|
||||||
@@ -134,7 +134,7 @@ pub(crate) trait PostingsWriter {
|
|||||||
&self,
|
&self,
|
||||||
term_addrs: &[(Term<&[u8]>, Addr, UnorderedTermId)],
|
term_addrs: &[(Term<&[u8]>, Addr, UnorderedTermId)],
|
||||||
doc_id_map: Option<&DocIdMapping>,
|
doc_id_map: Option<&DocIdMapping>,
|
||||||
indexing_context: &IndexingContext,
|
ctx: &IndexingContext,
|
||||||
serializer: &mut FieldSerializer,
|
serializer: &mut FieldSerializer,
|
||||||
) -> io::Result<()>;
|
) -> io::Result<()>;
|
||||||
|
|
||||||
@@ -142,15 +142,15 @@ pub(crate) trait PostingsWriter {
|
|||||||
fn index_text(
|
fn index_text(
|
||||||
&mut self,
|
&mut self,
|
||||||
doc_id: DocId,
|
doc_id: DocId,
|
||||||
field: Field,
|
|
||||||
token_stream: &mut dyn TokenStream,
|
token_stream: &mut dyn TokenStream,
|
||||||
term_buffer: &mut Term,
|
term_buffer: &mut Term,
|
||||||
indexing_context: &mut IndexingContext,
|
ctx: &mut IndexingContext,
|
||||||
indexing_position: &mut IndexingPosition,
|
indexing_position: &mut IndexingPosition,
|
||||||
|
mut term_id_fast_field_writer_opt: Option<&mut MultiValuedFastFieldWriter>,
|
||||||
) {
|
) {
|
||||||
term_buffer.set_field(Type::Str, field);
|
let end_of_path_idx = term_buffer.as_slice().len();
|
||||||
let mut num_tokens = 0;
|
let mut num_tokens = 0;
|
||||||
let mut end_position = 0;
|
let mut end_position = indexing_position.end_position;
|
||||||
token_stream.process(&mut |token: &Token| {
|
token_stream.process(&mut |token: &Token| {
|
||||||
// We skip all tokens with a len greater than u16.
|
// We skip all tokens with a len greater than u16.
|
||||||
if token.text.len() > MAX_TOKEN_LEN {
|
if token.text.len() > MAX_TOKEN_LEN {
|
||||||
@@ -162,98 +162,26 @@ pub(crate) trait PostingsWriter {
|
|||||||
);
|
);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
term_buffer.set_text(token.text.as_str());
|
term_buffer.truncate(end_of_path_idx);
|
||||||
|
term_buffer.append_bytes(token.text.as_bytes());
|
||||||
let start_position = indexing_position.end_position + token.position as u32;
|
let start_position = indexing_position.end_position + token.position as u32;
|
||||||
end_position = start_position + token.position_length as u32;
|
end_position = start_position + token.position_length as u32;
|
||||||
self.subscribe(doc_id, start_position, term_buffer, indexing_context);
|
let unordered_term_id = self.subscribe(doc_id, start_position, term_buffer, ctx);
|
||||||
|
if let Some(term_id_fast_field_writer) = term_id_fast_field_writer_opt.as_mut() {
|
||||||
|
term_id_fast_field_writer.add_val(unordered_term_id);
|
||||||
|
}
|
||||||
|
|
||||||
num_tokens += 1;
|
num_tokens += 1;
|
||||||
});
|
});
|
||||||
|
|
||||||
indexing_position.end_position = end_position + POSITION_GAP;
|
indexing_position.end_position = end_position + POSITION_GAP;
|
||||||
indexing_position.num_tokens += num_tokens;
|
indexing_position.num_tokens += num_tokens;
|
||||||
|
term_buffer.truncate(end_of_path_idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn total_num_tokens(&self) -> u64;
|
fn total_num_tokens(&self) -> u64;
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) struct JsonPostingsWriter<Rec: Recorder> {
|
|
||||||
text_postings_writer: SpecializedPostingsWriter<Rec>,
|
|
||||||
other_postings_writer: SpecializedPostingsWriter<NothingRecorder>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Rec: Recorder> JsonPostingsWriter<Rec> {
|
|
||||||
pub fn new_boxed() -> Box<dyn PostingsWriter> {
|
|
||||||
let text_postings_writer: SpecializedPostingsWriter<Rec> = SpecializedPostingsWriter {
|
|
||||||
total_num_tokens: 0u64,
|
|
||||||
_recorder_type: PhantomData,
|
|
||||||
};
|
|
||||||
let other_postings_writer: SpecializedPostingsWriter<NothingRecorder> =
|
|
||||||
SpecializedPostingsWriter {
|
|
||||||
total_num_tokens: 0u64,
|
|
||||||
_recorder_type: PhantomData,
|
|
||||||
};
|
|
||||||
Box::new(JsonPostingsWriter {
|
|
||||||
text_postings_writer,
|
|
||||||
other_postings_writer,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
|
|
||||||
fn subscribe(
|
|
||||||
&mut self,
|
|
||||||
doc: DocId,
|
|
||||||
pos: u32,
|
|
||||||
term: &Term,
|
|
||||||
ctx: &mut IndexingContext
|
|
||||||
) -> UnorderedTermId {
|
|
||||||
// TODO will the unordered term id be correct!?
|
|
||||||
debug_assert!(term.is_json());
|
|
||||||
if term.typ() == Type::Str {
|
|
||||||
self.text_postings_writer
|
|
||||||
.subscribe(doc, pos, term, ctx)
|
|
||||||
} else {
|
|
||||||
self.other_postings_writer
|
|
||||||
.subscribe(doc, pos, term, ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn serialize(
|
|
||||||
&self,
|
|
||||||
term_addrs: &[(Term<&[u8]>, Addr, UnorderedTermId)],
|
|
||||||
doc_id_map: Option<&DocIdMapping>,
|
|
||||||
ctx: &IndexingContext,
|
|
||||||
serializer: &mut FieldSerializer,
|
|
||||||
) -> io::Result<()> {
|
|
||||||
let mut buffer_lender = BufferLender::default();
|
|
||||||
for (term, addr, _) in term_addrs {
|
|
||||||
if term.typ() == Type::Str {
|
|
||||||
SpecializedPostingsWriter::<Rec>::serialize_one_term(
|
|
||||||
term,
|
|
||||||
*addr,
|
|
||||||
doc_id_map,
|
|
||||||
&mut buffer_lender,
|
|
||||||
ctx,
|
|
||||||
serializer,
|
|
||||||
)?;
|
|
||||||
} else {
|
|
||||||
SpecializedPostingsWriter::<NothingRecorder>::serialize_one_term(
|
|
||||||
term,
|
|
||||||
*addr,
|
|
||||||
doc_id_map,
|
|
||||||
&mut buffer_lender,
|
|
||||||
ctx,
|
|
||||||
serializer,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn total_num_tokens(&self) -> u64 {
|
|
||||||
self.text_postings_writer.total_num_tokens() + self.other_postings_writer.total_num_tokens()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The `SpecializedPostingsWriter` is just here to remove dynamic
|
/// The `SpecializedPostingsWriter` is just here to remove dynamic
|
||||||
/// dispatch to the recorder information.
|
/// dispatch to the recorder information.
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
@@ -262,17 +190,17 @@ pub(crate) struct SpecializedPostingsWriter<Rec: Recorder> {
|
|||||||
_recorder_type: PhantomData<Rec>,
|
_recorder_type: PhantomData<Rec>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
|
impl<Rec: Recorder> From<SpecializedPostingsWriter<Rec>> for Box<dyn PostingsWriter> {
|
||||||
pub fn new_boxed() -> Box<dyn PostingsWriter> {
|
fn from(
|
||||||
let new_specialized_posting_writer: Self = Self {
|
specialized_postings_writer: SpecializedPostingsWriter<Rec>,
|
||||||
total_num_tokens: 0u64,
|
) -> Box<dyn PostingsWriter> {
|
||||||
_recorder_type: PhantomData,
|
Box::new(specialized_postings_writer)
|
||||||
};
|
|
||||||
Box::new(new_specialized_posting_writer)
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn serialize_one_term(
|
pub(crate) fn serialize_one_term(
|
||||||
term: &Term<&[u8]>,
|
term: &Term<&[u8]>,
|
||||||
addr: Addr,
|
addr: Addr,
|
||||||
doc_id_map: Option<&DocIdMapping>,
|
doc_id_map: Option<&DocIdMapping>,
|
||||||
@@ -280,7 +208,7 @@ impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
|
|||||||
ctx: &IndexingContext,
|
ctx: &IndexingContext,
|
||||||
serializer: &mut FieldSerializer,
|
serializer: &mut FieldSerializer,
|
||||||
) -> io::Result<()> {
|
) -> io::Result<()> {
|
||||||
let recorder: Rec = ctx.arena.read(addr);
|
let recorder: Rec = ctx.term_index.read(addr);
|
||||||
let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
|
let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
|
||||||
serializer.new_term(term.value_bytes(), term_doc_freq)?;
|
serializer.new_term(term.value_bytes(), term_doc_freq)?;
|
||||||
recorder.serialize(&ctx.arena, doc_id_map, serializer, buffer_lender);
|
recorder.serialize(&ctx.arena, doc_id_map, serializer, buffer_lender);
|
||||||
@@ -295,14 +223,11 @@ impl<Rec: Recorder> PostingsWriter for SpecializedPostingsWriter<Rec> {
         doc: DocId,
         position: u32,
         term: &Term,
-        indexing_context: &mut IndexingContext,
+        ctx: &mut IndexingContext,
     ) -> UnorderedTermId {
         debug_assert!(term.as_slice().len() >= 4);
         self.total_num_tokens += 1;
-        let (term_index, arena) = (
-            &mut indexing_context.term_index,
-            &mut indexing_context.arena,
-        );
+        let (term_index, arena) = (&mut ctx.term_index, &mut ctx.arena);
         term_index.mutate_or_create(term.as_slice(), |opt_recorder: Option<Rec>| {
             if let Some(mut recorder) = opt_recorder {
                 let current_doc = recorder.current_doc();
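Note: the one-liner `let (term_index, arena) = (&mut ctx.term_index, &mut ctx.arena);` is a split borrow: two fields of the same context struct are mutably borrowed at once so that both can be used inside the `mutate_or_create` closure. A standalone sketch of the idea, with made-up field and type names rather than the real `IndexingContext`:

```rust
// Split borrow: take `&mut` references to two different fields of the same
// struct in one tuple expression, so the borrow checker sees them as disjoint.
struct Context {
    term_index: Vec<String>,
    arena: Vec<u8>,
}

fn record(ctx: &mut Context, term: &str) {
    let (term_index, arena) = (&mut ctx.term_index, &mut ctx.arena);
    // Both fields can now be mutated in the same scope.
    term_index.push(term.to_string());
    arena.extend_from_slice(term.as_bytes());
}

fn main() {
    let mut ctx = Context { term_index: Vec::new(), arena: Vec::new() };
    record(&mut ctx, "hello");
    assert_eq!(ctx.term_index.len(), 1);
}
```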
@@ -313,7 +238,7 @@ impl<Rec: Recorder> PostingsWriter for SpecializedPostingsWriter<Rec> {
                 recorder.record_position(position, arena);
                 recorder
             } else {
-                let mut recorder = Rec::new();
+                let mut recorder = Rec::default();
                 recorder.new_doc(doc, arena);
                 recorder.record_position(position, arena);
                 recorder
@@ -330,14 +255,7 @@ impl<Rec: Recorder> PostingsWriter for SpecializedPostingsWriter<Rec> {
     ) -> io::Result<()> {
         let mut buffer_lender = BufferLender::default();
         for (term, addr, _) in term_addrs {
-            Self::serialize_one_term(
-                term,
-                *addr,
-                doc_id_map,
-                &mut buffer_lender,
-                ctx,
-                serializer,
-            )?;
+            Self::serialize_one_term(term, *addr, doc_id_map, &mut buffer_lender, ctx, serializer)?;
         }
         Ok(())
     }

@@ -56,9 +56,7 @@ impl<'a> Iterator for VInt32Reader<'a> {
 /// * the document id
 /// * the term frequency
 /// * the term positions
-pub(crate) trait Recorder: Copy + 'static {
-    ///
-    fn new() -> Self;
+pub(crate) trait Recorder: Copy + Default + 'static {
     /// Returns the current document
     fn current_doc(&self) -> u32;
     /// Starts recording information about a new document
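Note: the trait now requires `Default` instead of declaring its own `fn new()`, so generic code can build a recorder with `Rec::default()`, and each recorder moves its constructor into a separate `impl Default` block, as the following hunks show. A simplified, self-contained sketch of the pattern (not the real tantivy trait):

```rust
// A trait that bounds its implementors by `Default` rather than defining
// a bespoke constructor method.
trait Recorder: Copy + Default + 'static {
    fn current_doc(&self) -> u32;
    fn new_doc(&mut self, doc: u32);
}

#[derive(Copy, Clone)]
struct SimpleRecorder {
    current_doc: u32,
}

// The constructor lives in a plain `Default` impl, separate from the trait impl.
impl Default for SimpleRecorder {
    fn default() -> Self {
        SimpleRecorder { current_doc: u32::MAX }
    }
}

impl Recorder for SimpleRecorder {
    fn current_doc(&self) -> u32 {
        self.current_doc
    }
    fn new_doc(&mut self, doc: u32) {
        self.current_doc = doc;
    }
}

// Generic code no longer needs a trait-level `new()`.
fn record_first_doc<R: Recorder>(doc: u32) -> R {
    let mut recorder = R::default();
    recorder.new_doc(doc);
    recorder
}

fn main() {
    let recorder: SimpleRecorder = record_first_doc(7);
    assert_eq!(recorder.current_doc(), 7);
}
```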
@@ -90,14 +88,16 @@ pub struct NothingRecorder {
     current_doc: DocId,
 }
 
-impl Recorder for NothingRecorder {
-    fn new() -> Self {
+impl Default for NothingRecorder {
+    fn default() -> Self {
         NothingRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: u32::max_value(),
         }
     }
+}
+
+impl Recorder for NothingRecorder {
     fn current_doc(&self) -> DocId {
         self.current_doc
     }
@@ -152,8 +152,8 @@ pub struct TermFrequencyRecorder {
     term_doc_freq: u32,
 }
 
-impl Recorder for TermFrequencyRecorder {
-    fn new() -> Self {
+impl Default for TermFrequencyRecorder {
+    fn default() -> Self {
         TermFrequencyRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: 0,
@@ -161,7 +161,9 @@ impl Recorder for TermFrequencyRecorder {
             term_doc_freq: 0u32,
         }
     }
+}
+
+impl Recorder for TermFrequencyRecorder {
     fn current_doc(&self) -> DocId {
         self.current_doc
     }
@@ -223,15 +225,18 @@ pub struct TfAndPositionRecorder {
     current_doc: DocId,
     term_doc_freq: u32,
 }
-impl Recorder for TfAndPositionRecorder {
-    fn new() -> Self {
+
+impl Default for TfAndPositionRecorder {
+    fn default() -> Self {
         TfAndPositionRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: u32::max_value(),
             term_doc_freq: 0u32,
         }
     }
+}
+
+impl Recorder for TfAndPositionRecorder {
     fn current_doc(&self) -> DocId {
         self.current_doc
     }

@@ -122,24 +122,21 @@ impl<'a> FieldSerializer<'a> {
         fieldnorm_reader: Option<FieldNormReader>,
     ) -> io::Result<FieldSerializer<'a>> {
         total_num_tokens.serialize(postings_write)?;
-        let mode = match field_type {
-            FieldType::Str(ref text_options) => {
-                if let Some(text_indexing_options) = text_options.get_indexing_options() {
-                    text_indexing_options.index_option()
-                } else {
-                    IndexRecordOption::Basic
-                }
-            }
-            _ => IndexRecordOption::Basic,
-        };
+        let index_record_option = field_type
+            .index_record_option()
+            .unwrap_or(IndexRecordOption::Basic);
         let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
         let average_fieldnorm = fieldnorm_reader
             .as_ref()
             .map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
             .unwrap_or(0.0);
-        let postings_serializer =
-            PostingsSerializer::new(postings_write, average_fieldnorm, mode, fieldnorm_reader);
-        let positions_serializer_opt = if mode.has_positions() {
+        let postings_serializer = PostingsSerializer::new(
+            postings_write,
+            average_fieldnorm,
+            index_record_option,
+            fieldnorm_reader,
+        );
+        let positions_serializer_opt = if index_record_option.has_positions() {
             Some(PositionSerializer::new(positions_write))
         } else {
             None
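Note: the nested `match` over `FieldType` collapses into `field_type.index_record_option().unwrap_or(IndexRecordOption::Basic)`. The sketch below shows the shape of such an `Option`-returning accessor on a simplified stand-in for the schema types; the names and enum variants are illustrative only, not tantivy's definitions.

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum IndexRecordOption {
    Basic,
    WithFreqsAndPositions,
}

#[allow(dead_code)]
enum FieldType {
    // A text field may or may not carry an indexing option.
    Str(Option<IndexRecordOption>),
    U64,
}

impl FieldType {
    /// Returns the record option if this field type defines one.
    fn index_record_option(&self) -> Option<IndexRecordOption> {
        match self {
            FieldType::Str(opt) => *opt,
            _ => None,
        }
    }
}

fn main() {
    let field_type = FieldType::Str(Some(IndexRecordOption::WithFreqsAndPositions));
    // The caller no longer needs a nested `match`.
    let option = field_type
        .index_record_option()
        .unwrap_or(IndexRecordOption::Basic);
    assert_eq!(option, IndexRecordOption::WithFreqsAndPositions);
}
```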

@@ -47,7 +47,7 @@ fn find_pivot_doc(
 /// scorer in scorers[..pivot_len] and `scorer.doc()` for scorer in scorers[pivot_len..].
 /// Note: before and after calling this method, scorers need to be sorted by their `.doc()`.
 fn block_max_was_too_low_advance_one_scorer(
-    scorers: &mut Vec<TermScorerWithMaxScore>,
+    scorers: &mut [TermScorerWithMaxScore],
     pivot_len: usize,
 ) {
     debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
@@ -82,7 +82,7 @@ fn block_max_was_too_low_advance_one_scorer(
 // Given a list of term_scorers and a `ord` and assuming that `term_scorers[ord]` is sorted
 // except term_scorers[ord] that might be in advance compared to its ranks,
 // bubble up term_scorers[ord] in order to restore the ordering.
-fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
+fn restore_ordering(term_scorers: &mut [TermScorerWithMaxScore], ord: usize) {
     let doc = term_scorers[ord].doc();
     for i in ord + 1..term_scorers.len() {
         if term_scorers[i].doc() >= doc {
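Note: both functions above now take `&mut [TermScorerWithMaxScore]` instead of `&mut Vec<...>`, since they only reorder elements and never grow or shrink the collection; callers holding a `Vec` still compile thanks to deref coercion. A tiny sketch over plain `u32` doc ids, mirroring the visible bubble-up logic:

```rust
// Take a mutable slice rather than a mutable Vec when only reordering is needed.
fn restore_ordering(docs: &mut [u32], ord: usize) {
    // Bubble docs[ord] up until the slice is sorted again.
    let doc = docs[ord];
    for i in ord + 1..docs.len() {
        if docs[i] >= doc {
            break;
        }
        docs.swap(i - 1, i);
    }
}

fn main() {
    // `&mut Vec<u32>` coerces to `&mut [u32]` at the call site.
    let mut docs = vec![3u32, 9, 5, 7];
    restore_ordering(&mut docs, 1);
    assert_eq!(docs, vec![3, 5, 7, 9]);
}
```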

@@ -204,8 +204,8 @@ impl BooleanQuery {
 #[cfg(test)]
 mod tests {
     use super::BooleanQuery;
-    use crate::collector::DocSetCollector;
-    use crate::query::{QueryClone, TermQuery};
+    use crate::collector::{Count, DocSetCollector};
+    use crate::query::{QueryClone, QueryParser, TermQuery};
     use crate::schema::{IndexRecordOption, Schema, TEXT};
     use crate::{DocAddress, Index, Term};
 
@@ -282,4 +282,42 @@ mod tests {
         }
         Ok(())
     }
+
+    #[test]
+    pub fn test_json_array_pitfall_bag_of_terms() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let json_field = schema_builder.add_json_field("json", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        {
+            let mut index_writer = index.writer_for_tests()?;
+            index_writer.add_document(doc!(json_field=>json!({
+                "cart": [
+                    {"product_type": "sneakers", "attributes": {"color": "white"}},
+                    {"product_type": "t-shirt", "attributes": {"color": "red"}},
+                    {"product_type": "cd", "attributes": {"genre": "blues"}},
+                ]
+            })))?;
+            index_writer.commit()?;
+        }
+        let searcher = index.reader()?.searcher();
+        let doc_matches = |query: &str| {
+            let query_parser = QueryParser::for_index(&index, vec![json_field]);
+            let query = query_parser.parse_query(query).unwrap();
+            searcher.search(&query, &Count).unwrap() == 1
+        };
+        // As expected
+        assert!(doc_matches(
+            r#"cart.product_type:sneakers AND cart.attributes.color:white"#
+        ));
+        // Unexpected match, due to the fact that arrays do not act as nested docs.
+        assert!(doc_matches(
+            r#"cart.product_type:sneakers AND cart.attributes.color:red"#
+        ));
+        // However, obviously this works...
+        assert!(!doc_matches(
+            r#"cart.product_type:sneakers AND cart.attributes.color:blues"#
+        ));
+        Ok(())
+    }
 }
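Note: the new test documents a pitfall of JSON fields: array elements are flattened into one bag of `(path, token)` pairs per document, so a conjunction can match tokens coming from different array elements. The sketch below uses plain `serde_json` (not tantivy's indexing code) to show how the element boundary disappears during flattening; the `flatten` helper is purely illustrative.

```rust
use serde_json::{json, Value};

// Flatten a JSON value into (path, token) pairs. Array elements share the
// parent's path, so which element a token came from is lost.
fn flatten(prefix: &str, value: &Value, out: &mut Vec<(String, String)>) {
    match value {
        Value::Object(map) => {
            for (key, child) in map {
                let path = if prefix.is_empty() { key.clone() } else { format!("{prefix}.{key}") };
                flatten(&path, child, out);
            }
        }
        Value::Array(items) => {
            for child in items {
                flatten(prefix, child, out);
            }
        }
        other => out.push((prefix.to_string(), other.to_string())),
    }
}

fn main() {
    let doc = json!({"cart": [
        {"product_type": "sneakers", "attributes": {"color": "white"}},
        {"product_type": "t-shirt", "attributes": {"color": "red"}},
    ]});
    let mut pairs = Vec::new();
    flatten("", &doc, &mut pairs);
    // Both pairs exist in the same bag, hence the surprising conjunction match.
    assert!(pairs.contains(&("cart.product_type".to_string(), "\"sneakers\"".to_string())));
    assert!(pairs.contains(&("cart.attributes.color".to_string(), "\"red\"".to_string())));
}
```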

@@ -35,7 +35,7 @@ where TScoreCombiner: ScoreCombiner {
         .iter()
         .all(|scorer| scorer.freq_reading_option() == FreqReadingOption::ReadFreq)
     {
-        // Block wand is only available iff we read frequencies.
+        // Block wand is only available if we read frequencies.
         return SpecializedScorer::TermUnion(scorers);
     } else {
         return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(